code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import BeautifulSoup
from HTMLParser import HTMLParseError
from django.conf import settings
import re
# Whether tracker images/links should be stripped from post content
# (override with settings.DJANGOFEEDS_REMOVE_TRACKERS).
DJANGOFEEDS_REMOVE_TRACKERS = getattr(settings,
    "DJANGOFEEDS_REMOVE_TRACKERS", True)
# The obvious tracker images: any image/link URL starting with one of
# these prefixes is treated as a tracker and removed.
DJANGOFEEDS_TRACKER_SERVICES = getattr(settings,
    "DJANGOFEEDS_TRACKER_SERVICES", [
        'http://feedads',
        'http://feeds.feedburner.com/~r/',
        'http://feeds.feedburner.com/~ff/',
        'http://rss.feedsportal.com/c/',
        'http://ads.pheedo.com/',
        'http://a.rfihub.com/',
        'http://segment-pixel.invitemedia.com/',
        'http://pixel.quantserve.com/',
        'http://feeds.newscientist.com/',
        'http://mf.feeds.reuters.com/c/',
        'http://telegraph.feedsportal.com/c/',
])
# Images whose "width" attribute is below this value are treated as
# toolbar icons / 1px trackers and removed.
DJANGOFEEDS_SMALL_IMAGE_LIMIT = getattr(settings,
    "DJANGOFEEDS_SMALL_IMAGE_LIMIT", 50)
class PostContentOptimizer(object):
    """Clean up feed post HTML.

    Strips tracker images, tiny (toolbar/1px) images, tracker links and
    runs of excessive ``<br>`` tags, since these only pollute the content
    shown to the user. Most offenders are feedburner-style services whose
    URL prefixes are listed in ``DJANGOFEEDS_TRACKER_SERVICES``.
    """

    def looks_like_tracker(self, url):
        """Return True if ``url`` starts with a known tracker-service prefix."""
        return any(url.startswith(prefix)
                   for prefix in DJANGOFEEDS_TRACKER_SERVICES)

    def optimize(self, html):
        """Return ``html`` stripped of extra whitespace, <br> runs and trackers.

        If the markup cannot be parsed, the (whitespace-stripped) original
        is returned unchanged.
        """
        html = html.strip()
        try:
            soup = BeautifulSoup.BeautifulSoup(html)
            self.remove_excessive_br(soup)
            if DJANGOFEEDS_REMOVE_TRACKERS:
                self.remove_trackers(soup)
        except HTMLParseError:
            # Unparseable markup: hand back the stripped input untouched.
            return html
        return str(soup).strip()

    def remove_excessive_br(self, soup):
        """Collapse consecutive <br> tags (a leading <br> is removed too)."""
        # Starts True so that a <br> at the very beginning is dropped.
        previous_was_br = True
        for node in soup.childGenerator():
            if not isinstance(node, BeautifulSoup.Tag):
                # Text nodes do not reset the flag, matching removal of
                # <br> runs separated only by whitespace text.
                continue
            if node.name == 'br':
                if previous_was_br:
                    node.replaceWith("")
                previous_was_br = True
            else:
                previous_was_br = False

    def remove_trackers(self, soup):
        """Remove tracker images, too-small images and tracker links."""
        for image in soup("img"):
            source = image.get("src", "")
            # Images with no src, or a src on a tracker service, go away.
            removed = len(source) == 0 or self.looks_like_tracker(source)
            if removed:
                image.replaceWith("")
            # A missing "width" attribute defaults to the limit, so such
            # images are kept; an unparseable width also keeps the image.
            try:
                width = int(image.get("width",
                                      DJANGOFEEDS_SMALL_IMAGE_LIMIT))
            except ValueError:
                width = None
            if (not removed and width is not None
                    and width < DJANGOFEEDS_SMALL_IMAGE_LIMIT):
                image.replaceWith("")
        for link in soup("a"):
            href = link.get("href")
            # Only absolute URLs are candidates for tracker removal.
            if href and "://" in href and self.looks_like_tracker(href):
                link.replaceWith("")
|
[
"BeautifulSoup.BeautifulSoup"
] |
[((2418, 2451), 'BeautifulSoup.BeautifulSoup', 'BeautifulSoup.BeautifulSoup', (['html'], {}), '(html)\n', (2445, 2451), False, 'import BeautifulSoup\n')]
|
import uuid
import json
from commander.commands.Command import Command
from commander.data_classes.Filter import Filter
class AddFilterCommand(Command):
    """Command that attaches a new filter to a camera."""

    def __init__(self):
        Command.__init__(self)

    def execute(self, **kwargs):
        """
        Execute the command.
        :param kwargs: key-worded arguments.
        :keyword cameras: List of the cameras.
        :keyword filter_types: List of the filter types.
        :keyword camera_id: Camera's ID.
        :keyword image_topic: Video camera's topic.
        :keyword filter_type: Filter type.
        :return: Response.
        """
        cameras = kwargs.get('cameras')
        filter_types = kwargs.get('filter_types')
        camera_id = kwargs.get('camera_id')
        filter_type = kwargs.get('filter_type')

        # Guard clause: unknown camera -> 404 response.
        if camera_id not in cameras:
            return [json.dumps({"message": "Camera with this ID does not exist.", "code": 404})]
        image_topic = "/" + camera_id + "/video_stream_to_topic/stream/image"
        # Hyphen-free uuid1 string used as the filter's namespace/id.
        filter_id = str(uuid.uuid1()).replace("-", "")
        # Guard clause: unknown filter type -> 404 response.
        if filter_type not in filter_types:
            return [json.dumps({"message": "Filter type does not exist.", "code": 404})]

        # Run filter.
        executor = filter_types[filter_type]()
        executor.execute(image_topic=image_topic, namespace=filter_id)

        new_filter = Filter()
        new_filter.id = filter_id
        new_filter.type = filter_type
        new_filter.executor = executor
        cameras[camera_id].add_filter(new_filter)
        return [json.dumps({"message": "Filter is added.", "code": 200, "camera": new_filter.to_dict()})]
|
[
"commander.data_classes.Filter.Filter",
"uuid.uuid1",
"commander.commands.Command.Command.__init__",
"json.dumps"
] |
[((245, 267), 'commander.commands.Command.Command.__init__', 'Command.__init__', (['self'], {}), '(self)\n', (261, 267), False, 'from commander.commands.Command import Command\n'), ((1413, 1421), 'commander.data_classes.Filter.Filter', 'Filter', ([], {}), '()\n', (1419, 1421), False, 'from commander.data_classes.Filter import Filter\n'), ((908, 983), 'json.dumps', 'json.dumps', (["{'message': 'Camera with this ID does not exist.', 'code': 404}"], {}), "({'message': 'Camera with this ID does not exist.', 'code': 404})\n", (918, 983), False, 'import json\n'), ((1178, 1245), 'json.dumps', 'json.dumps', (["{'message': 'Filter type does not exist.', 'code': 404}"], {}), "({'message': 'Filter type does not exist.', 'code': 404})\n", (1188, 1245), False, 'import json\n'), ((1082, 1094), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1092, 1094), False, 'import uuid\n')]
|
# <NAME> and <NAME>
import unittest
import games_api
import json
class APITester(unittest.TestCase):
    """Integration tests for the games API endpoints."""

    def setUp(self):
        self.games_api = games_api.GamesApi()  # change depending on how we code it, will it be a class?

    def tearDown(self):
        pass

    def test_games_endpoint(self):
        url = '/games'
        self.assertIsNotNone(self.games_api.get_games(url))
        self.assertEqual(json.load(self.games_api.get_games(url))[0].keys(),
                         ['name', 'global_sales', 'publisher', 'platform', 'genre', 'year'])
        # Unknown query parameters should be ignored and still return games.
        url_wrong = '/games?random'
        self.assertEqual(json.load(self.games_api.get_games(url_wrong))[0].keys(),
                         ['name', 'global_sales', 'publisher', 'platform', 'genre', 'year'])

    def test_platforms_endpoint(self):
        url = '/platforms'
        self.assertIsNotNone(self.games_api.get_platform(url))

    def test_publishers_endpoint(self):
        url = '/publishers'
        self.assertIsNotNone(self.games_api.get_publisher(url))

    def test_genres_endpoint(self):
        url = '/genres'
        self.assertIsNotNone(self.games_api.get_genre(url))

    def test_categories_endpoint(self):
        url = '/categories'
        self.assertIsNotNone(self.games_api.get_categories(url))
        # BUG FIX: was self.asertEqual (typo -> AttributeError at runtime).
        self.assertEqual(self.games_api.get_categories(url).keys(), ['platforms', 'genres', 'publishers'])
        url_wrong = '/categories?random'
        # BUG FIX: second assertion now actually exercises url_wrong
        # (it previously re-tested url, leaving url_wrong unused).
        self.assertEqual(self.games_api.get_categories(url_wrong).keys(), ['platforms', 'genres', 'publishers'])

    def test_publisher_endpoint(self):
        url = '/publisher?name=Nintendo'
        self.assertIsNotNone(self.games_api.get_publisher_by_name(url))
        self.assertEqual(json.load(self.games_api.get_publisher_by_name(url))[0].keys(),
                         ['name', 'global_sales', 'publisher', 'platform', 'genre', 'year', 'na', 'eu', 'jp', 'user_score', 'critic_score'])
        # Empty and unknown publisher names yield an empty JSON list.
        url_empty = '/publisher?name='
        self.assertEqual(self.games_api.get_publisher_by_name(url_empty), '[]')
        url_publisher_not_in_set = '/publisher?name=ThisDoesNotMakeSense'
        self.assertEqual(self.games_api.get_publisher_by_name(url_publisher_not_in_set), '[]')

    def test_platform_endpoint(self):
        url = '/platform?name=Wii'
        self.assertIsNotNone(self.games_api.get_platform_by_name(url))
        self.assertEqual(json.load(self.games_api.get_platform_by_name(url))[0].keys(),
                         ['name', 'global_sales', 'publisher', 'platform', 'genre', 'year', 'na', 'eu', 'jp', 'user_score', 'critic_score'])
        url_empty = '/platform?name='
        self.assertEqual(self.games_api.get_platform_by_name(url_empty), '[]')
        url_publisher_not_in_set = '/platform?name=ThisDoesNotMakeSense'
        self.assertEqual(self.games_api.get_platform_by_name(url_publisher_not_in_set), '[]')

    def test_genre_endpoint(self):
        url = '/genre?name=Action'
        self.assertIsNotNone(self.games_api.get_genre_by_name(url))
        self.assertEqual(json.load(self.games_api.get_genre_by_name(url))[0].keys(),
                         ['name', 'global_sales', 'publisher', 'platform', 'genre', 'year', 'na', 'eu', 'jp', 'user_score', 'critic_score'])
        url_empty = '/genre?name='
        self.assertEqual(self.games_api.get_genre_by_name(url_empty), '[]')
        url_genre_not_in_set = '/genre?name=ThisDoesNotMakeSense'
        self.assertEqual(self.games_api.get_genre_by_name(url_genre_not_in_set), '[]')
if __name__ == '__main__':
    # Run the API test suite when the module is executed directly.
    unittest.main()
|
[
"unittest.main",
"games_api.GamesApi"
] |
[((3561, 3576), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3574, 3576), False, 'import unittest\n'), ((150, 170), 'games_api.GamesApi', 'games_api.GamesApi', ([], {}), '()\n', (168, 170), False, 'import games_api\n')]
|
#
# Copyright 2018 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from voltha.extensions.omci.tasks.task import Task
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, failure, returnValue
from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations
from voltha.extensions.omci.omci_me import *
from voltha.adapters.brcm_openomci_onu.uni_port import UniType
from voltha.adapters.brcm_openomci_onu.pon_port import BRDCM_DEFAULT_VLAN, DEFAULT_TPID
RC = ReasonCodes       # shorthand for OMCI response reason codes
OP = EntityOperations  # shorthand for OMCI entity operations
# VLAN 4095 is reserved: when set as the push VLAN the ONU is configured
# to forward tagged traffic transparently (no VID investigation).
RESERVED_VLAN = 4095
class BrcmVlanFilterException(Exception):
    """Raised when applying the Broadcom VLAN filter configuration fails."""
class BrcmVlanFilterTask(Task):
    """
    Apply Vlan Tagging Filter Data and Extended VLAN Tagging Operation Configuration on an ANI and UNI
    """
    task_priority = 200
    name = "Broadcom VLAN Filter Task"

    def __init__(self, omci_agent, device_id, uni_port, set_vlan_id, add_tag=True,
                 priority=task_priority):
        """
        Class initialization
        :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
        :param device_id: (str) ONU Device ID
        :param uni_port: (UniPort) UNI port
        :param set_vlan_id: (int) VLAN to filter for and set
        :param add_tag: (bool) Flag to identify VLAN Tagging or Untagging
        :param priority: (int) OpenOMCI Task priority (0..255) 255 is the highest
        """
        # NOTE(review): `structlog` is referenced here but no import is
        # visible in this snippet -- confirm the module-level
        # `import structlog` exists in the full file.
        self.log = structlog.get_logger(device_id=device_id, uni_port=uni_port.port_number)
        super(BrcmVlanFilterTask, self).__init__(BrcmVlanFilterTask.name,
                                                 omci_agent,
                                                 device_id,
                                                 priority=priority,
                                                 exclusive=True)
        self._device = omci_agent.get_device(device_id)
        self._uni_port = uni_port
        self._set_vlan_id = set_vlan_id
        self._results = None
        self._local_deferred = None
        self._config = self._device.configuration
        self._add_tag = add_tag
        # Port numbers
        self._input_tpid = DEFAULT_TPID
        self._output_tpid = DEFAULT_TPID
        self._cvid = BRDCM_DEFAULT_VLAN

    def cancel_deferred(self):
        # Cancel any pending reactor callback scheduled by start().
        super(BrcmVlanFilterTask, self).cancel_deferred()
        d, self._local_deferred = self._local_deferred, None
        try:
            if d is not None and not d.called:
                d.cancel()
        except:
            # Best-effort cancellation; errors from an already-fired
            # deferred are deliberately swallowed.
            pass

    def start(self):
        """
        Start Vlan Tagging Task
        """
        super(BrcmVlanFilterTask, self).start()
        # Defer the actual OMCI work to the reactor loop.
        self._local_deferred = reactor.callLater(0, self.perform_vlan_tagging, add_tag=self._add_tag)

    @inlineCallbacks
    def perform_vlan_tagging(self, add_tag=True):
        """
        Perform the vlan tagging
        :param add_tag: (bool) True installs the tagging rules,
                        False removes them
        """
        if add_tag:
            self.log.info('setting-vlan-tagging')
        else:
            self.log.info('removing-vlan-tagging')
        try:
            # TODO: parameterize these from the handler, or objects in the handler
            # TODO: make this a member of the onu gem port or the uni port
            _mac_bridge_service_profile_entity_id = 0x201
            _mac_bridge_port_ani_entity_id = 0x2102  # TODO: can we just use the entity id from the anis list?
            vlan_tagging_entity_id = _mac_bridge_port_ani_entity_id + self._uni_port.mac_bridge_port_num
            extended_vlan_tagging_entity_id = _mac_bridge_service_profile_entity_id + \
                self._uni_port.mac_bridge_port_num
            # Delete bridge ani side vlan filter
            yield self._send_msg(VlanTaggingFilterDataFrame(vlan_tagging_entity_id), 'delete',
                                 'flow-delete-vlan-tagging-filter-data')
            forward_operation = 0x10  # VID investigation
            # When the PUSH VLAN is RESERVED_VLAN (4095), let ONU be transparent
            if self._set_vlan_id == RESERVED_VLAN:
                forward_operation = 0x00  # no investigation, ONU transparent
            if add_tag:
                # Re-Create bridge ani side vlan filter
                msg = VlanTaggingFilterDataFrame(
                    vlan_tagging_entity_id,  # Entity ID
                    vlan_tcis=[self._set_vlan_id],  # VLAN IDs
                    forward_operation=forward_operation
                )
                yield self._send_msg(msg, 'create', 'flow-create-vlan-tagging-filter-data')
            else:
                # Delete bridge ani side vlan filter
                msg = VlanTaggingFilterDataFrame(
                    vlan_tagging_entity_id  # Entity ID
                )
                yield self._send_msg(msg, 'delete', 'flow-delete-vlan-tagging-filter-data')
            # Delete uni side extended vlan filter
            msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
                extended_vlan_tagging_entity_id  # Bridge Entity ID
            )
            yield self._send_msg(msg, 'delete', 'flow-delete-ext-vlan-tagging-op-config-data')
            # Create uni side extended vlan filter
            if add_tag:
                # When flow is removed and immediately re-added tech_profile specific task is not re-played, hence
                # Extended VLAN Tagging Operation configuration which is part of tech_profile specific task is not
                # getting created. To create it, we do Extended VLAN Tagging Operation configuration here.
                # TODO: do this for all uni/ports...
                # TODO: magic. static variable for assoc_type
                omci_cc = self._device.omci_cc
                # default to PPTP
                if self._uni_port.type is UniType.VEIP:
                    association_type = 10
                elif self._uni_port.type is UniType.PPTP:
                    association_type = 2
                else:
                    association_type = 2
                attributes = dict(
                    association_type=association_type,  # Assoc Type, PPTP/VEIP Ethernet UNI
                    associated_me_pointer=self._uni_port.entity_id,  # Assoc ME, PPTP/VEIP Entity Id
                    # See VOL-1311 - Need to set table during create to avoid exception
                    # trying to read back table during post-create-read-missing-attributes
                    # But, because this is a R/W attribute. Some ONU may not accept the
                    # value during create. It is repeated again in a set below.
                    input_tpid=self._input_tpid,  # input TPID
                    output_tpid=self._output_tpid,  # output TPID
                )
                msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
                    extended_vlan_tagging_entity_id,  # Bridge Entity ID
                    attributes=attributes
                )
                yield self._send_msg(msg, 'create', 'create-extended-vlan-tagging-operation-configuration-data')
                attributes = dict(
                    # Specifies the TPIDs in use and that operations in the downstream direction are
                    # inverse to the operations in the upstream direction
                    input_tpid=self._input_tpid,  # input TPID
                    output_tpid=self._output_tpid,  # output TPID
                    downstream_mode=0,  # inverse of upstream
                )
                msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
                    extended_vlan_tagging_entity_id,  # Bridge Entity ID
                    attributes=attributes
                )
                yield self._send_msg(msg, 'set', 'set-extended-vlan-tagging-operation-configuration-data')
                # parameters: Entity Id ( 0x900), Filter Inner Vlan Id(0x1000-4096,do not filter on Inner vid,
                # Treatment Inner Vlan Id : 2
                # Update uni side extended vlan filter
                # filter for untagged
                # probably for eapol
                # TODO: lots of magic
                # TODO: magic 0x1000 / 4096?
                attributes = self._generate_attributes(
                    filter_outer_priority=15,  # This entry is not a double-tag rule
                    filter_outer_vid=4096,  # Do not filter on the outer VID value
                    filter_outer_tpid_de=0,  # Do not filter on the outer TPID field
                    filter_inner_priority=15, filter_inner_vid=4096, filter_inner_tpid_de=0, filter_ether_type=0,
                    treatment_tags_to_remove=0, treatment_outer_priority=15, treatment_outer_vid=0,
                    treatment_outer_tpid_de=0, treatment_inner_priority=0, treatment_inner_vid=self._cvid,
                    treatment_inner_tpid_de=4)
                msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
                    extended_vlan_tagging_entity_id,  # Bridge Entity ID
                    attributes=attributes
                )
                yield self._send_msg(msg, 'set', 'set-extended-vlan-tagging-operation-configuration-data-table')
                if self._set_vlan_id == RESERVED_VLAN:
                    # Transparently send any single tagged packet.
                    # Any other specific rules will take priority over this
                    attributes = self._generate_attributes(
                        filter_outer_priority=15, filter_outer_vid=4096, filter_outer_tpid_de=0,
                        filter_inner_priority=14, filter_inner_vid=4096, filter_inner_tpid_de=0, filter_ether_type=0,
                        treatment_tags_to_remove=0, treatment_outer_priority=15, treatment_outer_vid=0,
                        treatment_outer_tpid_de=0, treatment_inner_priority=15, treatment_inner_vid=0,
                        treatment_inner_tpid_de=4)
                    msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
                        extended_vlan_tagging_entity_id,  # Bridge Entity ID
                        attributes=attributes  # See above
                    )
                    yield self._send_msg(msg, 'set',
                                         'flow-set-ext-vlan-tagging-op-config-data-single-tag-fwd-transparent')
                else:
                    # Update uni side extended vlan filter
                    # filter for untagged
                    # probably for eapol
                    # TODO: Create constants for the operation values. See omci spec
                    attributes = self._generate_attributes(
                        filter_outer_priority=15, filter_outer_vid=4096, filter_outer_tpid_de=0,
                        filter_inner_priority=15, filter_inner_vid=4096, filter_inner_tpid_de=0, filter_ether_type=0,
                        treatment_tags_to_remove=0, treatment_outer_priority=15, treatment_outer_vid=0,
                        treatment_outer_tpid_de=0, treatment_inner_priority=0, treatment_inner_vid=self._set_vlan_id,
                        treatment_inner_tpid_de=4)
                    msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
                        extended_vlan_tagging_entity_id,  # Bridge Entity ID
                        attributes=attributes  # See above
                    )
                    yield self._send_msg(msg, 'set', 'flow-set-ext-vlan-tagging-op-config-data-untagged')
                    # Update uni side extended vlan filter
                    # filter for vlan 0
                    # TODO: Create constants for the operation values. See omci spec
                    attributes = self._generate_attributes(
                        filter_outer_priority=15,  # This entry is not a double-tag rule
                        filter_outer_vid=4096,  # Do not filter on the outer VID value
                        filter_outer_tpid_de=0,  # Do not filter on the outer TPID field
                        filter_inner_priority=8,  # Filter on inner vlan
                        filter_inner_vid=0x0,  # Look for vlan 0
                        filter_inner_tpid_de=0,  # Do not filter on inner TPID field
                        filter_ether_type=0,  # Do not filter on EtherType
                        treatment_tags_to_remove=1, treatment_outer_priority=15, treatment_outer_vid=0,
                        treatment_outer_tpid_de=0,
                        treatment_inner_priority=8,  # Add an inner tag and insert this value as the priority
                        treatment_inner_vid=self._set_vlan_id,  # use this value as the VID in the inner VLAN tag
                        treatment_inner_tpid_de=4)  # set TPID
                    msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
                        extended_vlan_tagging_entity_id,  # Bridge Entity ID
                        attributes=attributes  # See above
                    )
                    yield self._send_msg(msg, 'set', 'flow-set-ext-vlan-tagging-op-config-data-zero-tagged')
            else:
                msg = ExtendedVlanTaggingOperationConfigurationDataFrame(
                    extended_vlan_tagging_entity_id  # Bridge Entity ID
                )
                yield self._send_msg(msg, 'delete', 'flow-delete-ext-vlan-tagging-op-config-data')
            self.deferred.callback(self)
        except Exception as e:
            self.log.exception('setting-vlan-tagging', e=e)
            self.deferred.errback(failure.Failure(e))

    def check_status_and_state(self, results, operation=''):
        """
        Check the results of an OMCI response. An exception is thrown
        if the task was cancelled or an error was detected.
        :param results: (OmciFrame) OMCI Response frame
        :param operation: (str) what operation was being performed
        :return: True if successful, False if the entity existed (already created)
        """
        omci_msg = results.fields['omci_message'].fields
        status = omci_msg['success_code']
        error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a')
        failed_mask = omci_msg.get('failed_attributes_mask', 'n/a')
        unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a')
        self.log.debug("OMCI Result: %s", operation, omci_msg=omci_msg,
                       status=status, error_mask=error_mask,
                       failed_mask=failed_mask, unsupported_mask=unsupported_mask)
        if status == RC.Success:
            self.strobe_watchdog()
            return True
        elif status == RC.InstanceExists:
            return False
        # NOTE(review): any other status falls through and returns None
        # implicitly -- callers currently ignore the return value.

    @inlineCallbacks
    def _send_msg(self, msg, operation, vlan_tagging_operation_msg):
        """
        Send frame to ONU.
        :param msg: (VlanTaggingFilterDataFrame/ExtendedVlanTaggingOperationConfigurationDataFrame) message used
            to generate OMCI frame
        :param operation: (str) type of CUD(Create/Update/Delete) operation
        :param vlan_tagging_operation_msg: (str) what operation was being performed
        """
        if operation == 'create':
            frame = msg.create()
        elif operation == 'set':
            frame = msg.set()
        else:
            # Anything other than create/set is treated as a delete.
            frame = msg.delete()
        self.log.debug('openomci-msg', omci_msg=msg)
        self.strobe_watchdog()
        results = yield self._device.omci_cc.send(frame)
        self.check_status_and_state(results, vlan_tagging_operation_msg)

    def _generate_attributes(self, **kwargs):
        """
        Generate ExtendedVlanTaggingOperation attributes
        :return: (dict) ExtendedVlanTaggingOperation attributes dictinary
        """
        return dict(
            received_frame_vlan_tagging_operation_table=
            VlanTaggingOperation(
                filter_outer_priority=kwargs['filter_outer_priority'],
                filter_outer_vid=kwargs['filter_outer_vid'],
                filter_outer_tpid_de=kwargs['filter_outer_tpid_de'],
                filter_inner_priority=kwargs['filter_inner_priority'],
                filter_inner_vid=kwargs['filter_inner_vid'],
                filter_inner_tpid_de=kwargs['filter_inner_tpid_de'],
                filter_ether_type=kwargs['filter_ether_type'],
                treatment_tags_to_remove=kwargs['treatment_tags_to_remove'],
                treatment_outer_priority=kwargs['treatment_outer_priority'],
                treatment_outer_vid=kwargs['treatment_outer_vid'],
                treatment_outer_tpid_de=kwargs['treatment_outer_tpid_de'],
                treatment_inner_priority=kwargs['treatment_inner_priority'],
                treatment_inner_vid=kwargs['treatment_inner_vid'],
                treatment_inner_tpid_de=kwargs['treatment_inner_tpid_de'],
            )
        )
|
[
"twisted.internet.defer.failure.Failure",
"twisted.internet.reactor.callLater"
] |
[((3187, 3257), 'twisted.internet.reactor.callLater', 'reactor.callLater', (['(0)', 'self.perform_vlan_tagging'], {'add_tag': 'self._add_tag'}), '(0, self.perform_vlan_tagging, add_tag=self._add_tag)\n', (3204, 3257), False, 'from twisted.internet import reactor\n'), ((13956, 13974), 'twisted.internet.defer.failure.Failure', 'failure.Failure', (['e'], {}), '(e)\n', (13971, 13974), False, 'from twisted.internet.defer import inlineCallbacks, failure, returnValue\n')]
|
import sys
import os
# Leave the path changes here!!!
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
import networkx as nx
import matplotlib.pyplot as plt
from src.accelerated_graph_features.test_python_converter import create_graph
N = 3  # number of test graphs to plot (graphs are numbered 1..N)
def plot_graph(i):
    """Build test graph *i* and display it using a spring layout."""
    graph = create_graph(i)
    layout = nx.spring_layout(graph)
    nx.draw(graph, layout)
    # labels are drawn separately so font size/family can be controlled
    nx.draw_networkx_labels(graph, layout, font_size=10, font_family='sans-serif')
    plt.axis('off')
    plt.show()
if __name__ == '__main__':
    # Render each of the N test graphs in turn.
    for i in range(1, N + 1):
        plot_graph(i)
|
[
"src.accelerated_graph_features.test_python_converter.create_graph",
"matplotlib.pyplot.show",
"os.path.dirname",
"matplotlib.pyplot.axis",
"networkx.spring_layout",
"networkx.draw",
"networkx.draw_networkx_labels"
] |
[((369, 384), 'src.accelerated_graph_features.test_python_converter.create_graph', 'create_graph', (['i'], {}), '(i)\n', (381, 384), False, 'from src.accelerated_graph_features.test_python_converter import create_graph\n'), ((396, 415), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (412, 415), True, 'import networkx as nx\n'), ((422, 437), 'networkx.draw', 'nx.draw', (['G', 'pos'], {}), '(G, pos)\n', (429, 437), True, 'import networkx as nx\n'), ((456, 527), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'pos'], {'font_size': '(10)', 'font_family': '"""sans-serif"""'}), "(G, pos, font_size=10, font_family='sans-serif')\n", (479, 527), True, 'import networkx as nx\n'), ((535, 550), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (543, 550), True, 'import matplotlib.pyplot as plt\n'), ((556, 566), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (564, 566), True, 'import matplotlib.pyplot as plt\n'), ((88, 113), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (103, 113), False, 'import os\n'), ((152, 177), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (167, 177), False, 'import os\n')]
|
from flask_testing import LiveServerTestCase
import cornflow_client as cf
import json
from cornflow.app import create_app
from cornflow.commands import AccessInitialization
from cornflow.shared.utils import db
from cornflow.tests.const import PREFIX
from cornflow.models import UserModel, UserRoleModel
from cornflow.shared.const import ADMIN_ROLE, SERVICE_ROLE
from cornflow.tests.const import LOGIN_URL, SIGNUP_URL
class CustomTestCaseLive(LiveServerTestCase):
    """Live-server test case that talks to cornflow through the API client."""

    def create_app(self):
        """Build the Flask application under test."""
        return create_app("testing")

    def set_client(self, server):
        """Point the cornflow client at *server* and remember it."""
        self.client = cf.CornFlow(url=server)
        return self.client

    def login_or_signup(self, user_data):
        """Log the user in; sign them up first if the login fails."""
        try:
            return self.client.login(user_data["username"], user_data["pwd"])
        except cf.CornFlowApiError:
            return self.client.sign_up(**user_data).json()

    def setUp(self, create_all=True):
        """Initialize database, roles and an authenticated client."""
        if create_all:
            db.create_all()
            AccessInitialization().run()
        user_data = dict(
            username="testname",
            email="<EMAIL>",
            pwd="<PASSWORD>",
        )
        self.set_client(self.get_server_url())
        self.client.token = self.login_or_signup(user_data)["token"]
        self.url = None
        self.model = None
        self.items_to_check = []

    def tearDown(self):
        db.session.remove()
        db.drop_all()

    def create_user_with_role(self, role_id, data=None):
        """Create (or reuse) a user, grant *role_id*, and return a fresh token."""
        if data is None:
            data = {
                "username": "testuser" + str(role_id),
                "email": "testemail" + str(role_id) + "@test.org",
                "password": "<PASSWORD>",
            }
        response = self.login_or_signup(data)
        UserRoleModel({"user_id": response["id"], "role_id": role_id}).save()
        db.session.commit()
        # Log in again so the returned token reflects the new role.
        return self.login_or_signup(data)["token"]

    def create_service_user(self, data=None):
        return self.create_user_with_role(SERVICE_ROLE, data=data)

    def create_admin(self, data=None):
        return self.create_user_with_role(ADMIN_ROLE, data=data)

    def get_server_url(self):
        """
        Return the url of the test server
        """
        prefix = PREFIX
        if prefix:
            prefix += "/"
        base = "http://localhost:%s" % self._port_value.value
        return base + prefix
|
[
"cornflow.commands.AccessInitialization",
"cornflow.shared.utils.db.drop_all",
"cornflow.models.UserRoleModel",
"cornflow.app.create_app",
"cornflow.shared.utils.db.create_all",
"cornflow.shared.utils.db.session.remove",
"cornflow_client.CornFlow",
"cornflow.shared.utils.db.session.commit"
] |
[((507, 528), 'cornflow.app.create_app', 'create_app', (['"""testing"""'], {}), "('testing')\n", (517, 528), False, 'from cornflow.app import create_app\n'), ((605, 628), 'cornflow_client.CornFlow', 'cf.CornFlow', ([], {'url': 'server'}), '(url=server)\n', (616, 628), True, 'import cornflow_client as cf\n'), ((1432, 1451), 'cornflow.shared.utils.db.session.remove', 'db.session.remove', ([], {}), '()\n', (1449, 1451), False, 'from cornflow.shared.utils import db\n'), ((1460, 1473), 'cornflow.shared.utils.db.drop_all', 'db.drop_all', ([], {}), '()\n', (1471, 1473), False, 'from cornflow.shared.utils import db\n'), ((1822, 1884), 'cornflow.models.UserRoleModel', 'UserRoleModel', (["{'user_id': response['id'], 'role_id': role_id}"], {}), "({'user_id': response['id'], 'role_id': role_id})\n", (1835, 1884), False, 'from cornflow.models import UserModel, UserRoleModel\n'), ((1919, 1938), 'cornflow.shared.utils.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1936, 1938), False, 'from cornflow.shared.utils import db\n'), ((991, 1006), 'cornflow.shared.utils.db.create_all', 'db.create_all', ([], {}), '()\n', (1004, 1006), False, 'from cornflow.shared.utils import db\n'), ((1015, 1037), 'cornflow.commands.AccessInitialization', 'AccessInitialization', ([], {}), '()\n', (1035, 1037), False, 'from cornflow.commands import AccessInitialization\n')]
|
from google.appengine.ext import ndb
class User(ndb.Model):
    """Datastore user entity keyed (and named) by its name string."""
    name = ndb.StringProperty()

    @classmethod
    def get_or_create(cls, name):
        """Return the user called *name*, creating it on first sight.

        Returns None for an empty/falsy name.
        """
        if not name:
            return None
        existing = ndb.Key('User', name).get()
        if existing:
            return existing
        user = User(name=name, id=name)
        user.put()
        return user
|
[
"google.appengine.ext.ndb.StringProperty",
"google.appengine.ext.ndb.Key"
] |
[((74, 94), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (92, 94), False, 'from google.appengine.ext import ndb\n'), ((208, 229), 'google.appengine.ext.ndb.Key', 'ndb.Key', (['"""User"""', 'name'], {}), "('User', name)\n", (215, 229), False, 'from google.appengine.ext import ndb\n')]
|
from collections import deque
import logging
import gevent
from .base import PoolSink
from ..asynchronous import AsyncResult
from ..constants import (Int, ChannelState, SinkProperties, SinkRole)
from ..sink import (
ClientMessageSink,
SinkProvider,
FailingMessageSink
)
from ..dispatch import ServiceClosedError
from ..varz import (
Gauge,
Source,
VarzBase
)
class QueuingMessageSink(ClientMessageSink):
  """A sink that parks incoming requests on a shared queue instead of
  dispatching them; another sink later replays them."""

  def __init__(self, queue):
    super(QueuingMessageSink, self).__init__()
    self._queue = queue

  def AsyncProcessRequest(self, sink_stack, msg, stream, headers):
    # Park the full request tuple for later replay.
    self._queue.append((sink_stack, msg, stream, headers))

  def AsyncProcessResponse(self, sink_stack, context, stream, msg):
    # Responses never flow through a queuing sink.
    raise NotImplementedError("This should never be called")

  @property
  def state(self):
    # A queuing sink always reports itself as open.
    return ChannelState.Open
class MaxWaitersError(Exception):
  """Raised (via a FailingMessageSink) when the waiter queue is full."""
class WatermarkPoolSink(PoolSink):
  """A watermark pool keeps a cached number of sinks active (the low watermark).
  Once the low watermark is hit, the pool will create new sinks until it hits the
  high watermark. At that point, it will begin queuing requests.
  The pool guarantees only a single request will be active on any underlying sink
  at any given time, that is, each sink processes requests serially.
  """
  ROOT_LOG = logging.getLogger('scales.pool.WatermarkPool')

  class Varz(VarzBase):
    """
    size - The current size of the pool.
    queue_size - The length of the waiter queue.
    min_size - The configured low-watermark.
    max_size - The configured high-watermark.
    """
    _VARZ_BASE_NAME = 'scales.pool.WatermarkPool'
    # Only size and queue_size are emitted as gauges; min/max sizes are
    # plain attributes on the pool itself.
    _VARZ = {
      'size': Gauge,
      'queue_size': Gauge
    }
  def __init__(self, next_provider, sink_properties, global_properties):
    """Initialize the watermark pool.

    Args:
      next_provider - Provider used to create the underlying pooled sinks.
      sink_properties - Carries min_watermark, max_watermark and max_queue_len.
      global_properties - Global properties; endpoint and label are read here.
    """
    endpoint = global_properties[SinkProperties.Endpoint]
    name = global_properties[SinkProperties.Label]
    self._cache = deque()    # idle, reusable sinks
    self._waiters = deque()  # queued (sink_stack, msg, stream, headers) tuples
    self._min_size = sink_properties.min_watermark
    self._max_size = sink_properties.max_watermark
    self._max_queue_size = sink_properties.max_queue_len
    self._current_size = 0
    self._state = ChannelState.Idle
    socket_name = '%s:%s' % (endpoint.host, endpoint.port)
    self.endpoint = socket_name
    self._varz = self.Varz(Source(service=name,
                                  endpoint=socket_name))
    self._log = self.ROOT_LOG.getChild('[%s.%s]' % (name, socket_name))
    super(WatermarkPoolSink, self).__init__(next_provider, global_properties)
  def __PropagateShutdown(self, value):
    # Forward a fault from an underlying sink to this pool's observers.
    self.on_faulted.Set(value)
  def _DiscardSink(self, sink):
    """Close the sink and unsubscribe from fault notifications
    Args:
      sink - The sink to discard.
    """
    # Unsubscribe first so the Close() does not echo back as a pool fault.
    sink.on_faulted.Unsubscribe(self.__PropagateShutdown)
    sink.Close()
def _Dequeue(self):
"""Attempt to get a sink from the cache.
Returns:
A sink if one can be taken from the cache, else None.
"""
while any(self._cache):
item = self._cache.popleft()
if item.state <= ChannelState.Open:
return item
else:
self._DiscardSink(item)
return None
def _Get(self):
cached = self._Dequeue()
if cached:
return cached
elif self._current_size < self._max_size:
self._current_size += 1
self._varz.size(self._current_size)
sink = self._sink_provider.CreateSink(self._properties)
# TODO: we could get a better failure case here by detecting that Open()
# failed and retrying, however for now the simplest option is to just fail.
sink.Open().wait()
sink.on_faulted.Subscribe(self.__PropagateShutdown)
return sink
else:
if len(self._waiters) + 1 > self._max_queue_size:
return FailingMessageSink(MaxWaitersError())
else:
self._varz.queue_size(len(self._waiters) + 1)
return QueuingMessageSink(self._waiters)
def _Release(self, sink):
# Releasing a queuing sink is a noop
if (isinstance(sink, QueuingMessageSink) or
isinstance(sink, FailingMessageSink)):
self._varz.queue_size(len(self._waiters))
return
do_close = False
# This sink is already shutting down
if self.state == ChannelState.Closed:
self._current_size -= 1
# One of the underlying sinks failed, shut down
elif sink.state == ChannelState.Closed:
self._current_size -= 1
self.Close()
# There are some waiters queued, reuse this sink to process another request.
elif any(self._waiters):
gevent.spawn(self._ProcessQueue, sink)
# We're below the min-size specified, cache this sink
elif self._current_size <= self._min_size:
self._cache.append(sink)
# We're above the min-size, close the sink.
else:
self._current_size -= 1
do_close = True
self._varz.size(self._current_size)
if do_close:
self._DiscardSink(sink)
def _ProcessQueue(self, sink):
"""Called as a continuation of an underlying sink completing. Get the
next waiter and use 'sink' to process it.
Args:
sink - An open sink.
"""
sink_stack, msg, stream, headers = self._waiters.popleft()
self._varz.queue_size(len(self._waiters))
# The stack has a QueuingChannelSink on the top now, pop it off
# and push the real stack back on.
orig_sink, ctx = sink_stack.Pop()
sink_stack.Push(orig_sink, sink)
sink.AsyncProcessRequest(sink_stack, msg, stream, headers)
def Open(self):
ar = AsyncResult()
ar.SafeLink(self._OpenImpl)
return ar
def _OpenImpl(self):
sink = self._Get()
self._Release(sink)
self._state = ChannelState.Open
def _FlushCache(self):
[self._DiscardSink(sink) for sink in self._cache]
def Close(self):
self._state = ChannelState.Closed
self._FlushCache()
fail_sink = FailingMessageSink(ServiceClosedError)
[fail_sink.AsyncProcessRequest(sink_stack, msg, stream, headers)
for sink_stack, msg, stream, headers in self._waiters]
@property
def state(self):
return self._state
WatermarkPoolSink.Builder = SinkProvider(
WatermarkPoolSink,
SinkRole.Pool,
min_watermark = 1,
max_watermark = Int.MaxValue,
max_queue_len = Int.MaxValue
)
|
[
"collections.deque",
"gevent.spawn",
"logging.getLogger"
] |
[((1315, 1361), 'logging.getLogger', 'logging.getLogger', (['"""scales.pool.WatermarkPool"""'], {}), "('scales.pool.WatermarkPool')\n", (1332, 1361), False, 'import logging\n'), ((1903, 1910), 'collections.deque', 'deque', ([], {}), '()\n', (1908, 1910), False, 'from collections import deque\n'), ((1931, 1938), 'collections.deque', 'deque', ([], {}), '()\n', (1936, 1938), False, 'from collections import deque\n'), ((4519, 4557), 'gevent.spawn', 'gevent.spawn', (['self._ProcessQueue', 'sink'], {}), '(self._ProcessQueue, sink)\n', (4531, 4557), False, 'import gevent\n')]
|
from som.primitives.primitives import Primitives
from som.vm.globals import nilObject, falseObject, trueObject
from som.vmobjects.primitive import AstPrimitive as Primitive
from som.vm.universe import std_print, std_println
from rpython.rlib import rgc, jit
import time
def _load(ivkbl, rcvr, args):
argument = args[0]
result = ivkbl.get_universe().load_class(argument)
return result if result else nilObject
def _exit(ivkbl, rcvr, args):
error = args[0]
return ivkbl.get_universe().exit(error.get_embedded_integer())
def _global(ivkbl, rcvr, args):
argument = args[0]
result = ivkbl.get_universe().get_global(argument)
return result if result else nilObject
def _has_global(ivkbl, rcvr, args):
if ivkbl.get_universe().has_global(args[0]):
return trueObject
else:
return falseObject
def _global_put(ivkbl, rcvr, args):
value = args[1]
argument = args[0]
ivkbl.get_universe().set_global(argument, value)
return value
def _print_string(ivkbl, rcvr, args):
argument = args[0]
std_print(argument.get_embedded_string())
return rcvr
def _print_newline(ivkbl, rcvr, args):
std_println()
return rcvr
def _time(ivkbl, rcvr, args):
since_start = time.time() - ivkbl.get_universe().start_time
return ivkbl.get_universe().new_integer(int(since_start * 1000))
def _ticks(ivkbl, rcvr, args):
since_start = time.time() - ivkbl.get_universe().start_time
return ivkbl.get_universe().new_integer(int(since_start * 1000000))
@jit.dont_look_inside
def _fullGC(ivkbl, rcvr, args):
rgc.collect()
return trueObject
class SystemPrimitives(Primitives):
def install_primitives(self):
self._install_instance_primitive(Primitive("load:", self._universe, _load))
self._install_instance_primitive(Primitive("exit:", self._universe, _exit))
self._install_instance_primitive(Primitive("hasGlobal:", self._universe, _has_global))
self._install_instance_primitive(Primitive("global:", self._universe, _global))
self._install_instance_primitive(Primitive("global:put:", self._universe, _global_put))
self._install_instance_primitive(Primitive("printString:", self._universe, _print_string))
self._install_instance_primitive(Primitive("printNewline", self._universe, _print_newline))
self._install_instance_primitive(Primitive("time", self._universe, _time))
self._install_instance_primitive(Primitive("ticks", self._universe, _ticks))
self._install_instance_primitive(Primitive("fullGC", self._universe, _fullGC))
|
[
"som.vm.universe.std_println",
"rpython.rlib.rgc.collect",
"som.vmobjects.primitive.AstPrimitive",
"time.time"
] |
[((1175, 1188), 'som.vm.universe.std_println', 'std_println', ([], {}), '()\n', (1186, 1188), False, 'from som.vm.universe import std_print, std_println\n'), ((1599, 1612), 'rpython.rlib.rgc.collect', 'rgc.collect', ([], {}), '()\n', (1610, 1612), False, 'from rpython.rlib import rgc, jit\n'), ((1255, 1266), 'time.time', 'time.time', ([], {}), '()\n', (1264, 1266), False, 'import time\n'), ((1421, 1432), 'time.time', 'time.time', ([], {}), '()\n', (1430, 1432), False, 'import time\n'), ((1749, 1790), 'som.vmobjects.primitive.AstPrimitive', 'Primitive', (['"""load:"""', 'self._universe', '_load'], {}), "('load:', self._universe, _load)\n", (1758, 1790), True, 'from som.vmobjects.primitive import AstPrimitive as Primitive\n'), ((1833, 1874), 'som.vmobjects.primitive.AstPrimitive', 'Primitive', (['"""exit:"""', 'self._universe', '_exit'], {}), "('exit:', self._universe, _exit)\n", (1842, 1874), True, 'from som.vmobjects.primitive import AstPrimitive as Primitive\n'), ((1917, 1969), 'som.vmobjects.primitive.AstPrimitive', 'Primitive', (['"""hasGlobal:"""', 'self._universe', '_has_global'], {}), "('hasGlobal:', self._universe, _has_global)\n", (1926, 1969), True, 'from som.vmobjects.primitive import AstPrimitive as Primitive\n'), ((2012, 2057), 'som.vmobjects.primitive.AstPrimitive', 'Primitive', (['"""global:"""', 'self._universe', '_global'], {}), "('global:', self._universe, _global)\n", (2021, 2057), True, 'from som.vmobjects.primitive import AstPrimitive as Primitive\n'), ((2100, 2153), 'som.vmobjects.primitive.AstPrimitive', 'Primitive', (['"""global:put:"""', 'self._universe', '_global_put'], {}), "('global:put:', self._universe, _global_put)\n", (2109, 2153), True, 'from som.vmobjects.primitive import AstPrimitive as Primitive\n'), ((2196, 2252), 'som.vmobjects.primitive.AstPrimitive', 'Primitive', (['"""printString:"""', 'self._universe', '_print_string'], {}), "('printString:', self._universe, _print_string)\n", (2205, 2252), True, 'from 
som.vmobjects.primitive import AstPrimitive as Primitive\n'), ((2295, 2352), 'som.vmobjects.primitive.AstPrimitive', 'Primitive', (['"""printNewline"""', 'self._universe', '_print_newline'], {}), "('printNewline', self._universe, _print_newline)\n", (2304, 2352), True, 'from som.vmobjects.primitive import AstPrimitive as Primitive\n'), ((2395, 2435), 'som.vmobjects.primitive.AstPrimitive', 'Primitive', (['"""time"""', 'self._universe', '_time'], {}), "('time', self._universe, _time)\n", (2404, 2435), True, 'from som.vmobjects.primitive import AstPrimitive as Primitive\n'), ((2478, 2520), 'som.vmobjects.primitive.AstPrimitive', 'Primitive', (['"""ticks"""', 'self._universe', '_ticks'], {}), "('ticks', self._universe, _ticks)\n", (2487, 2520), True, 'from som.vmobjects.primitive import AstPrimitive as Primitive\n'), ((2563, 2607), 'som.vmobjects.primitive.AstPrimitive', 'Primitive', (['"""fullGC"""', 'self._universe', '_fullGC'], {}), "('fullGC', self._universe, _fullGC)\n", (2572, 2607), True, 'from som.vmobjects.primitive import AstPrimitive as Primitive\n')]
|
#! /usr/bin/env python
import os,sys
import cv2, re
import numpy as np
try:
from pyutil import PyLogger
except ImportError:
from .. import PyLogger
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
SRC_TYPE_NAME = ["WebCam","Video","IPCam"]
OUTPUT_VIDEO_NAME = "source{}.avi"
SAVE_FORMAT = 'XVID'
DEFAULT_FPS = 20
class VideoController():
def __init__(self, video_src, video_ratio=1, record_prefix="", record_name="", isRecord=False, log=False, debug=False):
# init logger
self.__logger = PyLogger(log=log,debug=debug)
self.__vid_caps = list()
self.__vid_writers = list()
self.__record_path = os.path.join(record_prefix,record_name) if record_name != "" else os.path.join(record_prefix,OUTPUT_VIDEO_NAME)
self.__video_ratio = video_ratio
self.fps = DEFAULT_FPS
# create a VideoCapture for each src
for src in video_src:
self.__initVideoSource(src)
# init writer parameters
self.__fourcc = cv2.VideoWriter_fourcc(*SAVE_FORMAT)
if isRecord:
self.__initVideoWriter()
def __initVideoSource(self, src, camId=-1):
"""
Initialise video input source
Args:
src (object): video source used by Opencv, could be int or String
camId (int): if any cameraId was given
"""
if src is None or src == "":
return
sourceType = -1
# usb cam/web cam
if type(src) is int:
sourceType = 0
# search for ipcams
elif re.search( r'(http)|(rstp)|(https) & *', src, re.M|re.I):
sourceType = 2
# videos
else:
sourceType = 1
cap = cv2.VideoCapture(src)
if cap.isOpened():
if camId == -1:
camId = len(self.__vid_caps)
if len(self.__vid_caps) > 0:
cams = np.array(self.__vid_caps)[:,0]
if camId in cams:
camId = np.amax(cams) + 1
fps = int(cap.get(cv2.CAP_PROP_FPS))
self.__vid_caps.append([camId, sourceType, cap, src,fps])
self.__logger.info("Video Input Connected to {}".format(src))
else:
self.__logger.error("No {} Source Found From {}".format(SRC_TYPE_NAME[sourceType], src))
def __initVideoWriter(self):
"""
Initialise video writer
"""
for cap_info in self.__vid_caps:
cap = cap_info[2] # get cv2.cap object
fps = cap_info[4]
if fps == 0 or self.fps < fps:
fps = self.fps
self.__vid_writers.append([cap_info[0],cv2.VideoWriter(self.__record_path.format(cap_info[0]),
self.__fourcc,
fps,
(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)/self.__video_ratio),int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)/self.__video_ratio)))])
def writeVideo(self, camId, frame):
"""
Write video to output
Args:
camId (int): if any cameraId was given
frame (np.array): video frame to be written
"""
if len(self.__vid_writers) > 0:
ids = np.array(self.__vid_writers)[:,0]
if frame is not None:
self.__vid_writers[np.where(ids == camId)[0][0]][1].write(frame)
def getFrame(self, camId):
"""
Return frame from video source
Args:
camId (int): camera ID
Returns:
**frame** (np.array) - current frame
"""
# Capture frame-by-frame
frame = None
try:
cap = self.__vid_caps[np.where(np.array(self.__vid_caps)[:,0]==camId)[0][0]][2]
if cap is not None:
ret, frame = cap.read()
frame = cv2.resize(frame, (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)/self.__video_ratio),int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)/self.__video_ratio)))
#frame = cv2.resize(frame, (420,240))
except cv2.error:
return None
return frame
def showFrame(self, frame, title="Video"):
"""
Using OpenCV to display the current frame
Title is important if need to display multi window
Args:
frame (np.array): frame given to be shown
title (string): display window title, associate frame and display window
"""
# Display the resulting frame
cv2.imshow(title,frame)
# This line is important to keep the video showing
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
def onClose(self):
for cap in self.__vid_caps:
cap[2].release()
for writer in self.__vid_writers:
writer[1].release()
cv2.destroyAllWindows()
def printVideoSrcInfo(self):
# header
self.__logger.info("{:5}|{:10}".format("CamID","Source"))
# body
for cap in self.__vid_caps:
src = cap[3]
if type(src) is int:
src = SRC_TYPE_NAME[0]+ " {}".format(src)
self.__logger.info("{:5}|{}".format(cap[0],src))
def getVideoSrcInfo(self):
"""
Return Camera Information
Returns:
* **cam_info** (numpy.array) - camera information (camId, src)
"""
if len(self.__vid_caps) <= 0:
return None
return np.array(self.__vid_caps)[:,[0,3]]
def drawInfo(self, frame, fps, color=(255,255,255), num_people=-1):
"""
Draw frame info
Args:
frame (numpy.array): input frame
fps (int): Frame per second
color (tuple): BGR color code
num_people (int): number of people detected
Returns:
* **frame** (numpy.array) - modified frame
"""
frame_size = frame.shape
cv2.putText(frame, "FPS:{}".format(fps), (20,frame_size[0]-20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
if num_people >= 0:
cv2.putText(frame, "Num.Person:{}".format(num_people), (frame_size[1]-150,frame_size[0]-20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
return frame
def setIsRecord(self, isRecord):
"""
Set is recorded video or not
Args:
isRecord (boolean): record video or not
"""
if isRecord and not self.isRecord:
self.__initVideoWriter()
self.isRecord = isRecord
|
[
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.amax",
"numpy.where",
"numpy.array",
"re.search",
"cv2.destroyAllWindows",
"os.path.join",
"pyutil.PyLogger"
] |
[((566, 596), 'pyutil.PyLogger', 'PyLogger', ([], {'log': 'log', 'debug': 'debug'}), '(log=log, debug=debug)\n', (574, 596), False, 'from pyutil import PyLogger\n'), ((987, 1023), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['*SAVE_FORMAT'], {}), '(*SAVE_FORMAT)\n', (1009, 1023), False, 'import cv2, re\n'), ((1547, 1568), 'cv2.VideoCapture', 'cv2.VideoCapture', (['src'], {}), '(src)\n', (1563, 1568), False, 'import cv2, re\n'), ((3795, 3819), 'cv2.imshow', 'cv2.imshow', (['title', 'frame'], {}), '(title, frame)\n', (3805, 3819), False, 'import cv2, re\n'), ((4088, 4111), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4109, 4111), False, 'import cv2, re\n'), ((676, 716), 'os.path.join', 'os.path.join', (['record_prefix', 'record_name'], {}), '(record_prefix, record_name)\n', (688, 716), False, 'import os, sys\n'), ((742, 788), 'os.path.join', 'os.path.join', (['record_prefix', 'OUTPUT_VIDEO_NAME'], {}), '(record_prefix, OUTPUT_VIDEO_NAME)\n', (754, 788), False, 'import os, sys\n'), ((1426, 1482), 're.search', 're.search', (['"""(http)|(rstp)|(https) & *"""', 'src', '(re.M | re.I)'], {}), "('(http)|(rstp)|(https) & *', src, re.M | re.I)\n", (1435, 1482), False, 'import cv2, re\n'), ((3932, 3955), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3953, 3955), False, 'import cv2, re\n'), ((4594, 4619), 'numpy.array', 'np.array', (['self.__vid_caps'], {}), '(self.__vid_caps)\n', (4602, 4619), True, 'import numpy as np\n'), ((2763, 2791), 'numpy.array', 'np.array', (['self.__vid_writers'], {}), '(self.__vid_writers)\n', (2771, 2791), True, 'import numpy as np\n'), ((3877, 3891), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3888, 3891), False, 'import cv2, re\n'), ((1685, 1710), 'numpy.array', 'np.array', (['self.__vid_caps'], {}), '(self.__vid_caps)\n', (1693, 1710), True, 'import numpy as np\n'), ((1751, 1764), 'numpy.amax', 'np.amax', (['cams'], {}), '(cams)\n', (1758, 1764), True, 'import numpy as np\n'), 
((2845, 2867), 'numpy.where', 'np.where', (['(ids == camId)'], {}), '(ids == camId)\n', (2853, 2867), True, 'import numpy as np\n'), ((3134, 3159), 'numpy.array', 'np.array', (['self.__vid_caps'], {}), '(self.__vid_caps)\n', (3142, 3159), True, 'import numpy as np\n')]
|
import unittest
from django.test import Client
from django.urls import reverse
from rest_framework import status
client = Client()
class VerifyTestCases(unittest.TestCase):
def setUp(self):
self.valid_payload = {
'third_party_company_name': 'UD Saragih Tbk'
}
self.valid_payload_f = {
'third_party_company_name': 'PT Hutasoit Januar (Persero) Tbk'
}
self.not_found_payload = {
'third_party_company_name': 'FAANG'
}
self.invalid = {
'third_party_company_name': ''
}
def test_check_valid_true(self):
response = client.post(reverse('verify_company'), self.valid_payload)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'vendor_company': {'UD Saragih Tbk'}, 'user': {True}})
def test_check_valid_false(self):
response = client.post(reverse('verify_company'), self.valid_payload_f)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'vendor_company': {'PT Hutasoit Januar (Persero) Tbk'}, 'user': {False}})
def test_check_notfound(self):
response = client.post(reverse('verify_company'), self.not_found_payload)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, "FAANG Vendor Does Not Exist!")
def test_check_invalid(self):
response = client.post(reverse('verify_company'), self.invalid)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, "Vendor Name is required!")
class TransactionTestCases(unittest.TestCase):
def setUp(self):
self.valid_payload = {
'company_name': '<NAME>',
'company_vendor': 'PT Putra',
'from_date': '2020-04-05 08:42:35',
'to_date': '2020-05-06 14:03:08'
}
self.not_found_payload = {
'company_name': '<NAME>',
'company_vendor': 'FAANG',
'from_date': '2020-04-05 08:42:35',
'to_date': '2020-05-06 14:03:08'
}
self.invalid = {
'company_name': '',
'company_vendor': '',
'from_date': '',
'to_date': ''
}
self.greater_date = {
'company_name': '<NAME>',
'company_vendor': 'PT Putra',
'from_date': '2020-06-05 08:42:35',
'to_date': '2020-05-06 14:03:08'
}
self.invalid_date = {
'company_name': '<NAME>',
'company_vendor': 'PT Putra',
'from_date': '-06-05 08:42:35',
'to_date': '2020-05-06 14:03:08'
}
def test_check_valid(self):
response = client.post(reverse('transaction_frequency'), self.valid_payload)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {"companies": "Perum Prasetya Permadi & PT Putra", "transactions": {47}})
def test_check_notfound(self):
response = client.post(reverse('transaction_frequency'), self.not_found_payload)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, "FAANG Vendor Does Not Exist!")
def test_check_invalid(self):
response = client.post(reverse('transaction_frequency'), self.invalid)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, "Vendor Name is required!")
def test_check_greater_date(self):
response = client.post(reverse('transaction_frequency'), self.greater_date)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, "to_date, to_date must be bigger than from_date")
def test_check_invalid_date(self):
response = client.post(reverse('transaction_frequency'), self.invalid_date)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, "Date: invalid_format")
|
[
"django.urls.reverse",
"django.test.Client"
] |
[((124, 132), 'django.test.Client', 'Client', ([], {}), '()\n', (130, 132), False, 'from django.test import Client\n'), ((657, 682), 'django.urls.reverse', 'reverse', (['"""verify_company"""'], {}), "('verify_company')\n", (664, 682), False, 'from django.urls import reverse\n'), ((937, 962), 'django.urls.reverse', 'reverse', (['"""verify_company"""'], {}), "('verify_company')\n", (944, 962), False, 'from django.urls import reverse\n'), ((1235, 1260), 'django.urls.reverse', 'reverse', (['"""verify_company"""'], {}), "('verify_company')\n", (1242, 1260), False, 'from django.urls import reverse\n'), ((1498, 1523), 'django.urls.reverse', 'reverse', (['"""verify_company"""'], {}), "('verify_company')\n", (1505, 1523), False, 'from django.urls import reverse\n'), ((2827, 2859), 'django.urls.reverse', 'reverse', (['"""transaction_frequency"""'], {}), "('transaction_frequency')\n", (2834, 2859), False, 'from django.urls import reverse\n'), ((3129, 3161), 'django.urls.reverse', 'reverse', (['"""transaction_frequency"""'], {}), "('transaction_frequency')\n", (3136, 3161), False, 'from django.urls import reverse\n'), ((3399, 3431), 'django.urls.reverse', 'reverse', (['"""transaction_frequency"""'], {}), "('transaction_frequency')\n", (3406, 3431), False, 'from django.urls import reverse\n'), ((3662, 3694), 'django.urls.reverse', 'reverse', (['"""transaction_frequency"""'], {}), "('transaction_frequency')\n", (3669, 3694), False, 'from django.urls import reverse\n'), ((3952, 3984), 'django.urls.reverse', 'reverse', (['"""transaction_frequency"""'], {}), "('transaction_frequency')\n", (3959, 3984), False, 'from django.urls import reverse\n')]
|
""" Functions to fix off file headers """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from multiprocessing import Pool
from ocnn.utils.file_utils import find_files
def clean_off_file(file_name):
""" Fixes header of OFF file
Args:
file_name (str): Name of file to fix
"""
file_cleaned = False
with open(file_name) as f_check:
file_str = f_check.read()
if file_str[0:3] != 'OFF':
raise AttributeError('Unexpected Header for {0}'.format(file_name))
elif file_str[0:4] != 'OFF\n':
new_str = file_str[0:3] + '\n' + file_str[3:]
with open(file_name, 'w') as f_rewrite:
f_rewrite.write(new_str)
file_cleaned = True
return file_cleaned
def clean_off_folder(input_folder):
""" Fixes headers of all OFF files in a given folder.
Args:
input_folder (str): Folder to search for off files
"""
executor = Pool()
file_list = find_files(input_folder, '*.[Oo][Ff][Ff]')
files_cleaned_list = executor.map(clean_off_file, file_list)
num_files_cleaned = 0
for file_cleaned in files_cleaned_list:
if file_cleaned:
num_files_cleaned += 1
print("{0} out of {1} files cleaned".format(num_files_cleaned, len(files_cleaned_list)))
|
[
"ocnn.utils.file_utils.find_files",
"multiprocessing.Pool"
] |
[((1000, 1006), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (1004, 1006), False, 'from multiprocessing import Pool\n'), ((1023, 1065), 'ocnn.utils.file_utils.find_files', 'find_files', (['input_folder', '"""*.[Oo][Ff][Ff]"""'], {}), "(input_folder, '*.[Oo][Ff][Ff]')\n", (1033, 1065), False, 'from ocnn.utils.file_utils import find_files\n')]
|
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import StringProperty, ObjectProperty
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.anchorlayout import AnchorLayout
Builder.load_string("""
<Boxes>:
AnchorLayout:
anchor_x: 'center'
anchor_y: 'top'
size_hint: 1, .9
BoxLayout:
orientation: 'vertical'
padding: 10
BoxLayout:
padding: 10
orientation: 'horizontal'
textinputtext1: txt1.text
textinputtext2: txt2.text
textinputtext3: txt3.text
Button:
size_hint: 0.5,0.5
on_press: root.print_txt()
text:'Set velocities'
TextInput:
font_size: 40
id: txt1
text: root.textinputtext1
Button:
on_press: root.print_txt()
text:'Set samples'
TextInput:
id: txt2
text: root.textinputtext2
Button:
on_press: root.print_txt()
text:'Set pause'
TextInput:
id: txt3
text: root.textinputtext3
BoxLayout:
orientation: 'horizontal'
Button:
text: "2"
Button:
text: "3"
Button:
text: "4"
BoxLayout:
orientation: 'horizontal'
Button:
text: "5"
Button:
text: "6"
BoxLayout:
orientation: 'horizontal'
Button:
text: "7"
Button:
text: "8"
Button:
text: "9"
Button:
text: "10"
AnchorLayout:
anchor_x: 'center'
anchor_y: 'bottom'
BoxLayout:
orientation: 'horizontal'
size_hint: 1, .1
Button:
text: 'Go to Screen 1'
on_press: _screen_manager.current = 'screen1'
Button:
text: 'Go to Screen 1'
on_press: _screen_manager.current = 'screen1'
Button:
text: 'Go to Screen 1'
on_press: _screen_manager.current = 'screen1'
Button:
text: 'Go to Screen 2'
on_press: _screen_manager.current = 'screen2'""")
class Boxes(FloatLayout):
textinputtext1 = StringProperty()
textinputtext2 = StringProperty()
textinputtext3 = StringProperty()
def __init__(self, **kwargs):
super(Boxes, self).__init__(**kwargs)
self.textinputtext1 = 'palim'
self.textinputtext2 = '5'
self.textinputtext3 = '20'
def print_txt(self):
print(self.textinputtext1)
print(self.textinputtext2)
print(self.textinputtext3)
class TestApp(App):
def build(self):
return Boxes()
if __name__ == '__main__':
TestApp().run()
|
[
"kivy.properties.StringProperty",
"kivy.lang.Builder.load_string"
] |
[((207, 2761), 'kivy.lang.Builder.load_string', 'Builder.load_string', (['"""\n<Boxes>:\n AnchorLayout:\n anchor_x: \'center\'\n anchor_y: \'top\'\n size_hint: 1, .9\n BoxLayout: \n orientation: \'vertical\'\n padding: 10\n\n BoxLayout:\n padding: 10\n orientation: \'horizontal\'\n textinputtext1: txt1.text\n textinputtext2: txt2.text\n textinputtext3: txt3.text\n \n Button:\n size_hint: 0.5,0.5\n on_press: root.print_txt()\n text:\'Set velocities\'\n TextInput:\n font_size: 40\n id: txt1\n text: root.textinputtext1\n \n Button:\n on_press: root.print_txt()\n text:\'Set samples\'\n TextInput:\n id: txt2\n text: root.textinputtext2 \n \n Button:\n on_press: root.print_txt()\n text:\'Set pause\'\n TextInput:\n id: txt3\n text: root.textinputtext3\n \n BoxLayout:\n orientation: \'horizontal\'\n Button:\n text: "2"\n Button:\n text: "3"\n Button:\n text: "4"\n BoxLayout:\n orientation: \'horizontal\'\n Button:\n text: "5"\n Button:\n text: "6"\n BoxLayout:\n orientation: \'horizontal\'\n Button:\n text: "7"\n Button:\n text: "8"\n Button:\n text: "9"\n Button:\n text: "10"\n AnchorLayout:\n anchor_x: \'center\'\n anchor_y: \'bottom\'\n BoxLayout:\n orientation: \'horizontal\'\n size_hint: 1, .1\n Button:\n text: \'Go to Screen 1\'\n on_press: _screen_manager.current = \'screen1\'\n Button:\n text: \'Go to Screen 1\'\n on_press: _screen_manager.current = \'screen1\' \n Button:\n text: \'Go to Screen 1\'\n on_press: _screen_manager.current = \'screen1\' \n Button:\n text: \'Go to Screen 2\'\n on_press: _screen_manager.current = \'screen2\'"""'], {}), '(\n """\n<Boxes>:\n AnchorLayout:\n anchor_x: \'center\'\n anchor_y: \'top\'\n size_hint: 1, .9\n BoxLayout: \n orientation: \'vertical\'\n padding: 10\n\n BoxLayout:\n padding: 10\n orientation: \'horizontal\'\n textinputtext1: txt1.text\n textinputtext2: txt2.text\n textinputtext3: txt3.text\n \n Button:\n size_hint: 0.5,0.5\n on_press: root.print_txt()\n text:\'Set velocities\'\n TextInput:\n font_size: 40\n id: txt1\n 
text: root.textinputtext1\n \n Button:\n on_press: root.print_txt()\n text:\'Set samples\'\n TextInput:\n id: txt2\n text: root.textinputtext2 \n \n Button:\n on_press: root.print_txt()\n text:\'Set pause\'\n TextInput:\n id: txt3\n text: root.textinputtext3\n \n BoxLayout:\n orientation: \'horizontal\'\n Button:\n text: "2"\n Button:\n text: "3"\n Button:\n text: "4"\n BoxLayout:\n orientation: \'horizontal\'\n Button:\n text: "5"\n Button:\n text: "6"\n BoxLayout:\n orientation: \'horizontal\'\n Button:\n text: "7"\n Button:\n text: "8"\n Button:\n text: "9"\n Button:\n text: "10"\n AnchorLayout:\n anchor_x: \'center\'\n anchor_y: \'bottom\'\n BoxLayout:\n orientation: \'horizontal\'\n size_hint: 1, .1\n Button:\n text: \'Go to Screen 1\'\n on_press: _screen_manager.current = \'screen1\'\n Button:\n text: \'Go to Screen 1\'\n on_press: _screen_manager.current = \'screen1\' \n Button:\n text: \'Go to Screen 1\'\n on_press: _screen_manager.current = \'screen1\' \n Button:\n text: \'Go to Screen 2\'\n on_press: _screen_manager.current = \'screen2\'"""\n )\n', (226, 2761), False, 'from kivy.lang import Builder\n'), ((2801, 2817), 'kivy.properties.StringProperty', 'StringProperty', ([], {}), '()\n', (2815, 2817), False, 'from kivy.properties import StringProperty, ObjectProperty\n'), ((2839, 2855), 'kivy.properties.StringProperty', 'StringProperty', ([], {}), '()\n', (2853, 2855), False, 'from kivy.properties import StringProperty, ObjectProperty\n'), ((2877, 2893), 'kivy.properties.StringProperty', 'StringProperty', ([], {}), '()\n', (2891, 2893), False, 'from kivy.properties import StringProperty, ObjectProperty\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import subprocess
from scipy.stats import chi2
TESTFILE_TEMPLATE = """#include <iostream>
#include "Chi2PLookup.h"
int main() {{
Chi2PLookup Chi2PLookupTable;
double x = {0};
int df = {1};
double outvalue;
outvalue = Chi2PLookupTable.getPValue(x, df);
std::cout << outvalue << "\\n";
return 0;
}}
"""
def test_headerfile(template=TESTFILE_TEMPLATE, testvalue=1.1,
df=1, precision=10000, start_chi=25, headerfile="tests/Chi2PLookup.h",
srcfpath="tests/test.cpp", binfpath="tests/test.out"):
"""Test generated header file within cpp source file.
:param str template: Template file that contains main() function and imports header file.
:param testvalue: Chi value.
:param int df: Degree of freedom.
:param str srcfpath: Path where source file will be saved.
:param str binfpath: Path where binary file will be saved.
:return: None
:rtype: None
"""
command = "python -m chi2plookup generate --headerfile={} --df={} --precision={} --start_chi={}".format(headerfile, df, precision, start_chi)
subprocess.call(command, shell=True)
p_value = 1 - chi2.cdf(testvalue, df)
template = template.format(testvalue, df)
with open(srcfpath, "w") as outfile:
outfile.write(template)
subprocess.call("g++ -std=c++11 {} -o {}".format(srcfpath, binfpath), shell=True)
generated_p_value = subprocess.check_output("./{}".format(binfpath))
assert round(float(p_value), 6) == round(float(generated_p_value.strip()), 6)
|
[
"subprocess.call",
"scipy.stats.chi2.cdf"
] |
[((1206, 1242), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1221, 1242), False, 'import subprocess\n'), ((1261, 1284), 'scipy.stats.chi2.cdf', 'chi2.cdf', (['testvalue', 'df'], {}), '(testvalue, df)\n', (1269, 1284), False, 'from scipy.stats import chi2\n')]
|
import datetime
def julian_to_datetime(input_string: str):
"""
:param: input_string String to be converted
:rtype: datetime object
"""
if len(input_string) == 5:
date = datetime.datetime.strptime(input_string, '%y%j')
elif len(input_string) == 7:
date = datetime.datetime.strptime(input_string, '%Y%j')
else:
raise UtilityException("Incorrect parameter length passed to "
"julian_to_datetime")
return date
class UtilityException(Exception):
pass
|
[
"datetime.datetime.strptime"
] |
[((201, 249), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['input_string', '"""%y%j"""'], {}), "(input_string, '%y%j')\n", (227, 249), False, 'import datetime\n'), ((300, 348), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['input_string', '"""%Y%j"""'], {}), "(input_string, '%Y%j')\n", (326, 348), False, 'import datetime\n')]
|
import string
import tkinter
from search_option import SearchOption
class GUI:
def __init__(self) -> None:
self.window = None
self.entries_known_character = []
self.entry_contain_characters = None
self.entry_remove_characters = None
self.check_remove_duplicate = None
self.list_box_words = None
self.next_character_entries = {}
self.value_remove_duplicate = None
self.value_list_box_word = None
self.search_execute_command = None
def mainloop(self):
self.window = tkinter.Tk()
self.window.title('leword_vocab')
self.window.geometry("600x400")
self.frame_options = tkinter.Frame(self.window, width=200)
self.frame_options.propagate(False)
self.frame_options.pack(side=tkinter.LEFT, fill=tkinter.Y)
self.button_search = tkinter.Button(self.frame_options, text='検索',
command=lambda: self.__search_button_click())
self.button_search.propagate(True)
self.button_search.grid(row=0, column=0, rowspan=1, columnspan=5, sticky=tkinter.E+tkinter.W, pady=2, padx=2)
self.label_known_characters = tkinter.Label(self.frame_options, text='判明済の文字')
self.label_known_characters.grid(row=1, column=0, rowspan=1, columnspan=1, sticky=tkinter.W, padx=2, pady=2)
self.entries_known_character = []
entry_font = ("", 48)
entry_validation_command = self.window.register(self.__validate_entry_character)
for i in range(5):
entry = tkinter.Entry(self.frame_options, name=f'character_entry_{i}',
width=2, font=entry_font,
validate='all',
validatecommand=(entry_validation_command, '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W'),
justify=tkinter.CENTER)
entry.grid(row=2, column=i, rowspan=1, columnspan=1, padx=2)
self.entries_known_character.append(entry)
if i > 0:
self.next_character_entries[str(self.entries_known_character[i-1])] = entry
self.label_contains_characters = tkinter.Label(self.frame_options, text='含まれる文字')
self.label_contains_characters.grid(row=3, column=0, rowspan=1, columnspan=1, sticky=tkinter.W, padx=2, pady=2)
self.entry_contain_characters = tkinter.Entry(self.frame_options, justify=tkinter.LEFT)
self.entry_contain_characters.grid(row=4, column=0, rowspan=1, columnspan=5, sticky=tkinter.E+tkinter.W, padx=2, pady=2)
self.label_remove_characters = tkinter.Label(self.frame_options, text='除外する文字')
self.label_remove_characters.grid(row=5, column=0, rowspan=1, columnspan=1, sticky=tkinter.W, padx=2, pady=2)
self.entry_remove_characters = tkinter.Entry(self.frame_options, justify=tkinter.LEFT)
self.entry_remove_characters.grid(row=6, column=0, rowspan=1, columnspan=5, sticky=tkinter.E+tkinter.W, padx=2, pady=2)
self.value_remove_duplicate = tkinter.BooleanVar()
self.value_remove_duplicate.set(True)
self.check_remove_duplicate = tkinter.Checkbutton(self.frame_options, variable=self.value_remove_duplicate, text='同じ文字は1回のみ')
self.check_remove_duplicate.grid(row=7, column=0, rowspan=1, columnspan=5, sticky=tkinter.W, padx=2, pady=2)
self.value_list_box_word = tkinter.StringVar()
list_box_words = tkinter.Listbox(self.window, listvariable=self.value_list_box_word, width=200)
list_box_words.propagate(True)
list_box_words.pack(side=tkinter.LEFT, expand=True, fill=tkinter.BOTH)
self.window.mainloop()
def get_search_option(self):
result = SearchOption()
result.search_pattern = ''
for i in range(5):
char = self.entries_known_character[i].get()
if char == '':
result.search_pattern = result.search_pattern + '.'
else:
result.search_pattern = result.search_pattern + char
result.contain_characters = self.entry_contain_characters.get()
result.remove_characters = self.entry_remove_characters.get()
result.remove_duplicate = self.value_remove_duplicate.get()
return result
def set_word_list(self, in_list):
self.value_list_box_word.set(in_list)
def __validate_entry_character(self, in_action, in_index, in_new_str, in_old_str, in_item, in_validate_options, in_mode, in_name):
if in_mode == 'key':
if not (in_item in string.ascii_lowercase):
if in_new_str != '': # delete character
return False
if in_name in self.next_character_entries.keys():
self.next_character_entries[in_name].focus_set()
return True
def __search_button_click(self):
if self.search_execute_command is not None:
self.search_execute_command()
|
[
"tkinter.StringVar",
"tkinter.Checkbutton",
"tkinter.Entry",
"tkinter.Listbox",
"tkinter.BooleanVar",
"search_option.SearchOption",
"tkinter.Frame",
"tkinter.Label",
"tkinter.Tk"
] |
[((566, 578), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (576, 578), False, 'import tkinter\n'), ((691, 728), 'tkinter.Frame', 'tkinter.Frame', (['self.window'], {'width': '(200)'}), '(self.window, width=200)\n', (704, 728), False, 'import tkinter\n'), ((1203, 1251), 'tkinter.Label', 'tkinter.Label', (['self.frame_options'], {'text': '"""判明済の文字"""'}), "(self.frame_options, text='判明済の文字')\n", (1216, 1251), False, 'import tkinter\n'), ((2229, 2277), 'tkinter.Label', 'tkinter.Label', (['self.frame_options'], {'text': '"""含まれる文字"""'}), "(self.frame_options, text='含まれる文字')\n", (2242, 2277), False, 'import tkinter\n'), ((2438, 2493), 'tkinter.Entry', 'tkinter.Entry', (['self.frame_options'], {'justify': 'tkinter.LEFT'}), '(self.frame_options, justify=tkinter.LEFT)\n', (2451, 2493), False, 'import tkinter\n'), ((2663, 2711), 'tkinter.Label', 'tkinter.Label', (['self.frame_options'], {'text': '"""除外する文字"""'}), "(self.frame_options, text='除外する文字')\n", (2676, 2711), False, 'import tkinter\n'), ((2869, 2924), 'tkinter.Entry', 'tkinter.Entry', (['self.frame_options'], {'justify': 'tkinter.LEFT'}), '(self.frame_options, justify=tkinter.LEFT)\n', (2882, 2924), False, 'import tkinter\n'), ((3092, 3112), 'tkinter.BooleanVar', 'tkinter.BooleanVar', ([], {}), '()\n', (3110, 3112), False, 'import tkinter\n'), ((3197, 3297), 'tkinter.Checkbutton', 'tkinter.Checkbutton', (['self.frame_options'], {'variable': 'self.value_remove_duplicate', 'text': '"""同じ文字は1回のみ"""'}), "(self.frame_options, variable=self.\n value_remove_duplicate, text='同じ文字は1回のみ')\n", (3216, 3297), False, 'import tkinter\n'), ((3446, 3465), 'tkinter.StringVar', 'tkinter.StringVar', ([], {}), '()\n', (3463, 3465), False, 'import tkinter\n'), ((3491, 3569), 'tkinter.Listbox', 'tkinter.Listbox', (['self.window'], {'listvariable': 'self.value_list_box_word', 'width': '(200)'}), '(self.window, listvariable=self.value_list_box_word, width=200)\n', (3506, 3569), False, 'import tkinter\n'), ((3771, 3785), 
'search_option.SearchOption', 'SearchOption', ([], {}), '()\n', (3783, 3785), False, 'from search_option import SearchOption\n'), ((1578, 1811), 'tkinter.Entry', 'tkinter.Entry', (['self.frame_options'], {'name': 'f"""character_entry_{i}"""', 'width': '(2)', 'font': 'entry_font', 'validate': '"""all"""', 'validatecommand': "(entry_validation_command, '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')", 'justify': 'tkinter.CENTER'}), "(self.frame_options, name=f'character_entry_{i}', width=2,\n font=entry_font, validate='all', validatecommand=(\n entry_validation_command, '%d', '%i', '%P', '%s', '%S', '%v', '%V',\n '%W'), justify=tkinter.CENTER)\n", (1591, 1811), False, 'import tkinter\n')]
|
import tkinter
TIMER = 0
FONT = "Times New Roman"
def count_up():
global TIMER
TIMER += 1
label["text"] = TIMER
root.after(1000, count_up) # 1초 후, count_up함수를 재실행
if __name__ == "__main__":
root = tkinter.Tk()
label = tkinter.Label(font=(FONT, 80))
label.pack()
root.after(1000, count_up)
root.mainloop()
|
[
"tkinter.Label",
"tkinter.Tk"
] |
[((223, 235), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (233, 235), False, 'import tkinter\n'), ((249, 279), 'tkinter.Label', 'tkinter.Label', ([], {'font': '(FONT, 80)'}), '(font=(FONT, 80))\n', (262, 279), False, 'import tkinter\n')]
|
"""Example of count data sampled from negative-binomial distribution
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
from sklearn.model_selection import train_test_split
from xgboost_distribution import XGBDistribution
def generate_count_data(n_samples=10_000):
X = np.random.uniform(-2, 0, n_samples)
n = 66 * np.abs(np.cos(X))
p = 0.5 * np.abs(np.cos(X / 3))
y = np.random.negative_binomial(n=n, p=p, size=n_samples)
return X[..., np.newaxis], y
def predict_distribution(model, X, y):
"""Predict a distribution for a given X, and evaluate over y"""
distribution_func = {
"normal": getattr(stats, "norm").pdf,
"laplace": getattr(stats, "laplace").pdf,
"poisson": getattr(stats, "poisson").pmf,
"negative-binomial": getattr(stats, "nbinom").pmf,
}
preds = model.predict(X[..., np.newaxis])
dists = np.zeros(shape=(len(X), len(y)))
for ii, x in enumerate(X):
params = {field: param[ii] for (field, param) in zip(preds._fields, preds)}
dists[ii] = distribution_func[model.distribution](y, **params)
return dists
def create_distribution_heatmap(
model, x_range=(-2, 0), x_steps=100, y_range=(0, 100), normalize=True
):
xx = np.linspace(x_range[0], x_range[1], x_steps)
yy = np.linspace(y_range[0], y_range[1], y_range[1] - y_range[0] + 1)
ym, xm = np.meshgrid(xx, yy)
z = predict_distribution(model, xx, yy)
if normalize:
z = z / z.max(axis=0)
return ym, xm, z.transpose()
def main():
random_state = 10
np.random.seed(random_state)
X, y = generate_count_data(n_samples=10_000)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state)
model = XGBDistribution(
distribution="negative-binomial", # try changing the distribution here
natural_gradient=True,
max_depth=3,
n_estimators=500,
)
model.fit(
X_train,
y_train,
eval_set=[(X_test, y_test)],
early_stopping_rounds=10,
verbose=False,
)
xm, ym, z = create_distribution_heatmap(model)
fig, ax = plt.subplots(figsize=(9, 6))
ax.pcolormesh(
xm, ym, z, cmap="Oranges", vmin=0, vmax=1.608, alpha=1.0, shading="auto"
)
ax.scatter(X_test, y_test, s=0.75, alpha=0.25, c="k", label="data")
plt.show()
if __name__ == "__main__":
main()
|
[
"numpy.random.uniform",
"numpy.meshgrid",
"numpy.random.seed",
"matplotlib.pyplot.show",
"xgboost_distribution.XGBDistribution",
"numpy.random.negative_binomial",
"sklearn.model_selection.train_test_split",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.subplots"
] |
[((309, 344), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(0)', 'n_samples'], {}), '(-2, 0, n_samples)\n', (326, 344), True, 'import numpy as np\n'), ((421, 474), 'numpy.random.negative_binomial', 'np.random.negative_binomial', ([], {'n': 'n', 'p': 'p', 'size': 'n_samples'}), '(n=n, p=p, size=n_samples)\n', (448, 474), True, 'import numpy as np\n'), ((1272, 1316), 'numpy.linspace', 'np.linspace', (['x_range[0]', 'x_range[1]', 'x_steps'], {}), '(x_range[0], x_range[1], x_steps)\n', (1283, 1316), True, 'import numpy as np\n'), ((1326, 1390), 'numpy.linspace', 'np.linspace', (['y_range[0]', 'y_range[1]', '(y_range[1] - y_range[0] + 1)'], {}), '(y_range[0], y_range[1], y_range[1] - y_range[0] + 1)\n', (1337, 1390), True, 'import numpy as np\n'), ((1404, 1423), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {}), '(xx, yy)\n', (1415, 1423), True, 'import numpy as np\n'), ((1592, 1620), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (1606, 1620), True, 'import numpy as np\n'), ((1710, 1759), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': 'random_state'}), '(X, y, random_state=random_state)\n', (1726, 1759), False, 'from sklearn.model_selection import train_test_split\n'), ((1773, 1880), 'xgboost_distribution.XGBDistribution', 'XGBDistribution', ([], {'distribution': '"""negative-binomial"""', 'natural_gradient': '(True)', 'max_depth': '(3)', 'n_estimators': '(500)'}), "(distribution='negative-binomial', natural_gradient=True,\n max_depth=3, n_estimators=500)\n", (1788, 1880), False, 'from xgboost_distribution import XGBDistribution\n'), ((2170, 2198), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (2182, 2198), True, 'from matplotlib import pyplot as plt\n'), ((2381, 2391), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2389, 2391), True, 'from matplotlib import pyplot as plt\n'), ((365, 374), 'numpy.cos', 'np.cos', 
(['X'], {}), '(X)\n', (371, 374), True, 'import numpy as np\n'), ((397, 410), 'numpy.cos', 'np.cos', (['(X / 3)'], {}), '(X / 3)\n', (403, 410), True, 'import numpy as np\n')]
|
# This file is automatically generated by EBNFParser.
from Ruikowa.ObjectRegex.Tokenizer import unique_literal_cache_pool, regex_matcher, char_matcher, str_matcher, Tokenizer
from Ruikowa.ObjectRegex.Node import AstParser, Ref, SeqParser, LiteralValueParser, LiteralNameParser, Undef
namespace = globals()
recur_searcher = set()
token_table = ((unique_literal_cache_pool["keyword"], str_matcher(('efgh', 'abcd'))),
(unique_literal_cache_pool["auto_const"], char_matcher(('c', 'b', 'a'))))
class UNameEnum:
# names
auto_const = unique_literal_cache_pool['auto_const']
keyword = unique_literal_cache_pool['keyword']
S = unique_literal_cache_pool['S']
# values
auto_const_c = unique_literal_cache_pool['c']
keyword_efgh = unique_literal_cache_pool['efgh']
auto_const_a = unique_literal_cache_pool['a']
auto_const_b = unique_literal_cache_pool['b']
keyword_abcd = unique_literal_cache_pool['abcd']
token_func = lambda _: Tokenizer.from_raw_strings(_, token_table, ({}, {}))
keyword = LiteralNameParser('keyword')
S = AstParser([SeqParser(['a', 'b', 'c'], at_least=0,at_most=Undef)],
name="S",
to_ignore=({}, {}))
S.compile(namespace, recur_searcher)
# add here
print (S.possibilities[0][0].name)
|
[
"Ruikowa.ObjectRegex.Tokenizer.Tokenizer.from_raw_strings",
"Ruikowa.ObjectRegex.Node.LiteralNameParser",
"Ruikowa.ObjectRegex.Tokenizer.char_matcher",
"Ruikowa.ObjectRegex.Tokenizer.str_matcher",
"Ruikowa.ObjectRegex.Node.SeqParser"
] |
[((1037, 1065), 'Ruikowa.ObjectRegex.Node.LiteralNameParser', 'LiteralNameParser', (['"""keyword"""'], {}), "('keyword')\n", (1054, 1065), False, 'from Ruikowa.ObjectRegex.Node import AstParser, Ref, SeqParser, LiteralValueParser, LiteralNameParser, Undef\n'), ((974, 1026), 'Ruikowa.ObjectRegex.Tokenizer.Tokenizer.from_raw_strings', 'Tokenizer.from_raw_strings', (['_', 'token_table', '({}, {})'], {}), '(_, token_table, ({}, {}))\n', (1000, 1026), False, 'from Ruikowa.ObjectRegex.Tokenizer import unique_literal_cache_pool, regex_matcher, char_matcher, str_matcher, Tokenizer\n'), ((383, 412), 'Ruikowa.ObjectRegex.Tokenizer.str_matcher', 'str_matcher', (["('efgh', 'abcd')"], {}), "(('efgh', 'abcd'))\n", (394, 412), False, 'from Ruikowa.ObjectRegex.Tokenizer import unique_literal_cache_pool, regex_matcher, char_matcher, str_matcher, Tokenizer\n'), ((472, 501), 'Ruikowa.ObjectRegex.Tokenizer.char_matcher', 'char_matcher', (["('c', 'b', 'a')"], {}), "(('c', 'b', 'a'))\n", (484, 501), False, 'from Ruikowa.ObjectRegex.Tokenizer import unique_literal_cache_pool, regex_matcher, char_matcher, str_matcher, Tokenizer\n'), ((1081, 1134), 'Ruikowa.ObjectRegex.Node.SeqParser', 'SeqParser', (["['a', 'b', 'c']"], {'at_least': '(0)', 'at_most': 'Undef'}), "(['a', 'b', 'c'], at_least=0, at_most=Undef)\n", (1090, 1134), False, 'from Ruikowa.ObjectRegex.Node import AstParser, Ref, SeqParser, LiteralValueParser, LiteralNameParser, Undef\n')]
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
def test(mod, path, entity = None):
import re
# ignore anyhting but Firefox
if mod not in ("netwerk", "dom", "toolkit", "security/manager",
"browser", "extensions/reporter", "extensions/spellcheck",
"other-licenses/branding/firefox",
"browser/branding/official",
"services/sync"):
return False
if mod != "browser" and mod != "extensions/spellcheck":
# we only have exceptions for browser and extensions/spellcheck
return True
if not entity:
if mod == "extensions/spellcheck":
return False
# browser
return not (re.match(r"searchplugins\/.+\.xml", path) or
re.match(r"chrome\/help\/images\/[A-Za-z-_]+\.png", path))
if mod == "extensions/spellcheck":
# l10n ships en-US dictionary or something, do compare
return True
if path == "defines.inc":
return entity != "MOZ_LANGPACK_CONTRIBUTORS"
if path != "chrome/browser-region/region.properties":
# only region.properties exceptions remain, compare all others
return True
return not (re.match(r"browser\.search\.order\.[1-9]", entity) or
re.match(r"browser\.contentHandlers\.types\.[0-5]", entity) or
re.match(r"gecko\.handlerService\.schemes\.", entity) or
re.match(r"gecko\.handlerService\.defaultHandlersVersion", entity))
|
[
"re.match"
] |
[((1285, 1337), 're.match', 're.match', (['"""browser\\\\.search\\\\.order\\\\.[1-9]"""', 'entity'], {}), "('browser\\\\.search\\\\.order\\\\.[1-9]', entity)\n", (1293, 1337), False, 'import re\n'), ((1353, 1414), 're.match', 're.match', (['"""browser\\\\.contentHandlers\\\\.types\\\\.[0-5]"""', 'entity'], {}), "('browser\\\\.contentHandlers\\\\.types\\\\.[0-5]', entity)\n", (1361, 1414), False, 'import re\n'), ((1430, 1485), 're.match', 're.match', (['"""gecko\\\\.handlerService\\\\.schemes\\\\."""', 'entity'], {}), "('gecko\\\\.handlerService\\\\.schemes\\\\.', entity)\n", (1438, 1485), False, 'import re\n'), ((1501, 1568), 're.match', 're.match', (['"""gecko\\\\.handlerService\\\\.defaultHandlersVersion"""', 'entity'], {}), "('gecko\\\\.handlerService\\\\.defaultHandlersVersion', entity)\n", (1509, 1568), False, 'import re\n'), ((819, 861), 're.match', 're.match', (['"""searchplugins\\\\/.+\\\\.xml"""', 'path'], {}), "('searchplugins\\\\/.+\\\\.xml', path)\n", (827, 861), False, 'import re\n'), ((880, 940), 're.match', 're.match', (['"""chrome\\\\/help\\\\/images\\\\/[A-Za-z-_]+\\\\.png"""', 'path'], {}), "('chrome\\\\/help\\\\/images\\\\/[A-Za-z-_]+\\\\.png', path)\n", (888, 940), False, 'import re\n')]
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib.auth.views import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name='index.html')),
url(r'^', include('goals.urls')),
url(r'^api-token-auth/', 'rest_framework.authtoken.views.obtain_auth_token'),
url(r'^api-token-register/', 'goals.views.create_auth'),
# Examples:
# url(r'^$', 'results.views.home', name='home'),
# url(r'^results/', include('results.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
[
"django.contrib.admin.autodiscover",
"django.views.generic.TemplateView.as_view",
"django.conf.urls.include",
"django.conf.urls.url"
] |
[((224, 244), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (242, 244), False, 'from django.contrib import admin\n'), ((382, 457), 'django.conf.urls.url', 'url', (['"""^api-token-auth/"""', '"""rest_framework.authtoken.views.obtain_auth_token"""'], {}), "('^api-token-auth/', 'rest_framework.authtoken.views.obtain_auth_token')\n", (385, 457), False, 'from django.conf.urls import patterns, include, url\n'), ((464, 518), 'django.conf.urls.url', 'url', (['"""^api-token-register/"""', '"""goals.views.create_auth"""'], {}), "('^api-token-register/', 'goals.views.create_auth')\n", (467, 518), False, 'from django.conf.urls import patterns, include, url\n'), ((288, 336), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""index.html"""'}), "(template_name='index.html')\n", (308, 336), False, 'from django.views.generic import TemplateView\n'), ((354, 375), 'django.conf.urls.include', 'include', (['"""goals.urls"""'], {}), "('goals.urls')\n", (361, 375), False, 'from django.conf.urls import patterns, include, url\n'), ((858, 882), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (865, 882), False, 'from django.conf.urls import patterns, include, url\n')]
|
import os
import numpy as np
import tensorflow as tf
from utils.recorder import RecorderTf2 as Recorder
class Base(tf.keras.Model):
def __init__(self, a_dim_or_list, action_type, base_dir):
super().__init__()
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
self.device = "/gpu:0"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
else:
self.device = "/cpu:0"
tf.keras.backend.set_floatx('float64')
self.cp_dir, self.log_dir, self.excel_dir = [os.path.join(base_dir, i) for i in ['model', 'log', 'excel']]
self.action_type = action_type
self.a_counts = int(np.array(a_dim_or_list).prod())
self.global_step = tf.Variable(0, name="global_step", trainable=False, dtype=tf.int64) # in TF 2.x must be tf.int64, because function set_step need args to be tf.int64.
self.episode = 0
def get_init_episode(self):
"""
get the initial training step. use for continue train from last training step.
"""
if os.path.exists(os.path.join(self.cp_dir, 'checkpoint')):
return int(tf.train.latest_checkpoint(self.cp_dir).split('-')[-1])
else:
return 0
def generate_recorder(self, logger2file, model=None):
"""
create model/log/data dictionary and define writer to record training data.
"""
self.check_or_create(self.cp_dir, 'checkpoints')
self.check_or_create(self.log_dir, 'logs(summaries)')
self.check_or_create(self.excel_dir, 'excel')
self.recorder = Recorder(
cp_dir=self.cp_dir,
log_dir=self.log_dir,
excel_dir=self.excel_dir,
logger2file=logger2file,
model=model
)
def init_or_restore(self, base_dir):
"""
check whether chekpoint and model be within cp_dir, if in it, restore otherwise initialize randomly.
"""
cp_dir = os.path.join(base_dir, 'model')
if os.path.exists(os.path.join(cp_dir, 'checkpoint')):
try:
self.recorder.checkpoint.restore(self.recorder.saver.latest_checkpoint)
except:
self.recorder.logger.error('restore model from checkpoint FAILED.')
else:
self.recorder.logger.info('restore model from checkpoint SUCCUESS.')
else:
self.recorder.logger.info('initialize model SUCCUESS.')
def save_checkpoint(self, global_step):
"""
save the training model
"""
self.recorder.saver.save(checkpoint_number=global_step)
def writer_summary(self, global_step, **kargs):
"""
record the data used to show in the tensorboard
"""
tf.summary.experimental.set_step(global_step)
for i in [{'tag': 'MAIN/' + key, 'value': kargs[key]} for key in kargs]:
tf.summary.scalar(i['tag'], i['value'])
self.recorder.writer.flush()
def check_or_create(self, dicpath, name=''):
"""
check dictionary whether existing, if not then create it.
"""
if not os.path.exists(dicpath):
os.makedirs(dicpath)
print(f'create {name} directionary :', dicpath)
def close(self):
"""
end training, and export the training model
"""
pass
def get_global_step(self):
"""
get the current trianing step.
"""
return self.global_step
def set_global_step(self, num):
"""
set the start training step.
"""
self.global_step = num
def update_target_net_weights(self, tge, src, ployak=None):
if ployak is None:
tf.group([r.assign(v) for r, v in zip(tge, src)])
else:
tf.group([r.assign(self.ployak * v + (1 - self.ployak) * r) for r, v in zip(tge, src)])
|
[
"os.path.join",
"tensorflow.summary.scalar",
"os.makedirs",
"utils.recorder.RecorderTf2",
"tensorflow.config.experimental.set_memory_growth",
"os.path.exists",
"tensorflow.summary.experimental.set_step",
"tensorflow.Variable",
"numpy.array",
"tensorflow.train.latest_checkpoint",
"tensorflow.keras.backend.set_floatx",
"tensorflow.config.experimental.list_physical_devices"
] |
[((252, 303), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (296, 303), True, 'import tensorflow as tf\n'), ((514, 552), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float64"""'], {}), "('float64')\n", (541, 552), True, 'import tensorflow as tf\n'), ((794, 861), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)', 'dtype': 'tf.int64'}), "(0, name='global_step', trainable=False, dtype=tf.int64)\n", (805, 861), True, 'import tensorflow as tf\n'), ((1661, 1779), 'utils.recorder.RecorderTf2', 'Recorder', ([], {'cp_dir': 'self.cp_dir', 'log_dir': 'self.log_dir', 'excel_dir': 'self.excel_dir', 'logger2file': 'logger2file', 'model': 'model'}), '(cp_dir=self.cp_dir, log_dir=self.log_dir, excel_dir=self.excel_dir,\n logger2file=logger2file, model=model)\n', (1669, 1779), True, 'from utils.recorder import RecorderTf2 as Recorder\n'), ((2038, 2069), 'os.path.join', 'os.path.join', (['base_dir', '"""model"""'], {}), "(base_dir, 'model')\n", (2050, 2069), False, 'import os\n'), ((2834, 2879), 'tensorflow.summary.experimental.set_step', 'tf.summary.experimental.set_step', (['global_step'], {}), '(global_step)\n', (2866, 2879), True, 'import tensorflow as tf\n'), ((389, 456), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (429, 456), True, 'import tensorflow as tf\n'), ((606, 631), 'os.path.join', 'os.path.join', (['base_dir', 'i'], {}), '(base_dir, i)\n', (618, 631), False, 'import os\n'), ((1140, 1179), 'os.path.join', 'os.path.join', (['self.cp_dir', '"""checkpoint"""'], {}), "(self.cp_dir, 'checkpoint')\n", (1152, 1179), False, 'import os\n'), ((2096, 2130), 'os.path.join', 'os.path.join', (['cp_dir', '"""checkpoint"""'], {}), "(cp_dir, 'checkpoint')\n", (2108, 2130), False, 'import 
os\n'), ((2973, 3012), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["i['tag']", "i['value']"], {}), "(i['tag'], i['value'])\n", (2990, 3012), True, 'import tensorflow as tf\n'), ((3205, 3228), 'os.path.exists', 'os.path.exists', (['dicpath'], {}), '(dicpath)\n', (3219, 3228), False, 'import os\n'), ((3242, 3262), 'os.makedirs', 'os.makedirs', (['dicpath'], {}), '(dicpath)\n', (3253, 3262), False, 'import os\n'), ((735, 758), 'numpy.array', 'np.array', (['a_dim_or_list'], {}), '(a_dim_or_list)\n', (743, 758), True, 'import numpy as np\n'), ((1205, 1244), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['self.cp_dir'], {}), '(self.cp_dir)\n', (1231, 1244), True, 'import tensorflow as tf\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import sys
import heapq
def readerLine(fn):
with open(fn, 'rb') as file:
while True:
yield next(file).decode('utf-8').rstrip('\r\n')
def show_top(k, scores, lst):
h = []
for i, s in enumerate(scores):
heapq.heappush(h, (s, -i))
if len(h) > k:
heapq.heappop(h)
num = len(h)
res = [heapq.heappop(h) for _ in six.moves.range(num)]
for s, ii in res[::-1]:
print(' ' + str(s) + '\t' + lst[-ii])
def split_wrt_brackets(str, sp):
blocks = []
part = []
count = 0
for x in str:
if count == 0 and x in sp:
blocks.append(''.join(part))
part = []
else:
part.append(x)
if x == '(':
count += 1
elif x == ')':
count -= 1
if count < 0:
print("Unmatched )", file=sys.stderr)
blocks.append(''.join(part))
return blocks
|
[
"heapq.heappush",
"six.moves.range",
"heapq.heappop"
] |
[((346, 372), 'heapq.heappush', 'heapq.heappush', (['h', '(s, -i)'], {}), '(h, (s, -i))\n', (360, 372), False, 'import heapq\n'), ((439, 455), 'heapq.heappop', 'heapq.heappop', (['h'], {}), '(h)\n', (452, 455), False, 'import heapq\n'), ((398, 414), 'heapq.heappop', 'heapq.heappop', (['h'], {}), '(h)\n', (411, 414), False, 'import heapq\n'), ((465, 485), 'six.moves.range', 'six.moves.range', (['num'], {}), '(num)\n', (480, 485), False, 'import six\n')]
|
#!/usr/bin/env python3
from marshmallow import fields
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from model import Reporter
class ReporterSchema(SQLAlchemyAutoSchema):
name = fields.String(required=True)
key = fields.String(required=True)
class Meta:
dump_only = ['id', 'created', 'info_digest', 'last_seen']
load_only = ['key']
model = Reporter
include_relationships = True
load_instance = True
include_fk = False
|
[
"marshmallow.fields.String"
] |
[((195, 223), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (208, 223), False, 'from marshmallow import fields\n'), ((234, 262), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (247, 262), False, 'from marshmallow import fields\n')]
|
from unittest import main, TestCase
from modules.bmp280 import BMP280
bmp = BMP280()
bmp.start()
class TestModuleBMP280(TestCase):
def test_read_temperatura(self):
medida_temperatura = bmp.read('Temperatura')
assert (medida_temperatura >= 0) or (medida_temperatura <= 100)
def test_read_pressure(self):
medida_pressure = bmp.read('Pressure')
assert (medida_pressure >= 300) or (medida_pressure <= 3000)
def test_read_invalid_grandeza(self):
medida_umidade = bmp.read('Umidade')
self.assertFalse(medida_umidade, 'Leitura realizada mesmo sem suporte')
|
[
"modules.bmp280.BMP280"
] |
[((78, 86), 'modules.bmp280.BMP280', 'BMP280', ([], {}), '()\n', (84, 86), False, 'from modules.bmp280 import BMP280\n')]
|
import base64
import jwt
import hashlib
import time
from datetime import timedelta
from datetime import datetime
DEFAULT_TASK_TYPE = "CMEF"
class TaskType:
@staticmethod
def value_of(task_type: str) -> int:
return {
"UNKNOWN": 1,
"INTERNAL": 2,
"CM": 3,
"CMEF": 4,
"OSF_COMMAND": 5,
"OSF_QUERY": 6,
"OSF_NOTIFY": 7,
"OSF_LOG": 8,
"MDR_ATTACK_DISCOVERY": 9,
"OSF_SYS_CALL": 10,
}.get(task_type)
def create_base64_checksum(http_method: str, raw_url: str, raw_header: str, request_body: str) -> str:
"""Create a base64 encoded hash string for an Apex JWT token"""
string_to_hash = http_method.upper() + "|" + raw_url.lower() + "|" + raw_header + "|" + request_body
base64_hash_string = base64.b64encode(hashlib.sha256(str.encode(string_to_hash)).digest()).decode("utf-8")
return base64_hash_string
def create_jwt_token(
application_id: str,
api_key: str,
http_method: str,
raw_url: str,
header: str,
request_body: str,
algorithm="HS256",
) -> str:
"""Generate a JWT token for an Apex HTTP request. Specific to a url destination and payload"""
issue_time = time.time()
payload = {
"appid": application_id,
"iat": issue_time,
"version": "V1",
"checksum": create_base64_checksum(http_method, raw_url, header, request_body),
}
token = jwt.encode(payload, api_key, algorithm).decode("utf-8")
return token
def get_expiration_utc_date_string(num_days=30):
if not isinstance(num_days, int) or num_days < 1:
num_days = 1
today = datetime.now()
# +5 hours for timezones, just a buffer
timedelta_days = timedelta(days=num_days, hours=5)
future = today + timedelta_days
return future.strftime("%Y-%m-%dT%H:%MU")
|
[
"datetime.datetime.now",
"jwt.encode",
"datetime.timedelta",
"time.time"
] |
[((1257, 1268), 'time.time', 'time.time', ([], {}), '()\n', (1266, 1268), False, 'import time\n'), ((1687, 1701), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1699, 1701), False, 'from datetime import datetime\n'), ((1767, 1800), 'datetime.timedelta', 'timedelta', ([], {'days': 'num_days', 'hours': '(5)'}), '(days=num_days, hours=5)\n', (1776, 1800), False, 'from datetime import timedelta\n'), ((1476, 1515), 'jwt.encode', 'jwt.encode', (['payload', 'api_key', 'algorithm'], {}), '(payload, api_key, algorithm)\n', (1486, 1515), False, 'import jwt\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_core.rq_database.ipynb (unless otherwise specified).
__all__ = ['get_release', 'get_user_release_rating', 'update_user_release_rating', 'delete_user_release_rating',
'get_community_release_rating', 'get_master_release', 'get_releases_related_to_master_release', 'get_artist',
'get_artist_releases', 'get_label', 'get_label_releases']
# Cell
import requests
from typing import Union
from . import *
# Cell
def get_release(user: Union[UserWithoutAuthentication,
UserWithUserTokenBasedAuthentication],
release_id: int,
curr_abbr: Union[CurrAbbr, None] = None
) -> requests.models.Response:
"""
Get information about a particular release from the Discogs database.
No user Authentication needed.
Parameters:
user: user object (required)
release_id : number (required)
-> The Release ID.
curr_abbr: string (optional)
-> Currency for marketplace data. Defaults to the authenticated users currency.
"""
url = f"{RELEASES_URL}/{release_id}"
headers = user.headers
params = user.params
if curr_abbr:
params["curr_abbr"] = curr_abbr.value
return requests.get(url, headers=headers, params=params)
# Cell
def get_user_release_rating(user: Union[UserWithoutAuthentication,
                                        UserWithUserTokenBasedAuthentication],
                            release_id: int,
                            username: str
                            ) -> requests.models.Response:
    """
    Fetch the rating a specific user gave to a release.

    No user authentication is required for this endpoint.

    Parameters:
        user: user object (required)
        release_id : number (required)
            -> The Release ID.
        username: string (required)
            -> The username of the rating you are trying to request.
    """
    return requests.get(
        f"{RELEASES_URL}/{release_id}/rating/{username}",
        headers=user.headers,
        params=user.params,
    )
# Cell
def update_user_release_rating(user: UserWithUserTokenBasedAuthentication,
                               release_id: int,
                               username: str,
                               rating: int
                               ) -> requests.models.Response:
    """
    Update the rating of a release made by the given user.
    If there is no rating, it will create one.
    User Authentication needed.

    Parameters:
        user: user object (required)
        release_id : number (required)
            -> The Release ID.
        username: string (required)
            -> The username of the rating you are trying to request.
        rating: int (required)
            -> The new rating value. Must be a value between 1 and 5.
    """
    url = f"{RELEASES_URL}/{release_id}/rating/{username}"
    headers = user.headers
    params = user.params
    # Clamp into the documented 1..5 range. The previous code clamped the
    # lower bound to 0, which could submit an invalid rating of 0.
    rating = min(max(1, rating), 5)
    data = {"rating": rating}
    return requests.put(url, headers=headers, params=params, json=data)
# Cell
def delete_user_release_rating(user: UserWithUserTokenBasedAuthentication,
                               release_id: int,
                               username: str
                               ) -> requests.models.Response:
    """
    Remove the given user's rating for a release.

    User authentication is required.

    Parameters:
        user: user object (required)
        release_id : number (required)
            -> The Release ID.
        username: string (required)
            -> The username of the rating you are trying to delete.
    """
    return requests.delete(
        f"{RELEASES_URL}/{release_id}/rating/{username}",
        headers=user.headers,
        params=user.params,
    )
# Cell
def get_community_release_rating(user: Union[UserWithoutAuthentication,
                                             UserWithUserTokenBasedAuthentication],
                                 release_id: int
                                 ) -> requests.models.Response:
    """
    Fetch the community rating of a release (average rating plus the
    total number of user ratings).

    This endpoint does not work for master releases.
    No user authentication is required.

    Parameters:
        user: user object (required)
        release_id : number (required)
            -> The Release ID.
    """
    return requests.get(
        f"{RELEASES_URL}/{release_id}/rating",
        headers=user.headers,
        params=user.params,
    )
# Cell
def get_master_release(user: Union[UserWithoutAuthentication,
                                   UserWithUserTokenBasedAuthentication],
                       master_id: int
                       ) -> requests.models.Response:
    """
    Fetch a particular master release from the Discogs database.

    No user authentication is required.

    Parameters:
        user: user object (required)
        master_id : number (required)
            -> The Master ID.
    """
    return requests.get(
        f"{MASTERS_URL}/{master_id}",
        headers=user.headers,
        params=user.params,
    )
# Cell
def get_releases_related_to_master_release(user: Union[UserWithoutAuthentication,
                                                       UserWithUserTokenBasedAuthentication],
                                           master_id: int,
                                           page: Union[int, None] = None,
                                           per_page: Union[int, None] = None,
                                           release_format: Union[str, None] = None,
                                           label: Union[str, None] = None,
                                           released: Union[str, None] = None,
                                           country: Union[str, None] = None,
                                           sort: Union[SortOptionsMaster, None] = None,
                                           sort_order: Union[SortOrder, None] = None
                                           ) -> requests.models.Response:
    """
    List all releases that are versions of the given master release.

    No user authentication is required.

    Parameters:
        user: user object (required)
        master_id : number (required)
            -> The Master ID.
        page: number (optional)
            -> The page you want to request.
        per_page: number (optional)
            -> The number of items per page.
        release_format: string (optional)
            -> The format to filter.
        label: string (optional)
            -> The label to filter.
        released: string (optional)
            -> The release year to filter.
        country: string (optional)
            -> The country to filter.
        sort: string (optional)
            -> Sort items by this field.
        sort_order: string (optional)
            -> Sort items in a particular order (one of asc, desc)
    """
    query = user.params
    # Pagination values below 1 are coerced to the first page.
    if page:
        query["page"] = max(1, page)
    if per_page:
        query["per_page"] = max(1, per_page)
    # Plain string filters are copied through verbatim when provided.
    for key, value in (("format", release_format),
                       ("label", label),
                       ("released", released),
                       ("country", country)):
        if value:
            query[key] = value
    if sort:
        query["sort"] = sort.value
    if sort_order:
        query["sort_order"] = sort_order.value
    return requests.get(f"{MASTERS_URL}/{master_id}/versions",
                        headers=user.headers,
                        params=query)
# Cell
def get_artist(user: Union[UserWithoutAuthentication,
                           UserWithUserTokenBasedAuthentication],
               artist_id: int
               ) -> requests.models.Response:
    """
    Fetch information about an artist.

    No user authentication is required.

    Parameters:
        user: user object (required)
        artist_id : number (required)
            -> The Artist ID.
    """
    return requests.get(
        f"{ARTIST_URL}/{artist_id}",
        headers=user.headers,
        params=user.params,
    )
# Cell
def get_artist_releases(user: Union[UserWithoutAuthentication,
                                    UserWithUserTokenBasedAuthentication],
                        artist_id: int,
                        page: Union[int, None] = None,
                        per_page: Union[int, None] = None,
                        sort: Union[SortOptionsArtist, None] = None,
                        sort_order: Union[SortOrder, None] = None
                        ) -> requests.models.Response:
    """
    List releases and masters associated with the given artist.

    No user authentication is required.

    Parameters:
        user: user object (required)
        artist_id : number (required)
            -> The Artist ID.
        page: number (optional)
            -> The page you want to request.
        per_page: number (optional)
            -> The number of items per page.
        sort: string (optional)
            -> Sort items by this field.
        sort_order: string (optional)
            -> Sort items in a particular order (one of asc, desc)
    """
    query = user.params
    # Pagination values below 1 are coerced to the first page.
    if page:
        query["page"] = max(1, page)
    if per_page:
        query["per_page"] = max(1, per_page)
    if sort:
        query["sort"] = sort.value
    if sort_order:
        query["sort_order"] = sort_order.value
    return requests.get(f"{ARTIST_URL}/{artist_id}/releases",
                        headers=user.headers,
                        params=query)
# Cell
def get_label(user: Union[UserWithoutAuthentication,
                          UserWithUserTokenBasedAuthentication],
              label_id: int
              ) -> requests.models.Response:
    """
    Fetch information about a label.

    No user authentication is required.

    Parameters:
        user: user object (required)
        label_id : number (required)
            -> The Label ID.
    """
    return requests.get(
        f"{LABEL_URL}/{label_id}",
        headers=user.headers,
        params=user.params,
    )
# Cell
def get_label_releases(user: Union[UserWithoutAuthentication,
                                   UserWithUserTokenBasedAuthentication],
                       label_id: int,
                       page: Union[int, None] = None,
                       per_page: Union[int, None] = None,
                       sort: Union[SortOptionsLabel, None] = None,
                       sort_order: Union[SortOrder, None] = None
                       ) -> requests.models.Response:
    """
    List releases and masters associated with the given label.

    No user authentication is required.

    Parameters:
        user: user object (required)
        label_id : number (required)
            -> The Label ID.
        page: number (optional)
            -> The page you want to request.
        per_page: number (optional)
            -> The number of items per page.
        sort: string (optional)
            -> Sort items by this field.
        sort_order: string (optional)
            -> Sort items in a particular order (one of asc, desc)
    """
    query = user.params
    # Pagination values below 1 are coerced to the first page.
    if page:
        query["page"] = max(1, page)
    if per_page:
        query["per_page"] = max(1, per_page)
    if sort:
        query["sort"] = sort.value
    if sort_order:
        query["sort_order"] = sort_order.value
    return requests.get(f"{LABEL_URL}/{label_id}/releases",
                        headers=user.headers,
                        params=query)
|
[
"requests.put",
"requests.delete",
"requests.get"
] |
[((1272, 1321), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (1284, 1321), False, 'import requests\n'), ((2067, 2116), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (2079, 2116), False, 'import requests\n'), ((3052, 3112), 'requests.put', 'requests.put', (['url'], {'headers': 'headers', 'params': 'params', 'json': 'data'}), '(url, headers=headers, params=params, json=data)\n', (3064, 3112), False, 'import requests\n'), ((3796, 3848), 'requests.delete', 'requests.delete', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (3811, 3848), False, 'import requests\n'), ((4636, 4685), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (4648, 4685), False, 'import requests\n'), ((5266, 5315), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (5278, 5315), False, 'import requests\n'), ((7657, 7706), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (7669, 7706), False, 'import requests\n'), ((8217, 8266), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (8229, 8266), False, 'import requests\n'), ((9646, 9695), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (9658, 9695), False, 'import requests\n'), ((10195, 10244), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (10207, 10244), False, 'import requests\n'), ((11609, 11658), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 
'params': 'params'}), '(url, headers=headers, params=params)\n', (11621, 11658), False, 'import requests\n')]
|
# -*- coding:utf-8 -*-
# --------------------------------------------------------
# Copyright (C), 2016-2021, lizhe, All rights reserved
# --------------------------------------------------------
# @Name: standard_excel_reader.py
# @Author: lizhe
# @Created: 2021/7/3 - 22:14
# --------------------------------------------------------
import os
from typing import Dict, List
from automotive.application.common.constants import Testcase, priority_config, point, index_list
from automotive.application.common.interfaces import BaseReader, TestCases
from automotive.logger.logger import logger
from automotive.application.common.enums import ModifyTypeEnum
try:
import xlwings as xw
except ModuleNotFoundError:
os.system("pip install xlwings")
finally:
import xlwings as xw
from xlwings import Sheet, Book
class StandardExcelSampleReader(BaseReader):
    """Read a standard-format test-case Excel workbook into ``Testcase`` objects,
    one ``TestCases`` list per sheet."""

    def __init__(self, ignore_sheet_name: List[str] = None):
        # Sheets listed here are skipped entirely (default: the "Summary" page).
        if ignore_sheet_name is None:
            ignore_sheet_name = ["Summary"]
        # 1-based Excel row at which test-case data begins.
        self.__start_row = 3
        self.__ignore_sheet_name = ignore_sheet_name

    def read_from_file(self, file: str) -> Dict[str, TestCases]:
        """
        Read every non-ignored sheet of the workbook.

        :param file: path to the Excel workbook
        :return: mapping of sheet name -> parsed test cases
        """
        result = dict()
        # Run Excel head-less and silence dialogs so parsing cannot block on UI.
        app = xw.App(visible=False, add_book=False)
        app.display_alerts = False
        app.screen_updating = False
        wb = app.books.open(file)
        sheet_count = wb.sheets.count
        for i in range(sheet_count):
            sheet_name = wb.sheets[i].name
            # Filter out ignored sheets such as the Summary page.
            if sheet_name not in self.__ignore_sheet_name:
                self.__handle_sheet(wb, sheet_name, result)
        wb.close()
        app.quit()
        try:
            # Some xlwings versions lack App.kill(); quitting above already
            # released the workbook, so a failure here is only logged.
            app.kill()
        except AttributeError:
            logger.debug("app kill fail")
        logger.info("read excel done")
        return result

    def __handle_sheet(self, wb: Book, sheet_name: str, result: Dict[str, TestCases]):
        """
        Parse a single sheet and store its test cases.
        :param wb: workbook
        :param sheet_name: sheet name
        :param result: result collection (sheet name -> test cases)
        """
        logger.info(f"handle sheet {sheet_name}")
        sheet = wb.sheets[sheet_name]
        testcases = self.__parse_test_case(sheet)
        result[sheet_name] = testcases

    def __parse_test_case(self, sheet: Sheet) -> TestCases:
        """
        Parse the sheet's rows into test cases, one row at a time.
        :param sheet: the worksheet to read
        :return: the parsed test cases
        """
        testcases = []
        # Numeric ID suffixes collected from the case names, used below to
        # detect duplicate IDs.
        tem = []
        max_row = sheet.used_range.last_cell.row
        for i in range(max_row + 1):
            if i > (self.__start_row - 1):
                testcase = Testcase()
                # Column C: case name; it must end with "_<number>" (the ID).
                testcase.name = sheet.range(f"C{i}").value
                index = testcase.name.split('_')[-1]
                if not index.isdigit():
                    raise RuntimeError(f"此条用例名称: {testcase.name} 缺少ID号,请添加")
                tem.append(index)
                # Columns B/D/E/F: module, pre-conditions, actions, expectations.
                testcase.module = sheet.range(f"B{i}").value
                testcase.pre_condition = self.__parse_pre_condition(sheet.range(f"D{i}").value)
                testcase.actions = self.__parse_actions(sheet.range(f"E{i}").value)
                testcase.exceptions = self.__parse_exceptions(sheet.range(f"F{i}").value)
                requirement = sheet.range(f"G{i}").value
                testcase.requirement = requirement.split("\n") if requirement else None
                fix_cell = sheet.range(f"J{i}").value
                if fix_cell is not None:
                    try:
                        testcase.fix = ModifyTypeEnum.read_excel_from_name(fix_cell)
                    except ValueError:
                        logger.debug(f"{fix_cell} is not ModifyTypeEnum")
                automation_cell = sheet.range(f"H{i}").value
                # Empty cell -> None; "是" (yes) -> True; anything else -> False.
                # Only True causes the xmind writer to emit the [A] marker.
                testcase.automation = automation_cell == "是" if automation_cell else None
                priority_cell = sheet.range(f"I{i}").value
                testcase.priority = priority_config[priority_cell] if priority_cell else None
                test_result = sheet.range(f"N{i}").value
                testcase.test_result = test_result.strip().upper() if test_result else None
                testcase.calc_hash()
                testcases.append(testcase)
        for i in tem:
            if tem.count(i) > 1:
                raise RuntimeError(f"此ID: {i} 有重复,请检查")
        return testcases

    @staticmethod
    def __filter_automotive(content: str) -> bool:
        # True for lines that are NOT hex literals ("0x…"/"0X…").
        return not (content.startswith("0x") or content.startswith("0X"))

    def __parse_pre_condition(self, pre_condition: str) -> List[str]:
        """
        Parse the pre-condition cell into a list of condition strings.
        :param pre_condition: raw cell text (possibly multi-line)
        :return: one entry per non-empty line, numbering prefixes stripped
        """
        logger.debug(f"pre_condition = {pre_condition}")
        contents = []
        if pre_condition:
            # Protect embedded CRLFs from the line split below by swapping
            # them for a sentinel character; restored at the end of the loop.
            if "\r\n" in pre_condition:
                pre_condition = pre_condition.replace("\r\n", "$")
            # pre_conditions = list(filter(lambda x: self.__filter_automotive(x) and x != "", pre_condition.split("\n")))
            pre_conditions = list(filter(lambda x: x != "", pre_condition.split("\n")))
            pre_conditions = list(map(lambda x: x.replace("、", "."), pre_conditions))
            for pre in pre_conditions:
                # if point in pre:
                #     pre = pre.replace(point, " ").strip()
                # Only strip the leading 2-char numbering when the line really
                # starts with a digit, so unnumbered lines keep their first
                # two characters; hex literals like "0x…" are kept verbatim.
                if pre[0].isdecimal() and pre[:2] != '0x':
                    pre = pre[2:].strip()
                # NOTE(review): no-op branch — hex-prefixed lines pass through
                # unchanged; confirm whether different handling was intended.
                if pre[:2] == "0x":
                    pre = pre
                logger.debug(f"pre = {pre}")
                if "$" in pre:
                    pre = pre.replace("$", "\r\n")
                contents.append(pre)
        return contents

    def __parse_actions(self, actions: str) -> List[str]:
        """Split the action cell into one string per numbered step."""
        total = []
        lines = actions.split("\n")
        temp = []
        for i, line in enumerate(lines):
            if line == '':
                continue
            if line[0] in index_list:
                # temp holds the indices of lines that start with a step
                # number (e.g. with lines 0 and 2 numbered, temp == [0, 2]).
                temp.append(i)
        # No numbered line at all means the cell is one single action step.
        if temp:
            # Drop the first index so each slice below ends right before the
            # next numbered line.
            temp.pop(0)
            start_index = 0
            for t in temp:
                content = "\n".join(lines[start_index:t])
                total.append(content)
                start_index = t
            # Join everything from the last numbered step to the final line.
            content = "\n".join(lines[start_index:])
            total.append(content)
        else:
            total.append(actions)
        # Strip leading "1."-style numbering from every collected step.
        new_total = []
        for t in total:
            content = self.__handle_prefix_str(t)
            new_total.append(content)
        return new_total

    @staticmethod
    def __handle_prefix_str(content: str) -> str:
        """
        Strip a leading numbering prefix such as "1." or "2。".
        :param content: raw step text
        :return: the text without its numbering prefix
        """
        if content[0] in index_list:
            content = content[1:]
            if content[0] in (".", "。", " "):
                content = content[1:]
        return content

    def __parse_exceptions(self, exceptions: str) -> List[str]:
        """Parse the expected-result cell into one entry per CRLF line,
        numbering prefixes stripped."""
        contents = []
        if exceptions:
            exception_lines = exceptions.split("\r\n")
            for line in exception_lines:
                content = self.__handle_prefix_str(line)
                contents.append(content)
        return contents
|
[
"automotive.logger.logger.logger.info",
"automotive.logger.logger.logger.debug",
"xlwings.App",
"os.system",
"automotive.application.common.enums.ModifyTypeEnum.read_excel_from_name",
"automotive.application.common.constants.Testcase"
] |
[((734, 766), 'os.system', 'os.system', (['"""pip install xlwings"""'], {}), "('pip install xlwings')\n", (743, 766), False, 'import os\n'), ((1233, 1270), 'xlwings.App', 'xw.App', ([], {'visible': '(False)', 'add_book': '(False)'}), '(visible=False, add_book=False)\n', (1239, 1270), True, 'import xlwings as xw\n'), ((1801, 1831), 'automotive.logger.logger.logger.info', 'logger.info', (['"""read excel done"""'], {}), "('read excel done')\n", (1812, 1831), False, 'from automotive.logger.logger import logger\n'), ((2083, 2124), 'automotive.logger.logger.logger.info', 'logger.info', (['f"""handle sheet {sheet_name}"""'], {}), "(f'handle sheet {sheet_name}')\n", (2094, 2124), False, 'from automotive.logger.logger import logger\n'), ((4780, 4828), 'automotive.logger.logger.logger.debug', 'logger.debug', (['f"""pre_condition = {pre_condition}"""'], {}), "(f'pre_condition = {pre_condition}')\n", (4792, 4828), False, 'from automotive.logger.logger import logger\n'), ((1763, 1792), 'automotive.logger.logger.logger.debug', 'logger.debug', (['"""app kill fail"""'], {}), "('app kill fail')\n", (1775, 1792), False, 'from automotive.logger.logger import logger\n'), ((2611, 2621), 'automotive.application.common.constants.Testcase', 'Testcase', ([], {}), '()\n', (2619, 2621), False, 'from automotive.application.common.constants import Testcase, priority_config, point, index_list\n'), ((5631, 5660), 'automotive.logger.logger.logger.debug', 'logger.debug', (['f"""pre = {pre}"""'], {}), "(f'pre = {pre}')\n", (5643, 5660), False, 'from automotive.logger.logger import logger\n'), ((3520, 3565), 'automotive.application.common.enums.ModifyTypeEnum.read_excel_from_name', 'ModifyTypeEnum.read_excel_from_name', (['fix_cell'], {}), '(fix_cell)\n', (3555, 3565), False, 'from automotive.application.common.enums import ModifyTypeEnum\n'), ((3629, 3678), 'automotive.logger.logger.logger.debug', 'logger.debug', (['f"""{fix_cell} is not ModifyTypeEnum"""'], {}), "(f'{fix_cell} is not 
ModifyTypeEnum')\n", (3641, 3678), False, 'from automotive.logger.logger import logger\n')]
|
import os
import HFSSdrawpy.libraries.example_elements as elt
from HFSSdrawpy import Body, Modeler
from HFSSdrawpy.parameters import GAP, TRACK
# import HFSSdrawpy.libraries.base_elements as base
# Head-less HFSS modeling session shared by everything drawn below.
pm = Modeler("hfss")
relative = pm.set_variable("1mm")  # NOTE(review): appears unused below — confirm before removing
# Coordinate systems: `chip` is offset relative to `main`; `chip1`/`chip2`
# are rotated children of `chip`.
main = Body(pm, "main")
chip = Body(pm, "chip", rel_coor=[["1mm", "1mm", "1mm"], [1, 0, 0], [0, 0, 1]], ref_name="main")
chip1 = Body(pm, "chip1", rel_coor=[[0, 0, 0], [0, 1, 0], [1, 0, 0]], ref_name="chip")
chip2 = Body(pm, "chip2", rel_coor=[[0, 0, 0], [1, 0, 0], [0, 0, 1]], ref_name="chip")
# Track/gap width variables (presumably a coplanar-waveguide cross-section);
# only `track` and `gap` are used below — the _big/_middle/offset variables
# are defined but unused here.
track = pm.set_variable("20um")
gap = pm.set_variable("10um", name="gap")
track_big = pm.set_variable("25um")
gap_big = pm.set_variable("15um")
track_middle = pm.set_variable("22.5um")
gap_middle = pm.set_variable("12.5um")
offset = pm.set_variable("-50um")
# chip1
# default is the widths of track and gap
(port11,) = elt.create_port(chip1, [track, track + 2 * gap], name="port11")
with chip1(["2.0mm", "0.0mm"], [1, 0]):
    # default is the widths of track and gap
    (port12,) = elt.create_port(chip1, [track, track + 2 * gap], name="port12")
# NOTE(review): bond_slope is never used — confirm before removing.
bond_length, bond_slope, pcb_track, pcb_gap = "200um", 0.5, "300um", "200um"
with chip1(["0.5mm", "0.5mm"], [0, 1]):
    (con_port1,) = elt.draw_connector(chip1, pcb_track, pcb_gap, bond_length, name="con_port1")
with chip1(["1.5mm", "-1.0mm"], [0, 1]):
    (port13,) = elt.create_port(chip1, [track, track + 2 * gap], name="port13")
# Route a bonded cable with 100um fillets from the connector to port13.
chip1.draw_cable(
    con_port1,
    port13,
    is_bond=True,
    fillet="100um",
    reverse_adaptor=False,
    to_meander=[0, 0, 0],
    meander_length=0,
    name="con_port1_port13",
)
ground_plane1 = chip1.rect([0, 0], ["3mm", "3mm"], layer=TRACK, name="gp1")
# chip2 — same layout as chip1, drawn in the chip2 coordinate system.
# default is the widths of track and gap
(port21,) = elt.create_port(chip2, [track, track + 2 * gap], name="port21")
with chip2(["2.0mm", "0.0mm"], [1, 0]):
    # default is the widths of track and gap
    (port22,) = elt.create_port(chip2, [track, track + 2 * gap], name="port22")
bond_length, bond_slope, pcb_track, pcb_gap = "200um", 0.5, "300um", "200um"
with chip2(["0.5mm", "0.5mm"], [0, 1]):
    (con_port2,) = elt.draw_connector(chip2, pcb_track, pcb_gap, bond_length, name="con_port2")
with chip2(["1.5mm", "-1.0mm"], [0, 1]):
    (port23,) = elt.create_port(chip2, [track, track + 2 * gap], name="port23")
chip2.draw_cable(
    con_port2,
    port23,
    is_bond=True,
    fillet="100um",
    reverse_adaptor=False,
    to_meander=[0, 0, 0],
    meander_length=0,
    name="con_port2_port23",
)
# # 3D
# Substrate box, then ground planes: subtract the GAP entities and unite the
# TRACK entities so each plane (traces included) becomes a perfect-E boundary.
chip.box([0, 0, 0], ["3mm", "3mm", "3mm"], material="silicon")
ground_plane2 = chip2.rect([0, 0], ["3mm", "3mm"], layer=TRACK, name="gp2")
ground_plane1.subtract(chip1.entities[GAP])
ground_plane1.unite(chip1.entities[TRACK])
ground_plane1.assign_perfect_E()
ground_plane2.subtract(chip2.entities[GAP])
ground_plane2.unite(chip2.entities[TRACK])
ground_plane2.assign_perfect_E()
main.cylinder([0, 0, 0], "0.5mm", "0.7mm", "Z", name="tube")
# generate gds file
pm.generate_gds(os.path.join(os.getcwd(), "gds_files"), "cable_test")
|
[
"os.getcwd",
"HFSSdrawpy.libraries.example_elements.create_port",
"HFSSdrawpy.Body",
"HFSSdrawpy.Modeler",
"HFSSdrawpy.libraries.example_elements.draw_connector"
] |
[((204, 219), 'HFSSdrawpy.Modeler', 'Modeler', (['"""hfss"""'], {}), "('hfss')\n", (211, 219), False, 'from HFSSdrawpy import Body, Modeler\n'), ((263, 279), 'HFSSdrawpy.Body', 'Body', (['pm', '"""main"""'], {}), "(pm, 'main')\n", (267, 279), False, 'from HFSSdrawpy import Body, Modeler\n'), ((288, 381), 'HFSSdrawpy.Body', 'Body', (['pm', '"""chip"""'], {'rel_coor': "[['1mm', '1mm', '1mm'], [1, 0, 0], [0, 0, 1]]", 'ref_name': '"""main"""'}), "(pm, 'chip', rel_coor=[['1mm', '1mm', '1mm'], [1, 0, 0], [0, 0, 1]],\n ref_name='main')\n", (292, 381), False, 'from HFSSdrawpy import Body, Modeler\n'), ((387, 465), 'HFSSdrawpy.Body', 'Body', (['pm', '"""chip1"""'], {'rel_coor': '[[0, 0, 0], [0, 1, 0], [1, 0, 0]]', 'ref_name': '"""chip"""'}), "(pm, 'chip1', rel_coor=[[0, 0, 0], [0, 1, 0], [1, 0, 0]], ref_name='chip')\n", (391, 465), False, 'from HFSSdrawpy import Body, Modeler\n'), ((475, 553), 'HFSSdrawpy.Body', 'Body', (['pm', '"""chip2"""'], {'rel_coor': '[[0, 0, 0], [1, 0, 0], [0, 0, 1]]', 'ref_name': '"""chip"""'}), "(pm, 'chip2', rel_coor=[[0, 0, 0], [1, 0, 0], [0, 0, 1]], ref_name='chip')\n", (479, 553), False, 'from HFSSdrawpy import Body, Modeler\n'), ((879, 942), 'HFSSdrawpy.libraries.example_elements.create_port', 'elt.create_port', (['chip1', '[track, track + 2 * gap]'], {'name': '"""port11"""'}), "(chip1, [track, track + 2 * gap], name='port11')\n", (894, 942), True, 'import HFSSdrawpy.libraries.example_elements as elt\n'), ((1784, 1847), 'HFSSdrawpy.libraries.example_elements.create_port', 'elt.create_port', (['chip2', '[track, track + 2 * gap]'], {'name': '"""port21"""'}), "(chip2, [track, track + 2 * gap], name='port21')\n", (1799, 1847), True, 'import HFSSdrawpy.libraries.example_elements as elt\n'), ((1045, 1108), 'HFSSdrawpy.libraries.example_elements.create_port', 'elt.create_port', (['chip1', '[track, track + 2 * gap]'], {'name': '"""port12"""'}), "(chip1, [track, track + 2 * gap], name='port12')\n", (1060, 1108), True, 'import 
HFSSdrawpy.libraries.example_elements as elt\n'), ((1247, 1323), 'HFSSdrawpy.libraries.example_elements.draw_connector', 'elt.draw_connector', (['chip1', 'pcb_track', 'pcb_gap', 'bond_length'], {'name': '"""con_port1"""'}), "(chip1, pcb_track, pcb_gap, bond_length, name='con_port1')\n", (1265, 1323), True, 'import HFSSdrawpy.libraries.example_elements as elt\n'), ((1950, 2013), 'HFSSdrawpy.libraries.example_elements.create_port', 'elt.create_port', (['chip2', '[track, track + 2 * gap]'], {'name': '"""port22"""'}), "(chip2, [track, track + 2 * gap], name='port22')\n", (1965, 2013), True, 'import HFSSdrawpy.libraries.example_elements as elt\n'), ((2152, 2228), 'HFSSdrawpy.libraries.example_elements.draw_connector', 'elt.draw_connector', (['chip2', 'pcb_track', 'pcb_gap', 'bond_length'], {'name': '"""con_port2"""'}), "(chip2, pcb_track, pcb_gap, bond_length, name='con_port2')\n", (2170, 2228), True, 'import HFSSdrawpy.libraries.example_elements as elt\n'), ((1390, 1453), 'HFSSdrawpy.libraries.example_elements.create_port', 'elt.create_port', (['chip1', '[track, track + 2 * gap]'], {'name': '"""port13"""'}), "(chip1, [track, track + 2 * gap], name='port13')\n", (1405, 1453), True, 'import HFSSdrawpy.libraries.example_elements as elt\n'), ((2295, 2358), 'HFSSdrawpy.libraries.example_elements.create_port', 'elt.create_port', (['chip2', '[track, track + 2 * gap]'], {'name': '"""port23"""'}), "(chip2, [track, track + 2 * gap], name='port23')\n", (2310, 2358), True, 'import HFSSdrawpy.libraries.example_elements as elt\n'), ((3050, 3061), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3059, 3061), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-12-19 15:27
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a JSON ``query_config`` field (with per-field boost defaults) to
    ``MetadataConfig`` and make ``IndexMetadataTask.created`` auto-populate."""

    dependencies = [
        ('metadata', '0003_metadataconfig'),
    ]

    operations = [
        migrations.AddField(
            model_name='metadataconfig',
            name='query_config',
            # One-off default used only to back-fill existing rows
            # (preserve_default=False drops it from the field afterwards).
            field=django.contrib.postgres.fields.jsonb.JSONField(default={'dataset_description': {'boost': 1},
                                                                     'dataset_source': {'boost': 1},
                                                                     'dataset_title': {'boost': 1},
                                                                     'description': {'boost': 1.5}}),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='indexmetadatatask',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
|
[
"django.db.models.DateTimeField"
] |
[((1033, 1072), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1053, 1072), False, 'from django.db import migrations, models\n')]
|
# This example client takes a PDB file, sends it to the REST service, which
# creates HSSP data. The HSSP data is then output to the console.
# api by https://github.com/cmbi/xssp-api/blob/master/xssp_api/frontend/api/endpoints.py
import json
import requests
import time
REST_URL = "https://www3.cmbi.umcn.nl/xssp/"

# Formats accepted by the xssp REST API.
inputCollection = ["pdb_id", "pdb_redo_id", "pdb_file", "sequence"]
outputCollection = ["hssp_hssp", "hssp_stockholm", "dssp"]


def pdbToxssp(_input, inputF="pdb_id", outputF="dssp"):
    '''transform PDB to xssp

    Arguments:
        _input {str} -- input id or PDB file

    Keyword Arguments:
        inputF {str} -- input format (default: {"pdb_id"})
        outputF {str} -- output format (default: {"dssp"})

    Raises:
        ValueError -- unknown input or output format
        Exception -- the server reported FAILURE or REVOKED

    Returns:
        str -- dssp or hssp format
    '''
    # Validate formats up front. The original code used `raise "..."`,
    # which is itself a TypeError in Python 3 (exceptions must derive
    # from BaseException) — raise proper exceptions instead.
    if inputF not in inputCollection:
        raise ValueError("input Format error, Please check your format!")
    if outputF not in outputCollection:
        raise ValueError("output Format error, Please check your format!")
    # Submit the job; raise_for_status() aborts on an HTTP error. On
    # success the server returns the id of the job it started.
    url_create = '{0}api/create/{1}/{2}/'.format(REST_URL, inputF, outputF)
    if inputF == "pdb_id":
        r = requests.post(url_create, data={"data": _input})
    elif inputF == "pdb_file":
        # Close the uploaded file once the request is done (it used to leak).
        with open(_input, 'rb') as file_:
            r = requests.post(url_create, files={'file_': file_})
    elif inputF == "pdb_redo_id":
        r = requests.post(url_create, data={"data": _input})
    else:  # inputF == "sequence" (guaranteed by the validation above)
        with open(_input, 'rb') as sequence:
            r = requests.post(url_create, data={"data": sequence})
    r.raise_for_status()
    job_id = json.loads(r.text)['id']
    print("Job submitted successfully. Id is: '{}'".format(job_id))
    # Poll the job status until it either succeeds or fails.
    ready = False
    while not ready:
        url_status = '{0}api/status/{1}/{2}/{3}/'.format(
            REST_URL, inputF, outputF, job_id)
        r = requests.get(url_status)
        r.raise_for_status()
        status = json.loads(r.text)['status']
        print("Job status is: '{}'".format(status))
        if status == 'SUCCESS':
            ready = True
        elif status in ['FAILURE', 'REVOKED']:
            raise Exception(json.loads(r.text)['message'])
        else:
            # Still running -- wait before polling again.
            time.sleep(5)
    # The loop only exits on SUCCESS, so the result is ready to fetch.
    url_result = '{0}api/result/{1}/{2}/{3}/'.format(
        REST_URL, inputF, outputF, job_id)
    r = requests.get(url_result)
    r.raise_for_status()
    return json.loads(r.text)['result']
if __name__ == '__main__':
    # Example run: fetch DSSP data for PDB entry 2GW9 and print it.
    result = pdbToxssp("2GW9")
    print(result)
|
[
"requests.post",
"json.loads",
"requests.get",
"time.sleep"
] |
[((1261, 1299), 'requests.post', 'requests.post', (['url_create'], {'data': 'pdb_id'}), '(url_create, data=pdb_id)\n', (1274, 1299), False, 'import requests\n'), ((1975, 1993), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (1985, 1993), False, 'import json\n'), ((2518, 2542), 'requests.get', 'requests.get', (['url_status'], {}), '(url_status)\n', (2530, 2542), False, 'import requests\n'), ((3614, 3638), 'requests.get', 'requests.get', (['url_result'], {}), '(url_result)\n', (3626, 3638), False, 'import requests\n'), ((1389, 1427), 'requests.post', 'requests.post', (['url_create'], {'files': 'files'}), '(url_create, files=files)\n', (1402, 1427), False, 'import requests\n'), ((2590, 2608), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (2600, 2608), False, 'import json\n'), ((3685, 3703), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (3695, 3703), False, 'import json\n'), ((1513, 1556), 'requests.post', 'requests.post', (['url_create'], {'data': 'pdb_redo_id'}), '(url_create, data=pdb_redo_id)\n', (1526, 1556), False, 'import requests\n'), ((3292, 3305), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3302, 3305), False, 'import time\n'), ((1648, 1688), 'requests.post', 'requests.post', (['url_create'], {'data': 'sequence'}), '(url_create, data=sequence)\n', (1661, 1688), False, 'import requests\n'), ((3235, 3253), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (3245, 3253), False, 'import json\n')]
|
import os
import torch
from copy import deepcopy
from src.agents.agents import *
from src.utils.setup import process_config
from src.utils.utils import load_json
def run(config_path, gpu_device=-1):
    """Build the configured agent and execute a full training run.

    Parameters:
        config_path: path to the JSON experiment configuration.
        gpu_device: GPU index to pin the run to; a negative value keeps
            whatever device list the config already specifies.
    """
    config = process_config(config_path)
    if gpu_device >= 0:
        # Restrict the run to the single requested GPU.
        config.gpu_device = [gpu_device]
    # Resolve the agent class by name from this module's namespace
    # (classes are pulled in by the star import above).
    agent_cls = globals()[config.agent]
    agent = agent_cls(config)
    if config.continue_exp_dir is not None:
        agent.logger.info("Found existing model... Continuing training!")
        agent.load_checkpoint(
            config.continue_exp_name,
            checkpoint_dir=os.path.join(config.continue_exp_dir, 'checkpoints'),
            load_memory_bank=True,
            load_model=True,
            load_optim=True,
            load_epoch=True,
        )
    try:
        agent.run()
        agent.finalise()
    except KeyboardInterrupt:
        # Allow a clean manual stop without a traceback.
        pass
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config', type=str, default='path to config file')
parser.add_argument('--gpu-device', type=int, default=-1)
args = parser.parse_args()
run(args.config, args.gpu_device)
|
[
"os.path.join",
"argparse.ArgumentParser",
"src.utils.setup.process_config"
] |
[((214, 241), 'src.utils.setup.process_config', 'process_config', (['config_path'], {}), '(config_path)\n', (228, 241), False, 'from src.utils.setup import process_config\n'), ((982, 1007), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1005, 1007), False, 'import argparse\n'), ((523, 575), 'os.path.join', 'os.path.join', (['config.continue_exp_dir', '"""checkpoints"""'], {}), "(config.continue_exp_dir, 'checkpoints')\n", (535, 575), False, 'import os\n')]
|
import os
import pygame
from game_defines import DIRECTIONS
ASSET_BASE = os.path.join(os.path.dirname(__file__), "assets")
class Actor(object):
@staticmethod
def asset(name):
return os.path.join(ASSET_BASE, name)
def __init__(self, name, image_path, actor_type, startx, starty):
self.image = pygame.image.load(Actor.asset(image_path))
self.name = name
self.x = startx
self.y = starty
self.actor_type = actor_type
self.map_object = None
def process(self, sensor_input):
raise AssertionError("Process Needs to be overriden")
def get_image(self):
return self.image
def get_type(self):
return self.actor_type
def get_x(self):
return self.x
def get_y(self):
return self.y
def set_map(self, map_object):
self.map_object = map_object
def move(self, move_to):
x_offset = 0
y_offset = 0
if move_to == DIRECTIONS.UP:
x_offset = 0
y_offset = -1
elif move_to == DIRECTIONS.UPRIGHT:
x_offset = 1
y_offset = -1
elif move_to == DIRECTIONS.UPLEFT:
x_offset = -1
y_offset = -1
elif move_to == DIRECTIONS.RIGHT:
x_offset = 1
y_offset = 0
elif move_to == DIRECTIONS.DOWN:
x_offset = 0
y_offset = 1
elif move_to == DIRECTIONS.DOWNRIGHT:
x_offset = 1
y_offset = 1
elif move_to == DIRECTIONS.DOWNLEFT:
x_offset = -1
y_offset = 1
elif move_to == DIRECTIONS.LEFT:
x_offset = -1
y_offset = 0
if self.map_object.is_blocked(self.x + x_offset, self.y + y_offset):
return False
self.x += x_offset
self.y += y_offset
return True
|
[
"os.path.dirname",
"os.path.join"
] |
[((93, 118), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (108, 118), False, 'import os\n'), ((206, 236), 'os.path.join', 'os.path.join', (['ASSET_BASE', 'name'], {}), '(ASSET_BASE, name)\n', (218, 236), False, 'import os\n')]
|
"""
Various simple (basic) functions in the "utilities".
The MIT License (MIT)
Originally created at 8/31/20, for Python 3.x
Copyright (c) 2021 <NAME> (<EMAIL>) & Stanford Geometric Computing Lab
"""
import torch
import multiprocessing as mp
import dask.dataframe as dd
from torch import nn
from sklearn.model_selection import train_test_split
def iterate_in_chunks(l, n):
"""Yield successive 'n'-sized chunks from iterable 'l'.
Note: last chunk will be smaller than l if n doesn't divide l perfectly.
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def df_parallel_column_apply(df, func, column_name):
n_partitions = mp.cpu_count() * 4
d_data = dd.from_pandas(df, npartitions=n_partitions)
res =\
d_data.map_partitions(lambda df: df.apply((lambda row: func(row[column_name])), axis=1))\
.compute(scheduler='processes')
return res
def cross_entropy(pred, soft_targets):
""" pred: unscaled logits
soft_targets: target-distributions (i.e., sum to 1)
"""
logsoftmax = nn.LogSoftmax(dim=1)
return torch.mean(torch.sum(-soft_targets * logsoftmax(pred), 1))
def make_train_test_val_splits(datataset_df, loads, random_seed, unique_id_column=None):
""" Split the data into train/val/test.
:param datataset_df: pandas Dataframe containing the dataset (e.g., ArtEmis)
:param loads: list with the three floats summing to one for train/val/test
:param random_seed: int
:return: changes the datataset_df in-place to include a column ("split") indicating the split of each row
"""
if sum(loads) != 1:
raise ValueError()
train_size, val_size, test_size = loads
print("Using a {},{},{} for train/val/test purposes".format(train_size, val_size, test_size))
df = datataset_df
## unique id
if unique_id_column is None:
unique_id = df.art_style + df.painting # default for ArtEmis
else:
unique_id = df[unique_id_column]
unique_ids = unique_id.unique()
unique_ids.sort()
train, rest = train_test_split(unique_ids, test_size=val_size+test_size, random_state=random_seed)
train = set(train)
if val_size != 0:
val, test = train_test_split(rest, test_size=round(test_size*len(unique_ids)), random_state=random_seed)
else:
test = rest
test = set(test)
assert len(test.intersection(train)) == 0
def mark_example(x):
if x in train:
return 'train'
elif x in test:
return 'test'
else:
return 'val'
df = df.assign(split=unique_id.apply(mark_example))
return df
|
[
"torch.nn.LogSoftmax",
"dask.dataframe.from_pandas",
"sklearn.model_selection.train_test_split",
"multiprocessing.cpu_count"
] |
[((687, 731), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['df'], {'npartitions': 'n_partitions'}), '(df, npartitions=n_partitions)\n', (701, 731), True, 'import dask.dataframe as dd\n'), ((1046, 1066), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1059, 1066), False, 'from torch import nn\n'), ((2043, 2134), 'sklearn.model_selection.train_test_split', 'train_test_split', (['unique_ids'], {'test_size': '(val_size + test_size)', 'random_state': 'random_seed'}), '(unique_ids, test_size=val_size + test_size, random_state=\n random_seed)\n', (2059, 2134), False, 'from sklearn.model_selection import train_test_split\n'), ((655, 669), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (667, 669), True, 'import multiprocessing as mp\n')]
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from functools import partial
from time import sleep
from variaveis import *
from controler_palavras import buscar_palavra
from aprende import adicionar_resposta
import os
def iniciar(browser, botao_entrada, nome=''):
limpar_tela()
print('Carregando...')
wdw = WebDriverWait(browser, 25)
browser.get(url)
wdw.until(
partial(espera_elemento, By.XPATH, botao_entrada),
'"Botão entrar" não foi encontrado'
)
browser.find_element_by_xpath(botao_entrada).click()
if 'Twitter' in browser.title:
print('Entrando com o twitter...')
user = input('Digite o usuário/email: ')
password = input('Digite a senha: ')
browser.find_element_by_xpath(input_user_twitter).send_keys(user)
browser.find_element_by_xpath(input_pass_twitter).send_keys(password)
browser.find_element_by_xpath(input_submit_twitter).click()
elif 'Facebook' in browser.title:
print('Entrando com o Facebook...')
user = input('Digite o email/telefone: ')
password = input('Digite a senha: ')
browser.find_element_by_xpath(input_user_facebook).send_keys(user)
browser.find_element_by_xpath(input_pass_facebook).send_keys(password)
browser.find_element_by_xpath(input_submit_facebook).click()
else:
if len(nome) > 0:
wdw.until(
partial(espera_elemento, By.XPATH, input_nome_jogador),
'"Botão nome jogador" não foi encontrado'
)
input_name = browser.find_element_by_xpath(input_nome_jogador)
input_name.clear()
input_name.send_keys(nome)
wdw.until(
partial(espera_elemento, By.XPATH, botao_iniciar),
'"Botão iniciar" não foi encontrado'
)
button_jogar = browser.find_element_by_xpath(botao_iniciar)
button_jogar.click()
def iniciar_jogo(browser):
limpar_tela()
print('1 - Entrar com Twitter.')
print('2 - Entrar com Facebook.')
print('3 - Entrar com nome.')
print('4 - Entrar como anônimo')
tipo_entrada = int(input('=> '))
if tipo_entrada == 1:
iniciar(browser, botao_entrar_twitter)
elif tipo_entrada == 2:
iniciar(browser, botao_entrar_facebook)
elif tipo_entrada == 3:
nome_jogador = input("Digite o nome: ")
iniciar(browser, botao_entrar, nome_jogador)
elif tipo_entrada == 4:
iniciar(browser, botao_entrar)
else:
print('Resposta invalida.')
sleep(2)
iniciar_jogo(browser)
def espera_elemento(by, elemento, browser):
el = browser.find_elements(by, elemento)
return bool(el)
def limpar_tela():
os.system('cls' if os.name == 'nt' else 'clear')
def pegar_letra_atual(browser):
letra = browser.find_element_by_xpath(letra_atual).text
if len(letra) > 0:
return letra
return '?'
def escrever_resposta(browser, letra):
labels = browser.find_elements_by_xpath(label_inputs_palavras)
if bool(labels):
for label in labels:
categoria = label.find_element_by_tag_name('span').text
input = label.find_element_by_tag_name('input')
if len(input.get_attribute('value')) == 0:
resposta = buscar_palavra(categoria, letra)
if resposta:
limpar_tela()
print('Preenchendo campos...')
input.send_keys(resposta)
def buscar_pontos(browser):
pontos = browser.find_element_by_xpath(meus_pontos).text
if len(pontos) > 0:
return pontos.split(' ')[0]
return '0'
def clica_button(browser, botao_elemento, msg):
pode_clicar = browser.find_elements_by_xpath(botao_elemento)
if bool(pode_clicar):
button_preparado = browser.find_element_by_xpath(botao_elemento)
if 'disable' not in button_preparado.get_attribute('class'):
print(msg)
button_preparado.click()
def clica_estou_pronto(browser):
clica_button(browser, botao_estou_pronto, 'Clicando em "Estou pronto"')
def avalia_respostas(browser):
clica_button(browser, botao_avalia_respostas, 'Clicando em "Avaliar respostas"')
def aprende_novas_respostas(browser, primeira_letra):
tema = browser.find_elements_by_xpath(div_tema)
if bool(tema):
tema_text = browser.find_element_by_xpath(div_tema).text
if ':' in tema_text:
tema_text = tema_text.split(':')[1].strip()
respostas = browser.find_elements_by_xpath(div_palavras)
for resposta in respostas:
adicionar_resposta(tema_text, resposta.text, primeira_letra)
|
[
"functools.partial",
"controler_palavras.buscar_palavra",
"aprende.adicionar_resposta",
"os.system",
"time.sleep",
"selenium.webdriver.support.ui.WebDriverWait"
] |
[((375, 401), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['browser', '(25)'], {}), '(browser, 25)\n', (388, 401), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((2765, 2813), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (2774, 2813), False, 'import os\n'), ((447, 496), 'functools.partial', 'partial', (['espera_elemento', 'By.XPATH', 'botao_entrada'], {}), '(espera_elemento, By.XPATH, botao_entrada)\n', (454, 496), False, 'from functools import partial\n'), ((1772, 1821), 'functools.partial', 'partial', (['espera_elemento', 'By.XPATH', 'botao_iniciar'], {}), '(espera_elemento, By.XPATH, botao_iniciar)\n', (1779, 1821), False, 'from functools import partial\n'), ((3336, 3368), 'controler_palavras.buscar_palavra', 'buscar_palavra', (['categoria', 'letra'], {}), '(categoria, letra)\n', (3350, 3368), False, 'from controler_palavras import buscar_palavra\n'), ((4665, 4725), 'aprende.adicionar_resposta', 'adicionar_resposta', (['tema_text', 'resposta.text', 'primeira_letra'], {}), '(tema_text, resposta.text, primeira_letra)\n', (4683, 4725), False, 'from aprende import adicionar_resposta\n'), ((1474, 1528), 'functools.partial', 'partial', (['espera_elemento', 'By.XPATH', 'input_nome_jogador'], {}), '(espera_elemento, By.XPATH, input_nome_jogador)\n', (1481, 1528), False, 'from functools import partial\n'), ((2592, 2600), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (2597, 2600), False, 'from time import sleep\n')]
|
# Copyright 2019 the ProGraML authors.
#
# Contact <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains TODO: one line summary.
TODO: Detailed explanation of the file.
"""
from typing import Any
from typing import Iterable
from typing import List
from typing import NamedTuple
from typing import Optional
import numpy as np
import sklearn.metrics
from labm8.py import app
FLAGS = app.FLAGS
app.DEFINE_string(
"batch_scores_averaging_method",
"weighted",
"Selects the averaging method to use when computing recall/precision/F1 "
"scores. See <https://scikit-learn.org/stable/modules/generated/sklearn"
".metrics.f1_score.html>",
)
class Data(NamedTuple):
"""The model data for a batch."""
graph_ids: List[int]
data: Any
# A flag used to mark that this batch is the end of an iterable sequences of
# batches.
end_of_batches: bool = False
@property
def graph_count(self) -> int:
return len(self.graph_ids)
def EmptyBatch() -> Data:
"""Construct an empty batch."""
return Data(graph_ids=[], data=None)
def EndOfBatches() -> Data:
"""Construct a 'end of batches' marker."""
return Data(graph_ids=[], data=None, end_of_batches=True)
class BatchIterator(NamedTuple):
"""A batch iterator"""
batches: Iterable[Data]
# The total number of graphs in all of the batches.
graph_count: int
class Results(NamedTuple):
"""The results of running a batch through a model.
Don't instantiate this tuple directly, use Results.Create().
"""
targets: np.array
predictions: np.array
# The number of model iterations to compute the final results. This is used
# by iterative models such as message passing networks.
iteration_count: int
# For iterative models, this indicates whether the state of the model at
# iteration_count had converged on a solution.
model_converged: bool
# The learning rate and loss of models, if applicable.
learning_rate: Optional[float]
loss: Optional[float]
# Batch-level average performance metrics.
accuracy: float
precision: float
recall: float
f1: float
@property
def has_learning_rate(self) -> bool:
return self.learning_rate is not None
@property
def has_loss(self) -> bool:
return self.loss is not None
@property
def target_count(self) -> int:
"""Get the number of targets in the batch.
For graph-level classifiers, this will be equal to Data.graph_count, else
it's equal to the batch node count.
"""
return self.targets.shape[1]
def __repr__(self) -> str:
return (
f"accuracy={self.accuracy:.2%}%, "
f"precision={self.precision:.3f}, "
f"recall={self.recall:.3f}, "
f"f1={self.f1:.3f}"
)
def __eq__(self, rhs: "Results"):
"""Compare batch results."""
return self.accuracy == rhs.accuracy
def __gt__(self, rhs: "Results"):
"""Compare batch results."""
return self.accuracy > rhs.accuracy
@classmethod
def Create(
cls,
targets: np.array,
predictions: np.array,
iteration_count: int = 1,
model_converged: bool = True,
learning_rate: Optional[float] = None,
loss: Optional[float] = None,
):
"""Construct a results instance from 1-hot targets and predictions.
This is the preferred means of construct a Results instance, which takes
care of evaluating all of the metrics for you. The behavior of metrics
calculation is dependent on the --batch_scores_averaging_method flag.
Args:
targets: An array of 1-hot target vectors with
shape (y_count, y_dimensionality), dtype int32.
predictions: An array of 1-hot prediction vectors with
shape (y_count, y_dimensionality), dtype int32.
iteration_count: For iterative models, the number of model iterations to
compute the final result.
model_converged: For iterative models, whether model converged.
learning_rate: The model learning rate, if applicable.
loss: The model loss, if applicable.
Returns:
A Results instance.
"""
if targets.shape != predictions.shape:
raise TypeError(
f"Expected model to produce targets with shape {targets.shape} but "
f"instead received predictions with shape {predictions.shape}"
)
y_dimensionality = targets.shape[1]
if y_dimensionality < 2:
raise TypeError(
f"Expected label dimensionality > 1, received {y_dimensionality}"
)
# Create dense arrays of shape (target_count).
true_y = np.argmax(targets, axis=1)
pred_y = np.argmax(predictions, axis=1)
# NOTE(github.com/ChrisCummins/ProGraML/issues/22): This assumes that
# labels use the values [0,...n).
labels = np.arange(y_dimensionality, dtype=np.int64)
return cls(
targets=targets,
predictions=predictions,
iteration_count=iteration_count,
model_converged=model_converged,
learning_rate=learning_rate,
loss=loss,
accuracy=sklearn.metrics.accuracy_score(true_y, pred_y),
precision=sklearn.metrics.precision_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
recall=sklearn.metrics.recall_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
f1=sklearn.metrics.f1_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
)
class RollingResults:
"""Maintain weighted rolling averages across batches."""
def __init__(self):
self.weight_sum = 0
self.batch_count = 0
self.graph_count = 0
self.target_count = 0
self.weighted_iteration_count_sum = 0
self.weighted_model_converged_sum = 0
self.has_learning_rate = False
self.weighted_learning_rate_sum = 0
self.has_loss = False
self.weighted_loss_sum = 0
self.weighted_accuracy_sum = 0
self.weighted_precision_sum = 0
self.weighted_recall_sum = 0
self.weighted_f1_sum = 0
def Update(
self, data: Data, results: Results, weight: Optional[float] = None
) -> None:
"""Update the rolling results with a new batch.
Args:
data: The batch data used to produce the results.
results: The batch results to update the current state with.
weight: A weight to assign to weighted sums. E.g. to weight results
across all targets, use weight=results.target_count. To weight across
targets, use weight=batch.target_count. To weight across
graphs, use weight=batch.graph_count. By default, weight by target
count.
"""
if weight is None:
weight = results.target_count
self.weight_sum += weight
self.batch_count += 1
self.graph_count += data.graph_count
self.target_count += results.target_count
self.weighted_iteration_count_sum += results.iteration_count * weight
self.weighted_model_converged_sum += (
weight if results.model_converged else 0
)
if results.has_learning_rate:
self.has_learning_rate = True
self.weighted_learning_rate_sum += results.learning_rate * weight
if results.has_loss:
self.has_loss = True
self.weighted_loss_sum += results.loss * weight
self.weighted_accuracy_sum += results.accuracy * weight
self.weighted_precision_sum += results.precision * weight
self.weighted_recall_sum += results.recall * weight
self.weighted_f1_sum += results.f1 * weight
@property
def iteration_count(self) -> float:
return self.weighted_iteration_count_sum / max(self.weight_sum, 1)
@property
def model_converged(self) -> float:
return self.weighted_model_converged_sum / max(self.weight_sum, 1)
@property
def learning_rate(self) -> Optional[float]:
if self.has_learning_rate:
return self.weighted_learning_rate_sum / max(self.weight_sum, 1)
@property
def loss(self) -> Optional[float]:
if self.has_loss:
return self.weighted_loss_sum / max(self.weight_sum, 1)
@property
def accuracy(self) -> float:
return self.weighted_accuracy_sum / max(self.weight_sum, 1)
@property
def precision(self) -> float:
return self.weighted_precision_sum / max(self.weight_sum, 1)
@property
def recall(self) -> float:
return self.weighted_recall_sum / max(self.weight_sum, 1)
@property
def f1(self) -> float:
return self.weighted_f1_sum / max(self.weight_sum, 1)
|
[
"numpy.arange",
"labm8.py.app.DEFINE_string",
"numpy.argmax"
] |
[((928, 1167), 'labm8.py.app.DEFINE_string', 'app.DEFINE_string', (['"""batch_scores_averaging_method"""', '"""weighted"""', '"""Selects the averaging method to use when computing recall/precision/F1 scores. See <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html>"""'], {}), "('batch_scores_averaging_method', 'weighted',\n 'Selects the averaging method to use when computing recall/precision/F1 scores. See <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html>'\n )\n", (945, 1167), False, 'from labm8.py import app\n'), ((5005, 5031), 'numpy.argmax', 'np.argmax', (['targets'], {'axis': '(1)'}), '(targets, axis=1)\n', (5014, 5031), True, 'import numpy as np\n'), ((5045, 5075), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (5054, 5075), True, 'import numpy as np\n'), ((5202, 5245), 'numpy.arange', 'np.arange', (['y_dimensionality'], {'dtype': 'np.int64'}), '(y_dimensionality, dtype=np.int64)\n', (5211, 5245), True, 'import numpy as np\n')]
|
import collections
import os
import numpy as np
import tensorflow as tf
from pysc2.lib import actions
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers.optimizers import OPTIMIZER_SUMMARIES
from actorcritic.policy import FullyConvPolicy
from common.preprocess import ObsProcesser, FEATURE_KEYS, AgentInputTuple
from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs
def _get_placeholders(spatial_dim):
sd = spatial_dim
feature_list = [
(FEATURE_KEYS.minimap_numeric, tf.float32, [None, sd, sd, ObsProcesser.N_MINIMAP_CHANNELS]),
(FEATURE_KEYS.screen_numeric, tf.float32, [None, sd, sd, ObsProcesser.N_SCREEN_CHANNELS]),
(FEATURE_KEYS.screen_unit_type, tf.int32, [None, sd, sd]),
(FEATURE_KEYS.is_spatial_action_available, tf.float32, [None]),
(FEATURE_KEYS.available_action_ids, tf.float32, [None, len(actions.FUNCTIONS)]),
(FEATURE_KEYS.selected_spatial_action, tf.int32, [None, 2]),
(FEATURE_KEYS.selected_action_id, tf.int32, [None]),
(FEATURE_KEYS.value_target, tf.float32, [None]),
(FEATURE_KEYS.player_relative_screen, tf.int32, [None, sd, sd]),
(FEATURE_KEYS.player_relative_minimap, tf.int32, [None, sd, sd]),
(FEATURE_KEYS.advantage, tf.float32, [None])
]
return AgentInputTuple(
**{name: tf.placeholder(dtype, shape, name) for name, dtype, shape in feature_list}
)
class ACMode:
A2C = "a2c"
PPO = "ppo"
SelectedLogProbs = collections.namedtuple("SelectedLogProbs", ["action_id", "spatial", "total"])
class ActorCriticAgent:
_scalar_summary_key = "scalar_summaries"
def __init__(self,
sess: tf.Session,
summary_path: str,
all_summary_freq: int,
scalar_summary_freq: int,
spatial_dim: int,
mode: str,
clip_epsilon=0.2,
unit_type_emb_dim=4,
loss_value_weight=1.0,
entropy_weight_spatial=1e-6,
entropy_weight_action_id=1e-5,
max_gradient_norm=None,
optimiser="adam",
optimiser_pars: dict = None,
policy=FullyConvPolicy
):
"""
Actor-Critic Agent for learning pysc2-minigames
https://arxiv.org/pdf/1708.04782.pdf
https://github.com/deepmind/pysc2
Can use
- A2C https://blog.openai.com/baselines-acktr-a2c/ (synchronous version of A3C)
or
- PPO https://arxiv.org/pdf/1707.06347.pdf
:param summary_path: tensorflow summaries will be created here
:param all_summary_freq: how often save all summaries
:param scalar_summary_freq: int, how often save scalar summaries
:param spatial_dim: dimension for both minimap and screen
:param mode: a2c or ppo
:param clip_epsilon: epsilon for clipping the ratio in PPO (no effect in A2C)
:param loss_value_weight: value weight for a2c update
:param entropy_weight_spatial: spatial entropy weight for a2c update
:param entropy_weight_action_id: action selection entropy weight for a2c update
:param max_gradient_norm: global max norm for gradients, if None then not limited
:param optimiser: see valid choices below
:param optimiser_pars: optional parameters to pass in optimiser
:param policy: Policy class
"""
assert optimiser in ["adam", "rmsprop"]
assert mode in [ACMode.A2C, ACMode.PPO]
self.mode = mode
self.sess = sess
self.spatial_dim = spatial_dim
self.loss_value_weight = loss_value_weight
self.entropy_weight_spatial = entropy_weight_spatial
self.entropy_weight_action_id = entropy_weight_action_id
self.unit_type_emb_dim = unit_type_emb_dim
self.summary_path = summary_path
os.makedirs(summary_path, exist_ok=True)
self.summary_writer = tf.summary.FileWriter(summary_path)
self.all_summary_freq = all_summary_freq
self.scalar_summary_freq = scalar_summary_freq
self.train_step = 0
self.max_gradient_norm = max_gradient_norm
self.clip_epsilon = clip_epsilon
self.policy = policy
opt_class = tf.train.AdamOptimizer if optimiser == "adam" else tf.train.RMSPropOptimizer
if optimiser_pars is None:
pars = {
"adam": {
"learning_rate": 1e-4,
"epsilon": 5e-7
},
"rmsprop": {
"learning_rate": 2e-4
}
}[optimiser]
else:
pars = optimiser_pars
self.optimiser = opt_class(**pars)
def init(self):
self.sess.run(self.init_op)
if self.mode == ACMode.PPO:
self.update_theta()
def _get_select_action_probs(self, pi, selected_spatial_action_flat):
action_id = select_from_each_row(
pi.action_id_log_probs, self.placeholders.selected_action_id
)
spatial = select_from_each_row(
pi.spatial_action_log_probs, selected_spatial_action_flat
)
total = spatial + action_id
return SelectedLogProbs(action_id, spatial, total)
def _scalar_summary(self, name, tensor):
tf.summary.scalar(name, tensor,
collections=[tf.GraphKeys.SUMMARIES, self._scalar_summary_key])
def build_model(self):
self.placeholders = _get_placeholders(self.spatial_dim)
with tf.variable_scope("theta"):
theta = self.policy(self, trainable=True).build()
selected_spatial_action_flat = ravel_index_pairs(
self.placeholders.selected_spatial_action, self.spatial_dim
)
selected_log_probs = self._get_select_action_probs(theta, selected_spatial_action_flat)
# maximum is to avoid 0 / 0 because this is used to calculate some means
sum_spatial_action_available = tf.maximum(
1e-10, tf.reduce_sum(self.placeholders.is_spatial_action_available)
)
neg_entropy_spatial = tf.reduce_sum(
theta.spatial_action_probs * theta.spatial_action_log_probs
) / sum_spatial_action_available
neg_entropy_action_id = tf.reduce_mean(tf.reduce_sum(
theta.action_id_probs * theta.action_id_log_probs, axis=1
))
if self.mode == ACMode.PPO:
# could also use stop_gradient and forget about the trainable
with tf.variable_scope("theta_old"):
theta_old = self.policy(self, trainable=False).build()
new_theta_var = tf.global_variables("theta/")
old_theta_var = tf.global_variables("theta_old/")
assert len(tf.trainable_variables("theta/")) == len(new_theta_var)
assert not tf.trainable_variables("theta_old/")
assert len(old_theta_var) == len(new_theta_var)
self.update_theta_op = [
tf.assign(t_old, t_new) for t_new, t_old in zip(new_theta_var, old_theta_var)
]
selected_log_probs_old = self._get_select_action_probs(
theta_old, selected_spatial_action_flat
)
ratio = tf.exp(selected_log_probs.total - selected_log_probs_old.total)
clipped_ratio = tf.clip_by_value(
ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon
)
l_clip = tf.minimum(
ratio * self.placeholders.advantage,
clipped_ratio * self.placeholders.advantage
)
self.sampled_action_id = weighted_random_sample(theta_old.action_id_probs)
self.sampled_spatial_action = weighted_random_sample(theta_old.spatial_action_probs)
self.value_estimate = theta_old.value_estimate
self._scalar_summary("action/ratio", tf.reduce_mean(clipped_ratio))
self._scalar_summary("action/ratio_is_clipped",
tf.reduce_mean(tf.to_float(tf.equal(ratio, clipped_ratio))))
policy_loss = -tf.reduce_mean(l_clip)
else:
self.sampled_action_id = weighted_random_sample(theta.action_id_probs)
self.sampled_spatial_action = weighted_random_sample(theta.spatial_action_probs)
self.value_estimate = theta.value_estimate
policy_loss = -tf.reduce_mean(selected_log_probs.total * self.placeholders.advantage)
value_loss = tf.losses.mean_squared_error(
self.placeholders.value_target, theta.value_estimate)
loss = (
policy_loss
+ value_loss * self.loss_value_weight
+ neg_entropy_spatial * self.entropy_weight_spatial
+ neg_entropy_action_id * self.entropy_weight_action_id
)
self.train_op = layers.optimize_loss(
loss=loss,
global_step=tf.train.get_global_step(),
optimizer=self.optimiser,
clip_gradients=self.max_gradient_norm,
summaries=OPTIMIZER_SUMMARIES,
learning_rate=None,
name="train_op"
)
self._scalar_summary("value/estimate", tf.reduce_mean(self.value_estimate))
self._scalar_summary("value/target", tf.reduce_mean(self.placeholders.value_target))
self._scalar_summary("action/is_spatial_action_available",
tf.reduce_mean(self.placeholders.is_spatial_action_available))
self._scalar_summary("action/selected_id_log_prob",
tf.reduce_mean(selected_log_probs.action_id))
self._scalar_summary("loss/policy", policy_loss)
self._scalar_summary("loss/value", value_loss)
self._scalar_summary("loss/neg_entropy_spatial", neg_entropy_spatial)
self._scalar_summary("loss/neg_entropy_action_id", neg_entropy_action_id)
self._scalar_summary("loss/total", loss)
self._scalar_summary("value/advantage", tf.reduce_mean(self.placeholders.advantage))
self._scalar_summary("action/selected_total_log_prob",
tf.reduce_mean(selected_log_probs.total))
self._scalar_summary("action/selected_spatial_log_prob",
tf.reduce_sum(selected_log_probs.spatial) / sum_spatial_action_available)
self.init_op = tf.global_variables_initializer()
self.saver = tf.train.Saver(max_to_keep=2)
self.all_summary_op = tf.summary.merge_all(tf.GraphKeys.SUMMARIES)
self.scalar_summary_op = tf.summary.merge(tf.get_collection(self._scalar_summary_key))
def _input_to_feed_dict(self, input_dict):
return {k + ":0": v for k, v in input_dict.items()}
def step(self, obs):
feed_dict = self._input_to_feed_dict(obs)
action_id, spatial_action, value_estimate = self.sess.run(
[self.sampled_action_id, self.sampled_spatial_action, self.value_estimate],
feed_dict=feed_dict
)
spatial_action_2d = np.array(
np.unravel_index(spatial_action, (self.spatial_dim,) * 2)
).transpose()
return action_id, spatial_action_2d, value_estimate
def train(self, input_dict):
feed_dict = self._input_to_feed_dict(input_dict)
ops = [self.train_op]
write_all_summaries = (
(self.train_step % self.all_summary_freq == 0) and
self.summary_path is not None
)
write_scalar_summaries = (
(self.train_step % self.scalar_summary_freq == 0) and
self.summary_path is not None
)
if write_all_summaries:
ops.append(self.all_summary_op)
elif write_scalar_summaries:
ops.append(self.scalar_summary_op)
r = self.sess.run(ops, feed_dict)
if write_all_summaries or write_scalar_summaries:
self.summary_writer.add_summary(r[-1], global_step=self.train_step)
self.train_step += 1
def get_value(self, obs):
feed_dict = self._input_to_feed_dict(obs)
return self.sess.run(self.value_estimate, feed_dict=feed_dict)
def flush_summaries(self):
self.summary_writer.flush()
def save(self, path, step=None):
os.makedirs(path, exist_ok=True)
step = step or self.train_step
print("saving model to %s, step %d" % (path, step))
self.saver.save(self.sess, path + '/model.ckpt', global_step=step)
def load(self, path):
ckpt = tf.train.get_checkpoint_state(path)
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
self.train_step = int(ckpt.model_checkpoint_path.split('-')[-1])
print("loaded old model with train_step %d" % self.train_step)
self.train_step += 1
def update_theta(self):
if self.mode == ACMode.PPO:
self.sess.run(self.update_theta_op)
|
[
"tensorflow.reduce_sum",
"tensorflow.clip_by_value",
"tensorflow.trainable_variables",
"tensorflow.get_collection",
"tensorflow.global_variables",
"tensorflow.assign",
"common.util.ravel_index_pairs",
"tensorflow.variable_scope",
"tensorflow.minimum",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.exp",
"tensorflow.summary.merge_all",
"tensorflow.train.get_checkpoint_state",
"tensorflow.train.get_global_step",
"tensorflow.equal",
"tensorflow.summary.scalar",
"tensorflow.losses.mean_squared_error",
"tensorflow.global_variables_initializer",
"tensorflow.train.Saver",
"common.util.weighted_random_sample",
"tensorflow.reduce_mean",
"os.makedirs",
"common.util.select_from_each_row",
"numpy.unravel_index",
"collections.namedtuple"
] |
[((1528, 1605), 'collections.namedtuple', 'collections.namedtuple', (['"""SelectedLogProbs"""', "['action_id', 'spatial', 'total']"], {}), "('SelectedLogProbs', ['action_id', 'spatial', 'total'])\n", (1550, 1605), False, 'import collections\n'), ((3882, 3922), 'os.makedirs', 'os.makedirs', (['summary_path'], {'exist_ok': '(True)'}), '(summary_path, exist_ok=True)\n', (3893, 3922), False, 'import os\n'), ((3953, 3988), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['summary_path'], {}), '(summary_path)\n', (3974, 3988), True, 'import tensorflow as tf\n'), ((4945, 5032), 'common.util.select_from_each_row', 'select_from_each_row', (['pi.action_id_log_probs', 'self.placeholders.selected_action_id'], {}), '(pi.action_id_log_probs, self.placeholders.\n selected_action_id)\n', (4965, 5032), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((5068, 5147), 'common.util.select_from_each_row', 'select_from_each_row', (['pi.spatial_action_log_probs', 'selected_spatial_action_flat'], {}), '(pi.spatial_action_log_probs, selected_spatial_action_flat)\n', (5088, 5147), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((5320, 5420), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['name', 'tensor'], {'collections': '[tf.GraphKeys.SUMMARIES, self._scalar_summary_key]'}), '(name, tensor, collections=[tf.GraphKeys.SUMMARIES, self.\n _scalar_summary_key])\n', (5337, 5420), True, 'import tensorflow as tf\n'), ((5664, 5742), 'common.util.ravel_index_pairs', 'ravel_index_pairs', (['self.placeholders.selected_spatial_action', 'self.spatial_dim'], {}), '(self.placeholders.selected_spatial_action, self.spatial_dim)\n', (5681, 5742), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((8475, 8562), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['self.placeholders.value_target', 
'theta.value_estimate'], {}), '(self.placeholders.value_target, theta.\n value_estimate)\n', (8503, 8562), True, 'import tensorflow as tf\n'), ((10273, 10306), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10304, 10306), True, 'import tensorflow as tf\n'), ((10328, 10357), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(2)'}), '(max_to_keep=2)\n', (10342, 10357), True, 'import tensorflow as tf\n'), ((10388, 10432), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', (['tf.GraphKeys.SUMMARIES'], {}), '(tf.GraphKeys.SUMMARIES)\n', (10408, 10432), True, 'import tensorflow as tf\n'), ((12163, 12195), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (12174, 12195), False, 'import os\n'), ((12412, 12447), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['path'], {}), '(path)\n', (12441, 12447), True, 'import tensorflow as tf\n'), ((5534, 5560), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""theta"""'], {}), "('theta')\n", (5551, 5560), True, 'import tensorflow as tf\n'), ((6014, 6074), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.placeholders.is_spatial_action_available'], {}), '(self.placeholders.is_spatial_action_available)\n', (6027, 6074), True, 'import tensorflow as tf\n'), ((6116, 6190), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(theta.spatial_action_probs * theta.spatial_action_log_probs)'], {}), '(theta.spatial_action_probs * theta.spatial_action_log_probs)\n', (6129, 6190), True, 'import tensorflow as tf\n'), ((6291, 6363), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(theta.action_id_probs * theta.action_id_log_probs)'], {'axis': '(1)'}), '(theta.action_id_probs * theta.action_id_log_probs, axis=1)\n', (6304, 6363), True, 'import tensorflow as tf\n'), ((6647, 6676), 'tensorflow.global_variables', 'tf.global_variables', (['"""theta/"""'], {}), "('theta/')\n", (6666, 6676), True, 'import tensorflow as 
tf\n'), ((6705, 6738), 'tensorflow.global_variables', 'tf.global_variables', (['"""theta_old/"""'], {}), "('theta_old/')\n", (6724, 6738), True, 'import tensorflow as tf\n'), ((7244, 7307), 'tensorflow.exp', 'tf.exp', (['(selected_log_probs.total - selected_log_probs_old.total)'], {}), '(selected_log_probs.total - selected_log_probs_old.total)\n', (7250, 7307), True, 'import tensorflow as tf\n'), ((7336, 7409), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio', '(1.0 - self.clip_epsilon)', '(1.0 + self.clip_epsilon)'], {}), '(ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon)\n', (7352, 7409), True, 'import tensorflow as tf\n'), ((7461, 7558), 'tensorflow.minimum', 'tf.minimum', (['(ratio * self.placeholders.advantage)', '(clipped_ratio * self.placeholders.advantage)'], {}), '(ratio * self.placeholders.advantage, clipped_ratio * self.\n placeholders.advantage)\n', (7471, 7558), True, 'import tensorflow as tf\n'), ((7637, 7686), 'common.util.weighted_random_sample', 'weighted_random_sample', (['theta_old.action_id_probs'], {}), '(theta_old.action_id_probs)\n', (7659, 7686), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((7729, 7783), 'common.util.weighted_random_sample', 'weighted_random_sample', (['theta_old.spatial_action_probs'], {}), '(theta_old.spatial_action_probs)\n', (7751, 7783), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((8161, 8206), 'common.util.weighted_random_sample', 'weighted_random_sample', (['theta.action_id_probs'], {}), '(theta.action_id_probs)\n', (8183, 8206), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((8249, 8299), 'common.util.weighted_random_sample', 'weighted_random_sample', (['theta.spatial_action_probs'], {}), '(theta.spatial_action_probs)\n', (8271, 8299), False, 'from common.util import weighted_random_sample, select_from_each_row, 
ravel_index_pairs\n'), ((9177, 9212), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.value_estimate'], {}), '(self.value_estimate)\n', (9191, 9212), True, 'import tensorflow as tf\n'), ((9259, 9305), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.placeholders.value_target'], {}), '(self.placeholders.value_target)\n', (9273, 9305), True, 'import tensorflow as tf\n'), ((9386, 9447), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.placeholders.is_spatial_action_available'], {}), '(self.placeholders.is_spatial_action_available)\n', (9400, 9447), True, 'import tensorflow as tf\n'), ((9521, 9565), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['selected_log_probs.action_id'], {}), '(selected_log_probs.action_id)\n', (9535, 9565), True, 'import tensorflow as tf\n'), ((9936, 9979), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.placeholders.advantage'], {}), '(self.placeholders.advantage)\n', (9950, 9979), True, 'import tensorflow as tf\n'), ((10056, 10096), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['selected_log_probs.total'], {}), '(selected_log_probs.total)\n', (10070, 10096), True, 'import tensorflow as tf\n'), ((10483, 10526), 'tensorflow.get_collection', 'tf.get_collection', (['self._scalar_summary_key'], {}), '(self._scalar_summary_key)\n', (10500, 10526), True, 'import tensorflow as tf\n'), ((1378, 1412), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', 'shape', 'name'], {}), '(dtype, shape, name)\n', (1392, 1412), True, 'import tensorflow as tf\n'), ((6515, 6545), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""theta_old"""'], {}), "('theta_old')\n", (6532, 6545), True, 'import tensorflow as tf\n'), ((6842, 6878), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""theta_old/"""'], {}), "('theta_old/')\n", (6864, 6878), True, 'import tensorflow as tf\n'), ((6993, 7016), 'tensorflow.assign', 'tf.assign', (['t_old', 't_new'], {}), '(t_old, t_new)\n', (7002, 7016), True, 'import tensorflow as tf\n'), ((7892, 7921), 
'tensorflow.reduce_mean', 'tf.reduce_mean', (['clipped_ratio'], {}), '(clipped_ratio)\n', (7906, 7921), True, 'import tensorflow as tf\n'), ((8087, 8109), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['l_clip'], {}), '(l_clip)\n', (8101, 8109), True, 'import tensorflow as tf\n'), ((8382, 8452), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(selected_log_probs.total * self.placeholders.advantage)'], {}), '(selected_log_probs.total * self.placeholders.advantage)\n', (8396, 8452), True, 'import tensorflow as tf\n'), ((8899, 8925), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (8923, 8925), True, 'import tensorflow as tf\n'), ((10175, 10216), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['selected_log_probs.spatial'], {}), '(selected_log_probs.spatial)\n', (10188, 10216), True, 'import tensorflow as tf\n'), ((6763, 6795), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""theta/"""'], {}), "('theta/')\n", (6785, 6795), True, 'import tensorflow as tf\n'), ((10961, 11018), 'numpy.unravel_index', 'np.unravel_index', (['spatial_action', '((self.spatial_dim,) * 2)'], {}), '(spatial_action, (self.spatial_dim,) * 2)\n', (10977, 11018), True, 'import numpy as np\n'), ((8026, 8056), 'tensorflow.equal', 'tf.equal', (['ratio', 'clipped_ratio'], {}), '(ratio, clipped_ratio)\n', (8034, 8056), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.5'
from django.conf import settings
from django.db.models.signals import post_save
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from .utils.models import get_model
from .cache import LoggedInUserCache
from .models import Permission, ObjectPermission, Group
## HANDLERS ##
def _update_author_permissions(sender, instance, raw, created, **kwargs):
    """Grant the current user full object permissions on objects he creates.

    post_save handler: when *instance* has just been created and a user is
    logged in, that user receives view/change/delete object-level
    permissions on it.
    """
    author = LoggedInUserCache().user
    if author and author.is_authenticated:
        content_type = ContentType.objects.get_for_model(sender)
        app_label = content_type.app_label
        model_name = content_type.model
        if created:
            # Create (or fetch) one object permission per basic action,
            # then grant each of them to the author.
            perms = [
                ObjectPermission.objects.get_or_create_by_natural_key(
                    "%s_%s" % (action, model_name), app_label, model_name, instance.pk)[0]
                for action in ("view", "change", "delete")
            ]
            for perm in perms:
                perm.users.add(author)
def manage_author_permissions(cls, enabled=True):
    """Toggle automatic author permissions for the given model class.

    When ``enabled`` is True, connects the post_save signal of ``cls`` to a
    handler that grants the current user default view/change/delete
    permissions on each instance he creates, e.g.:

    >> manage_author_permissions(Project)

    Passing ``enabled=False`` disconnects the handler again:

    >> manage_author_permissions(Project, False)
    """
    cls = get_model(cls)
    dispatch_uid = "update_%s_permissions" % cls.__name__.lower()
    # The same dispatch_uid is used on both sides so a later disconnect
    # removes exactly the handler registered here.
    manage = post_save.connect if enabled else post_save.disconnect
    manage(_update_author_permissions, cls, dispatch_uid=dispatch_uid)
def user_post_save(sender, instance, created, *args, **kwargs):
    """Give every user object-level control over his own account.

    Grants view/change/delete object permissions on *instance* to the user
    itself; newly created users are also added to the "users" group.
    """
    auth_app, _, user_model_name = settings.AUTH_USER_MODEL.rpartition('.')
    user_model_name = user_model_name.lower()
    # All new users have full control over themselves: create the three
    # object permissions first, then grant each one to the user.
    own_perms = [
        ObjectPermission.objects.get_or_create_by_natural_key(
            "%s_%s" % (action, user_model_name), auth_app, user_model_name, instance.pk)[0]
        for action in ("view", "change", "delete")
    ]
    for perm in own_perms:
        perm.users.add(instance)
    # All new users become members of the default "users" group.
    if created:
        users_group, _ = Group.objects.get_or_create(name='users')
        instance.groups.add(users_group)
def add_view_permission(sender, instance, **kwargs):
    """Create a matching "view" permission for each new ContentType.

    Complements Django's default add/change/delete model permissions.
    """
    if not isinstance(instance, ContentType):
        return
    Permission.objects.get_or_create(
        content_type=instance,
        codename="view_%s" % instance.model,
        name="Can view %s" % instance.name)
## CONNECTIONS ##
# Users manage their own permissions and group membership on save.
post_save.connect(user_post_save, get_user_model())
# Every new ContentType gets a companion "view" permission.
post_save.connect(add_view_permission, ContentType)
|
[
"django.db.models.signals.post_save.disconnect",
"django.contrib.auth.get_user_model",
"django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"django.conf.settings.AUTH_USER_MODEL.rpartition",
"django.db.models.signals.post_save.connect"
] |
[((4473, 4524), 'django.db.models.signals.post_save.connect', 'post_save.connect', (['add_view_permission', 'ContentType'], {}), '(add_view_permission, ContentType)\n', (4490, 4524), False, 'from django.db.models.signals import post_save\n'), ((3116, 3156), 'django.conf.settings.AUTH_USER_MODEL.rpartition', 'settings.AUTH_USER_MODEL.rpartition', (['"""."""'], {}), "('.')\n", (3151, 3156), False, 'from django.conf import settings\n'), ((4455, 4471), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (4469, 4471), False, 'from django.contrib.auth import get_user_model\n'), ((1247, 1288), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['sender'], {}), '(sender)\n', (1280, 1288), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((2686, 2763), 'django.db.models.signals.post_save.connect', 'post_save.connect', (['_update_author_permissions', 'cls'], {'dispatch_uid': 'dispatch_uid'}), '(_update_author_permissions, cls, dispatch_uid=dispatch_uid)\n', (2703, 2763), False, 'from django.db.models.signals import post_save\n'), ((2791, 2876), 'django.db.models.signals.post_save.disconnect', 'post_save.disconnect', (['_update_author_permissions', 'cls'], {'dispatch_uid': 'dispatch_uid'}), '(_update_author_permissions, cls, dispatch_uid=dispatch_uid\n )\n', (2811, 2876), False, 'from django.db.models.signals import post_save\n')]
|
from django.db import models
from django.contrib.auth.models import User
from article.models import Article
class Comment(models.Model):
    """A user comment on an article, with optional reply threading."""
    # Comment author (verbose name "作者" = author).
    owner = models.ForeignKey(User, verbose_name="作者")
    # The commented article (verbose name "文章ID" = article id).
    article = models.ForeignKey(Article, verbose_name="文章ID")
    # Comment body, at most 1000 characters ("评论内容" = comment content).
    content = models.CharField("评论内容", max_length=1000)
    # Optional parent comment when this comment is a reply
    # ("回复哪个评论" = which comment is replied to).
    to_comment = models.ForeignKey("self", null=True, blank=True, verbose_name="回复哪个评论")
    # Soft-delete flag ("状态" = status): 0 = normal ("正常"), -1 = deleted ("删除").
    status = models.IntegerField("状态", choices=((0, "正常"), (-1, "删除")), default = 0)
    # Set once on insert ("创建时间" = creation time).
    create_timestamp = models.DateTimeField("创建时间", auto_now_add=True)
    # Refreshed on every save ("最后更新时间" = last update time).
    last_update_timestamp = models.DateTimeField("最后更新时间", auto_now=True)
    def __str__(self):
        """Return the comment body for display in the admin and shell."""
        return self.content
    class Meta:
        # Human-readable names in the Django admin ("评论" = comment).
        verbose_name = "评论"
        verbose_name_plural = "评论"
|
[
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.CharField",
"django.db.models.DateTimeField"
] |
[((152, 194), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'verbose_name': '"""作者"""'}), "(User, verbose_name='作者')\n", (169, 194), False, 'from django.db import models\n'), ((209, 256), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Article'], {'verbose_name': '"""文章ID"""'}), "(Article, verbose_name='文章ID')\n", (226, 256), False, 'from django.db import models\n'), ((271, 312), 'django.db.models.CharField', 'models.CharField', (['"""评论内容"""'], {'max_length': '(1000)'}), "('评论内容', max_length=1000)\n", (287, 312), False, 'from django.db import models\n'), ((330, 401), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'null': '(True)', 'blank': '(True)', 'verbose_name': '"""回复哪个评论"""'}), "('self', null=True, blank=True, verbose_name='回复哪个评论')\n", (347, 401), False, 'from django.db import models\n'), ((415, 484), 'django.db.models.IntegerField', 'models.IntegerField', (['"""状态"""'], {'choices': "((0, '正常'), (-1, '删除'))", 'default': '(0)'}), "('状态', choices=((0, '正常'), (-1, '删除')), default=0)\n", (434, 484), False, 'from django.db import models\n'), ((511, 558), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""创建时间"""'], {'auto_now_add': '(True)'}), "('创建时间', auto_now_add=True)\n", (531, 558), False, 'from django.db import models\n'), ((587, 632), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""最后更新时间"""'], {'auto_now': '(True)'}), "('最后更新时间', auto_now=True)\n", (607, 632), False, 'from django.db import models\n')]
|
import re
import base64
from urllib.parse import urljoin
# Pre-compiled pattern for the wzws anti-crawler bootstrap script; it captures
# the dynamic path, the "question" string and the numeric factor.
_pattern = re.compile(
    r"dynamicurl\|(?P<path>.+?)\|wzwsquestion\|(?P<question>.+?)\|wzwsfactor\|(?P<factor>\d+)"
)


def decrypt_wzws(text: str) -> str:
    """Best-effort wrapper: return the challenge URL hidden in *text*.

    Returns None (after printing a notice) when the page cannot be parsed.
    """
    # noinspection PyBroadException
    try:
        return _decrypt_by_python(text)
    except Exception:
        print("解析html错误")
        return None


def _decrypt_by_python(text: str) -> str:
    """Compute the wzws follow-up URL from the anti-crawler bootstrap page."""
    base_url = "http://wenshu.court.gov.cn"
    match = _pattern.search(text)
    path = match.group("path")
    question = match.group("question")
    factor = int(match.group("factor"))
    # The site derives a numeric token from the question string and factor,
    # then expects it back base64-encoded with a fixed prefix.
    token = sum(ord(ch) for ch in question) * factor + 111111
    label = "WZWS_CONFIRM_PREFIX_LABEL{}".format(token)
    challenge = base64.b64encode(label.encode()).decode()
    # Append the challenge as a query string to the dynamic path.
    return "{url}?{query}".format(
        url=urljoin(base_url, path),
        query="wzwschallenge={}".format(challenge),
    )
if __name__ == "__main__":
    # Manual smoke test: decode the challenge URL from a locally saved page.
    with open("demo.html") as f:
        _content = f.read()
    _resp = decrypt_wzws(_content)
    print(_resp)
|
[
"urllib.parse.urljoin",
"re.compile"
] |
[((71, 188), 're.compile', 're.compile', (['"""dynamicurl\\\\|(?P<path>.+?)\\\\|wzwsquestion\\\\|(?P<question>.+?)\\\\|wzwsfactor\\\\|(?P<factor>\\\\d+)"""'], {}), "(\n 'dynamicurl\\\\|(?P<path>.+?)\\\\|wzwsquestion\\\\|(?P<question>.+?)\\\\|wzwsfactor\\\\|(?P<factor>\\\\d+)'\n )\n", (81, 188), False, 'import re\n'), ((768, 791), 'urllib.parse.urljoin', 'urljoin', (['base_url', 'path'], {}), '(base_url, path)\n', (775, 791), False, 'from urllib.parse import urljoin\n')]
|
import csv
import random
import sys
def populate_test_csv():
    """Write test.csv with up to 35040 quarter-hour rows of random-walk schedules.

    ``time_step`` advances by 900 seconds (15 minutes) each iteration — 35040
    steps cover one year. Both schedule values random-walk by +/-1; a row is
    written only when both proposed steps stay inside the open interval
    (10, 55), matching the original behavior.
    """
    input_fields = ['time_step', 'PlantOnSched', 'HeatingSetpointSchedule']
    temp_change = [1, -1]
    plant_on_sched_last = 52
    heating_setpoint_schedule_last = 20
    # Fixes the original pattern `f = open(...)` / `with f:` / `f.close()`:
    # the context manager already closes the file, and the trailing close()
    # hit an already-closed handle. newline='' is what the csv module
    # requires for correctly written files.
    with open('test.csv', 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=input_fields)
        writer.writeheader()
        time_step = 0
        for _ in range(35040):
            delta_plant = temp_change[random.randint(0, 1)]
            delta_heat = temp_change[random.randint(0, 1)]
            # Accept the step (and emit a row) only if both walks stay in range.
            if (10 < plant_on_sched_last + delta_plant < 55
                    and 10 < heating_setpoint_schedule_last + delta_heat < 55):
                plant_on_sched_last += delta_plant
                heating_setpoint_schedule_last += delta_heat
                writer.writerow({'time_step': time_step,
                                 'PlantOnSched': plant_on_sched_last,
                                 'HeatingSetpointSchedule': heating_setpoint_schedule_last})
            time_step += 900
def populate_new_csv(index):
    """Write year{index}.csv with random-walk temperature setpoints.

    ``Tset`` starts at 23 and random-walks by +/-1; a row is written only
    when the step stays inside the open interval (-6, 53), while
    ``time_step`` always advances by 900 seconds per iteration (35040
    iterations = one year of quarter-hour steps).

    Args:
        index: Suffix for the output file name, ``year{index}.csv``.
    """
    input_fields = ['time_step', 'Tset']
    temp_change = [1, -1]
    setpoint = 23
    # Fixes the original pattern `f = open(...)` / `with f:` / `f.close()`:
    # the context manager closes the file; newline='' is required by csv.
    with open(f'year{index}.csv', 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=input_fields)
        writer.writeheader()
        time_step = 0
        for _ in range(35040):
            delta = temp_change[random.randint(0, 1)]
            if -6 < setpoint + delta < 53:
                setpoint += delta
                writer.writerow({'time_step': time_step, 'Tset': setpoint})
            time_step += 900
# Generate nineteen yearly schedule files (year1.csv .. year19.csv).
for i in range(1, 20):
    populate_new_csv(i)
|
[
"random.randint",
"csv.DictWriter"
] |
[((311, 353), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'input_fields'}), '(f, fieldnames=input_fields)\n', (325, 353), False, 'import csv\n'), ((1219, 1261), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'input_fields'}), '(f, fieldnames=input_fields)\n', (1233, 1261), False, 'import csv\n'), ((444, 464), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (458, 464), False, 'import random\n'), ((481, 501), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (495, 501), False, 'import random\n'), ((1352, 1372), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1366, 1372), False, 'import random\n')]
|
#!/usr/bin/env python
# coding:utf-8
from __future__ import print_function
#import sys
import re
import glob
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Collect every result file produced by the tau=inf runs, e.g.
# "../run3/dat_L3_tau_inf".
all_files = glob.glob('../*/dat_L*_tau_inf')

list_L = []
list_N = []
list_mx = []
list_mz0mz1 = []
list_ene = []
# Default so the script degrades gracefully (empty plots) instead of raising
# NameError when no data files are found.
list_h = np.array([], dtype=float)

for file_name in all_files:
    # Extract the linear system size L from a name like ".../dat_L3_tau_inf".
    L = re.sub(".*dat_L", "", file_name)
    L = int(L.replace("_tau_inf", ""))
    N = L**2  # number of sites on an L x L lattice
    list_L.append(L)
    list_N.append(N)
    print(file_name, L, N)
    # `with` replaces the manual open/close and avoids shadowing the
    # builtin `file` name.
    with open(file_name) as data_file:
        lines = data_file.readlines()
    for line in lines:
        # Each observable is dumped as "name [v0,v1,...]": strip the trailing
        # newline, the label and the brackets, then parse the CSV payload.
        # dtype=float replaces np.float, which was removed in NumPy 1.24.
        # (np.fromstring with sep is deprecated but still functional.)
        if line.startswith("mx ["):
            payload = line[:-1].replace("mx [", "").replace("]", "")
            list_mx.append(np.fromstring(payload, dtype=float, sep=','))
        if line.startswith("mz0mz1 ["):
            payload = line[:-1].replace("mz0mz1 [", "").replace("]", "")
            list_mz0mz1.append(np.fromstring(payload, dtype=float, sep=','))
        if line.startswith("ene ["):
            payload = line[:-1].replace("ene [", "").replace("]", "")
            list_ene.append(np.fromstring(payload, dtype=float, sep=','))
        if line.startswith("field_steps: h(t)= ["):
            payload = line[:-1].replace("field_steps: h(t)= [", "").replace("]", "")
            list_h = np.fromstring(payload, dtype=float, sep=',')

# Energy density = total energy / number of sites.
list_enedens = []
for i in range(len(list_N)):
    list_enedens.append(np.array([x / list_N[i] for x in list_ene[i]], dtype=float))

print("h", list_h)
for i in range(len(list_L)):
    print("L mx", list_L[i], list_mx[i])
    print("L mz0mz1", list_L[i], list_mz0mz1[i])
    print("L enedens", list_L[i], list_enedens[i])

# One figure per observable, each curve labelled by its system size L.
# The x axis is inverted so the field decreases left to right.
fig0 = plt.figure()
fig0.suptitle("mx")
for i in range(len(list_L)):
    plt.plot(list_h, list_mx[i], label=list_L[i])
plt.xlabel("field")
plt.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=1)
plt.gca().invert_xaxis()
fig0.savefig("fig_mx.png")

fig1 = plt.figure()
fig1.suptitle("mz0mz1")
for i in range(len(list_L)):
    plt.plot(list_h, list_mz0mz1[i], label=list_L[i])
plt.xlabel("field")
plt.legend(bbox_to_anchor=(1, 0), loc='lower right', borderaxespad=1)
plt.gca().invert_xaxis()
fig1.savefig("fig_mz0mz1.png")

fig2 = plt.figure()
fig2.suptitle("enedens")
for i in range(len(list_L)):
    plt.plot(list_h, list_enedens[i], label=list_L[i])
plt.xlabel("field")
plt.legend(bbox_to_anchor=(1, 0), loc='lower right', borderaxespad=1)
plt.gca().invert_xaxis()
fig2.savefig("fig_enedens.png")
|
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.array",
"glob.glob",
"matplotlib.pyplot.xlabel",
"re.sub",
"numpy.fromstring"
] |
[((147, 168), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (161, 168), False, 'import matplotlib\n'), ((214, 246), 'glob.glob', 'glob.glob', (['"""../*/dat_L*_tau_inf"""'], {}), "('../*/dat_L*_tau_inf')\n", (223, 246), False, 'import glob\n'), ((2106, 2118), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2116, 2118), True, 'import matplotlib.pyplot as plt\n'), ((2216, 2235), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""field"""'], {}), "('field')\n", (2226, 2235), True, 'import matplotlib.pyplot as plt\n'), ((2236, 2305), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 1)', 'loc': '"""upper right"""', 'borderaxespad': '(1)'}), "(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=1)\n", (2246, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2363, 2375), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2373, 2375), True, 'import matplotlib.pyplot as plt\n'), ((2481, 2500), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""field"""'], {}), "('field')\n", (2491, 2500), True, 'import matplotlib.pyplot as plt\n'), ((2501, 2570), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 0)', 'loc': '"""lower right"""', 'borderaxespad': '(1)'}), "(bbox_to_anchor=(1, 0), loc='lower right', borderaxespad=1)\n", (2511, 2570), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2644), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2642, 2644), True, 'import matplotlib.pyplot as plt\n'), ((2752, 2771), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""field"""'], {}), "('field')\n", (2762, 2771), True, 'import matplotlib.pyplot as plt\n'), ((2772, 2841), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 0)', 'loc': '"""lower right"""', 'borderaxespad': '(1)'}), "(bbox_to_anchor=(1, 0), loc='lower right', borderaxespad=1)\n", (2782, 2841), True, 'import matplotlib.pyplot as plt\n'), ((391, 423), 're.sub', 're.sub', (['""".*dat_L"""', '""""""', 
'file_name'], {}), "('.*dat_L', '', file_name)\n", (397, 423), False, 'import re\n'), ((2172, 2217), 'matplotlib.pyplot.plot', 'plt.plot', (['list_h', 'list_mx[i]'], {'label': 'list_L[i]'}), '(list_h, list_mx[i], label=list_L[i])\n', (2180, 2217), True, 'import matplotlib.pyplot as plt\n'), ((2433, 2482), 'matplotlib.pyplot.plot', 'plt.plot', (['list_h', 'list_mz0mz1[i]'], {'label': 'list_L[i]'}), '(list_h, list_mz0mz1[i], label=list_L[i])\n', (2441, 2482), True, 'import matplotlib.pyplot as plt\n'), ((2703, 2753), 'matplotlib.pyplot.plot', 'plt.plot', (['list_h', 'list_enedens[i]'], {'label': 'list_L[i]'}), '(list_h, list_enedens[i], label=list_L[i])\n', (2711, 2753), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1918), 'numpy.array', 'np.array', (['[(x / list_N[i]) for x in list_ene[i]]'], {'dtype': 'np.float'}), '([(x / list_N[i]) for x in list_ene[i]], dtype=np.float)\n', (1862, 1918), True, 'import numpy as np\n'), ((2303, 2312), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2310, 2312), True, 'import matplotlib.pyplot as plt\n'), ((2568, 2577), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2575, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2839, 2848), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2846, 2848), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1783), 'numpy.fromstring', 'np.fromstring', (['line_h'], {'dtype': 'np.float', 'sep': '""","""'}), "(line_h, dtype=np.float, sep=',')\n", (1750, 1783), True, 'import numpy as np\n'), ((960, 1007), 'numpy.fromstring', 'np.fromstring', (['line_mx'], {'dtype': 'np.float', 'sep': '""","""'}), "(line_mx, dtype=np.float, sep=',')\n", (973, 1007), True, 'import numpy as np\n'), ((1229, 1280), 'numpy.fromstring', 'np.fromstring', (['line_mz0mz1'], {'dtype': 'np.float', 'sep': '""","""'}), "(line_mz0mz1, dtype=np.float, sep=',')\n", (1242, 1280), True, 'import numpy as np\n'), ((1478, 1526), 'numpy.fromstring', 'np.fromstring', (['line_ene'], {'dtype': 'np.float', 'sep': 
'""","""'}), "(line_ene, dtype=np.float, sep=',')\n", (1491, 1526), True, 'import numpy as np\n')]
|
#libs
import pygame
import datetime
#modules
from modules.gui import UserInterface
from modules.sprites import Sprites
from modules.supplies import Supplies
from modules.workers import Workers
from modules.demand import Demand
from modules.gamelogic import Actions
pygame.init()
window = pygame.display.set_mode((1280, 720))
# Three font sizes: large for worker/supply counters, medium for demand
# rows, small for one-line status messages.
worker_and_supplies_font = pygame.font.Font('./font/kennyFont.ttf', 70)
demand_font = pygame.font.Font('./font/kennyFont.ttf', 35)
information_font = pygame.font.Font('./font/kennyFont.ttf', 30)
pygame.display.set_caption('The Seller')
actions = Actions()
gui = UserInterface()
gui.create_lines_for_gui(window)
sprites_for_tokens = Sprites()
sprites_for_tokens.load_sprites()
# One sprite group per screen column: workers, clickable resources, demands.
workers_objects = pygame.sprite.Group()
resource_objects = pygame.sprite.Group()
demand_objects = pygame.sprite.Group()
# Resource tokens laid out in two columns (x=400 and x=650).
# Arguments: (position, sprite, name, <flag>, 0, 0, 0) — the boolean flag
# presumably marks sellable/finished goods; TODO confirm against Supplies.
RESOURCE_BUCKET_ORE = Supplies((400, 140), sprites_for_tokens.loaded_sprites[0], 'bucket of ore', False, 0, 0, 0)
RESOURCE_BUCKET_WATER = Supplies((400, 250), sprites_for_tokens.loaded_sprites[1], 'bucket of water', True, 0, 0, 0)
RESOURCE_CLAM_CLOSED = Supplies((400, 360), sprites_for_tokens.loaded_sprites[2], 'clam', False, 0, 0, 0)
RESOURCE_CLAM_OPEN = Supplies((400, 470), sprites_for_tokens.loaded_sprites[3], 'opened clam', True, 0, 0, 0)
RESOURCE_MEAT_COOKED = Supplies((400, 580), sprites_for_tokens.loaded_sprites[4], 'cooked meat', True, 0, 0, 0)
RESOURCE_MEAT_RAW = Supplies((650, 140), sprites_for_tokens.loaded_sprites[5], 'raw meat', False, 0, 0, 0)
RESOURCE_TOOL_SHOVEL = Supplies((650, 250), sprites_for_tokens.loaded_sprites[6], 'shovel', True, 0, 0, 0)
RESOURCE_TOOL_SWORD = Supplies((650, 360), sprites_for_tokens.loaded_sprites[7], 'sword', True, 0, 0, 0)
RESOURCE_WOOD_LOG = Supplies((650, 470), sprites_for_tokens.loaded_sprites[8], 'log', False, 0, 0, 0)
RESOURCE_WOOD_TREE = Supplies((650, 580), sprites_for_tokens.loaded_sprites[9], 'tree', False, 0, 0, 0)
# Demand tokens (right column, x=1050) reuse the sprites of the matching
# resource tokens.
DEMAND_BUCKET_WATER = Demand((1050, 100), sprites_for_tokens.loaded_sprites[1], 'bucket of water', 0, 0)
DEMAND_CLAM_OPEN = Demand((1050, 230), sprites_for_tokens.loaded_sprites[3], 'opened clam', 0, 0)
DEMAND_MEAT_COOKED = Demand((1050, 360), sprites_for_tokens.loaded_sprites[4], 'cooked meat', 0, 0)
DEMAND_TOOL_SHOVEL = Demand((1050, 490), sprites_for_tokens.loaded_sprites[6], 'shovel',0, 0)
DEMAND_TOOL_SWORD = Demand((1050, 620), sprites_for_tokens.loaded_sprites[7], 'sword', 0, 0)
# Six worker tokens stacked in the left column (x=80), all using the same sprite.
WORKER1 = Workers((80, 130), sprites_for_tokens.loaded_sprites[10], True)
WORKER2 = Workers((80, 220), sprites_for_tokens.loaded_sprites[10], True)
WORKER3 = Workers((80, 310), sprites_for_tokens.loaded_sprites[10], True)
WORKER4 = Workers((80, 400), sprites_for_tokens.loaded_sprites[10], True)
WORKER5 = Workers((80, 490), sprites_for_tokens.loaded_sprites[10], True)
WORKER6 = Workers((80, 580), sprites_for_tokens.loaded_sprites[10], True)
# Register every token with its group and draw the initial board once.
resource_objects.add(RESOURCE_BUCKET_ORE, RESOURCE_BUCKET_WATER, RESOURCE_CLAM_CLOSED,
                     RESOURCE_CLAM_OPEN, RESOURCE_MEAT_COOKED, RESOURCE_MEAT_RAW,
                     RESOURCE_TOOL_SHOVEL, RESOURCE_TOOL_SWORD, RESOURCE_WOOD_LOG,
                     RESOURCE_WOOD_TREE)
resource_objects.update()
resource_objects.draw(window)
demand_objects.add(DEMAND_BUCKET_WATER, DEMAND_CLAM_OPEN,
                   DEMAND_MEAT_COOKED, DEMAND_TOOL_SHOVEL, DEMAND_TOOL_SWORD)
demand_objects.update()
demand_objects.draw(window)
workers_objects.add(WORKER1, WORKER2, WORKER3, WORKER4, WORKER5, WORKER6)
workers_objects.update()
workers_objects.draw(window)
fps = pygame.time.Clock()
# Main-loop state flags.
is_game_running: bool = True
happines = 100  # NOTE(review): misspelling of "happiness" kept; renaming would touch other code.
end_turn = True
action_menu = False
selected_supply = None  # resource token last clicked by the player, if any
def produce_supplies_with_one_resource(selected_supply, resources, value_change):
    """Try to produce *selected_supply* by consuming one unit of each resource.

    Shows an on-screen notice instead when the ingredients are missing or no
    worker is available.
    """
    if not actions.check_is_supply_producable(selected_supply, resources, -1):
        gui.action_informations(window, information_font, 'notEnoughResources')
        return
    if not actions.start_production(workers_objects):
        gui.action_informations(window, information_font, 'noFreeWorkers')
        return
    actions.calculate_growth_for_resource(selected_supply, value_change)
    actions.calculate_usage_of_resources(resources, -1)
def produce_supplies(value_change: int):
    """Produce one batch of the globally selected supply.

    Crafted goods consume their ingredients via
    produce_supplies_with_one_resource(); 'sword' is intentionally a no-op;
    any other (raw) supply only needs a free worker.
    """
    # Recipes: supply name -> resources consumed per produced unit.
    recipes = {
        'opened clam': [RESOURCE_CLAM_CLOSED],
        'cooked meat': [RESOURCE_MEAT_RAW],
        'shovel': [RESOURCE_WOOD_LOG, RESOURCE_BUCKET_ORE],
        'log': [RESOURCE_WOOD_TREE],
    }
    name = selected_supply.name
    if name == 'sword':
        # Not craftable: the original code deliberately does nothing here.
        return
    if name in recipes:
        # Pass a fresh list so the shared recipe table is never mutated.
        produce_supplies_with_one_resource(selected_supply, list(recipes[name]), value_change)
        return
    # Raw resources need no ingredients, only an available worker.
    if actions.start_production(workers_objects):
        actions.calculate_growth_for_resource(selected_supply, value_change)
    else:
        gui.action_informations(window, information_font, 'noFreeWorkers')
def supplies_selling(value_change: int):
    """Sell the selected supply, adjusting its stock by ``value_change``.

    Shows a GUI message when the amount would go negative or when no worker
    is free to handle the sale.
    """
    if not actions.check_is_supply_sellable(selected_supply, value_change):
        gui.action_informations(window, information_font, 'negativeAmount')
        return
    if actions.start_production(workers_objects):
        actions.sell_supplies(selected_supply, value_change)
    else:
        gui.action_informations(window, information_font, 'noFreeWorkers')
# Main game loop: draw the HUD, process input events, and advance turns
# until the player closes the window.
while is_game_running:
    fps.tick(30)  # cap at 30 frames per second
    gui.create_text_with_workers_status(worker_and_supplies_font, window, workers_objects)
    gui.create_text_with_supplies_amounts(worker_and_supplies_font, window, resource_objects)
    gui.create_text_with_demand_information(demand_font, window, demand_objects)
    gui.create_text_with_happines(window, demand_font, happines)
    if happines > 0:
        # Start-of-turn bookkeeping: free workers and apply supply changes.
        if end_turn:  # fixed: was the non-idiomatic `end_turn == True`
            actions.free_workers(workers_objects)
            actions.change_supplies_amount(resource_objects)
            end_turn = False
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            is_game_running = False
        if event.type == pygame.MOUSEBUTTONUP:
            # Select the supply token under the mouse cursor, if any.
            mouse_position = pygame.mouse.get_pos()
            for supply in resource_objects:
                if supply.rect.collidepoint(mouse_position):
                    gui.on_click_actions(window, information_font, supply, demand_objects)
                    action_menu = True
                    selected_supply = supply
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_F12:
                # F12: save a screenshot of the window.
                pygame.image.save(window, 'screen.png')
            if event.key == pygame.K_f:
                # F: end the current turn and close the action menu.
                end_turn = True
                action_menu = False
                gui.action_informations(window, information_font, 'endTurn')
            if event.key == pygame.K_p and action_menu:
                produce_supplies(1)
            if event.key == pygame.K_s and action_menu:
                supplies_selling(-1)
            if event.key == pygame.K_d and action_menu:
                supplies_selling(-5)
    pygame.display.update()
pygame.quit()
|
[
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"modules.workers.Workers",
"modules.sprites.Sprites",
"pygame.init",
"pygame.mouse.get_pos",
"pygame.sprite.Group",
"pygame.display.update",
"modules.gamelogic.Actions",
"modules.gui.UserInterface",
"pygame.font.Font",
"modules.demand.Demand",
"pygame.display.set_caption",
"pygame.time.Clock",
"pygame.image.save",
"modules.supplies.Supplies"
] |
[((279, 292), 'pygame.init', 'pygame.init', ([], {}), '()\n', (290, 292), False, 'import pygame\n'), ((303, 339), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(1280, 720)'], {}), '((1280, 720))\n', (326, 339), False, 'import pygame\n'), ((368, 412), 'pygame.font.Font', 'pygame.font.Font', (['"""./font/kennyFont.ttf"""', '(70)'], {}), "('./font/kennyFont.ttf', 70)\n", (384, 412), False, 'import pygame\n'), ((428, 472), 'pygame.font.Font', 'pygame.font.Font', (['"""./font/kennyFont.ttf"""', '(35)'], {}), "('./font/kennyFont.ttf', 35)\n", (444, 472), False, 'import pygame\n'), ((493, 537), 'pygame.font.Font', 'pygame.font.Font', (['"""./font/kennyFont.ttf"""', '(30)'], {}), "('./font/kennyFont.ttf', 30)\n", (509, 537), False, 'import pygame\n'), ((539, 579), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""The Seller"""'], {}), "('The Seller')\n", (565, 579), False, 'import pygame\n'), ((593, 602), 'modules.gamelogic.Actions', 'Actions', ([], {}), '()\n', (600, 602), False, 'from modules.gamelogic import Actions\n'), ((612, 627), 'modules.gui.UserInterface', 'UserInterface', ([], {}), '()\n', (625, 627), False, 'from modules.gui import UserInterface\n'), ((686, 695), 'modules.sprites.Sprites', 'Sprites', ([], {}), '()\n', (693, 695), False, 'from modules.sprites import Sprites\n'), ((752, 773), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (771, 773), False, 'import pygame\n'), ((794, 815), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (813, 815), False, 'import pygame\n'), ((834, 855), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (853, 855), False, 'import pygame\n'), ((885, 980), 'modules.supplies.Supplies', 'Supplies', (['(400, 140)', 'sprites_for_tokens.loaded_sprites[0]', '"""bucket of ore"""', '(False)', '(0)', '(0)', '(0)'], {}), "((400, 140), sprites_for_tokens.loaded_sprites[0], 'bucket of ore',\n False, 0, 0, 0)\n", (893, 980), False, 'from modules.supplies import 
Supplies\n'), ((1004, 1100), 'modules.supplies.Supplies', 'Supplies', (['(400, 250)', 'sprites_for_tokens.loaded_sprites[1]', '"""bucket of water"""', '(True)', '(0)', '(0)', '(0)'], {}), "((400, 250), sprites_for_tokens.loaded_sprites[1],\n 'bucket of water', True, 0, 0, 0)\n", (1012, 1100), False, 'from modules.supplies import Supplies\n'), ((1124, 1210), 'modules.supplies.Supplies', 'Supplies', (['(400, 360)', 'sprites_for_tokens.loaded_sprites[2]', '"""clam"""', '(False)', '(0)', '(0)', '(0)'], {}), "((400, 360), sprites_for_tokens.loaded_sprites[2], 'clam', False, 0,\n 0, 0)\n", (1132, 1210), False, 'from modules.supplies import Supplies\n'), ((1234, 1327), 'modules.supplies.Supplies', 'Supplies', (['(400, 470)', 'sprites_for_tokens.loaded_sprites[3]', '"""opened clam"""', '(True)', '(0)', '(0)', '(0)'], {}), "((400, 470), sprites_for_tokens.loaded_sprites[3], 'opened clam', \n True, 0, 0, 0)\n", (1242, 1327), False, 'from modules.supplies import Supplies\n'), ((1350, 1443), 'modules.supplies.Supplies', 'Supplies', (['(400, 580)', 'sprites_for_tokens.loaded_sprites[4]', '"""cooked meat"""', '(True)', '(0)', '(0)', '(0)'], {}), "((400, 580), sprites_for_tokens.loaded_sprites[4], 'cooked meat', \n True, 0, 0, 0)\n", (1358, 1443), False, 'from modules.supplies import Supplies\n'), ((1466, 1557), 'modules.supplies.Supplies', 'Supplies', (['(650, 140)', 'sprites_for_tokens.loaded_sprites[5]', '"""raw meat"""', '(False)', '(0)', '(0)', '(0)'], {}), "((650, 140), sprites_for_tokens.loaded_sprites[5], 'raw meat', \n False, 0, 0, 0)\n", (1474, 1557), False, 'from modules.supplies import Supplies\n'), ((1580, 1668), 'modules.supplies.Supplies', 'Supplies', (['(650, 250)', 'sprites_for_tokens.loaded_sprites[6]', '"""shovel"""', '(True)', '(0)', '(0)', '(0)'], {}), "((650, 250), sprites_for_tokens.loaded_sprites[6], 'shovel', True, \n 0, 0, 0)\n", (1588, 1668), False, 'from modules.supplies import Supplies\n'), ((1691, 1777), 'modules.supplies.Supplies', 'Supplies', 
(['(650, 360)', 'sprites_for_tokens.loaded_sprites[7]', '"""sword"""', '(True)', '(0)', '(0)', '(0)'], {}), "((650, 360), sprites_for_tokens.loaded_sprites[7], 'sword', True, 0,\n 0, 0)\n", (1699, 1777), False, 'from modules.supplies import Supplies\n'), ((1801, 1886), 'modules.supplies.Supplies', 'Supplies', (['(650, 470)', 'sprites_for_tokens.loaded_sprites[8]', '"""log"""', '(False)', '(0)', '(0)', '(0)'], {}), "((650, 470), sprites_for_tokens.loaded_sprites[8], 'log', False, 0,\n 0, 0)\n", (1809, 1886), False, 'from modules.supplies import Supplies\n'), ((1910, 1996), 'modules.supplies.Supplies', 'Supplies', (['(650, 580)', 'sprites_for_tokens.loaded_sprites[9]', '"""tree"""', '(False)', '(0)', '(0)', '(0)'], {}), "((650, 580), sprites_for_tokens.loaded_sprites[9], 'tree', False, 0,\n 0, 0)\n", (1918, 1996), False, 'from modules.supplies import Supplies\n'), ((2020, 2106), 'modules.demand.Demand', 'Demand', (['(1050, 100)', 'sprites_for_tokens.loaded_sprites[1]', '"""bucket of water"""', '(0)', '(0)'], {}), "((1050, 100), sprites_for_tokens.loaded_sprites[1], 'bucket of water',\n 0, 0)\n", (2026, 2106), False, 'from modules.demand import Demand\n'), ((2130, 2208), 'modules.demand.Demand', 'Demand', (['(1050, 230)', 'sprites_for_tokens.loaded_sprites[3]', '"""opened clam"""', '(0)', '(0)'], {}), "((1050, 230), sprites_for_tokens.loaded_sprites[3], 'opened clam', 0, 0)\n", (2136, 2208), False, 'from modules.demand import Demand\n'), ((2236, 2314), 'modules.demand.Demand', 'Demand', (['(1050, 360)', 'sprites_for_tokens.loaded_sprites[4]', '"""cooked meat"""', '(0)', '(0)'], {}), "((1050, 360), sprites_for_tokens.loaded_sprites[4], 'cooked meat', 0, 0)\n", (2242, 2314), False, 'from modules.demand import Demand\n'), ((2342, 2415), 'modules.demand.Demand', 'Demand', (['(1050, 490)', 'sprites_for_tokens.loaded_sprites[6]', '"""shovel"""', '(0)', '(0)'], {}), "((1050, 490), sprites_for_tokens.loaded_sprites[6], 'shovel', 0, 0)\n", (2348, 2415), False, 'from 
modules.demand import Demand\n'), ((2442, 2514), 'modules.demand.Demand', 'Demand', (['(1050, 620)', 'sprites_for_tokens.loaded_sprites[7]', '"""sword"""', '(0)', '(0)'], {}), "((1050, 620), sprites_for_tokens.loaded_sprites[7], 'sword', 0, 0)\n", (2448, 2514), False, 'from modules.demand import Demand\n'), ((2542, 2605), 'modules.workers.Workers', 'Workers', (['(80, 130)', 'sprites_for_tokens.loaded_sprites[10]', '(True)'], {}), '((80, 130), sprites_for_tokens.loaded_sprites[10], True)\n', (2549, 2605), False, 'from modules.workers import Workers\n'), ((2633, 2696), 'modules.workers.Workers', 'Workers', (['(80, 220)', 'sprites_for_tokens.loaded_sprites[10]', '(True)'], {}), '((80, 220), sprites_for_tokens.loaded_sprites[10], True)\n', (2640, 2696), False, 'from modules.workers import Workers\n'), ((2724, 2787), 'modules.workers.Workers', 'Workers', (['(80, 310)', 'sprites_for_tokens.loaded_sprites[10]', '(True)'], {}), '((80, 310), sprites_for_tokens.loaded_sprites[10], True)\n', (2731, 2787), False, 'from modules.workers import Workers\n'), ((2815, 2878), 'modules.workers.Workers', 'Workers', (['(80, 400)', 'sprites_for_tokens.loaded_sprites[10]', '(True)'], {}), '((80, 400), sprites_for_tokens.loaded_sprites[10], True)\n', (2822, 2878), False, 'from modules.workers import Workers\n'), ((2906, 2969), 'modules.workers.Workers', 'Workers', (['(80, 490)', 'sprites_for_tokens.loaded_sprites[10]', '(True)'], {}), '((80, 490), sprites_for_tokens.loaded_sprites[10], True)\n', (2913, 2969), False, 'from modules.workers import Workers\n'), ((2997, 3060), 'modules.workers.Workers', 'Workers', (['(80, 580)', 'sprites_for_tokens.loaded_sprites[10]', '(True)'], {}), '((80, 580), sprites_for_tokens.loaded_sprites[10], True)\n', (3004, 3060), False, 'from modules.workers import Workers\n'), ((3754, 3773), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (3771, 3773), False, 'import pygame\n'), ((8347, 8360), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (8358, 
8360), False, 'import pygame\n'), ((8316, 8339), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (8337, 8339), False, 'import pygame\n'), ((7104, 7122), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7120, 7122), False, 'import pygame\n'), ((7294, 7316), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (7314, 7316), False, 'import pygame\n'), ((7740, 7779), 'pygame.image.save', 'pygame.image.save', (['window', '"""screen.png"""'], {}), "(window, 'screen.png')\n", (7757, 7779), False, 'import pygame\n')]
|
#!/usr/bin/env python
# encoding: utf-8
import argparse
import prody
import os
import shutil
import subprocess
import numpy
from os.path import join
# Directory containing the GROMACS executables (pdb2gmx, grompp, mdrun, ...).
GMX_PATH = '/usr/local/gromacs/bin/'
# Template for the GROMACS .mdp minimization parameter file; {integrator}
# is filled with 'steep' (round 1) or 'l-bfgs' (round 2) by run_minimization.
mdp_string = '''
define = -DPOSRES
integrator = {integrator}
nsteps = 1000
emtol = 1
nstlist = 1
coulombtype = Cut-off
vdwtype = Cut-off
ns_type = simple
rlist = 1.8
rcoulomb = 1.8
rvdw = 1.8
pbc = xyz
implicit_solvent = GBSA
gb_algorithm = OBC
sa_algorithm = ACE-approximation
rgbradii = 1.8
;nstxout = 1
'''
def parse_args():
    """Parse command-line arguments.

    Returns:
        tuple: (pdb_file, trajectory, out_file, pos_res_k)
    """
    arg_parser = argparse.ArgumentParser(
        description='Generate trajectory with gaussian flucutations.')
    arg_parser.add_argument('pdb_file', metavar='INPUT_PDB_FILE',
                            help='path to input pdb file')
    arg_parser.add_argument('trajectory', metavar='TRAJECTORY',
                            help='path to input trajectory')
    arg_parser.add_argument('out_file', metavar='OUTPUT_PDB_FILE',
                            help='path to input pdb file')
    arg_parser.add_argument('--pos_res_k', type=float, default=1000.)
    parsed = arg_parser.parse_args()
    return (parsed.pdb_file, parsed.trajectory, parsed.out_file, parsed.pos_res_k)
class Minimizer(object):
    """Minimize a trajectory frame with GROMACS under position restraints.

    Loads an input structure and a trajectory, picks the trajectory frame
    closest (by RMSD after superposition) to the input structure, then runs
    a two-round GROMACS energy minimization (steepest descent followed by
    L-BFGS) with position restraints toward the input structure.

    NOTE(review): this module uses Python 2 print-statement syntax
    (``print >>out``) and passes str to Popen.communicate, so it is
    Python 2 only.
    """

    def __init__(self, input_pdb_filename, trajectory_filename):
        self.input_pdb = self._load_pdb(input_pdb_filename)
        self.trajectory = self._load_pdb(trajectory_filename)

    def _load_pdb(self, in_file):
        """Parse a PDB file with prody and return the structure object."""
        protein = prody.parsePDB(in_file)
        return protein

    def _get_closest_frame(self):
        """Return the trajectory frame with the lowest RMSD to the input pdb."""
        # Build an AtomGroup that reuses the trajectory's topology metadata.
        output = prody.AtomGroup('Cartesian average coordinates')
        output.setCoords( self.trajectory.getCoords() )
        output.setNames( self.trajectory.getNames() )
        output.setResnums( self.trajectory.getResnums() )
        output.setResnames( self.trajectory.getResnames() )
        # Superpose every frame onto the input structure and compute RMSDs.
        ensemble = prody.PDBEnsemble(self.trajectory)
        ensemble.setCoords(self.input_pdb)
        ensemble.superpose()
        rmsds = ensemble.getRMSDs()
        min_index = numpy.argmin(rmsds)
        output.setCoords( ensemble.getCoordsets(min_index) )
        return output

    def _create_no_h_file(self, output_stream):
        """Strip hydrogens from min_round_2.gro, producing no_h.gro.

        Runs in the Temp working directory created by run_minimization.
        """
        # make the index file
        cmd = join(GMX_PATH, 'make_ndx')
        cmd += ' -f min_round_2.gro -o no_h.ndx'
        p1 = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                              stdout=output_stream, stderr=output_stream)
        # 'q' quits make_ndx after it writes the default groups
        p1.communicate('q\n')
        # run editconf
        edit_cmd = join(GMX_PATH, 'editconf')
        edit_cmd += ' -f min_round_2.gro -o no_h.gro -n no_h.ndx'
        p2 = subprocess.Popen(edit_cmd, shell=True, stdin=subprocess.PIPE,
                              stdout=output_stream, stderr=output_stream)
        # '2' selects a group from no_h.ndx -- presumably the non-hydrogen
        # group; TODO confirm against the make_ndx group numbering
        p2.communicate('2\n')

    def _re_order(self, output_stream):
        """Restore the original atom ordering and write min.pdb."""
        # create a new index file
        lines = open('index.ndx').read().splitlines()
        header = lines[0]
        indices = []
        for line in lines[1:]:
            cols = line.split()
            for col in cols:
                indices.append( int(col) )
        # Invert the permutation: for each original position, find where it
        # ended up (1-based indices as required by the .ndx format).
        resorted = [ indices.index(val)+1 for val in range( 1, max(indices)+1 ) ]
        with open('resort.ndx', 'w') as out:
            print >>out, header
            for val in resorted:
                print >>out, val
        # resort
        edit_cmd = join(GMX_PATH, 'editconf')
        edit_cmd += ' -f no_h.gro -o min.pdb -n resort.ndx'
        subprocess.check_call(edit_cmd, shell=True, stdout=output_stream,
                              stderr=output_stream)

    def run_minimization(self, posres_force_const=1000., output_stream=None):
        """Run the two-round restrained minimization.

        Args:
            posres_force_const (float): force constant passed to pdb2gmx
                for the position restraints.
            output_stream: file-like object that receives GROMACS
                stdout/stderr; None inherits the parent's streams.

        Returns:
            prody selection of the minimized structure without hydrogens.
        """
        start = self._get_closest_frame()
        # create temp dir
        if os.path.isdir('Temp'):
            pass
        else:
            os.mkdir('Temp')
        os.chdir('Temp')
        # write the average file
        prody.writePDB('average.pdb', self.input_pdb)
        pdb_cmd = join(GMX_PATH, 'pdb2gmx')
        pdb_cmd += ' -f average.pdb -ff amber99sb-ildn -water none -n index.ndx -posrefc {} -o ref.gro -his'.format(
            posres_force_const)
        p = subprocess.Popen(pdb_cmd, shell=True, stdin=subprocess.PIPE,
                             stdout=output_stream, stderr=output_stream)
        # Answer the interactive histidine prompts opened by -his; presumably
        # enough '0' answers for any input -- TODO confirm
        p.communicate('0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n')
        # put it in a bigger box
        box_cmd = join(GMX_PATH, 'editconf')
        box_cmd += ' -f ref.gro -o ref_box.gro -c -box 999 999 999'
        subprocess.check_call(box_cmd, shell=True, stdout=output_stream,
                              stderr=output_stream)
        # write pdb file
        prody.writePDB('start.pdb', start)
        # pdb2gmx
        pdb_cmd = join(GMX_PATH, 'pdb2gmx')
        pdb_cmd += ' -f start.pdb -ff amber99sb-ildn -water none -n index.ndx -posrefc {} -his'.format(
            posres_force_const)
        p = subprocess.Popen(pdb_cmd, shell=True, stdin=subprocess.PIPE,
                             stdout=output_stream, stderr=output_stream)
        p.communicate('0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n')
        # put it in a bigger box
        box_cmd = join(GMX_PATH, 'editconf')
        box_cmd += ' -f conf.gro -o box.gro -c -box 999 999 999'
        subprocess.check_call(box_cmd, shell=True, stdout=output_stream,
                              stderr=output_stream)
        #
        # Round 1
        #
        # write mdp file
        with open('min_round_1.mdp', 'w') as min_file:
            min_file.write( mdp_string.format(integrator='steep') )
        # run grompp
        grompp_cmd = join(GMX_PATH, 'grompp')
        grompp_cmd += ' -f min_round_1.mdp -c box.gro -p topol.top -o min_round_1 -r ref_box.gro'
        subprocess.check_call(grompp_cmd, shell=True, stdout=output_stream,
                              stderr=output_stream)
        # run mdrun
        md_cmd = join(GMX_PATH, 'mdrun')
        md_cmd += ' -deffnm min_round_1 -v -nt 1'
        subprocess.check_call(md_cmd, shell=True, stdout=output_stream,
                              stderr=output_stream)
        #
        # Round 2
        #
        # write mdp file
        with open('min_round_2.mdp', 'w') as min_file:
            min_file.write( mdp_string.format(integrator='l-bfgs') )
        # run grompp
        grompp_cmd = join(GMX_PATH, 'grompp')
        grompp_cmd += ' -f min_round_2.mdp -c min_round_1.gro -p topol.top -o min_round_2 -maxwarn 1 -r ref_box.gro'
        subprocess.check_call(grompp_cmd, shell=True, stdout=output_stream,
                              stderr=output_stream)
        # run mdrun
        md_cmd = join(GMX_PATH, 'mdrun')
        md_cmd += ' -deffnm min_round_2 -v -nt 1'
        subprocess.check_call(md_cmd, shell=True, stdout=output_stream,
                              stderr=output_stream)
        #
        # gather results
        #
        self._create_no_h_file(output_stream)
        self._re_order(output_stream)
        # load the pdb
        protein = prody.parsePDB('min.pdb').select('not hydrogen')
        # clean up
        os.chdir('..')
        shutil.rmtree('Temp')
        return protein
def main():
    """Entry point: minimize the closest trajectory frame and write it out."""
    input_pdb_filename, trajectory_filename, output_pdb_filename, force_const = parse_args()
    minimizer = Minimizer(input_pdb_filename, trajectory_filename)
    minimized_protein = minimizer.run_minimization(force_const)
    prody.writePDB(output_pdb_filename, minimized_protein)


if __name__ == '__main__':
    main()
|
[
"os.mkdir",
"subprocess.Popen",
"argparse.ArgumentParser",
"prody.PDBEnsemble",
"os.path.join",
"os.path.isdir",
"prody.AtomGroup",
"numpy.argmin",
"prody.parsePDB",
"shutil.rmtree",
"prody.writePDB",
"os.chdir",
"subprocess.check_call"
] |
[((534, 625), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate trajectory with gaussian flucutations."""'}), "(description=\n 'Generate trajectory with gaussian flucutations.')\n", (557, 625), False, 'import argparse\n'), ((7413, 7467), 'prody.writePDB', 'prody.writePDB', (['output_pdb_filename', 'minimized_protein'], {}), '(output_pdb_filename, minimized_protein)\n', (7427, 7467), False, 'import prody\n'), ((1341, 1364), 'prody.parsePDB', 'prody.parsePDB', (['in_file'], {}), '(in_file)\n', (1355, 1364), False, 'import prody\n'), ((1440, 1488), 'prody.AtomGroup', 'prody.AtomGroup', (['"""Cartesian average coordinates"""'], {}), "('Cartesian average coordinates')\n", (1455, 1488), False, 'import prody\n'), ((1737, 1771), 'prody.PDBEnsemble', 'prody.PDBEnsemble', (['self.trajectory'], {}), '(self.trajectory)\n', (1754, 1771), False, 'import prody\n'), ((1900, 1919), 'numpy.argmin', 'numpy.argmin', (['rmsds'], {}), '(rmsds)\n', (1912, 1919), False, 'import numpy\n'), ((2097, 2123), 'os.path.join', 'join', (['GMX_PATH', '"""make_ndx"""'], {}), "(GMX_PATH, 'make_ndx')\n", (2101, 2123), False, 'from os.path import join\n'), ((2186, 2291), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdin': 'subprocess.PIPE', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(cmd, shell=True, stdin=subprocess.PIPE, stdout=\n output_stream, stderr=output_stream)\n', (2202, 2291), False, 'import subprocess\n'), ((2390, 2416), 'os.path.join', 'join', (['GMX_PATH', '"""editconf"""'], {}), "(GMX_PATH, 'editconf')\n", (2394, 2416), False, 'from os.path import join\n'), ((2496, 2606), 'subprocess.Popen', 'subprocess.Popen', (['edit_cmd'], {'shell': '(True)', 'stdin': 'subprocess.PIPE', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(edit_cmd, shell=True, stdin=subprocess.PIPE, stdout=\n output_stream, stderr=output_stream)\n', (2512, 2606), False, 'import subprocess\n'), ((3235, 3261), 'os.path.join', 'join', 
(['GMX_PATH', '"""editconf"""'], {}), "(GMX_PATH, 'editconf')\n", (3239, 3261), False, 'from os.path import join\n'), ((3330, 3422), 'subprocess.check_call', 'subprocess.check_call', (['edit_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(edit_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (3351, 3422), False, 'import subprocess\n'), ((3607, 3628), 'os.path.isdir', 'os.path.isdir', (['"""Temp"""'], {}), "('Temp')\n", (3620, 3628), False, 'import os\n'), ((3698, 3714), 'os.chdir', 'os.chdir', (['"""Temp"""'], {}), "('Temp')\n", (3706, 3714), False, 'import os\n'), ((3757, 3802), 'prody.writePDB', 'prody.writePDB', (['"""average.pdb"""', 'self.input_pdb'], {}), "('average.pdb', self.input_pdb)\n", (3771, 3802), False, 'import prody\n'), ((3821, 3846), 'os.path.join', 'join', (['GMX_PATH', '"""pdb2gmx"""'], {}), "(GMX_PATH, 'pdb2gmx')\n", (3825, 3846), False, 'from os.path import join\n'), ((4008, 4117), 'subprocess.Popen', 'subprocess.Popen', (['pdb_cmd'], {'shell': '(True)', 'stdin': 'subprocess.PIPE', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(pdb_cmd, shell=True, stdin=subprocess.PIPE, stdout=\n output_stream, stderr=output_stream)\n', (4024, 4117), False, 'import subprocess\n'), ((4330, 4356), 'os.path.join', 'join', (['GMX_PATH', '"""editconf"""'], {}), "(GMX_PATH, 'editconf')\n", (4334, 4356), False, 'from os.path import join\n'), ((4433, 4524), 'subprocess.check_call', 'subprocess.check_call', (['box_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(box_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (4454, 4524), False, 'import subprocess\n'), ((4584, 4618), 'prody.writePDB', 'prody.writePDB', (['"""start.pdb"""', 'start'], {}), "('start.pdb', start)\n", (4598, 4618), False, 'import prody\n'), ((4656, 4681), 'os.path.join', 'join', (['GMX_PATH', '"""pdb2gmx"""'], {}), "(GMX_PATH, 'pdb2gmx')\n", (4660, 4681), False, 'from os.path 
import join\n'), ((4830, 4939), 'subprocess.Popen', 'subprocess.Popen', (['pdb_cmd'], {'shell': '(True)', 'stdin': 'subprocess.PIPE', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(pdb_cmd, shell=True, stdin=subprocess.PIPE, stdout=\n output_stream, stderr=output_stream)\n', (4846, 4939), False, 'import subprocess\n'), ((5153, 5179), 'os.path.join', 'join', (['GMX_PATH', '"""editconf"""'], {}), "(GMX_PATH, 'editconf')\n", (5157, 5179), False, 'from os.path import join\n'), ((5253, 5344), 'subprocess.check_call', 'subprocess.check_call', (['box_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(box_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (5274, 5344), False, 'import subprocess\n'), ((5601, 5625), 'os.path.join', 'join', (['GMX_PATH', '"""grompp"""'], {}), "(GMX_PATH, 'grompp')\n", (5605, 5625), False, 'from os.path import join\n'), ((5732, 5826), 'subprocess.check_call', 'subprocess.check_call', (['grompp_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(grompp_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (5753, 5826), False, 'import subprocess\n'), ((5890, 5913), 'os.path.join', 'join', (['GMX_PATH', '"""mdrun"""'], {}), "(GMX_PATH, 'mdrun')\n", (5894, 5913), False, 'from os.path import join\n'), ((5972, 6062), 'subprocess.check_call', 'subprocess.check_call', (['md_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(md_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (5993, 6062), False, 'import subprocess\n'), ((6320, 6344), 'os.path.join', 'join', (['GMX_PATH', '"""grompp"""'], {}), "(GMX_PATH, 'grompp')\n", (6324, 6344), False, 'from os.path import join\n'), ((6470, 6564), 'subprocess.check_call', 'subprocess.check_call', (['grompp_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(grompp_cmd, shell=True, stdout=output_stream, stderr=\n 
output_stream)\n', (6491, 6564), False, 'import subprocess\n'), ((6628, 6651), 'os.path.join', 'join', (['GMX_PATH', '"""mdrun"""'], {}), "(GMX_PATH, 'mdrun')\n", (6632, 6651), False, 'from os.path import join\n'), ((6710, 6800), 'subprocess.check_call', 'subprocess.check_call', (['md_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(md_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (6731, 6800), False, 'import subprocess\n'), ((7075, 7089), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (7083, 7089), False, 'import os\n'), ((7098, 7119), 'shutil.rmtree', 'shutil.rmtree', (['"""Temp"""'], {}), "('Temp')\n", (7111, 7119), False, 'import shutil\n'), ((3673, 3689), 'os.mkdir', 'os.mkdir', (['"""Temp"""'], {}), "('Temp')\n", (3681, 3689), False, 'import os\n'), ((6998, 7023), 'prody.parsePDB', 'prody.parsePDB', (['"""min.pdb"""'], {}), "('min.pdb')\n", (7012, 7023), False, 'import prody\n')]
|
import argparse
import glob
import os
import xml.etree.ElementTree as ET
from xml.dom import minidom
class Merger:
    """Merge per-system ``*_gamelist.xml`` files into a single gamelist.

    Usage: ``read()`` collects the input files from a folder, ``write()``
    merges their entries into one file, pretty-prints it, and deletes the
    inputs.
    """

    def __init__(self):
        self._filelist = []  # paths of the gamelist files collected by read()
        pass

    def prettify(self, elem):
        """Return a pretty-printed UTF-8 XML string for the element tree."""
        # Collapse existing whitespace so toprettyxml starts from a clean
        # slate and does not double-indent.
        # NOTE(review): ET.tostring(..., 'utf-8') returns bytes on Python 3,
        # where these str replacements would fail -- this assumes Python 2.
        xmlstr = ET.tostring(elem, 'utf-8').replace('\n', '')
        while "> " in xmlstr:
            xmlstr = xmlstr.replace("> ", ">")
        while " <" in xmlstr:
            xmlstr = xmlstr.replace(" <", "<")
        reparsed = minidom.parseString(xmlstr)
        result = reparsed.toprettyxml(indent=' ', newl='\n', encoding="utf-8")
        return result

    def read(self, inputfolder):
        """Collect every *_gamelist.xml file found directly in inputfolder."""
        for f in glob.glob(os.path.join(inputfolder, "*_gamelist.xml")):
            print("Found {}".format(f))
            self._filelist.append(f)

    def write(self, outputfile):
        """Merge the collected files into outputfile, then delete the inputs."""
        # Read
        utree = ET.Element("gameList")
        for f in self._filelist:
            print("Loading {}".format(f))
            # Re-parent every child element of each input's root.
            for se in ET.parse(f).getroot():
                utree.append(se)
        # Write
        print("Writing {}".format(outputfile))
        with open(outputfile, "w") as f:
            f.write(self.prettify(utree).encode("utf-8"))
        # Cleanup
        for f in self._filelist:
            print("Removing {}".format(f))
            os.remove(f)
if __name__ == '__main__':
    # Parse command line
    parser = argparse.ArgumentParser(description='Gamelist merger')
    # Fixed typo in the user-facing help text: "foldet" -> "folder".
    parser.add_argument("-i", "--input", help="Input folder where to collect *_gamelist.xml", type=str, required=True)
    parser.add_argument("-o", "--output", help="Output gamelist file", type=str, required=True)
    args = parser.parse_args()
    # Merge every *_gamelist.xml from the input folder into one file.
    merger = Merger()
    merger.read(args.input)
    merger.write(args.output)
|
[
"xml.etree.ElementTree.parse",
"os.remove",
"argparse.ArgumentParser",
"xml.dom.minidom.parseString",
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.tostring",
"os.path.join"
] |
[((1346, 1400), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Gamelist merger"""'}), "(description='Gamelist merger')\n", (1369, 1400), False, 'import argparse\n'), ((444, 471), 'xml.dom.minidom.parseString', 'minidom.parseString', (['xmlstr'], {}), '(xmlstr)\n', (463, 471), False, 'from xml.dom import minidom\n'), ((822, 844), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""gameList"""'], {}), "('gameList')\n", (832, 844), True, 'import xml.etree.ElementTree as ET\n'), ((634, 677), 'os.path.join', 'os.path.join', (['inputfolder', '"""*_gamelist.xml"""'], {}), "(inputfolder, '*_gamelist.xml')\n", (646, 677), False, 'import os\n'), ((1266, 1278), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1275, 1278), False, 'import os\n'), ((230, 256), 'xml.etree.ElementTree.tostring', 'ET.tostring', (['elem', '"""utf-8"""'], {}), "(elem, 'utf-8')\n", (241, 256), True, 'import xml.etree.ElementTree as ET\n'), ((942, 953), 'xml.etree.ElementTree.parse', 'ET.parse', (['f'], {}), '(f)\n', (950, 953), True, 'import xml.etree.ElementTree as ET\n')]
|
"""Commandline script for docconvert."""
import argparse
import logging
import os
import subprocess
import sys
from six.moves import input
from . import configuration
from . import core
from . import parser
from . import writer
_LOGGER = logging.getLogger(__name__)
def setup_logger(verbose=False):
    """Setup basic logging handler for console feedback.

    Args:
        verbose (bool): If true, sets level of logger to logging.INFO,
            else leaves handler at default of logging.WARNING.
    """
    logging.basicConfig(
        format="%(name)s:%(levelname)s: %(message)s",
        level=logging.INFO if verbose else logging.WARNING,
    )
def is_git_repository(path):
    """Checks if path is in a git repository.

    Args:
        path (str): The path to check.

    Returns:
        bool: Whether the path is a git repository.
    """
    # git expects a directory for cwd; map a file path to its directory.
    directory = os.path.dirname(path) if os.path.isfile(path) else path
    with open(os.devnull, "wb") as devnull:
        process = subprocess.Popen(
            ["git", "rev-parse", "--is-inside-work-tree"],
            cwd=directory,
            stdout=devnull,
            stderr=devnull,
        )
        process.wait()
    # git exits 0 only when the directory is inside a work tree.
    return process.returncode == 0
def run():
    """Parses arguments and calls core convert function."""
    arg_parser = argparse.ArgumentParser(prog="docconvert")
    arg_parser.add_argument("source", help="The directory or file to convert.")
    arg_parser.add_argument(
        "-i",
        "--input",
        help="Input docstring style. (default: guess)",
        type=parser.InputStyle,
        choices=list(parser.InputStyle),
    )
    arg_parser.add_argument(
        "-o",
        "--output",
        help="Output docstring style to convert to. (default: google)",
        type=writer.OutputStyle,
        choices=list(writer.OutputStyle),
    )
    arg_parser.add_argument(
        "--in-place",
        help="Write the changes to the input file instead of printing diffs.",
        action="store_true",
    )
    arg_parser.add_argument(
        "-c", "--config", help="Location of configuration file to use."
    )
    arg_parser.add_argument(
        "-t",
        "--threads",
        type=int,
        default=0,
        help="Number of threads to use. (default: cpu count)",
    )
    arg_parser.add_argument(
        "-v", "--verbose", action="store_true", help="Log more information."
    )
    args = arg_parser.parse_args()
    setup_logger(verbose=args.verbose)
    source = os.path.abspath(os.path.expanduser(args.source))
    if not os.path.exists(source):
        _LOGGER.error("Path does not exist: %s", source)
        return
    # Converting can overwrite files; without git there is no way to undo,
    # so ask for explicit confirmation first.
    if not is_git_repository(source):
        _LOGGER.warning(
            "This directory is not under git control. "
            "Continuing will overwrite files."
        )
        answer = input("Are you sure you would like to proceed? [y/n] ")
        if answer.lower() not in ("y", "yes"):
            _LOGGER.warning("Exiting without converting.")
            return
    config = configuration.DocconvertConfiguration.create_default()
    if args.config:
        config_path = os.path.abspath(os.path.expanduser(args.config))
        if not os.path.exists(config_path):
            _LOGGER.error("Config path does not exist: %s", config_path)
            return
        config.update_from_json(config_path)
    # Override config values if specified directly with flags
    if args.input:
        config.input_style = args.input
    if args.output:
        config.output_style = args.output
    diffs = core.convert(source, args.threads, config, args.in_place)
    # With --in-place the files were already written; otherwise print diffs.
    if diffs and not args.in_place:
        for diff in diffs:
            for line in diff:
                sys.stdout.write(line)


if __name__ == "__main__":
    sys.exit(run())
|
[
"sys.stdout.write",
"subprocess.Popen",
"argparse.ArgumentParser",
"logging.basicConfig",
"six.moves.input",
"os.path.dirname",
"os.path.exists",
"os.path.isfile",
"os.path.expanduser",
"logging.getLogger"
] |
[((243, 270), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (260, 270), False, 'import logging\n'), ((632, 683), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'log_format', 'level': 'level'}), '(format=log_format, level=level)\n', (651, 683), False, 'import logging\n'), ((892, 912), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (906, 912), False, 'import os\n'), ((1317, 1359), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""docconvert"""'}), "(prog='docconvert')\n", (1340, 1359), False, 'import argparse\n'), ((929, 950), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (944, 950), False, 'import os\n'), ((1010, 1119), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'rev-parse', '--is-inside-work-tree']"], {'cwd': 'path', 'stdout': 'devnull', 'stderr': 'devnull'}), "(['git', 'rev-parse', '--is-inside-work-tree'], cwd=path,\n stdout=devnull, stderr=devnull)\n", (1026, 1119), False, 'import subprocess\n'), ((2510, 2541), 'os.path.expanduser', 'os.path.expanduser', (['args.source'], {}), '(args.source)\n', (2528, 2541), False, 'import os\n'), ((2554, 2576), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (2568, 2576), False, 'import os\n'), ((2843, 2898), 'six.moves.input', 'input', (['"""Are you sure you would like to proceed? [y/n] """'], {}), "('Are you sure you would like to proceed? [y/n] ')\n", (2848, 2898), False, 'from six.moves import input\n'), ((3150, 3181), 'os.path.expanduser', 'os.path.expanduser', (['args.config'], {}), '(args.config)\n', (3168, 3181), False, 'import os\n'), ((3198, 3225), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (3212, 3225), False, 'import os\n'), ((3726, 3748), 'sys.stdout.write', 'sys.stdout.write', (['line'], {}), '(line)\n', (3742, 3748), False, 'import sys\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module defines filters for Cell instances
"""
from __future__ import print_function
import copy
import numpy as np
from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND
from tunacell.base.datatools import multiplicative_increments
from tunacell.base.observable import Observable
class FilterCell(FilterGeneral):
    "General class for filtering cell objects (reader.Cell instances)"
    # Discriminator read by the FilterGeneral machinery to tag this as a
    # cell-level filter (as opposed to colony/lineage-level filters).
    _type = 'CELL'
class FilterCellAny(FilterCell):
    """Trivial filter that accepts every cell."""

    def __init__(self):
        # short human-readable description used in filter reports
        self.label = 'Always True'

    def func(self, cell):
        """Accept *cell* unconditionally."""
        return True
class FilterData(FilterCell):
    """Default filter: accept cells that exist and carry non-empty data."""

    def __init__(self):
        self.label = 'Cell Has Data'

    def func(self, cell):
        """Return True when *cell* is not None and its data table is non-empty."""
        if cell is None:
            return False
        return cell.data is not None and len(cell.data) > 0
class FilterCellIDparity(FilterCell):
    """Accept cells whose integer identifier has the requested parity.

    Parameters
    ----------
    parity : str
        either 'even' or 'odd'
    """

    def __init__(self, parity='even'):
        self.parity = parity
        self.label = 'Cell identifier is {}'.format(parity)

    def func(self, cell):
        # The original raised ValueError for a bad parity setting inside the
        # same try block that caught ValueError, swallowing its own error.
        # Keep the external behavior (print message, return False) but
        # narrow the try to the identifier conversion only.
        try:
            even = int(cell.identifier) % 2 == 0
        except ValueError as ve:
            # non-numeric identifier: report and reject the cell
            print(ve)
            return False
        if self.parity == 'even':
            return even
        if self.parity == 'odd':
            return not even
        print(ValueError("Parity must be 'even' or 'odd'"))
        return False
class FilterCellIDbound(FilterCell):
    """Accept cells whose integer identifier lies in [lower_bound, upper_bound)."""

    def __init__(self, lower_bound=None, upper_bound=None):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.label = '{} <= cellID < {}'.format(lower_bound, upper_bound)

    def func(self, cell):
        cell_id = int(cell.identifier)
        return bounded(cell_id, self.lower_bound, self.upper_bound)
class FilterHasParent(FilterCell):
    """Accept cells that have an identified parent cell."""

    def __init__(self):
        self.label = 'Cell Has Parent'

    def func(self, cell):
        # `parent` may be None/empty; coerce truthiness to a plain bool
        return bool(cell.parent)
class FilterDaughters(FilterCell):
    """Accept cells whose daughter count lies within the given bounds."""

    def __init__(self, daughter_min=1, daughter_max=2):
        self.label = ('Number of daughter cell(s): '
                      '{0} <= n_daughters <= {1}'.format(daughter_min,
                                                         daughter_max))
        self.lower_bound = daughter_min
        # bounded() works on a half-open interval, hence the +1
        self.upper_bound = daughter_max + 1

    def func(self, cell):
        return bounded(len(cell.childs),
                       lower_bound=self.lower_bound,
                       upper_bound=self.upper_bound)
class FilterCompleteCycle(FilterCell):
    """Accept cells with a known parent and at least `daughter_min` daughters."""

    def __init__(self, daughter_min=1):
        self.daughter_min = daughter_min
        self.label = ('Cell cycle complete'
                      ' (with at least {} daughter cell(s)'.format(daughter_min))

    def func(self, cell):
        has_parent = FilterHasParent()
        has_daughters = FilterDaughters(daughter_min=self.daughter_min)
        return has_parent(cell) and has_daughters(cell)
class FilterCycleFrames(FilterCell):
    """Accept cells whose number of registered frames lies within bounds."""

    def __init__(self, lower_bound=None, upper_bound=None):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.label = ('Number of registered frames:'
                      '{0} <= n_frames <= {1}'.format(lower_bound, upper_bound))

    def func(self, cell):
        # a cell without data cannot satisfy any frame-count bound
        if not FilterData().func(cell):
            return False
        return bounded(len(cell.data),
                       lower_bound=self.lower_bound,
                       upper_bound=self.upper_bound)
class FilterCycleSpanIncluded(FilterCell):
    """Accept cells whose acquisition times all fall within valid bounds."""

    def __init__(self, lower_bound=None, upper_bound=None):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.label = '{} <= Initial frame and Final frame < {}'.format(
            lower_bound, upper_bound)

    def func(self, cell):
        if not FilterData()(cell):
            return False
        return included(cell.data['time'],
                        lower_bound=self.lower_bound,
                        upper_bound=self.upper_bound)
class FilterTimeInCycle(FilterCell):
    """Accept cells whose cycle interval [birth, division) contains tref."""

    def __init__(self, tref=0.):
        self.tref = tref
        self.label = 'birth/first time <= {} < division/last time'.format(tref)

    def func(self, cell):
        if not FilterData()(cell):
            return False
        # fall back on first/last acquisition times when the birth or
        # division event is not recorded
        if cell.birth_time is not None:
            start = cell.birth_time
        else:
            start = cell.data['time'][0]
        if cell.division_time is not None:
            stop = cell.division_time
        else:
            stop = cell.data['time'][-1]
        return start <= self.tref < stop
class FilterObservableBound(FilterCell):
    """Check that a given observable is bounded.

    Parameters
    ----------
    obs : Observable instance
        observable that will be tested for bounds
        works only for continuous observable (mode='dynamics')
    tref : float (default None)
        Time of reference at which to test dynamics observable value
    lower_bound : float (default None)
    upper_bound : float (default None)
    """

    def __init__(self, obs=Observable(name='undefined'), tref=None,
                 lower_bound=None, upper_bound=None):
        self.obs_to_test = obs  # observable to be tested
        self._obs = [obs, ]  # hidden: to be computed for filtering purpose
        self.tref = tref
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        label = '{} <= {}'.format(lower_bound, obs.name)
        if tref is not None:
            label += ' (t={})'.format(tref)
        label += ' < {}'.format(upper_bound)
        self.label = label

    def func(self, cell):
        # `collections.Iterable` was removed in Python 3.10; import from
        # collections.abc, falling back for legacy interpreters.
        try:
            from collections.abc import Iterable
        except ImportError:  # pragma: no cover - very old Python
            from collections import Iterable
        boo = False
        if self.tref is not None:
            # tref given: the cell cycle must also contain tref
            filt = FilterAND(FilterData(),
                             FilterTimeInCycle(tref=self.tref))
        else:
            filt = FilterData()
        label = self.obs_to_test.label
        if filt(cell):
            # retrieve data; two cases: array, or single value
            array = cell._sdata[label]
            raw_time = cell.data['time']
            if len(raw_time) > 1:
                # smallest acquisition interval: tolerance for matching tref
                dt = np.amin(raw_time[1:] - raw_time[:-1])
            else:
                dt = cell.container.period
            if array is None:
                return False
            if isinstance(array, Iterable):
                if self.tref is None:
                    # data may be one value (cycle observables), or an array
                    boo = bounded(array[label], self.lower_bound, self.upper_bound)
                else:
                    # pick the sample closest to tref
                    index = np.argmin(np.abs(array['time'] - self.tref))
                    # accept only if that sample is really close to tref
                    if np.abs(array['time'][index] - self.tref) < dt:
                        value = array[label][index]
                        boo = bounded(value, self.lower_bound, self.upper_bound)
            else:
                # otherwise it's a plain number
                boo = bounded(array, self.lower_bound, self.upper_bound)
        return boo
# useless ?
class FilterLengthIncrement(FilterCell):
    """Accept cells whose frame-to-frame length increments stay within bounds."""

    def __init__(self, lower_bound=None, upper_bound=None):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        label = 'Length increments between two successive frames: '
        label += '{0} <= delta_length <= {1}'.format(self.lower_bound,
                                                     self.upper_bound)
        # bug fix: the label was built but never stored on the instance
        self.label = label

    def func(self, cell):
        if not FilterData()(cell):
            return False
        lengths = np.array(cell.data['length'])
        increments = multiplicative_increments(lengths)
        # both the smallest and the largest increment must be in range
        return (bounded(np.amin(increments), lower_bound=self.lower_bound) and
                bounded(np.amax(increments), upper_bound=self.upper_bound))
class FilterSymmetricDivision(FilterCell):
    """Check that cell division is (roughly) symmetric.

    Parameters
    ----------
    raw : str
        column label of raw observable to test for symmetric division
        (usually one of 'length', 'area'). This quantity will be approximated
    """

    def __init__(self, raw='area', lower_bound=0.4, upper_bound=0.6):
        self.raw = raw
        # Observables to be computed: raw at birth and raw at division.
        # Hidden _obs because not part of parameters, but should be computed.
        self._obs = [Observable(raw=raw, scale='log', mode='birth', timing='b'),
                     Observable(raw=raw, scale='log', mode='division',
                                timing='d')]
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        label = 'Symmetric division filter:'
        ratio_str = '(daughter cell {})/(mother cell {})'.format(raw, raw)
        label += ' {} <= {} <= {}'.format(self.lower_bound,
                                          ratio_str,
                                          self.upper_bound)
        self.label = label

    def func(self, cell):
        boo = False
        filtData = FilterData()
        if cell.parent is None:
            # birth is not reported: impossible to test, cannot exclude
            boo = True
        else:
            if filtData(cell):
                csize = cell._sdata[self._obs[0].label]
                if filtData(cell.parent):
                    psize = cell.parent._sdata[self._obs[1].label]
                    boo = bounded(csize / psize,
                                  lower_bound=self.lower_bound,
                                  upper_bound=self.upper_bound)
                else:
                    # parent exists but without data: a weird scenario that
                    # should not happen (TODO: warn user?); compare against
                    # the sibling cell instead.
                    # build the sibling list without mutating while iterating
                    sibs = [item for item in copy.deepcopy(cell.parent.childs)
                            if item.identifier != cell.identifier]
                    if sibs:
                        if len(sibs) > 1:
                            from ..base.cell import CellChildsError
                            raise CellChildsError('>2 daughters')
                        sib = sibs[0]  # there should be only one cell
                        if sib.data is not None:
                            # bug fix: .label is an attribute, not a method
                            sibsize = sib._sdata[self._obs[0].label]
                            boo = bounded(csize / sibsize,
                                          lower_bound=self.lower_bound,
                                          upper_bound=self.upper_bound)
                        else:
                            # sibling cell has no data: accept this cell
                            boo = True
                    else:
                        boo = True  # no sibling, accept this cell
        return boo
|
[
"tunacell.filters.main.included",
"copy.deepcopy",
"numpy.abs",
"numpy.amin",
"tunacell.base.datatools.multiplicative_increments",
"tunacell.filters.main.bounded",
"tunacell.base.observable.Observable",
"numpy.amax",
"numpy.array"
] |
[((6363, 6391), 'tunacell.base.observable.Observable', 'Observable', ([], {'name': '"""undefined"""'}), "(name='undefined')\n", (6373, 6391), False, 'from tunacell.base.observable import Observable\n'), ((4970, 5062), 'tunacell.filters.main.included', 'included', (["cell.data['time']"], {'lower_bound': 'self.lower_bound', 'upper_bound': 'self.upper_bound'}), "(cell.data['time'], lower_bound=self.lower_bound, upper_bound=self.\n upper_bound)\n", (4978, 5062), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n'), ((9303, 9332), 'numpy.array', 'np.array', (["cell.data['length']"], {}), "(cell.data['length'])\n", (9311, 9332), True, 'import numpy as np\n'), ((9352, 9382), 'tunacell.base.datatools.multiplicative_increments', 'multiplicative_increments', (['ell'], {}), '(ell)\n', (9377, 9382), False, 'from tunacell.base.datatools import multiplicative_increments\n'), ((10143, 10201), 'tunacell.base.observable.Observable', 'Observable', ([], {'raw': 'raw', 'scale': '"""log"""', 'mode': '"""birth"""', 'timing': '"""b"""'}), "(raw=raw, scale='log', mode='birth', timing='b')\n", (10153, 10201), False, 'from tunacell.base.observable import Observable\n'), ((10224, 10285), 'tunacell.base.observable.Observable', 'Observable', ([], {'raw': 'raw', 'scale': '"""log"""', 'mode': '"""division"""', 'timing': '"""d"""'}), "(raw=raw, scale='log', mode='division', timing='d')\n", (10234, 10285), False, 'from tunacell.base.observable import Observable\n'), ((7679, 7716), 'numpy.amin', 'np.amin', (['(raw_time[1:] - raw_time[:-1])'], {}), '(raw_time[1:] - raw_time[:-1])\n', (7686, 7716), True, 'import numpy as np\n'), ((8654, 8704), 'tunacell.filters.main.bounded', 'bounded', (['array', 'self.lower_bound', 'self.upper_bound'], {}), '(array, self.lower_bound, self.upper_bound)\n', (8661, 8704), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n'), ((9411, 9424), 'numpy.amin', 'np.amin', (['incr'], {}), '(incr)\n', 
(9418, 9424), True, 'import numpy as np\n'), ((9484, 9497), 'numpy.amax', 'np.amax', (['incr'], {}), '(incr)\n', (9491, 9497), True, 'import numpy as np\n'), ((8035, 8092), 'tunacell.filters.main.bounded', 'bounded', (['array[label]', 'self.lower_bound', 'self.upper_bound'], {}), '(array[label], self.lower_bound, self.upper_bound)\n', (8042, 8092), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n'), ((11446, 11533), 'tunacell.filters.main.bounded', 'bounded', (['(csize / psize)'], {'lower_bound': 'self.lower_bound', 'upper_bound': 'self.upper_bound'}), '(csize / psize, lower_bound=self.lower_bound, upper_bound=self.\n upper_bound)\n', (11453, 11533), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n'), ((11895, 11928), 'copy.deepcopy', 'copy.deepcopy', (['cell.parent.childs'], {}), '(cell.parent.childs)\n', (11908, 11928), False, 'import copy\n'), ((8286, 8319), 'numpy.abs', 'np.abs', (["(array['time'] - self.tref)"], {}), "(array['time'] - self.tref)\n", (8292, 8319), True, 'import numpy as np\n'), ((8396, 8436), 'numpy.abs', 'np.abs', (["(array['time'][index] - self.tref)"], {}), "(array['time'][index] - self.tref)\n", (8402, 8436), True, 'import numpy as np\n'), ((8525, 8575), 'tunacell.filters.main.bounded', 'bounded', (['value', 'self.lower_bound', 'self.upper_bound'], {}), '(value, self.lower_bound, self.upper_bound)\n', (8532, 8575), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n'), ((12506, 12595), 'tunacell.filters.main.bounded', 'bounded', (['(csize / sibsize)'], {'lower_bound': 'self.lower_bound', 'upper_bound': 'self.upper_bound'}), '(csize / sibsize, lower_bound=self.lower_bound, upper_bound=self.\n upper_bound)\n', (12513, 12595), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n')]
|
import json
import logging
from aiohttp import web
log = logging.getLogger(__name__)
class AuthorizeView(web.View):
    """Log incoming authorize requests and reply with a greeting."""

    async def get(self):
        log.info('Requested URL: %s', self.request.path_qs)
        log.info('Requested Remote: %s', self.request.remote)
        if self.request.can_read_body:
            # bug fix: request.json() already returns the decoded object;
            # feeding it back into json.loads() raises TypeError on a dict.
            parsed = await self.request.json()
            dump = json.dumps(parsed, indent=2)
            log.info('JSON body: %s', dump)
        return web.Response(text="Welcome")
class TokenView(web.View):
    """Log incoming token requests and reply with a greeting."""

    async def get(self):
        log.info('Requested URL: %s', self.request.path_qs)
        log.info('Requested Remote: %s', self.request.remote)
        if self.request.can_read_body:
            # bug fix: request.json() already returns the decoded object;
            # feeding it back into json.loads() raises TypeError on a dict.
            parsed = await self.request.json()
            dump = json.dumps(parsed, indent=2)
            log.info('JSON body: %s', dump)
        return web.Response(text="Welcome")
|
[
"aiohttp.web.Response",
"json.loads",
"logging.getLogger",
"json.dumps"
] |
[((60, 87), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (77, 87), False, 'import logging\n'), ((497, 525), 'aiohttp.web.Response', 'web.Response', ([], {'text': '"""Welcome"""'}), "(text='Welcome')\n", (509, 525), False, 'from aiohttp import web\n'), ((931, 959), 'aiohttp.web.Response', 'web.Response', ([], {'text': '"""Welcome"""'}), "(text='Welcome')\n", (943, 959), False, 'from aiohttp import web\n'), ((373, 389), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (383, 389), False, 'import json\n'), ((409, 437), 'json.dumps', 'json.dumps', (['parsed'], {'indent': '(2)'}), '(parsed, indent=2)\n', (419, 437), False, 'import json\n'), ((807, 823), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (817, 823), False, 'import json\n'), ((843, 871), 'json.dumps', 'json.dumps', (['parsed'], {'indent': '(2)'}), '(parsed, indent=2)\n', (853, 871), False, 'import json\n')]
|
import torch
import torch.distributed as dist
from common import distributed_test
import pytest
@distributed_test(world_size=3)
def test_init():
    """Sanity-check that the helper spins up a 3-rank process group."""
    assert dist.is_initialized()
    world = dist.get_world_size()
    rank = dist.get_rank()
    assert world == 3
    assert rank < world
# Demonstration of pytest's parameterization
@pytest.mark.parametrize('number,color', [(1138, 'purple')])
def test_dist_args(number, color):
    """Outer test function with inputs from pytest.mark.parametrize(). Uses a distributed
    helper function.
    """

    @distributed_test(world_size=2)
    def _check_args(x, color='red'):
        # runs on every rank of the 2-process group
        assert dist.get_world_size() == 2
        assert x == 1138
        assert color == 'purple'

    # Ensure that we can parse args to distributed_test decorated functions.
    _check_args(number, color=color)
@distributed_test(world_size=[1, 2, 4])
def test_dist_allreduce():
    """all_reduce of (rank+1)-valued tensors equals the sum 1+2+...+world."""
    rank = dist.get_rank()
    world = dist.get_world_size()
    x = torch.ones(1, 3).cuda() * (rank + 1)
    total = world * (world + 1) // 2
    expected = torch.ones(1, 3).cuda() * total
    dist.all_reduce(x)
    assert torch.all(x == expected)
|
[
"torch.distributed.is_initialized",
"torch.distributed.all_reduce",
"torch.ones",
"torch.distributed.get_rank",
"common.distributed_test",
"torch.distributed.get_world_size",
"pytest.mark.parametrize",
"torch.all"
] |
[((101, 131), 'common.distributed_test', 'distributed_test', ([], {'world_size': '(3)'}), '(world_size=3)\n', (117, 131), False, 'from common import distributed_test\n'), ((299, 358), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number,color"""', "[(1138, 'purple')]"], {}), "('number,color', [(1138, 'purple')])\n", (322, 358), False, 'import pytest\n'), ((831, 869), 'common.distributed_test', 'distributed_test', ([], {'world_size': '[1, 2, 4]'}), '(world_size=[1, 2, 4])\n', (847, 869), False, 'from common import distributed_test\n'), ((160, 181), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (179, 181), True, 'import torch.distributed as dist\n'), ((518, 548), 'common.distributed_test', 'distributed_test', ([], {'world_size': '(2)'}), '(world_size=2)\n', (534, 548), False, 'from common import distributed_test\n'), ((1087, 1105), 'torch.distributed.all_reduce', 'dist.all_reduce', (['x'], {}), '(x)\n', (1102, 1105), True, 'import torch.distributed as dist\n'), ((1117, 1139), 'torch.all', 'torch.all', (['(x == result)'], {}), '(x == result)\n', (1126, 1139), False, 'import torch\n'), ((193, 214), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (212, 214), True, 'import torch.distributed as dist\n'), ((231, 246), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (244, 246), True, 'import torch.distributed as dist\n'), ((612, 633), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (631, 633), True, 'import torch.distributed as dist\n'), ((932, 947), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (945, 947), True, 'import torch.distributed as dist\n'), ((973, 994), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (992, 994), True, 'import torch.distributed as dist\n'), ((905, 921), 'torch.ones', 'torch.ones', (['(1)', '(3)'], {}), '(1, 3)\n', (915, 921), False, 'import torch\n'), ((998, 1019), 
'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (1017, 1019), True, 'import torch.distributed as dist\n'), ((1044, 1060), 'torch.ones', 'torch.ones', (['(1)', '(3)'], {}), '(1, 3)\n', (1054, 1060), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
#############################################
## (C)opyright by <NAME> ##
## All rights reserved ##
#############################################
__version__ = "$Revision: 176 $"
__author__ = "$Author: kgrodzicki $"
__date__ = "$Date: 2011-01-15 10:11:47 +0100 (Fr, 15 July 2011) $"
"""
HTML/CSS to PDF converter
Test background image generation on the `portrait` and `landscape`
page.
"""
from cookbook import HTML2PDF
import sys, os
if __name__ == "__main__":
xhtml = open(sys.argv[1])
try:
filename = sys.argv[2]
if not len(filename):
raise Exception
except:
filename = sys.argv[1].split('.')[0] + '.pdf'
HTML2PDF(xhtml.read(), filename)
os.remove(sys.argv[1])
# def render(html_fn, filename):
# #print html_fn
# #xhtml = open(html_fn)
# #print xhtml.read()
# HTML2PDF(html_fn, filename)
|
[
"os.remove"
] |
[((746, 768), 'os.remove', 'os.remove', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (755, 768), False, 'import sys, os\n')]
|
import yaml
def read_creds(conf_path):
    """Load credentials/configuration from a YAML file.

    Parameters
    ----------
    conf_path : str
        Path to the YAML configuration file.

    Returns
    -------
    The deserialized YAML document (typically a dict).
    """
    with open(conf_path, 'r') as cf:
        # safe_load refuses arbitrary Python object construction, which is
        # the recommended loader for a credentials file
        config = yaml.safe_load(cf)
    return config
|
[
"yaml.load"
] |
[((94, 131), 'yaml.load', 'yaml.load', (['cf'], {'Loader': 'yaml.FullLoader'}), '(cf, Loader=yaml.FullLoader)\n', (103, 131), False, 'import yaml\n')]
|
from setuptools import setup, find_packages
setup(
    name='fate',
    version='1.0',
    packages=find_packages(),
)
|
[
"setuptools.find_packages"
] |
[((88, 103), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (101, 103), False, 'from setuptools import setup, find_packages\n')]
|
import logging
from core.authentication import VKAuthentication
from core.models import Boec, UserApply
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, status, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from reversion.views import RevisionMixin
from .serializers import ApplySerializer
logger = logging.getLogger(__name__)
class UserApplyView(viewsets.ViewSet):
    """Manage user applies."""

    authentication_classes = (VKAuthentication,)
    permission_classes = (IsAuthenticated,)

    @action(
        methods=["get"],
        detail=False,
    )
    def get_own_apply(self, request):
        """Return 200 if an apply exists for the requesting VK user, else 404."""
        try:
            # existence check only; the apply itself is not serialized
            UserApply.objects.get(vk_id=request.user.vk_id)
            return Response({}, status=status.HTTP_200_OK)
        except UserApply.DoesNotExist:
            # plain literal (not an f-string) so gettext can extract it
            msg = _("User apply doesn't exist")
            return Response({"error": msg}, status=status.HTTP_404_NOT_FOUND)

    @action(
        methods=["post"],
        detail=False,
    )
    def apply(self, request):
        """Validate and persist a user apply, mirroring it into a Boec record."""
        serializer = ApplySerializer(data=request.data)
        if not serializer.is_valid():
            # use the module logger instead of print() for server-side logs
            logger.warning("Apply validation failed: %s", serializer.errors)
            return Response({"error": "Validation"}, status=status.HTTP_400_BAD_REQUEST)
        instance = serializer.save()
        Boec.objects.create(
            first_name=instance.first_name,
            last_name=instance.last_name,
            middle_name=instance.middle_name,
            date_of_birth=instance.date_of_birth,
            vk_id=instance.vk_id,
            for_development=True,
        )
        return Response(serializer.data)
|
[
"core.models.UserApply.objects.get",
"rest_framework.response.Response",
"rest_framework.decorators.action",
"core.models.Boec.objects.create",
"django.utils.translation.ugettext_lazy",
"logging.getLogger"
] |
[((457, 484), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (474, 484), False, 'import logging\n'), ((651, 688), 'rest_framework.decorators.action', 'action', ([], {'methods': "['get']", 'detail': '(False)'}), "(methods=['get'], detail=False)\n", (657, 688), False, 'from rest_framework.decorators import action\n'), ((1063, 1101), 'rest_framework.decorators.action', 'action', ([], {'methods': "['post']", 'detail': '(False)'}), "(methods=['post'], detail=False)\n", (1069, 1101), False, 'from rest_framework.decorators import action\n'), ((783, 830), 'core.models.UserApply.objects.get', 'UserApply.objects.get', ([], {'vk_id': 'request.user.vk_id'}), '(vk_id=request.user.vk_id)\n', (804, 830), False, 'from core.models import Boec, UserApply\n'), ((850, 889), 'rest_framework.response.Response', 'Response', (['{}'], {'status': 'status.HTTP_200_OK'}), '({}, status=status.HTTP_200_OK)\n', (858, 889), False, 'from rest_framework.response import Response\n'), ((1299, 1506), 'core.models.Boec.objects.create', 'Boec.objects.create', ([], {'first_name': 'instance.first_name', 'last_name': 'instance.last_name', 'middle_name': 'instance.middle_name', 'date_of_birth': 'instance.date_of_birth', 'vk_id': 'instance.vk_id', 'for_development': '(True)'}), '(first_name=instance.first_name, last_name=instance.\n last_name, middle_name=instance.middle_name, date_of_birth=instance.\n date_of_birth, vk_id=instance.vk_id, for_development=True)\n', (1318, 1506), False, 'from core.models import Boec, UserApply\n'), ((1627, 1652), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1635, 1652), False, 'from rest_framework.response import Response\n'), ((1724, 1793), 'rest_framework.response.Response', 'Response', (["{'error': 'Validation'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'error': 'Validation'}, status=status.HTTP_400_BAD_REQUEST)\n", (1732, 1793), False, 'from rest_framework.response import 
Response\n'), ((948, 978), 'django.utils.translation.ugettext_lazy', '_', (['f"""User apply doesn\'t exist"""'], {}), '(f"User apply doesn\'t exist")\n', (949, 978), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((998, 1056), 'rest_framework.response.Response', 'Response', (["{'error': msg}"], {'status': 'status.HTTP_404_NOT_FOUND'}), "({'error': msg}, status=status.HTTP_404_NOT_FOUND)\n", (1006, 1056), False, 'from rest_framework.response import Response\n')]
|
import errno
import pytest
from tempfile import TemporaryDirectory
from unittest.mock import patch
import docker
import escapism
from repo2docker.app import Repo2Docker
from repo2docker.__main__ import make_r2d
from repo2docker.utils import chdir
def test_find_image():
    """find_image() returns True when the exact image tag already exists."""
    fake_images = [{"RepoTags": ["some-org/some-repo:latest"]}]
    with patch("repo2docker.docker.docker.APIClient") as FakeDockerClient:
        client = FakeDockerClient.return_value
        client.images.return_value = fake_images
        r2d = Repo2Docker()
        r2d.output_image_spec = "some-org/some-repo"
        assert r2d.find_image()
        client.images.assert_called_with()
def test_dont_find_image():
    """find_image() returns False when only other image tags are present."""
    fake_images = [{"RepoTags": ["some-org/some-image-name:latest"]}]
    with patch("repo2docker.docker.docker.APIClient") as FakeDockerClient:
        client = FakeDockerClient.return_value
        client.images.return_value = fake_images
        r2d = Repo2Docker()
        r2d.output_image_spec = "some-org/some-other-image-name"
        assert not r2d.find_image()
        client.images.assert_called_with()
def test_image_name_remains_unchanged():
    """An explicitly supplied --image-name must be used verbatim."""
    with TemporaryDirectory() as src:
        # the original built a throwaway Repo2Docker() that was immediately
        # overwritten; make_r2d() is the only constructor needed here
        app = make_r2d(["--image-name", "a-special-name", "--no-build", src])
        app.start()
        assert app.output_image_spec == "a-special-name"
def test_image_name_contains_sha1(repo_with_content):
    """Git-sourced builds embed the commit SHA1 prefix in the image name."""
    upstream, sha1 = repo_with_content
    # force selection of the git content provider by prefixing path with
    # file://. This is important as the Local content provider does not
    # store the SHA1 in the repo spec.
    # (dropped the unused Repo2Docker() instance the original created)
    app = make_r2d(["--no-build", "file://" + upstream])
    app.start()
    assert app.output_image_spec.endswith(sha1[:7])
def test_local_dir_image_name(repo_with_content):
    """Local-dir builds derive the image name from the escaped source path."""
    upstream, sha1 = repo_with_content
    # (dropped the unused Repo2Docker() instance the original created)
    app = make_r2d(["--no-build", upstream])
    app.start()
    assert app.output_image_spec.startswith(
        "r2d" + escapism.escape(upstream, escape_char="-").lower()
    )
def test_build_kwargs(repo_with_content):
    """extra_build_kwargs must be forwarded to docker's build() call."""
    upstream, _sha1 = repo_with_content
    app = make_r2d([upstream])
    app.extra_build_kwargs = {"somekey": "somevalue"}
    with patch.object(docker.APIClient, "build") as fake_build:
        fake_build.return_value = []
        app.build()
    fake_build.assert_called_once()
    _args, kwargs = fake_build.call_args
    assert kwargs.get("somekey") == "somevalue"
def test_run_kwargs(repo_with_content):
    """extra_run_kwargs must be forwarded to the container run() call."""
    upstream, _sha1 = repo_with_content
    app = make_r2d([upstream])
    app.extra_run_kwargs = {"somekey": "somevalue"}
    with patch.object(docker.DockerClient, "containers") as fake_containers:
        app.start_container()
    fake_containers.run.assert_called_once()
    _args, kwargs = fake_containers.run.call_args
    assert kwargs.get("somekey") == "somevalue"
def test_root_not_allowed():
    """Building as uid 0 must be refused unless a non-root user is given."""
    with TemporaryDirectory() as src, patch("os.geteuid") as geteuid:
        geteuid.return_value = 0  # pretend to be root
        with pytest.raises(SystemExit) as exc:
            make_r2d([src])
        # bug fix: the assertion previously sat *inside* the raises block
        # after the raising call, so it never executed; also the exit code
        # lives on exc.value, not on the ExceptionInfo itself
        assert exc.value.code == 1

        with pytest.raises(ValueError):
            app = Repo2Docker(repo=src, run=False)
            app.build()

        # an explicit non-root user makes the build acceptable
        app = Repo2Docker(repo=src, user_id=1000, user_name="jovyan", run=False)
        app.initialize()
        with patch.object(docker.APIClient, "build") as builds:
            builds.return_value = []
            app.build()
        builds.assert_called_once()
def test_dryrun_works_without_docker(tmpdir, capsys):
    """--dry-run must succeed even when no Docker daemon is reachable."""
    with chdir(tmpdir):
        with patch.object(docker, "APIClient") as client:
            client.side_effect = docker.errors.DockerException("Error: no Docker")
            Repo2Docker(dry_run=True).build()
        captured = capsys.readouterr()
        assert "Error: no Docker" not in captured.err
def test_error_log_without_docker(tmpdir, capsys):
    """Without --dry-run, a missing Docker daemon aborts and logs the error."""
    with chdir(tmpdir):
        with patch.object(docker, "APIClient") as client:
            client.side_effect = docker.errors.DockerException("Error: no Docker")
            app = Repo2Docker()
            with pytest.raises(SystemExit):
                app.build()
        captured = capsys.readouterr()
        assert "Error: no Docker" in captured.err
|
[
"unittest.mock.patch.object",
"tempfile.TemporaryDirectory",
"repo2docker.__main__.make_r2d",
"unittest.mock.patch",
"pytest.raises",
"repo2docker.utils.chdir",
"docker.errors.DockerException",
"escapism.escape",
"repo2docker.app.Repo2Docker"
] |
[((1560, 1573), 'repo2docker.app.Repo2Docker', 'Repo2Docker', ([], {}), '()\n', (1571, 1573), False, 'from repo2docker.app import Repo2Docker\n'), ((1815, 1829), 'repo2docker.__main__.make_r2d', 'make_r2d', (['argv'], {}), '(argv)\n', (1823, 1829), False, 'from repo2docker.__main__ import make_r2d\n'), ((2001, 2014), 'repo2docker.app.Repo2Docker', 'Repo2Docker', ([], {}), '()\n', (2012, 2014), False, 'from repo2docker.app import Repo2Docker\n'), ((2061, 2075), 'repo2docker.__main__.make_r2d', 'make_r2d', (['argv'], {}), '(argv)\n', (2069, 2075), False, 'from repo2docker.__main__ import make_r2d\n'), ((2327, 2341), 'repo2docker.__main__.make_r2d', 'make_r2d', (['argv'], {}), '(argv)\n', (2335, 2341), False, 'from repo2docker.__main__ import make_r2d\n'), ((2766, 2780), 'repo2docker.__main__.make_r2d', 'make_r2d', (['argv'], {}), '(argv)\n', (2774, 2780), False, 'from repo2docker.__main__ import make_r2d\n'), ((343, 387), 'unittest.mock.patch', 'patch', (['"""repo2docker.docker.docker.APIClient"""'], {}), "('repo2docker.docker.docker.APIClient')\n", (348, 387), False, 'from unittest.mock import patch\n'), ((519, 532), 'repo2docker.app.Repo2Docker', 'Repo2Docker', ([], {}), '()\n', (530, 532), False, 'from repo2docker.app import Repo2Docker\n'), ((769, 813), 'unittest.mock.patch', 'patch', (['"""repo2docker.docker.docker.APIClient"""'], {}), "('repo2docker.docker.docker.APIClient')\n", (774, 813), False, 'from unittest.mock import patch\n'), ((945, 958), 'repo2docker.app.Repo2Docker', 'Repo2Docker', ([], {}), '()\n', (956, 958), False, 'from repo2docker.app import Repo2Docker\n'), ((1221, 1241), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (1239, 1241), False, 'from tempfile import TemporaryDirectory\n'), ((1264, 1277), 'repo2docker.app.Repo2Docker', 'Repo2Docker', ([], {}), '()\n', (1275, 1277), False, 'from repo2docker.app import Repo2Docker\n'), ((1361, 1375), 'repo2docker.__main__.make_r2d', 'make_r2d', (['argv'], {}), '(argv)\n', (1369, 
1375), False, 'from repo2docker.__main__ import make_r2d\n'), ((2406, 2445), 'unittest.mock.patch.object', 'patch.object', (['docker.APIClient', '"""build"""'], {}), "(docker.APIClient, 'build')\n", (2418, 2445), False, 'from unittest.mock import patch\n'), ((2843, 2890), 'unittest.mock.patch.object', 'patch.object', (['docker.DockerClient', '"""containers"""'], {}), "(docker.DockerClient, 'containers')\n", (2855, 2890), False, 'from unittest.mock import patch\n'), ((3135, 3155), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (3153, 3155), False, 'from tempfile import TemporaryDirectory\n'), ((3164, 3183), 'unittest.mock.patch', 'patch', (['"""os.geteuid"""'], {}), "('os.geteuid')\n", (3169, 3183), False, 'from unittest.mock import patch\n'), ((3494, 3560), 'repo2docker.app.Repo2Docker', 'Repo2Docker', ([], {'repo': 'src', 'user_id': '(1000)', 'user_name': '"""jovyan"""', 'run': '(False)'}), "(repo=src, user_id=1000, user_name='jovyan', run=False)\n", (3505, 3560), False, 'from repo2docker.app import Repo2Docker\n'), ((3812, 3825), 'repo2docker.utils.chdir', 'chdir', (['tmpdir'], {}), '(tmpdir)\n', (3817, 3825), False, 'from repo2docker.utils import chdir\n'), ((4199, 4212), 'repo2docker.utils.chdir', 'chdir', (['tmpdir'], {}), '(tmpdir)\n', (4204, 4212), False, 'from repo2docker.utils import chdir\n'), ((3263, 3288), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (3276, 3288), False, 'import pytest\n'), ((3315, 3329), 'repo2docker.__main__.make_r2d', 'make_r2d', (['argv'], {}), '(argv)\n', (3323, 3329), False, 'from repo2docker.__main__ import make_r2d\n'), ((3377, 3402), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3390, 3402), False, 'import pytest\n'), ((3422, 3454), 'repo2docker.app.Repo2Docker', 'Repo2Docker', ([], {'repo': 'src', 'run': '(False)'}), '(repo=src, run=False)\n', (3433, 3454), False, 'from repo2docker.app import Repo2Docker\n'), ((3599, 3638), 
'unittest.mock.patch.object', 'patch.object', (['docker.APIClient', '"""build"""'], {}), "(docker.APIClient, 'build')\n", (3611, 3638), False, 'from unittest.mock import patch\n'), ((3840, 3873), 'unittest.mock.patch.object', 'patch.object', (['docker', '"""APIClient"""'], {}), "(docker, 'APIClient')\n", (3852, 3873), False, 'from unittest.mock import patch\n'), ((3918, 3967), 'docker.errors.DockerException', 'docker.errors.DockerException', (['"""Error: no Docker"""'], {}), "('Error: no Docker')\n", (3947, 3967), False, 'import docker\n'), ((3986, 4011), 'repo2docker.app.Repo2Docker', 'Repo2Docker', ([], {'dry_run': '(True)'}), '(dry_run=True)\n', (3997, 4011), False, 'from repo2docker.app import Repo2Docker\n'), ((4227, 4260), 'unittest.mock.patch.object', 'patch.object', (['docker', '"""APIClient"""'], {}), "(docker, 'APIClient')\n", (4239, 4260), False, 'from unittest.mock import patch\n'), ((4305, 4354), 'docker.errors.DockerException', 'docker.errors.DockerException', (['"""Error: no Docker"""'], {}), "('Error: no Docker')\n", (4334, 4354), False, 'import docker\n'), ((4373, 4386), 'repo2docker.app.Repo2Docker', 'Repo2Docker', ([], {}), '()\n', (4384, 4386), False, 'from repo2docker.app import Repo2Docker\n'), ((4405, 4430), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (4418, 4430), False, 'import pytest\n'), ((2155, 2197), 'escapism.escape', 'escapism.escape', (['upstream'], {'escape_char': '"""-"""'}), "(upstream, escape_char='-')\n", (2170, 2197), False, 'import escapism\n')]
|
from nose.tools import istest, assert_equal
from mammoth.html_generation import HtmlGenerator, satisfy_html_path
from mammoth import html_paths
@istest
def generates_empty_string_when_newly_created():
    """A freshly-constructed generator renders to an empty string."""
    gen = HtmlGenerator()
    assert_equal("", gen.html_string())
@istest
def html_escapes_text():
    """Text content is HTML-escaped when rendered.

    Bug fix: the assertion previously compared against the raw "<"
    character, which contradicts the purpose of this test (the
    generator escapes text nodes); the expected output is the
    escaped entity.
    """
    generator = HtmlGenerator()
    generator.text("<")
    assert_equal("&lt;", generator.html_string())
@istest
def self_closing_tag_is_self_closing():
    """self_closing() emits an XML-style self-closing tag."""
    gen = HtmlGenerator()
    gen.self_closing("br")
    assert_equal("<br />", gen.html_string())
@istest
def all_elements_are_closed_by_end_all():
    """end_all() closes every still-open element, innermost first."""
    gen = HtmlGenerator()
    gen.start("p")
    gen.start("span")
    gen.text("Hello!")
    gen.end_all()
    assert_equal("<p><span>Hello!</span></p>", gen.html_string())
@istest
def elements_with_no_text_are_not_generator():
    """Opened elements that never receive text produce no output."""
    gen = HtmlGenerator()
    gen.start("p")
    gen.start("span")
    gen.end_all()
    assert_equal("", gen.html_string())
@istest
def elements_with_empty_string_text_are_not_generator():
    """Empty-string text counts as no text: nothing is emitted."""
    gen = HtmlGenerator()
    gen.start("p")
    gen.start("span")
    gen.text("")
    gen.end_all()
    assert_equal("", gen.html_string())
@istest
def self_closing_tag_can_have_attributes():
    """Attributes passed to self_closing() appear in the emitted tag."""
    gen = HtmlGenerator()
    gen.self_closing("br", {"data-blah": "42"})
    assert_equal('<br data-blah="42" />', gen.html_string())
@istest
def attribute_values_are_escaped():
    """Attribute values are HTML-escaped when rendered.

    Bug fix: the assertion previously expected the raw "<" character
    inside the attribute value, contradicting the purpose of this
    test; the expected output is the escaped entity.
    """
    generator = HtmlGenerator()
    generator.self_closing("br", {"data-blah": "<"})
    assert_equal('<br data-blah="&lt;" />', generator.html_string())
@istest
def opening_tag_can_have_attributes():
    """Attributes passed to start() appear on the opening tag."""
    gen = HtmlGenerator()
    gen.start("p", {"data-blah": "42"})
    gen.text("Hello!")
    gen.end()
    assert_equal('<p data-blah="42">Hello!</p>', gen.html_string())
@istest
def appending_another_html_generator_does_nothing_if_empty():
    """Appending an empty generator contributes no output."""
    gen = HtmlGenerator()
    gen.start("p")
    gen.append(HtmlGenerator())
    assert_equal('', gen.html_string())
@istest
def appending_another_html_generator_writes_out_elements_if_other_generator_is_not_empty():
    """Appending a non-empty generator flushes pending open tags first."""
    gen = HtmlGenerator()
    gen.start("p")
    extra = HtmlGenerator()
    extra.text("Hello!")
    gen.append(extra)
    assert_equal('<p>Hello!', gen.html_string())
@istest
class SatisfyPathTests(object):
    """Tests for satisfy_html_path: opening/closing elements so the
    generator's open-element stack matches a given HTML path."""

    @istest
    def plain_elements_are_generated_to_satisfy_plain_path_elements(self):
        """With nothing open, every path element is opened."""
        gen = HtmlGenerator()
        target = html_paths.path([html_paths.element(["p"])])
        satisfy_html_path(gen, target)
        gen.text("Hello!")
        assert_equal('<p>Hello!', gen.html_string())

    @istest
    def only_missing_elements_are_generated_to_satisfy_plain_path_elements(self):
        """Elements already open and matching the path are reused."""
        gen = HtmlGenerator()
        gen.start("blockquote")
        gen.text("Hello")
        target = html_paths.path([html_paths.element(["blockquote"]), html_paths.element(["p"])])
        satisfy_html_path(gen, target)
        gen.text("there")
        assert_equal('<blockquote>Hello<p>there', gen.html_string())

    @istest
    def mismatched_elements_are_closed_to_satisfy_plain_path_elements(self):
        """Open elements that do not match the path are closed first."""
        gen = HtmlGenerator()
        gen.start("blockquote")
        gen.start("span")
        gen.text("Hello")
        target = html_paths.path([html_paths.element(["blockquote"]), html_paths.element(["p"])])
        satisfy_html_path(gen, target)
        gen.text("there")
        assert_equal('<blockquote><span>Hello</span><p>there', gen.html_string())

    @istest
    def fresh_element_matches_nothing(self):
        """A fresh path element forces a new tag even if one is open."""
        gen = HtmlGenerator()
        gen.start("blockquote")
        gen.start("p")
        gen.text("Hello")
        target = html_paths.path([html_paths.element(["blockquote"]), html_paths.element(["p"], fresh=True)])
        satisfy_html_path(gen, target)
        gen.text("there")
        assert_equal('<blockquote><p>Hello</p><p>there', gen.html_string())

    @istest
    def attributes_are_generated_when_satisfying_elements(self):
        """Class names on the path element become a class attribute."""
        gen = HtmlGenerator()
        target = html_paths.path([html_paths.element(["p"], class_names=["tip"])])
        satisfy_html_path(gen, target)
        gen.text("Hello")
        assert_equal('<p class="tip">Hello', gen.html_string())

    @istest
    def elements_do_not_match_if_class_names_do_not_match(self):
        """An open element with a different class does not satisfy the path."""
        gen = HtmlGenerator()
        gen.start("p", {"class": "help"})
        gen.text("Help")
        target = html_paths.path([html_paths.element(["p"], class_names=["tip"])])
        satisfy_html_path(gen, target)
        gen.text("Tip")
        assert_equal('<p class="help">Help</p><p class="tip">Tip', gen.html_string())

    @istest
    def class_names_match_if_they_are_the_same(self):
        """An open element with the same class satisfies the path."""
        gen = HtmlGenerator()
        gen.start("p", {"class": "tip"})
        gen.text("Help")
        target = html_paths.path([html_paths.element(["p"], class_names=["tip"])])
        satisfy_html_path(gen, target)
        gen.text("Tip")
        assert_equal('<p class="tip">HelpTip', gen.html_string())
|
[
"mammoth.html_generation.satisfy_html_path",
"mammoth.html_paths.element",
"mammoth.html_generation.HtmlGenerator"
] |
[((220, 235), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (233, 235), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((333, 348), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (346, 348), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((489, 504), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (502, 504), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((658, 673), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (671, 673), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((925, 940), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (938, 940), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((1151, 1166), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (1164, 1166), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((1387, 1402), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (1400, 1402), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((1586, 1601), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (1599, 1601), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((1789, 1804), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (1802, 1804), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((2062, 2077), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (2075, 2077), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((2305, 2320), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (2318, 2320), False, 'from mammoth.html_generation import 
HtmlGenerator, satisfy_html_path\n'), ((2358, 2373), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (2371, 2373), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((2124, 2139), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (2137, 2139), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((2631, 2646), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (2644, 2646), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((2715, 2749), 'mammoth.html_generation.satisfy_html_path', 'satisfy_html_path', (['generator', 'path'], {}), '(generator, path)\n', (2732, 2749), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((2966, 2981), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (2979, 2981), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((3156, 3190), 'mammoth.html_generation.satisfy_html_path', 'satisfy_html_path', (['generator', 'path'], {}), '(generator, path)\n', (3173, 3190), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((3417, 3432), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (3430, 3432), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((3639, 3673), 'mammoth.html_generation.satisfy_html_path', 'satisfy_html_path', (['generator', 'path'], {}), '(generator, path)\n', (3656, 3673), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((3881, 3896), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (3894, 3896), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((4112, 4146), 'mammoth.html_generation.satisfy_html_path', 'satisfy_html_path', (['generator', 'path'], {}), '(generator, path)\n', (4129, 4146), 
False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((4368, 4383), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (4381, 4383), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((4473, 4507), 'mammoth.html_generation.satisfy_html_path', 'satisfy_html_path', (['generator', 'path'], {}), '(generator, path)\n', (4490, 4507), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((4717, 4732), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (4730, 4732), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((4901, 4935), 'mammoth.html_generation.satisfy_html_path', 'satisfy_html_path', (['generator', 'path'], {}), '(generator, path)\n', (4918, 4935), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((5154, 5169), 'mammoth.html_generation.HtmlGenerator', 'HtmlGenerator', ([], {}), '()\n', (5167, 5169), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((5337, 5371), 'mammoth.html_generation.satisfy_html_path', 'satisfy_html_path', (['generator', 'path'], {}), '(generator, path)\n', (5354, 5371), False, 'from mammoth.html_generation import HtmlGenerator, satisfy_html_path\n'), ((2679, 2704), 'mammoth.html_paths.element', 'html_paths.element', (["['p']"], {}), "(['p'])\n", (2697, 2704), False, 'from mammoth import html_paths\n'), ((3084, 3118), 'mammoth.html_paths.element', 'html_paths.element', (["['blockquote']"], {}), "(['blockquote'])\n", (3102, 3118), False, 'from mammoth import html_paths\n'), ((3120, 3145), 'mammoth.html_paths.element', 'html_paths.element', (["['p']"], {}), "(['p'])\n", (3138, 3145), False, 'from mammoth import html_paths\n'), ((3567, 3601), 'mammoth.html_paths.element', 'html_paths.element', (["['blockquote']"], {}), "(['blockquote'])\n", (3585, 3601), False, 'from mammoth import html_paths\n'), ((3603, 
3628), 'mammoth.html_paths.element', 'html_paths.element', (["['p']"], {}), "(['p'])\n", (3621, 3628), False, 'from mammoth import html_paths\n'), ((4028, 4062), 'mammoth.html_paths.element', 'html_paths.element', (["['blockquote']"], {}), "(['blockquote'])\n", (4046, 4062), False, 'from mammoth import html_paths\n'), ((4064, 4101), 'mammoth.html_paths.element', 'html_paths.element', (["['p']"], {'fresh': '(True)'}), "(['p'], fresh=True)\n", (4082, 4101), False, 'from mammoth import html_paths\n'), ((4416, 4462), 'mammoth.html_paths.element', 'html_paths.element', (["['p']"], {'class_names': "['tip']"}), "(['p'], class_names=['tip'])\n", (4434, 4462), False, 'from mammoth import html_paths\n'), ((4844, 4890), 'mammoth.html_paths.element', 'html_paths.element', (["['p']"], {'class_names': "['tip']"}), "(['p'], class_names=['tip'])\n", (4862, 4890), False, 'from mammoth import html_paths\n'), ((5280, 5326), 'mammoth.html_paths.element', 'html_paths.element', (["['p']"], {'class_names': "['tip']"}), "(['p'], class_names=['tip'])\n", (5298, 5326), False, 'from mammoth import html_paths\n')]
|
# -*- coding: utf-8 -*-
import json
import logging
from django.test.client import Client
from networkapi.test.test_case import NetworkApiTestCase
log = logging.getLogger(__name__)
class PoolTestV3Case(NetworkApiTestCase):
    """Integration tests for the /api/v3/pool/ endpoints.

    Each ``test_put_*`` / ``test_post_*`` method feeds a JSON fixture to
    one of the shared helper methods below, which perform the request
    and verify the expected status code (and, on success, that the
    stored data round-trips).
    """

    maxDiff = None

    def setUp(self):
        # Fresh Django test client for every test.
        self.client = Client()

    def tearDown(self):
        pass

    def execute_some_put_verify_error(self, name_file):
        """PUT the fixture at ``name_file`` to pool 1; expect HTTP 400."""
        # update
        response = self.client.put(
            '/api/v3/pool/1/',
            data=json.dumps(self.load_json_file(name_file)),
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        self.assertEqual(400, response.status_code,
                         'Status code should be 400 and was %s' % response.status_code)

    def execute_some_put_verify_success(self, name_file):
        """PUT the fixture at ``name_file`` to pool 1; expect HTTP 200 and
        verify the stored pool matches the fixture on read-back."""
        # update
        response = self.client.put(
            '/api/v3/pool/1/',
            data=json.dumps(self.load_json_file(name_file)),
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        self.assertEqual(200, response.status_code,
                         'Status code should be 200 and was %s' % response.status_code)
        # get datas updated
        response = self.client.get(
            '/api/v3/pool/1/',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        # test if datas were updated
        self.assertEqual(
            json.dumps(self.load_json_file(name_file), sort_keys=True),
            json.dumps(response.data, sort_keys=True),
            'jsons should same'
        )
        self.assertEqual(200, response.status_code,
                         'Status code should be 200 and was %s' % response.status_code)

    def execute_some_post_verify_error(self, name_file):
        """Delete pool 1, POST the fixture at ``name_file``; expect HTTP 400
        and verify nothing was inserted (GET returns 500)."""
        # delete
        self.client.delete(
            '/api/v3/pool/1/',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        # insert
        response = self.client.post(
            '/api/v3/pool/',
            data=json.dumps(self.load_json_file(name_file)),
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        self.assertEqual(400, response.status_code,
                         'Status code should be 400 and was %s' % response.status_code)
        # try to get datas
        response = self.client.get(
            '/api/v3/pool/1/',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        # test if data were not inserted
        # NOTE(review): the API apparently answers 500 (not 404) for a
        # missing pool -- confirm this is the intended contract.
        self.assertEqual(500, response.status_code,
                         'Status code should be 500 and was %s' % response.status_code)

    def execute_some_post_verify_success(self, name_file):
        """Delete pool 1, POST the fixture at ``name_file``; expect HTTP 201
        and verify the created pool matches the fixture on read-back."""
        # delete
        self.client.delete(
            '/api/v3/pool/1/',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        # try to get datas
        response = self.client.get(
            '/api/v3/pool/1/',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        # test if does not exist data inserted
        self.assertEqual(500, response.status_code,
                         'Status code should be 500 and was %s' % response.status_code)
        # insert
        response = self.client.post(
            '/api/v3/pool/',
            data=json.dumps(self.load_json_file(name_file)),
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        self.assertEqual(201, response.status_code,
                         'Status code should be 201 and was %s' % response.status_code)
        id_pool = response.data[0]['id']
        # get data inserted
        response = self.client.get(
            '/api/v3/pool/%s/' % id_pool,
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        data = response.data
        # the generated id cannot be in the fixture, drop it before comparing
        del data['server_pools'][0]['id']
        # test if data were inserted
        self.assertEqual(
            json.dumps(self.load_json_file(name_file), sort_keys=True),
            json.dumps(data, sort_keys=True),
            'jsons should same'
        )
        self.assertEqual(200, response.status_code,
                         'Status code should be 200 and was %s' % response.status_code)

    def test_put_valid_file(self):
        """ test_put_valid_file"""
        self.execute_some_put_verify_success(
            'api_pools/tests/json/put/test_pool_put_valid_file.json')

    def test_put_out_of_range_port(self):
        """ test_put_out_of_range_port"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_out_of_range_port.json')

    def test_put_negative_port(self):
        """ test_put_negative_port"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_negative_port.json')

    def test_put_float_port(self):
        """ test_put_float_port"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_float_port.json')

    def test_put_zero_port(self):
        """ test_put_zero_port"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_zero_port.json')

    def test_put_string_port(self):
        """ test_put_string_port"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_string_port.json')

    def test_put_float_environment(self):
        """ test_put_float_environment"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_float_environment.json')

    def test_put_string_environment(self):
        """ test_put_string_environment"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_string_environment.json')

    def test_put_zero_environment(self):
        """ test_put_zero_environment"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_zero_environment.json')

    def test_put_negative_environment(self):
        """ test_put_negative_environment"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_negative_environment.json')

    def test_put_integer_name_servicedownaction(self):
        """ test_put_integer_name_servicedownaction"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_integer_name_servicedownaction.json')

    def test_put_invalid_healthcheck_type(self):
        """ test_put_invalid_healthcheck_type"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_invalid_healthcheck_type.json')

    def test_put_invalid_destination(self):
        """ test_put_invalid_destination"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_invalid_destination.json')

    def test_put_negative_default_limit(self):
        """ test_put_negative_default_limit"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_negative_default_limit.json')

    def test_put_integer_lb_method(self):
        """ test_put_integer_lb_method"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_integer_lb_method.json')

    def test_put_string_id_servicedownaction(self):
        """ test_put_string_id_servicedownaction"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_string_id_servicedownaction.json')

    def test_put_zero_id_servicedownaction(self):
        """ test_put_zero_id_servicedownaction"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_zero_id_servicedownaction.json')

    def test_put_negative_id_servicedownaction(self):
        """ test_put_negative_id_servicedownaction"""
        self.execute_some_put_verify_error(
            'api_pools/tests/json/put/test_pool_put_negative_id_servicedownaction.json')

    def test_post_valid_file(self):
        """ test_post_valid_file"""
        self.execute_some_post_verify_success(
            'api_pools/tests/json/post/test_pool_post_valid_file.json')

    def test_post_out_of_range_port(self):
        """ test_post_out_of_range_port"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_out_of_range_port.json')

    def test_post_negative_port(self):
        """ test_post_negative_port"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_negative_port.json')

    def test_post_float_port(self):
        """ test_post_float_port"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_float_port.json')

    def test_post_zero_port(self):
        """ test_post_zero_port"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_zero_port.json')

    def test_post_string_port(self):
        """ test_post_string_port"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_string_port.json')

    def test_post_float_environment(self):
        """ test_post_float_environment"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_float_environment.json')

    def test_post_string_environment(self):
        """ test_post_string_environment"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_string_environment.json')

    def test_post_zero_environment(self):
        """ test_post_zero_environment"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_zero_environment.json')

    def test_post_negative_environment(self):
        """ test_post_negative_environment"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_negative_environment.json')

    def test_post_integer_name_servicedownaction(self):
        """ test_post_integer_name_servicedownaction"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_integer_name_servicedownaction.json')

    def test_post_invalid_healthcheck_type(self):
        """ test_post_invalid_healthcheck_type"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_invalid_healthcheck_type.json')

    def test_post_invalid_destination(self):
        """ test_post_invalid_destination"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_invalid_destination.json')

    def test_post_negative_default_limit(self):
        """ test_post_negative_default_limit"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_negative_default_limit.json')

    def test_post_integer_lb_method(self):
        """ test_post_integer_lb_method"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_integer_lb_method.json')

    def test_post_string_id_servicedownaction(self):
        """ test_post_string_id_servicedownaction"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_string_id_servicedownaction.json')

    def test_post_zero_id_servicedownaction(self):
        """ test_post_zero_id_servicedownaction"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_zero_id_servicedownaction.json')

    def test_post_negative_id_servicedownaction(self):
        """ test_post_negative_id_servicedownaction"""
        self.execute_some_post_verify_error(
            'api_pools/tests/json/post/test_pool_post_negative_id_servicedownaction.json')

    def test_valid_post_after_equals_valid_put(self):
        """ test_valid_post_after_equals_valid_put"""
        # try to get datas
        response = self.client.get(
            '/api/v3/pool/1/',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        # test if data were not inserted
        self.assertEqual(200, response.status_code,
                         'Status code should be 200 and was %s' % response.status_code)
        response = self.client.put(
            '/api/v3/pool/1/',
            data=json.dumps(self.load_json_file(
                'api_pools/tests/json/test_pool_put_and_post.json')),
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        self.assertEqual(200, response.status_code,
                         'Status code should be 200 and was %s' % response.status_code)
        response = self.client.post(
            '/api/v3/pool/',
            data=json.dumps(self.load_json_file(
                'api_pools/tests/json/test_pool_put_and_post.json')),
            content_type='application/json',
            HTTP_AUTHORIZATION=self.get_http_authorization('test'))
        # bug fix: the failure message said "should be 500" while the
        # assertion expects 400 -- the message now matches the assertion
        self.assertEqual(400, response.status_code,
                         'Status code should be 400 and was %s' % response.status_code)
|
[
"django.test.client.Client",
"logging.getLogger",
"json.dumps"
] |
[((155, 182), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (172, 182), False, 'import logging\n'), ((291, 299), 'django.test.client.Client', 'Client', ([], {}), '()\n', (297, 299), False, 'from django.test.client import Client\n'), ((1607, 1648), 'json.dumps', 'json.dumps', (['response.data'], {'sort_keys': '(True)'}), '(response.data, sort_keys=True)\n', (1617, 1648), False, 'import json\n'), ((4303, 4335), 'json.dumps', 'json.dumps', (['data'], {'sort_keys': '(True)'}), '(data, sort_keys=True)\n', (4313, 4335), False, 'import json\n')]
|
#!/usr/bin/env python3
'''
designed for cronjobs where /build and /sources are mount on the system.
They need to be mounted under the same parent directory.
e.g. /mnt/seadev/build for /build
/mnt/seadev/sources for /sources
'''
import psycopg2
import argparse
import re
import os
import sys
import inspect
import traceback
sys.path.append(os.path.dirname( os.path.abspath( inspect.getfile( inspect.currentframe() ) ) ) + '/dynamic/')
sys.path.append(os.path.dirname( os.path.abspath( inspect.getfile( inspect.currentframe() ) ) ) + '/static/')
sys.path.append(os.path.dirname( os.path.abspath( inspect.getfile( inspect.currentframe() ) ) ) + '/library/')
import dynamic_parser
import static_parser
import dependency_resolver
from library import *
def get_fullversion_str(dynamic_path, version):
    """Derive the full "<version>-<build>" string for a release directory.

    Reads ``<dynamic_path>/bldnum`` (e.g. "3754.0") to get the serial
    build number, then scans ``dynamic_path`` for an ISO whose filename
    embeds the same version; the full version string is taken from the
    ISO filename (e.g. "13.1.1-0.0.3754").  A mismatch between the two
    build numbers is reported via ``utility.terminal_msg`` but the ISO
    filename wins.

    Hotfix versions ("13.1.1-hf2") are matched against the base
    version's ISO name.

    Returns "" when the bldnum file is missing or malformed, or when no
    matching ISO is found.
    """
    fullver_str = ""
    try:
        with open(dynamic_path + "/bldnum", "r") as bldnum_file:
            # read the raw string from bldnum (e.g., "3754.0")
            buildnum_raw = bldnum_file.readline().strip()
        # extract the serial part; guard against unexpected content
        # (the original crashed with AttributeError on a non-match)
        bn_regex_match = re.search(r"(\d+).0", buildnum_raw)
        if bn_regex_match is None:
            return ""
        # form the build number (e.g., "3754.0" => "0.0.3754")
        build_bldnum = "0.0." + bn_regex_match.group(1)
        # version: hotfix processing
        if "-hf" in version:
            version = version.split("-hf", 1)[0]
        # ISO filename extract approach
        # (loop variable renamed: it previously shadowed the file handle)
        for fname in os.listdir(dynamic_path):
            # only look at ISO files
            if os.path.splitext(fname)[-1].lower() != ".iso":
                continue
            # regex to match version + build number in the filename
            fn_regex = r"(" + version + r"[-.](.+?)).iso"
            fn_regex_match = re.search(fn_regex, fname)
            if fn_regex_match is None:
                # ISO for some other version; keep looking
                continue
            fullver_str = fn_regex_match.group(1)
            # build number from ISO
            build_isoname = fn_regex_match.group(2)
            # complain on mismatch, proceed with build numbers in ISO filename
            if build_isoname != build_bldnum:
                utility.terminal_msg(1, "Either the build number in bldnum is incorrect or the build serializing is not following the standard fashion. " + \
                    "\n\t bldnum: {0} ; ISO filename: {1}".format(buildnum_raw, build_isoname))
    except FileNotFoundError as e:
        print("Error in finding dynamic path bldnum.")
        print(e.strerror)
        fullver_str = ""
    return fullver_str
def version_exist(prod, vers):
    """Return True if product *prod* at version *vers* is already in the DB.

    Counts matching rows in ``versions`` joined to ``products``; a query
    error leaves the count at 0 (treated as "not present").
    """
    # establish connection
    conn = psycopg2.connect(utility.get_conn_str())
    # init variable
    res = (0,)
    # with connect enables auto-commit. (otherwise do conn.commit() manually)
    with conn:
        with conn.cursor() as cur:
            # Security fix: values were interpolated straight into the SQL
            # string; they are now passed as parameters so the driver
            # handles quoting/escaping.
            # NOTE(review): comparing version with "=" against vers + "%"
            # looks like it was meant to be LIKE -- behaviour kept as-is,
            # but worth confirming.
            sql = ("SELECT count(*) FROM versions "
                   "JOIN products ON versions.prod_id = products.prod_id "
                   "WHERE products.product = %s AND versions.version = %s;")
            cur.execute(sql, (prod, vers + "%"))
            # print real query generated by psycopg2
            print(cur.query)
            try:
                res = cur.fetchone()
            except psycopg2.ProgrammingError:
                res = (0,)  # Error
    # close connection
    conn.close()
    try:
        if res[0] > 0:
            # bug fix: the original mixed %s placeholders with str.format(),
            # which printed the literal "%s" -- use {} placeholders instead
            utility.terminal_msg(2, "Product {0} {1} is already parsed into database.".format(prod, vers))
            return True
        # when certain version not found in database
        else:
            return False
    except Exception as e:
        utility.terminal_msg(0, "Error occurred during querying product {0} {1} with error message: {2}".format(prod, vers, e))
def iterate_seadev(args):
    """Walk the mounted seadev tree and parse every unseen product version.

    For each product in ``path_dict`` this scans
    ``<mount>/build/<product>/<vX.Y.Z>/dist/release`` and, when the
    matching ``<mount>/sources`` directory exists and the version is not
    already in the database, runs the dynamic parser, static parser and
    dependency resolver.  ``args.limit_upload`` (0 = unlimited) caps how
    many new versions are processed in one run.  Per-version failures
    are logged to ``~/parser_exception_log`` and scanning continues.
    """
    # add/edit paths here for new products supported or on change of seadev directory structure
    # retrieve mount point
    mnt_path = args.seadev
    # Only add this many isos to the database (0 means no limit)
    iso_load_limit = args.limit_upload
    isos_uploaded = 0
    # check if the mount point exists or not
    # add os.path.ismount(mnt_path) check when PD figures out a way to mount /
    if os.path.isdir(mnt_path):
        mnt_path = utility.dir_formatting(mnt_path)
    else:
        utility.terminal_msg(0, "Mount point not found or not a valid one.")
    # iterate over all products specified in path_dict
    for prod in path_dict:
        # bug fix: the original "break" only left the inner version loop,
        # so the upload limit never stopped the remaining products
        if iso_load_limit and isos_uploaded >= iso_load_limit:
            break
        dynamic_base_path = mnt_path + "/build/" + path_dict.get(prod)
        dynamic_trailing_path = "dist/release"
        # check if path provided can be found in file system
        if os.path.exists(dynamic_base_path) and os.path.isdir(dynamic_base_path):
            # iterate over all versions
            for ver_dir in os.listdir(dynamic_base_path):
                # form the complete path for dynamic parsing
                dynamic_path = os.path.join(dynamic_base_path, ver_dir, dynamic_trailing_path)
                # check if build path exists; skip otherwise
                if not (os.path.exists(dynamic_path) and os.path.isdir(dynamic_path)):
                    continue
                static_base_path = mnt_path + "/sources/" + path_dict.get(prod)
                static_trailing_path = "release/logs"
                # form the complete path for static parsing
                static_path = os.path.join(static_base_path, ver_dir, static_trailing_path)
                # set product
                args.product_name = prod
                # remove the "v" from the directory name (e.g., v13.1.1 => 13.1.1)
                args.version_number = ver_dir[1:]
                # if no version number found, skip
                if not args.version_number:
                    continue
                try:
                    # check if sources path also exists and whether the version is seen in DB
                    if os.path.exists(static_path) and os.path.isdir(static_path) and not version_exist(args.product_name,
                                                                                                   args.version_number):
                        if iso_load_limit and (isos_uploaded >= iso_load_limit):
                            # We've uploaded enough isos for now. Time to break out
                            break
                        isos_uploaded += 1
                        # log message
                        utility.terminal_msg(2, "Processing {} {} from seadev path.".format(args.product_name, args.version_number))
                        # run parsers
                        args = dynamic_parser.wrapper(args, 2)
                        static_parser.wrapper(args, 2)
                        dependency_resolver.resolve_deps(args.product_name, args.version_number)
                except Exception:
                    # bug fix: open() does not expand "~" -- use expanduser
                    with open(os.path.expanduser("~/parser_exception_log"), "a") as f:
                        f.write("************FOUND EXCEPTION**********\n")
                        f.write("Error parsing version: %s\n" % args.version_number)
                        # bug fix: args.static_path was never assigned (it raised
                        # AttributeError inside this handler); use the local variable
                        f.write("Static path calculated %s\n" % os.fsdecode(static_path))
                        f.write("Dynamic path %s\n" % os.fsdecode(dynamic_path))
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        traceback.print_tb(exc_traceback, file=sys.stdout)
                        traceback.print_exception(exc_type, exc_value, exc_traceback, file=f)
                    continue
    # bug fix: the summary reported args.isos_uploaded, which is never
    # updated from the local counter (it stays 0); keep args in sync and
    # report the real count.  Also call terminal_msg via utility for
    # consistency with the rest of the function.
    args.isos_uploaded = isos_uploaded
    utility.terminal_msg(2, "Completed uploading {} isos to {} at {}.".format(isos_uploaded, utility.get_database_name(), utility.get_database_url()))
if __name__ == "__main__":
    # Command-line entry point: build the argument parser and walk seadev.
    p = argparse.ArgumentParser()
    # used
    # bug fix: help text said "default 10" but the actual default is 5
    p.add_argument("-pc", "--processes", metavar="<amount>", type=int, default=5,
        help = "The number of processes to spawn that can be utilized to examine rpm files. " + \
        "(default 5, suggested threshold x where x <= how many GBs of RAM available)")
    p.add_argument("-o", "--output-directory", metavar = "<path>", type = str, default = "./output/",
        help = "A directory to place the output. <cwd>/output is created if not specified.")
    p.add_argument("-sd", "--seadev", metavar = "<mount path>", type = str, required = True,
        help = "The mount point if running on or mounted with the seadev machine or any UNIX box that shares the same source code/ISO directory structure as seadev.")
    p.add_argument("-c", "--clean-output-directory", action = "store_true",
        help = "Cleanup the output directory before writing to it.")
    p.add_argument("-w", "--wipe-program-output", action = "store_true",
        help = "Remove the output directories and files after the whole process finishes.")
    p.add_argument("-l", "--limit-upload", metavar="<limit>", type=int, default=0,
        help = "Only upload <limit> new isos to the database.")
    # not used, but necessary for compatibility with dynamic/static parsers
    p.add_argument("-i", "--iso", type=str,
        help = "[*] Please do not assign any value. This option will not take affect in this script.")
    p.add_argument("-d", "--directory", type=str,
        help = "[*] Please do not assign any value. This option will not take affect in this script.")
    p.add_argument("-f", "--file", type = str,
        help = "[*] Please do not assign any value. This option will not take affect in this script.")
    p.add_argument("-p", "--product-name", type = str,
        help = "[*] Please do not assign any value. This option will not take affect in this script.")
    p.add_argument("-v", "--version-number", type = str,
        help = "[*] Please do not assign any value. This option will not take affect in this script.")
    args = p.parse_args()
    # counter reported by iterate_seadev's summary message
    args.isos_uploaded = 0
    iterate_seadev(args)
|
[
"dynamic_parser.wrapper",
"argparse.ArgumentParser",
"os.path.join",
"traceback.print_tb",
"os.path.isdir",
"os.fsdecode",
"os.path.exists",
"static_parser.wrapper",
"traceback.print_exception",
"dependency_resolver.resolve_deps",
"os.path.splitext",
"sys.exc_info",
"inspect.currentframe",
"re.search",
"os.listdir"
] |
[((4566, 4589), 'os.path.isdir', 'os.path.isdir', (['mnt_path'], {}), '(mnt_path)\n', (4579, 4589), False, 'import os\n'), ((8326, 8351), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8349, 8351), False, 'import argparse\n'), ((1206, 1239), 're.search', 're.search', (['bn_regex', 'buildnum_raw'], {}), '(bn_regex, buildnum_raw)\n', (1215, 1239), False, 'import re\n'), ((1618, 1642), 'os.listdir', 'os.listdir', (['dynamic_path'], {}), '(dynamic_path)\n', (1628, 1642), False, 'import os\n'), ((5006, 5039), 'os.path.exists', 'os.path.exists', (['dynamic_base_path'], {}), '(dynamic_base_path)\n', (5020, 5039), False, 'import os\n'), ((5044, 5076), 'os.path.isdir', 'os.path.isdir', (['dynamic_base_path'], {}), '(dynamic_base_path)\n', (5057, 5076), False, 'import os\n'), ((5145, 5174), 'os.listdir', 'os.listdir', (['dynamic_base_path'], {}), '(dynamic_base_path)\n', (5155, 5174), False, 'import os\n'), ((5269, 5332), 'os.path.join', 'os.path.join', (['dynamic_base_path', 'ver_dir', 'dynamic_trailing_path'], {}), '(dynamic_base_path, ver_dir, dynamic_trailing_path)\n', (5281, 5332), False, 'import os\n'), ((396, 418), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (416, 418), False, 'import inspect\n'), ((507, 529), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (527, 529), False, 'import inspect\n'), ((617, 639), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (637, 639), False, 'import inspect\n'), ((1997, 2019), 're.search', 're.search', (['fn_regex', 'f'], {}), '(fn_regex, f)\n', (2006, 2019), False, 'import re\n'), ((5397, 5425), 'os.path.exists', 'os.path.exists', (['dynamic_path'], {}), '(dynamic_path)\n', (5411, 5425), False, 'import os\n'), ((5430, 5457), 'os.path.isdir', 'os.path.isdir', (['dynamic_path'], {}), '(dynamic_path)\n', (5443, 5457), False, 'import os\n'), ((5701, 5762), 'os.path.join', 'os.path.join', (['static_base_path', 'ver_dir', 'static_trailing_path'], {}), 
'(static_base_path, ver_dir, static_trailing_path)\n', (5713, 5762), False, 'import os\n'), ((6416, 6443), 'os.path.exists', 'os.path.exists', (['static_path'], {}), '(static_path)\n', (6430, 6443), False, 'import os\n'), ((6448, 6474), 'os.path.isdir', 'os.path.isdir', (['static_path'], {}), '(static_path)\n', (6461, 6474), False, 'import os\n'), ((7161, 7192), 'dynamic_parser.wrapper', 'dynamic_parser.wrapper', (['args', '(2)'], {}), '(args, 2)\n', (7183, 7192), False, 'import dynamic_parser\n'), ((7221, 7251), 'static_parser.wrapper', 'static_parser.wrapper', (['args', '(2)'], {}), '(args, 2)\n', (7242, 7251), False, 'import static_parser\n'), ((7280, 7352), 'dependency_resolver.resolve_deps', 'dependency_resolver.resolve_deps', (['args.product_name', 'args.version_number'], {}), '(args.product_name, args.version_number)\n', (7312, 7352), False, 'import dependency_resolver\n'), ((1784, 1803), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1800, 1803), False, 'import os\n'), ((7884, 7898), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7896, 7898), False, 'import sys\n'), ((7927, 7977), 'traceback.print_tb', 'traceback.print_tb', (['exc_traceback'], {'file': 'sys.stdout'}), '(exc_traceback, file=sys.stdout)\n', (7945, 7977), False, 'import traceback\n'), ((8007, 8076), 'traceback.print_exception', 'traceback.print_exception', (['exc_type', 'exc_value', 'exc_traceback'], {'file': 'f'}), '(exc_type, exc_value, exc_traceback, file=f)\n', (8032, 8076), False, 'import traceback\n'), ((7703, 7732), 'os.fsdecode', 'os.fsdecode', (['args.static_path'], {}), '(args.static_path)\n', (7714, 7732), False, 'import os\n'), ((7792, 7817), 'os.fsdecode', 'os.fsdecode', (['dynamic_path'], {}), '(dynamic_path)\n', (7803, 7817), False, 'import os\n')]
|
import os
import select
import signal
from datetime import datetime
from io import TextIOWrapper
from pty import openpty
from pwd import getpwnam, struct_passwd
from subprocess import Popen
from termios import ONLCR, tcgetattr, TCSANOW, tcsetattr
from threading import Thread
from time import sleep
from typing import Any, Dict, List, Optional
def studSaveStrComp(ref: str, other: str, strip: bool = True, ignoreCase: bool = True, ignoreNonAlNum=True):
    """
    Student-safe string comparison.

    Optionally strips surrounding whitespace, lowercases both strings and
    drops every non-alphanumeric character before comparing them, so minor
    formatting differences in student output do not count as mismatches.
    """
    if strip:
        ref, other = ref.strip(), other.strip()
    if ignoreCase:
        ref, other = ref.lower(), other.lower()
    if ignoreNonAlNum:
        ref = "".join(filter(str.isalnum, ref))
        other = "".join(filter(str.isalnum, other))
    return ref == other
def recursive_chmod(path: str, mode: int):
    """
    Apply *mode* to *path* and everything below it.

    Uses an explicit worklist instead of recursion; every directory is
    chmod-ed before its children are listed.
    """
    pending = [path]
    while pending:
        current = pending.pop()
        os.chmod(current, mode)
        if os.path.isdir(current):
            pending.extend(os.path.join(current, name) for name in os.listdir(current))
# Limit for stdout output, in characters.
# Should prevent too much output on Artemis if, for example, there is a loop in a tree.
# By default the stdout limit is disabled:
__stdoutLimitEnabled: bool = False
def resetStdoutLimit(limit: int = 15000):
    """
    Re-arm the stdout character budget to *limit* (default: 15000 chars).
    """
    global stdoutCharsLeft  # rebind the module-level counter
    stdoutCharsLeft = limit
def setStdoutLimitEnabled(enabled: bool):
    """
    Turn the stdout character limit on or off.

    The remaining-character budget is left untouched; call
    resetStdoutLimit() separately to re-arm it.
    """
    global __stdoutLimitEnabled
    __stdoutLimitEnabled = enabled
def __printStdout(text: str):
    """
    Print *text* to stdout, honoring the global character budget.

    When the limit is enabled and exhausted, nothing is printed; the last
    printed chunk is truncated with '...' and a banner marks the cutoff.
    """
    global stdoutCharsLeft  # Required since we want to modify stdoutCharsLeft
    if not __stdoutLimitEnabled:
        print(text)
        return
    if stdoutCharsLeft <= 0:
        return
    if len(text) <= stdoutCharsLeft:
        print(text)
    else:
        print(text[:stdoutCharsLeft] + "...")
    stdoutCharsLeft -= len(text)
    if stdoutCharsLeft <= 0:
        print("[STDOUT LIMIT REACHED]".center(50, "="))
# A cache of all that the tester has been writing to stdout:
# consumed by getTesterOutput() and emptied by clearTesterOutputCache().
testerOutputCache: List[str] = list()
def clearTesterOutputCache():
    """
    Drop every entry collected so far in the tester output cache.
    """
    del testerOutputCache[:]
def getTesterOutput():
    """
    Join all cached tester output into one newline-separated string.
    """
    separator = "\n"
    return separator.join(testerOutputCache)
# Timestamp taken at import time; used to compute the elapsed seconds
# that prefix every tester/program log line.
startTime: datetime = datetime.now()
def __getCurSeconds():
    """
    Seconds elapsed since the tester started, rounded to two decimals,
    returned as a string.
    """
    elapsed = (datetime.now() - startTime).total_seconds()
    return str(round(elapsed, 2))
def __getCurDateTimeStr():
    """
    Current local date and time, e.g. ``11.10.2019_17:02:33``.
    """
    now = datetime.now()
    return now.strftime("%d.%m.%Y_%H:%M:%S")
def printTester(text: str, addToCache: bool = True):
    """
    Emit *text* as tester output, tagged '[T]' plus the elapsed seconds.

    Use this instead of print() so students can tell tester messages apart
    from their own program's output.
    """
    line = f"[{__getCurSeconds()}][T]: {text}"
    __printStdout(line)
    if addToCache:
        testerOutputCache.append(line)
def printProg(text: str, addToCache: bool = True):
    """
    Emit *text* as program output, tagged '[P]' plus the elapsed seconds.

    Trailing whitespace is stripped. Use this instead of print() so
    students can tell their program's output apart from tester messages.
    """
    line = f"[{__getCurSeconds()}][P]: {text.rstrip()}"
    __printStdout(line)
    if addToCache:
        testerOutputCache.append(line)
def shortenText(text: str, maxNumChars: int):
    """
    Clip *text* to at most *maxNumChars* characters.

    When clipping happens, a marker of the form
    "\n[And {N} chars more...]" is appended (counted against the limit).
    Returns "" if the marker alone would not fit.
    """
    if len(text) <= maxNumChars:
        return text
    suffix = f"\n[And {len(text) - maxNumChars} chars more...]"
    keep = maxNumChars - len(suffix)
    if keep <= 0:
        printTester(f"Unable to limit output to {maxNumChars} chars! Not enough space.", False)
        return ""
    return f"{text[:keep]}{suffix}"
class ReadCache(Thread):
    """
    Helper class that makes sure we only get one line (separated by '\n')
    if we read multiple lines at once.

    A pty pair is created: one end is handed out via fileno() for the child
    process to write to, the other end is polled by this background thread,
    which mirrors everything into a log file and caches it line by line.
    """
    __cacheList: List[str]  # Complete/partial lines read so far, in order.
    __cacheFile: TextIOWrapper  # Log file mirroring everything that was read.
    __outFd: int  # First FD from openpty(); exposed through fileno().
    __outSlaveFd: int  # Second FD from openpty(); polled in run().
    def __init__(self, filePath: str):
        Thread.__init__(self)
        self.__cacheList = []
        self.__cacheFile = open(filePath, "w")
        # Emulate a terminal:
        self.__outFd, self.__outSlaveFd = openpty()
        self.start()
    def fileno(self):
        # FD the child process should use for its output stream.
        return self.__outFd
    def join(self, timeout: Optional[float] = None):
        # Close both pty ends first so the poll loop in run() can exit.
        try:
            os.close(self.__outFd)
        except OSError as e:
            printTester(f"Closing stdout FD failed with: {e}")
        try:
            os.close(self.__outSlaveFd)
        except OSError as e:
            printTester(f"Closing stdout slave FD failed with: {e}")
        Thread.join(self, timeout)
    @staticmethod
    def __isFdValid(fd: int):
        # os.stat() raises OSError for closed/invalid file descriptors.
        try:
            os.stat(fd)
        except OSError:
            return False
        return True
    @staticmethod
    def __decode(data: bytes):
        """
        Tries to decode the given string as UTF8.
        In case this fails, it will fall back to ASCII encoding.
        Returns the decoded result.
        ---
        data: bytes
            The data that should be decoded.
        """
        try:
            return data.decode("utf8", "replace")
        except UnicodeDecodeError as e:
            printTester(f"Failed to decode line as utf8. Using ascii ecoding - {e}")
            return data.decode("ascii", "replace")
    def run(self):
        # Poll the pty until its FD becomes invalid (closed by join()).
        pollObj = select.poll()
        pollObj.register(self.__outSlaveFd, select.POLLIN)
        while self.__isFdValid(self.__outSlaveFd):
            try:
                for fd, mask in pollObj.poll(100):  # 100 ms per poll round
                    if fd != self.__outSlaveFd:
                        continue
                    if mask & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
                        # Other end hung up or the FD turned invalid: stop.
                        return
                    if mask & select.POLLIN:
                        data: bytes = os.read(self.__outSlaveFd, 4096)
                        dataStr: str = self.__decode(data)
                        try:
                            self.__cacheFile.write(dataStr)
                        except UnicodeEncodeError:
                            printTester("Invalid ASCII character read. Skipping line...")
                            continue
                        self.__cacheFile.flush()
                        self.__cache(dataStr)
                        printProg(dataStr)
            except OSError:
                break
    def canReadLine(self):
        # True if at least one cached (partial) line is available.
        return len(self.__cacheList) > 0
    def __cache(self, data: str):
        # keepends=True so callers see each line's trailing newline.
        self.__cacheList.extend(data.splitlines(True))
    def readLine(self):
        # Non-blocking: returns "" when nothing has been cached yet.
        if self.canReadLine():
            return self.__cacheList.pop(0)
        return ""
class PWrap:
    """
    A wrapper for "Popen".

    Spawns the program with a pty-backed stdin and ReadCache-backed
    stdout/stderr, optionally demoted to another user, and provides
    line-based (optionally blocking) I/O helpers on top of it.
    """
    cmd: List[str]  # Command line (program + arguments) to execute.
    prog: Optional[Popen]  # The running process; None before start().
    cwd: str  # Working directory for the child process.
    __stdinFd: int  # Second FD from openpty(); written to by writeStdin().
    __stdinMasterFd: int  # First FD from openpty(); used as the child's stdin.
    __stdOutLineCache: ReadCache  # Line cache/logger for the child's stdout.
    __stdErrLineCache: ReadCache  # Line cache/logger for the child's stderr.
    __terminatedTime: Optional[datetime]  # When termination was first observed.
    def __init__(self, cmd: List[str], stdoutFilePath: str = "/tmp/stdout.txt", stderrFilePath: str = "/tmp/stderr.txt", cwd: Optional[str] = None):
        self.cmd = cmd
        self.prog = None
        self.cwd: str = os.getcwd() if cwd is None else cwd
        self.stdout = open(stdoutFilePath, "wb")
        self.stderr = open(stderrFilePath, "wb")
        self.__stdOutLineCache = ReadCache(stdoutFilePath)
        self.__stdErrLineCache = ReadCache(stderrFilePath)
        self.__terminatedTime = None
    def __del__(self):
        # Best-effort close of the stdin pty pair; AttributeError means
        # start() was never called, so the FDs do not exist yet.
        try:
            os.close(self.__stdinFd)
        except OSError as e:
            printTester(f"Closing stdin FD failed with: {e}")
        except AttributeError:
            pass
        try:
            os.close(self.__stdinMasterFd)
        except OSError as e:
            printTester(f"Closing stdin master FD failed with: {e}")
        except AttributeError:
            pass
    def start(self, userName: Optional[str] = None):
        """
        Starts the process and sets all file descriptors to nonblocking.
        ---
        userName: Optional[str] = None
            In case the userName is not None, the process will be executed as the given userName.
            This requires root privileges and you have to ensure the user has the required rights to access all resources (files).
        """
        # Emulate a terminal for stdin:
        self.__stdinMasterFd, self.__stdinFd = openpty()
        # Transform "\r\n" to '\n' for data send to stdin:
        tsettings: List[Any] = tcgetattr(self.__stdinFd)
        tsettings[1] &= ~ONLCR  # Clear ONLCR in the output-flags word.
        tcsetattr(self.__stdinFd, TCSANOW, tsettings)
        if userName is not None:
            # Check for root privileges:
            self.__checkForRootPrivileges()
            # Prepare environment:
            pwRecord: struct_passwd = getpwnam(userName)
            env: Dict[str, str] = os.environ.copy()
            env["HOME"] = pwRecord.pw_dir
            env["LOGNAME"] = pwRecord.pw_name
            env["USER"] = pwRecord.pw_name
            env["PWD"] = self.cwd
            printTester(f"Starting process as: {pwRecord.pw_name}")
            # Start the actual process:
            self.prog = Popen(
                self.cmd,
                stdout=self.__stdOutLineCache.fileno(),
                stdin=self.__stdinMasterFd,
                stderr=self.__stdErrLineCache.fileno(),
                universal_newlines=True,
                cwd=self.cwd,
                env=env,
                preexec_fn=self.__demote(pwRecord.pw_uid, pwRecord.pw_gid, pwRecord.pw_name),
            )
        else:
            # Start the actual process:
            self.prog = Popen(
                self.cmd,
                stdout=self.__stdOutLineCache.fileno(),
                stdin=self.__stdinMasterFd,
                stderr=self.__stdErrLineCache.fileno(),
                universal_newlines=True,
                cwd=self.cwd,
                preexec_fn=os.setsid,
            )  # Make sure we store the process group id
    def __demote(self, userUid: int, userGid: int, userName: str):
        """
        Returns a call, demoting the calling process to the given user, UID and GID.
        """
        def result():
            # self.__printIds("Starting demotion...") # Will print inside the new process and reports via the __stdOutLineCache
            os.initgroups(userName, userGid)
            os.setuid(userUid)
            # self.__printIds("Finished demotion.") # Will print inside the new process and reports via the __stdOutLineCache
        return result
    @staticmethod
    def __checkForRootPrivileges():
        """
        Checks if the current process has root permissions.
        Fails if not.
        """
        if os.geteuid() != 0:
            raise PermissionError("The tester has to be executed as root to be able to switch users!")
    def __printIds(self, msg: str):
        # Debug helper: log the current real uid/gid together with *msg*.
        printTester(f"uid, gid = {os.getuid()}, {os.getgid()}; {msg}")
    def __readLine(self, lineCache: ReadCache, blocking: bool):
        """
        Reads a single line from the given ReadCache and returns it.
        ---
        blocking:
            When set to True will only return if the process terminated or we read a non empty string.
        """
        while blocking:
            if not lineCache.canReadLine():
                if not self.hasTerminated():
                    sleep(0.1)  # Back off briefly before polling the cache again.
                else:
                    break
            else:
                line: str = lineCache.readLine()
                return line
        return ""
    def readLineStdout(self, blocking: bool = True):
        """
        Reads a single line from the processes stdout and returns it.
        ---
        blocking:
            When set to True will only return if the process terminated or we read a non empty string.
        """
        return self.__readLine(self.__stdOutLineCache, blocking)
    def canReadLineStdout(self):
        """
        Returns whether there is a line from the processes stdout that can be read.
        """
        return self.__stdOutLineCache.canReadLine()
    def readLineStderr(self, blocking: bool = True):
        """
        Reads a single line from the processes stderr and returns it.
        ---
        blocking:
            When set to True will only return if the process terminated or we read a non empty string.
        """
        return self.__readLine(self.__stdErrLineCache, blocking)
    def canReadLineStderr(self):
        """
        Returns whether there is a line from the processes stderr that can be read.
        """
        return self.__stdErrLineCache.canReadLine()
    def writeStdin(self, data: str):
        """
        Writes the given data string to the processes stdin.
        """
        os.write(self.__stdinFd, data.encode())
        printTester(f"Wrote: {data}")
    def hasTerminated(self):
        """
        Returns whether the process has terminated.
        """
        if self.prog is None:
            return True
        # Make sure we wait 1.0 seconds after the process has terminated to
        # make sure all the output arrived:
        elif self.prog.poll() is not None:
            if self.__terminatedTime:
                if (datetime.now() - self.__terminatedTime).total_seconds() > 1.0:
                    return True
            else:
                self.__terminatedTime = datetime.now()
        return False
    def getReturnCode(self):
        """
        Returns the returncode of the terminated process else None.
        """
        return self.prog.returncode
    def waitUntilTerminationReading(self, secs: float = -1):
        """
        Waits until termination of the process and tries to read until either
        the process terminated or the timeout occurred.
        Returns True if the process terminated before the timeout occurred,
        else False.
        ---
        secs:
            The timeout in seconds. Values < 0 result in infinity.
        """
        start: datetime = datetime.now()
        while True:
            if self.hasTerminated():
                return True
            elif 0 <= secs <= (datetime.now() - start).total_seconds():
                return False
            self.readLineStdout(False)  # Consume cached output while waiting.
            sleep(0.1)
    def kill(self, signal: int = signal.SIGKILL):
        """
        Sends the given signal to the complete process group started by the process.
        Returns True if the process existed and had to be killed. Else False.
        ---
        signal:
            The signal that should be sent to the process group started by the process.
        """
        # NOTE(review): the parameter name shadows the imported `signal` module.
        # Send a signal to the complete process group:
        try:
            os.killpg(os.getpgid(self.prog.pid), signal)
            return True
        except ProcessLookupError:
            printTester("No need to kill process. Process does not exist any more.")
            return False
    def cleanup(self):
        """
        Should be called once the execution has terminated.
        Will join the stdout and stderr reader threads.
        """
        self.__stdOutLineCache.join()
        self.__stdErrLineCache.join()
    def getPID(self):
        # PID of the spawned child; raises AttributeError before start().
        return self.prog.pid
|
[
"pty.openpty",
"select.poll",
"os.environ.copy",
"termios.tcsetattr",
"os.close",
"os.path.join",
"threading.Thread.__init__",
"os.getgid",
"os.setuid",
"pwd.getpwnam",
"datetime.datetime.now",
"os.read",
"os.chmod",
"threading.Thread.join",
"os.stat",
"time.sleep",
"os.listdir",
"os.getpgid",
"os.initgroups",
"termios.tcgetattr",
"os.path.isdir",
"os.getcwd",
"os.getuid",
"os.geteuid"
] |
[((3066, 3080), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3078, 3080), False, 'from datetime import datetime\n'), ((1156, 1176), 'os.chmod', 'os.chmod', (['path', 'mode'], {}), '(path, mode)\n', (1164, 1176), False, 'import os\n'), ((1239, 1255), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1249, 1255), False, 'import os\n'), ((1269, 1290), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (1281, 1290), False, 'import os\n'), ((1302, 1318), 'os.path.isdir', 'os.path.isdir', (['f'], {}), '(f)\n', (1315, 1318), False, 'import os\n'), ((5190, 5211), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (5205, 5211), False, 'from threading import Thread\n'), ((5362, 5371), 'pty.openpty', 'openpty', ([], {}), '()\n', (5369, 5371), False, 'from pty import openpty\n'), ((5788, 5814), 'threading.Thread.join', 'Thread.join', (['self', 'timeout'], {}), '(self, timeout)\n', (5799, 5814), False, 'from threading import Thread\n'), ((6551, 6564), 'select.poll', 'select.poll', ([], {}), '()\n', (6562, 6564), False, 'import select\n'), ((9563, 9572), 'pty.openpty', 'openpty', ([], {}), '()\n', (9570, 9572), False, 'from pty import openpty\n'), ((9664, 9689), 'termios.tcgetattr', 'tcgetattr', (['self.__stdinFd'], {}), '(self.__stdinFd)\n', (9673, 9689), False, 'from termios import ONLCR, tcgetattr, TCSANOW, tcsetattr\n'), ((9729, 9774), 'termios.tcsetattr', 'tcsetattr', (['self.__stdinFd', 'TCSANOW', 'tsettings'], {}), '(self.__stdinFd, TCSANOW, tsettings)\n', (9738, 9774), False, 'from termios import ONLCR, tcgetattr, TCSANOW, tcsetattr\n'), ((15176, 15190), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15188, 15190), False, 'from datetime import datetime\n'), ((1383, 1400), 'os.chmod', 'os.chmod', (['f', 'mode'], {}), '(f, mode)\n', (1391, 1400), False, 'import os\n'), ((3457, 3471), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3469, 3471), False, 'from datetime import datetime\n'), 
((5514, 5536), 'os.close', 'os.close', (['self.__outFd'], {}), '(self.__outFd)\n', (5522, 5536), False, 'import os\n'), ((5654, 5681), 'os.close', 'os.close', (['self.__outSlaveFd'], {}), '(self.__outSlaveFd)\n', (5662, 5681), False, 'import os\n'), ((5889, 5900), 'os.stat', 'os.stat', (['fd'], {}), '(fd)\n', (5896, 5900), False, 'import os\n'), ((8337, 8348), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8346, 8348), False, 'import os\n'), ((8677, 8701), 'os.close', 'os.close', (['self.__stdinFd'], {}), '(self.__stdinFd)\n', (8685, 8701), False, 'import os\n'), ((8866, 8896), 'os.close', 'os.close', (['self.__stdinMasterFd'], {}), '(self.__stdinMasterFd)\n', (8874, 8896), False, 'import os\n'), ((9968, 9986), 'pwd.getpwnam', 'getpwnam', (['userName'], {}), '(userName)\n', (9976, 9986), False, 'from pwd import getpwnam, struct_passwd\n'), ((10021, 10038), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (10036, 10038), False, 'import os\n'), ((11503, 11535), 'os.initgroups', 'os.initgroups', (['userName', 'userGid'], {}), '(userName, userGid)\n', (11516, 11535), False, 'import os\n'), ((11548, 11566), 'os.setuid', 'os.setuid', (['userUid'], {}), '(userUid)\n', (11557, 11566), False, 'import os\n'), ((11888, 11900), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (11898, 11900), False, 'import os\n'), ((15428, 15438), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (15433, 15438), False, 'from time import sleep\n'), ((3251, 3265), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3263, 3265), False, 'from datetime import datetime\n'), ((15886, 15911), 'os.getpgid', 'os.getpgid', (['self.prog.pid'], {}), '(self.prog.pid)\n', (15896, 15911), False, 'import os\n'), ((12081, 12092), 'os.getuid', 'os.getuid', ([], {}), '()\n', (12090, 12092), False, 'import os\n'), ((12096, 12107), 'os.getgid', 'os.getgid', ([], {}), '()\n', (12105, 12107), False, 'import os\n'), ((12544, 12554), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (12549, 12554), False, 
'from time import sleep\n'), ((14544, 14558), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14556, 14558), False, 'from datetime import datetime\n'), ((7021, 7053), 'os.read', 'os.read', (['self.__outSlaveFd', '(4096)'], {}), '(self.__outSlaveFd, 4096)\n', (7028, 7053), False, 'import os\n'), ((15307, 15321), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15319, 15321), False, 'from datetime import datetime\n'), ((14391, 14405), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14403, 14405), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""Base file data sink class for NICOS."""
from nicos.core.data import DataSink
from nicos.core.params import Param, intrange, listof, none_or, subdir
# Shared extended description for filename-template parameters
# (used as ``ext_desc`` by FileSink's ``filenametemplate`` parameter).
TEMPLATE_DESC = '''Templates must contain percent-style placeholders
(e.g. ``%(proposal)s_%(pointcounter)08d``) with the following keys:
* counters:
- ``(type)counter`` for globally unique counters
- ``(type)propcounter`` for unique counters within a proposal
- ``(type)samplecounter`` for unique counters within a sample directory \
(for many instruments, there is no separate sample directory, so this \
counter is the same as the propcounter)
- ``(type)number`` for the dataset's number within its parent
``type`` is the dataset type, e.g. ``point`` or ``scan``.
* proposal info from the experiment (e.g. ``proposal`` for the prop. number)
* all devices and parameters (e.g. ``dev1`` for the value of dev1 and
``dev1.param`` for a parameter)
'''
class FileSink(DataSink):
    """Base class for sinks that save data into files.

    Concrete subclasses implement the actual file format; this base class
    contributes the parameters common to all file sinks: where files go
    (``subdir``), how they are named (``filenametemplate``) and which
    permission bits they get (``filemode``).
    """
    parameters = {
        # Filetype-specific subdirectory for this sink's output files.
        'subdir': Param('Filetype specific subdirectory name',
                        type=subdir, mandatory=False, default=''),
        # Percent-style name templates; see TEMPLATE_DESC for the keys.
        'filenametemplate': Param('List of templates for data file names '
                                  '(will be hardlinked), can contain '
                                  'subdirectories',
                                  ext_desc=TEMPLATE_DESC, type=listof(str),
                                  default=['%(pointcounter)08d.dat'],
                                  settable=False, prefercache=False),
        # POSIX permission bits (0o000..0o777), or None for OS defaults.
        'filemode': Param('File access rights after closing the file, '
                          "if set to 'none' (default) the OS defaults "
                          'will be used',
                          type=none_or(intrange(0o000, 0o777),)),
    }
|
[
"nicos.core.params.intrange",
"nicos.core.params.listof",
"nicos.core.params.Param"
] |
[((2118, 2208), 'nicos.core.params.Param', 'Param', (['"""Filetype specific subdirectory name"""'], {'type': 'subdir', 'mandatory': '(False)', 'default': '""""""'}), "('Filetype specific subdirectory name', type=subdir, mandatory=False,\n default='')\n", (2123, 2208), False, 'from nicos.core.params import Param, intrange, listof, none_or, subdir\n'), ((2501, 2512), 'nicos.core.params.listof', 'listof', (['str'], {}), '(str)\n', (2507, 2512), False, 'from nicos.core.params import Param, intrange, listof, none_or, subdir\n'), ((2911, 2927), 'nicos.core.params.intrange', 'intrange', (['(0)', '(511)'], {}), '(0, 511)\n', (2919, 2927), False, 'from nicos.core.params import Param, intrange, listof, none_or, subdir\n')]
|
# -*- coding: utf-8 -*-
# Copyright © 2021 by <NAME>. All rights reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Schemas and assertions for raw data inputs.
Schema checks are useful to standardize certain information in raw data including:
* Clean column names (lowercase, underscore)
* Ensure an understanding of primary keys of data (uniqueness)
* Set data types of columns, i.e. boolean->int, datetime, string, nullable integer
* Pandera schema checks to attempt to lock some checks about a datafile. This helps
to quickly assess if a file has changed, or a similar file is same or different.
Some examples:
* Contains exact list of columns (no more no less)
* Nullable column
* Data type correct
* Data range checks (must not be 0 or less, must contain only these values)
Schemas can often be re-applied to similar data files, i.e. tabs of an excel, or train/test data.
"""
import logging
from pathlib import Path
import pandas as pd
from pandera import io
from ndj_pipeline import utils
def check_titanic() -> pd.DataFrame:
    """Data schema and typing validations for the titanic raw data.

    Loads ``data/titanic.csv``, standardizes column names, asserts row
    uniqueness, recodes ``sex`` to a nullable integer and validates the
    frame against the pandera schema in ``schemas/titanic.yaml``.

    Returns:
        Loaded pandas dataframe with typing and schema checks.
    """
    # Standardize column names
    input_path = Path("data", "titanic.csv")
    # Lazy %-style args: formatting is skipped when INFO is disabled.
    logging.info("Loading data from %s", input_path)
    df = pd.read_csv(input_path)
    df = df.rename(columns=utils.clean_column_names(df))  # type: ignore
    # Checks for duplicates: the raw file is expected to be unique per row.
    assert df.shape == df.drop_duplicates().shape
    # Recoding of string variables: male -> 1, female -> 0, nullable int.
    df["sex"] = df["sex"].replace({"male": 1, "female": 0}).astype("Int64")
    # Full expressive list of variables, assumptions and questions
    schema_path = Path("schemas", "titanic.yaml")
    with open(schema_path, "r") as f:
        pandera_schema_check = io.from_yaml(f)
    df = pandera_schema_check.validate(df)
    logging.info("Validation checks passed")
    return df
|
[
"pandera.io.from_yaml",
"ndj_pipeline.utils.clean_column_names",
"pandas.read_csv",
"logging.info",
"pathlib.Path"
] |
[((2246, 2273), 'pathlib.Path', 'Path', (['"""data"""', '"""titanic.csv"""'], {}), "('data', 'titanic.csv')\n", (2250, 2273), False, 'from pathlib import Path\n'), ((2278, 2325), 'logging.info', 'logging.info', (['f"""Loading data from {input_path}"""'], {}), "(f'Loading data from {input_path}')\n", (2290, 2325), False, 'import logging\n'), ((2335, 2358), 'pandas.read_csv', 'pd.read_csv', (['input_path'], {}), '(input_path)\n', (2346, 2358), True, 'import pandas as pd\n'), ((2710, 2741), 'pathlib.Path', 'Path', (['"""schemas"""', '"""titanic.yaml"""'], {}), "('schemas', 'titanic.yaml')\n", (2714, 2741), False, 'from pathlib import Path\n'), ((2874, 2914), 'logging.info', 'logging.info', (['"""Validation checks passed"""'], {}), "('Validation checks passed')\n", (2886, 2914), False, 'import logging\n'), ((2811, 2826), 'pandera.io.from_yaml', 'io.from_yaml', (['f'], {}), '(f)\n', (2823, 2826), False, 'from pandera import io\n'), ((2387, 2415), 'ndj_pipeline.utils.clean_column_names', 'utils.clean_column_names', (['df'], {}), '(df)\n', (2411, 2415), False, 'from ndj_pipeline import utils\n')]
|
from ast import (
Module,
Suite,
FunctionDef,
AsyncFunctionDef,
Assign,
AnnAssign,
For,
AsyncFor,
With,
AsyncWith,
Num,
Str,
Bytes,
NameConstant,
ExtSlice,
arguments,
arg,
)
from quiche.pal.pal_block import (
PALIdentifier,
PALLeaf,
PALPrimitive,
StmtBlock,
ExprBlock,
SliceBlock,
ArgBlock,
WithItemBlock,
)
from quiche.pal.pal_lifter import PALLifter
class PALLift37(PALLifter):
    """AST transformer lifting Python 3.7 ``ast`` nodes into PAL blocks.

    Each ``visit_*`` method first checks whether the relevant child is
    already a PAL block type (idempotence guard), otherwise visits the
    children and rebuilds the node with its list/primitive fields wrapped
    in the corresponding PAL block wrappers.
    """
    def visit_Module(self, node: Module) -> Module:
        # Short-circuit: assume if the body is a StmtBlock, it's already
        # been transformed. NOTE: may need to revisit this later if this
        # transform is applied to fix-up after other modifications.
        if isinstance(node.body, StmtBlock):
            return node
        self.generic_visit(node)
        return Module(body=StmtBlock(node.body))
    def visit_Suite(self, node: Suite) -> Suite:
        if isinstance(node.body, StmtBlock):
            return node
        self.generic_visit(node)
        return Suite(body=StmtBlock(node.body))
    def visit_FunctionDef(self, node: FunctionDef) -> FunctionDef:
        if isinstance(node.body, StmtBlock):
            return node
        self.generic_visit(node)
        return FunctionDef(
            name=PALIdentifier(node.name),
            args=node.args,
            body=StmtBlock(node.body),
            decorator_list=ExprBlock(node.decorator_list),
            returns=node.returns,
        )
    def visit_AsyncFunctionDef(self, node: AsyncFunctionDef) -> AsyncFunctionDef:
        if isinstance(node.body, StmtBlock):
            return node
        self.generic_visit(node)
        return AsyncFunctionDef(
            name=PALIdentifier(node.name),
            args=node.args,
            body=StmtBlock(node.body),
            decorator_list=ExprBlock(node.decorator_list),
            returns=node.returns,
        )
    def visit_Assign(self, node: Assign) -> Assign:
        if isinstance(node.targets, ExprBlock):
            return node
        self.generic_visit(node)
        return Assign(targets=ExprBlock(node.targets), value=node.value)
    def visit_AnnAssign(self, node: AnnAssign) -> AnnAssign:
        # NOTE(review): node.annotation is not forwarded to the rebuilt
        # node here -- confirm this is intentional.
        if isinstance(node.simple, PALPrimitive):
            return node
        self.generic_visit(node)
        return AnnAssign(target=node.target, value=node.value, simple=PALPrimitive[int](node.simple))
    def visit_For(self, node: For) -> For:
        if isinstance(node.body, StmtBlock):
            return node
        self.generic_visit(node)
        return For(
            target=node.target,
            iter=node.iter,
            body=StmtBlock(node.body),
            orelse=StmtBlock(node.orelse),
        )
    def visit_AsyncFor(self, node: AsyncFor) -> AsyncFor:
        if isinstance(node.body, StmtBlock):
            return node
        self.generic_visit(node)
        return AsyncFor(
            target=node.target,
            iter=node.iter,
            body=StmtBlock(node.body),
            orelse=StmtBlock(node.orelse),
        )
    def visit_With(self, node: With) -> With:
        if isinstance(node.items, WithItemBlock):
            return node
        self.generic_visit(node)
        return With(items=WithItemBlock(node.items), body=StmtBlock(node.body))
    def visit_AsyncWith(self, node: AsyncWith) -> AsyncWith:
        if isinstance(node.items, WithItemBlock):
            return node
        self.generic_visit(node)
        return AsyncWith(
            items=WithItemBlock(node.items), body=StmtBlock(node.body)
        )
    # EXPRESSIONS
    def visit_Num(self, node: Num) -> PALLeaf[complex]:
        # Leaf is tagged with the concrete numeric type name (int/float/complex).
        return PALLeaf[complex](type(node.n).__name__, Num, node.n)
    def visit_Str(self, node: Str) -> PALLeaf[str]:
        return PALLeaf[str]("str", Str, node.s)
    def visit_Bytes(self, node: Bytes) -> PALLeaf[bytes]:
        return PALLeaf[bytes]("bytes", Bytes, node.s)
    def visit_NameConstant(self, node: NameConstant) -> PALLeaf:
        return PALLeaf[bool]("bool", NameConstant, node.value)
    def visit_ExtSlice(self, node: ExtSlice) -> ExtSlice:
        if isinstance(node.dims, SliceBlock):
            return node
        self.generic_visit(node)
        return ExtSlice(dims=SliceBlock(node.dims))
    def visit_arguments(self, node: arguments) -> arguments:
        if isinstance(node.args, ArgBlock):
            return node
        self.generic_visit(node)
        return arguments(
            args=ArgBlock(node.args),
            vararg=node.vararg,
            kwonlyargs=ArgBlock(node.kwonlyargs),
            kw_defaults=ExprBlock(node.kw_defaults),
            kwarg=node.kwarg,
            defaults=ExprBlock(node.defaults),
        )
    def visit_arg(self, node: arg) -> arg:
        if isinstance(node.arg, PALIdentifier):
            return node
        self.generic_visit(node)
        return arg(arg=PALIdentifier(node.arg), annotation=node.annotation)
|
[
"quiche.pal.pal_block.SliceBlock",
"quiche.pal.pal_block.ArgBlock",
"quiche.pal.pal_block.WithItemBlock",
"quiche.pal.pal_block.ExprBlock",
"quiche.pal.pal_block.StmtBlock",
"quiche.pal.pal_block.PALIdentifier"
] |
[((879, 899), 'quiche.pal.pal_block.StmtBlock', 'StmtBlock', (['node.body'], {}), '(node.body)\n', (888, 899), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((1079, 1099), 'quiche.pal.pal_block.StmtBlock', 'StmtBlock', (['node.body'], {}), '(node.body)\n', (1088, 1099), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((1316, 1340), 'quiche.pal.pal_block.PALIdentifier', 'PALIdentifier', (['node.name'], {}), '(node.name)\n', (1329, 1340), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((1387, 1407), 'quiche.pal.pal_block.StmtBlock', 'StmtBlock', (['node.body'], {}), '(node.body)\n', (1396, 1407), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((1436, 1466), 'quiche.pal.pal_block.ExprBlock', 'ExprBlock', (['node.decorator_list'], {}), '(node.decorator_list)\n', (1445, 1466), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((1747, 1771), 'quiche.pal.pal_block.PALIdentifier', 'PALIdentifier', (['node.name'], {}), '(node.name)\n', (1760, 1771), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((1818, 1838), 'quiche.pal.pal_block.StmtBlock', 'StmtBlock', (['node.body'], {}), '(node.body)\n', (1827, 1838), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((1867, 1897), 'quiche.pal.pal_block.ExprBlock', 'ExprBlock', (['node.decorator_list'], {}), '(node.decorator_list)\n', (1876, 1897), False, 'from quiche.pal.pal_block import 
PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((2131, 2154), 'quiche.pal.pal_block.ExprBlock', 'ExprBlock', (['node.targets'], {}), '(node.targets)\n', (2140, 2154), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((2688, 2708), 'quiche.pal.pal_block.StmtBlock', 'StmtBlock', (['node.body'], {}), '(node.body)\n', (2697, 2708), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((2729, 2751), 'quiche.pal.pal_block.StmtBlock', 'StmtBlock', (['node.orelse'], {}), '(node.orelse)\n', (2738, 2751), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((3026, 3046), 'quiche.pal.pal_block.StmtBlock', 'StmtBlock', (['node.body'], {}), '(node.body)\n', (3035, 3046), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((3067, 3089), 'quiche.pal.pal_block.StmtBlock', 'StmtBlock', (['node.orelse'], {}), '(node.orelse)\n', (3076, 3089), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((3281, 3306), 'quiche.pal.pal_block.WithItemBlock', 'WithItemBlock', (['node.items'], {}), '(node.items)\n', (3294, 3306), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((3313, 3333), 'quiche.pal.pal_block.StmtBlock', 'StmtBlock', (['node.body'], {}), '(node.body)\n', (3322, 3333), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((3548, 3573), 'quiche.pal.pal_block.WithItemBlock', 'WithItemBlock', 
(['node.items'], {}), '(node.items)\n', (3561, 3573), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((3580, 3600), 'quiche.pal.pal_block.StmtBlock', 'StmtBlock', (['node.body'], {}), '(node.body)\n', (3589, 3600), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((4288, 4309), 'quiche.pal.pal_block.SliceBlock', 'SliceBlock', (['node.dims'], {}), '(node.dims)\n', (4298, 4309), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((4517, 4536), 'quiche.pal.pal_block.ArgBlock', 'ArgBlock', (['node.args'], {}), '(node.args)\n', (4525, 4536), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((4593, 4618), 'quiche.pal.pal_block.ArgBlock', 'ArgBlock', (['node.kwonlyargs'], {}), '(node.kwonlyargs)\n', (4601, 4618), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((4644, 4671), 'quiche.pal.pal_block.ExprBlock', 'ExprBlock', (['node.kw_defaults'], {}), '(node.kw_defaults)\n', (4653, 4671), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((4724, 4748), 'quiche.pal.pal_block.ExprBlock', 'ExprBlock', (['node.defaults'], {}), '(node.defaults)\n', (4733, 4748), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, ArgBlock, WithItemBlock\n'), ((4932, 4955), 'quiche.pal.pal_block.PALIdentifier', 'PALIdentifier', (['node.arg'], {}), '(node.arg)\n', (4945, 4955), False, 'from quiche.pal.pal_block import PALIdentifier, PALLeaf, PALPrimitive, StmtBlock, ExprBlock, SliceBlock, 
ArgBlock, WithItemBlock\n')]
|
#Form classes can be declared here and imported into the routes view
#More to come soon
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
class TestForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
|
[
"wtforms.BooleanField",
"wtforms.SubmitField",
"wtforms.validators.DataRequired"
] |
[((422, 449), 'wtforms.BooleanField', 'BooleanField', (['"""Remember Me"""'], {}), "('Remember Me')\n", (434, 449), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField\n'), ((463, 485), 'wtforms.SubmitField', 'SubmitField', (['"""Sign In"""'], {}), "('Sign In')\n", (474, 485), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField\n'), ((317, 331), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (329, 331), False, 'from wtforms.validators import DataRequired\n'), ((387, 401), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (399, 401), False, 'from wtforms.validators import DataRequired\n')]
|
"""
MIT License
Copyright (c) 2022 <NAME> (https://github.com/vadniks)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Callable, List, Tuple
import requests as rq
import json
from time import sleep
from threading import Thread
sid = -1
pid = 0
name: str
level = 0
_HOST = 'http://127.0.0.1:5000'
_THRESHOLD = 0.5 # seconds
_waiterThread: Thread
_canWaitForServer = True
solo = True
_isReady = False
def connect(_name: str, script: str) -> bool:
global sid, pid, name
name = _name
try:
rsp: rq.Response = rq.post(f'{_HOST}/new',
json={'name': name, 'script': script, 'level': level})
except Exception: return False
if rsp.status_code == 200:
if (sid := int(json.loads(rsp.text)['sid'])) == -1:
return False
pid = int(json.loads(rsp.text)['pid'])
return True
# id name status
def checkForPlayers() -> List[Tuple[int, str, bool]] | None:
try:
rsp: rq.Response = rq.get(f'{_HOST}/chk/{pid}')
except Exception: return None
if rsp.status_code != 200:
return None
else:
a = json.loads(rsp.text)
_list = [(int(i[0]), i[1], bool(i[2])) for i in a]
return _list
def waitForPlayers(onWait: Callable, onFinish: Callable) -> Callable: # stop
global _waiterThread, _canWaitForServer, _isReady
def checkStatus(players: List[Tuple[int, str, bool]]) -> bool:
result = True
for i in players: result = result and i[2]
return result
def wait():
while _canWaitForServer:
if (players := checkForPlayers()) is not None:
if len(players) > 0 and checkStatus(players) and _isReady:
onFinish(players)
break
else:
onWait(players)
else:
onWait(None)
sleep(_THRESHOLD)
_waiterThread = Thread(target=wait)
_waiterThread.daemon = True
_waiterThread.start()
return endWaiter
def endWaiter():
global _canWaitForServer
_canWaitForServer = False
_waiterThread.join()
def quitt():
try: rq.post(f'{_HOST}/qt/{pid}')
except Exception: pass
# id name level x y gold
def tracePlayers() -> List[Tuple[int, str, int, int, int, int]] | None:
if solo: return None
try: rsp: rq.Response = rq.get(f'{_HOST}/trc/{sid}/{pid}')
except Exception: return None
if rsp.status_code != 200: return None
jsn = json.loads(rsp.text)
_list = []
for i in jsn:
_list.append((int(i[0]), i[1], int(i[2]), int(i[3]),
int(i[4]), int(i[5])))
return _list
def updatePlayer(lvl: int, x: int, y: int, goldAmount: int):
if solo: return
try:
rq.post(f'{_HOST}/upd/{pid}',
json={'level': lvl, 'x': x, 'y': y, 'gold': goldAmount})
except Exception: pass
# x y
def updateBoard(goldTakenFrom: Tuple[int, int]):
if solo: return
try: rq.post(f'{_HOST}/brd/{sid}/{level}',
json={'pid': pid, 'gtf_x': goldTakenFrom[0], 'gtf_y': goldTakenFrom[1]})
except Exception: pass
# pid x y
def traceBoard() -> List[Tuple[int, int, int]] | None:
if solo: return None
try: rsp: rq.Response = rq.get(f'{_HOST}/trc_b/{sid}/{pid}/{level}')
except Exception: return None
if rsp.status_code != 200: return None
jsn = json.loads(rsp.text)
_list = []
for i in jsn:
_list.append((int(i[0]), int(i[1]), int(i[2])))
return _list
def getCurrentGoldAmountOnBoard() -> int | None:
if solo: return None
try: rsp: rq.Response = rq.get(f'{_HOST}/gld/{sid}/{level}')
except Exception: return None
if rsp.status_code != 200: return None
return int(rsp.text)
# name score
def getSavedPlayers() -> List[Tuple[str, int]] | None:
try: rsp: rq.Response = rq.get(f'{_HOST}/db',
json={'mode': 'select', 'pid': pid})
except Exception: return None
if rsp.status_code != 200: return None
jsn = json.loads(rsp.text)
_list = []
[_list.append((i[0], int(i[1]))) for i in jsn]
return _list
def saveCurrentPlayerResult():
if solo: return
try: rq.Response = rq.post(f'{_HOST}/db',
json={'mode': 'insert', 'pid': pid})
except Exception: pass
def notifyPlayerIsReady():
global _isReady
_isReady = True
try: rq.post(f'{_HOST}/rd/{pid}')
except Exception: pass
def hasPlayerLeft(pid: int) -> bool | None:
if solo: return None
try: rsp: rq.Response = rq.get(f'{_HOST}/hpl/{pid}/{sid}')
except Exception: return None
if rsp.status_code != 200: return None
return bool(rsp.text)
|
[
"threading.Thread",
"json.loads",
"time.sleep",
"requests.get",
"requests.post"
] |
[((2931, 2950), 'threading.Thread', 'Thread', ([], {'target': 'wait'}), '(target=wait)\n', (2937, 2950), False, 'from threading import Thread\n'), ((3528, 3548), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (3538, 3548), False, 'import json\n'), ((4503, 4523), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (4513, 4523), False, 'import json\n'), ((5165, 5185), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (5175, 5185), False, 'import json\n'), ((1530, 1608), 'requests.post', 'rq.post', (['f"""{_HOST}/new"""'], {'json': "{'name': name, 'script': script, 'level': level}"}), "(f'{_HOST}/new', json={'name': name, 'script': script, 'level': level})\n", (1537, 1608), True, 'import requests as rq\n'), ((1993, 2021), 'requests.get', 'rq.get', (['f"""{_HOST}/chk/{pid}"""'], {}), "(f'{_HOST}/chk/{pid}')\n", (1999, 2021), True, 'import requests as rq\n'), ((2130, 2150), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (2140, 2150), False, 'import json\n'), ((3158, 3186), 'requests.post', 'rq.post', (['f"""{_HOST}/qt/{pid}"""'], {}), "(f'{_HOST}/qt/{pid}')\n", (3165, 3186), True, 'import requests as rq\n'), ((3404, 3438), 'requests.get', 'rq.get', (['f"""{_HOST}/trc/{sid}/{pid}"""'], {}), "(f'{_HOST}/trc/{sid}/{pid}')\n", (3410, 3438), True, 'import requests as rq\n'), ((3805, 3895), 'requests.post', 'rq.post', (['f"""{_HOST}/upd/{pid}"""'], {'json': "{'level': lvl, 'x': x, 'y': y, 'gold': goldAmount}"}), "(f'{_HOST}/upd/{pid}', json={'level': lvl, 'x': x, 'y': y, 'gold':\n goldAmount})\n", (3812, 3895), True, 'import requests as rq\n'), ((4060, 4174), 'requests.post', 'rq.post', (['f"""{_HOST}/brd/{sid}/{level}"""'], {'json': "{'pid': pid, 'gtf_x': goldTakenFrom[0], 'gtf_y': goldTakenFrom[1]}"}), "(f'{_HOST}/brd/{sid}/{level}', json={'pid': pid, 'gtf_x':\n goldTakenFrom[0], 'gtf_y': goldTakenFrom[1]})\n", (4067, 4174), True, 'import requests as rq\n'), ((4369, 4413), 'requests.get', 'rq.get', 
(['f"""{_HOST}/trc_b/{sid}/{pid}/{level}"""'], {}), "(f'{_HOST}/trc_b/{sid}/{pid}/{level}')\n", (4375, 4413), True, 'import requests as rq\n'), ((4734, 4770), 'requests.get', 'rq.get', (['f"""{_HOST}/gld/{sid}/{level}"""'], {}), "(f'{_HOST}/gld/{sid}/{level}')\n", (4740, 4770), True, 'import requests as rq\n'), ((5005, 5063), 'requests.get', 'rq.get', (['f"""{_HOST}/db"""'], {'json': "{'mode': 'select', 'pid': pid}"}), "(f'{_HOST}/db', json={'mode': 'select', 'pid': pid})\n", (5011, 5063), True, 'import requests as rq\n'), ((5345, 5404), 'requests.post', 'rq.post', (['f"""{_HOST}/db"""'], {'json': "{'mode': 'insert', 'pid': pid}"}), "(f'{_HOST}/db', json={'mode': 'insert', 'pid': pid})\n", (5352, 5404), True, 'import requests as rq\n'), ((5522, 5550), 'requests.post', 'rq.post', (['f"""{_HOST}/rd/{pid}"""'], {}), "(f'{_HOST}/rd/{pid}')\n", (5529, 5550), True, 'import requests as rq\n'), ((5677, 5711), 'requests.get', 'rq.get', (['f"""{_HOST}/hpl/{pid}/{sid}"""'], {}), "(f'{_HOST}/hpl/{pid}/{sid}')\n", (5683, 5711), True, 'import requests as rq\n'), ((2892, 2909), 'time.sleep', 'sleep', (['_THRESHOLD'], {}), '(_THRESHOLD)\n', (2897, 2909), False, 'from time import sleep\n'), ((1792, 1812), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (1802, 1812), False, 'import json\n'), ((1711, 1731), 'json.loads', 'json.loads', (['rsp.text'], {}), '(rsp.text)\n', (1721, 1731), False, 'import json\n')]
|
from pyspark.sql import SparkSession
class SparkSessionBuilder:
@staticmethod
def build():
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0,") \
.master("local[*]") \
.getOrCreate()
return spark
|
[
"pyspark.sql.SparkSession.builder.config"
] |
[((118, 211), 'pyspark.sql.SparkSession.builder.config', 'SparkSession.builder.config', (['"""spark.jars.packages"""', '"""org.apache.hadoop:hadoop-aws:2.7.0,"""'], {}), "('spark.jars.packages',\n 'org.apache.hadoop:hadoop-aws:2.7.0,')\n", (145, 211), False, 'from pyspark.sql import SparkSession\n')]
|
import hydra
from omegaconf import DictConfig
@hydra.main(config_path="conf", config_name="config.yaml")
def main(cfg: DictConfig) -> None:
print(cfg)
if __name__ == "__main__":
main()
|
[
"hydra.main"
] |
[((48, 105), 'hydra.main', 'hydra.main', ([], {'config_path': '"""conf"""', 'config_name': '"""config.yaml"""'}), "(config_path='conf', config_name='config.yaml')\n", (58, 105), False, 'import hydra\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 10 22:57:42 2020
@author: ninjaac
"""
"""
Given a string, find the first non-repeating character in it and return its index. If it doesn't exist, return -1.
Examples:
s = "leetcode"
return 0.
s = "loveleetcode"
return 2.
"""
from collections import Counter
class Solution:
@staticmethod
def firstUniqChar(s):
if s=="":
return -1
if s==" ":
return -1
freq=Counter(s)
print(freq.items())
for let,c in freq.items():
if c==1:
return s.index(let)
return -1
print(Solution().firstUniqChar(s='leedcode'))
|
[
"collections.Counter"
] |
[((489, 499), 'collections.Counter', 'Counter', (['s'], {}), '(s)\n', (496, 499), False, 'from collections import Counter\n')]
|
#!/usr/bin/python
#
# Gpprefdecrypt - Decrypt the password of local users added via Windows 2008 Group Policy Preferences.
#
# This tool decrypts the cpassword attribute value embedded in the Groups.xml file stored in the domain controller's Sysvol share.
#
# Updated by <NAME>
# Edited to run with Python 3.x
#
import sys, codecs
from Crypto.Cipher import AES
from base64 import b64decode
from Crypto import Random
def decrypt(cpassword):
# Init the key
# From MSDN: http://msdn.microsoft.com/en-us/library/2c15cbf0-f086-4c74-8b70-1f2fa45dd4be%28v=PROT.13%29#endNote2
#key = """
#4e 99 06 e8 <KEY> fa f4 93 10 62 0f fe e8
#f4 96 e8 06 cc 05 79 90 20 9b 09 a4 33 b6 6c 1b
#""".replace(" ","").replace("\n","").replace("\n","")
key = ("<KEY>"
"<KEY>")
decode_hex = codecs.getdecoder("hex_codec")
key = decode_hex(key)[0]
#print("decoded key " + str(key))
# Add padding to the base64 string and decode it
cpassword += "=" * ((4 - len(sys.argv[1]) % 4) % 4)
password = b64decode(cpassword)
# Decrypt the password
o = AES.new(key, AES.MODE_CBC, ("\x00" * 16).encode("utf8")).decrypt(password)
#iv = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
#o = AES.new(key, AES.MODE_CBC, iv).decrypt(password)
# Print it
print (o.decode('utf-16'))
return (o.decode('utf16'))
if(len(sys.argv) != 2):
print ("Usage: decrypt.py <cpassword>")
sys.exit(0)
cpassword = sys.argv[1]
try:
decrypt(cpassword)
except:
print("Input correct cpassword format...")
|
[
"sys.exit",
"codecs.getdecoder",
"base64.b64decode"
] |
[((841, 871), 'codecs.getdecoder', 'codecs.getdecoder', (['"""hex_codec"""'], {}), "('hex_codec')\n", (858, 871), False, 'import sys, codecs\n'), ((1064, 1084), 'base64.b64decode', 'b64decode', (['cpassword'], {}), '(cpassword)\n', (1073, 1084), False, 'from base64 import b64decode\n'), ((1483, 1494), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1491, 1494), False, 'import sys, codecs\n')]
|
from glob import glob
import os, sys, re
USAGE = """Usage: mk_symbols_file [PATTERN]...
Make resources/symbols.txt, using the files matching given PATTERN(s).
Each PATTERN must contain exactly one "*" wildcard,
for the location in the path of the symbol.
Earlier patterns have precedence over later ones.
Examples:
python3 mk_symbols_file.py resources/*_daily_bars.csv
"""
CORRUPTED_FILENAME = 'resources/corrupted_files.txt'
OUTPUT_FILENAME = 'resources/symbols.txt'
TIME_COL_NAME = 'Unix Timestamp'
PRICE_COL_NAME = 'Close'
GARBAGE_THRESH = 5
def main():
try:
patterns = sys.argv[1:]
assert len(patterns) > 0, 'Must provide at least 1 pattern'
if patterns[0].lower() in {'-h', '--help'}:
print(USAGE)
return
assert all(p.count('*') == 1 for p in patterns), 'Every pattern must have exactly 1 "*" wildcard'
except Exception as e:
print(e)
print(USAGE)
return
print('Finding files...')
# Get a dictionary of pattern -> matching filenames
fns = {p:glob(p) for p in patterns}
print('Extracting symbol list...')
# Flatten and filter list of symbols, extracted from filenames using regex
symbols = {re.fullmatch(p.replace('*', '(.*)'), x).group(1) for p,xs in fns.items() for x in xs}
with open(CORRUPTED_FILENAME) as f:
symbols.difference_update({line.strip() for line in f})
total = len(symbols)
print('Found {} symbols. Processing for start & end times... Progress -{:5.1f}%'.format(total, 0.0), end='', flush=True)
with open(OUTPUT_FILENAME, "w") as f_out:
print('Symbol,Start Time,End Time', file=f_out)
for i,sym in enumerate(symbols):
for p in patterns:
fn = p.replace('*', sym)
start,end = findStartEnd(fn)
if start is not None and end is not None:
break
if start is None or end is None:
print('Error reading data files for {}'.format(sym))
else:
print('{},{},{}'.format(sym, start, end), file=f_out)
print("\b\b\b\b\b\b{:5.1f}%".format(100.0 * i / total), end='', flush=True)
print(' - Done.\nMade symbols file "{}" successfully.'.format(OUTPUT_FILENAME))
def findStartEnd(fn):
try:
timeCol = None
priceCol = None
start = None
end = None
price = None
with open(fn) as f:
while timeCol is None or priceCol is None:
parts = next(f).strip().split(',')
try:
timeCol = parts.index(TIME_COL_NAME)
except ValueError:
pass
try:
priceCol = parts.index(PRICE_COL_NAME)
except ValueError:
pass
while start is None:
try:
start = int( next(f).strip().split(',')[timeCol] )
if start > 10000000000:
start = start // 1000
except (ValueError, IndexError):
pass
for line in f:
parts = line.strip().split(',')
try:
end = int( parts[timeCol] )
if end > 10000000000:
end = end // 1000
except (ValueError, IndexError):
pass
try:
newPrice = int( parts[priceCol] )
if price is not None and (newPrice > GARBAGE_THRESH*price or newPrice*GARBAGE_THRESH < price):
# Data is bad, return none
return None,None
price = newPrice
except (ValueError, IndexError):
pass
return start,end
except KeyboardInterrupt:
exit(1)
except:
return None,None
if __name__ == '__main__':
main()
|
[
"glob.glob"
] |
[((1077, 1084), 'glob.glob', 'glob', (['p'], {}), '(p)\n', (1081, 1084), False, 'from glob import glob\n')]
|
# Generated by Django 3.0.4 on 2020-05-30 01:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('owners', '0002_ownersmodel'),
]
operations = [
migrations.DeleteModel(
name='OwnersModel',
),
]
|
[
"django.db.migrations.DeleteModel"
] |
[((219, 261), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""OwnersModel"""'}), "(name='OwnersModel')\n", (241, 261), False, 'from django.db import migrations\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 18:04:26 2020
@author: hp
"""
import pandas as pd
import numpy as np
ratings= pd.read_csv('ratings.csv')
movies= pd.read_csv(r'movies.csv' )
ts = ratings['timestamp']
ts = pd.to_datetime(ts, unit = 's').dt.hour
movies['hours'] = ts
merged = ratings.merge(movies, left_on = 'movieId' , right_on = 'movieId', suffixes = ['_user',''])
merged = merged[['userId', 'movieId','genres','hours']]
merged = pd.concat([merged,merged['genres'].str.get_dummies(sep = '|')], axis = 1)
del merged['genres']
del merged['(no genres listed)']
def activateuserprofile(userId):
userprofile = merged.loc[merged['userId'] == userId]
del userprofile ['userId']
del userprofile['movieId']
userprofile = userprofile.groupby(['hours'], as_index = False, sort =True).sum()
userprofile.iloc[:,1:20] = userprofile.iloc[:,1:20].apply(lambda x:(x - np.min(x))/(np.max(x)-np.min(x)),axis = 1)
return(userprofile)
activeuser = activateuserprofile(30)
recommend = movies= pd.read_csv(r'recommend.csv' )
del merged['userId']
del merged['rating']
merged = merged.drop_duplicate()
user_pref = recommend.merge(merged, left_on = 'movieId' , right_on = 'movieId', suffixes = ['_user',''])
product = np.dot(user_pref.iloc[:,2:21].as_matrix(), activeuser.iloc[21,2:21].as_matrix())#IndexError: single positional indexer is out-of-bounds
preferences = np.stack((user_pref['movieId'], product), axis =-1)
df = pd.DataFrame(preferences, columns = ['movieId', 'prefrernces'])
result = (df.sort_values(['preferences'], ascending = False).iloc[0:10],0)
|
[
"pandas.DataFrame",
"numpy.stack",
"pandas.read_csv",
"numpy.min",
"numpy.max",
"pandas.to_datetime"
] |
[((141, 167), 'pandas.read_csv', 'pd.read_csv', (['"""ratings.csv"""'], {}), "('ratings.csv')\n", (152, 167), True, 'import pandas as pd\n'), ((177, 202), 'pandas.read_csv', 'pd.read_csv', (['"""movies.csv"""'], {}), "('movies.csv')\n", (188, 202), True, 'import pandas as pd\n'), ((1461, 1489), 'pandas.read_csv', 'pd.read_csv', (['"""recommend.csv"""'], {}), "('recommend.csv')\n", (1472, 1489), True, 'import pandas as pd\n'), ((1845, 1895), 'numpy.stack', 'np.stack', (["(user_pref['movieId'], product)"], {'axis': '(-1)'}), "((user_pref['movieId'], product), axis=-1)\n", (1853, 1895), True, 'import numpy as np\n'), ((1904, 1965), 'pandas.DataFrame', 'pd.DataFrame', (['preferences'], {'columns': "['movieId', 'prefrernces']"}), "(preferences, columns=['movieId', 'prefrernces'])\n", (1916, 1965), True, 'import pandas as pd\n'), ((239, 267), 'pandas.to_datetime', 'pd.to_datetime', (['ts'], {'unit': '"""s"""'}), "(ts, unit='s')\n", (253, 267), True, 'import pandas as pd\n'), ((926, 935), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (932, 935), True, 'import numpy as np\n'), ((938, 947), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (944, 947), True, 'import numpy as np\n'), ((948, 957), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (954, 957), True, 'import numpy as np\n')]
|
# A neural network which approximates linear function y = 2x + 3.
# The network has 1 layer with 1 node, which has 1 input (and a bias).
# As there is no activation effectively this node is a linear function.
# After +/- 10.000 iterations W should be close to 2 and B should be close to 3.
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(formatter={"float": "{: 0.3f}".format}, linewidth=np.inf)
np.random.seed(1)
X = np.array([[0], [1], [2], [3], [4]]) # X = input (here: 5 values)
Y = 2 * X + 3 # Y = output: y = 2x + 3 (as many values as there are X's)
W = np.random.normal(scale=0.1, size=(1, 1)) # layer: (1, 1) = 1 node with 1 input
B = np.random.normal(scale=0.1, size=(1, 1)) # bias: (1, 1) = for 1 node (and by definition only 1 bias value per node)
learning_rate = 0.001
iterations = 10000
error = []
print("initial :", "W =", W, "B =", B, "(random initialization)")
m = X.shape[0]
for _ in range(iterations):
# forward pass
a = W.dot(X.T) + B
# back propagation
da = a - Y.T # da = error
dz = da # no activation
dw = dz.dot(X) / m
db = np.sum(dz, axis=1, keepdims=True) / m
W -= learning_rate * dw
B -= learning_rate * db
error.append(np.average(da ** 2))
print("result :", "W =", W, "B =", B, "(after {} iterations)".format(iterations))
print("expected: W = 2, B = 3")
plt.plot(range(iterations), error)
plt.title("MSE (mean squared error)")
plt.xlabel("training iterations")
plt.ylabel("mse")
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.set_printoptions",
"numpy.random.seed",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.average",
"numpy.array",
"numpy.random.normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((343, 420), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'formatter': "{'float': '{: 0.3f}'.format}", 'linewidth': 'np.inf'}), "(formatter={'float': '{: 0.3f}'.format}, linewidth=np.inf)\n", (362, 420), True, 'import numpy as np\n'), ((421, 438), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (435, 438), True, 'import numpy as np\n'), ((444, 479), 'numpy.array', 'np.array', (['[[0], [1], [2], [3], [4]]'], {}), '([[0], [1], [2], [3], [4]])\n', (452, 479), True, 'import numpy as np\n'), ((589, 629), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.1)', 'size': '(1, 1)'}), '(scale=0.1, size=(1, 1))\n', (605, 629), True, 'import numpy as np\n'), ((673, 713), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.1)', 'size': '(1, 1)'}), '(scale=0.1, size=(1, 1))\n', (689, 713), True, 'import numpy as np\n'), ((1399, 1436), 'matplotlib.pyplot.title', 'plt.title', (['"""MSE (mean squared error)"""'], {}), "('MSE (mean squared error)')\n", (1408, 1436), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1470), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""training iterations"""'], {}), "('training iterations')\n", (1447, 1470), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1488), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mse"""'], {}), "('mse')\n", (1481, 1488), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1499), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1497, 1499), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1146), 'numpy.sum', 'np.sum', (['dz'], {'axis': '(1)', 'keepdims': '(True)'}), '(dz, axis=1, keepdims=True)\n', (1119, 1146), True, 'import numpy as np\n'), ((1226, 1245), 'numpy.average', 'np.average', (['(da ** 2)'], {}), '(da ** 2)\n', (1236, 1245), True, 'import numpy as np\n')]
|
# Licensed to SkyAPM org under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. SkyAPM org licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
ignored_chars = '//\n \t'
ignored_paths = [
"reporter/grpc"
]
license_header = ' '.join(
[
line.strip(ignored_chars) for line in """
// Licensed to SkyAPM org under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. SkyAPM org licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
""".splitlines()
]
).strip(ignored_chars)
def walk_through_dir(d) -> bool:
    """Check the license header of every ``.go`` file under *d*.

    Prints one status line per checked file and returns True only when
    every checked file starts with the expected ``license_header``.
    """
    all_ok = True
    for root, _, files in os.walk(d):
        # Skip any directory whose path matches an ignored fragment.
        if any(ignored_path in root for ignored_path in ignored_paths):
            continue
        for filename in files:
            if not filename.endswith(".go"):
                continue
            file_path = os.path.join(root, filename)
            with open(file_path, 'r') as f:
                # Collect the leading // comment lines, normalized the same
                # way license_header was built.
                comment_lines = [line.strip(ignored_chars) for line in f.readlines() if line.startswith('//')]
            header = ' '.join(comment_lines).strip()
            ok = header.startswith(license_header)
            print('%s license header in file: %s' % ('✅' if ok else '❌', file_path))
            all_ok &= ok
    return all_ok
if __name__ == "__main__":
    # Non-zero exit code signals a failed license check to CI.
    if not walk_through_dir("./"):
        sys.exit(1)
|
[
"os.walk",
"os.path.join",
"sys.exit"
] |
[((1981, 1991), 'os.walk', 'os.walk', (['d'], {}), '(d)\n', (1988, 1991), False, 'import os\n'), ((2796, 2807), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2804, 2807), False, 'import sys\n'), ((2329, 2357), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (2341, 2357), False, 'import os\n')]
|
#
# The MIT License
#
# Copyright 2020 Vector Informatik, GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#parallel_build.py
from __future__ import unicode_literals
from __future__ import print_function
import sys, os, subprocess, argparse, glob, shutil
from pprint import pprint
import pdb, time
from datetime import timedelta
from io import open
from vector.apps.DataAPI.vcproject_api import VCProjectApi
from threading import Thread, Lock, Semaphore
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
# VectorCAST installation directory; raises KeyError if the tool env is not set.
VCD = os.environ['VECTORCAST_DIR']
class ParallelBuild(object):
    """Build all environments of a VectorCAST Manage project in parallel.

    Concurrency is bounded by a semaphore sized from ``--jobs``.  Range
    checking is disabled globally while building and restored in
    ``__exit__``, which also merges the per-environment build logs into
    ``complete_build.log``.
    """

    def __init__(self):
        self.manageProject = None
        self.lock = Lock()  # serializes console output from worker threads
        parser = argparse.ArgumentParser()
        parser.add_argument('--project', '-p', help='Manage Project Name')
        parser.add_argument('--dryrun', help='Dry Run without build/execute', action="store_true")
        parser.add_argument('--jobs', '-j', help='Number of concurrent jobs', default="1")
        # Fix: help text was a copy-paste of --dryrun's.
        parser.add_argument('--verbose', help='Verbose output', action="store_true")
        parser.add_argument('--ci', help='Use CI Licenses', action="store_true", default = False)
        args = parser.parse_args()
        if args.ci:
            self.useCI = " --ci "
        else:
            self.useCI = ""
        # The environment variable takes precedence over the --project option.
        try:
            self.manageProject = os.environ['VCV_ENVIRONMENT_FILE']
        except KeyError:
            self.manageProject = args.project
        self.dryrun = args.dryrun
        if self.manageProject is None:
            print ("\n** Use either --project [Manage Project Name] or enviroment variable VCV_ENVIRONMENT_FILE to specify the manage project name")
            sys.exit()
        if not os.path.isfile(self.manageProject) and not os.path.isfile(self.manageProject + ".vcm"):
            raise IOError(self.manageProject + ' does not exist')
        # NOTE(fix): a stray unconditional ``return`` used to sit here, making
        # everything below unreachable and leaving the object half-initialized
        # (self.verbose / self.api / the semaphore were never set).
        if args.verbose:
            self.verbose = True
        else:
            self.verbose = False
        self.mpName = self.manageProject.replace(".vcm","")
        self.reportName = os.path.basename(self.manageProject).replace(".vcm","")
        self.buildSemaphore = Semaphore(int(args.jobs))
        print ("Disabling range check globally")
        try:
            self.api = VCProjectApi(self.manageProject)
        except:
            # The API can fail to open a project that needs a --status pass
            # first; run it and retry once.
            statusCmd = VCD + "/manage --project " + self.manageProject + self.useCI + " --status"
            self.runManageCmd(statusCmd)
            self.api = VCProjectApi(self.manageProject)
        # Remember the current setting so __exit__ can restore it.
        self.oldRangeCheck = self.api.project.options["enums"]["RANGE_CHECK"][0]
        self.api.close()
        buildCmd = VCD + "/manage --project " + self.manageProject + self.useCI + " --config=RANGE_CHECK=NONE"
        self.runManageCmd(buildCmd)
        self.api = VCProjectApi(self.manageProject)

    def __enter__(self):
        return self

    def __exit__(self, exct_type, exce_value, traceback):
        """Restore RANGE_CHECK and merge all build_*.log files into one log."""
        self.api.close()
        print ("Clearing disable of range check globally")
        buildCmd = VCD + "/manage --project " + self.manageProject + self.useCI +" --config=RANGE_CHECK="+self.oldRangeCheck
        self.runManageCmd(buildCmd)
        build_log_data = ""
        for file in glob.glob("build*.log"):
            build_log_data += " ".join(open(file,"r").readlines())
            os.remove(file)
        try:
            # Python 2 path: force the merged log out as unicode text.
            open("complete_build.log","w", encoding="utf-8").write(unicode(build_log_data))
        except:
            # Python 3 has no ``unicode`` builtin; a plain write is already text.
            open("complete_build.log","w").write(build_log_data)
        print(build_log_data)

    def th_Print(self, text):
        """Thread-safe print: serializes console output across build threads."""
        with self.lock:
            print (text)

    def runManageCmd(self, cmd, env = None):
        """Run a manage command; with *env*, tee output into a per-environment log.

        Honors --verbose (echo the command) and --dryrun (skip execution).
        """
        if self.verbose:
            self.th_Print (cmd)
        if self.dryrun:
            return
        if env:
            logName = "build_" + self.reportName + "_" + env.compiler.name + "_" + env.testsuite.name + "_" + env.name + ".log"
            build_log = open(logName,"w")
            process = subprocess.Popen(cmd, shell=True, stdout=build_log, stderr=build_log)
            process.wait()
            build_log.close()
        else:
            process = subprocess.Popen(cmd, shell=True)
            process.wait()

    def build_env(self,env):
        """Worker: build one environment, then release the concurrency semaphore."""
        if not self.verbose:
            self.th_Print ("Building: " + env.compiler.name + "/" + env.testsuite.name + "/" + env.name)
        buildCmd = VCD + "/manage --project " + self.manageProject + self.useCI + " --build --level " + env.compiler.name + "/" + env.testsuite.name + " --environment " + env.name
        self.runManageCmd(buildCmd,env)
        self.buildSemaphore.release()

    def doit(self):
        """Build every environment: system tests inline, the rest on worker threads."""
        buildingList = []
        for env in self.api.Environment.all():
            if env.system_tests:
                # System-test environments are built one at a time, inline.
                print("Building System Test: " + env.compiler.name + "/" + env.testsuite.name + "/" + env.name)
                buildCmd = VCD + "/manage --project " + self.manageProject + self.useCI + " --build --level " + env.compiler.name + "/" + env.testsuite.name + " --environment " + env.name
                self.runManageCmd(buildCmd,env)
                continue
            # Block here until a build slot is free (released by build_env).
            self.buildSemaphore.acquire()
            t = Thread(target=self.build_env,args=[env])
            t.daemon = True # thread dies with the program
            t.start()
            buildingList.append(t)
        # Poll until every worker thread has finished.
        checkThreads = True
        while checkThreads:
            checkThreads = False
            for t in buildingList:
                if t.is_alive():
                    time.sleep(1)
                    checkThreads = True
                    break
if __name__ == '__main__':
    # The context manager guarantees RANGE_CHECK is restored even on failure.
    with ParallelBuild() as parallel_build:
        parallel_build.doit()
|
[
"threading.Thread",
"os.remove",
"subprocess.Popen",
"argparse.ArgumentParser",
"os.path.basename",
"vector.apps.DataAPI.vcproject_api.VCProjectApi",
"time.sleep",
"threading.Lock",
"os.path.isfile",
"io.open",
"glob.glob",
"sys.exit"
] |
[((1737, 1743), 'threading.Lock', 'Lock', ([], {}), '()\n', (1741, 1743), False, 'from threading import Thread, Lock, Semaphore\n'), ((1762, 1787), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1785, 1787), False, 'import sys, os, subprocess, argparse, glob, shutil\n'), ((4015, 4047), 'vector.apps.DataAPI.vcproject_api.VCProjectApi', 'VCProjectApi', (['self.manageProject'], {}), '(self.manageProject)\n', (4027, 4047), False, 'from vector.apps.DataAPI.vcproject_api import VCProjectApi\n'), ((4465, 4488), 'glob.glob', 'glob.glob', (['"""build*.log"""'], {}), "('build*.log')\n", (4474, 4488), False, 'import sys, os, subprocess, argparse, glob, shutil\n'), ((2826, 2836), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2834, 2836), False, 'import sys, os, subprocess, argparse, glob, shutil\n'), ((3483, 3515), 'vector.apps.DataAPI.vcproject_api.VCProjectApi', 'VCProjectApi', (['self.manageProject'], {}), '(self.manageProject)\n', (3495, 3515), False, 'from vector.apps.DataAPI.vcproject_api import VCProjectApi\n'), ((4569, 4584), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (4578, 4584), False, 'import sys, os, subprocess, argparse, glob, shutil\n'), ((5281, 5299), 'io.open', 'open', (['logName', '"""w"""'], {}), "(logName, 'w')\n", (5285, 5299), False, 'from io import open\n'), ((5321, 5390), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'build_log', 'stderr': 'build_log'}), '(cmd, shell=True, stdout=build_log, stderr=build_log)\n', (5337, 5390), False, 'import sys, os, subprocess, argparse, glob, shutil\n'), ((5487, 5520), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (5503, 5520), False, 'import sys, os, subprocess, argparse, glob, shutil\n'), ((6587, 6628), 'threading.Thread', 'Thread', ([], {'target': 'self.build_env', 'args': '[env]'}), '(target=self.build_env, args=[env])\n', (6593, 6628), False, 'from threading import Thread, Lock, Semaphore\n'), 
((2861, 2895), 'os.path.isfile', 'os.path.isfile', (['self.manageProject'], {}), '(self.manageProject)\n', (2875, 2895), False, 'import sys, os, subprocess, argparse, glob, shutil\n'), ((2904, 2947), 'os.path.isfile', 'os.path.isfile', (["(self.manageProject + '.vcm')"], {}), "(self.manageProject + '.vcm')\n", (2918, 2947), False, 'import sys, os, subprocess, argparse, glob, shutil\n'), ((3265, 3301), 'os.path.basename', 'os.path.basename', (['self.manageProject'], {}), '(self.manageProject)\n', (3281, 3301), False, 'import sys, os, subprocess, argparse, glob, shutil\n'), ((3695, 3727), 'vector.apps.DataAPI.vcproject_api.VCProjectApi', 'VCProjectApi', (['self.manageProject'], {}), '(self.manageProject)\n', (3707, 3727), False, 'from vector.apps.DataAPI.vcproject_api import VCProjectApi\n'), ((4630, 4679), 'io.open', 'open', (['"""complete_build.log"""', '"""w"""'], {'encoding': '"""utf-8"""'}), "('complete_build.log', 'w', encoding='utf-8')\n", (4634, 4679), False, 'from io import open\n'), ((6938, 6951), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6948, 6951), False, 'import pdb, time\n'), ((4529, 4544), 'io.open', 'open', (['file', '"""r"""'], {}), "(file, 'r')\n", (4533, 4544), False, 'from io import open\n'), ((4738, 4769), 'io.open', 'open', (['"""complete_build.log"""', '"""w"""'], {}), "('complete_build.log', 'w')\n", (4742, 4769), False, 'from io import open\n')]
|
__author__ = "Robin 'r0w' Weiland"
__date__ = "2019-05-07"
__version__ = "0.0.0"
from configparser import ConfigParser
from io import StringIO, TextIOWrapper
from warnings import warn
class Section:
    """One config-file section whose options are exposed as attributes.

    Attribute/item writes on real options are forwarded to the owning
    :class:`Config` via ``saveEntry`` so they persist; only the two
    bookkeeping attributes ``_config`` and ``_name`` are stored directly.
    """
    _config = None  # back-reference to the owning Config instance
    _name = str()   # this section's name inside the config file
    def __getattribute__(self, item): return super(Section, self).__getattribute__(item)
    __getitem__ = __getattribute__
    def __setattr__(self, key, value):
        try:
            # Persist real options through the owning Config first.
            if key not in ('_config', '_name'): self._config.saveEntry(self._name, key, value)
            super(Section, self).__setattr__(key, value)
        except (AttributeError,):
            # Fix: this used ``raise NotImplemented(...)`` — NotImplemented is a
            # constant, not an exception type, so raising it was a TypeError.
            raise NotImplementedError('adding options will be possible in the future') # TODO: add option into section here
    __setitem__ = __setattr__
    def __contains__(self, item): return item in self.__dict__
class Config(ConfigParser):
def __init__(self, path, **kwargs):
self._path = path
super(Config, self).__init__(**kwargs)
self(self._path)
self.load()
def __call__(self, nFile=None):
if isinstance(nFile, (StringIO, TextIOWrapper,)):
self.read_file(nFile)
self._path = nFile.name
if isinstance(nFile, str):
self.read(nFile)
self._path = nFile
return self
def __repr__(self): return str(self._path)
__str__ = __repr__
def __getattribute__(self, item): return super(Config, self).__getattribute__(item)
__getitem__ = __getattribute__
def __setattr__(self, key, value):
try: super(Config, self).__setattr__(key, value)
except (AttributeError,):
raise NotImplemented('adding sections will be possible in the future') # TODO: add sections here
__setitem__ = __setattr__
def __contains__(self, item): return item in self.__dict__
def load(self):
for section in self.sections():
s = Section()
s._config = self
s._name = section
for option in self.options(section):
s.__dict__[option] = self.loadEntry(section, option)
self.__dict__[section] = s
def loadEntry(self, section, option, fallback=None, datatype=str, subdatatype=str, chunksize=None):
try:
loadedItem = self.get(section, option, fallback=fallback)
if datatype == list or datatype == tuple:
loadedItem = loadedItem.replace('(', '').replace(')', '')
if loadedItem == '': return datatype([])
loadedItem = datatype(map(subdatatype, loadedItem.split(', ')))
if chunksize is not None: loadedItem = datatype((zip(*[iter(loadedItem)] * chunksize)))
elif datatype == bool: loadedItem = bool(int(loadedItem))
else: loadedItem = datatype(loadedItem)
return loadedItem
except (Exception,) as e:
warn(f'failed loading {section}:{option}; returned fallback; {e.__name__}')
return fallback
def save(self):
with open(self._path, 'w') as configfile: self.write(configfile)
def saveEntry(self, section, option, value, saveToFile=True):
if isinstance(value, (list, tuple,)): value = ', '.join(value)
elif type(value) == bool: value = str(int(value))
self.set(section, option, str(value))
if saveToFile: self.save()
if __name__ == '__main__':
    from pathlib import Path
    # Manual smoke test: open a local config file (machine-specific path)
    # and read an option through attribute access.
    c = Config(Path(r'C:\Users\robin\Documents\Private\Python\GameResources\testing\config.ini').open('r'))
    print(c.gameplay.player_speed)
    # c.gameplay['player-speed'] = 4
|
[
"warnings.warn",
"pathlib.Path"
] |
[((2878, 2953), 'warnings.warn', 'warn', (['f"""failed loading {section}:{option}; returned fallback; {e.__name__}"""'], {}), "(f'failed loading {section}:{option}; returned fallback; {e.__name__}')\n", (2882, 2953), False, 'from warnings import warn\n'), ((3426, 3524), 'pathlib.Path', 'Path', (['"""C:\\\\Users\\\\robin\\\\Documents\\\\Private\\\\Python\\\\GameResources\\\\testing\\\\config.ini"""'], {}), "(\n 'C:\\\\Users\\\\robin\\\\Documents\\\\Private\\\\Python\\\\GameResources\\\\testing\\\\config.ini'\n )\n", (3430, 3524), False, 'from pathlib import Path\n')]
|
import asyncio
import discord
import sys
from discord.ext import commands
from utils import checks
from mods.cog import Cog
class Commands(Cog):
	"""Admin commands for per-server/per-channel bot prefixes and for
	enabling/disabling (blacklisting) bot commands at server, channel,
	role, user or global scope.

	All settings are persisted through ``bot.mysql.cursor``; ``bot.escape``
	sanitizes values interpolated into SQL strings.
	"""
	def __init__(self, bot):
		super().__init__(bot)
		self.cursor = bot.mysql.cursor
		self.escape = bot.escape
	@commands.group(pass_context=True, aliases=['setprefix', 'changeprefix'], invoke_without_command=True, no_pm=True)
	@checks.admin_or_perm(manage_server=True)
	async def prefix(self, ctx, *, txt:str=None):
		"""Change the Bots Prefix for the Server"""
		# No argument: report the server prefix (default '.') plus any
		# channel-specific override, then bail out.
		if txt is None:
			sql = "SELECT prefix FROM `prefix` WHERE server={0}"
			sql = sql.format(ctx.message.server.id)
			sql_channel = "SELECT prefix FROM `prefix_channel` WHERE server={0} AND channel={1}"
			sql_channel = sql_channel.format(ctx.message.server.id, ctx.message.channel.id)
			result = self.cursor.execute(sql).fetchall()
			result2 = self.cursor.execute(sql_channel).fetchall()
			if len(result) == 0:
				server_prefix = '.'
			else:
				server_prefix = result[0]['prefix']
			if len(result2) == 0:
				channel_prefix = None
			else:
				channel_prefix = result2[0]['prefix']
			msg = "Server Prefix: `{0}`\n".format(server_prefix)
			if channel_prefix != None:
				msg += "**Current** Channel Prefix: `{0}`".format(channel_prefix)
			await self.bot.say(msg)
			return
		# First prefix for this server inserts a row; later calls update it.
		sql = "INSERT INTO `prefix` (`server`, `prefix`, `id`) VALUES (%s, %s, %s)"
		update_sql = "UPDATE `prefix` SET prefix={0} WHERE server={1}"
		update_sql = update_sql.format(self.escape(txt), ctx.message.server.id)
		check = "SELECT server FROM `prefix` WHERE server={0}"
		check = check.format(ctx.message.server.id)
		result = self.cursor.execute(check).fetchall()
		if len(result) == 0:
			self.cursor.execute(sql, (ctx.message.server.id, txt, ctx.message.author.id))
			self.cursor.commit()
			await self.bot.say(":white_check_mark: Set bot prefix to \"{0}\" for the server\n".format(txt))
		else:
			self.cursor.execute(update_sql)
			self.cursor.commit()
			await self.bot.say(":white_check_mark: Updated bot prefix to \"{0}\" for the server".format(txt))
	@prefix.command(pass_context=True, name='channel', no_pm=True)
	@checks.admin_or_perm(manage_server=True)
	async def _prefix_channel(self, ctx, *, txt:str):
		"""Change the Bots Prefix for the current Channel"""
		# A channel mention anywhere in the message selects the target channel;
		# the mention text is then stripped out of the prefix value itself.
		channel = ctx.message.channel
		for c in ctx.message.channel_mentions:
			channel = c
		txt = txt.replace(channel.mention, '').replace('#'+channel.name, '')
		sql = "INSERT INTO `prefix_channel` (`server`, `prefix`, `channel`, `id`) VALUES (%s, %s, %s, %s)"
		update_sql = "UPDATE `prefix_channel` SET prefix={0} WHERE server={1} AND channel={2}"
		update_sql = update_sql.format(self.escape(txt), ctx.message.server.id, channel.id)
		check = "SELECT * FROM `prefix_channel` WHERE server={0} AND channel={1}"
		check = check.format(ctx.message.server.id, channel.id)
		result = self.cursor.execute(check).fetchall()
		if len(result) == 0:
			self.cursor.execute(sql, (ctx.message.server.id, txt, channel.id, ctx.message.author.id))
			self.cursor.commit()
			await self.bot.say(":white_check_mark: Set bot prefix to \"{0}\" for {1}".format(txt, channel.mention))
		else:
			self.cursor.execute(update_sql)
			self.cursor.commit()
			await self.bot.say(":white_check_mark: Updated bot prefix to \"{0}\" for {1}".format(txt, channel.mention))
	@prefix.command(pass_context=True, name='reset', no_pm=True)
	@checks.admin_or_perm(manage_server=True)
	async def _prefix_reset(self, ctx, what:str=None, channel:discord.Channel=None):
		"""Reset All Custom Set Prefixes For the Bot"""
		# Scopes: server (default) / channel / channels / all|everything.
		if what is None or what == "server":
			sql = "DELETE FROM `prefix` WHERE server={0}"
			sql = sql.format(ctx.message.server.id)
			check = "SELECT * FROM `prefix` WHERE server={0}"
			check = check.format(ctx.message.server.id)
			result = self.cursor.execute(check).fetchall()
			if len(result) == 0:
				await self.bot.say(":no_entry: Current server does **not** have a custom prefix set!")
				return
			else:
				self.cursor.execute(sql)
				self.cursor.commit()
				await self.bot.say(":exclamation: **Reset server prefix**\nThis does not reset channel prefixes, run \"all\" after reset to reset all prefixes *or* \"channels\" to reset all custom channel prefixes.")
		elif what == "channel":
			# Defaults to the channel the command was invoked in.
			if channel is None:
				channel = ctx.message.channel
			sql = "DELETE FROM `prefix_channel` WHERE server={0} AND channel={1}"
			sql = sql.format(ctx.message.server.id, channel.id)
			check = "SELECT * FROM `prefix_channel` WHERE server={0} AND channel={1}"
			check = check.format(ctx.message.server.id, channel.id)
			result = self.cursor.execute(check).fetchall()
			if len(result) == 0:
				await self.bot.say(":no_entry: {0} does **not** have a custom prefix Set!\nMention the channel after \"reset channel\" for a specific channel.".format(channel.mention))
				return
			else:
				self.cursor.execute(sql)
				self.cursor.commit()
				await self.bot.say(":exclamation: Reset {0}'s prefix!\nThis does **not** reset all custom channel prefixes, \"reset channels\" to do so.".format(channel.mention))
			return
		elif what == "channels":
			sql = "DELETE FROM `prefix_channel` WHERE server={0}"
			sql = sql.format(ctx.message.server.id)
			check = "SELECT * FROM `prefix_channel` WHERE server={0}"
			check = check.format(ctx.message.server.id)
			result = self.cursor.execute(check).fetchall()
			if len(result) == 0:
				await self.bot.say(":no_entry: Server does **not** reset a custom prefix set for any channel!\nMention the channel after \"reset channel\" for a specific channel.")
				return
			else:
				self.cursor.execute(sql)
				self.cursor.commit()
				await self.bot.say(":exclamation: Reset all channels custom prefixes!")
			return
		elif what == "all" or what == "everything":
			# Drop both the server-wide row and every channel override.
			sql = "DELETE FROM `prefix_channel` WHERE server={0}"
			sql = sql.format(ctx.message.server.id)
			sql2 = "DELETE FROM `prefix` WHERE server={0}"
			sql2 = sql2.format(ctx.message.server.id)
			self.cursor.execute(sql)
			self.cursor.execute(sql2)
			self.cursor.commit()
			await self.bot.say(":warning: Reset all custom server prefix settings!")
			return
		else:
			await self.bot.say(":no_entry: Invalid Option\nOptions: `server, channel, channels, all/everything`")
	# Commands that may never be disabled (needed to manage the blacklist itself).
	good_commands = ['command', 'blacklist', 'help', 'invite']
	async def command_toggle(self, t:str, ctx, cmd:str, user=None, msg=True):
		"""Toggle *cmd* at scope *t* ('server'/'channel'/'user'/'role'/'global').

		A row in `command_blacklist` means disabled, so toggling inserts or
		deletes that row.  *user* carries the scope target (member, channel
		or role depending on *t*); *msg*=False suppresses the confirmation.
		"""
		try:
			if cmd in self.good_commands:
				await self.bot.send_message(ctx.message.channel, ':no_entry: You cannot disable command: `{0}`!'.format(self.good_commands[self.good_commands.index(cmd)]))
				return
			if t == 'server':
				sql = "SELECT * FROM `command_blacklist` WHERE type='server' AND server={0} AND command={1}"
				sql = sql.format(ctx.message.server.id, self.escape(cmd))
				result = self.cursor.execute(sql).fetchall()
				if len(result) == 0:
					sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`) VALUES (%s, %s, %s)'
					self.cursor.execute(sql, (cmd, "server", ctx.message.server.id))
					self.cursor.commit()
					if msg:
						await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}`.'.format(cmd))
				else:
					sql = "DELETE FROM `command_blacklist` WHERE type='server' AND server={0} AND command={1}"
					sql = sql.format(ctx.message.server.id, self.escape(cmd))
					self.cursor.execute(sql)
					self.cursor.commit()
					if msg:
						await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}`.'.format(cmd))
			elif t == 'channel':
				channel = user
				sql = "SELECT * FROM `command_blacklist` WHERE type='channel' AND server={0} AND channel={1} AND command={2}"
				sql = sql.format(ctx.message.server.id, channel.id, self.escape(cmd))
				result = self.cursor.execute(sql).fetchall()
				if len(result) == 0:
					sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`, `channel`) VALUES (%s, %s, %s, %s)'
					self.cursor.execute(sql, (cmd, "channel", ctx.message.server.id, channel.id))
					self.cursor.commit()
					if msg:
						await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}` for channel {1}.'.format(cmd, channel.mention))
				else:
					sql = "DELETE FROM `command_blacklist` WHERE type='channel' AND server={0} AND channel={1} AND command={2}"
					sql = sql.format(ctx.message.server.id, channel.id, self.escape(cmd))
					self.cursor.execute(sql)
					self.cursor.commit()
					if msg:
						await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` for channel {1}.'.format(cmd, channel.mention))
			elif t == 'user':
				sql = "SELECT * FROM `command_blacklist` WHERE type='user' AND server={0} AND user={1} AND command={2}"
				sql = sql.format(ctx.message.server.id, user.id, self.escape(cmd))
				result = self.cursor.execute(sql).fetchall()
				if len(result) == 0:
					sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`, `user`) VALUES (%s, %s, %s, %s)'
					self.cursor.execute(sql, (cmd, "user", ctx.message.server.id, user.id))
					self.cursor.commit()
					if msg:
						await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}` for user `{1}`.'.format(cmd, user))
				else:
					sql = "DELETE FROM `command_blacklist` WHERE type='user' AND server={0} AND user={1} AND command={2}"
					sql = sql.format(ctx.message.server.id, user.id, self.escape(cmd))
					self.cursor.execute(sql)
					self.cursor.commit()
					if msg:
						await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` for user `{1}`.'.format(cmd, user))
			elif t == 'role':
				role = user
				sql = "SELECT * FROM `command_blacklist` WHERE type='role' AND server={0} AND role={1} AND command={2}"
				sql = sql.format(ctx.message.server.id, role.id, self.escape(cmd))
				result = self.cursor.execute(sql).fetchall()
				if len(result) == 0:
					sql = 'INSERT INTO `command_blacklist` (`command`, `type`, `server`, `role`) VALUES (%s, %s, %s, %s)'
					self.cursor.execute(sql, (cmd, "role", ctx.message.server.id, role.id))
					self.cursor.commit()
					if msg:
						await self.bot.send_message(ctx.message.channel, ':negative_squared_cross_mark: Disabled command `{0}` for role {1}.'.format(cmd, role.mention))
				else:
					sql = "DELETE FROM `command_blacklist` WHERE type='role' AND server={0} AND role={1} AND command={2}"
					sql = sql.format(ctx.message.server.id, role.id, self.escape(cmd))
					self.cursor.execute(sql)
					self.cursor.commit()
					if msg:
						await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` for role {1}.'.format(cmd, role.mention))
			elif t == 'global':
				sql = "SELECT * FROM `command_blacklist` WHERE type='global' AND command={0}"
				sql = sql.format(self.escape(cmd))
				result = self.cursor.execute(sql).fetchall()
				if len(result) == 0:
					sql = 'INSERT INTO `command_blacklist` (`command`, `type`) VALUES (%s, %s)'
					self.cursor.execute(sql, (cmd, "global"))
					self.cursor.commit()
					if msg:
						await self.bot.send_message(ctx.message.channel, ':globe_with_meridians: Disabled command `{0}` globally.'.format(cmd))
				else:
					sql = "DELETE FROM `command_blacklist` WHERE type='global' AND command={0}"
					sql = sql.format(self.escape(cmd))
					self.cursor.execute(sql)
					self.cursor.commit()
					if msg:
						await self.bot.send_message(ctx.message.channel, ':white_check_mark: Enabled command `{0}` globally.'.format(cmd))
			else:
				return
		except Exception as e:
			# Surface any failure (bad SQL, missing perms, ...) in the channel.
			await self.bot.send_message(ctx.message.channel, str(e))
	async def module_command_toggle(self, module, t:str, ctx):
		"""Toggle every command that belongs to *module* at scope *t*; return the count."""
		try:
			count = 0
			disabled = []
			for command in self.bot.commands:
				if self.bot.commands[command].module == module and command not in disabled:
					count += 1
					cmd = str(self.bot.commands[command].name)
					await self.command_toggle(t, ctx, cmd, msg=False)
					# Short pause between toggles while bulk-updating.
					await asyncio.sleep(0.21)
					disabled.append(command)
			return count
		except Exception as e:
			await self.bot.send_message(ctx.message.channel, str(e))
	async def get_modules(self):
		"""List toggleable command modules (every mods.* except internal ones)."""
		modules = []
		for module in sys.modules:
			if module.startswith('mods.'):
				if module == 'mods.Repl' or module == 'mods.Stats' or module == 'mods.Commands':
					continue
				mod = module.replace('mods.', '')
				modules.append(mod)
		return modules
	@commands.group(pass_context=True, invoke_without_command=True, aliases=['commands', 'cmd'], no_pm=True)
	@checks.admin_or_perm(manage_server=True)
	async def command(self, ctx, cmd:str):
		"""Toggle a command for the server"""
		if cmd in self.bot.commands:
			cmd = str(self.bot.commands[cmd])
			await self.command_toggle('server', ctx, cmd)
		else:
			await self.bot.say(':no_entry: `Command does not exist.`')
	@command.command(name='toggle', aliases=['enable', 'disable'], pass_context=True, invoke_without_command=True, no_pm=True)
	@checks.admin_or_perm(manage_server=True)
	async def cmd_toggle(self, ctx, cmd:str):
		"""Server wide Command Toggle"""
		if cmd in self.bot.commands:
			cmd = str(self.bot.commands[cmd])
			await self.command_toggle('server', ctx, cmd)
		else:
			await self.bot.say(':no_entry: `Command does not exist.`')
	@command.command(name='user', pass_context=True, aliases=['member'], invoke_without_command=True, no_pm=True)
	@checks.admin_or_perm(manage_server=True)
	async def command_toggle_user(self, ctx, cmd:str, user:discord.User=None):
		"""Toggle Command for a user"""
		# Defaults to the invoking user.
		if user is None:
			user = ctx.message.author
		if cmd in self.bot.commands:
			cmd = str(self.bot.commands[cmd])
			await self.command_toggle('user', ctx, cmd, user)
		else:
			await self.bot.say(':no_entry: `Command does not exist.`')
	@command.command(name='role', pass_context=True, aliases=['rank'], invoke_without_command=True, no_pm=True)
	@checks.admin_or_perm(manage_server=True)
	async def command_toggle_role(self, ctx, cmd:str, role:discord.Role):
		"""Toggle Command for a role"""
		if cmd in self.bot.commands:
			cmd = str(self.bot.commands[cmd])
			await self.command_toggle('role', ctx, cmd, role)
		else:
			await self.bot.say(':no_entry: `Command does not exist.`')
	@command.command(name='channel', pass_context=True, invoke_without_command=True, no_pm=True)
	@checks.admin_or_perm(manage_server=True)
	async def command_toggle_channel(self, ctx, cmd:str, chan:discord.Channel=None):
		"""Toggle Command for a channel"""
		# Defaults to the channel the command was invoked in.
		if chan is None:
			chan = ctx.message.channel
		if cmd in self.bot.commands:
			cmd = str(self.bot.commands[cmd])
			await self.command_toggle('channel', ctx, cmd, chan)
		else:
			await self.bot.say(':no_entry: `Command does not exist.`')
	@command.command(name='global', pass_context=True, invoke_without_command=True)
	@checks.is_owner()
	async def command_toggle_global(self, ctx, cmd:str):
		"""Toggle command globally"""
		if cmd in self.bot.commands:
			cmd = str(self.bot.commands[cmd])
			await self.command_toggle('global', ctx, cmd)
		else:
			await self.bot.say(':no_entry: `Command does not exist.`')
	@command.group(name='module', pass_context=True, invoke_without_command=True, no_pm=True)
	@checks.admin_or_perm(manage_server=True)
	async def command_toggle_module(self, ctx, module:str, chan:discord.Channel=None):
		"""Toggle a bot command module"""
		try:
			mod = sys.modules['mods.{0}'.format(module)]
		except KeyError:
			modules = await self.get_modules()
			await self.bot.say(':no_entry: Invalid Module\n**Modules**\n`{0}`'.format(', '.join(modules)))
			return
		# Channel argument switches the scope from server-wide to per-channel.
		if chan:
			count = await self.module_command_toggle(mod, 'channel', ctx)
		else:
			count = await self.module_command_toggle(mod, 'server', ctx)
		await self.bot.say(':white_check_mark: Disabled **{0}** commands in module `{1}`.'.format(count, module))
	@command_toggle_module.command(name='list', pass_context=True, invoke_without_command=True)
	async def command_toggle_module_list(self, ctx):
		"""List the modules that can be toggled."""
		modules = await self.get_modules()
		await self.bot.say(':information_source: **Modules**\n`{0}`'.format(', '.join(modules)))
	@command.command(name='all', pass_context=True, invoke_without_command=True, no_pm=True)
	@checks.admin_or_perm(manage_server=True)
	async def command_toggle_all(self, ctx):
		"""Re-enable every command disabled on this server."""
		sql = 'SELECT COUNT(*) FROM `command_blacklist` WHERE server={0}'
		sql = sql.format(ctx.message.server.id)
		count = str(self.cursor.execute(sql).fetchall()[0]['COUNT(*)'])
		sql = 'DELETE FROM `command_blacklist` WHERE server={0}'
		sql = sql.format(ctx.message.server.id)
		self.cursor.execute(sql)
		self.cursor.commit()
		await self.bot.say(':white_check_mark: Enabled **{0}** server command(s).'.format(count))
	@command.command(name='list', pass_context=True, invoke_without_command=True, no_pm=True)
	async def command_list(self, ctx):
		"""List every command disabled on this server (plus global ones)."""
		sql = 'SELECT * FROM `command_blacklist` WHERE server={0} OR type="global"'
		sql = sql.format(ctx.message.server.id)
		result = self.cursor.execute(sql).fetchall()
		if len(result) == 0:
			await self.bot.say(':no_entry: Server does **not** have any commands blacklisted.')
			return
		msg = ''
		for s in result:
			if s['type'] == 'global':
				msg += ':globe_with_meridians: Globaly Command Disabled: `{0}`\n'.format(s['command'])
			elif s['type'] == 'server':
				msg += ':desktop: Command Disabled on Server: `{0}`\n'.format(s['command'])
			elif s['type'] == 'channel':
				msg += ':arrow_right: Command Disabled in <#{0}>: `{1}`\n'.format(s['channel'] ,s['command'])
			elif s['type'] == 'role':
				msg += ':eight_spoked_asterisk: Command Disabled for <@&{0}>: `{1}`\n'.format(s['role'], s['command'])
			elif s['type'] == 'user':
				# Resolve the user id to a member if the bot can still see them.
				user = discord.utils.get(self.bot.get_all_members(), id=str(s['user']))
				if user is None:
					user = '<@{0}> (Not Found)'.format(s['user'])
				msg += ':bust_in_silhouette: Command Disabled for **{0}**: `{1}`\n'.format(user, s['command'])
		await self.bot.say(':white_check_mark: **Commands Disabled**\n'+msg)
def setup(bot):
	"""Extension entry point used by discord.py to attach this cog."""
	cog = Commands(bot)
	bot.add_cog(cog)
|
[
"utils.checks.is_owner",
"utils.checks.admin_or_perm",
"asyncio.sleep",
"discord.ext.commands.group"
] |
[((259, 376), 'discord.ext.commands.group', 'commands.group', ([], {'pass_context': '(True)', 'aliases': "['setprefix', 'changeprefix']", 'invoke_without_command': '(True)', 'no_pm': '(True)'}), "(pass_context=True, aliases=['setprefix', 'changeprefix'],\n invoke_without_command=True, no_pm=True)\n", (273, 376), False, 'from discord.ext import commands\n'), ((375, 415), 'utils.checks.admin_or_perm', 'checks.admin_or_perm', ([], {'manage_server': '(True)'}), '(manage_server=True)\n', (395, 415), False, 'from utils import checks\n'), ((2125, 2165), 'utils.checks.admin_or_perm', 'checks.admin_or_perm', ([], {'manage_server': '(True)'}), '(manage_server=True)\n', (2145, 2165), False, 'from utils import checks\n'), ((3381, 3421), 'utils.checks.admin_or_perm', 'checks.admin_or_perm', ([], {'manage_server': '(True)'}), '(manage_server=True)\n', (3401, 3421), False, 'from utils import checks\n'), ((12437, 12545), 'discord.ext.commands.group', 'commands.group', ([], {'pass_context': '(True)', 'invoke_without_command': '(True)', 'aliases': "['commands', 'cmd']", 'no_pm': '(True)'}), "(pass_context=True, invoke_without_command=True, aliases=[\n 'commands', 'cmd'], no_pm=True)\n", (12451, 12545), False, 'from discord.ext import commands\n'), ((12543, 12583), 'utils.checks.admin_or_perm', 'checks.admin_or_perm', ([], {'manage_server': '(True)'}), '(manage_server=True)\n', (12563, 12583), False, 'from utils import checks\n'), ((12978, 13018), 'utils.checks.admin_or_perm', 'checks.admin_or_perm', ([], {'manage_server': '(True)'}), '(manage_server=True)\n', (12998, 13018), False, 'from utils import checks\n'), ((13398, 13438), 'utils.checks.admin_or_perm', 'checks.admin_or_perm', ([], {'manage_server': '(True)'}), '(manage_server=True)\n', (13418, 13438), False, 'from utils import checks\n'), ((13900, 13940), 'utils.checks.admin_or_perm', 'checks.admin_or_perm', ([], {'manage_server': '(True)'}), '(manage_server=True)\n', (13920, 13940), False, 'from utils import checks\n'), 
((14334, 14374), 'utils.checks.admin_or_perm', 'checks.admin_or_perm', ([], {'manage_server': '(True)'}), '(manage_server=True)\n', (14354, 14374), False, 'from utils import checks\n'), ((14821, 14838), 'utils.checks.is_owner', 'checks.is_owner', ([], {}), '()\n', (14836, 14838), False, 'from utils import checks\n'), ((15206, 15246), 'utils.checks.admin_or_perm', 'checks.admin_or_perm', ([], {'manage_server': '(True)'}), '(manage_server=True)\n', (15226, 15246), False, 'from utils import checks\n'), ((16208, 16248), 'utils.checks.admin_or_perm', 'checks.admin_or_perm', ([], {'manage_server': '(True)'}), '(manage_server=True)\n', (16228, 16248), False, 'from utils import checks\n'), ((11996, 12015), 'asyncio.sleep', 'asyncio.sleep', (['(0.21)'], {}), '(0.21)\n', (12009, 12015), False, 'import asyncio\n')]
|
import requests
import re
import os
import itertools
from bs4 import BeautifulSoup as beSo
from clint.textui import colored
def check_structure(name, basedir):
    """Return True if the question folder and its .in/.op files all exist."""
    question_dir = os.path.join(basedir, f'{name}')
    required = (
        question_dir,
        os.path.join(question_dir, f'{name}.in'),
        os.path.join(question_dir, f'{name}.op'),
    )
    return all(os.path.exists(entry) for entry in required)
def _append_samples(test, dest_path, css_class):
    """Append every sample block of CSS class *css_class* ('input'/'output')
    found in a parsed test section to the file at *dest_path*, turning
    <br> tags into newlines and separating consecutive cases with a blank line."""
    for t in test.findAll("div", attrs={"class": css_class}):
        parts = t.pre.contents
        with open(dest_path, 'a') as f:
            for i in range(len(parts)):
                if str(parts[i]) in ('<br>', '<br/>'):
                    # Last <br> of a sample gets a blank separator line.
                    f.write('\n\n') if i == len(parts) - 1 else f.write('\n')
                    continue
                f.write(parts[i])


def fetch_tests(file_list, contestName):
    """Scrape the sample test cases of a Codeforces contest and append them
    to each problem's <name>.in / <name>.op files.

    file_list   -- problem folder names, in the same order as the problems
                   appear on the contest page.
    contestName -- contest folder name; must contain the contest number.
    """
    try:
        # Work inside the contest directory whether or not we are already in it.
        basedir = os.path.join(os.getcwd(), contestName) if not os.path.basename(os.getcwd()) == contestName else os.getcwd()
        contest_number = ''.join(re.findall(r'\d+', contestName))
        if not len(contest_number):
            print(colored.red("Invalid contest number."))
            return
        load_page = requests.get(f"https://codeforces.com/contest/{contest_number}/problems")
        soup = beSo(load_page.content, 'html.parser')
        tests = soup.findAll("div", attrs={"class":"sample-tests"})
        if not tests:
            print(colored.red("Invalid contest number."))
            return
        print("Fetching sample test cases...")
        for file_name, test in zip(file_list, tests):
            # Abort on the first problem whose folder layout is wrong.
            if not check_structure(file_name, basedir):
                print(colored.red(f"Failed to add sample test cases: Incorrect directory structure !!"))
                return
            in_path = os.path.join(basedir, f'{file_name}', f'{file_name}.in')
            op_path = os.path.join(basedir, f'{file_name}', f'{file_name}.op')
            _append_samples(test, in_path, "input")
            _append_samples(test, op_path, "output")
        print("Sample test cases added.")
    # BUG FIX: the original bare `except:` also swallowed KeyboardInterrupt
    # and SystemExit; scraping failures are still reported as a warning.
    except Exception:
        print(colored.red("There was some error fetching the tests !!"))
|
[
"clint.textui.colored.red",
"os.getcwd",
"re.findall",
"requests.get",
"bs4.BeautifulSoup",
"os.path.join"
] |
[((845, 918), 'requests.get', 'requests.get', (['f"""https://codeforces.com/contest/{contest_number}/problems"""'], {}), "(f'https://codeforces.com/contest/{contest_number}/problems')\n", (857, 918), False, 'import requests\n'), ((930, 968), 'bs4.BeautifulSoup', 'beSo', (['load_page.content', '"""html.parser"""'], {}), "(load_page.content, 'html.parser')\n", (934, 968), True, 'from bs4 import BeautifulSoup as beSo\n'), ((654, 665), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (663, 665), False, 'import os\n'), ((695, 726), 're.findall', 're.findall', (['"""\\\\d+"""', 'contestName'], {}), "('\\\\d+', contestName)\n", (705, 726), False, 'import re\n'), ((262, 294), 'os.path.join', 'os.path.join', (['basedir', 'f"""{name}"""'], {}), "(basedir, f'{name}')\n", (274, 294), False, 'import os\n'), ((329, 375), 'os.path.join', 'os.path.join', (['basedir', 'f"""{name}"""', 'f"""{name}.in"""'], {}), "(basedir, f'{name}', f'{name}.in')\n", (341, 375), False, 'import os\n'), ((409, 455), 'os.path.join', 'os.path.join', (['basedir', 'f"""{name}"""', 'f"""{name}.op"""'], {}), "(basedir, f'{name}', f'{name}.op')\n", (421, 455), False, 'import os\n'), ((571, 582), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (580, 582), False, 'import os\n'), ((774, 812), 'clint.textui.colored.red', 'colored.red', (['"""Invalid contest number."""'], {}), "('Invalid contest number.')\n", (785, 812), False, 'from clint.textui import colored\n'), ((1070, 1108), 'clint.textui.colored.red', 'colored.red', (['"""Invalid contest number."""'], {}), "('Invalid contest number.')\n", (1081, 1108), False, 'from clint.textui import colored\n'), ((2752, 2809), 'clint.textui.colored.red', 'colored.red', (['"""There was some error fetching the tests !!"""'], {}), "('There was some error fetching the tests !!')\n", (2763, 2809), False, 'from clint.textui import colored\n'), ((2592, 2678), 'clint.textui.colored.red', 'colored.red', (['f"""Failed to add sample test cases: Incorrect directory structure !!"""'], {}), 
"(\n f'Failed to add sample test cases: Incorrect directory structure !!')\n", (2603, 2678), False, 'from clint.textui import colored\n'), ((621, 632), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (630, 632), False, 'import os\n'), ((1537, 1593), 'os.path.join', 'os.path.join', (['basedir', 'f"""{file_name}"""', 'f"""{file_name}.in"""'], {}), "(basedir, f'{file_name}', f'{file_name}.in')\n", (1549, 1593), False, 'import os\n'), ((2066, 2122), 'os.path.join', 'os.path.join', (['basedir', 'f"""{file_name}"""', 'f"""{file_name}.op"""'], {}), "(basedir, f'{file_name}', f'{file_name}.op')\n", (2078, 2122), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import requests
import json
from requests.status_codes import codes
from .exceptions import (
UserAlreadyExists,
NotFound,
UserNotFound,
GroupNotFound,
GroupMissingID,
UserMissingID,
GroupUpdatedSimultaneous,
UserAlreadyMember,
UserUpdatedSimultaneous,
DeploymentNotFound
)
# Query-string parameters accepted by the Activiti REST identity/users endpoint.
# BUG FIX: 'lastNAme' was a typo — check_parameters() filtered against it, so a
# caller's `lastName` argument was silently dropped from every query.
USERS_FIELDS = [
    'id', 'firstName', 'lastName', 'email', 'firstNameLike',
    'lastNameLike', 'emailLike', 'memberOfGroup', 'potentialStarter',
    'sort'
]

# Query-string parameters accepted by the identity/groups endpoint.
GROUPS_FIELDS = [
    'id', 'name', 'type', 'nameLike', 'member', 'potentialStarter', 'sort'
]

# Query-string parameters accepted by the repository/deployments endpoint.
DEPLOYMENTS_FIELDS = [
    'name', 'nameLike', 'category', 'categoryNotEquals', 'tenantId',
    'tenantIdLike', 'withoutTenantId', 'sort'
]
def check_parameters(fields, args):
    """Pop every known field out of *args* and return the truthy ones.

    Mutates *args* in place: each name listed in *fields* is removed from it
    whether or not it carried a usable value.
    """
    popped = {field: args.pop(field, None) for field in fields}
    return {name: value for name, value in popped.items() if value}
class Activiti(object):
    """Thin client for the Activiti REST API.

    Wraps the identity (users/groups), runtime (process instances/tasks),
    form and repository (deployments/process definitions) services behind
    a single authenticated requests session that always talks JSON.
    """

    def __init__(self, endpoint, auth=('kermit', 'kermit')):
        self.endpoint = endpoint
        self.auth = auth
        # One session so basic auth and the JSON content type apply everywhere.
        self.session = requests.Session()
        self.session.auth = self.auth
        self.session.headers.update({'content-type': 'application/json'})

    # URL of a single user / group resource.
    user_url = lambda self, id: self.users_url(id)
    group_url = lambda self, id: self.groups_url(id)

    def user_exists(self, login):
        """Return True when a user with id *login* exists."""
        response = self._get(self.user_url(login))
        return response.status_code == codes.ok

    def get_user(self, login):
        """Return the user's JSON representation or raise UserNotFound."""
        response = self._get(self.user_url(login))
        if response.status_code == codes.ok:
            return response.json()
        raise UserNotFound()

    def users_url(self, *args):
        """Build an identity/users URL, optionally with extra path segments."""
        return self._to_endpoint('identity', 'users', *args)

    def users(self, **parameters):
        """List users; any parameter named in USERS_FIELDS is forwarded."""
        params = check_parameters(USERS_FIELDS, parameters)
        response = self._get(self.users_url(), params=params)
        if response.status_code == codes.ok:
            return response.json()
        raise NotImplementedError()

    def get_users_member_of(self, group):
        """List the users that are members of *group*.

        BUG FIX: the filter is named 'memberOfGroup' (see USERS_FIELDS); the
        original passed 'memberOfGroups', which check_parameters silently
        dropped, so every user was returned instead of the group's members.
        """
        return self.users(memberOfGroup=group)

    def create_user(self, login, email, password, firstname=None, lastname=None):
        """Create a user and return its JSON representation.

        Raises UserAlreadyExists on conflict and UserMissingID when the
        server rejects the payload as a bad request.
        """
        user = {
            'id': login,
            'email': email,
            'password': password,
            'firstName': firstname or '',
            'lastName': lastname or ''
        }
        response = self._post(self.users_url(), user)
        if response.status_code == codes.created:
            return response.json()
        elif response.status_code == codes.conflict:
            raise UserAlreadyExists(response.json()['exception'])
        elif response.status_code == codes.bad_request:
            raise UserMissingID()
        return response.status_code == codes.created

    def user_update(self, user_id, values=None):
        """Update a user; raises on unknown id or concurrent modification."""
        response = self._put(self.user_url(user_id), values=values)
        if response.status_code == codes.ok:
            return response.json()
        elif response.status_code == codes.not_found:
            raise UserNotFound()
        elif response.status_code == codes.conflict:
            raise UserUpdatedSimultaneous()

    def delete_user(self, login):
        """Delete a user; returns True, or raises UserNotFound."""
        response = self._delete(self.user_url(login))
        if response.status_code == codes.no_content:
            return True
        elif response.status_code == codes.not_found:
            raise UserNotFound()

    def groups_url(self, *args):
        """Build an identity/groups URL, optionally with extra path segments."""
        return self._to_endpoint('identity', 'groups', *args)

    def get_group(self, group_id):
        """Return whether the group exists (True/False)."""
        response = self._get(self.group_url(group_id))
        if response.status_code == codes.ok:
            return True
        elif response.status_code == codes.not_found:
            return False
        raise NotImplementedError()

    def groups(self, **parameters):
        """List groups; any parameter named in GROUPS_FIELDS is forwarded."""
        params = check_parameters(GROUPS_FIELDS, parameters)
        response = self._get(self.groups_url(), params=params)
        if response.status_code == codes.ok:
            return response.json()
        raise NotImplementedError()

    def group_update(self, group_id, values=None):
        """Update a group; raises on unknown id or concurrent modification."""
        response = self._put(self.group_url(group_id), values=values)
        if response.status_code == codes.ok:
            return response.json()
        elif response.status_code == codes.not_found:
            raise GroupNotFound()
        elif response.status_code == codes.conflict:
            raise GroupUpdatedSimultaneous()

    def create_group(self, id, name, type):
        """Create a group; raises GroupMissingID on a bad request."""
        values = dict(id=id, name=name, type=type)
        response = self._post(self.groups_url(), values)
        if response.status_code == codes.created:
            return response.json()
        elif response.status_code == codes.bad_request:
            raise GroupMissingID()

    def delete_group(self, group_id):
        """Delete a group; returns True, or raises GroupNotFound."""
        response = self._delete(self.group_url(group_id))
        if response.status_code == codes.no_content:
            return True
        elif response.status_code == codes.not_found:
            raise GroupNotFound()

    def group_add_member(self, group_id, user_id):
        """Add *user_id* to the group.

        Raises GroupNotFound for an unknown group and UserAlreadyMember
        when the user is already in it.
        """
        values = {
            'userId': user_id,
        }
        response = self._post(
            self._to_endpoint('identity', 'groups', group_id, 'members'),
            values=values
        )
        if response.status_code == codes.created:
            return response.json()
        elif response.status_code == codes.not_found:
            raise GroupNotFound()
        elif response.status_code == codes.conflict:
            raise UserAlreadyMember()

    def group_remove_member(self, group_id, user_id):
        """Remove *user_id* from the group; returns True, or raises NotFound."""
        response = self._delete(
            self._to_endpoint('identity', 'groups', group_id, 'members', user_id)
        )
        if response.status_code == codes.no_content:
            return True
        elif response.status_code == codes.not_found:
            raise NotFound()

    def process_definitions(self):
        """Return the deployed process definitions."""
        response = self._get('/repository/process-definitions')
        return json.loads(response.content)

    def _delete(self, service):
        """Issue a DELETE request."""
        return self.session.delete(service)

    def _post(self, service, values=None):
        """Issue a POST request with a JSON-encoded body."""
        if values:
            values = json.dumps(values)
        return self.session.post(service, data=values)

    def _get(self, service, params=None):
        """Issue a GET request."""
        return self.session.get(service, params=params)

    def _put(self, service, values=None):
        """Issue a PUT request with a JSON-encoded body."""
        if values:
            values = json.dumps(values)
        return self.session.put(service, data=values)

    def _to_endpoint(self, *args):
        """Join path segments onto '<endpoint>/service/'."""
        return '/'.join([self.endpoint, 'service'] + list(str(arg) for arg in args))

    def start_process_by_key(self, key, variables=None):
        """Start a process instance by process-definition key."""
        if variables is None:
            variables = {}
        # BUG FIX: dict.iteritems() does not exist on Python 3; items()
        # behaves equivalently on both major versions.
        variables = [
            {'name': _key, 'value': value}
            for _key, value in variables.items()
        ]
        values = {
            'processDefinitionKey': key,
            'businessKey': 'business%s' % key,
            'variables': variables,
        }
        return self._post('/runtime/process-instances', values)

    def get_user_task_list(self, user, process=None):
        """Return the tasks *user* is involved in, optionally per process."""
        url = '/runtime/tasks?involvedUser=%s' % (user,)
        if process:
            url += '&processDefinitionKey=%s' % (process,)
        response = self._get(url)
        return json.loads(response.content)

    def get_task_form(self, task_id):
        """Return the form data of a task."""
        response = self._get('/form/form-data?taskId=%s' % (task_id,))
        return json.loads(response.content)

    def submit_task_form(self, task_id, properties=None):
        """Submit form properties for a task."""
        if properties is None:
            properties = {}
        # BUG FIX: iteritems() -> items() (Python 3 compatibility).
        properties = [
            {'id': _key, 'value': value}
            for _key, value in properties.items()
        ]
        values = {
            'taskId': task_id,
            'properties': properties,
        }
        return self._post('/form/form-data', values)

    # Keep the backward-compatibility
    submitTaskForm = submit_task_form
    getTaskForm = get_task_form
    startProcessByKey = start_process_by_key
    getUserTaskList = get_user_task_list

    def deployments_url(self, *args):
        """Build a repository/deployments URL."""
        return self._to_endpoint('repository', 'deployments', *args)

    def deployment_url(self, deployment_id):
        """URL of a single deployment."""
        return self.deployments_url(deployment_id)

    def deployments(self, **parameters):
        """List deployments; parameters are forwarded unfiltered."""
        response = self._get(self.deployments_url(), params=parameters)
        if response.status_code == codes.ok:
            return response.json()
        raise NotImplementedError()

    def get_deployment(self, deployment_id):
        """Return one deployment or raise DeploymentNotFound."""
        response = self._get(self.deployment_url(deployment_id))
        if response.status_code == codes.ok:
            return response.json()
        elif response.status_code == codes.not_found:
            raise DeploymentNotFound()
        raise NotImplementedError()

    # def create_deployment(self, files):
    #     response = self.session.post(self.deployments_url(), files=files)
|
[
"requests.Session",
"json.loads",
"json.dumps"
] |
[((1101, 1119), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1117, 1119), False, 'import requests\n'), ((6040, 6068), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (6050, 6068), False, 'import json\n'), ((7382, 7410), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (7392, 7410), False, 'import json\n'), ((7536, 7564), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (7546, 7564), False, 'import json\n'), ((6230, 6248), 'json.dumps', 'json.dumps', (['values'], {}), '(values)\n', (6240, 6248), False, 'import json\n'), ((6486, 6504), 'json.dumps', 'json.dumps', (['values'], {}), '(values)\n', (6496, 6504), False, 'import json\n')]
|
import os
import sys
import argparse
# Make the package root importable when this script is run from its own folder.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from vedacls.runner import InferenceRunner
from vedacls.utils import Config
def parse_args():
    """Build and parse the command line for the demo script."""
    parser = argparse.ArgumentParser(description='Demo')
    positional = (
        ('config', 'config file path'),
        ('checkpoint', 'checkpoint file path'),
        ('inp', 'input video path'),
    )
    for arg_name, description in positional:
        parser.add_argument(arg_name, type=str, help=description)
    parser.add_argument('--json_pth', type=str, help='input video path', default=None)
    parser.add_argument('--save_pth', type=str, default=None, help='video output path')
    return parser.parse_args()
def main():
    """Load the configured model and render the plotted output video."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    runner = InferenceRunner(cfg['inference'], cfg.get('common'))
    runner.load_checkpoint(args.checkpoint)
    # labels, scores = runner.inference(args.inp)
    # print(labels)
    # print(scores)
    # Output file is named after the input video's basename.
    args.save_pth = args.inp.split('/')[-1]
    print(args.save_pth)
    if args.json_pth:
        runner.plot_v3(args.inp, args.json_pth, args.save_pth)
    else:
        runner.plot(args.inp, args.save_pth)
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"os.path.dirname",
"vedacls.utils.Config.fromfile",
"argparse.ArgumentParser",
"vedacls.runner.InferenceRunner"
] |
[((214, 257), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Demo"""'}), "(description='Demo')\n", (237, 257), False, 'import argparse\n'), ((742, 770), 'vedacls.utils.Config.fromfile', 'Config.fromfile', (['args.config'], {}), '(args.config)\n', (757, 770), False, 'from vedacls.utils import Config\n'), ((858, 900), 'vedacls.runner.InferenceRunner', 'InferenceRunner', (['inference_cfg', 'common_cfg'], {}), '(inference_cfg, common_cfg)\n', (873, 900), False, 'from vedacls.runner import InferenceRunner\n'), ((69, 94), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (84, 94), False, 'import os\n')]
|
import pytest
from odis.entities import *
from django.db import models
class Sut():
    """System under test: a tiny state machine driven by guard_state_transition."""

    class States(models.TextChoices):
        # (value, human-readable label) pairs.
        FIRST = ('one', 'the first state')
        SECOND = ('two', 'the second state')
        THIRD = ('three', 'the third state')
        SHORTER = ('s', 'two part tuple')
        ANY = ('any', 'from any')

    # Current state; tests assign this explicitly before calling do().
    state = None

    def to_second(self, *args, **kwargs):
        # Transition callback: land in SECOND.
        self.state = Sut.States.SECOND

    def to_third(self, *args, **kwargs):
        # Transition callback: land in THIRD.
        self.state = Sut.States.THIRD

    def to_any(self, *args, **kwargs):
        # Transition callback: land in ANY.
        self.state = Sut.States.ANY

    def do(self, action: str, *args, **kwargs):
        """Dispatch *action* through the transition table.

        Each entry maps an action name to either a list of
        [allowed state(s)..., callback] rules or a bare callback —
        presumably valid from any state; the exact semantics live in
        guard_state_transition (odis.entities).
        """
        transitions = {
            'progress': [
                [Sut.States.FIRST, self.to_second],
                [Sut.States.SECOND, self.to_third]
            ],
            'third': [[
                Sut.States.FIRST,
                Sut.States.SECOND,
                self.to_third
            ], [
                Sut.States.THIRD,
                None #NoOp
            ]],
            'test_any': self.to_any
        }
        guard_state_transition(transitions, action, self.state, *args, **kwargs)
def test_progress_action():
    """'progress' advances FIRST -> SECOND -> THIRD, then refuses to move on."""
    machine = Sut()
    machine.state = Sut.States.FIRST
    for expected in (Sut.States.SECOND, Sut.States.THIRD):
        machine.do('progress')
        assert machine.state == expected
    with pytest.raises(StateTransitionError):
        machine.do('progress')
def test_third_action():
    """'third' jumps straight to THIRD and is a no-op once already there."""
    machine = Sut()
    machine.state = Sut.States.FIRST
    machine.do('third')
    assert machine.state == Sut.States.THIRD
    # Repeating the action must leave the state untouched (NoOp transition).
    machine.do('third')
    assert machine.state == Sut.States.THIRD
def test_invalid_operation():
    """An action missing from the transition table raises UndefinedActionError."""
    machine = Sut()
    machine.state = Sut.States.FIRST
    with pytest.raises(UndefinedActionError):
        machine.do('undefined')
def test_simple_always_available_operation():
    """A bare-callable transition is valid regardless of the current state."""
    machine = Sut()
    machine.do('test_any')
    assert machine.state == Sut.States.ANY
|
[
"pytest.raises"
] |
[((1367, 1402), 'pytest.raises', 'pytest.raises', (['StateTransitionError'], {}), '(StateTransitionError)\n', (1380, 1402), False, 'import pytest\n'), ((1726, 1761), 'pytest.raises', 'pytest.raises', (['UndefinedActionError'], {}), '(UndefinedActionError)\n', (1739, 1761), False, 'import pytest\n')]
|
#"""
#This file is part of Happypanda.
#Happypanda is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 2 of the License, or
#any later version.
#Happypanda is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with Happypanda. If not, see <http://www.gnu.org/licenses/>.
#"""
import datetime
import os
import subprocess
import sys
import logging
import zipfile
import hashlib
import shutil
import uuid
import re
import scandir
import rarfile
import json
import send2trash
import functools
import time
from PyQt5.QtGui import QImage, qRgba
from PIL import Image,ImageChops
try:
import app_constants
from database import db_constants
except:
from . import app_constants
from .database import db_constants
log = logging.getLogger(__name__)
# Short aliases used throughout this module.
log_i = log.info
log_d = log.debug
log_w = log.warning
log_e = log.error
log_c = log.critical

# Recognized file extensions and the matching file-dialog filter strings.
IMG_FILES = ('.jpg','.bmp','.png','.gif', '.jpeg')
ARCHIVE_FILES = ('.zip', '.cbz', '.rar', '.cbr')
FILE_FILTER = '*.zip *.cbz *.rar *.cbr'
IMG_FILTER = '*.jpg *.bmp *.png *.jpeg'

# Configure rarfile with the user's unrar executable.
rarfile.PATH_SEP = '/'
rarfile.UNRAR_TOOL = app_constants.unrar_tool_path
# Without an unrar tool only zip-based archives can be handled.
if not app_constants.unrar_tool_path:
    FILE_FILTER = '*.zip *.cbz'
    ARCHIVE_FILES = ('.zip', '.cbz')
class GMetafile:
    """Collects gallery metadata from metafiles found inside a gallery
    folder or archive (eze .json dumps and HDoujin Downloader info.txt).

    Recognized values accumulate in self.metadata and can be copied onto a
    gallery object with apply_gallery().
    """

    def __init__(self, path=None, archive=''):
        # Blank template; the detectors below fill in whatever they find.
        self.metadata = {
            "title":'',
            "artist":'',
            "type":'',
            "tags":{},
            "language":'',
            "pub_date":'',
            "link":'',
            "info":'',
        }
        self.files = []
        if path is None:
            return
        if archive:
            # Look for metafiles next to `path` inside the archive.
            zip = ArchiveFile(archive)
            c = zip.dir_contents(path)
            for x in c:
                if x.endswith(app_constants.GALLERY_METAFILE_KEYWORDS):
                    self.files.append(open(zip.extract(x), encoding='utf-8'))
        else:
            for p in scandir.scandir(path):
                if p.name in app_constants.GALLERY_METAFILE_KEYWORDS:
                    self.files.append(open(p.path, encoding='utf-8'))
        if self.files:
            self.detect()
        else:
            log_d('No metafile found...')

    def _eze(self, fp):
        """Parse an eze-style .json metafile; returns True on success."""
        if not fp.name.endswith('.json'):
            return
        # BUG FIX: json.load() lost its `encoding` parameter in Python 3.9;
        # the file object is already opened with encoding='utf-8'.
        j = json.load(fp)
        eze = ['gallery_info', 'image_api_key', 'image_info']
        # eze dumps always carry these three top-level keys.
        if all(x in j for x in eze):
            log_i('Detected metafile: eze')
            ezedata = j['gallery_info']
            t_parser = title_parser(ezedata['title'])
            self.metadata['title'] = t_parser['title']
            self.metadata['type'] = ezedata['category']
            for ns in ezedata['tags']:
                self.metadata['tags'][ns.capitalize()] = ezedata['tags'][ns]
            # 'Misc' tags live in the default namespace.
            self.metadata['tags']['default'] = self.metadata['tags'].pop('Misc', [])
            self.metadata['artist'] = self.metadata['tags']['Artist'][0].capitalize()\
                if 'Artist' in self.metadata['tags'] else t_parser['artist']
            self.metadata['language'] = ezedata['language']
            d = ezedata['upload_date']
            # upload_date is a list; indices 0, 1 and 3 are used as year,
            # month and day (assumed from the strptime format — TODO confirm).
            # BUG FIX: the original zero-padded d[1] and assigned the result
            # to d[3] as well, clobbering the day with the month. strptime
            # accepts unpadded numbers, so no padding is needed at all.
            self.metadata['pub_date'] = datetime.datetime.strptime("{} {} {}".format(d[0], d[1], d[3]), "%Y %m %d")
            l = ezedata['source']
            self.metadata['link'] = 'http://' + l['site'] + '.org/g/' + str(l['gid']) + '/' + l['token']
            return True

    def _hdoujindler(self, fp):
        """Parse an HDoujin Downloader info.txt metafile; returns True on success."""
        if fp.name.endswith('info.txt'):
            log_i('Detected metafile: HDoujin text')
            lines = fp.readlines()
            if lines:
                for line in lines:
                    splitted = line.split(':', 1)
                    if len(splitted) > 1:
                        other = splitted[1].strip()
                        if not other:
                            continue
                        l = splitted[0].lower()
                        if "title" == l:
                            self.metadata['title'] = other
                        if "artist" == l:
                            self.metadata['artist'] = other.capitalize()
                        if "tags" == l:
                            self.metadata['tags'].update(tag_to_dict(other))
                        if "description" == l:
                            self.metadata['info'] = other
                        if "circle" in l:
                            # Circles are recorded as 'group' tags.
                            if not "group" in self.metadata['tags']:
                                self.metadata['tags']['group'] = []
                            self.metadata['tags']['group'].append(other.strip().lower())
                        if "url" == l:
                            self.metadata['link'] = other
                return True
        # NOTE: a parser for HDoujin's info.json used to live here but was
        # disabled because it never worked; see project history if needed.

    def detect(self):
        """Run every known parser over the collected metafiles."""
        for fp in self.files:
            with fp:
                z = False
                for x in [self._eze, self._hdoujindler]:
                    try:
                        if x(fp):
                            z = True
                            break
                    except Exception:
                        log.exception('Error in parsing metafile')
                        continue
                if not z:
                    log_i('Incompatible metafiles found')

    def update(self, other):
        """Merge another GMetafile into this one; non-empty values win."""
        self.metadata.update((x, y) for x, y in other.metadata.items() if y)

    def apply_gallery(self, gallery):
        """Copy every non-empty metadata value onto *gallery* and return it."""
        log_i('Applying metafile to gallery')
        # Metadata keys map 1:1 to gallery attribute names.
        for attr in ('title', 'artist', 'type', 'tags',
                     'language', 'pub_date', 'link', 'info'):
            value = self.metadata[attr]
            if value:
                setattr(gallery, attr, value)
        return gallery
def backup_database(db_path=db_constants.DB_PATH):
    """Copy the database file into a date-stamped file under <db dir>/backup.

    Up to 50 candidate names of the form 'DATE(n)-DATE-<name>' are tried so
    an existing backup from the same day is never overwritten.
    """
    log_i("Perfoming database backup")
    date = "{}".format(datetime.datetime.today()).split(' ')[0]
    base_path, name = os.path.split(db_path)
    backup_dir = os.path.join(base_path, 'backup')
    if not os.path.isdir(backup_dir):
        os.mkdir(backup_dir)
    first_choice = "{}-{}".format(date, name)
    db_name = first_choice
    for attempt in range(50):
        if attempt:
            db_name = "{}({})-{}".format(date, attempt, first_choice)
        dst_path = os.path.join(backup_dir, db_name)
        if not os.path.exists(dst_path):
            shutil.copyfile(db_path, dst_path)
            break
    log_i("Database backup perfomed: {}".format(db_name))
    return True
def get_date_age(date):
    """
    Take a datetime and return its "age" as a string.

    The age can be in second, minute, hour, day, month or year. Only the
    biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
    be returned.

    Make sure date is not in the future, or else it won't work.
    """
    def formatn(n, s):
        '''Add "s" if it's plural'''
        if n == 1:
            return "1 %s" % s
        elif n > 1:
            return "%d %ss" % (n, s)

    def q_n_r(a, b):
        '''Return integer quotient and remainder.

        BUG FIX: the original used true division (a / b), which on Python 3
        yields floats; a fractional count such as 0.96 then slipped past both
        branches of formatn() and the whole function returned None.'''
        return divmod(a, b)

    class PrettyDelta:
        # Decomposes (now - dt) into calendar-ish units, largest first.
        def __init__(self, dt):
            now = datetime.datetime.now()
            delta = now - dt
            self.day = delta.days
            self.second = delta.seconds
            # 365-day years and 30-day months are an approximation.
            self.year, self.day = q_n_r(self.day, 365)
            self.month, self.day = q_n_r(self.day, 30)
            self.hour, self.second = q_n_r(self.second, 3600)
            self.minute, self.second = q_n_r(self.second, 60)

        def format(self):
            for period in ['year', 'month', 'day', 'hour', 'minute', 'second']:
                n = getattr(self, period)
                if n > 0.9:
                    return formatn(n, period)
            return "0 second"

    return PrettyDelta(date).format()
def all_opposite(*args):
    "Return True if every item in every given iterable is falsy"
    return not any(item for iterable in args for item in iterable)
def update_gallery_path(new_path, gallery):
    "Updates a gallery's chapters path"
    for chapter in gallery.chapters:
        parent, leaf = os.path.split(chapter.path)
        if chapter.path == gallery.path:
            # Chapter lives directly at the gallery root.
            chapter.path = new_path
        elif parent == gallery.path:
            # Chapter is an immediate child: re-anchor it under the new root.
            chapter.path = os.path.join(new_path, leaf)
    gallery.path = new_path
    return gallery
def move_files(path, dest='', only_path=False):
    """
    Move files to a new destination. If dest is not set,
    imported_galleries_def_path will be used instead.
    """
    target_dir = dest or app_constants.IMPORTED_GALLERY_DEF_PATH
    if not target_dir:
        return path
    new_path = os.path.join(target_dir, os.path.split(path)[1])
    if not only_path:
        log_i("Moving to: {}".format(new_path))
    # Unpacking split() normalizes the separator before comparing paths.
    if new_path == os.path.join(*os.path.split(path)):
        return path
    if os.path.exists(new_path):
        # Something already lives at the destination; leave the file alone.
        return path
    app_constants.TEMP_PATH_IGNORE.append(os.path.normcase(new_path))
    if only_path:
        return new_path
    return shutil.move(path, new_path)
def check_ignore_list(key):
    """Return False when *key* matches the user's ignore settings, True otherwise."""
    normalized = os.path.normcase(key)
    # Whole folders can be ignored via the special 'Folder' extension entry.
    if os.path.isdir(key) and 'Folder' in app_constants.IGNORE_EXTS:
        return False
    if os.path.splitext(key)[1] in app_constants.IGNORE_EXTS:
        return False
    # Any configured ignore path contained in the key blocks it.
    return not any(os.path.normcase(p) in normalized
                   for p in app_constants.IGNORE_PATHS)
def gallery_text_fixer(gallery):
    """Apply the user-configured regex substitution to a gallery's title/artist.

    Returns the gallery, or None when the configured regex is empty or invalid.
    """
    pattern = app_constants.GALLERY_DATA_FIX_REGEX
    if not pattern:
        return None
    try:
        re.compile(pattern)
    except re.error:
        return None

    def _substitute(text):
        return re.sub(pattern, app_constants.GALLERY_DATA_FIX_REPLACE, text)

    if app_constants.GALLERY_DATA_FIX_TITLE:
        gallery.title = _substitute(gallery.title)
    if app_constants.GALLERY_DATA_FIX_ARTIST:
        gallery.artist = _substitute(gallery.artist)
    return gallery
def b_search(data, key):
    """Binary search over a sorted sequence.

    Returns the matching element, or None when *key* is falsy or absent.
    """
    if not key:
        return None
    low, high = 0, len(data) - 1
    while low <= high:
        mid = (low + high) // 2
        if data[mid] < key:
            low = mid + 1
        elif data[mid] > key:
            high = mid - 1
        else:
            return data[mid]
    return None
def generate_img_hash(src):
    """
    Generates sha1 hash based on the given bytes.
    Returns hex-digits
    """
    CHUNK = 8129
    digest = hashlib.sha1()
    log_d("Generating hash")
    # Stream the source in chunks so large images don't load fully into memory.
    while True:
        piece = src.read(CHUNK)
        if not piece:
            break
        digest.update(piece)
    return digest.hexdigest()
class ArchiveFile():
    """
    Work with archive files, raises exception if instance fails.
    namelist -> returns a list with all files in archive
    extract <- Extracts one specific file to given path
    open -> open the given file in archive, returns bytes
    close -> close archive
    """
    # Archive type ids: zip == 0 covers .zip/.cbz, rar == 1 covers .rar/.cbr.
    zip, rar = range(2)

    def __init__(self, filepath):
        self.type = 0
        try:
            if filepath.endswith(ARCHIVE_FILES):
                if filepath.endswith(ARCHIVE_FILES[:2]):
                    # .zip / .cbz
                    self.archive = zipfile.ZipFile(os.path.normcase(filepath))
                    b_f = self.archive.testzip()
                    self.type = self.zip
                elif filepath.endswith(ARCHIVE_FILES[2:]):
                    # .rar / .cbr (requires the external unrar tool)
                    self.archive = rarfile.RarFile(os.path.normcase(filepath))
                    b_f = self.archive.testrar()
                    self.type = self.rar

                # test for corruption: testzip/testrar return the first bad file name.
                if b_f:
                    log_w('Bad file found in archive {}'.format(filepath.encode(errors='ignore')))
                    raise app_constants.CreateArchiveFail
            else:
                log_e('Archive: Unsupported file format')
                raise app_constants.CreateArchiveFail
        except:
            # NOTE(review): bare except — every failure (including a corrupt
            # archive raised above) is surfaced to callers as CreateArchiveFail.
            log.exception('Create archive: FAIL')
            raise app_constants.CreateArchiveFail

    def namelist(self):
        # All entry names (files and directories) in the archive.
        filelist = self.archive.namelist()
        return filelist

    def is_dir(self, name):
        """
        Checks if the provided name in the archive is a directory or not
        """
        if not name:
            return False
        if not name in self.namelist():
            log_e('File {} not found in archive'.format(name))
            raise app_constants.FileNotFoundInArchive
        if self.type == self.zip:
            # Zip directory entries always end with a slash.
            if name.endswith('/'):
                return True
        elif self.type == self.rar:
            info = self.archive.getinfo(name)
            return info.isdir()
        return False

    def dir_list(self, only_top_level=False):
        """
        Returns a list of all directories found recursively. For directories not in toplevel
        a path in the archive to the diretory will be returned.
        """
        if only_top_level:
            if self.type == self.zip:
                # Top-level zip dirs: exactly one slash, at the end.
                return [x for x in self.namelist() if x.endswith('/') and x.count('/') == 1]
            elif self.type == self.rar:
                # Rar names carry no trailing slash; ask each entry whether it is a dir.
                potential_dirs = [x for x in self.namelist() if x.count('/') == 0]
                return [x.filename for x in [self.archive.getinfo(y) for y in potential_dirs] if x.isdir()]
        else:
            if self.type == self.zip:
                return [x for x in self.namelist() if x.endswith('/') and x.count('/') >= 1]
            elif self.type == self.rar:
                return [x.filename for x in self.archive.infolist() if x.isdir()]

    def dir_contents(self, dir_name):
        """
        Returns a list of contents in the directory
        An empty string will return the contents of the top folder
        """
        if dir_name and not dir_name in self.namelist():
            log_e('Directory {} not found in archive'.format(dir_name))
            raise app_constants.FileNotFoundInArchive
        if not dir_name:
            # Top folder: files with no slash plus immediate subdirectories.
            if self.type == self.zip:
                con = [x for x in self.namelist() if x.count('/') == 0 or \
                    (x.count('/') == 1 and x.endswith('/'))]
            elif self.type == self.rar:
                con = [x for x in self.namelist() if x.count('/') == 0]
            return con
        if self.type == self.zip:
            # Direct children only: same slash depth for files, one deeper for dirs.
            dir_con_start = [x for x in self.namelist() if x.startswith(dir_name)]
            return [x for x in dir_con_start if x.count('/') == dir_name.count('/') and \
                (x.count('/') == dir_name.count('/') and not x.endswith('/')) or \
                (x.count('/') == 1 + dir_name.count('/') and x.endswith('/'))]
        elif self.type == self.rar:
            return [x for x in self.namelist() if x.startswith(dir_name) and \
                x.count('/') == 1 + dir_name.count('/')]
        return []

    def extract(self, file_to_ext, path=None):
        """
        Extracts one file from archive to given path
        Creates a temp_dir if path is not specified
        Returns path to the extracted file
        """
        if not path:
            # Default to a unique folder inside the application temp dir.
            path = os.path.join(app_constants.temp_dir, str(uuid.uuid4()))
            os.mkdir(path)
        if not file_to_ext:
            return self.extract_all(path)
        else:
            if self.type == self.zip:
                membs = []
                for name in self.namelist():
                    if name.startswith(file_to_ext) and name != file_to_ext:
                        membs.append(name)
                temp_p = self.archive.extract(file_to_ext, path)
                # Also extract everything nested under the requested entry.
                for m in membs:
                    self.archive.extract(m, path)
            elif self.type == self.rar:
                temp_p = os.path.join(path, file_to_ext)
                self.archive.extract(file_to_ext, path)
            return temp_p

    def extract_all(self, path=None, member=None):
        """
        Extracts all files to given path, and returns path
        If path is not specified, a temp dir will be created
        """
        if not path:
            path = os.path.join(app_constants.temp_dir, str(uuid.uuid4()))
            os.mkdir(path)
        if member:
            self.archive.extractall(path, member)
        # NOTE(review): this runs even when `member` was given, so the whole
        # archive is always extracted — looks unintended; confirm before relying on it.
        self.archive.extractall(path)
        return path

    def open(self, file_to_open, fp=False):
        """
        Returns bytes. If fp set to true, returns file-like object.
        """
        if fp:
            return self.archive.open(file_to_open)
        else:
            return self.archive.open(file_to_open).read()

    def close(self):
        # Release the underlying zipfile/rarfile handle.
        self.archive.close()
def check_archive(archive_path):
    """
    Checks archive path for potential galleries.
    Returns a list with a path in archive to galleries
    if there is no directories
    """
    try:
        archive = ArchiveFile(archive_path)
    except app_constants.CreateArchiveFail:
        return []
    if not archive:
        return []

    def looks_like_gallery(dir_name):
        """Return dir_name when >= 80% of its direct entries are image files."""
        contents = archive.dir_contents(dir_name)
        if not contents:
            return None
        images = sum(1 for entry in contents if entry.lower().endswith(IMG_FILES))
        if images >= len(contents) * 0.8:
            return dir_name
        return None

    galleries = []
    archive_dirs = archive.dir_list()
    if archive_dirs:  # There are directories in the top folder
        # NOTE(review): '' is falsy, so a qualifying archive root is never
        # appended in this branch — quirk preserved from the original.
        if looks_like_gallery(''):
            galleries.append('')
        for candidate in archive_dirs:
            hit = looks_like_gallery(candidate)
            if hit:
                galleries.append(hit)
    else:  # all pages are in top folder
        if isinstance(looks_like_gallery(''), str):
            galleries.append('')
    archive.close()
    return galleries
def recursive_gallery_check(path):
    """
    Recursively checks a folder for any potential galleries
    Returns a list of paths for directories and a list of tuples where first
    index is path to gallery in archive and second index is path to archive.
    Like this:
    ["C:path/to/g"] and [("path/to/g/in/a", "C:path/to/a")]
    """
    gallery_dirs = []
    gallery_arch = []
    found_paths = 0
    for root, subfolders, files in scandir.walk(path):
        if files:
            for f in files:
                # every archive anywhere in the tree gets scanned for galleries
                if f.endswith(ARCHIVE_FILES):
                    arch_path = os.path.join(root, f)
                    for g in check_archive(arch_path):
                        found_paths += 1
                        gallery_arch.append((g, arch_path))
        if not subfolders:
            # leaf directory: counts as a gallery when >= 80% of its files are images
            if not files:
                continue
            gallery_probability = len(files)
            for f in files:
                if not f.lower().endswith(IMG_FILES):
                    gallery_probability -= 1
            if gallery_probability >= (len(files) * 0.8):
                found_paths += 1
                gallery_dirs.append(root)
    log_i('Found {} in {}'.format(found_paths, path).encode(errors='ignore'))
    return gallery_dirs, gallery_arch
def today():
    "Returns current date in a list: [dd, Mmm, yyyy]"
    now = datetime.date.today()
    return [now.strftime(fmt) for fmt in ("%d", "%b", "%Y")]
def external_viewer_checker(path):
    """Identify a known external viewer from its executable path.

    Compares the executable's file name against every candidate name in
    app_constants.EXTERNAL_VIEWER_SUPPORT and returns the matching key,
    or None when the viewer is unrecognized.
    """
    supported = app_constants.EXTERNAL_VIEWER_SUPPORT
    exe_name = os.path.split(path)[1].lower()
    for key in supported:
        if any(exe_name in candidate.lower() for candidate in supported[key]):
            return key
def open_chapter(chapterpath, archive=None):
    """Open a chapter (folder or archive) in the configured image viewer.

    Resolves what to hand the viewer — the chapter folder or its first image,
    extracting archives to a temp dir when needed — then launches either the
    platform's default opener or the external viewer from app_constants.
    Failures are reported on the notification bar and logged; nothing is raised.

    Args:
        chapterpath: path to the chapter folder, archive, or path inside *archive*
        archive: optional path to the archive containing the chapter
    """
    is_archive = True if archive else False
    if not is_archive:
        chapterpath = os.path.normpath(chapterpath)
    temp_p = archive if is_archive else chapterpath
    custom_args = app_constants.EXTERNAL_VIEWER_ARGS
    send_folder_t = '{$folder}'  # placeholder: pass the folder to the viewer
    send_image_t = '{$file}'     # placeholder: pass the first image instead
    send_folder = True
    if app_constants.USE_EXTERNAL_VIEWER:
        send_folder = True
        if custom_args:
            if send_folder_t in custom_args:
                send_folder = True
            elif send_image_t in custom_args:
                send_folder = False
    def find_f_img_folder():
        # Chapter is a plain folder: return it, or its first image when the
        # viewer wants a file. Raises NotADirectoryError for archives (caught below).
        filepath = os.path.join(temp_p, [x for x in sorted([y.name for y in scandir.scandir(temp_p)])\
            if x.lower().endswith(IMG_FILES) and not x.startswith('.')][0]) # Find first page
        return temp_p if send_folder else filepath
    def find_f_img_archive(extract=True):
        # Chapter lives in an archive. extract=True unpacks it to ./temp;
        # extract=False only verifies images exist and returns the archive path.
        zip = ArchiveFile(temp_p)  # NOTE: shadows the builtin 'zip'
        if extract:
            app_constants.NOTIF_BAR.add_text('Extracting...')
            t_p = os.path.join('temp', str(uuid.uuid4()))
            os.mkdir(t_p)
            if is_archive or chapterpath.endswith(ARCHIVE_FILES):
                if os.path.isdir(chapterpath):
                    t_p = chapterpath
                elif chapterpath.endswith(ARCHIVE_FILES):
                    # chapter is itself an archive: extract its first directory
                    zip2 = ArchiveFile(chapterpath)
                    f_d = sorted(zip2.dir_list(True))
                    if f_d:
                        f_d = f_d[0]
                        t_p = zip2.extract(f_d, t_p)
                    else:
                        t_p = zip2.extract('', t_p)
                else:
                    t_p = zip.extract(chapterpath, t_p)
            else:
                zip.extract_all(t_p) # Compatibility reasons.. TODO: REMOVE IN BETA
            if send_folder:
                filepath = t_p
            else:
                filepath = os.path.join(t_p, [x for x in sorted([y.name for y in scandir.scandir(t_p)])\
                    if x.lower().endswith(IMG_FILES) and not x.startswith('.')][0]) # Find first page
            filepath = os.path.abspath(filepath)
        else:
            if is_archive or chapterpath.endswith(ARCHIVE_FILES):
                con = zip.dir_contents('')
                f_img = [x for x in sorted(con) if x.lower().endswith(IMG_FILES) and not x.startswith('.')]
                if not f_img:
                    # no top-level images: fall back to a full extraction
                    log_w('Extracting archive.. There are no images in the top-folder. ({})'.format(archive))
                    return find_f_img_archive()
                filepath = os.path.normpath(archive)
            else:
                app_constants.NOTIF_BAR.add_text("Fatal error: Unsupported gallery!")
                raise ValueError("Unsupported gallery version")
        return filepath
    try:
        try: # folder
            filepath = find_f_img_folder()
        except NotADirectoryError: # archive
            try:
                # some viewers can open archives directly, skipping extraction
                if not app_constants.EXTRACT_CHAPTER_BEFORE_OPENING and app_constants.EXTERNAL_VIEWER_PATH:
                    filepath = find_f_img_archive(False)
                else:
                    filepath = find_f_img_archive()
            except app_constants.CreateArchiveFail:
                log.exception('Could not open chapter')
                app_constants.NOTIF_BAR.add_text('Could not open chapter. Check happypanda.log for more details.')
                return
    except FileNotFoundError:
        log.exception('Could not find chapter {}'.format(chapterpath))
        app_constants.NOTIF_BAR.add_text("Chapter does no longer exist!")
        return
    except IndexError:
        # the "[0] first page" lookups raise IndexError when no images exist
        log.exception('No images found: {}'.format(chapterpath))
        app_constants.NOTIF_BAR.add_text("No images found in chapter!")
        return
    # substitute the resolved path into the viewer's argument template
    if send_folder_t in custom_args:
        custom_args = custom_args.replace(send_folder_t, filepath)
    elif send_image_t in custom_args:
        custom_args = custom_args.replace(send_image_t, filepath)
    else:
        custom_args = filepath
    try:
        app_constants.NOTIF_BAR.add_text('Opening chapter...')
        if not app_constants.USE_EXTERNAL_VIEWER:
            # hand off to the platform's default opener
            if sys.platform.startswith('darwin'):
                subprocess.call(('open', custom_args))
            elif os.name == 'nt':
                os.startfile(custom_args)
            elif os.name == 'posix':
                subprocess.call(('xdg-open', custom_args))
        else:
            ext_path = app_constants.EXTERNAL_VIEWER_PATH
            viewer = external_viewer_checker(ext_path)
            if viewer == 'honeyview':
                if app_constants.OPEN_GALLERIES_SEQUENTIALLY:
                    subprocess.call((ext_path, custom_args))
                else:
                    subprocess.Popen((ext_path, custom_args))
            else:
                if app_constants.OPEN_GALLERIES_SEQUENTIALLY:
                    subprocess.check_call((ext_path, custom_args))
                else:
                    subprocess.Popen((ext_path, custom_args))
    except subprocess.CalledProcessError:
        app_constants.NOTIF_BAR.add_text("Could not open chapter. Invalid external viewer.")
        log.exception('Could not open chapter. Invalid external viewer.')
    except:
        app_constants.NOTIF_BAR.add_text("Could not open chapter for unknown reasons. Check happypanda.log!")
        log_e('Could not open chapter {}'.format(os.path.split(chapterpath)[1]))
def get_gallery_img(gallery_or_path, chap_number=0):
    """
    Returns a path to image in gallery chapter

    Accepts either a filesystem path (str) or a gallery object; for a
    gallery object the chapter at *chap_number* is used and, when the
    gallery is an archive, the first image is extracted to a temp dir.
    Returns None (after logging) when no image could be found.
    """
    archive = None
    if isinstance(gallery_or_path, str):
        path = gallery_or_path
    else:
        path = gallery_or_path.chapters[chap_number].path
        if gallery_or_path.is_archive:
            archive = gallery_or_path.path
    # TODO: add chapter support
    try:
        name = os.path.split(path)[1]
    except IndexError:
        name = os.path.split(path)[0]
    is_archive = True if archive or name.endswith(ARCHIVE_FILES) else False
    real_path = archive if archive else path
    img_path = None
    if is_archive:
        try:
            log_i('Getting image from archive')
            zip = ArchiveFile(real_path)  # NOTE: shadows the builtin 'zip'
            temp_path = os.path.join(app_constants.temp_dir, str(uuid.uuid4()))
            os.mkdir(temp_path)
            # pick the first (sorted) non-hidden image; top level when the whole
            # path is the archive, else within the chapter's dir inside it
            if not archive:
                f_img_name = sorted([img for img in zip.namelist() if img.lower().endswith(IMG_FILES) and not img.startswith('.')])[0]
            else:
                f_img_name = sorted([img for img in zip.dir_contents(path) if img.lower().endswith(IMG_FILES) and not img.startswith('.')])[0]
            img_path = zip.extract(f_img_name, temp_path)
            zip.close()
        except app_constants.CreateArchiveFail:
            img_path = app_constants.NO_IMAGE_PATH
    elif os.path.isdir(real_path):
        log_i('Getting image from folder')
        first_img = sorted([img.name for img in scandir.scandir(real_path) if img.name.lower().endswith(tuple(IMG_FILES)) and not img.name.startswith('.')])
        if first_img:
            img_path = os.path.join(real_path, first_img[0])
    if img_path:
        return os.path.abspath(img_path)
    else:
        log_e("Could not get gallery image")
def tag_to_string(gallery_tag, simple=False):
    """
    Takes gallery tags and converts it to string, returns string
    if simple is set to True, returns a CSV string, else a dict-like string

    Namespaces are emitted in sorted order; tags within a namespace are
    sorted and comma-joined. Empty namespaces are skipped (but still count
    toward separator placement, matching the original behavior).
    """
    assert isinstance(gallery_tag, dict), "Please provide a dict like this: {'namespace':['tag1']}"
    out = ""
    total = len(gallery_tag)
    for pos, ns in enumerate(sorted(gallery_tag), 1):
        tags = gallery_tag[ns]
        if not tags:
            continue
        joined = ', '.join(sorted(tags))
        if simple:
            prefix = ns + "," if ns != 'default' else ""
            out += prefix + joined
        elif ns != 'default' and len(tags) > 1:
            out += ns + ":[" + joined + "]"
        elif ns != 'default':
            out += ns + ":" + joined
        else:
            out += joined
        if pos != total:
            out += ', '
    return out
def tag_to_dict(string, ns_capitalize=True):
    """Receives a string of tags and converts it to a dict of tags.

    Accepted forms: "tag", "namespace:tag" and "namespace:[tag1, tag2]".
    Tags are deduplicated; unnamespaced tags are lowercased and land under
    the 'default' key, which is always present.

    Args:
        string: comma-separated raw tag string.
        ns_capitalize: capitalize namespace names (except 'default').

    Returns:
        dict mapping namespace -> list of tags.

    Fix: top-level detection used ``level is 0`` (identity comparison on an
    int) — changed to ``level == 0``.
    """
    namespace_tags = {'default':[]}
    level = 0 # so we know if we are in a list
    buffer = ""
    stripped_set = set() # we only need unique values
    for n, x in enumerate(string, 1):
        if x == '[':
            level += 1 # we are now entering a list
        if x == ']':
            level -= 1 # we are now exiting a list
        if x == ',': # if we meet a comma
            # we trim our buffer if we are at top level
            if level == 0:
                # add to list
                stripped_set.add(buffer.strip())
                buffer = ""
            else:
                buffer += x
        elif n == len(string): # or at end of string
            buffer += x
            # add to list
            stripped_set.add(buffer.strip())
            buffer = ""
        else:
            buffer += x
    def tags_in_list(br_tags):
        "Receives a string of tags enclosed in brackets, returns a list with tags"
        unique_tags = set()
        tags = br_tags.replace('[', '').replace(']','')
        tags = tags.split(',')
        for t in tags:
            if len(t) != 0:
                unique_tags.add(t.strip().lower())
        return list(unique_tags)
    unique_tags = set()
    for ns_tag in stripped_set:
        splitted_tag = ns_tag.split(':')
        # if there is a namespace
        if len(splitted_tag) > 1 and len(splitted_tag[0]) != 0:
            if splitted_tag[0] != 'default':
                if ns_capitalize:
                    namespace = splitted_tag[0].capitalize()
                else:
                    namespace = splitted_tag[0]
            else:
                namespace = splitted_tag[0]
            tags = splitted_tag[1]
            # if tags are enclosed in brackets
            if '[' in tags and ']' in tags:
                tags = tags_in_list(tags)
                tags = [x for x in tags if len(x) != 0]
                # if namespace is already in our list
                if namespace in namespace_tags:
                    for t in tags:
                        # if tag not already in ns list
                        if not t in namespace_tags[namespace]:
                            namespace_tags[namespace].append(t)
                else:
                    # to avoid empty strings
                    namespace_tags[namespace] = tags
            else: # only one tag
                if len(tags) != 0:
                    if namespace in namespace_tags:
                        namespace_tags[namespace].append(tags)
                    else:
                        namespace_tags[namespace] = [tags]
        else: # no namespace specified
            tag = splitted_tag[0]
            if len(tag) != 0:
                unique_tags.add(tag.lower())
    if len(unique_tags) != 0:
        for t in unique_tags:
            namespace_tags['default'].append(t)
    return namespace_tags
import re as regex
def title_parser(title):
    """Receives a title to parse. Returns dict with 'title', 'artist' and 'language'.

    Expected form: "[Artist] Title [Language].ext" — bracketed groups are
    matched in order; the first is taken as artist, and any group naming a
    known language sets 'language'. A known archive extension is stripped
    first. With no bracketed groups the whole string becomes the title.
    """
    log_d("Parsing title: {}".format(title))
    #If title is not absolute, then it's not a pathname and we allow a "/" inside it
    if(os.path.isabs(title)):
        title = os.path.basename(title)
    title = " ".join(title.split())  # collapse runs of whitespace
    # if '/' in title:
    # 	try:
    # 		title = os.path.split(title)[1]
    # 		if not title:
    # 			title = title
    # 	except IndexError:
    # 		pass
    # strip a trailing archive extension, if any
    for x in ARCHIVE_FILES:
        if title.endswith(x):
            title = title[:-len(x)]
    parsed_title = {'title':"", 'artist':"", 'language':""}
    try:
        # every "[...]" group in the title (group 0 is the full bracket content)
        a = regex.findall('((?<=\[) *[^\]]+( +\S+)* *(?=\]))', title)
        assert len(a) != 0
        try:
            artist = a[0][0].strip()
        except IndexError:
            artist = ''
        parsed_title['artist'] = artist
        try:
            assert a[1]  # raises IndexError (not AssertionError) with only one group
            lang = app_constants.G_LANGUAGES + app_constants.G_CUSTOM_LANGUAGES
            for x in a:
                l = x[0].strip()
                l = l.lower()
                l = l.capitalize()
                if l in lang:
                    parsed_title['language'] = l
                    break
            else:
                # no group matched a known language
                parsed_title['language'] = app_constants.G_DEF_LANGUAGE
        except IndexError:
            parsed_title['language'] = app_constants.G_DEF_LANGUAGE
        # remove all bracketed groups (and the leftover "[]") from the title
        t = title
        for x in a:
            t = t.replace(x[0], '')
        t = t.replace('[]', '')
        final_title = t.strip()
        parsed_title['title'] = final_title
    except AssertionError:
        # no bracketed groups at all: the whole string is the title
        parsed_title['title'] = title
    return parsed_title
import webbrowser
def open_web_link(url):
    """Open *url* in a new browser tab; no-op for falsy URLs.

    Failures are logged instead of raised. Fix: the bare ``except:`` also
    swallowed KeyboardInterrupt/SystemExit; narrowed to ``Exception``.
    """
    if not url:
        return
    try:
        webbrowser.open_new_tab(url)
    except Exception:
        log_e('Could not open URL in browser')
def open_path(path, select=''):
    """Reveal *path* in the operating system's file manager.

    Args:
        path: directory (or file) to open.
        select: optional path to highlight in the window (Windows Explorer only).

    Errors are reported on the notification bar and logged; nothing is
    raised. Fix: the bare ``except:`` also swallowed
    KeyboardInterrupt/SystemExit; narrowed to ``Exception``. Also replaced
    the empty docstring.
    """
    try:
        if sys.platform.startswith('darwin'):
            subprocess.Popen(['open', path])
        elif os.name == 'nt':
            if select:
                subprocess.Popen(r'explorer.exe /select,"{}"'.format(os.path.normcase(select)), shell=True)
            else:
                os.startfile(path)
        elif os.name == 'posix':
            subprocess.Popen(('xdg-open', path))
        else:
            app_constants.NOTIF_BAR.add_text("I don't know how you've managed to do this.. If you see this, you're in deep trouble...")
            log_e('Could not open path: no OS found')
    except Exception:
        app_constants.NOTIF_BAR.add_text("Could not open specified location. It might not exist anymore.")
        log_e('Could not open path')
def open_torrent(path):
    """Hand the torrent file to the configured client, or reveal it in the
    file manager when no client is configured."""
    client = app_constants.TORRENT_CLIENT
    if client:
        subprocess.Popen([client, path])
    else:
        open_path(path)
def delete_path(path):
    """Delete *path* (file or directory) recursively.

    Honors app_constants.SEND_FILES_TO_TRASH. Returns True on success,
    False otherwise; failures are logged and shown on the notification bar.
    A path that no longer exists counts as success.

    Fix: the bare ``except:`` around send2trash also swallowed
    KeyboardInterrupt/SystemExit; narrowed to ``Exception``.
    """
    s = True
    if os.path.exists(path):
        error = ''
        if app_constants.SEND_FILES_TO_TRASH:
            try:
                send2trash.send2trash(path)
            except Exception:
                log.exception("Unable to send file to trash")
                error = 'Unable to send file to trash'
        else:
            try:
                if os.path.isfile(path):
                    os.remove(path)
                else:
                    shutil.rmtree(path)
            except PermissionError:
                error = 'PermissionError'
            except FileNotFoundError:
                # already gone: treat as success
                pass
        if error:
            p = os.path.split(path)[1]
            log_e('Failed to delete: {}:{}'.format(error, p))
            app_constants.NOTIF_BAR.add_text('An error occured while trying to delete: {}'.format(error))
            s = False
    return s
def regex_search(a, b, override_case=False, args=None):
    """Return True if regex pattern *a* matches anywhere in *b*.

    Matching is case-insensitive unless app_constants.Search.Case is in
    *args* (and override_case is False). Invalid patterns yield False.

    Args:
        a: regex pattern.
        b: text to search.
        override_case: force case-insensitive matching.
        args: optional list of app_constants.Search flags.

    Fix: the default ``args=[]`` was a shared mutable default argument;
    replaced with the None sentinel idiom (callers are unaffected).
    """
    if args is None:
        args = []
    if a and b:
        try:
            if app_constants.Search.Case not in args or override_case:
                if regex.search(a, b, regex.IGNORECASE):
                    return True
            else:
                if regex.search(a, b):
                    return True
        except regex.error:
            # malformed pattern: treat as "no match"
            pass
    return False
def search_term(a, b, override_case=False, args=None):
    """Return True if *a* occurs in *b* (plain substring search).

    Comparison is case-insensitive unless app_constants.Search.Case is in
    *args* (and override_case is False). With app_constants.Search.Strict
    in *args*, full equality is required instead of containment.

    Args:
        a: term to look for.
        b: text to search.
        override_case: force case-insensitive comparison.
        args: optional list of app_constants.Search flags.

    Fix: the default ``args=[]`` was a shared mutable default argument;
    replaced with the None sentinel idiom (callers are unaffected).
    """
    if args is None:
        args = []
    if a and b:
        if app_constants.Search.Case not in args or override_case:
            b = b.lower()
            a = a.lower()
        if app_constants.Search.Strict in args:
            if a == b:
                return True
        else:
            if a in b:
                return True
    return False
def get_terms(term):
    """Divides *term* into pieces. Returns a list with the pieces.

    Whitespace and commas separate pieces; double quotes keep a piece
    (including its spaces) together; 'ns:[a b]' expands to one piece per
    bracketed tag, each prefixed with the namespace. A tag's leading '-'
    (exclusion) is hoisted onto the namespace prefix. Bracket-derived
    pieces come from a set, so their relative order is unspecified.
    """
    # some variables we will use
    pieces = []
    piece = ''
    qoute_level = 0
    bracket_level = 0
    brackets_tags = {}
    current_bracket_ns = ''
    end_of_bracket = False
    blacklist = ['[', ']', '"', ',']  # structural chars, never kept in a piece
    for n, x in enumerate(term):
        # if we meet brackets
        if x == '[':
            bracket_level += 1
            brackets_tags[piece] = set() # we want unique tags!
            current_bracket_ns = piece
        elif x == ']':
            bracket_level -= 1
            end_of_bracket = True
        # if we meet a double qoute
        if x == '"':
            if qoute_level > 0:
                qoute_level -= 1
            else:
                qoute_level += 1
        # if we meet a whitespace, comma or end of term and are not in a double qoute
        if (x == ' ' or x == ',' or n == len(term) - 1) and qoute_level == 0:
            # if end of term and x is allowed
            if (n == len(term) - 1) and not x in blacklist and x != ' ':
                piece += x
            if piece:
                if bracket_level > 0 or end_of_bracket: # if we are inside a bracket we put piece in the set
                    end_of_bracket = False
                    # the namespace prefix still sits at the front of the piece; strip it
                    if piece.startswith(current_bracket_ns):
                        piece = piece[len(current_bracket_ns):]
                    if piece:
                        try:
                            brackets_tags[current_bracket_ns].add(piece)
                        except KeyError: # keyerror when there is a closing bracket without a starting bracket
                            pass
                else:
                    pieces.append(piece) # else put it in the normal list
                piece = ''
            continue
        # else append to the buffers
        if not x in blacklist:
            if qoute_level > 0: # we want to include everything if in double qoute
                piece += x
            elif x != ' ':
                piece += x
    # now for the bracket tags
    for ns in brackets_tags:
        for tag in brackets_tags[ns]:
            ns_tag = ns
            # if they want to exclude this tag
            if tag[0] == '-':
                if ns_tag[0] != '-':
                    ns_tag = '-' + ns
                tag = tag[1:] # remove the '-'
            # put them together
            ns_tag += tag
            # done
            pieces.append(ns_tag)
    return pieces
def image_greyscale(filepath):
    """
    Check if image is monochrome (1 channel or 3 identical channels)

    Opens *filepath* with PIL and reports True when the three RGB
    channels are pixel-identical.
    """
    log_d("Checking if img is monochrome: {}".format(filepath))
    im = Image.open(filepath).convert("RGB")
    # NOTE(review): after convert("RGB") the mode is always "RGB", so the
    # check below can never trigger and an "L" mode is unreachable — confirm
    # whether the convert() call was intended.
    if im.mode not in ("L", "RGB"):
        return False
    if im.mode == "RGB":
        rgb = im.split()
        # channels must be identical everywhere: max per-pixel difference == 0
        if ImageChops.difference(rgb[0],rgb[1]).getextrema()[1] != 0:
            return False
        if ImageChops.difference(rgb[0],rgb[2]).getextrema()[1] != 0:
            return False
    return True
def PToQImageHelper(im):
    """
    Convert a PIL Image (or a path / QString-like object naming one) into
    the pieces needed to construct a QImage: raw pixel data, the source
    image, the QImage format and an optional colortable.

    Adapted from PIL's ImageQt helper:
    The Python Imaging Library (PIL) is
    Copyright © 1997-2011 by Secret Labs AB
    Copyright © 1995-2011 by <NAME>
    """
    def rgb(r, g, b, a=255):
        """(Internal) Turns an RGB color into a Qt compatible color integer."""
        # use qRgb to pack the colors, and then turn the resulting long
        # into a negative integer with the same bitpattern.
        return (qRgba(r, g, b, a) & 0xffffffff)
    def align8to32(bytes, width, mode):
        """
        converts each scanline of data from 8 bit to 32 bit aligned
        """
        # NOTE: parameter 'bytes' shadows the builtin
        bits_per_pixel = {
            '1': 1,
            'L': 8,
            'P': 8,
        }[mode]
        # calculate bytes per line and the extra padding if needed
        bits_per_line = bits_per_pixel * width
        full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)
        bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)
        extra_padding = -bytes_per_line % 4
        # already 32 bit aligned by luck
        if not extra_padding:
            return bytes
        new_data = []
        for i in range(len(bytes) // bytes_per_line):
            new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line] + b'\x00' * extra_padding)
        return b''.join(new_data)
    data = None
    colortable = None
    # handle filename, if given instead of image name
    if hasattr(im, "toUtf8"):
        # FIXME - is this really the best way to do this?
        # NOTE(review): the 'str is bytes' branch is a Python 2 leftover;
        # 'unicode' is undefined on Python 3 (that branch is unreachable there).
        if str is bytes:
            im = unicode(im.toUtf8(), "utf-8")
        else:
            im = str(im.toUtf8(), "utf-8")
    if isinstance(im, (bytes, str)):
        im = Image.open(im)
    # map the PIL mode to a QImage format (and build a palette where needed)
    if im.mode == "1":
        format = QImage.Format_Mono
    elif im.mode == "L":
        format = QImage.Format_Indexed8
        colortable = []
        for i in range(256):
            colortable.append(rgb(i, i, i))
    elif im.mode == "P":
        format = QImage.Format_Indexed8
        colortable = []
        palette = im.getpalette()
        for i in range(0, len(palette), 3):
            colortable.append(rgb(*palette[i:i+3]))
    elif im.mode == "RGB":
        data = im.tobytes("raw", "BGRX")
        format = QImage.Format_RGB32
    elif im.mode == "RGBA":
        try:
            data = im.tobytes("raw", "BGRA")
        except SystemError:
            # workaround for earlier versions
            r, g, b, a = im.split()
            im = Image.merge("RGBA", (b, g, r, a))
        format = QImage.Format_ARGB32
    else:
        raise ValueError("unsupported image mode %r" % im.mode)
    # must keep a reference, or Qt will crash!
    __data = data or align8to32(im.tobytes(), im.size[0], im.mode)
    return {
        'data': __data, 'im': im, 'format': format, 'colortable': colortable
    }
def make_chapters(gallery_object):
    """Populate gallery_object.chapters from its path (directory or archive).

    Directory source: each sub-folder/sub-archive becomes a chapter; if
    none exist, all images directly in the folder form a single chapter.
    Archive source: each gallery found inside the archive becomes a
    chapter. Metadata files found along the way are merged and applied to
    the gallery at the end.
    """
    chap_container = gallery_object.chapters
    path = gallery_object.path
    metafile = GMetafile()
    try:
        log_d('Listing dir...')
        con = scandir.scandir(path) # list all folders in gallery dir
        log_i('Gallery source is a directory')
        log_d('Sorting')
        chapters = sorted([sub.path for sub in con if sub.is_dir() or sub.name.endswith(ARCHIVE_FILES)]) #subfolders
        # if gallery has chapters divided into sub folders
        if len(chapters) != 0:
            log_d('Chapters divided in folders..')
            for ch in chapters:
                chap = chap_container.create_chapter()
                chap.title = title_parser(ch)['title']
                chap.path = os.path.join(path, ch)
                metafile.update(GMetafile(chap.path))
                chap.pages = len([x for x in scandir.scandir(chap.path) if x.name.lower().endswith(IMG_FILES)])
        else: #else assume that all images are in gallery folder
            chap = chap_container.create_chapter()
            chap.title = title_parser(os.path.split(path)[1])['title']
            chap.path = path
            metafile.update(GMetafile(path))
            chap.pages = len([x for x in scandir.scandir(path) if x.name.lower().endswith(IMG_FILES)])
    except NotADirectoryError:
        # source is not a folder: treat a recognized archive as the gallery
        if path.endswith(ARCHIVE_FILES):
            gallery_object.is_archive = 1
            log_i("Gallery source is an archive")
            archive_g = sorted(check_archive(path))
            for g in archive_g:
                chap = chap_container.create_chapter()
                chap.path = g
                chap.in_archive = 1
                metafile.update(GMetafile(g, path))
                arch = ArchiveFile(path)
                chap.pages = len(arch.dir_contents(g))
                arch.close()
    metafile.apply_gallery(gallery_object)
def timeit(func):
    """Decorator that prints the wrapped function's execution time in ms.

    Fix: the wrapper discarded the wrapped function's return value;
    it is now captured and returned (callers that ignored the result
    are unaffected).
    """
    @functools.wraps(func)
    def newfunc(*args, **kwargs):
        startTime = time.time()
        result = func(*args, **kwargs)
        elapsedTime = time.time() - startTime
        print('function [{}] finished in {} ms'.format(
            func.__name__, int(elapsedTime * 1000)))
        return result
    return newfunc
def makedirs_if_not_exists(folder):
    """Create directory (and any missing parents) if it does not exist.

    Args:
        folder: Target folder.

    Fix: the check-then-create pattern (``isdir`` then ``makedirs``) was
    race-prone; ``exist_ok=True`` makes the operation atomic and idempotent
    while still raising if *folder* exists as a regular file.
    """
    os.makedirs(folder, exist_ok=True)
def lookup_tag(tag):
    """Open the preferred site's lookup page for *tag* in the browser.

    Whitespace is trimmed, the tag lowercased and spaces become '+';
    tags without a namespace are looked up under 'misc:'.
    """
    assert isinstance(tag, str), "str not " + str(type(tag))
    # remove whitespace at edges and replace whitespace with +
    query = tag.strip().lower().replace(' ', '+')
    if ':' not in query:
        query = 'misc:' + query
    base = app_constants.DEFAULT_EHEN_URL
    if not base.endswith('/'):
        base += '/'
    open_web_link(base + 'tag/' + query)
|
[
"PIL.ImageChops.difference",
"os.mkdir",
"sys.platform.startswith",
"os.remove",
"send2trash.send2trash",
"os.path.isfile",
"shutil.rmtree",
"app_constants.NOTIF_BAR.add_text",
"os.path.join",
"subprocess.check_call",
"PIL.Image.merge",
"os.path.abspath",
"hashlib.sha1",
"os.path.exists",
"PyQt5.QtGui.qRgba",
"webbrowser.open_new_tab",
"re.findall",
"scandir.scandir",
"os.path.normpath",
"shutil.copyfile",
"os.path.normcase",
"datetime.datetime.now",
"re.sub",
"re.search",
"os.startfile",
"scandir.walk",
"subprocess.Popen",
"datetime.datetime.today",
"os.path.basename",
"datetime.date.today",
"subprocess.call",
"functools.wraps",
"re.compile",
"os.path.isabs",
"json.load",
"uuid.uuid4",
"os.makedirs",
"os.path.isdir",
"PIL.Image.open",
"time.time",
"os.path.splitext",
"shutil.move",
"os.path.split",
"logging.getLogger"
] |
[((1094, 1121), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1111, 1121), False, 'import logging\n'), ((8077, 8099), 'os.path.split', 'os.path.split', (['db_path'], {}), '(db_path)\n', (8090, 8099), False, 'import os\n'), ((8117, 8150), 'os.path.join', 'os.path.join', (['base_path', '"""backup"""'], {}), "(base_path, 'backup')\n", (8129, 8150), False, 'import os\n'), ((11006, 11027), 'os.path.join', 'os.path.join', (['dest', 'f'], {}), '(dest, f)\n', (11018, 11027), False, 'import os\n'), ((11500, 11521), 'os.path.normcase', 'os.path.normcase', (['key'], {}), '(key)\n', (11516, 11521), False, 'import os\n'), ((11625, 11646), 'os.path.splitext', 'os.path.splitext', (['key'], {}), '(key)\n', (11641, 11646), False, 'import os\n'), ((12983, 12997), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (12995, 12997), False, 'import hashlib\n'), ((20691, 20709), 'scandir.walk', 'scandir.walk', (['path'], {}), '(path)\n', (20703, 20709), False, 'import scandir\n'), ((21669, 21690), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (21688, 21690), False, 'import datetime\n'), ((34599, 34619), 'os.path.isabs', 'os.path.isabs', (['title'], {}), '(title)\n', (34612, 34619), False, 'import os\n'), ((37318, 37338), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (37332, 37338), False, 'import os\n'), ((46764, 46785), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (46779, 46785), False, 'import functools\n'), ((2620, 2651), 'json.load', 'json.load', (['fp'], {'encoding': '"""utf-8"""'}), "(fp, encoding='utf-8')\n", (2629, 2651), False, 'import json\n'), ((8162, 8187), 'os.path.isdir', 'os.path.isdir', (['backup_dir'], {}), '(backup_dir)\n', (8175, 8187), False, 'import os\n'), ((8197, 8217), 'os.mkdir', 'os.mkdir', (['backup_dir'], {}), '(backup_dir)\n', (8205, 8217), False, 'import os\n'), ((10435, 10459), 'os.path.split', 'os.path.split', (['chap.path'], {}), '(chap.path)\n', (10448, 10459), False, 
'import os\n'), ((10968, 10987), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (10981, 10987), False, 'import os\n'), ((11236, 11260), 'os.path.exists', 'os.path.exists', (['new_path'], {}), '(new_path)\n', (11250, 11260), False, 'import os\n'), ((11529, 11547), 'os.path.isdir', 'os.path.isdir', (['key'], {}), '(key)\n', (11542, 11547), False, 'import os\n'), ((11765, 11787), 'os.path.normcase', 'os.path.normcase', (['path'], {}), '(path)\n', (11781, 11787), False, 'import os\n'), ((21921, 21940), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (21934, 21940), False, 'import os\n'), ((22292, 22321), 'os.path.normpath', 'os.path.normpath', (['chapterpath'], {}), '(chapterpath)\n', (22308, 22321), False, 'import os\n'), ((26219, 26273), 'app_constants.NOTIF_BAR.add_text', 'app_constants.NOTIF_BAR.add_text', (['"""Opening chapter..."""'], {}), "('Opening chapter...')\n", (26251, 26273), False, 'import app_constants\n'), ((29003, 29027), 'os.path.isdir', 'os.path.isdir', (['real_path'], {}), '(real_path)\n', (29016, 29027), False, 'import os\n'), ((29345, 29370), 'os.path.abspath', 'os.path.abspath', (['img_path'], {}), '(img_path)\n', (29360, 29370), False, 'import os\n'), ((34638, 34661), 'os.path.basename', 'os.path.basename', (['title'], {}), '(title)\n', (34654, 34661), False, 'import os\n'), ((35067, 35128), 're.findall', 'regex.findall', (['"""((?<=\\\\[) *[^\\\\]]+( +\\\\S+)* *(?=\\\\]))"""', 'title'], {}), "('((?<=\\\\[) *[^\\\\]]+( +\\\\S+)* *(?=\\\\]))', title)\n", (35080, 35128), True, 'import re as regex\n'), ((36188, 36216), 'webbrowser.open_new_tab', 'webbrowser.open_new_tab', (['url'], {}), '(url)\n', (36211, 36216), False, 'import webbrowser\n'), ((36336, 36369), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (36359, 36369), False, 'import sys\n'), ((37180, 37234), 'subprocess.Popen', 'subprocess.Popen', (['[app_constants.TORRENT_CLIENT, path]'], {}), 
'([app_constants.TORRENT_CLIENT, path])\n', (37196, 37234), False, 'import subprocess\n'), ((43714, 43728), 'PIL.Image.open', 'Image.open', (['im'], {}), '(im)\n', (43724, 43728), False, 'from PIL import Image, ImageChops\n'), ((45039, 45060), 'scandir.scandir', 'scandir.scandir', (['path'], {}), '(path)\n', (45054, 45060), False, 'import scandir\n'), ((46840, 46851), 'time.time', 'time.time', ([], {}), '()\n', (46849, 46851), False, 'import time\n'), ((47193, 47214), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (47206, 47214), False, 'import os\n'), ((47224, 47243), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (47235, 47243), False, 'import os\n'), ((2254, 2275), 'scandir.scandir', 'scandir.scandir', (['path'], {}), '(path)\n', (2269, 2275), False, 'import scandir\n'), ((8469, 8502), 'os.path.join', 'os.path.join', (['backup_dir', 'db_name'], {}), '(backup_dir, db_name)\n', (8481, 8502), False, 'import os\n'), ((8518, 8542), 'os.path.exists', 'os.path.exists', (['dst_path'], {}), '(dst_path)\n', (8532, 8542), False, 'import os\n'), ((8589, 8623), 'shutil.copyfile', 'shutil.copyfile', (['db_path', 'dst_path'], {}), '(db_path, dst_path)\n', (8604, 8623), False, 'import shutil\n'), ((9437, 9460), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9458, 9460), False, 'import datetime\n'), ((11308, 11334), 'os.path.normcase', 'os.path.normcase', (['new_path'], {}), '(new_path)\n', (11324, 11334), False, 'import os\n'), ((11385, 11412), 'shutil.move', 'shutil.move', (['path', 'new_path'], {}), '(path, new_path)\n', (11396, 11412), False, 'import shutil\n'), ((11992, 12013), 're.compile', 're.compile', (['regex_str'], {}), '(regex_str)\n', (12002, 12013), False, 'import re\n'), ((12172, 12235), 're.sub', 're.sub', (['regex_str', 'app_constants.GALLERY_DATA_FIX_REPLACE', 'text'], {}), '(regex_str, app_constants.GALLERY_DATA_FIX_REPLACE, text)\n', (12178, 12235), False, 'import re\n'), ((17668, 17682), 'os.mkdir', 
'os.mkdir', (['path'], {}), '(path)\n', (17676, 17682), False, 'import os\n'), ((18628, 18642), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (18636, 18642), False, 'import os\n'), ((23137, 23186), 'app_constants.NOTIF_BAR.add_text', 'app_constants.NOTIF_BAR.add_text', (['"""Extracting..."""'], {}), "('Extracting...')\n", (23169, 23186), False, 'import app_constants\n'), ((23257, 23270), 'os.mkdir', 'os.mkdir', (['t_p'], {}), '(t_p)\n', (23265, 23270), False, 'import os\n'), ((25695, 25760), 'app_constants.NOTIF_BAR.add_text', 'app_constants.NOTIF_BAR.add_text', (['"""Chapter does no longer exist!"""'], {}), "('Chapter does no longer exist!')\n", (25727, 25760), False, 'import app_constants\n'), ((25872, 25935), 'app_constants.NOTIF_BAR.add_text', 'app_constants.NOTIF_BAR.add_text', (['"""No images found in chapter!"""'], {}), "('No images found in chapter!')\n", (25904, 25935), False, 'import app_constants\n'), ((26339, 26372), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (26362, 26372), False, 'import sys\n'), ((27254, 27343), 'app_constants.NOTIF_BAR.add_text', 'app_constants.NOTIF_BAR.add_text', (['"""Could not open chapter. Invalid external viewer."""'], {}), "(\n 'Could not open chapter. Invalid external viewer.')\n", (27286, 27343), False, 'import app_constants\n'), ((27433, 27539), 'app_constants.NOTIF_BAR.add_text', 'app_constants.NOTIF_BAR.add_text', (['"""Could not open chapter for unknown reasons. Check happypanda.log!"""'], {}), "(\n 'Could not open chapter for unknown reasons. 
Check happypanda.log!')\n", (27465, 27539), False, 'import app_constants\n'), ((28031, 28050), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (28044, 28050), False, 'import os\n'), ((28469, 28488), 'os.mkdir', 'os.mkdir', (['temp_path'], {}), '(temp_path)\n', (28477, 28488), False, 'import os\n'), ((36383, 36415), 'subprocess.Popen', 'subprocess.Popen', (["['open', path]"], {}), "(['open', path])\n", (36399, 36415), False, 'import subprocess\n'), ((36936, 37039), 'app_constants.NOTIF_BAR.add_text', 'app_constants.NOTIF_BAR.add_text', (['"""Could not open specified location. It might not exist anymore."""'], {}), "(\n 'Could not open specified location. It might not exist anymore.')\n", (36968, 37039), False, 'import app_constants\n'), ((41678, 41698), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (41688, 41698), False, 'from PIL import Image, ImageChops\n'), ((42450, 42467), 'PyQt5.QtGui.qRgba', 'qRgba', (['r', 'g', 'b', 'a'], {}), '(r, g, b, a)\n', (42455, 42467), False, 'from PyQt5.QtGui import QImage, qRgba\n'), ((46904, 46915), 'time.time', 'time.time', ([], {}), '()\n', (46913, 46915), False, 'import time\n'), ((10590, 10618), 'os.path.join', 'os.path.join', (['new_path', 'tail'], {}), '(new_path, tail)\n', (10602, 10618), False, 'import os\n'), ((11131, 11150), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (11144, 11150), False, 'import os\n'), ((23356, 23382), 'os.path.isdir', 'os.path.isdir', (['chapterpath'], {}), '(chapterpath)\n', (23369, 23382), False, 'import os\n'), ((24274, 24299), 'os.path.abspath', 'os.path.abspath', (['filepath'], {}), '(filepath)\n', (24289, 24299), False, 'import os\n'), ((24746, 24771), 'os.path.normpath', 'os.path.normpath', (['archive'], {}), '(archive)\n', (24762, 24771), False, 'import os\n'), ((24806, 24875), 'app_constants.NOTIF_BAR.add_text', 'app_constants.NOTIF_BAR.add_text', (['"""Fatal error: Unsupported gallery!"""'], {}), "('Fatal error: Unsupported 
gallery!')\n", (24838, 24875), False, 'import app_constants\n'), ((26390, 26428), 'subprocess.call', 'subprocess.call', (["('open', custom_args)"], {}), "(('open', custom_args))\n", (26405, 26428), False, 'import subprocess\n'), ((28092, 28111), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (28105, 28111), False, 'import os\n'), ((29274, 29311), 'os.path.join', 'os.path.join', (['real_path', 'first_img[0]'], {}), '(real_path, first_img[0])\n', (29286, 29311), False, 'import os\n'), ((37438, 37465), 'send2trash.send2trash', 'send2trash.send2trash', (['path'], {}), '(path)\n', (37459, 37465), False, 'import send2trash\n'), ((37653, 37673), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (37667, 37673), False, 'import os\n'), ((37945, 37964), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (37958, 37964), False, 'import os\n'), ((38368, 38404), 're.search', 'regex.search', (['a', 'b', 'regex.IGNORECASE'], {}), '(a, b, regex.IGNORECASE)\n', (38380, 38404), True, 'import re as regex\n'), ((38475, 38493), 're.search', 'regex.search', (['a', 'b'], {}), '(a, b)\n', (38487, 38493), True, 'import re as regex\n'), ((45595, 45617), 'os.path.join', 'os.path.join', (['path', 'ch'], {}), '(path, ch)\n', (45607, 45617), False, 'import os\n'), ((8014, 8039), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (8037, 8039), False, 'import datetime\n'), ((17641, 17653), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (17651, 17653), False, 'import uuid\n'), ((18210, 18241), 'os.path.join', 'os.path.join', (['path', 'file_to_ext'], {}), '(path, file_to_ext)\n', (18222, 18241), False, 'import os\n'), ((18601, 18613), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18611, 18613), False, 'import uuid\n'), ((20835, 20856), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (20847, 20856), False, 'import os\n'), ((23230, 23242), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (23240, 23242), False, 'import 
uuid\n'), ((26479, 26504), 'os.startfile', 'os.startfile', (['custom_args'], {}), '(custom_args)\n', (26491, 26504), False, 'import os\n'), ((26848, 26888), 'subprocess.call', 'subprocess.call', (['(ext_path, custom_args)'], {}), '((ext_path, custom_args))\n', (26863, 26888), False, 'import subprocess\n'), ((26931, 26972), 'subprocess.Popen', 'subprocess.Popen', (['(ext_path, custom_args)'], {}), '((ext_path, custom_args))\n', (26947, 26972), False, 'import subprocess\n'), ((27073, 27119), 'subprocess.check_call', 'subprocess.check_call', (['(ext_path, custom_args)'], {}), '((ext_path, custom_args))\n', (27094, 27119), False, 'import subprocess\n'), ((27162, 27203), 'subprocess.Popen', 'subprocess.Popen', (['(ext_path, custom_args)'], {}), '((ext_path, custom_args))\n', (27178, 27203), False, 'import subprocess\n'), ((28442, 28454), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (28452, 28454), False, 'import uuid\n'), ((36611, 36629), 'os.startfile', 'os.startfile', (['path'], {}), '(path)\n', (36623, 36629), False, 'import os\n'), ((36675, 36711), 'subprocess.Popen', 'subprocess.Popen', (["('xdg-open', path)"], {}), "(('xdg-open', path))\n", (36691, 36711), False, 'import subprocess\n'), ((36738, 36871), 'app_constants.NOTIF_BAR.add_text', 'app_constants.NOTIF_BAR.add_text', (['"""I don\'t know how you\'ve managed to do this.. If you see this, you\'re in deep trouble..."""'], {}), '(\n "I don\'t know how you\'ve managed to do this.. 
If you see this, you\'re in deep trouble..."\n )\n', (36770, 36871), False, 'import app_constants\n'), ((37695, 37710), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (37704, 37710), False, 'import os\n'), ((37753, 37772), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (37766, 37772), False, 'import shutil\n'), ((13723, 13749), 'os.path.normcase', 'os.path.normcase', (['filepath'], {}), '(filepath)\n', (13739, 13749), False, 'import os\n'), ((25464, 25567), 'app_constants.NOTIF_BAR.add_text', 'app_constants.NOTIF_BAR.add_text', (['"""Could not open chapter. Check happypanda.log for more details."""'], {}), "(\n 'Could not open chapter. Check happypanda.log for more details.')\n", (25496, 25567), False, 'import app_constants\n'), ((26558, 26600), 'subprocess.call', 'subprocess.call', (["('xdg-open', custom_args)"], {}), "(('xdg-open', custom_args))\n", (26573, 26600), False, 'import subprocess\n'), ((27584, 27610), 'os.path.split', 'os.path.split', (['chapterpath'], {}), '(chapterpath)\n', (27597, 27610), False, 'import os\n'), ((29120, 29146), 'scandir.scandir', 'scandir.scandir', (['real_path'], {}), '(real_path)\n', (29135, 29146), False, 'import scandir\n'), ((41833, 41870), 'PIL.ImageChops.difference', 'ImageChops.difference', (['rgb[0]', 'rgb[1]'], {}), '(rgb[0], rgb[1])\n', (41854, 41870), False, 'from PIL import Image, ImageChops\n'), ((41929, 41966), 'PIL.ImageChops.difference', 'ImageChops.difference', (['rgb[0]', 'rgb[2]'], {}), '(rgb[0], rgb[2])\n', (41950, 41966), False, 'from PIL import Image, ImageChops\n'), ((45939, 45958), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (45952, 45958), False, 'import os\n'), ((46087, 46108), 'scandir.scandir', 'scandir.scandir', (['path'], {}), '(path)\n', (46102, 46108), False, 'import scandir\n'), ((13951, 13977), 'os.path.normcase', 'os.path.normcase', (['filepath'], {}), '(filepath)\n', (13967, 13977), False, 'import os\n'), ((36538, 36562), 'os.path.normcase', 
'os.path.normcase', (['select'], {}), '(select)\n', (36554, 36562), False, 'import os\n'), ((45717, 45743), 'scandir.scandir', 'scandir.scandir', (['chap.path'], {}), '(chap.path)\n', (45732, 45743), False, 'import scandir\n'), ((44488, 44521), 'PIL.Image.merge', 'Image.merge', (['"""RGBA"""', '(b, g, r, a)'], {}), "('RGBA', (b, g, r, a))\n", (44499, 44521), False, 'from PIL import Image, ImageChops\n'), ((22856, 22879), 'scandir.scandir', 'scandir.scandir', (['temp_p'], {}), '(temp_p)\n', (22871, 22879), False, 'import scandir\n'), ((24121, 24141), 'scandir.scandir', 'scandir.scandir', (['t_p'], {}), '(t_p)\n', (24136, 24141), False, 'import scandir\n')]
|
#!/usr/bin/env python
import sys
import socket
import logging
import json
class EventListener():
def __init__(self):
self.logger = logging.getLogger('Event Listener')
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def write_stdout(self, s):
# only eventlistener protocol messages may be sent to stdout
sys.stdout.write(s)
sys.stdout.flush()
def write_stderr(self, s):
sys.stderr.write(s)
sys.stderr.flush()
def start(self):
# Connect the socket to the port where the server is listening
server_address = '/run/supervisoragent.sock'
self.logger.info('Attemping to connect to {0}'.format(server_address))
try:
self.socket.connect(server_address)
except socket.error as error:
self.logger.error(error)
sys.exit(1)
while 1:
# transition from ACKNOWLEDGED to READY
self.write_stdout('READY\n')
# read header line
line = sys.stdin.readline()
# read event payload and send to socket
# don't forget the new-line character
headers = dict([x.split(':') for x in line.split()])
raw_data = sys.stdin.read(int(headers['len']))
data = dict([x.split(':') for x in raw_data.split()])
self.logger.info(raw_data)
try:
response = {}
response['name'] = data['processname']
response['group'] = data['groupname']
response['from_state'] = data['from_state']
response['eventname'] = headers['eventname']
response['statename'] = headers['eventname'].split('_')[2]
try:
response['pid'] = int(data['pid'])
except:
response['pid'] = None
json_str = json.dumps(response)
self.socket.sendall('LENGTH:{0}\n'.format(len(json_str)))
self.socket.sendall(json_str)
self.logger.info(json_str)
except Exception as e:
self.logger.error(e)
# transition from READY to ACKNOWLEDGED
self.write_stdout('RESULT 2\nOK')
def main():
format = '%(asctime)s::%(levelname)s::%(name)s::%(message)s'
logging.basicConfig(filename='/tmp/eventlistener.log',
format=format, level=logging.DEBUG)
event_listener = EventListener()
event_listener.start()
if __name__ == '__main__':
main()
|
[
"sys.stdout.write",
"logging.basicConfig",
"socket.socket",
"json.dumps",
"sys.stdout.flush",
"sys.exit",
"sys.stderr.write",
"sys.stderr.flush",
"sys.stdin.readline",
"logging.getLogger"
] |
[((2360, 2455), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""/tmp/eventlistener.log"""', 'format': 'format', 'level': 'logging.DEBUG'}), "(filename='/tmp/eventlistener.log', format=format, level\n =logging.DEBUG)\n", (2379, 2455), False, 'import logging\n'), ((146, 181), 'logging.getLogger', 'logging.getLogger', (['"""Event Listener"""'], {}), "('Event Listener')\n", (163, 181), False, 'import logging\n'), ((204, 253), 'socket.socket', 'socket.socket', (['socket.AF_UNIX', 'socket.SOCK_STREAM'], {}), '(socket.AF_UNIX, socket.SOCK_STREAM)\n', (217, 253), False, 'import socket\n'), ((363, 382), 'sys.stdout.write', 'sys.stdout.write', (['s'], {}), '(s)\n', (379, 382), False, 'import sys\n'), ((391, 409), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (407, 409), False, 'import sys\n'), ((450, 469), 'sys.stderr.write', 'sys.stderr.write', (['s'], {}), '(s)\n', (466, 469), False, 'import sys\n'), ((478, 496), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (494, 496), False, 'import sys\n'), ((1044, 1064), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1062, 1064), False, 'import sys\n'), ((870, 881), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (878, 881), False, 'import sys\n'), ((1922, 1942), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (1932, 1942), False, 'import json\n')]
|
import tensorflow as tf
from model2 import model
flags = tf.compat.v1.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', '', 'Model to restore')
model.load_weights(FLAGS.model, by_name=True, skip_mismatch=True)
model.save('converted.h5')
|
[
"model2.model.load_weights",
"model2.model.save"
] |
[((156, 221), 'model2.model.load_weights', 'model.load_weights', (['FLAGS.model'], {'by_name': '(True)', 'skip_mismatch': '(True)'}), '(FLAGS.model, by_name=True, skip_mismatch=True)\n', (174, 221), False, 'from model2 import model\n'), ((223, 249), 'model2.model.save', 'model.save', (['"""converted.h5"""'], {}), "('converted.h5')\n", (233, 249), False, 'from model2 import model\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pyplot as plt
from numpy import log10 as lg
from numpy import pi as pi
from scipy.interpolate import interp1d as sp_interp1d
from scipy.integrate import odeint
from scipy.integrate import ode
import warnings
import timeit
import scipy.optimize as opt
from matplotlib import cm
from astropy import constants as const
from astropy import units as u
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
G=const.G.cgs.value
c=const.c.cgs.value
Ms=const.M_sun.cgs.value
hbar=const.hbar.cgs.value
m_n=const.m_n.cgs.value
km=10**5
import matplotlib.font_manager as font_manager
plt.rcParams['xtick.labelsize'] = 25
plt.rcParams['ytick.labelsize'] = 25
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.major.size'] = 8
plt.rcParams['ytick.major.size'] = 8
plt.rcParams['xtick.minor.size'] = 4
plt.rcParams['ytick.minor.size'] = 4
plt.rcParams['xtick.top'] = True
plt.rcParams['ytick.right'] = True
plt.rcParams['axes.labelpad'] = 8.0
plt.rcParams['figure.constrained_layout.h_pad'] = 0
plt.rcParams['text.usetex'] = True
plt.rc('text', usetex=True)
plt.rcParams['font.sans-serif'] = ['Times New Roman']
plt.tick_params(axis='both', which='minor', labelsize=18)
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
names1= ['m14','m14_5_001','m14_5_1', 'm14_10_001','m14_10_1']
names2=['m20','m20_5_001', 'm20_10_001','m20_10_1']
colors=['black', 'c', 'g', 'orange', 'red', 'black', 'c','orange','red']
linestyle=['-', ':', '-.', '-', '--' ,'-' ,'--' , '-.' ,':']
labels=[r'\rm GR',r'$\xi=5,\,\, a=0.01$', r'$\xi=5,\,\, a=1$',r'$\xi=10,\,\, a=0.01$',r'$\xi=10,\,\, a=1$',r'\rm GR',r'$\xi=5,\,\, a=0.01$',
r'$\xi=10,\,\, a=0.01$',r'$\xi=10,\,\, a=1$']
fig, axs = plt.subplots(2, 2,figsize=(15,12),sharex=True, sharey='row')
plt.subplots_adjust(hspace=0.0)
plt.subplots_adjust(wspace=0)
axs[0,0].yaxis.set_minor_locator(MultipleLocator(0.25/5))
axs[1,0].yaxis.set_minor_locator(MultipleLocator(0.2/5))
axs[0,0].xaxis.set_minor_locator(MultipleLocator(10/5))
for i in range(len(names1)):
data1 = np.genfromtxt('data/'+'sol_'+ 'ap4_'+names1[i]+'.txt')
R, gtt, grr= data1[:,0]/10**5, data1[:,1], data1[:, 2]
axs[1,0].plot(R,gtt,linewidth=2, color=colors[i],linestyle=linestyle[i])
axs[1,0].grid(alpha=0.6)
axs[1,0].set_ylabel(r'$ -g_{tt}$', fontsize=30)
axs[0,0].plot(R,grr,linewidth=2, color=colors[i],linestyle=linestyle[i],label=labels[i])
axs[0,0].grid(alpha=0.6)
axs[0,0].set_ylabel(r'$ g_{rr}$', fontsize=30)
axs[0,0].legend(fontsize=25, frameon=False,loc=(0.37,0.27))
sub_axes = plt.axes([.3, .18, .20, .18])
sub_axes.plot(R,gtt,linewidth=2, color=colors[i],linestyle=linestyle[i])
sub_axes.set_ylim(0.67,0.725)
sub_axes.set_xlim(13.4,14.6)
# sub_axes.set_xticks([10,11,12])
# sub_axes.grid(alpha=0.8)
sub_axes.yaxis.set_minor_locator(MultipleLocator(0.02/5))
sub_axes.xaxis.set_minor_locator(MultipleLocator(0.5/5))
for j in range(len(names2)):
data2 = np.genfromtxt('data/'+'sol_'+ 'ap4_'+names2[j]+'.txt')
R, gtt, grr= data2[:,0]/10**5, data2[:,1], data2[:, 2]
axs[1,1].plot(R,gtt,linewidth=2, color=colors[j+5],linestyle=linestyle[j+5])
axs[1,1].grid(alpha=0.6)
axs[0,1].plot(R,grr,linewidth=2, color=colors[j+5],linestyle=linestyle[j+5],label=labels[j+5])
axs[0,1].grid(alpha=0.6)
axs[0,1].legend(fontsize=25, frameon=False,loc=(0.37,0.4))
sub_axes = plt.axes([.69, .18, .19, .16])
sub_axes.plot(R,gtt,linewidth=2, color=colors[j+5],linestyle=linestyle[j+5])
sub_axes.set_xlim(13.4,14.6)
sub_axes.set_ylim(0.53,0.59)
# sub_axes.set_yticks([6,8,10])
sub_axes.set_yticks([0.54,0.56,0.58])
# sub_axes.grid(alpha=0.8)
sub_axes.yaxis.set_minor_locator(MultipleLocator(0.02/5))
sub_axes.xaxis.set_minor_locator(MultipleLocator(0.5/5))
fig.text(0.48, 0.04, r'$r\,[\rm km]$' ,fontsize=30)
# fig.text(0.7, 0.04, r'$r\,[\rm km]$' ,fontsize=30)
axs[1,0].set_ylim(0.14,0.95)
axs[0,0].set_ylim(0.97,2.35)
axs[0,0].set_xlim(-1,43)
fig.text(0.28, 0.84, r'$M=1.4M_{\odot}$' ,fontsize=25)
fig.text(0.66, 0.84, r'$M=2M_{\odot}$' ,fontsize=25)
plt.savefig("ap41.pdf", format='pdf', bbox_inches="tight")
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"numpy.genfromtxt",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.tick_params",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots_adjust"
] |
[((1228, 1255), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1234, 1255), True, 'import matplotlib.pyplot as plt\n'), ((1310, 1367), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""minor"""', 'labelsize': '(18)'}), "(axis='both', which='minor', labelsize=18)\n", (1325, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1943, 2006), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(15, 12)', 'sharex': '(True)', 'sharey': '"""row"""'}), "(2, 2, figsize=(15, 12), sharex=True, sharey='row')\n", (1955, 2006), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2035), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.0)'}), '(hspace=0.0)\n', (2023, 2035), True, 'import matplotlib.pyplot as plt\n'), ((2036, 2065), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)'}), '(wspace=0)\n', (2055, 2065), True, 'import matplotlib.pyplot as plt\n'), ((4410, 4468), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ap41.pdf"""'], {'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('ap41.pdf', format='pdf', bbox_inches='tight')\n", (4421, 4468), True, 'import matplotlib.pyplot as plt\n'), ((4469, 4479), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4477, 4479), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2124), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.25 / 5)'], {}), '(0.25 / 5)\n', (2114, 2124), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((2157, 2181), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.2 / 5)'], {}), '(0.2 / 5)\n', (2172, 2181), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((2214, 2237), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10 / 5)'], {}), '(10 / 5)\n', (2229, 2237), False, 'from 
matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((2293, 2354), 'numpy.genfromtxt', 'np.genfromtxt', (["('data/' + 'sol_' + 'ap4_' + names1[i] + '.txt')"], {}), "('data/' + 'sol_' + 'ap4_' + names1[i] + '.txt')\n", (2306, 2354), True, 'import numpy as np\n'), ((2832, 2864), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.3, 0.18, 0.2, 0.18]'], {}), '([0.3, 0.18, 0.2, 0.18])\n', (2840, 2864), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3322), 'numpy.genfromtxt', 'np.genfromtxt', (["('data/' + 'sol_' + 'ap4_' + names2[j] + '.txt')"], {}), "('data/' + 'sol_' + 'ap4_' + names2[j] + '.txt')\n", (3274, 3322), True, 'import numpy as np\n'), ((3696, 3730), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.69, 0.18, 0.19, 0.16]'], {}), '([0.69, 0.18, 0.19, 0.16])\n', (3704, 3730), True, 'import matplotlib.pyplot as plt\n'), ((3114, 3139), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.02 / 5)'], {}), '(0.02 / 5)\n', (3129, 3139), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((3176, 3200), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.5 / 5)'], {}), '(0.5 / 5)\n', (3191, 3200), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((4022, 4047), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.02 / 5)'], {}), '(0.02 / 5)\n', (4037, 4047), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((4084, 4108), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.5 / 5)'], {}), '(0.5 / 5)\n', (4099, 4108), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split miniImageNET 100 experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import random
import datetime
import collections
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import construct_split_miniImagenet
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, grad_check
from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'RESNET-S', 'RESNET-B', 'VGG']
ARCH = 'RESNET-S'
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'M-EWC', 'S-GEM', 'A-GEM', 'FTR_EXT', 'PNN', 'ER-Reservoir', 'ER-Ringbuffer', 'SUBSPACE-PROJ', 'ER-SUBSPACE', 'PROJ-SUBSPACE-GP', 'ER-SUBSPACE-GP'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 13
IMG_HEIGHT = 84
IMG_WIDTH = 84
IMG_CHANNELS = 3
TOTAL_CLASSES = 100 # Total number of classes in the dataset
VISUALIZE_IMPORTANCE_MEASURE = False
MEASURE_CONVERGENCE_AFTER = 0.9
EPS_MEM_BATCH_SIZE = 256
DEBUG_EPISODIC_MEMORY = False
K_FOR_CROSS_VAL = 3
TIME_MY_METHOD = False
COUNT_VIOLATONS = False
MEASURE_PERF_ON_EPS_MEMORY = False
## Logging, saving and testing options
LOG_DIR = './split_miniImagenet_results'
RESNET18_miniImageNET10_CHECKPOINT = './resnet-18-pretrained-miniImagenet10/model.ckpt-19999'
DATA_FILE = 'miniImageNet_Dataset/miniImageNet_full.pickle'
## Evaluation options
## Task split
NUM_TASKS = 10
MULTI_TASK = False
PROJECTION_RANK = 50
GRAD_CHECK = False
QR = False
SVB = False
# Define function to load/ store training weights. We will use ImageNet initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for split miniImagenet experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--maintain-orthogonality", action="store_true",
help="If option is chosen then weights will be projected to Steifel manifold.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Total size of episodic memory.")
parser.add_argument("--eps-mem-batch", type=int, default=EPS_MEM_BATCH_SIZE,
help="Number of samples per class from previous tasks.")
parser.add_argument("--num-tasks", type=int, default=NUM_TASKS,
help="Number of tasks.")
parser.add_argument("--subspace-share-dims", type=int, default=0,
help="Number of dimensions to share across tasks.")
parser.add_argument("--data-file", type=str, default=DATA_FILE,
help="miniImageNet data file.")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
def train_task_sequence(model, sess, datasets, args):
"""
Train and evaluate LLL system such that we only see a example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
if model.imp_method in {'A-GEM', 'ER-Ringbuffer', 'ER-Reservoir', 'ER-SUBSPACE', 'ER-SUBSPACE-GP'}:
use_episodic_memory = True
else:
use_episodic_memory = False
batch_size = args.batch_size
# Loop over number of runs to average over
for runid in range(args.num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(args.random_seed+runid)
random.seed(args.random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = TOTAL_CLASSES// args.num_tasks
total_classes = classes_per_task * model.num_tasks
if args.online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
# Set episodic memory size
episodic_mem_size = args.mem_size * total_classes
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if use_episodic_memory:
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
episodic_filled_counter = 0
examples_seen_so_far = 0
# Mask for softmax
logit_mask = np.zeros(TOTAL_CLASSES)
nd_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
if COUNT_VIOLATONS:
violation_count = np.zeros(model.num_tasks)
vc = 0
proj_matrices = generate_projection_matrix(model.num_tasks, feature_dim=model.subspace_proj.get_shape()[0], share_dims=args.subspace_share_dims, qr=QR)
# Check the sanity of the generated matrices
unit_test_projection_matrices(proj_matrices)
# TODO: Temp for gradients check
prev_task_grads = []
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0 and model.imp_method != 'PNN'):
model.restore(sess)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
pnn_train_phase[task] = True
pnn_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
# If not in the cross validation mode then concatenate the train and validation sets
task_train_images, task_train_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Test for the tasks that we've seen so far
test_labels += task_labels[task]
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
logit_mask[:] = 0
# Train a task observing sequence of data
if args.train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if args.cross_validate_mode:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = args.train_iters
# Set the mask only once before starting the training for the task
logit_mask[task_labels[task]] = 1.0
if MULTI_TASK:
logit_mask[:] = 1.0
# Randomly suffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
# Number of iterations after which convergence is checked
convergence_iters = int(num_iters * MEASURE_CONVERGENCE_AFTER)
# Training loop for task T
for iters in range(num_iters):
if args.train_single_epoch and not args.cross_validate_mode and not MULTI_TASK:
if (iters <= 20) or (iters > 20 and iters % 50 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, projection_matrices=proj_matrices)
ftask.append(fbatch)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
elif model.imp_method in {'A-GEM', 'ER-Ringbuffer'}:
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
else:
# Set the output labels over which the model needs to be trained
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
if args.train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+residual], model.y_[task]: train_y[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5, model.learning_rate: args.learning_rate}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True, model.learning_rate: args.learning_rate}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
residual = batch_size
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_[task]: train_y[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5, model.learning_rate: args.learning_rate}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True, model.learning_rate: args.learning_rate}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PROJ-SUBSPACE-GP':
if task == 0:
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[task]
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
# Compute gradient in \perp space
logit_mask[:] = 0
for tt in range(task):
logit_mask[task_labels[tt]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.train_phase] = False
feed_dict[model.subspace_proj] = np.eye(proj_matrices[task].shape[0]) - proj_matrices[task]
sess.run(model.store_ref_grads, feed_dict=feed_dict)
# Compute gradient in P space and train
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.train_phase] = True
feed_dict[model.subspace_proj] = proj_matrices[task]
_, loss = sess.run([model.train_gp, model.gp_total_loss], feed_dict=feed_dict)
reg = 0.0
elif model.imp_method == 'SUBSPACE-PROJ':
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[task]
if args.maintain_orthogonality:
_, loss = sess.run([model.train_stiefel, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PNN':
_, loss = sess.run([model.train[task], model.unweighted_entropy[task]], feed_dict=feed_dict)
elif model.imp_method == 'FTR_EXT':
feed_dict[model.output_mask] = logit_mask
if task == 0:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_classifier, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC' or model.imp_method == 'M-EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
if (iters >= convergence_iters) and (model.imp_method == 'M-EWC'):
_, _, _, _, loss = sess.run([model.weights_old_ops_grouped, model.set_tmp_fisher, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
else:
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
nd_logit_mask[:] = 0
for tt in range(task):
nd_logit_mask[tt][task_labels[tt]] = 1.0
if episodic_filled_counter <= args.eps_mem_batch:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, args.eps_mem_batch, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.train_phase: True, model.learning_rate: args.learning_rate}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
if COUNT_VIOLATONS:
vc, _, loss = sess.run([model.violation_count, model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
# Put the batch in the ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'ER-Reservoir':
mem_filled_so_far = examples_seen_so_far if (examples_seen_so_far < episodic_mem_size) else episodic_mem_size
if mem_filled_so_far < args.eps_mem_batch:
er_mem_indices = np.arange(mem_filled_so_far)
else:
er_mem_indices = np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0)
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
labels_in_the_batch = np.unique(np.nonzero(er_train_y_batch)[1])
logit_mask[:] = 0
for tt in range(task+1):
if any(c_lab == t_lab for t_lab in task_labels[tt] for c_lab in labels_in_the_batch):
logit_mask[task_labels[tt]] = 1.0
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.learning_rate: args.learning_rate}
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
# Reservoir update
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
update_reservior(er_x, er_y_, episodic_images, episodic_labels, episodic_mem_size, examples_seen_so_far)
examples_seen_so_far += 1
elif model.imp_method == 'ER-Ringbuffer':
# Sample Bn U Bm
mem_filled_so_far = episodic_filled_counter if (episodic_filled_counter <= episodic_mem_size) else episodic_mem_size
er_mem_indices = np.arange(mem_filled_so_far) if (mem_filled_so_far <= args.eps_mem_batch) else np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0) # TODO: Check if for task 0 the first arg is empty
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
# Set the logit masks
nd_logit_mask[:] = 0
for tt in range(task+1):
nd_logit_mask[tt][task_labels[tt]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.learning_rate: args.learning_rate}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = float(er_train_x_batch.shape[0])
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
# Put the batch in the FIFO ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
elif model.imp_method == 'ER-SUBSPACE':
# Zero out all the grads
sess.run([model.reset_er_subspace_grads])
if task > 0:
# Randomly pick a task to replay
tt = np.squeeze(np.random.choice(np.arange(task), 1, replace=False))
mem_offset = tt*args.mem_size*classes_per_task
er_mem_indices = np.arange(mem_offset, mem_offset+args.mem_size*classes_per_task)
np.random.shuffle(er_mem_indices)
er_train_x_batch = episodic_images[er_mem_indices]
er_train_y_batch = episodic_labels[er_mem_indices]
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.task_id: task+1, model.learning_rate: args.learning_rate}
logit_mask[:] = 0
logit_mask[task_labels[tt]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[tt]
sess.run(model.accum_er_subspace_grads, feed_dict=feed_dict)
# Train on the current task
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.task_id: task+1, model.learning_rate: args.learning_rate}
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[task]
if args.maintain_orthogonality:
if SVB:
_, _, loss = sess.run([model.train_er_subspace, model.accum_er_subspace_grads, model.reg_loss], feed_dict=feed_dict)
# Every few iterations bound the singular values
if iters % 20 == 0:
sess.run(model.update_weights_svb)
else:
_, loss = sess.run([model.accum_er_subspace_grads, model.reg_loss], feed_dict=feed_dict)
sess.run(model.train_stiefel, feed_dict={model.learning_rate: args.learning_rate})
else:
_, _, loss = sess.run([model.train_er_subspace, model.accum_er_subspace_grads, model.reg_loss], feed_dict=feed_dict)
# Put the batch in the FIFO ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
elif model.imp_method == 'ER-SUBSPACE-GP':
# Zero out all the grads
sess.run([model.reset_er_subspace_gp_grads])
feed_dict = {model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.task_id: task+1, model.learning_rate: args.learning_rate, model.train_phase: True}
if task > 0:
# Randomly pick a task to replay
tt = np.squeeze(np.random.choice(np.arange(task), 1, replace=False))
mem_offset = tt*args.mem_size*classes_per_task
er_mem_indices = np.arange(mem_offset, mem_offset+args.mem_size*classes_per_task)
np.random.shuffle(er_mem_indices)
er_train_x_batch = episodic_images[er_mem_indices]
er_train_y_batch = episodic_labels[er_mem_indices]
logit_mask[:] = 0
logit_mask[task_labels[tt]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.x] = er_train_x_batch
feed_dict[model.y_] = er_train_y_batch
# Compute the gradient in the \perp space
#feed_dict[model.subspace_proj] = np.eye(proj_matrices[tt].shape[0]) - proj_matrices[tt]
#sess.run(model.store_ref_grads, feed_dict=feed_dict)
# Compute the gradient in P space and store the gradient
feed_dict[model.subspace_proj] = proj_matrices[tt]
sess.run(model.accum_er_subspace_grads, feed_dict=feed_dict)
# Train on the current task
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
if task == 0:
feed_dict[model.x] = train_x[offset:offset+residual]
feed_dict[model.y_] = train_y[offset:offset+residual]
else:
# Sample Bn U Bm
mem_filled_so_far = episodic_filled_counter if (episodic_filled_counter <= episodic_mem_size) else episodic_mem_size
er_mem_indices = np.arange(mem_filled_so_far) if (mem_filled_so_far <= args.eps_mem_batch) else np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0) # TODO: Check if for task 0 the first arg is empty
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
feed_dict[model.x] = er_train_x_batch
feed_dict[model.y_] = er_train_y_batch
# Compute the gradient in the \perp space
feed_dict[model.subspace_proj] = np.eye(proj_matrices[tt].shape[0]) - proj_matrices[task]
sess.run(model.store_ref_grads, feed_dict=feed_dict)
# Compute the gradient in P space and store the gradient
feed_dict[model.x] = train_x[offset:offset+residual]
feed_dict[model.y_] = train_y[offset:offset+residual]
feed_dict[model.subspace_proj] = proj_matrices[task]
_, loss = sess.run([model.train_er_gp, model.gp_total_loss], feed_dict=feed_dict)
# Put the batch in the FIFO ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
#print('Step {:d}\t CE: {:.3f}\t Reg: {:.9f}\t TL: {:.3f}'.format(iters, entropy, reg, loss))
#print('Step {:d}\t Reg: {:.9f}\t TL: {:.3f}'.format(iters, reg, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
sys.exit(0)
print('\t\t\t\tTraining for Task%d done!'%(task))
if model.imp_method == 'SUBSPACE-PROJ' and GRAD_CHECK:
# TODO: Compute the average gradient of the task at \theta^*: Could be done as running average (no need for extra passes?)
bbatch_size = 100
grad_sum = []
for iiters in range(train_x.shape[0] // bbatch_size):
offset = iiters * bbatch_size
feed_dict = {model.x: train_x[offset:offset+bbatch_size], model.y_: train_y[offset:offset+bbatch_size],
model.keep_prob: 1.0, model.train_phase: False, model.learning_rate: args.learning_rate}
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
#feed_dict[model.subspace_proj] = proj_matrices[task]
projection_dict = {proj: proj_matrices[proj.get_shape()[0]][task] for proj in model.subspace_proj}
feed_dict.update(projection_dict)
feed_dict[model.mem_batch_size] = residual
grad_vars, train_vars = sess.run([model.reg_gradients_vars, model.trainable_vars], feed_dict=feed_dict)
for v in range(len(train_vars)):
if iiters == 0:
grad_sum.append(grad_vars[v][0])
else:
grad_sum[v] += (grad_vars[v][0] - grad_sum[v])/ iiters
prev_task_grads.append(grad_sum)
if use_episodic_memory:
episodic_filled_counter += args.mem_size * classes_per_task
if model.imp_method == 'A-GEM':
if COUNT_VIOLATONS:
violation_count[task] = vc
print('Task {}: Violation Count: {}'.format(task, violation_count))
sess.run(model.reset_violation_count, feed_dict=feed_dict)
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if (task < (len(task_labels) - 1)) or MEASURE_PERF_ON_EPS_MEMORY:
model.task_updates(sess, task, task_train_images, task_labels[task]) # TODO: For MAS, should the gradients be for current task or all the previous tasks
print('\t\t\t\tTask updates after Task%d done!'%(task))
if args.train_single_epoch and not args.cross_validate_mode:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, projection_matrices=proj_matrices)
print('Task: {}, Acc: {}'.format(task, fbatch))
ftask.append(fbatch)
ftask = np.array(ftask)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
else:
if MEASURE_PERF_ON_EPS_MEMORY:
eps_mem = {
'images': episodic_images,
'labels': episodic_labels,
}
# Measure perf on episodic memory
ftask = test_task_sequence(model, sess, eps_mem, task_labels, task, classes_per_task=classes_per_task, projection_matrices=proj_matrices)
else:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, projection_matrices=proj_matrices)
print('Task: {}, Acc: {}'.format(task, ftask))
# Store the accuracies computed at task T in a list
evals.append(ftask)
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
runs.append(np.array(evals))
# End for loop runid
runs = np.array(runs)
return runs, task_labels_dataset
def test_task_sequence(model, sess, test_data, test_tasks, task, classes_per_task=0, projection_matrices=None):
    """
    Snapshot the current performance.

    Evaluates the model on each task seen so far (or on the episodic
    memory when MEASURE_PERF_ON_EPS_MEMORY is set) and returns the
    per-task accuracies.

    Args:
        model: Model wrapper exposing placeholders (x, y_, keep_prob,
            train_phase, output_mask, subspace_proj) and accuracy op(s).
        sess: Active TF session in which accuracy ops are eval()'d.
        test_data: Dataset accepted by load_task_specific_data, or a
            {'images', 'labels'} episodic-memory dict on the
            MEASURE_PERF_ON_EPS_MEMORY path.
        test_tasks: Per-task label lists.
        task: Index of the most recently trained task; tasks after it are
            skipped unless MULTI_TASK is set.
        classes_per_task: Used only on the episodic-memory path to slice
            each task's samples out of the memory buffer.
        projection_matrices: Per-task subspace projections, fed only for
            the *-SUBSPACE* importance methods.

    Returns:
        np.ndarray of shape (model.num_tasks,) with one accuracy per
        task; entries for tasks not evaluated remain 0.
    """
    if TIME_MY_METHOD:
        # Only compute the training time
        return np.zeros(model.num_tasks)
    final_acc = np.zeros(model.num_tasks)
    # Multi-head methods keep one output mask per task; the other methods
    # share a single flat mask over all classes.
    if model.imp_method in {'PNN', 'A-GEM', 'ER-Ringbuffer'}:
        logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
    else:
        logit_mask = np.zeros(TOTAL_CLASSES)
    if MEASURE_PERF_ON_EPS_MEMORY:
        for tt, labels in enumerate(test_tasks):
            # Multi-head evaluation setting
            logit_mask[:] = 0
            logit_mask[labels] = 1.0
            # Each task occupies a contiguous slice of the episodic memory.
            mem_offset = tt*SAMPLES_PER_CLASS*classes_per_task
            feed_dict = {model.x: test_data['images'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task],
                    model.y_: test_data['labels'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task], model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
            acc = model.accuracy.eval(feed_dict = feed_dict)
            final_acc[tt] = acc
        return final_acc
    for tt, labels in enumerate(test_tasks):
        if not MULTI_TASK:
            if tt > task:
                # Tasks not trained yet keep accuracy 0.
                return final_acc
        task_test_images, task_test_labels = load_task_specific_data(test_data, labels)
        if model.imp_method == 'PNN':
            # All columns are put in inference mode (train_phase False).
            # NOTE(review): np.bool is removed in NumPy >= 1.24 — confirm
            # the pinned NumPy version before upgrading.
            pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
            logit_mask[:] = 0
            logit_mask[tt][labels] = 1.0
            # PNN has one label placeholder and one accuracy op per task.
            feed_dict = {model.x: task_test_images,
                    model.y_[tt]: task_test_labels, model.keep_prob: 1.0}
            train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
            logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
            feed_dict.update(train_phase_dict)
            feed_dict.update(logit_mask_dict)
            acc = model.accuracy[tt].eval(feed_dict = feed_dict)
        elif model.imp_method in {'A-GEM', 'ER-Ringbuffer'}:
            # Multi-head evaluation: enable only this task's labels in
            # this task's row of the mask.
            logit_mask[:] = 0
            logit_mask[tt][labels] = 1.0
            feed_dict = {model.x: task_test_images,
                    model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False}
            logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
            feed_dict.update(logit_mask_dict)
            #if model.imp_method in {'SUBSPACE-PROJ', 'ER-SUBSPACE', 'PROJ-SUBSPACE-GP'}:
            # Deliberately disabled debug path (projection at eval time).
            if False:
                feed_dict[model.subspace_proj] = projection_matrices[tt]
                #feed_dict[model.subspace_proj] = np.eye(projection_matrices[tt].shape[0])
                #projection_dict = {proj: projection_matrices[proj.get_shape()[0]][tt] for proj in model.subspace_proj}
                #feed_dict.update(projection_dict)
            acc = model.accuracy[tt].eval(feed_dict = feed_dict)
        else:
            # Single-head evaluation: flat mask over all classes.
            logit_mask[:] = 0
            logit_mask[labels] = 1.0
            #for ttt in range(task+1):
            #    logit_mask[test_tasks[ttt]] = 1.0
            feed_dict = {model.x: task_test_images,
                    model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
            if model.imp_method in {'SUBSPACE-PROJ', 'ER-SUBSPACE', 'PROJ-SUBSPACE-GP', 'ER-SUBSPACE-GP'}:
                # Subspace methods evaluate each task in its own projection.
                feed_dict[model.subspace_proj] = projection_matrices[tt]
            acc = model.accuracy.eval(feed_dict = feed_dict)
        final_acc[tt] = acc
    return final_acc
def main():
    """
    Create the model and start the training.

    Parses the command-line arguments, validates the configuration,
    builds the TF graph and Model, trains on the sequence of split
    miniImageNet tasks, and snapshots metadata/results into args.log_dir.

    Raises:
        ValueError: if the architecture, importance method or optimizer
            is not one of the supported choices.
    """
    # Get the CL arguments
    args = get_arguments()
    # Fail fast on an invalid configuration.
    if args.arch not in VALID_ARCHS:
        raise ValueError("Network architecture %s is not supported!"%(args.arch))
    if args.imp_method not in MODELS:
        raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
    if args.optim not in VALID_OPTIMS:
        raise ValueError("Optimizer %s is undefined!"%(args.optim))
    # Create log directories to store the results
    if not os.path.exists(args.log_dir):
        print('Log directory %s created!'%(args.log_dir))
        os.makedirs(args.log_dir)
    # Generate the experiment key and store the meta data in a file
    exper_meta_data = {'ARCH': args.arch,
            'DATASET': 'SPLIT_miniImageNET',
            'NUM_RUNS': args.num_runs,
            'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
            'IMP_METHOD': args.imp_method,
            'SYNAP_STGTH': args.synap_stgth,
            'FISHER_EMA_DECAY': args.fisher_ema_decay,
            'FISHER_UPDATE_AFTER': args.fisher_update_after,
            'OPTIM': args.optim,
            'LR': args.learning_rate,
            'BATCH_SIZE': args.batch_size,
            'MEM_SIZE': args.mem_size}
    experiment_id = "SPLIT_miniImageNET_META_%s_%s_%r_%s-"%(args.imp_method, str(args.synap_stgth).replace('.', '_'),
            str(args.batch_size), str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
    snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
    # Get the task labels from the total number of tasks and full label space:
    # K_FOR_CROSS_VAL tasks are reserved for (online) cross validation.
    if args.online_cross_val:
        num_tasks = K_FOR_CROSS_VAL
    else:
        num_tasks = args.num_tasks - K_FOR_CROSS_VAL
    # Load the split miniImagenet dataset
    data_labs = [np.arange(TOTAL_CLASSES)]
    datasets = construct_split_miniImagenet(data_labs, args.data_file)
    # Variables to store the accuracies and standard deviations of the experiment
    acc_mean = dict()
    acc_std = dict()
    # Reset the default graph and build a fresh one for this experiment.
    tf.reset_default_graph()
    graph = tf.Graph()
    with graph.as_default():
        # Seed every RNG (TF, NumPy, stdlib) for reproducibility.
        tf.set_random_seed(args.random_seed)
        np.random.seed(args.random_seed)
        random.seed(args.random_seed)
        # Define Input and Output of the model
        x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
        learning_rate = tf.placeholder(dtype=tf.float32, shape=())
        if args.imp_method == 'PNN':
            # PNN uses one label placeholder (output head) per task.
            y_ = []
            for i in range(num_tasks):
                y_.append(tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES]))
        else:
            y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
        # Define the optimizer
        if args.optim == 'ADAM':
            opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
        elif args.optim == 'SGD':
            opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        elif args.optim == 'MOMENTUM':
            #base_lr = tf.constant(args.learning_rate)
            #learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
            opt = tf.train.MomentumOptimizer(learning_rate, OPT_MOMENTUM)
        # Create the Model/ construct the graph
        model = Model(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.fisher_update_after,
                args.fisher_ema_decay, learning_rate, network_arch=args.arch)
        # Set up tf session and initialize variables.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        time_start = time.time()
        with tf.Session(config=config, graph=graph) as sess:
            runs, task_labels_dataset = train_task_sequence(model, sess, datasets, args)
            # Close the session
            sess.close()
        time_end = time.time()
        time_spent = time_end - time_start
    # Store all the results in one dictionary to process later
    exper_acc = dict(mean=runs)
    exper_labels = dict(labels=task_labels_dataset)
    # If cross-validation flag is enabled, store the stuff in a text file
    if args.cross_validate_mode:
        acc_mean, acc_std = average_acc_stats_across_runs(runs, model.imp_method)
        fgt_mean, fgt_std = average_fgt_stats_across_runs(runs, model.imp_method)
        cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_miniImageNET_%s_%s'%(args.imp_method, args.optim) + '.txt'
        with open(cross_validate_dump_file, 'a') as f:
            if MULTI_TASK:
                # BUGFIX: the original format string had five '{}' slots
                # ('HERDING: {}' had no matching argument), so .format()
                # raised IndexError whenever this branch ran. The unmatched
                # field was dropped to align slots with arguments.
                f.write('ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.arch, args.learning_rate, args.synap_stgth, acc_mean[-1,:].mean()))
            else:
                f.write('ORTHO:{} \t SVB:{}\t NUM_TASKS: {} \t MEM_SIZE: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t SHARED_SUBSPACE:{}, \t ACC: {} (+-{})\t Fgt: {} (+-{})\t QR:{}\t Time: {}\n'.format(args.maintain_orthogonality, SVB, args.num_tasks, args.mem_size, args.arch, args.learning_rate,
                    args.synap_stgth, args.subspace_share_dims, acc_mean, acc_std, fgt_mean, fgt_std, QR, str(time_spent)))
    # Store the experiment output to a file
    snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
    snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
# Script entry point: run the experiment only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"utils.utils.average_acc_stats_across_runs",
"tensorflow.reset_default_graph",
"numpy.ones",
"utils.utils.average_fgt_stats_across_runs",
"tensorflow.ConfigProto",
"numpy.arange",
"utils.vis_utils.snapshot_experiment_meta_data",
"utils.data_utils.construct_split_miniImagenet",
"utils.vis_utils.snapshot_experiment_eval",
"os.path.join",
"os.path.exists",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"random.seed",
"numpy.random.choice",
"datetime.datetime.now",
"numpy.random.shuffle",
"utils.utils.load_task_specific_data",
"math.isnan",
"utils.utils.update_reservior",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.MomentumOptimizer",
"tensorflow.Graph",
"tensorflow.train.GradientDescentOptimizer",
"sys.exit",
"numpy.concatenate",
"utils.utils.unit_test_projection_matrices",
"os.makedirs",
"utils.vis_utils.snapshot_task_labels",
"model.Model",
"numpy.zeros",
"time.time",
"numpy.nonzero",
"utils.utils.update_fifo_buffer",
"numpy.array",
"numpy.eye",
"tensorflow.train.AdamOptimizer"
] |
[((3393, 3425), 'os.path.join', 'os.path.join', (['logdir', 'model_name'], {}), '(logdir, model_name)\n', (3405, 3425), False, 'import os\n'), ((4050, 4135), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script for split miniImagenet experiment."""'}), "(description='Script for split miniImagenet experiment.'\n )\n", (4073, 4135), False, 'import argparse\n'), ((42224, 42238), 'numpy.array', 'np.array', (['runs'], {}), '(runs)\n', (42232, 42238), True, 'import numpy as np\n'), ((42565, 42590), 'numpy.zeros', 'np.zeros', (['model.num_tasks'], {}), '(model.num_tasks)\n', (42573, 42590), True, 'import numpy as np\n'), ((47575, 47650), 'utils.vis_utils.snapshot_experiment_meta_data', 'snapshot_experiment_meta_data', (['args.log_dir', 'experiment_id', 'exper_meta_data'], {}), '(args.log_dir, experiment_id, exper_meta_data)\n', (47604, 47650), False, 'from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels\n'), ((47960, 48015), 'utils.data_utils.construct_split_miniImagenet', 'construct_split_miniImagenet', (['data_labs', 'args.data_file'], {}), '(data_labs, args.data_file)\n', (47988, 48015), False, 'from utils.data_utils import construct_split_miniImagenet\n'), ((48177, 48201), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (48199, 48201), True, 'import tensorflow as tf\n'), ((48215, 48225), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (48223, 48225), True, 'import tensorflow as tf\n'), ((51366, 51430), 'utils.vis_utils.snapshot_experiment_eval', 'snapshot_experiment_eval', (['args.log_dir', 'experiment_id', 'exper_acc'], {}), '(args.log_dir, experiment_id, exper_acc)\n', (51390, 51430), False, 'from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels\n'), ((51435, 51498), 'utils.vis_utils.snapshot_task_labels', 
'snapshot_task_labels', (['args.log_dir', 'experiment_id', 'exper_labels'], {}), '(args.log_dir, experiment_id, exper_labels)\n', (51455, 51498), False, 'from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels\n'), ((3437, 3459), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (3451, 3459), False, 'import os\n'), ((3467, 3486), 'os.makedirs', 'os.makedirs', (['logdir'], {}), '(logdir)\n', (3478, 3486), False, 'import os\n'), ((8308, 8348), 'numpy.random.seed', 'np.random.seed', (['(args.random_seed + runid)'], {}), '(args.random_seed + runid)\n', (8322, 8348), True, 'import numpy as np\n'), ((8355, 8392), 'random.seed', 'random.seed', (['(args.random_seed + runid)'], {}), '(args.random_seed + runid)\n', (8366, 8392), False, 'import random\n'), ((8882, 8912), 'numpy.random.shuffle', 'np.random.shuffle', (['label_array'], {}), '(label_array)\n', (8899, 8912), True, 'import numpy as np\n'), ((10110, 10133), 'numpy.zeros', 'np.zeros', (['TOTAL_CLASSES'], {}), '(TOTAL_CLASSES)\n', (10118, 10133), True, 'import numpy as np\n'), ((10158, 10200), 'numpy.zeros', 'np.zeros', (['[model.num_tasks, TOTAL_CLASSES]'], {}), '([model.num_tasks, TOTAL_CLASSES])\n', (10166, 10200), True, 'import numpy as np\n'), ((10527, 10571), 'utils.utils.unit_test_projection_matrices', 'unit_test_projection_matrices', (['proj_matrices'], {}), '(proj_matrices)\n', (10556, 10571), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((42522, 42547), 'numpy.zeros', 'np.zeros', (['model.num_tasks'], {}), '(model.num_tasks)\n', (42530, 42547), True, 'import numpy as np\n'), ((42674, 42716), 'numpy.zeros', 'np.zeros', (['[model.num_tasks, TOTAL_CLASSES]'], {}), '([model.num_tasks, TOTAL_CLASSES])\n', (42682, 42716), True, 'import numpy as np\n'), ((42748, 
42771), 'numpy.zeros', 'np.zeros', (['TOTAL_CLASSES'], {}), '(TOTAL_CLASSES)\n', (42756, 42771), True, 'import numpy as np\n'), ((43630, 43672), 'utils.utils.load_task_specific_data', 'load_task_specific_data', (['test_data', 'labels'], {}), '(test_data, labels)\n', (43653, 43672), False, 'from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, grad_check\n'), ((46608, 46636), 'os.path.exists', 'os.path.exists', (['args.log_dir'], {}), '(args.log_dir)\n', (46622, 46636), False, 'import os\n'), ((46704, 46729), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {}), '(args.log_dir)\n', (46715, 46729), False, 'import os\n'), ((47919, 47943), 'numpy.arange', 'np.arange', (['TOTAL_CLASSES'], {}), '(TOTAL_CLASSES)\n', (47928, 47943), True, 'import numpy as np\n'), ((48294, 48330), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (48312, 48330), True, 'import tensorflow as tf\n'), ((48339, 48371), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (48353, 48371), True, 'import numpy as np\n'), ((48380, 48409), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (48391, 48409), False, 'import random\n'), ((48470, 48547), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS]'}), '(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])\n', (48484, 48547), True, 'import tensorflow as tf\n'), ((48572, 48614), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '()'}), '(dtype=tf.float32, shape=())\n', (48586, 48614), True, 'import tensorflow as tf\n'), ((49471, 49632), 'model.Model', 'Model', (['x', 'y_', 'num_tasks', 'opt', 'args.imp_method', 'args.synap_stgth', 'args.fisher_update_after', 'args.fisher_ema_decay', 
'learning_rate'], {'network_arch': 'args.arch'}), '(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.\n fisher_update_after, args.fisher_ema_decay, learning_rate, network_arch\n =args.arch)\n', (49476, 49632), False, 'from model import Model\n'), ((49712, 49728), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (49726, 49728), True, 'import tensorflow as tf\n'), ((49798, 49809), 'time.time', 'time.time', ([], {}), '()\n', (49807, 49809), False, 'import time\n'), ((50036, 50047), 'time.time', 'time.time', ([], {}), '()\n', (50045, 50047), False, 'import time\n'), ((50375, 50428), 'utils.utils.average_acc_stats_across_runs', 'average_acc_stats_across_runs', (['runs', 'model.imp_method'], {}), '(runs, model.imp_method)\n', (50404, 50428), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((50457, 50510), 'utils.utils.average_fgt_stats_across_runs', 'average_fgt_stats_across_runs', (['runs', 'model.imp_method'], {}), '(runs, model.imp_method)\n', (50486, 50510), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((8676, 8700), 'numpy.arange', 'np.arange', (['total_classes'], {}), '(total_classes)\n', (8685, 8700), True, 'import numpy as np\n'), ((8809, 8874), 'numpy.arange', 'np.arange', (['class_label_offset', '(total_classes + class_label_offset)'], {}), '(class_label_offset, total_classes + class_label_offset)\n', (8818, 8874), True, 'import numpy as np\n'), ((9401, 9434), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9432, 9434), True, 'import tensorflow as tf\n'), ((9777, 9843), 'numpy.zeros', 'np.zeros', (['[episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS]'], {}), '([episodic_mem_size, 
IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])\n', (9785, 9843), True, 'import numpy as np\n'), ((9874, 9918), 'numpy.zeros', 'np.zeros', (['[episodic_mem_size, TOTAL_CLASSES]'], {}), '([episodic_mem_size, TOTAL_CLASSES])\n', (9882, 9918), True, 'import numpy as np\n'), ((9943, 9982), 'numpy.zeros', 'np.zeros', (['TOTAL_CLASSES'], {'dtype': 'np.int32'}), '(TOTAL_CLASSES, dtype=np.int32)\n', (9951, 9982), True, 'import numpy as np\n'), ((10260, 10285), 'numpy.zeros', 'np.zeros', (['model.num_tasks'], {}), '(model.num_tasks)\n', (10268, 10285), True, 'import numpy as np\n'), ((11346, 11410), 'utils.utils.load_task_specific_data', 'load_task_specific_data', (["datasets[0]['train']", 'task_labels[task]'], {}), "(datasets[0]['train'], task_labels[task])\n", (11369, 11410), False, 'from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, grad_check\n'), ((12470, 12525), 'numpy.ones', 'np.ones', (['[task_train_labels.shape[0]]'], {'dtype': 'np.float32'}), '([task_train_labels.shape[0]], dtype=np.float32)\n', (12477, 12525), True, 'import numpy as np\n'), ((13263, 13292), 'numpy.arange', 'np.arange', (['num_train_examples'], {}), '(num_train_examples)\n', (13272, 13292), True, 'import numpy as np\n'), ((13305, 13328), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (13322, 13328), True, 'import numpy as np\n'), ((42166, 42181), 'numpy.array', 'np.array', (['evals'], {}), '(evals)\n', (42174, 42181), True, 'import numpy as np\n'), ((48825, 48880), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, TOTAL_CLASSES]'}), '(tf.float32, shape=[None, TOTAL_CLASSES])\n', (48839, 48880), True, 'import tensorflow as tf\n'), ((48964, 49015), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (48986, 49015), True, 
'import tensorflow as tf\n'), ((49823, 49861), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config', 'graph': 'graph'}), '(config=config, graph=graph)\n', (49833, 49861), True, 'import tensorflow as tf\n'), ((11154, 11196), 'numpy.zeros', 'np.zeros', (['[model.num_tasks, TOTAL_CLASSES]'], {}), '([model.num_tasks, TOTAL_CLASSES])\n', (11162, 11196), True, 'import numpy as np\n'), ((37859, 37875), 'math.isnan', 'math.isnan', (['loss'], {}), '(loss)\n', (37869, 37875), False, 'import math\n'), ((40894, 40909), 'numpy.array', 'np.array', (['ftask'], {}), '(ftask)\n', (40902, 40909), True, 'import numpy as np\n'), ((43750, 43775), 'numpy.zeros', 'np.zeros', (['model.num_tasks'], {}), '(model.num_tasks)\n', (43758, 43775), True, 'import numpy as np\n'), ((47520, 47543), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (47541, 47543), False, 'import datetime\n'), ((49069, 49131), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (49102, 49131), True, 'import tensorflow as tf\n'), ((11034, 11059), 'numpy.zeros', 'np.zeros', (['model.num_tasks'], {}), '(model.num_tasks)\n', (11042, 11059), True, 'import numpy as np\n'), ((37952, 37963), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (37960, 37963), False, 'import sys\n'), ((48737, 48792), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, TOTAL_CLASSES]'}), '(tf.float32, shape=[None, TOTAL_CLASSES])\n', (48751, 48792), True, 'import tensorflow as tf\n'), ((49351, 49406), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate', 'OPT_MOMENTUM'], {}), '(learning_rate, OPT_MOMENTUM)\n', (49377, 49406), True, 'import tensorflow as tf\n'), ((11670, 11732), 'utils.utils.load_task_specific_data', 'load_task_specific_data', (["datasets[0]['train']", 'task_labels[t_]'], {}), "(datasets[0]['train'], task_labels[t_])\n", (11693, 11732), False, 
'from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, grad_check\n'), ((11777, 11836), 'numpy.concatenate', 'np.concatenate', (['(task_train_images, task_tr_images)'], {'axis': '(0)'}), '((task_train_images, task_tr_images), axis=0)\n', (11791, 11836), True, 'import numpy as np\n'), ((11881, 11940), 'numpy.concatenate', 'np.concatenate', (['(task_train_labels, task_tr_labels)'], {'axis': '(0)'}), '((task_train_labels, task_tr_labels), axis=0)\n', (11895, 11940), True, 'import numpy as np\n'), ((12242, 12271), 'numpy.nonzero', 'np.nonzero', (['task_train_labels'], {}), '(task_train_labels)\n', (12252, 12271), True, 'import numpy as np\n'), ((18695, 18731), 'numpy.eye', 'np.eye', (['proj_matrices[task].shape[0]'], {}), '(proj_matrices[task].shape[0])\n', (18701, 18731), True, 'import numpy as np\n'), ((24848, 25049), 'utils.utils.update_fifo_buffer', 'update_fifo_buffer', (['train_x[offset:offset + residual]', 'train_y[offset:offset + residual]', 'episodic_images', 'episodic_labels', 'task_labels[task]', 'args.mem_size', 'count_cls', 'episodic_filled_counter'], {}), '(train_x[offset:offset + residual], train_y[offset:offset +\n residual], episodic_images, episodic_labels, task_labels[task], args.\n mem_size, count_cls, episodic_filled_counter)\n', (24866, 25049), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((23005, 23039), 'numpy.arange', 'np.arange', (['episodic_filled_counter'], {}), '(episodic_filled_counter)\n', (23014, 23039), True, 'import numpy as np\n'), ((23197, 23273), 'numpy.random.choice', 'np.random.choice', (['episodic_filled_counter', 'args.eps_mem_batch'], {'replace': '(False)'}), '(episodic_filled_counter, args.eps_mem_batch, replace=False)\n', (23213, 
23273), True, 'import numpy as np\n'), ((26956, 26989), 'numpy.random.shuffle', 'np.random.shuffle', (['er_mem_indices'], {}), '(er_mem_indices)\n', (26973, 26989), True, 'import numpy as np\n'), ((27029, 27125), 'numpy.concatenate', 'np.concatenate', (['(episodic_images[er_mem_indices], train_x[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_images[er_mem_indices], train_x[offset:offset +\n residual]), axis=0)\n', (27043, 27125), True, 'import numpy as np\n'), ((27159, 27255), 'numpy.concatenate', 'np.concatenate', (['(episodic_labels[er_mem_indices], train_y[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_labels[er_mem_indices], train_y[offset:offset +\n residual]), axis=0)\n', (27173, 27255), True, 'import numpy as np\n'), ((26769, 26797), 'numpy.arange', 'np.arange', (['mem_filled_so_far'], {}), '(mem_filled_so_far)\n', (26778, 26797), True, 'import numpy as np\n'), ((26865, 26935), 'numpy.random.choice', 'np.random.choice', (['mem_filled_so_far', 'args.eps_mem_batch'], {'replace': '(False)'}), '(mem_filled_so_far, args.eps_mem_batch, replace=False)\n', (26881, 26935), True, 'import numpy as np\n'), ((28208, 28316), 'utils.utils.update_reservior', 'update_reservior', (['er_x', 'er_y_', 'episodic_images', 'episodic_labels', 'episodic_mem_size', 'examples_seen_so_far'], {}), '(er_x, er_y_, episodic_images, episodic_labels,\n episodic_mem_size, examples_seen_so_far)\n', (28224, 28316), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((28803, 28836), 'numpy.random.shuffle', 'np.random.shuffle', (['er_mem_indices'], {}), '(er_mem_indices)\n', (28820, 28836), True, 'import numpy as np\n'), ((28876, 28972), 'numpy.concatenate', 'np.concatenate', (['(episodic_images[er_mem_indices], train_x[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_images[er_mem_indices], train_x[offset:offset +\n 
residual]), axis=0)\n', (28890, 28972), True, 'import numpy as np\n'), ((29057, 29153), 'numpy.concatenate', 'np.concatenate', (['(episodic_labels[er_mem_indices], train_y[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_labels[er_mem_indices], train_y[offset:offset +\n residual]), axis=0)\n', (29071, 29153), True, 'import numpy as np\n'), ((30050, 30251), 'utils.utils.update_fifo_buffer', 'update_fifo_buffer', (['train_x[offset:offset + residual]', 'train_y[offset:offset + residual]', 'episodic_images', 'episodic_labels', 'task_labels[task]', 'args.mem_size', 'count_cls', 'episodic_filled_counter'], {}), '(train_x[offset:offset + residual], train_y[offset:offset +\n residual], episodic_images, episodic_labels, task_labels[task], args.\n mem_size, count_cls, episodic_filled_counter)\n', (30068, 30251), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((27302, 27330), 'numpy.nonzero', 'np.nonzero', (['er_train_y_batch'], {}), '(er_train_y_batch)\n', (27312, 27330), True, 'import numpy as np\n'), ((28633, 28661), 'numpy.arange', 'np.arange', (['mem_filled_so_far'], {}), '(mem_filled_so_far)\n', (28642, 28661), True, 'import numpy as np\n'), ((28712, 28782), 'numpy.random.choice', 'np.random.choice', (['mem_filled_so_far', 'args.eps_mem_batch'], {'replace': '(False)'}), '(mem_filled_so_far, args.eps_mem_batch, replace=False)\n', (28728, 28782), True, 'import numpy as np\n'), ((33252, 33453), 'utils.utils.update_fifo_buffer', 'update_fifo_buffer', (['train_x[offset:offset + residual]', 'train_y[offset:offset + residual]', 'episodic_images', 'episodic_labels', 'task_labels[task]', 'args.mem_size', 'count_cls', 'episodic_filled_counter'], {}), '(train_x[offset:offset + residual], train_y[offset:offset +\n residual], episodic_images, episodic_labels, task_labels[task], args.\n mem_size, count_cls, 
episodic_filled_counter)\n', (33270, 33453), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((30738, 30806), 'numpy.arange', 'np.arange', (['mem_offset', '(mem_offset + args.mem_size * classes_per_task)'], {}), '(mem_offset, mem_offset + args.mem_size * classes_per_task)\n', (30747, 30806), True, 'import numpy as np\n'), ((30827, 30860), 'numpy.random.shuffle', 'np.random.shuffle', (['er_mem_indices'], {}), '(er_mem_indices)\n', (30844, 30860), True, 'import numpy as np\n'), ((37298, 37499), 'utils.utils.update_fifo_buffer', 'update_fifo_buffer', (['train_x[offset:offset + residual]', 'train_y[offset:offset + residual]', 'episodic_images', 'episodic_labels', 'task_labels[task]', 'args.mem_size', 'count_cls', 'episodic_filled_counter'], {}), '(train_x[offset:offset + residual], train_y[offset:offset +\n residual], episodic_images, episodic_labels, task_labels[task], args.\n mem_size, count_cls, episodic_filled_counter)\n', (37316, 37499), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((34195, 34263), 'numpy.arange', 'np.arange', (['mem_offset', '(mem_offset + args.mem_size * classes_per_task)'], {}), '(mem_offset, mem_offset + args.mem_size * classes_per_task)\n', (34204, 34263), True, 'import numpy as np\n'), ((34284, 34317), 'numpy.random.shuffle', 'np.random.shuffle', (['er_mem_indices'], {}), '(er_mem_indices)\n', (34301, 34317), True, 'import numpy as np\n'), ((36074, 36107), 'numpy.random.shuffle', 'np.random.shuffle', (['er_mem_indices'], {}), '(er_mem_indices)\n', (36091, 36107), True, 'import numpy as np\n'), ((36151, 36247), 'numpy.concatenate', 'np.concatenate', (['(episodic_images[er_mem_indices], train_x[offset:offset + residual])'], {'axis': '(0)'}), 
'((episodic_images[er_mem_indices], train_x[offset:offset +\n residual]), axis=0)\n', (36165, 36247), True, 'import numpy as np\n'), ((36336, 36432), 'numpy.concatenate', 'np.concatenate', (['(episodic_labels[er_mem_indices], train_y[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_labels[er_mem_indices], train_y[offset:offset +\n residual]), axis=0)\n', (36350, 36432), True, 'import numpy as np\n'), ((36668, 36702), 'numpy.eye', 'np.eye', (['proj_matrices[tt].shape[0]'], {}), '(proj_matrices[tt].shape[0])\n', (36674, 36702), True, 'import numpy as np\n'), ((30589, 30604), 'numpy.arange', 'np.arange', (['task'], {}), '(task)\n', (30598, 30604), True, 'import numpy as np\n'), ((35900, 35928), 'numpy.arange', 'np.arange', (['mem_filled_so_far'], {}), '(mem_filled_so_far)\n', (35909, 35928), True, 'import numpy as np\n'), ((35979, 36049), 'numpy.random.choice', 'np.random.choice', (['mem_filled_so_far', 'args.eps_mem_batch'], {'replace': '(False)'}), '(mem_filled_so_far, args.eps_mem_batch, replace=False)\n', (35995, 36049), True, 'import numpy as np\n'), ((34046, 34061), 'numpy.arange', 'np.arange', (['task'], {}), '(task)\n', (34055, 34061), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from tilec.fg import dBnudT,get_mix
"""
compute various conversion factors for LFI bandpasses
"""
TCMB = 2.726 # Kelvin
TCMB_uK = 2.726e6 # micro-Kelvin
hplanck = 6.626068e-34 # MKS
kboltz = 1.3806503e-23 # MKS
clight = 299792458.0 # MKS
clight_cmpersec = 2.99792458*1.e10 #speed of light in cm/s
N_freqs = 3
LFI_freqs = []
LFI_freqs.append('030')
LFI_freqs.append('044')
LFI_freqs.append('070')
LFI_freqs_GHz = np.array([30.0, 44.0, 70.0])
LFI_files = []
for i in xrange(N_freqs):
print("----------")
print(LFI_freqs[i])
LFI_files.append('../data/LFI_BANDPASS_F'+LFI_freqs[i]+'_reformat.txt')
LFI_loc = np.loadtxt(LFI_files[i])
# check norm, i.e., make sure response is unity for CMB
LFI_loc_GHz = LFI_loc[:,0]
LFI_loc_trans = LFI_loc[:,1]
print("CMB norm = ", np.trapz(LFI_loc_trans, LFI_loc_GHz))
# compute K_CMB -> y_SZ conversion
print("K_CMB -> y_SZ conversion: ", np.trapz(LFI_loc_trans*dBnudT(LFI_loc_GHz)*1.e6, LFI_loc_GHz) / np.trapz(LFI_loc_trans*dBnudT(LFI_loc_GHz)*1.e6*get_mix(LFI_loc_GHz,'tSZ')/TCMB_uK, LFI_loc_GHz) / TCMB)
# compute K_CMB -> MJy/sr conversion [IRAS convention, alpha=-1 power-law SED]
print("K_CMB -> MJy/sr conversion [IRAS convention, alpha=-1 power-law SED]: ", np.trapz(LFI_loc_trans*dBnudT(LFI_loc_GHz)*1.e6, LFI_loc_GHz) / np.trapz(LFI_loc_trans*(LFI_freqs_GHz[i]/LFI_loc_GHz), LFI_loc_GHz) * 1.e20)
# compute color correction from IRAS to "dust" (power-law with alpha=4)
print("MJy/sr color correction (power-law, alpha=-1 to alpha=4): ", np.trapz(LFI_loc_trans*(LFI_freqs_GHz[i]/LFI_loc_GHz), LFI_loc_GHz) / np.trapz(LFI_loc_trans*(LFI_loc_GHz/LFI_freqs_GHz[i])**4.0, LFI_loc_GHz))
# compute color correction from IRAS to modified blackbody with T=13.6 K, beta=1.4 (to compare to results at https://wiki.cosmos.esa.int/planckpla2015/index.php/UC_CC_Tables )
print("MJy/sr color correction (power-law alpha=-1 to MBB T=13.6 K/beta=1.4): ", np.trapz(LFI_loc_trans*(LFI_freqs_GHz[i]/LFI_loc_GHz), LFI_loc_GHz) / np.trapz(LFI_loc_trans*(LFI_loc_GHz/LFI_freqs_GHz[i])**(1.4+3.) * (np.exp(hplanck*LFI_freqs_GHz[i]*1.e9/(kboltz*13.6))-1.)/(np.exp(hplanck*LFI_loc_GHz*1.e9/(kboltz*13.6))-1.), LFI_loc_GHz))
print("----------")
|
[
"numpy.trapz",
"tilec.fg.dBnudT",
"numpy.array",
"numpy.exp",
"numpy.loadtxt",
"tilec.fg.get_mix"
] |
[((506, 534), 'numpy.array', 'np.array', (['[30.0, 44.0, 70.0]'], {}), '([30.0, 44.0, 70.0])\n', (514, 534), True, 'import numpy as np\n'), ((714, 738), 'numpy.loadtxt', 'np.loadtxt', (['LFI_files[i]'], {}), '(LFI_files[i])\n', (724, 738), True, 'import numpy as np\n'), ((888, 924), 'numpy.trapz', 'np.trapz', (['LFI_loc_trans', 'LFI_loc_GHz'], {}), '(LFI_loc_trans, LFI_loc_GHz)\n', (896, 924), True, 'import numpy as np\n'), ((1630, 1701), 'numpy.trapz', 'np.trapz', (['(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz))', 'LFI_loc_GHz'], {}), '(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz), LFI_loc_GHz)\n', (1638, 1701), True, 'import numpy as np\n'), ((1700, 1778), 'numpy.trapz', 'np.trapz', (['(LFI_loc_trans * (LFI_loc_GHz / LFI_freqs_GHz[i]) ** 4.0)', 'LFI_loc_GHz'], {}), '(LFI_loc_trans * (LFI_loc_GHz / LFI_freqs_GHz[i]) ** 4.0, LFI_loc_GHz)\n', (1708, 1778), True, 'import numpy as np\n'), ((2039, 2110), 'numpy.trapz', 'np.trapz', (['(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz))', 'LFI_loc_GHz'], {}), '(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz), LFI_loc_GHz)\n', (2047, 2110), True, 'import numpy as np\n'), ((1405, 1476), 'numpy.trapz', 'np.trapz', (['(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz))', 'LFI_loc_GHz'], {}), '(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz), LFI_loc_GHz)\n', (1413, 1476), True, 'import numpy as np\n'), ((2233, 2295), 'numpy.exp', 'np.exp', (['(hplanck * LFI_loc_GHz * 1000000000.0 / (kboltz * 13.6))'], {}), '(hplanck * LFI_loc_GHz * 1000000000.0 / (kboltz * 13.6))\n', (2239, 2295), True, 'import numpy as np\n'), ((1028, 1047), 'tilec.fg.dBnudT', 'dBnudT', (['LFI_loc_GHz'], {}), '(LFI_loc_GHz)\n', (1034, 1047), False, 'from tilec.fg import dBnudT, get_mix\n'), ((1117, 1144), 'tilec.fg.get_mix', 'get_mix', (['LFI_loc_GHz', '"""tSZ"""'], {}), "(LFI_loc_GHz, 'tSZ')\n", (1124, 1144), False, 'from tilec.fg import dBnudT, get_mix\n'), ((1364, 1383), 'tilec.fg.dBnudT', 'dBnudT', (['LFI_loc_GHz'], {}), '(LFI_loc_GHz)\n', 
(1370, 1383), False, 'from tilec.fg import dBnudT, get_mix\n'), ((2176, 2243), 'numpy.exp', 'np.exp', (['(hplanck * LFI_freqs_GHz[i] * 1000000000.0 / (kboltz * 13.6))'], {}), '(hplanck * LFI_freqs_GHz[i] * 1000000000.0 / (kboltz * 13.6))\n', (2182, 2243), True, 'import numpy as np\n'), ((1092, 1111), 'tilec.fg.dBnudT', 'dBnudT', (['LFI_loc_GHz'], {}), '(LFI_loc_GHz)\n', (1098, 1111), False, 'from tilec.fg import dBnudT, get_mix\n')]
|
# main app file
from typing import Optional
from fastapi import FastAPI, UploadFile, File, Request
from fastapi.responses import HTMLResponse
import json, os
from pydantic import BaseModel
from fastapi.templating import Jinja2Templates
from mangum import Mangum
# Create app, just like in Flask
app = FastAPI()
# Jinja2 templates are loaded from the local 'templates' directory.
templates = Jinja2Templates(directory='templates')
@app.get('/hello')
def hello():
    """Return a fixed greeting payload; doubles as a liveness check."""
    greeting = {'Hello': 'World'}
    return greeting
@app.get('/items/{item_id}')
def read_item(item_id: int, q: Optional[str] = None):
    """Echo the path item id together with the optional query parameter ``q``."""
    return {'item_id': item_id, 'q': q}
@app.get('/genRandomNumbers/{num_random}')
async def gen_random_ints(num_random: int, upper_limit: Optional[int] = None):
    """Return a JSON-encoded list of unique random ints in [0, upper_limit].

    When no (truthy) upper limit is given it defaults to num_random + 20.
    The request can never ask for more unique values than the range holds,
    so num_random is clamped to upper_limit.
    """
    from random import randint
    if not upper_limit:
        upper_limit = num_random + 20
    wanted = min(num_random, upper_limit)
    picked = set()
    while len(picked) < wanted:
        picked.add(randint(0, upper_limit))
    return json.dumps(list(picked))
# Pydantic types for complex JSON Post operations
class Item(BaseModel):
    """Request/response payload for the /items endpoints.

    The original wrote ``tax: Optional[float] = price/10`` -- that relies on
    the class-body binding of ``price`` to its default (10.0) and evaluates
    once at class-definition time, which is an obscure trap. The literal
    keeps the identical default value (1.0).
    """
    name: str
    description: Optional[str] = "What an amazing item"
    price: Optional[float] = 10.0
    tax: Optional[float] = 1.0  # == default price / 10
# In-memory store for items created via POST /items (lost on restart).
item_list = []


@app.post('/items')
async def create_item(item: Item):
    """Store a posted item, recomputing its tax as a tenth of its price."""
    item.tax = item.price / 10
    item_list.append(item)
    return item


@app.get('/items')
async def get_items():
    """Return every item stored so far."""
    return item_list
@app.get('/listFiles')
async def list_uploaded_files():
    """Return the names of all files in the local image upload folder."""
    return os.listdir('./imgs')
# Upload files
@app.post('/uploadImg')
async def create_upload_img(file: UploadFile = File(...)):
    """Save an uploaded image as ./imgs/<n>.jpg and report the folder contents.

    The target name is one past the current file count -- NOTE(review): this
    can collide if files are ever deleted; behavior kept as-is.
    """
    if not os.path.exists('./imgs'):
        os.mkdir('./imgs')
    existing = os.listdir('./imgs')
    file_name = f'{len(existing)+1}.jpg'
    with open(f'./imgs/{file_name}', 'wb+') as out:
        out.write(file.file.read())
    return {'Status':'Uploaded',
            'uploaded_files': await list_uploaded_files()}
@app.get('/', response_class=HTMLResponse)
async def index(request: Request):
    """Render the landing page; Jinja2 requires the request in the context."""
    return templates.TemplateResponse('index.html', {'request':request})
# AWS Lambda entry point: Mangum adapts the ASGI app for API Gateway events.
handler = Mangum(app)
|
[
"os.mkdir",
"random.randint",
"mangum.Mangum",
"os.path.exists",
"fastapi.templating.Jinja2Templates",
"fastapi.File",
"os.listdir",
"fastapi.FastAPI"
] |
[((303, 312), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (310, 312), False, 'from fastapi import FastAPI, UploadFile, File, Request\n'), ((325, 363), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', ([], {'directory': '"""templates"""'}), "(directory='templates')\n", (340, 363), False, 'from fastapi.templating import Jinja2Templates\n'), ((2136, 2147), 'mangum.Mangum', 'Mangum', (['app'], {}), '(app)\n', (2142, 2147), False, 'from mangum import Mangum\n'), ((1528, 1548), 'os.listdir', 'os.listdir', (['"""./imgs"""'], {}), "('./imgs')\n", (1538, 1548), False, 'import json, os\n'), ((1636, 1645), 'fastapi.File', 'File', (['...'], {}), '(...)\n', (1640, 1645), False, 'from fastapi import FastAPI, UploadFile, File, Request\n'), ((1729, 1749), 'os.listdir', 'os.listdir', (['"""./imgs"""'], {}), "('./imgs')\n", (1739, 1749), False, 'import json, os\n'), ((1659, 1683), 'os.path.exists', 'os.path.exists', (['"""./imgs"""'], {}), "('./imgs')\n", (1673, 1683), False, 'import json, os\n'), ((1693, 1711), 'os.mkdir', 'os.mkdir', (['"""./imgs"""'], {}), "('./imgs')\n", (1701, 1711), False, 'import json, os\n'), ((959, 982), 'random.randint', 'randint', (['(0)', 'upper_limit'], {}), '(0, upper_limit)\n', (966, 982), False, 'from random import randint\n')]
|
import os
import pathlib
# Default folder scanned when no argument is passed.
path_to_folder = pathlib.Path('.', 'folder_to_scan')


def scan_folder_select_star(folder_to_scan=path_to_folder):
    """Print the name of every entry directly inside *folder_to_scan*."""
    with os.scandir(folder_to_scan) as entries:
        for entry in entries:
            print(entry.name)
def main():
    """Run the demo scan over the default folder."""
    scan_folder_select_star()


if __name__ == '__main__':
    main()
    # BUGFIX: the listing below used to sit *after* the __main__ guard at
    # module level, so it ran (and could crash on a missing folder) as a
    # side effect of merely importing this module. Keep it under the guard.
    list_dir = os.listdir(path_to_folder)
    print(list_dir)
|
[
"pathlib.Path",
"os.listdir",
"os.scandir"
] |
[((43, 78), 'pathlib.Path', 'pathlib.Path', (['"""."""', '"""folder_to_scan"""'], {}), "('.', 'folder_to_scan')\n", (55, 78), False, 'import pathlib\n'), ((357, 383), 'os.listdir', 'os.listdir', (['path_to_folder'], {}), '(path_to_folder)\n', (367, 383), False, 'import os\n'), ((150, 176), 'os.scandir', 'os.scandir', (['folder_to_scan'], {}), '(folder_to_scan)\n', (160, 176), False, 'import os\n')]
|
# pylint: disable=no-name-in-module, f0401
from flask import request
from flask.ext.login import login_required, current_user
from app import app
from app.database import session
from app.util import serve_response, serve_error
from app.modules.event_manager.models import Event
from app.modules.project_manager.models import Project
from app.modules.submission_manager.models import Submission
from app.modules.submission_manager.runner import Runner
import time
import os
# Source-file extension used for each supported submission type.
FILE_EXTENSIONS_FROM_TYPE = {
    'cuda': '.cu',
    'oacc': '.c'
}


def directory_for_submission(job):
    """Return the on-disk directory that stores files for submission *job*."""
    submits_root = os.path.join(app.config['DATA_FOLDER'], 'submits')
    return os.path.join(submits_root, str(job))
@app.route('/api/submissions', methods=['POST'])
@login_required
def create_submission():
    """Create a submission for the current user's project and queue it to run."""
    try:
        # BUGFIX: the two conditions were joined with Python's `and`, which
        # does not build a SQL conjunction (SQLAlchemy expressions are not
        # boolean-testable) -- pass them as separate filter() arguments.
        project = (session.query(Project).filter(
            Project.project_id == int(request.form['project_id']),
            Project.username == current_user.username).first())
        project.body = request.form['body']
        submission = Submission(
            username=current_user.username,
            submit_time=int(time.time()),
            type=project.type,
            project_id=int(request.form['project_id']),
            run=int(request.form['run'])
        )
    except (KeyError, ValueError):
        # ValueError covers non-numeric project_id/run form values, which
        # previously bubbled up as a 500.
        return serve_error('Form data missing.')
    submission.commit_to_session()
    project.commit_to_session()
    Event.log(current_user.username, 'execute', submission.job)

    directory = directory_for_submission(submission.job)
    os.mkdir(directory)
    file_name = 'submit' + FILE_EXTENSIONS_FROM_TYPE[submission.type]
    # Write the submitted source where the runner expects to find it;
    # a context manager guarantees the handle is closed.
    with open(os.path.join(directory, file_name), 'w') as source_file:
        source_file.write(project.body)
    runner = Runner(submission, file_name)
    runner.run_queued()
    return serve_response({
        'job': submission.job
    })
@app.route('/api/submissions')
@login_required
def get_submissions():
    """List every submission belonging to the logged-in user."""
    owned = (session.query(Submission)
             .filter(Submission.username == current_user.username)
             .all())
    return serve_response({'submissions': [s.to_dict() for s in owned]})
@app.route('/api/submissions/<int:job>')
@login_required
def get_submission(job):
    """Return the stored source body for one of the current user's submissions."""
    # BUGFIX: the two conditions were joined with Python's `and`, which is
    # not a SQL conjunction -- pass them as separate filter() arguments.
    submission = session.query(Submission).filter(
        Submission.username == current_user.username,
        Submission.job == job).first()
    directory = directory_for_submission(job)
    file_name = 'submit' + FILE_EXTENSIONS_FROM_TYPE[submission.type]
    # BUGFIX: the file handle was never closed; use a context manager.
    with open(os.path.join(directory, file_name)) as source_file:
        body = source_file.read()
    return serve_response({
        'body': body
    })
|
[
"app.app.route",
"app.modules.submission_manager.runner.Runner",
"os.mkdir",
"app.database.session.query",
"time.time",
"app.modules.event_manager.models.Event.log",
"app.util.serve_response",
"app.util.serve_error",
"os.path.join"
] |
[((665, 712), 'app.app.route', 'app.route', (['"""/api/submissions"""'], {'methods': "['POST']"}), "('/api/submissions', methods=['POST'])\n", (674, 712), False, 'from app import app\n'), ((1881, 1910), 'app.app.route', 'app.route', (['"""/api/submissions"""'], {}), "('/api/submissions')\n", (1890, 1910), False, 'from app import app\n'), ((2182, 2221), 'app.app.route', 'app.route', (['"""/api/submissions/<int:job>"""'], {}), "('/api/submissions/<int:job>')\n", (2191, 2221), False, 'from app import app\n'), ((1408, 1467), 'app.modules.event_manager.models.Event.log', 'Event.log', (['current_user.username', '"""execute"""', 'submission.job'], {}), "(current_user.username, 'execute', submission.job)\n", (1417, 1467), False, 'from app.modules.event_manager.models import Event\n'), ((1530, 1549), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (1538, 1549), False, 'import os\n'), ((1758, 1787), 'app.modules.submission_manager.runner.Runner', 'Runner', (['submission', 'file_name'], {}), '(submission, file_name)\n', (1764, 1787), False, 'from app.modules.submission_manager.runner import Runner\n'), ((1824, 1863), 'app.util.serve_response', 'serve_response', (["{'job': submission.job}"], {}), "({'job': submission.job})\n", (1838, 1863), False, 'from app.util import serve_response, serve_error\n'), ((2141, 2178), 'app.util.serve_response', 'serve_response', (["{'submissions': subs}"], {}), "({'submissions': subs})\n", (2155, 2178), False, 'from app.util import serve_response, serve_error\n'), ((2659, 2689), 'app.util.serve_response', 'serve_response', (["{'body': body}"], {}), "({'body': body})\n", (2673, 2689), False, 'from app.util import serve_response, serve_error\n'), ((1643, 1677), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (1655, 1677), False, 'import os\n'), ((2582, 2616), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (2594, 2616), False, 'import os\n'), 
((1301, 1334), 'app.util.serve_error', 'serve_error', (['"""Form data missing."""'], {}), "('Form data missing.')\n", (1312, 1334), False, 'from app.util import serve_response, serve_error\n'), ((1113, 1124), 'time.time', 'time.time', ([], {}), '()\n', (1122, 1124), False, 'import time\n'), ((1968, 1993), 'app.database.session.query', 'session.query', (['Submission'], {}), '(Submission)\n', (1981, 1993), False, 'from app.database import session\n'), ((2280, 2305), 'app.database.session.query', 'session.query', (['Submission'], {}), '(Submission)\n', (2293, 2305), False, 'from app.database import session\n'), ((782, 804), 'app.database.session.query', 'session.query', (['Project'], {}), '(Project)\n', (795, 804), False, 'from app.database import session\n')]
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.postgres.forms import SimpleArrayField
from projects.models import Buy, Project, IAA, AgencyOffice
from projects.widgets import DurationMultiWidget
from form_utils.forms import BetterModelForm
from form_utils.widgets import AutoResizeTextarea
class ClientForm(forms.ModelForm):
    """ModelForm exposing every field of AgencyOffice."""
    class Meta:
        model = AgencyOffice
        fields = '__all__'
class IAAForm(forms.ModelForm):
    """ModelForm exposing every field of IAA."""
    class Meta:
        model = IAA
        fields = '__all__'
class ProjectForm(forms.ModelForm):
    """ModelForm exposing every field of Project."""
    class Meta:
        model = Project
        fields = '__all__'
class CreateBuyForm(forms.ModelForm):
    """Minimal Buy-creation form: only the fields needed up front."""
    class Meta:
        model = Buy
        fields = [
            'name',
            'description',
            'project',
            'budget',
            'procurement_method'
        ]
class EditBuyForm(forms.ModelForm):
    """Full edit form for a Buy.

    The free-text array fields take one item per line, and every
    people-selection field is restricted to members of the buy's project
    team (so a buy can only be staffed from its own project).
    """

    requirements = SimpleArrayField(
        forms.CharField(),
        delimiter='\n',
        help_text='Multiple requirements are allowed. Enter each one on its '
                  'own line. Additional formatting, like bullet points, will '
                  'be added later, so leave that out.',
        required=False,
        widget=forms.Textarea,
    )
    skills_needed = SimpleArrayField(
        forms.CharField(),
        delimiter='\n',
        help_text='Multiple skills are allowed. Enter each one on its '
                  'own line. Additional formatting, like bullet points, will '
                  'be added later, so leave that out.',
        required=False,
        widget=forms.Textarea,
    )

    def __init__(self, *args, **kwargs):
        super(EditBuyForm, self).__init__(*args, **kwargs)
        buy = kwargs['instance']
        team_members = User.objects.filter(project=buy.project.id)
        # All people-fields draw from the same team queryset.
        for team_field in ('technical_evaluation_panel',
                           'product_owner',
                           'product_lead',
                           'acquisition_lead',
                           'technical_lead'):
            self.fields[team_field].queryset = team_members

    class Meta:
        model = Buy
        fields = '__all__'
        widgets = {
            'base_period_length': DurationMultiWidget(),
            'option_period_length': DurationMultiWidget(),
        }
|
[
"django.forms.CharField",
"django.contrib.auth.models.User.objects.filter",
"projects.widgets.DurationMultiWidget"
] |
[((956, 973), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (971, 973), False, 'from django import forms\n'), ((1319, 1336), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (1334, 1336), False, 'from django import forms\n'), ((1788, 1831), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'project': 'buy.project.id'}), '(project=buy.project.id)\n', (1807, 1831), False, 'from django.contrib.auth.models import User\n'), ((2322, 2343), 'projects.widgets.DurationMultiWidget', 'DurationMultiWidget', ([], {}), '()\n', (2341, 2343), False, 'from projects.widgets import DurationMultiWidget\n'), ((2381, 2402), 'projects.widgets.DurationMultiWidget', 'DurationMultiWidget', ([], {}), '()\n', (2400, 2402), False, 'from projects.widgets import DurationMultiWidget\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 9 14:34:04 2019
@author: weiruchen
"""
from VorDiff.reverse_operator import ReverseOperator as rop
from VorDiff.reverse_autodiff import ReverseAutoDiff as rad
def create_reverse_vector(array):
    """Build and return the two reverse-mode vectors for *array* via ReverseAutoDiff."""
    first, second = rad.reverse_vector(array)
    return first, second
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
# Scalar case: differentiate f = 1/x1 + sin(1/x1) w.r.t. a single entry.
f = 1 / (x[1]) + rop.sin(1/x[1])
print(rad.partial_scalar(f))
# Vector cases. Fresh x,y are rebuilt before each function -- presumably
# because reverse-mode accumulation is stateful; TODO confirm.
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
a = x + 1
print(rad.partial_vector(a,x))
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
h = rop.sin(x)
print(rad.partial_vector(h,x))
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
g = rop.cos(y)**2
print(rad.partial_vector(g,y))
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
f = 2*x + y
print(rad.partial_vector(f,x))
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
# for multiple functions
# NOTE(review): this x,y pair appears unused -- F1/F2 below build their own.
def F1(array):
    """Demo function 1: return (3*x + cos(y)**2 + 1, x, y) for fresh variables."""
    x, y = create_reverse_vector(array)
    value = 3*x + rop.cos(y)**2 + 1
    return value, x, y
def F2(array):
    """Demo function 2: return (sin(x) + 2*sin(y), x, y) for fresh variables."""
    x, y = create_reverse_vector(array)
    value = rop.sin(x) + 2*rop.sin(y)
    return value, x, y
array = [[1, 2, 3], [1,3,6]]
# Evaluate both demo functions on the same input and report values and
# partial derivatives with respect to each variable vector.
vec_functions = [F1(array), F2(array)]
for entry in vec_functions:
    function, x, y = entry
    print("The values of the function is ", function._val)
    print("The derivatives of the function with respect to values of variable x is", rad.partial_vector(function, x))
    print("The derivatives of the function with respect to values of variable y is", rad.partial_vector(function, y))
|
[
"VorDiff.reverse_autodiff.ReverseAutoDiff.reverse_vector",
"VorDiff.reverse_autodiff.ReverseAutoDiff.partial_vector",
"VorDiff.reverse_autodiff.ReverseAutoDiff.partial_scalar",
"VorDiff.reverse_operator.ReverseOperator.sin",
"VorDiff.reverse_operator.ReverseOperator.cos"
] |
[((612, 622), 'VorDiff.reverse_operator.ReverseOperator.sin', 'rop.sin', (['x'], {}), '(x)\n', (619, 622), True, 'from VorDiff.reverse_operator import ReverseOperator as rop\n'), ((279, 304), 'VorDiff.reverse_autodiff.ReverseAutoDiff.reverse_vector', 'rad.reverse_vector', (['array'], {}), '(array)\n', (297, 304), True, 'from VorDiff.reverse_autodiff import ReverseAutoDiff as rad\n'), ((403, 420), 'VorDiff.reverse_operator.ReverseOperator.sin', 'rop.sin', (['(1 / x[1])'], {}), '(1 / x[1])\n', (410, 420), True, 'from VorDiff.reverse_operator import ReverseOperator as rop\n'), ((425, 446), 'VorDiff.reverse_autodiff.ReverseAutoDiff.partial_scalar', 'rad.partial_scalar', (['f'], {}), '(f)\n', (443, 446), True, 'from VorDiff.reverse_autodiff import ReverseAutoDiff as rad\n'), ((530, 554), 'VorDiff.reverse_autodiff.ReverseAutoDiff.partial_vector', 'rad.partial_vector', (['a', 'x'], {}), '(a, x)\n', (548, 554), True, 'from VorDiff.reverse_autodiff import ReverseAutoDiff as rad\n'), ((629, 653), 'VorDiff.reverse_autodiff.ReverseAutoDiff.partial_vector', 'rad.partial_vector', (['h', 'x'], {}), '(h, x)\n', (647, 653), True, 'from VorDiff.reverse_autodiff import ReverseAutoDiff as rad\n'), ((713, 723), 'VorDiff.reverse_operator.ReverseOperator.cos', 'rop.cos', (['y'], {}), '(y)\n', (720, 723), True, 'from VorDiff.reverse_operator import ReverseOperator as rop\n'), ((733, 757), 'VorDiff.reverse_autodiff.ReverseAutoDiff.partial_vector', 'rad.partial_vector', (['g', 'y'], {}), '(g, y)\n', (751, 757), True, 'from VorDiff.reverse_autodiff import ReverseAutoDiff as rad\n'), ((828, 852), 'VorDiff.reverse_autodiff.ReverseAutoDiff.partial_vector', 'rad.partial_vector', (['f', 'x'], {}), '(f, x)\n', (846, 852), True, 'from VorDiff.reverse_autodiff import ReverseAutoDiff as rad\n'), ((1395, 1425), 'VorDiff.reverse_autodiff.ReverseAutoDiff.partial_vector', 'rad.partial_vector', (['func[0]', 'x'], {}), '(func[0], x)\n', (1413, 1425), True, 'from VorDiff.reverse_autodiff import 
ReverseAutoDiff as rad\n'), ((1512, 1542), 'VorDiff.reverse_autodiff.ReverseAutoDiff.partial_vector', 'rad.partial_vector', (['func[0]', 'y'], {}), '(func[0], y)\n', (1530, 1542), True, 'from VorDiff.reverse_autodiff import ReverseAutoDiff as rad\n'), ((1096, 1106), 'VorDiff.reverse_operator.ReverseOperator.sin', 'rop.sin', (['x'], {}), '(x)\n', (1103, 1106), True, 'from VorDiff.reverse_operator import ReverseOperator as rop\n'), ((1111, 1121), 'VorDiff.reverse_operator.ReverseOperator.sin', 'rop.sin', (['y'], {}), '(y)\n', (1118, 1121), True, 'from VorDiff.reverse_operator import ReverseOperator as rop\n'), ((1004, 1014), 'VorDiff.reverse_operator.ReverseOperator.cos', 'rop.cos', (['y'], {}), '(y)\n', (1011, 1014), True, 'from VorDiff.reverse_operator import ReverseOperator as rop\n')]
|
import pytest
from idact.core.auth import AuthMethod
from idact.detail.allocation.allocation_parameters import AllocationParameters
from idact.detail.config.client.client_cluster_config import ClusterConfigImpl
from idact.detail.nodes.node_impl import NodeImpl
from idact.detail.nodes.nodes_impl import NodesImpl
from idact.detail.slurm.slurm_allocation import SlurmAllocation
def get_data_for_test():
    """Build a minimal cluster config plus its access node for the tests below."""
    cluster_config = ClusterConfigImpl(
        host='localhost1',
        port=1,
        user='user-1',
        auth=AuthMethod.ASK)
    return cluster_config, NodeImpl(config=cluster_config)
def test_serialize_deserialize():
    """Round-trip check: serialize() output feeds deserialize() and compares equal."""
    config, access_node = get_data_for_test()
    nodes = [NodeImpl(config=config),
             NodeImpl(config=config)]
    uuid = '1111'
    value = NodesImpl(nodes=nodes,
                      allocation=SlurmAllocation(
                          job_id=1,
                          access_node=access_node,
                          nodes=nodes,
                          entry_point_script_path='a',
                          parameters=AllocationParameters()),
                      uuid=uuid)
    serialized = value.serialize()
    # Freshly constructed NodeImpls carry no host/port/resource data yet,
    # hence all the None fields in the expected payload.
    assert serialized == {
        'type': 'SerializableTypes.NODES_IMPL',
        'nodes': [{'type': 'SerializableTypes.NODE_IMPL',
                   'host': None,
                   'port': None,
                   'cores': None,
                   'memory': None,
                   'allocated_until': None},
                  {'type': 'SerializableTypes.NODE_IMPL',
                   'host': None,
                   'port': None,
                   'cores': None,
                   'memory': None,
                   'allocated_until': None}],
        'allocation': {
            'type': 'SerializableTypes.SLURM_ALLOCATION',
            'job_id': 1,
            'entry_point_script_path': 'a',
            'parameters': {'type': 'SerializableTypes.ALLOCATION_PARAMETERS',
                           'nodes': None,
                           'cores': None,
                           'memory_per_node': None,
                           'walltime': None,
                           'native_args': {}},
            'done_waiting': False}}
    deserialized = NodesImpl.deserialize(config=config,
                                          access_node=access_node,
                                          serialized=serialized,
                                          uuid=uuid)
    assert deserialized == value
def test_invalid_serialized_type():
    """Deserializing a payload with an unknown type tag raises AssertionError."""
    config, access_node = get_data_for_test()
    bad_payload = {'type': 'SerializableTypes.NODES_IMPL2'}
    with pytest.raises(AssertionError):
        NodesImpl.deserialize(config=config,
                              access_node=access_node,
                              uuid='1111',
                              serialized=bad_payload)
def test_missing_serialized_keys():
    """A payload with the right type tag but no node data raises RuntimeError."""
    config, access_node = get_data_for_test()
    incomplete = {'type': 'SerializableTypes.NODES_IMPL'}
    with pytest.raises(RuntimeError):
        NodesImpl.deserialize(config=config,
                              access_node=access_node,
                              serialized=incomplete,
                              uuid='1111')
|
[
"idact.detail.nodes.nodes_impl.NodesImpl.deserialize",
"idact.detail.config.client.client_cluster_config.ClusterConfigImpl",
"pytest.raises",
"idact.detail.allocation.allocation_parameters.AllocationParameters",
"idact.detail.nodes.node_impl.NodeImpl"
] |
[((418, 503), 'idact.detail.config.client.client_cluster_config.ClusterConfigImpl', 'ClusterConfigImpl', ([], {'host': '"""localhost1"""', 'port': '(1)', 'user': '"""user-1"""', 'auth': 'AuthMethod.ASK'}), "(host='localhost1', port=1, user='user-1', auth=AuthMethod.ASK\n )\n", (435, 503), False, 'from idact.detail.config.client.client_cluster_config import ClusterConfigImpl\n'), ((610, 633), 'idact.detail.nodes.node_impl.NodeImpl', 'NodeImpl', ([], {'config': 'config'}), '(config=config)\n', (618, 633), False, 'from idact.detail.nodes.node_impl import NodeImpl\n'), ((2304, 2404), 'idact.detail.nodes.nodes_impl.NodesImpl.deserialize', 'NodesImpl.deserialize', ([], {'config': 'config', 'access_node': 'access_node', 'serialized': 'serialized', 'uuid': 'uuid'}), '(config=config, access_node=access_node, serialized=\n serialized, uuid=uuid)\n', (2325, 2404), False, 'from idact.detail.nodes.nodes_impl import NodesImpl\n'), ((761, 784), 'idact.detail.nodes.node_impl.NodeImpl', 'NodeImpl', ([], {'config': 'config'}), '(config=config)\n', (769, 784), False, 'from idact.detail.nodes.node_impl import NodeImpl\n'), ((799, 822), 'idact.detail.nodes.node_impl.NodeImpl', 'NodeImpl', ([], {'config': 'config'}), '(config=config)\n', (807, 822), False, 'from idact.detail.nodes.node_impl import NodeImpl\n'), ((2710, 2739), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2723, 2739), False, 'import pytest\n'), ((2749, 2851), 'idact.detail.nodes.nodes_impl.NodesImpl.deserialize', 'NodesImpl.deserialize', ([], {'config': 'config', 'access_node': 'access_node', 'serialized': 'serialized', 'uuid': '"""1111"""'}), "(config=config, access_node=access_node, serialized=\n serialized, uuid='1111')\n", (2770, 2851), False, 'from idact.detail.nodes.nodes_impl import NodesImpl\n'), ((3090, 3117), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3103, 3117), False, 'import pytest\n'), ((3127, 3228), 
'idact.detail.nodes.nodes_impl.NodesImpl.deserialize', 'NodesImpl.deserialize', ([], {'config': 'config', 'access_node': 'access_node', 'uuid': '"""1111"""', 'serialized': 'serialized'}), "(config=config, access_node=access_node, uuid='1111',\n serialized=serialized)\n", (3148, 3228), False, 'from idact.detail.nodes.nodes_impl import NodesImpl\n'), ((1145, 1167), 'idact.detail.allocation.allocation_parameters.AllocationParameters', 'AllocationParameters', ([], {}), '()\n', (1165, 1167), False, 'from idact.detail.allocation.allocation_parameters import AllocationParameters\n')]
|
"""
A minimal Django app, just one file.
See: http://olifante.blogs.com/covil/2010/04/minimal-django.html
"""
import os
from django.conf.urls.defaults import patterns
from django.core.mail import send_mail
from django.http import HttpResponse
# Derive the URLconf module name from this file's own name, so the whole
# Django app can live in one module.
filepath, extension = os.path.splitext(__file__)
ROOT_URLCONF = os.path.basename(filepath)
# BUGFIX: the trailing comma is required. Without it the parentheses are
# just grouping and INSTALLED_APPS is the *string* "lettuce.django", which
# Django would iterate character by character instead of as one app name.
INSTALLED_APPS = (
    "lettuce.django",
)
def mail(request):
    """Send a fixed test e-mail and confirm success in the HTTP response.

    fail_silently=False means any mail-backend failure propagates as an
    exception instead of being swallowed.
    """
    send_mail(subject='Subject here',
              message='Here is the message.',
              from_email='<EMAIL>',
              recipient_list=['<EMAIL>'],
              fail_silently=False)
    response = HttpResponse('Mail has been sent')
    return response
# Route /mail/ to the mail view (old-style Django URL patterns).
urlpatterns = patterns('', (r'^mail/$', mail))
|
[
"django.http.HttpResponse",
"os.path.basename",
"django.core.mail.send_mail",
"os.path.splitext",
"django.conf.urls.defaults.patterns"
] |
[((269, 295), 'os.path.splitext', 'os.path.splitext', (['__file__'], {}), '(__file__)\n', (285, 295), False, 'import os\n'), ((311, 337), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (327, 337), False, 'import os\n'), ((578, 609), 'django.conf.urls.defaults.patterns', 'patterns', (['""""""', "('^mail/$', mail)"], {}), "('', ('^mail/$', mail))\n", (586, 609), False, 'from django.conf.urls.defaults import patterns\n'), ((409, 507), 'django.core.mail.send_mail', 'send_mail', (['"""Subject here"""', '"""Here is the message."""', '"""<EMAIL>"""', "['<EMAIL>']"], {'fail_silently': '(False)'}), "('Subject here', 'Here is the message.', '<EMAIL>', ['<EMAIL>'],\n fail_silently=False)\n", (418, 507), False, 'from django.core.mail import send_mail\n'), ((528, 562), 'django.http.HttpResponse', 'HttpResponse', (['"""Mail has been sent"""'], {}), "('Mail has been sent')\n", (540, 562), False, 'from django.http import HttpResponse\n')]
|
from kivy.app import App
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.clock import Clock
from kivy.properties import (
NumericProperty, StringProperty, BooleanProperty,
)
import asynckivy as ak
import kivy_garden.draggable
class Magnet(Factory.Widget):
    '''
    Single-child wrapper that animates its child towards its own pos/size
    whenever they change (using asynckivy animations).

    Inspired by
    https://github.com/kivy-garden/garden.magnet
    '''
    # Animation knobs; when do_anim is False the child snaps into place instantly.
    do_anim = BooleanProperty(True)
    anim_duration = NumericProperty(1)
    anim_transition = StringProperty('out_quad')
    # default value of the instance attributes
    # (a dummy coroutine, so self._coro.close() below is always safe to call)
    _coro = ak.sleep_forever()
    def __init__(self, **kwargs):
        # Coalesce pos/size changes into a single _start_anim call per frame.
        self._anim_trigger = trigger = \
            Clock.create_trigger(self._start_anim, -1)
        super().__init__(**kwargs)
        self.fbind('pos', trigger)
        self.fbind('size', trigger)
    def add_widget(self, widget, *args, **kwargs):
        # A Magnet mirrors exactly one child; reject a second one.
        if self.children:
            raise ValueError('Magnet can have only one child')
        widget.pos = self.pos
        widget.size = self.size
        return super().add_widget(widget, *args, **kwargs)
    def _start_anim(self, *args):
        if self.children:
            child = self.children[0]
            # Cancel any animation still in flight before starting a new one.
            self._coro.close()
            if not self.do_anim:
                child.pos = self.pos
                child.size = self.size
                return
            self._coro = ak.start(ak.animate(
                child,
                d=self.anim_duration,
                t=self.anim_transition,
                x=self.x, y=self.y, width=self.width, height=self.height,
            ))
KV_CODE = '''
#:import create_spacer kivy_garden.draggable._utils._create_spacer
<ReorderableGridLayout@KXReorderableBehavior+GridLayout>:
<DraggableItem@KXDraggableBehavior+Magnet>:
do_anim: not self.is_being_dragged
anim_duration: .2
drag_cls: 'test'
drag_timeout: 50
font_size: 30
text: ''
opacity: .5 if self.is_being_dragged else 1.
size_hint_min: 50, 50
pos_hint: {'center_x': .5, 'center_y': .5, }
canvas.after:
Color:
rgba: .5, 1, 0, 1 if root.is_being_dragged else .5
Line:
width: 2 if root.is_being_dragged else 1
rectangle: [*self.pos, *self.size, ]
Label:
font_size: 30
text: root.text
<MyButton@Button>:
font_size: sp(20)
size_hint_min_x: self.texture_size[0] + dp(10)
size_hint_min_y: self.texture_size[1] + dp(10)
ReorderableGridLayout:
spacing: 10
padding: 10
drag_classes: ['test', ]
cols: 6
spacer_widgets:
[create_spacer(color=color)
for color in "#000044 #002200 #440000".split()]
'''
class SampleApp(App):
    def build(self):
        # The whole UI is declared in KV_CODE.
        return Builder.load_string(KV_CODE)
    def on_start(self):
        gl = self.root
        DraggableItem = Factory.DraggableItem
        # NOTE(review): this discarded instance looks redundant -- presumably it
        # forces the <DraggableItem> KV rule to be applied once before the loop;
        # confirm before removing.
        DraggableItem()
        # Populate the grid with 23 numbered draggable items.
        for i in range(23):
            gl.add_widget(DraggableItem(text=str(i)))
# Script entry point.
if __name__ == '__main__':
    SampleApp().run()
|
[
"kivy.clock.Clock.create_trigger",
"kivy.lang.Builder.load_string",
"asynckivy.animate",
"kivy.properties.BooleanProperty",
"kivy.properties.StringProperty",
"kivy.properties.NumericProperty",
"asynckivy.sleep_forever"
] |
[((383, 404), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(True)'], {}), '(True)\n', (398, 404), False, 'from kivy.properties import NumericProperty, StringProperty, BooleanProperty\n'), ((425, 443), 'kivy.properties.NumericProperty', 'NumericProperty', (['(1)'], {}), '(1)\n', (440, 443), False, 'from kivy.properties import NumericProperty, StringProperty, BooleanProperty\n'), ((466, 492), 'kivy.properties.StringProperty', 'StringProperty', (['"""out_quad"""'], {}), "('out_quad')\n", (480, 492), False, 'from kivy.properties import NumericProperty, StringProperty, BooleanProperty\n'), ((553, 571), 'asynckivy.sleep_forever', 'ak.sleep_forever', ([], {}), '()\n', (569, 571), True, 'import asynckivy as ak\n'), ((660, 702), 'kivy.clock.Clock.create_trigger', 'Clock.create_trigger', (['self._start_anim', '(-1)'], {}), '(self._start_anim, -1)\n', (680, 702), False, 'from kivy.clock import Clock\n'), ((2698, 2726), 'kivy.lang.Builder.load_string', 'Builder.load_string', (['KV_CODE'], {}), '(KV_CODE)\n', (2717, 2726), False, 'from kivy.lang import Builder\n'), ((1366, 1492), 'asynckivy.animate', 'ak.animate', (['child'], {'d': 'self.anim_duration', 't': 'self.anim_transition', 'x': 'self.x', 'y': 'self.y', 'width': 'self.width', 'height': 'self.height'}), '(child, d=self.anim_duration, t=self.anim_transition, x=self.x, y\n =self.y, width=self.width, height=self.height)\n', (1376, 1492), True, 'import asynckivy as ak\n')]
|
from tensorwatch.stream import Stream

# Three console-debug streams, chained with subscribe (s1 -> s2 -> s3),
# then a write is issued at each level of the chain.
s1, s2, s3 = (Stream(stream_name=name, console_debug=True)
              for name in ('s1', 's2', 's3'))
s1.subscribe(s2)
s2.subscribe(s3)
s3.write('S3 wrote this')
s2.write('S2 wrote this')
s1.write('S1 wrote this')
|
[
"tensorwatch.stream.Stream"
] |
[((45, 89), 'tensorwatch.stream.Stream', 'Stream', ([], {'stream_name': '"""s1"""', 'console_debug': '(True)'}), "(stream_name='s1', console_debug=True)\n", (51, 89), False, 'from tensorwatch.stream import Stream\n'), ((95, 139), 'tensorwatch.stream.Stream', 'Stream', ([], {'stream_name': '"""s2"""', 'console_debug': '(True)'}), "(stream_name='s2', console_debug=True)\n", (101, 139), False, 'from tensorwatch.stream import Stream\n'), ((145, 189), 'tensorwatch.stream.Stream', 'Stream', ([], {'stream_name': '"""s3"""', 'console_debug': '(True)'}), "(stream_name='s3', console_debug=True)\n", (151, 189), False, 'from tensorwatch.stream import Stream\n')]
|
import os
import argparse
from detect.eval.src.config import prepare_cfg, prepare_weight
from detect.eval.src.dataset import prepare_dataset
from detect.eval.src.detector import Detector
def parse_arg():
    """Build and parse the command-line arguments for YOLO v3 evaluation."""
    parser = argparse.ArgumentParser(description='YOLO v3 evaluation')
    for flag, options in (
            ('--bs', dict(type=int, help="Batch size")),
            ('--reso', dict(type=int, help="Image resolution")),
            ('--gpu', dict(default='0,1,2,3', help="GPU ids")),
            ('--name', dict(type=str, choices=['linemod-single', 'linemod-occ'])),
            ('--seq', dict(type=str, help="Sequence number")),
            ('--ckpt', dict(type=str, help="Checkpoint path"))):
        parser.add_argument(flag, **options)
    return parser.parse_args()
# Parse CLI options at import time and restrict CUDA to the requested GPUs.
# This must happen before any CUDA context is created.
args = parse_arg()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if __name__ == '__main__':
    print(args)
    # Build the detector from the dataset-specific config and checkpoint.
    detector = Detector(
        cfgfile=prepare_cfg(args.name),
        seq=args.seq,
        weightfile=prepare_weight(args.ckpt),
        reso=args.reso
    )
    _, val_dataloder = prepare_dataset(
        name=args.name,
        reso=args.reso,
        bs=args.bs,
        seq=args.seq
    )
    # Run detection over the validation split and dump results to disk.
    detector.detect_all(val_dataloder, savedir='./results/detect')
|
[
"detect.eval.src.config.prepare_cfg",
"detect.eval.src.config.prepare_weight",
"argparse.ArgumentParser",
"detect.eval.src.dataset.prepare_dataset"
] |
[((220, 277), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""YOLO v3 evaluation"""'}), "(description='YOLO v3 evaluation')\n", (243, 277), False, 'import argparse\n'), ((1025, 1098), 'detect.eval.src.dataset.prepare_dataset', 'prepare_dataset', ([], {'name': 'args.name', 'reso': 'args.reso', 'bs': 'args.bs', 'seq': 'args.seq'}), '(name=args.name, reso=args.reso, bs=args.bs, seq=args.seq)\n', (1040, 1098), False, 'from detect.eval.src.dataset import prepare_dataset\n'), ((881, 903), 'detect.eval.src.config.prepare_cfg', 'prepare_cfg', (['args.name'], {}), '(args.name)\n', (892, 903), False, 'from detect.eval.src.config import prepare_cfg, prepare_weight\n'), ((946, 971), 'detect.eval.src.config.prepare_weight', 'prepare_weight', (['args.ckpt'], {}), '(args.ckpt)\n', (960, 971), False, 'from detect.eval.src.config import prepare_cfg, prepare_weight\n')]
|
'''
An Elman Network is implemented, taking the output of the last time step of the time series as prediction, and also to
compute the training loss. This is done because this output is thought of as the most informed one.
'''
import torch
from torch import nn
from sklearn.preprocessing import MaxAbsScaler
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score
import random
import numpy as np
import copy
import importlib
import os
import einops
from experiment_config import experiment_path, chosen_experiment
# Dynamically load the chosen experiment module from its file path and pull
# out its learning configuration (subscripted as a dict throughout this module).
spec = importlib.util.spec_from_file_location(chosen_experiment, experiment_path)
config = importlib.util.module_from_spec(spec)
spec.loader.exec_module(config)
configuration = config.learning_config
def choose_best(models_and_losses):
    """Pick the (model, loss) pair with the lowest loss.

    Args:
        models_and_losses: list of (model, loss) tuples, one per epoch
            (index 0 corresponds to epoch 1).

    Returns:
        ((model, loss), epoch): the best pair and its 1-based epoch number.
        Ties resolve to the earliest epoch, matching list.index semantics.
    """
    # Single O(n) pass instead of building the loss list twice and calling
    # .index() on it.
    index_best, best = min(enumerate(models_and_losses),
                           key=lambda pair: pair[1][1])
    return best, index_best + 1
def save_model(model, epoch, loss):
    """Persist a training checkpoint to <models_folder>/<classifier>/model.pth.

    Stores the epoch number, model state dict, optimizer state dict and loss.
    """
    path = os.path.join(config.models_folder, configuration['classifier'])
    if not os.path.exists(path):
        os.makedirs(path)
    try:
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': model.optimizer.state_dict(),
            'loss': loss,
        }, os.path.join(path, 'model.pth'))
    except TypeError:
        # Fallback: presumably model.state_dict is not callable here (e.g. it
        # already holds a snapshot rather than the bound method) -- store it
        # as-is instead.
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict,
            'optimizer_state_dict': model.optimizer.state_dict(),
            'loss': loss,
        }, os.path.join(path, 'model.pth'))
class RNN(nn.Module):
    def __init__(self, input_size, output_size, hidden_dim, n_layers):
        """Elman RNN with a linear read-out layer.

        Args:
            input_size: number of input features per time step.
            output_size: number of classes scored by the linear head.
            hidden_dim: width of the hidden state.
            n_layers: number of stacked RNN layers.
        """
        super(RNN, self).__init__()
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.output_size = output_size
        self.input_size = input_size
        # GPU when available, CPU otherwise.
        self._device = self.choose_device()
        self._rnn = nn.RNN(input_size, hidden_dim, n_layers, nonlinearity=configuration["activation function"]).to(self._device)
        self._fc = nn.Linear(hidden_dim, output_size).to(self._device)
        self.optimizer = self.choose_optimizer(alpha=configuration["learning rate"] * configuration["mini batch size"]) # linear scaling of LR
        # Marker consumed by scikit-learn tooling.
        self._estimator_type = 'classifier'
    def forward(self, x):
        """Run the RNN over a batch of sequences.

        Returns:
            (out, hidden): per-time-step class scores from the linear head and
            the final hidden state.
        """
        seq_length = len(x[0])
        # Initializing hidden state for first input using method defined below
        hidden = self.init_hidden(seq_length).to(self._device)
        # 2-D input is treated as (batch, seq) of scalars and expanded to a
        # trailing feature dimension of 1.
        if x.dim() == 2:
            x = x.view(-1,seq_length, 1)
        # Passing in the input and hidden state into the model and obtaining outputs.
        # Layers are moved to match the input's device before the call.
        if x.device == torch.device("cpu"):
            self._rnn = self._rnn.to(torch.device("cpu"))
            self._fc = self._fc.to(torch.device("cpu"))
            out, hidden = self._rnn(x, hidden)
            out = self._fc(out)
        else:
            self._rnn = self._rnn.to(self.choose_device())
            self._fc = self._fc.to(self.choose_device())
            out, hidden = self._rnn(x, hidden)
            # feed output into the fully connected layer
            out = self._fc(out)
        return out, hidden
def init_hidden(self, seq_length):
device = self._device
# This method generates the first hidden state of zeros which we'll use in the forward pass
hidden = torch.zeros(self.n_layers, seq_length, self.hidden_dim).to(device)
# We'll send the tensor holding the hidden state to the device we specified earlier as well
return hidden
def fit(self, train_loader=None, test_loader=None, X_train=None, y_train=None, X_test=None, y_test=None, early_stopping=True, control_lr=None, prev_epoch=1, prev_loss=1, grid_search_parameter=None):
torch.cuda.empty_cache()
self.early_stopping = early_stopping
self.control_lr = control_lr
if X_train and y_train:
X = X_train
y = y_train
mini_batch_size = configuration["mini batch size"]
criterion = nn.CrossEntropyLoss()
nominal_lr = configuration["learning rate"] * mini_batch_size # linear scaling of LR
lr = nominal_lr
loss = 10000000000 #set initial dummy loss
lrs = []
training_losses = []
models_and_val_losses = []
pause = 0 # for early stopping
if prev_epoch is None or grid_search_parameter:
prev_epoch = 1
if grid_search_parameter is not None:
configuration[configuration["grid search"][0]] = grid_search_parameter
for epoch in range(prev_epoch, configuration["number of epochs"] + 1):
if configuration["optimizer"] == 'SGD' and not epoch == prev_epoch: #ADAM optimizer has internal states and should therefore not be reinitialized every epoch; only for SGD bc here changing the learning rate makes sense
self.optimizer, lr = self.control_learning_rate(lr=lr, loss=loss, losses=training_losses, nominal_lr=nominal_lr, epoch=epoch)
lrs.append(lr)
if X_train and y_train:
zipped_X_y = list(zip(X, y))
random.shuffle(zipped_X_y) #randomly shuffle samples to have different mini batches between epochs
X, y = zip(*zipped_X_y)
X = np.array(X)
y = list(y)
if len(X) % mini_batch_size > 0: #drop some samples if necessary to fit with batch size
samples_to_drop = len(X) % mini_batch_size
X = X[:-samples_to_drop]
y = y[:-samples_to_drop]
mini_batches = X.reshape((int(len(X) / mini_batch_size), mini_batch_size, len(X[0])))
mini_batch_targets = np.array(y).reshape(int(len(y) / mini_batch_size), mini_batch_size)
input_seq = [torch.Tensor(i).view(len(i), -1, 1) for i in mini_batches]
target_seq = [torch.Tensor([i]).view(-1).long() for i in mini_batch_targets]
inout_seq = list(zip(input_seq, target_seq))
#optimizer.zero_grad() # Clears existing gradients from previous epoch
for sequences, labels in inout_seq:
labels = labels.to(self._device)
sequences = sequences.to(self._device)
self.optimizer.zero_grad() # Clears existing gradients from previous batch so as not to backprop through entire dataset
output, hidden = self(sequences)
if configuration['decision criteria'] == 'majority vote':
start_voting_outputs = int((configuration['calibration rate']) * output.size()[1])
voting_outputs = torch.stack([i[start_voting_outputs:] for i in
output]) # choose last n outputs of timeseries to do majority vote
relevant_outputs = voting_outputs.to(self._device)
labels = torch.stack([i[-1] for i in labels]).long()
labels = einops.repeat(labels, 'b -> (b copy)', copy=relevant_outputs.size()[1])
labels = torch.stack(torch.split(labels, relevant_outputs.size()[1]), dim=0)
loss = sum([criterion(relevant_outputs[i], labels[i]) for i in list(range(labels.size()[0]))]) / \
labels.size()[0]
else:
last_outputs = torch.stack(
[i[-1] for i in output]) # choose last output of timeseries (most informed output)
relevant_outputs = last_outputs.to(self._device)
labels = torch.stack([i[-1] for i in labels]).long()
loss = criterion(relevant_outputs, labels)
loss.backward() # Does backpropagation and calculates gradients
loss.backward() # Does backpropagation and calculates gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), configuration["gradient clipping"]) # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
self.optimizer.step() # Updates the weights accordingly
self.detach([last_outputs, sequences, labels, hidden]) #detach tensors from GPU to free memory
elif train_loader and test_loader:
import sys
toolbar_width = len(train_loader)
# setup toolbar
print('Epoch {}/{} completed:'.format(epoch, configuration["number of epochs"]))
sys.stdout.write("[%s]" % (" " * toolbar_width))
sys.stdout.flush()
sys.stdout.write("\b" * (toolbar_width+1)) # return to start of line, after '['
sys.stdout.flush()
for i, (sequences, labels, raw_seq) in enumerate(train_loader):
labels = labels.to(self._device)
sequences = sequences.to(self._device)
self.optimizer.zero_grad() # Clears existing gradients from previous batch so as not to backprop through entire dataset
output, hidden = self(sequences)
if configuration['decision criteria'] == 'majority vote':
if configuration['calibration rate'] == 1:
start_voting_outputs = int((configuration['calibration rate']) * output.size()[
1]) - 1 # equal to only using the last output
else:
start_voting_outputs = int((configuration['calibration rate']) * output.size()[1])
voting_outputs = torch.stack([i[start_voting_outputs:] for i in output]) #choose last n outputs of timeseries to do majority vote
relevant_outputs = voting_outputs.to(self._device)
labels = torch.stack([i[-1] for i in labels]).long()
labels = einops.repeat(labels, 'b -> (b copy)', copy=relevant_outputs.size()[1])
labels = torch.stack(torch.split(labels, relevant_outputs.size()[1]), dim=0)
loss = sum([criterion(relevant_outputs[i], labels[i]) for i in list(range(labels.size()[0]))])/labels.size()[0]
else:
last_outputs = torch.stack([i[-1] for i in output]) #choose last output of timeseries (most informed output)
relevant_outputs = last_outputs.to(self._device)
labels = torch.stack([i[-1] for i in labels]).long()
loss = criterion(relevant_outputs, labels)
loss.backward() # Does backpropagation and calculates gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), configuration["gradient clipping"]) # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
self.optimizer.step() # Updates the weights accordingly
self.detach([relevant_outputs, sequences, labels, hidden]) #detach tensors from GPU to free memory
progress = (i+1) / len(train_loader)
sys.stdout.write("- %.1f%% " %(progress*100))
sys.stdout.flush()
if config.dev_mode:
break
sys.stdout.write("]\n") # this ends the progress bar
sys.stdout.flush()
else:
print('Either provide X and y or dataloaders!')
if X_train and y_train:
training_losses.append(loss)
val_outputs = torch.stack([i[-1].view(-1) for i in self.predict(X_test)[1]]).to(self._device)
val_loss = criterion(val_outputs, torch.Tensor([np.array(y_test)]).view(-1).long().to(self._device))
else:
training_losses.append(loss)
pred, val_outputs, y_test = self.predict(test_loader=test_loader)
val_outputs = torch.stack([i[-1] for i in val_outputs]).to(self._device)
y_test = y_test.view(-1).long().to(self._device)
val_loss = criterion(val_outputs, y_test).to(self._device)
self.detach([val_outputs])
models_and_val_losses.append((copy.deepcopy(self.state_dict), val_loss.item()))
if configuration["save_model"]:
clf, ep = choose_best(models_and_val_losses)
if ep == epoch:
save_model(self, epoch, val_loss.item())
if self.early_stopping:
try:
if abs(models_and_val_losses[-1][1] - models_and_val_losses[-2][1]) < 1*10**-6:
pause += 1
if pause == 5:
print('Validation loss has not changed for {0} epochs! Early stopping of training after {1} epochs!'.format(pause, epoch))
return models_and_val_losses, training_losses, lrs
except IndexError:
pass
if not configuration["cross_validation"] and epoch % 10 == 0:
print('Epoch: {}/{}.............'.format(epoch, configuration["number of epochs"]), end=' ')
print("Loss: {:.4f}".format(loss.item()))
return models_and_val_losses, training_losses, lrs
    def predict(self, test_loader=None, X=None):
        """Predict class labels, either from raw sequences (X) or a DataLoader.

        Returns:
            With X: ([labels], outputs).
            With test_loader: ([labels], stacked outputs, ground-truth labels).
            With neither: prints an error and returns None.
        """
        if X is not None:
            input_sequences = torch.stack([torch.Tensor(i).view(len(i), -1) for i in X])
            input_sequences = input_sequences.to(self._device)
            outputs, hidden = self(input_sequences)
            last_outputs = torch.stack([i[-1] for i in outputs]).to(self._device)
            probs = nn.Softmax(dim=-1)(last_outputs)
            pred = torch.argmax(probs, dim=-1) # chose class that has highest probability
            self.detach([input_sequences, hidden, outputs])
            return [i.item() for i in pred], outputs
        elif test_loader:
            # Accumulators grown batch by batch.
            pred = torch.Tensor()
            y_test = torch.Tensor()
            outputs_cumm = torch.Tensor()
            for i, (input_sequences, labels, raw_seq) in enumerate(test_loader):
                input_sequences = input_sequences.to(self._device)
                outputs, hidden = self(input_sequences)
                if configuration['decision criteria'] == 'majority vote':
                    if configuration['calibration rate'] == 1:
                        start_voting_outputs = int((configuration['calibration rate']) * outputs.size()[
                            1]) - 1 # equal to only using the last output
                    else:
                        start_voting_outputs = int((configuration['calibration rate']) * outputs.size()[1])
                    voting_outputs = torch.stack([i[start_voting_outputs:] for i in outputs]) #choose last n outputs of timeseries to do majority vote
                    relevant_outputs = voting_outputs.to(self._device)
                    most_likely_outputs = torch.argmax(nn.Softmax(dim=-1)(relevant_outputs), dim=-1)
                    majority_vote_result = torch.mode(most_likely_outputs, dim=-1)[0]
                    pred_new = majority_vote_result.float()
                else:
                    last_outputs = torch.stack([i[-1] for i in outputs]).to(self._device)
                    probs = nn.Softmax(dim=-1)(last_outputs)
                    pred_new = torch.argmax(probs, dim=-1).float()
                outputs_cumm = torch.cat((outputs_cumm.to(self._device), outputs.float()), 0)
                pred = torch.cat((pred.to(self._device), pred_new), 0) # chose class that has highest probability
                y_test = torch.cat((y_test, labels.float()), 0) # chose class that has highest probability
                self.detach([input_sequences, hidden, outputs])
                if configuration["train test split"] <= 1:
                    share_of_test_set = len(test_loader)*configuration["train test split"]*labels.size()[0]
                else:
                    share_of_test_set = configuration["train test split"]
                if y_test.size()[0] >= share_of_test_set: #to choose the test set size (memory issues!!)
                    break
            return [i.item() for i in pred], outputs_cumm, y_test
        else:
            print('Either provide X or a dataloader!')
def choose_optimizer(self, alpha=configuration["learning rate"]):
if configuration["optimizer"] == 'Adam':
optimizer = torch.optim.Adam(self.parameters(), lr=alpha)
else:
optimizer = torch.optim.SGD(self.parameters(), lr=alpha)
return optimizer
    def control_learning_rate(self, lr=None, loss=None, losses=None, epoch=None, nominal_lr=None):
        """Adjust the learning rate per the configured schedule and rebuild the optimizer.

        'warm up': ramp linearly up to nominal_lr, then decay linearly to zero.
        'LR controlled': raise lr by 10% after an improvement, cut 10% otherwise.
        Anything else keeps lr unchanged.

        Returns:
            (optimizer, lr): a freshly built optimizer and the lr it uses.
        """
        warm_up_share = configuration["percentage of epochs for warm up"] / 100
        if self.control_lr == 'warm up' and epoch < int(warm_up_share * configuration["number of epochs"]):
            lr = nominal_lr * epoch / int((warm_up_share * configuration["number of epochs"]))
            optimizer = self.choose_optimizer(alpha=lr)
        elif self.control_lr == 'warm up' and epoch >= int(warm_up_share * configuration["number of epochs"]):
            lr = nominal_lr * (configuration["number of epochs"] - epoch) / int((1-warm_up_share) * configuration["number of epochs"])
            optimizer = self.choose_optimizer(alpha=lr)
        elif self.control_lr == 'LR controlled':
            # Compare the previous epoch's training loss with the current one.
            if losses[-1] > loss:
                lr = lr * 1.1
                optimizer = self.choose_optimizer(alpha=lr)
            elif losses[-1] <= loss:
                lr = lr * 0.90
                optimizer = self.choose_optimizer(alpha=lr)
        else:
            lr = lr
            optimizer = self.choose_optimizer(alpha=lr)
        return optimizer, lr
def preprocess(self, X_train, X_test):
scaler = self.fit_scaler(X_train)
X_train = self.preprocessing(X_train, scaler)
X_test = self.preprocessing(X_test, scaler)
return X_train, X_test
def fit_scaler(self, X):
X_zeromean = np.array([x - x.mean() for x in X]) # deduct it's own mean from every sample
maxabs_scaler = MaxAbsScaler().fit(X_zeromean) # fit scaler as to scale training data between -1 and 1
return maxabs_scaler
def preprocessing(self, X, scaler):
X_zeromean = np.array([x - x.mean() for x in X])
X = scaler.transform(X_zeromean)
return X
def score(self, y_test, y_pred):
metrics = precision_recall_fscore_support(y_test, y_pred, average='macro')
accuracy = accuracy_score(y_test, y_pred)
return [accuracy, metrics]
    def get_params(self, deep=True):
        """Expose constructor arguments (scikit-learn estimator API); `deep` is accepted for compatibility."""
        return {"hidden_dim": self.hidden_dim, "n_layers": self.n_layers, "output_size": self.output_size, "input_size" : self.input_size}
def choose_device(self):
is_cuda = torch.cuda.is_available()
if is_cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
return device
def detach(self, inputs=[]):
for i in inputs:
torch.detach(i)
torch.cuda.empty_cache()
return
|
[
"sys.stdout.write",
"torch.argmax",
"sklearn.metrics.accuracy_score",
"random.shuffle",
"sklearn.preprocessing.MaxAbsScaler",
"torch.nn.Softmax",
"sys.stdout.flush",
"torch.device",
"os.path.join",
"sklearn.metrics.precision_recall_fscore_support",
"importlib.util.module_from_spec",
"os.path.exists",
"importlib.util.spec_from_file_location",
"torch.Tensor",
"torch.nn.Linear",
"torch.zeros",
"copy.deepcopy",
"torch.cuda.is_available",
"torch.nn.RNN",
"torch.mode",
"torch.detach",
"os.makedirs",
"torch.stack",
"torch.nn.CrossEntropyLoss",
"numpy.array",
"torch.cuda.empty_cache"
] |
[((572, 646), 'importlib.util.spec_from_file_location', 'importlib.util.spec_from_file_location', (['chosen_experiment', 'experiment_path'], {}), '(chosen_experiment, experiment_path)\n', (610, 646), False, 'import importlib\n'), ((656, 693), 'importlib.util.module_from_spec', 'importlib.util.module_from_spec', (['spec'], {}), '(spec)\n', (687, 693), False, 'import importlib\n'), ((1022, 1085), 'os.path.join', 'os.path.join', (['config.models_folder', "configuration['classifier']"], {}), "(config.models_folder, configuration['classifier'])\n", (1034, 1085), False, 'import os\n'), ((1098, 1118), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1112, 1118), False, 'import os\n'), ((1128, 1145), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1139, 1145), False, 'import os\n'), ((3837, 3861), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3859, 3861), False, 'import torch\n'), ((4105, 4126), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4124, 4126), False, 'from torch import nn\n'), ((18994, 19058), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'average': '"""macro"""'}), "(y_test, y_pred, average='macro')\n", (19025, 19058), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((19078, 19108), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (19092, 19108), False, 'from sklearn.metrics import accuracy_score\n'), ((19369, 19394), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (19392, 19394), False, 'import torch\n'), ((19629, 19653), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (19651, 19653), False, 'import torch\n'), ((1360, 1391), 'os.path.join', 'os.path.join', (['path', '"""model.pth"""'], {}), "(path, 'model.pth')\n", (1372, 1391), False, 'import os\n'), ((2740, 2759), 'torch.device', 'torch.device', 
(['"""cpu"""'], {}), "('cpu')\n", (2752, 2759), False, 'import torch\n'), ((14169, 14196), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(-1)'}), '(probs, dim=-1)\n', (14181, 14196), False, 'import torch\n'), ((19436, 19456), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (19448, 19456), False, 'import torch\n'), ((19492, 19511), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (19504, 19511), False, 'import torch\n'), ((19605, 19620), 'torch.detach', 'torch.detach', (['i'], {}), '(i)\n', (19617, 19620), False, 'import torch\n'), ((1617, 1648), 'os.path.join', 'os.path.join', (['path', '"""model.pth"""'], {}), "(path, 'model.pth')\n", (1629, 1648), False, 'import os\n'), ((1992, 2088), 'torch.nn.RNN', 'nn.RNN', (['input_size', 'hidden_dim', 'n_layers'], {'nonlinearity': "configuration['activation function']"}), "(input_size, hidden_dim, n_layers, nonlinearity=configuration[\n 'activation function'])\n", (1998, 2088), False, 'from torch import nn\n'), ((2120, 2154), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'output_size'], {}), '(hidden_dim, output_size)\n', (2129, 2154), False, 'from torch import nn\n'), ((2798, 2817), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2810, 2817), False, 'import torch\n'), ((2854, 2873), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2866, 2873), False, 'import torch\n'), ((3435, 3490), 'torch.zeros', 'torch.zeros', (['self.n_layers', 'seq_length', 'self.hidden_dim'], {}), '(self.n_layers, seq_length, self.hidden_dim)\n', (3446, 3490), False, 'import torch\n'), ((5317, 5343), 'random.shuffle', 'random.shuffle', (['zipped_X_y'], {}), '(zipped_X_y)\n', (5331, 5343), False, 'import random\n'), ((5489, 5500), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5497, 5500), True, 'import numpy as np\n'), ((14116, 14134), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (14126, 14134), False, 'from torch import nn\n'), ((14400, 
14414), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (14412, 14414), False, 'import torch\n'), ((14436, 14450), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (14448, 14450), False, 'import torch\n'), ((14478, 14492), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (14490, 14492), False, 'import torch\n'), ((18641, 18655), 'sklearn.preprocessing.MaxAbsScaler', 'MaxAbsScaler', ([], {}), '()\n', (18653, 18655), False, 'from sklearn.preprocessing import MaxAbsScaler\n'), ((8869, 8917), 'sys.stdout.write', 'sys.stdout.write', (["('[%s]' % (' ' * toolbar_width))"], {}), "('[%s]' % (' ' * toolbar_width))\n", (8885, 8917), False, 'import sys\n'), ((8934, 8952), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8950, 8952), False, 'import sys\n'), ((8969, 9015), 'sys.stdout.write', 'sys.stdout.write', (["('\\x08' * (toolbar_width + 1))"], {}), "('\\x08' * (toolbar_width + 1))\n", (8985, 9015), False, 'import sys\n'), ((9065, 9083), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9081, 9083), False, 'import sys\n'), ((11718, 11741), 'sys.stdout.write', 'sys.stdout.write', (['"""]\n"""'], {}), "(']\\n')\n", (11734, 11741), False, 'import sys\n'), ((11787, 11805), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11803, 11805), False, 'import sys\n'), ((12658, 12688), 'copy.deepcopy', 'copy.deepcopy', (['self.state_dict'], {}), '(self.state_dict)\n', (12671, 12688), False, 'import copy\n'), ((14041, 14078), 'torch.stack', 'torch.stack', (['[i[-1] for i in outputs]'], {}), '([i[-1] for i in outputs])\n', (14052, 14078), False, 'import torch\n'), ((5941, 5952), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5949, 5952), True, 'import numpy as np\n'), ((6923, 6978), 'torch.stack', 'torch.stack', (['[i[start_voting_outputs:] for i in output]'], {}), '([i[start_voting_outputs:] for i in output])\n', (6934, 6978), False, 'import torch\n'), ((7689, 7725), 'torch.stack', 'torch.stack', (['[i[-1] for i in output]'], {}), '([i[-1] for i in 
output])\n', (7700, 7725), False, 'import torch\n'), ((11545, 11593), 'sys.stdout.write', 'sys.stdout.write', (["('- %.1f%% ' % (progress * 100))"], {}), "('- %.1f%% ' % (progress * 100))\n", (11561, 11593), False, 'import sys\n'), ((11611, 11629), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11627, 11629), False, 'import sys\n'), ((12373, 12414), 'torch.stack', 'torch.stack', (['[i[-1] for i in val_outputs]'], {}), '([i[-1] for i in val_outputs])\n', (12384, 12414), False, 'import torch\n'), ((15186, 15242), 'torch.stack', 'torch.stack', (['[i[start_voting_outputs:] for i in outputs]'], {}), '([i[start_voting_outputs:] for i in outputs])\n', (15197, 15242), False, 'import torch\n'), ((6039, 6054), 'torch.Tensor', 'torch.Tensor', (['i'], {}), '(i)\n', (6051, 6054), False, 'import torch\n'), ((9986, 10041), 'torch.stack', 'torch.stack', (['[i[start_voting_outputs:] for i in output]'], {}), '([i[start_voting_outputs:] for i in output])\n', (9997, 10041), False, 'import torch\n'), ((10661, 10697), 'torch.stack', 'torch.stack', (['[i[-1] for i in output]'], {}), '([i[-1] for i in output])\n', (10672, 10697), False, 'import torch\n'), ((13851, 13866), 'torch.Tensor', 'torch.Tensor', (['i'], {}), '(i)\n', (13863, 13866), False, 'import torch\n'), ((15516, 15555), 'torch.mode', 'torch.mode', (['most_likely_outputs'], {'dim': '(-1)'}), '(most_likely_outputs, dim=-1)\n', (15526, 15555), False, 'import torch\n'), ((15760, 15778), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (15770, 15778), False, 'from torch import nn\n'), ((7201, 7237), 'torch.stack', 'torch.stack', (['[i[-1] for i in labels]'], {}), '([i[-1] for i in labels])\n', (7212, 7237), False, 'import torch\n'), ((7920, 7956), 'torch.stack', 'torch.stack', (['[i[-1] for i in labels]'], {}), '([i[-1] for i in labels])\n', (7931, 7956), False, 'import torch\n'), ((15427, 15445), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (15437, 15445), False, 'from 
torch import nn\n'), ((15677, 15714), 'torch.stack', 'torch.stack', (['[i[-1] for i in outputs]'], {}), '([i[-1] for i in outputs])\n', (15688, 15714), False, 'import torch\n'), ((15824, 15851), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(-1)'}), '(probs, dim=-1)\n', (15836, 15851), False, 'import torch\n'), ((6128, 6145), 'torch.Tensor', 'torch.Tensor', (['[i]'], {}), '([i])\n', (6140, 6145), False, 'import torch\n'), ((10208, 10244), 'torch.stack', 'torch.stack', (['[i[-1] for i in labels]'], {}), '([i[-1] for i in labels])\n', (10219, 10244), False, 'import torch\n'), ((10869, 10905), 'torch.stack', 'torch.stack', (['[i[-1] for i in labels]'], {}), '([i[-1] for i in labels])\n', (10880, 10905), False, 'import torch\n'), ((12145, 12161), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (12153, 12161), True, 'import numpy as np\n')]
|
from gamehelper import card2str
import sqlite3
# Database column information
PLAYER_ROUND_ID_COLUMN = 0
PLAYER_USER_ID_COLUMN = 1
PLAYER_TABLE_ID_COLUMN = 2
PLAYER_INITIAL_STACK_COLUMN = 3
PLAYER_STACK_COLUMN = 4
PLAYER_MY_TURN_COLUMN = 5
PLAYER_HAND1_COLUMN = 6
PLAYER_HAND2_COLUMN = 7
PLAYER_IS_FOLDED_COLUMN = 8
PLAYER_DISPLAY_COLUMN = 9
# Constants
SELECT_QUERY = "SELECT Player.*, User.display FROM players Player INNER JOIN users User on Player.user_id = User.id WHERE User.apikey=? ORDER BY Player.id DESC LIMIT 1"
SELECT_OPPONENTS_QUERY = "SELECT * FROM players WHERE table_id=? AND round_id=? AND user_id!=?"
INSERT_ACTION_QUERY = "INSERT INTO player_actions ('round_id', 'player_id', 'action', 'amount') VALUES (?, ?, ?, ?)"
GET_DISPLAY_QUERY = "SELECT User.* FROM users User INNER JOIN players Player on User.id = Player.user_id WHERE Player.user_id=?"
class PlayerNotFoundError(Exception):
pass
class Player:
def __init__(self, player):
self.round_id = player[PLAYER_ROUND_ID_COLUMN]
self.player_id = player[PLAYER_USER_ID_COLUMN]
self.table_id = player[PLAYER_TABLE_ID_COLUMN]
self.initial_stack = player[PLAYER_INITIAL_STACK_COLUMN]
self.stack = player[PLAYER_STACK_COLUMN]
self.my_turn = bool(player[PLAYER_MY_TURN_COLUMN])
card1 = card2str(player[PLAYER_HAND1_COLUMN])
card2 = card2str(player[PLAYER_HAND2_COLUMN])
self.hand = [card1, card2]
self.is_folded = bool(player[PLAYER_IS_FOLDED_COLUMN])
self.display = None
self.display = player.get_display()
def get_round_id(self):
return self.round_id
def get_player_id(self):
return self.player_id
def get_table_id(self):
return self.table_id
def get_display(self):
if self.display is None:
conn = sqlite3.connect(app.config['DATABASE'])
c = conn.cursor()
data = (self.get_player_id(), )
c.execute(GET_DISPLAY_QUERY, data)
user = c.fetchone()
conn.close()
self.display = user[USER_DISPLAY_COLUMN]
return self.display
def get_initial_stack(self):
return self.initial_stack
def get_stack(self):
return self.stack
def get_my_turn(self):
return self.my_turn
def get_hand(self):
return [card for card in self.hand if card is not None]
def get_is_folded(self):
return self.is_folded
def post_action(self, action, amount=0):
action = action2db(action)
conn = sqlite3.connect(app.config['DATABASE'])
c = conn.cursor()
data = (self.get_round_id(), self.get_player_id(), action, amount, )
c.execute(INSERT_ACTION_QUERY, data)
conn.close()
def get_public_info(self):
info = {}
info['name'] = self.get_display()
info['initial_stack'] = self.get_initial_stack()
info['stack'] = self.get_stack()
info['folded'] = self.get_is_folded()
return info
@classmethod
def get(self_class, api_key):
conn = sqlite3.connect(app.config['DATABASE'])
c = conn.cursor()
data = (api_key, )
c.execute(SELECT_QUERY, data)
player = c.fetchone()
conn.close()
try:
if player is None:
raise PlayerNotFoundError
return Player(player)
except PlayerNotFoundError:
return None
@classmethod
def get_opponents(self_class, player):
conn = sqlite3.connect(app.config['DATABASE'])
c = conn.cursor()
data = (player.get_table_id(), player.get_round_id(), player.get_user_id(), )
c.execute(SELECT_OPPONENTS_QUERY, data)
opponents = c.fetchall()
conn.close()
return [Player(opponent).get_public_info() for opponent in opponents]
|
[
"sqlite3.connect",
"gamehelper.card2str"
] |
[((1371, 1408), 'gamehelper.card2str', 'card2str', (['player[PLAYER_HAND1_COLUMN]'], {}), '(player[PLAYER_HAND1_COLUMN])\n', (1379, 1408), False, 'from gamehelper import card2str\n'), ((1425, 1462), 'gamehelper.card2str', 'card2str', (['player[PLAYER_HAND2_COLUMN]'], {}), '(player[PLAYER_HAND2_COLUMN])\n', (1433, 1462), False, 'from gamehelper import card2str\n'), ((2611, 2650), 'sqlite3.connect', 'sqlite3.connect', (["app.config['DATABASE']"], {}), "(app.config['DATABASE'])\n", (2626, 2650), False, 'import sqlite3\n'), ((3143, 3182), 'sqlite3.connect', 'sqlite3.connect', (["app.config['DATABASE']"], {}), "(app.config['DATABASE'])\n", (3158, 3182), False, 'import sqlite3\n'), ((3582, 3621), 'sqlite3.connect', 'sqlite3.connect', (["app.config['DATABASE']"], {}), "(app.config['DATABASE'])\n", (3597, 3621), False, 'import sqlite3\n'), ((1889, 1928), 'sqlite3.connect', 'sqlite3.connect', (["app.config['DATABASE']"], {}), "(app.config['DATABASE'])\n", (1904, 1928), False, 'import sqlite3\n')]
|
import numpy as np
import pymc3 as pm
import theano
import theano.tensor as tt
# for reproducibility here's some version info for modules used in this notebook
import platform
import IPython
import matplotlib
import matplotlib.pyplot as plt
import emcee
import corner
import os
from autograd import grad
from files.myIOlib import show_seaborn_plot
print("Python version: {}".format(platform.python_version()))
print("IPython version: {}".format(IPython.__version__))
print("Numpy version: {}".format(np.__version__))
print("Theano version: {}".format(theano.__version__))
print("PyMC3 version: {}".format(pm.__version__))
print("Matplotlib version: {}".format(matplotlib.__version__))
print("emcee version: {}".format(emcee.__version__))
print("corner version: {}".format(corner.__version__))
import numpy as np
import pymc3 as pm
import arviz as az
#Ordering imports
from myIOlib import *
from myModel import *
from myFUQ import *
#Ordering tools
import numpy as np
import arviz as az
from scipy import stats
import matplotlib as mpl
from theano import as_op
import theano.tensor as tt
import scipy.special as sc
import math
import time
# Start timing code execution
t0 = time.time()
################# User settings ###################
# Define the amount of samples
N = 1
# Define time of simulation
timestep = 30
endtime = 10320
t1steps = round(endtime / timestep)
Nt = 2*t1steps+1
x = timestep * np.linspace(0, 2 * t1steps, Nt)
# Units
MPA = 1e6
# Forward/Bayesian Inference calculation
performInference = True
useFEA = False
# Location to store output
outfile = 'output/output_%d.png' % N
#################### Core #########################
# Generate text file for parameters
generate_txt( "parameters.txt" )
# Import parameters.txt to variables
print("Reading model parameters...")
params_aquifer, params_well = read_from_txt( "parameters.txt" )
# Construct the objects for the doublet model
print("Constructing the doublet model...")
aquifer = Aquifer(params_aquifer)
# doublet = DoubletGenerator(aquifer)
from myUQ import *
from files.myUQlib import *
######## Forward Uncertainty Quantification #########
if not performInference:
# Run Bayesian FUQ (input parameters not np. but pm. -> random values, as pdf not work in FEA -> output array of values -> mean, stdv -> pm. )
import pymc3 as pm
from pymc3.distributions import Interpolated
print('Running on PyMC3 v{}'.format(pm.__version__))
# Run Forward Uncertainty Quantification
print("\r\nSolving Forward Uncertainty Quantification...")
# Set input from stoichastic parameters
print("\r\nSetting input from stoichastic parameters...")
parametersRVS = generateRVSfromPDF(N)
print("Stoichastic parameters", parametersRVS)
if useFEA:
# Run Finite Element Analysis (Forward)
print("\r\nRunning FEA...")
sol = performFEA(parametersRVS, aquifer, N, timestep, endtime)
else:
# # Run Analytical Analysis (Forward)
print("\r\nRunning Analytical Analysis...")
sol = performAA(parametersRVS, x)
###########################
# Post processing #
###########################
# Output pressure/temperature matrix and plot for single point in time
fig, ax = plt.subplots(1, 1, figsize=(10, 7), tight_layout=True)
ax.set(xlabel='Wellbore pressure [Pa]', ylabel='Probability')
ax.hist(sol[0][:, t1steps], density=True, histtype='stepfilled', alpha=0.2, bins=20)
# plt.show()
# Evaluate the doublet model
print("\r\nEvaluating numerical solution for the doublet model...")
doublet = DoubletGenerator(aquifer, sol)
pnodelist, Tnodelist = evaluateDoublet(doublet)
######## Inverse Uncertainty Quantification #########
else:
# Run Bayesian Inference
import pymc3 as pm
from pymc3.distributions import Interpolated
from pymc3.distributions.timeseries import EulerMaruyama
print('Running on PyMC3 v{}'.format(pm.__version__))
# Set distribution settings
chains = 4
ndraws = 15 # number of draws from the distribution
nburn = 5 # number of "burn-in points" (which we'll discard)
# Library functions
def get_𝜇_K(porosity, size):
constant = np.random.uniform(low=10, high=100, size=size) # np.random.uniform(low=3.5, high=5.8, size=size)
tau = np.random.uniform(low=0.3, high=0.5, size=size)
tothepower = np.random.uniform(low=3, high=5, size=size)
rc = np.random.uniform(low=10e-6, high=30e-6, size=size)
SSA = 3 / rc
permeability = constant * tau ** 2 * (porosity.random(size=N) ** tothepower / SSA ** 2)
𝜇_K = np.mean(permeability)
# constant = np.random.uniform(low=3.5, high=5.8, size=N)
# tothepower = np.random.uniform(low=3, high=5, size=N)
# Tau = (2) ** (1 / 2)
# S0_sand = np.random.uniform(low=1.5e2, high=2.2e2, size=N) # specific surface area [1/cm]
# K_samples = constant * (φpdf.random(size=N) ** tothepower / S0_sand ** 2)
# Kpdf = pm.Lognormal('K', mu=math.log(np.mean(K_samples)), sd=1) #joined distribution
return 𝜇_K
###########################
# Synthetic data #
###########################
# Set up our data
Nt = Nt # number of data points
CV = 0.001 # coefficient of variation noise
# True data
K_true = 1e-12 # 2.2730989084434785e-08
φ_true = 0.163
H_true = 70
ct_true = 1e-10
Q_true = 0.07
cs_true = 2650
# Lognormal priors for true parameters
Hpdf = stats.lognorm(scale=H_true, s=0.01)
φpdf = stats.lognorm(scale=φ_true, s=0.01)
Kpdf = stats.lognorm(scale=K_true, s=0.01)
ctpdf = stats.lognorm(scale=ct_true, s=0.01)
Qpdf = stats.lognorm(scale=Q_true, s=0.01)
cspdf = stats.lognorm(scale=cs_true, s=0.01)
theta = parametersRVS = [Hpdf.rvs(size=1), φpdf.rvs(size=1), Kpdf.rvs(size=1), ctpdf.rvs(size=1),
Qpdf.rvs(size=1), cspdf.rvs(size=1)]
# parametersRVS = [H_true, φ_true, K_true, ct_true, Q_true, cs_true]
# theta = parametersRVS = [H_true, φ_true, K_true, ct_true, Q_true, cs_true]
truemodel = my_model(theta, x)
print("truemodel", truemodel)
# Make data
np.random.seed(716742) # set random seed, so the data is reproducible each time
sigma = CV * np.mean(truemodel)
# data = sigma * np.random.randn(Nt) + truemodel
# Use real data
data = get_welldata('PBH')
# plot transient test
parameters = {'axes.labelsize': 14,
'axes.titlesize': 18}
plt.rcParams.update(parameters)
plt.figure(figsize=(10, 3))
# plt.subplot(121)
plt.plot(truemodel/MPA, 'k', label='$p_{true}$', alpha=0.5), plt.plot(data/MPA, 'r', label='$σ_{noise} = 1.0e-2$', alpha=0.5),\
plt.ylabel("p(t) [MPa]"), plt.xlabel("t [min]"), #plt.legend()
plt.tight_layout()
plt.show()
# Create our Op
logl = LogLikeWithGrad(my_loglike, data, x, sigma)
print(logl)
###########################
# Synthetic data #
###########################
# with pm.Model() as SyntheticModel:
#
# # True data (what actually drives the true pressure)
# K_true = 1e-12 # 2.2730989084434785e-08
# φ_true = 0.163
# H_true = 70
# ct_true = 1e-10
# Q_true = 0.07
# cs_true = 2650
#
# # Lognormal priors for true parameters
# Hpdf = pm.Lognormal('H', mu=np.log(H_true), sd=0.01)
# φpdf = pm.Lognormal('φ', mu=np.log(φ_true), sd=0.01)
# Kpdf = pm.Lognormal('K', mu=np.log(K_true), sd=0.01)
# ctpdf = pm.Lognormal('ct', mu=np.log(ct_true), sd=0.01)
# Qpdf = pm.Lognormal('Q', mu=np.log(Q_true), sd=0.01)
# cspdf = pm.Lognormal('cs', mu=np.log(cs_true), sd=0.01)
# parametersRVS = [Hpdf.random(size=Nt), φpdf.random(size=Nt), Kpdf.random(size=Nt), ctpdf.random(size=Nt),
# Qpdf.random(size=Nt), cspdf.random(size=Nt)]
#
# # parametersRVS = [H_true, φ_true, K_true, ct_true, Q_true, cs_true]
# solAA = performAA(parametersRVS, aquifer, N, timestep, endtime)
# p_true = np.mean(solAA[0].T, axis=1)
# print(p_true)
#
# # Z_t observed data
# np.random.seed(716742) # set random seed, so the data is reproducible each time
# σnoise = 0.1
# sd_p = σnoise * np.var(p_true) ** 0.5
# z_t = p_true + np.random.randn(Nt) * sd_p
# use PyMC3 to sampler from log-likelihood
with pm.Model() as opmodel:
###########################
# Prior information #
###########################
# Mean of expert variables (the specific informative prior)
𝜇_H = aquifer.H # lower_H = 35, upper_H = 105 (COV = 50%)
𝜇_φ = aquifer.φ # lower_φ = 0.1, upper_φ = 0.3 (COV = 50%)
𝜇_ct = aquifer.ct # lower_ct = 0.5e-10, upper_ct = 1.5e-10 (COV = 50%)
𝜇_Q = aquifer.Q # lower_Q = 0.35, upper_Q = 0.105 (COV = 50%)
𝜇_cs = aquifer.cps # lower_cs = 1325 upper_cs = 3975 (COV = 50%)
# Standard deviation of variables (CV=50%)
sd_H = 0.3
sd_φ = 0.3
sd_K = 0.3
sd_ct = 0.3
sd_Q = 0.3
sd_cs = 0.001
# Lognormal priors for unknown model parameters
Hpdf = pm.Uniform('H', lower=35, upper=105)
φpdf = pm.Uniform('φ', lower=0.1, upper=0.3)
Kpdf = pm.Uniform('K', lower=0.5e-13, upper=1.5e-13)
ctpdf = pm.Uniform('ct', lower=0.5e-10, upper=1.5e-10)
Qpdf = pm.Uniform('Q', lower=0.035, upper=0.105)
cspdf = pm.Uniform('cs', lower=1325, upper=3975)
# Hpdf = pm.Lognormal('H', mu=np.log(𝜇_H), sd=sd_H)
# φpdf = pm.Lognormal('φ', mu=np.log(𝜇_φ), sd=sd_φ)
# Kpdf = pm.Lognormal('K', mu=np.log(get_𝜇_K(φpdf, N)), sd=sd_K)
# ctpdf = pm.Lognormal('ct', mu=np.log(𝜇_ct), sd=sd_ct)
# Qpdf = pm.Lognormal('Q', mu=np.log(𝜇_Q), sd=sd_Q)
# cspdf = pm.Lognormal('cs', mu=np.log(𝜇_cs), sd=sd_cs)
thetaprior = [Hpdf, φpdf, Kpdf, ctpdf, Qpdf, cspdf]
# convert thetaprior to a tensor vector
theta = tt.as_tensor_variable([Hpdf, φpdf, Kpdf, ctpdf, Qpdf, cspdf])
# use a DensityDist
pm.DensityDist(
'likelihood',
lambda v: logl(v),
observed={'v': theta}
# random=my_model_random
)
with opmodel:
# Inference
trace = pm.sample(ndraws, cores=1, chains=chains, tune=nburn, discard_tuned_samples=True)
# plot the traces
print(az.summary(trace, round_to=2))
_ = pm.traceplot(trace, lines=(('K', {}, [K_true ]), ('φ', {}, [φ_true]), ('H', {}, [H_true]), ('ct', {}, [ct_true])
, ('Q', {}, [Q_true]), ('cs', {}, [cs_true])))
# put the chains in an array (for later!)
# samples_pymc3_2 = np.vstack((trace['K'], trace['φ'], trace['H'], trace['ct'], trace['Q'], trace['cs'])).T
# just because we can, let's draw posterior predictive samples of the model
# ppc = pm.sample_posterior_predictive(trace, samples=250, model=opmodel)
# _, ax = plt.subplots()
#
# for vals in ppc['likelihood']:
# plt.plot(x, vals, color='b', alpha=0.05, lw=3)
# ax.plot(x, my_model([H_true, φ_true, K_true, ct_true, Q_true, cs_true], x), 'k--', lw=2)
#
# ax.set_xlabel("Predictor (stdz)")
# ax.set_ylabel("Outcome (stdz)")
# ax.set_title("Posterior predictive checks");
###########################
# Post processing #
###########################
# print('Posterior distributions.')
# cmap = mpl.cm.autumn
# for param in ['K', 'φ', 'H', 'ct', 'Q', 'cs']:
# plt.figure(figsize=(8, 2))
# samples = trace[param]
# smin, smax = np.min(samples), np.max(samples)
# x = np.linspace(smin, smax, 100)
# y = stats.gaussian_kde(samples)(x)
# plt.axvline({'K': K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q': Q_true, 'cs': cs_true}[param], c='k')
# plt.ylabel('Probability density')
# plt.title(param)
#
# plt.tight_layout();
data_spp = az.from_pymc3(trace=trace)
trace_K = az.plot_posterior(data_spp, var_names=['K'], kind='hist')
trace_φ = az.plot_posterior(data_spp, var_names=['φ'], kind='hist')
trace_H = az.plot_posterior(data_spp, var_names=['H'], kind='hist')
trace_Q = az.plot_posterior(data_spp, var_names=['Q'], kind='hist')
trace_ct = az.plot_posterior(data_spp, var_names=['ct'], kind='hist')
trace_cs = az.plot_posterior(data_spp, var_names=['cs'], kind='hist')
joint_plt = az.plot_joint(data_spp, var_names=['K', 'φ'], kind='kde', fill_last=False);
# trace_fig = az.plot_trace(trace, var_names=[ 'H', 'φ', 'K', 'ct', 'Q', 'cs'], compact=True);
plt.show()
# a = np.random.uniform(0.1, 0.3)
# b = np.random.uniform(0.5e-12, 1.5e-12)
# _, ax = plt.subplots(1, 2, figsize=(10, 4))
# az.plot_dist(a, color="C1", label="Prior", ax=ax[0])
# az.plot_posterior(data_spp, color="C2", var_names=['φ'], ax=ax[1], kind='hist')
# az.plot_dist(b, color="C1", label="Prior", ax=ax[1])
# az.plot_posterior(data_spp, color="C2", var_names=['K'], label="Posterior", ax=ax[0], kind='hist')
plt.show()
with pm.Model() as PriorModel:
###########################
# Prior information #
###########################
# Mean of expert variables (the specific informative prior)
𝜇_H = aquifer.H # lower_H = 35, upper_H = 105 (COV = 50%)
𝜇_φ = aquifer.φ # lower_φ = 0.1, upper_φ = 0.3 (COV = 50%)
𝜇_ct = aquifer.ct # lower_ct = 0.5e-10, upper_ct = 1.5e-10 (COV = 50%)
𝜇_Q = aquifer.Q # lower_Q = 0.35, upper_Q = 0.105 (COV = 50%)
𝜇_cs = aquifer.cps # lower_cs = 1325 upper_cs = 3975 (COV = 50%)
# Standard deviation of variables (CV=50%)
sd_H = 0.3
sd_φ = 0.3
sd_K = 0.3
sd_ct = 0.3
sd_Q = 0.3
sd_cs = 0.001
# Lognormal priors for unknown model parameters
Hpdf = pm.Lognormal('H', mu=np.log(𝜇_H), sd=sd_H)
φpdf = pm.Lognormal('φ', mu=np.log(𝜇_φ), sd=sd_φ)
Kpdf = pm.Lognormal('K', mu=np.log(get_𝜇_K(φpdf, N)), sd=sd_K)
ctpdf = pm.Lognormal('ct', mu=np.log(𝜇_ct), sd=sd_ct)
Qpdf = pm.Lognormal('Q', mu=np.log(𝜇_Q), sd=sd_Q)
cspdf = pm.Lognormal('cs', mu=np.log(𝜇_cs), sd=sd_cs)
# Uniform priors for unknown model parameters
# Hpdf = pm.Uniform('H', lower=35, upper=105)
# φpdf = pm.Lognormal('φ', mu=np.log(𝜇_φ), sd=sd_φ)
#φpdf = pm.Uniform('φ', lower=0.1, upper=0.3)
# Kpdf = pm.Lognormal('K', mu=np.log(get_𝜇_K(φpdf, N)), sd=sd_K)
# ctpdf = pm.Uniform('ct', lower=0.5e-10, upper=1.5e-10)
# Qpdf = pm.Uniform('Q', lower=0.035, upper=0.105)
# cspdf = pm.Uniform('cs', lower=1325, upper=3975)
theta = [Hpdf.random(size=1), φpdf.random(size=1), Kpdf.random(size=1), ctpdf.random(size=1), Qpdf.random(size=1), cspdf.random(size=1)]
# Run Analytical Analysis (Backward)
print("\r\nRunning Analytical Analysis... (Prior, pymc3)")
# p_t = my_model(theta, x) # draw single sample multiple points in time
# p_t = np.mean(solAA[0].T, axis=1) # draw single sample multiple points in time
# Likelihood (sampling distribution) of observations
# z_h = pm.Lognormal('z_h', mu=np.log(p_t), sd=sigma, observed=np.log(data))
# plot 95% CI with seaborn
# with open('pprior.npy', 'wb') as pprior:
# np.save(pprior, p)
# show_seaborn_plot('pprior.npy', "pwell")
# plt.show()
# mu_p = np.mean(p_t)
# sd_p = np.var(p_t) ** 0.5
# p = pm.Lognormal('p', mu=np.log(mu_p), sd=sd_p)
# # Likelihood (predicted distribution) of observations
# y = pm.Normal('y', mu=p, sd=1e4, observed=z_t)
# with PriorModel:
# # Inference
# start = pm.find_MAP() # Find starting value by optimization
# step = pm.NUTS(scaling=start) # Instantiate MCMC sampling algoritm #HamiltonianMC
#
# trace = pm.sample(10000, start=start, step=step, cores=1, chains=chains)
#
# print(az.summary(trace, round_to=2))
# chain_count = trace.get_values('K').shape[0]
# T_pred = pm.sample_posterior_predictive(trace, samples=chain_count, model=PriorModel)
# data_spp = az.from_pymc3(trace=trace)
# joint_plt = az.plot_joint(data_spp, var_names=['K', 'φ'], kind='kde', fill_last=False);
# trace_fig = az.plot_trace(trace, var_names=[ 'H', 'φ', 'K', 'ct', 'Q', 'cs'], figsize=(12, 8));
# az.plot_trace(trace, var_names=['H', 'φ', 'K', 'ct', 'Q'], compact=True);
# fig, axes = az.plot_forest(trace, var_names=['H', 'φ', 'K', 'ct', 'Q'], combined=True) #94% confidence interval with only lines (must normalize the means!)
# axes[0].grid();
# trace_H = az.plot_posterior(data_spp, var_names=['φ'], kind='hist')
# trace_p = az.plot_posterior(data_spp, var_names=['p'], kind='hist')
# pm.traceplot(trace)
# plt.show()
traces = [trace]
for _ in range(2):
with pm.Model() as InferenceModel:
# Priors are posteriors from previous iteration
H = from_posterior('H', trace['H'])
φ = from_posterior('φ', trace['φ'])
K = from_posterior('K', trace['K'])
ct = from_posterior('ct', trace['ct'])
Q = from_posterior('Q', trace['Q'])
cs = from_posterior('cs', trace['cs'])
# Random sample method
# parametersRVS = [H.random(size=Nt), φ.random(size=Nt), K.random(size=Nt), ct.random(size=Nt), Q.random(size=Nt), cs.random(size=Nt)]
print("\r\nRunning Analytical Analysis... (Backward, pymc3)")
# solAA = performAA(parametersRVS, aquifer, N, timestep, endtime)
# p_t = np.mean(solAA[0].T, axis=1) # draw single sample multiple points in time
# Likelihood (sampling distribution) of observations
# z_h = pm.Lognormal('z_h', mu=np.log(p_t), sd=sd_p, observed=np.log(z_t))
# Inference
# start = pm.find_MAP()
# step = pm.NUTS(scaling=start)
# trace = pm.sample(ndraws, start=start, step=step, cores=1, chains=chains)
thetaprior = [H, φ, K, ct, Q, cs]
# convert thetaprior to a tensor vector
theta = tt.as_tensor_variable([H, φ, K, ct, Q, cs])
# use a DensityDist
pm.DensityDist(
'likelihood',
lambda v: logl(v),
observed={'v': theta}
# random=my_model_random
)
trace = pm.sample(ndraws, cores=1, chains=chains)
traces.append(trace)
# plt.figure(figsize=(10, 3))
# plt.subplot(121)
# plt.plot(np.percentile(trace[ph], [2.5, 97.5], axis=0).T, 'k', label='$\hat{x}_{95\%}(t)$')
# plt.plot(p_t, 'r', label='$p(t)$')
# plt.legend()
#
# plt.subplot(122)
# plt.hist(trace[lam], 30, label='$\hat{\lambda}$', alpha=0.5)
# plt.axvline(porosity_true, color='r', label='$\lambda$', alpha=0.5)
# plt.legend();
#
# plt.figure(figsize=(10, 6))
# plt.subplot(211)
# plt.plot(np.percentile(trace[ph][..., 0], [2.5, 97.5], axis=0).T, 'k', label='$\hat{p}_{95\%}(t)$')
# plt.plot(ps, 'r', label='$p(t)$')
# plt.legend(loc=0)
# plt.subplot(234), plt.hist(trace['Kh']), plt.axvline(K), plt.xlim([1e-13, 1e-11]), plt.title('K')
# plt.subplot(235), plt.hist(trace['φh']), plt.axvline(φ), plt.xlim([0, 1.0]), plt.title('φ')
# plt.subplot(236), plt.hist(trace['Hh']), plt.axvline(m), plt.xlim([50, 100]), plt.title('H')
# plt.tight_layout()
#
# plt.show()
###########################
# Post processing #
###########################
print('Posterior distributions after ' + str(len(traces)) + ' iterations.')
cmap = mpl.cm.autumn
for param in ['K', 'φ', 'H', 'ct', 'Q']:
plt.figure(figsize=(8, 2))
for update_i, trace in enumerate(traces):
samples = trace[param]
smin, smax = np.min(samples), np.max(samples)
x = np.linspace(smin, smax, 100)
y = stats.gaussian_kde(samples)(x)
plt.plot(x, y, color=cmap(1 - update_i / len(traces)))
plt.axvline({'K': K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q': Q_true}[param], c='k')
plt.ylabel('Frequency')
plt.title(param)
plt.tight_layout();
plt.show()
# Stop timing code execution
t2 = time.time()
print("CPU time [s] : ", t2 - t0)
# Stop timing code execution
print("\r\nDone. Post-processing...")
#################### Postprocessing #########################
print('Post processing. Plot 95% CI with seaborn')
cmap = mpl.cm.autumn
plt.figure(figsize=(8, 2))
for node in range(len(pnodelist)):
with open('pnode' + str(node+2) + '.npy', 'wb') as f:
np.save(f, pnodelist[node])
show_seaborn_plot('pnode' + str(node+2) + '.npy', str(node+2))
# plt.legend(str(node+2))
plt.xlabel("t [min]", size=14)
plt.ylabel("p(t) [MPa]", size=14)
plt.tight_layout();
plt.figure(figsize=(8, 2))
for node in range(len(Tnodelist)):
with open('Tnode' + str(node+2) + '.npy', 'wb') as f:
np.save(f, Tnodelist[node])
show_seaborn_plot('Tnode' + str(node+2) + '.npy', str(node+2))
plt.legend(str(node+2))
plt.xlabel("t [min]", size=14)
plt.ylabel("T(t) [K]", size=14)
plt.tight_layout();
# plt.figure(figsize=(8, 2))
# with open('power.npy', 'wb') as f:
# np.save(f, doublet.Phe/1e6)
# show_seaborn_plot('power.npy', 'power output')
# plt.xlabel("t [min]", size=14)
# plt.ylabel("P(t) [MW]", size=14)
plt.show()
# plot 95% CI with seaborn
# with open('pprior.npy', 'wb') as pprior:
# np.save(pprior, sol[0])
#
# show_seaborn_plot('pprior.npy', "p9")
# plt.show()
# with open('pmatrix.npy', 'rb') as f:
# a = np.load(f)
# print("saved solution matrix", a)
# plot 95% CI with seaborn
# with open('pnode9.npy', 'wb') as f9:
# np.save(f9, doublet.pnode9)
#
# with open('pnode8.npy', 'wb') as f8:
# np.save(f8, doublet.pnode8)
# plot_solution(sol, outfile)
# plt.show()
|
[
"pymc3.sample",
"matplotlib.pyplot.title",
"platform.python_version",
"numpy.random.seed",
"arviz.plot_joint",
"arviz.from_pymc3",
"matplotlib.pyplot.figure",
"numpy.mean",
"pymc3.Uniform",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axvline",
"theano.tensor.as_tensor_variable",
"arviz.plot_posterior",
"numpy.max",
"matplotlib.pyplot.rcParams.update",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"pymc3.Model",
"matplotlib.pyplot.show",
"numpy.save",
"scipy.stats.gaussian_kde",
"numpy.min",
"pymc3.traceplot",
"matplotlib.pyplot.ylabel",
"arviz.summary",
"numpy.random.uniform",
"numpy.log",
"matplotlib.pyplot.plot",
"scipy.stats.lognorm",
"time.time",
"matplotlib.pyplot.xlabel"
] |
[((1211, 1222), 'time.time', 'time.time', ([], {}), '()\n', (1220, 1222), False, 'import time\n'), ((21085, 21096), 'time.time', 'time.time', ([], {}), '()\n', (21094, 21096), False, 'import time\n'), ((21351, 21377), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 2)'}), '(figsize=(8, 2))\n', (21361, 21377), True, 'import matplotlib.pyplot as plt\n'), ((21604, 21634), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [min]"""'], {'size': '(14)'}), "('t [min]', size=14)\n", (21614, 21634), True, 'import matplotlib.pyplot as plt\n'), ((21635, 21668), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""p(t) [MPa]"""'], {'size': '(14)'}), "('p(t) [MPa]', size=14)\n", (21645, 21668), True, 'import matplotlib.pyplot as plt\n'), ((21669, 21687), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21685, 21687), True, 'import matplotlib.pyplot as plt\n'), ((21690, 21716), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 2)'}), '(figsize=(8, 2))\n', (21700, 21716), True, 'import matplotlib.pyplot as plt\n'), ((21941, 21971), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [min]"""'], {'size': '(14)'}), "('t [min]', size=14)\n", (21951, 21971), True, 'import matplotlib.pyplot as plt\n'), ((21972, 22003), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""T(t) [K]"""'], {'size': '(14)'}), "('T(t) [K]', size=14)\n", (21982, 22003), True, 'import matplotlib.pyplot as plt\n'), ((22004, 22022), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22020, 22022), True, 'import matplotlib.pyplot as plt\n'), ((22243, 22253), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22251, 22253), True, 'import matplotlib.pyplot as plt\n'), ((1440, 1471), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * t1steps)', 'Nt'], {}), '(0, 2 * t1steps, Nt)\n', (1451, 1471), True, 'import numpy as np\n'), ((3282, 3336), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 7)', 'tight_layout': 
'(True)'}), '(1, 1, figsize=(10, 7), tight_layout=True)\n', (3294, 3336), True, 'import matplotlib.pyplot as plt\n'), ((5563, 5598), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'H_true', 's': '(0.01)'}), '(scale=H_true, s=0.01)\n', (5576, 5598), False, 'from scipy import stats\n'), ((5611, 5646), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'φ_true', 's': '(0.01)'}), '(scale=φ_true, s=0.01)\n', (5624, 5646), False, 'from scipy import stats\n'), ((5657, 5692), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'K_true', 's': '(0.01)'}), '(scale=K_true, s=0.01)\n', (5670, 5692), False, 'from scipy import stats\n'), ((5705, 5741), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'ct_true', 's': '(0.01)'}), '(scale=ct_true, s=0.01)\n', (5718, 5741), False, 'from scipy import stats\n'), ((5753, 5788), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'Q_true', 's': '(0.01)'}), '(scale=Q_true, s=0.01)\n', (5766, 5788), False, 'from scipy import stats\n'), ((5801, 5837), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'cs_true', 's': '(0.01)'}), '(scale=cs_true, s=0.01)\n', (5814, 5837), False, 'from scipy import stats\n'), ((6244, 6266), 'numpy.random.seed', 'np.random.seed', (['(716742)'], {}), '(716742)\n', (6258, 6266), True, 'import numpy as np\n'), ((6578, 6609), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['parameters'], {}), '(parameters)\n', (6597, 6609), True, 'import matplotlib.pyplot as plt\n'), ((6614, 6641), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (6624, 6641), True, 'import matplotlib.pyplot as plt\n'), ((6868, 6886), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6884, 6886), True, 'import matplotlib.pyplot as plt\n'), ((6892, 6902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6900, 6902), True, 'import matplotlib.pyplot as plt\n'), ((10744, 10910), 'pymc3.traceplot', 'pm.traceplot', (['trace'], {'lines': 
"(('K', {}, [K_true]), ('φ', {}, [φ_true]), ('H', {}, [H_true]), ('ct', {},\n [ct_true]), ('Q', {}, [Q_true]), ('cs', {}, [cs_true]))"}), "(trace, lines=(('K', {}, [K_true]), ('φ', {}, [φ_true]), ('H',\n {}, [H_true]), ('ct', {}, [ct_true]), ('Q', {}, [Q_true]), ('cs', {}, [\n cs_true])))\n", (10756, 10910), True, 'import pymc3 as pm\n'), ((12287, 12313), 'arviz.from_pymc3', 'az.from_pymc3', ([], {'trace': 'trace'}), '(trace=trace)\n', (12300, 12313), True, 'import arviz as az\n'), ((12328, 12385), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['K']", 'kind': '"""hist"""'}), "(data_spp, var_names=['K'], kind='hist')\n", (12345, 12385), True, 'import arviz as az\n'), ((12401, 12458), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['φ']", 'kind': '"""hist"""'}), "(data_spp, var_names=['φ'], kind='hist')\n", (12418, 12458), True, 'import arviz as az\n'), ((12472, 12529), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['H']", 'kind': '"""hist"""'}), "(data_spp, var_names=['H'], kind='hist')\n", (12489, 12529), True, 'import arviz as az\n'), ((12544, 12601), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['Q']", 'kind': '"""hist"""'}), "(data_spp, var_names=['Q'], kind='hist')\n", (12561, 12601), True, 'import arviz as az\n'), ((12617, 12675), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['ct']", 'kind': '"""hist"""'}), "(data_spp, var_names=['ct'], kind='hist')\n", (12634, 12675), True, 'import arviz as az\n'), ((12691, 12749), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['cs']", 'kind': '"""hist"""'}), "(data_spp, var_names=['cs'], kind='hist')\n", (12708, 12749), True, 'import arviz as az\n'), ((12766, 12840), 'arviz.plot_joint', 'az.plot_joint', (['data_spp'], {'var_names': "['K', 'φ']", 'kind': '"""kde"""', 'fill_last': '(False)'}), "(data_spp, var_names=['K', 'φ'], kind='kde', 
fill_last=False)\n", (12779, 12840), True, 'import arviz as az\n'), ((12946, 12956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12954, 12956), True, 'import matplotlib.pyplot as plt\n'), ((13407, 13417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13415, 13417), True, 'import matplotlib.pyplot as plt\n'), ((21014, 21032), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21030, 21032), True, 'import matplotlib.pyplot as plt\n'), ((21039, 21049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21047, 21049), True, 'import matplotlib.pyplot as plt\n'), ((389, 414), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (412, 414), False, 'import platform\n'), ((4242, 4288), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(10)', 'high': '(100)', 'size': 'size'}), '(low=10, high=100, size=size)\n', (4259, 4288), True, 'import numpy as np\n'), ((4354, 4401), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.3)', 'high': '(0.5)', 'size': 'size'}), '(low=0.3, high=0.5, size=size)\n', (4371, 4401), True, 'import numpy as np\n'), ((4423, 4466), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(3)', 'high': '(5)', 'size': 'size'}), '(low=3, high=5, size=size)\n', (4440, 4466), True, 'import numpy as np\n'), ((4480, 4531), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1e-05)', 'high': '(3e-05)', 'size': 'size'}), '(low=1e-05, high=3e-05, size=size)\n', (4497, 4531), True, 'import numpy as np\n'), ((4666, 4687), 'numpy.mean', 'np.mean', (['permeability'], {}), '(permeability)\n', (4673, 4687), True, 'import numpy as np\n'), ((6342, 6360), 'numpy.mean', 'np.mean', (['truemodel'], {}), '(truemodel)\n', (6349, 6360), True, 'import numpy as np\n'), ((6669, 6730), 'matplotlib.pyplot.plot', 'plt.plot', (['(truemodel / MPA)', '"""k"""'], {'label': '"""$p_{true}$"""', 'alpha': '(0.5)'}), "(truemodel / MPA, 'k', label='$p_{true}$', alpha=0.5)\n", (6677, 6730), 
True, 'import matplotlib.pyplot as plt\n'), ((6730, 6796), 'matplotlib.pyplot.plot', 'plt.plot', (['(data / MPA)', '"""r"""'], {'label': '"""$σ_{noise} = 1.0e-2$"""', 'alpha': '(0.5)'}), "(data / MPA, 'r', label='$σ_{noise} = 1.0e-2$', alpha=0.5)\n", (6738, 6796), True, 'import matplotlib.pyplot as plt\n'), ((6801, 6825), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""p(t) [MPa]"""'], {}), "('p(t) [MPa]')\n", (6811, 6825), True, 'import matplotlib.pyplot as plt\n'), ((6827, 6848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [min]"""'], {}), "('t [min]')\n", (6837, 6848), True, 'import matplotlib.pyplot as plt\n'), ((8540, 8550), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (8548, 8550), True, 'import pymc3 as pm\n'), ((9437, 9473), 'pymc3.Uniform', 'pm.Uniform', (['"""H"""'], {'lower': '(35)', 'upper': '(105)'}), "('H', lower=35, upper=105)\n", (9447, 9473), True, 'import pymc3 as pm\n'), ((9490, 9527), 'pymc3.Uniform', 'pm.Uniform', (['"""φ"""'], {'lower': '(0.1)', 'upper': '(0.3)'}), "('φ', lower=0.1, upper=0.3)\n", (9500, 9527), True, 'import pymc3 as pm\n'), ((9542, 9585), 'pymc3.Uniform', 'pm.Uniform', (['"""K"""'], {'lower': '(5e-14)', 'upper': '(1.5e-13)'}), "('K', lower=5e-14, upper=1.5e-13)\n", (9552, 9585), True, 'import pymc3 as pm\n'), ((9604, 9648), 'pymc3.Uniform', 'pm.Uniform', (['"""ct"""'], {'lower': '(5e-11)', 'upper': '(1.5e-10)'}), "('ct', lower=5e-11, upper=1.5e-10)\n", (9614, 9648), True, 'import pymc3 as pm\n'), ((9666, 9707), 'pymc3.Uniform', 'pm.Uniform', (['"""Q"""'], {'lower': '(0.035)', 'upper': '(0.105)'}), "('Q', lower=0.035, upper=0.105)\n", (9676, 9707), True, 'import pymc3 as pm\n'), ((9724, 9764), 'pymc3.Uniform', 'pm.Uniform', (['"""cs"""'], {'lower': '(1325)', 'upper': '(3975)'}), "('cs', lower=1325, upper=3975)\n", (9734, 9764), True, 'import pymc3 as pm\n'), ((10272, 10333), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['[Hpdf, φpdf, Kpdf, ctpdf, Qpdf, cspdf]'], {}), '([Hpdf, φpdf, Kpdf, ctpdf, Qpdf, 
cspdf])\n', (10293, 10333), True, 'import theano.tensor as tt\n'), ((10581, 10667), 'pymc3.sample', 'pm.sample', (['ndraws'], {'cores': '(1)', 'chains': 'chains', 'tune': 'nburn', 'discard_tuned_samples': '(True)'}), '(ndraws, cores=1, chains=chains, tune=nburn, discard_tuned_samples\n =True)\n', (10590, 10667), True, 'import pymc3 as pm\n'), ((13428, 13438), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (13436, 13438), True, 'import pymc3 as pm\n'), ((20520, 20546), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 2)'}), '(figsize=(8, 2))\n', (20530, 20546), True, 'import matplotlib.pyplot as plt\n'), ((20857, 20955), 'matplotlib.pyplot.axvline', 'plt.axvline', (["{'K': K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q': Q_true}[param]"], {'c': '"""k"""'}), "({'K': K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q':\n Q_true}[param], c='k')\n", (20868, 20955), True, 'import matplotlib.pyplot as plt\n'), ((20960, 20983), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (20970, 20983), True, 'import matplotlib.pyplot as plt\n'), ((20992, 21008), 'matplotlib.pyplot.title', 'plt.title', (['param'], {}), '(param)\n', (21001, 21008), True, 'import matplotlib.pyplot as plt\n'), ((21479, 21506), 'numpy.save', 'np.save', (['f', 'pnodelist[node]'], {}), '(f, pnodelist[node])\n', (21486, 21506), True, 'import numpy as np\n'), ((21818, 21845), 'numpy.save', 'np.save', (['f', 'Tnodelist[node]'], {}), '(f, Tnodelist[node])\n', (21825, 21845), True, 'import numpy as np\n'), ((10704, 10733), 'arviz.summary', 'az.summary', (['trace'], {'round_to': '(2)'}), '(trace, round_to=2)\n', (10714, 10733), True, 'import arviz as az\n'), ((17463, 17473), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (17471, 17473), True, 'import pymc3 as pm\n'), ((18742, 18785), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['[H, φ, K, ct, Q, cs]'], {}), '([H, φ, K, ct, Q, cs])\n', (18763, 18785), True, 'import theano.tensor as tt\n'), 
((19025, 19066), 'pymc3.sample', 'pm.sample', (['ndraws'], {'cores': '(1)', 'chains': 'chains'}), '(ndraws, cores=1, chains=chains)\n', (19034, 19066), True, 'import pymc3 as pm\n'), ((20706, 20734), 'numpy.linspace', 'np.linspace', (['smin', 'smax', '(100)'], {}), '(smin, smax, 100)\n', (20717, 20734), True, 'import numpy as np\n'), ((14349, 14360), 'numpy.log', 'np.log', (['μ_H'], {}), '(μ_H)\n', (14355, 14360), True, 'import numpy as np\n'), ((14409, 14420), 'numpy.log', 'np.log', (['μ_φ'], {}), '(μ_φ)\n', (14415, 14420), True, 'import numpy as np\n'), ((14538, 14550), 'numpy.log', 'np.log', (['μ_ct'], {}), '(μ_ct)\n', (14544, 14550), True, 'import numpy as np\n'), ((14598, 14609), 'numpy.log', 'np.log', (['μ_Q'], {}), '(μ_Q)\n', (14604, 14609), True, 'import numpy as np\n'), ((14658, 14670), 'numpy.log', 'np.log', (['μ_cs'], {}), '(μ_cs)\n', (14664, 14670), True, 'import numpy as np\n'), ((20657, 20672), 'numpy.min', 'np.min', (['samples'], {}), '(samples)\n', (20663, 20672), True, 'import numpy as np\n'), ((20674, 20689), 'numpy.max', 'np.max', (['samples'], {}), '(samples)\n', (20680, 20689), True, 'import numpy as np\n'), ((20751, 20778), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['samples'], {}), '(samples)\n', (20769, 20778), False, 'from scipy import stats\n')]
|
from sklearn.ensemble import AdaBoostClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Normalizer
from homework_1.scikitlearn_pipeline.preprocessors import ConnectFeatures
pipeline = Pipeline(
steps=[
("connect", ConnectFeatures(all=True)),
("norm", Normalizer()),
("ada_boost_classifier", AdaBoostClassifier()),
]
)
|
[
"sklearn.preprocessing.Normalizer",
"sklearn.ensemble.AdaBoostClassifier",
"homework_1.scikitlearn_pipeline.preprocessors.ConnectFeatures"
] |
[((260, 285), 'homework_1.scikitlearn_pipeline.preprocessors.ConnectFeatures', 'ConnectFeatures', ([], {'all': '(True)'}), '(all=True)\n', (275, 285), False, 'from homework_1.scikitlearn_pipeline.preprocessors import ConnectFeatures\n'), ((305, 317), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {}), '()\n', (315, 317), False, 'from sklearn.preprocessing import Normalizer\n'), ((353, 373), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {}), '()\n', (371, 373), False, 'from sklearn.ensemble import AdaBoostClassifier\n')]
|
#!/usr/bin/env python
"""Bootstrap process for system policy"""
__author__ = '<NAME>, <NAME>'
from pyon.public import log
from ion.core.bootstrap_process import BootstrapPlugin
from ion.process.bootstrap.load_system_policy import LoadSystemPolicy
class BootstrapPolicy(BootstrapPlugin):
"""
Bootstrap plugin for system policy
"""
def on_initial_bootstrap(self, process, config, **kwargs):
if config.get_safe("system.load_policy", False):
LoadSystemPolicy.op_load_system_policies(process)
|
[
"ion.process.bootstrap.load_system_policy.LoadSystemPolicy.op_load_system_policies"
] |
[((480, 529), 'ion.process.bootstrap.load_system_policy.LoadSystemPolicy.op_load_system_policies', 'LoadSystemPolicy.op_load_system_policies', (['process'], {}), '(process)\n', (520, 529), False, 'from ion.process.bootstrap.load_system_policy import LoadSystemPolicy\n')]
|