prompt listlengths 1 1 | compression_prompt listlengths 1 1 | target stringlengths 1.03k 828k |
|---|---|---|
[
{
"content": "Repeat the code exactly:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# EM MEDIA HANDLER\n# Copyright (c) 2014-2021 Erin Morelli\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Soft... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# EM MEDIA HANDLER\n# Copyright (c) 2014-2021 Erin Morelli\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation file... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# EM MEDIA HANDLER
# Copyright (c) 2014-2021 Erin Morelli
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Media handler module setup
"""
import os
from setuptools import setup
from mediahandler.util.config import make_config
# Set up extra scripts
_extra_scripts = []
if os.name == 'nt':
_extra_scripts.append('addmedia-deluge.bat')
# Set up mediahandler package
setup(
name='em-media-handler',
version='1.2',
author='Erin Morelli',
author_email='me@erin.dev',
url='http://www.erinmorelli.com/projects/em-media-handler/',
license='MIT',
platforms='Linux, OSX, Windows',
description='A comprehensive media handling automation script.',
long_description=open('README.md').read(),
test_suite='tests.testall.suite',
include_package_data=True,
packages=[
'mediahandler',
'mediahandler.types',
'mediahandler.util',
],
entry_points={
'console_scripts': [
'addmedia=mediahandler.handler:main',
'addmedia-deluge=mediahandler.handler:deluge'
]
},
scripts=_extra_scripts,
install_requires=[
'pyyaml',
'google-api-python-client',
'mutagen',
'oauth2client<=3.0.0',
'setuptools>=40.3.0',
'requests'
],
extras_require={
'music': [
'beets',
'pylast==2.3.0'
],
'deluge': [
'twisted',
'pyopenssl'
],
},
tests_require=[
'unittest2',
'responses',
'mock'
],
classifiers=[
'Topic :: Home Automation',
'Topic :: Multimedia',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Environment :: MacOS X',
'Environment :: Console',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
],
)
# Generate default config file
make_config()
``` |
[
{
"content": "Repeat the code exactly:\n```python\n#The MIT License (MIT)\n#\n#Copyright (c) 2014 Robert Abela\n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n#The MIT License (MIT)\n#\n#Copyright (c) 2014 Robert Abela\n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software wit... | ```python
#The MIT License (MIT)
#
#Copyright (c) 2014 Robert Abela
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#See: https://github.com/robert-abela/kfsensor-logparser
import xml.sax.handler
import xml.sax
from datetime import datetime
MAX_QUEUE_SIZE = 5 #max entries in queue
QUEUE_INTERVAL = 10 #seconds
ROOT_ELEMENT_NAME = 'log'
def make_time(timestamp):
timestamp_ms = timestamp +'000' #pad for microseconds
return datetime.strptime(timestamp_ms, '%Y-%m-%d %H:%M:%S:%f')
def append(field, data):
if field is None:
return data
else:
return field + data
debug_on = False
def dbg(debug_message):
if debug_on:
print(debug_message)
class LogFileEvent():
def __init__(self):
self.id = None
self.type = None
self.desc = None
self.action = None
self.name = None
self.protocol = None
self.severity = None
self.domain = None
self.client_ip = None
self.client_port = None
self.host_ip = None
self.bindip = None
self.host_port = None
self.closedby = None
self.start = None
self.recbytes = None
self.received = None
def __str__( self ):
str_rep = ''
if self.id is not None:
str_rep += 'id: ' + self.id + '\n'
if self.type is not None:
str_rep += 'type: ' + self.type + '\n'
if self.desc is not None:
str_rep += 'desc: ' + self.desc + '\n'
if self.action is not None:
str_rep += 'action: ' + self.action + '\n'
if self.name is not None:
str_rep += 'name: ' + self.name + '\n'
if self.protocol is not None:
str_rep += 'protocol: ' + self.protocol + '\n'
if self.severity is not None:
str_rep += 'severity: ' + self.severity + '\n'
if self.domain is not None:
str_rep += 'domain: ' + self.domain + '\n'
if self.client_ip is not None:
str_rep += 'client_ip: ' + self.client_ip + '\n'
if self.client_port is not None:
str_rep += 'client_port: ' + self.client_port + '\n'
if self.host_ip is not None:
str_rep += 'host_ip: ' + self.host_ip + '\n'
if self.bindip is not None:
str_rep += 'bindip: ' + self.bindip + '\n'
if self.host_port is not None:
str_rep += 'host_port: ' + self.host_port + '\n'
if self.closedby is not None:
str_rep += 'closedby: ' + self.closedby + '\n'
if self.start is not None:
str_rep += 'start: ' + self.start + '\n'
if self.recbytes is not None:
str_rep += 'recbytes: ' + self.recbytes + '\n'
return str_rep
class EventQueue():
def __init__(self):
self.__time_deltas = []
self.__last_event = None
self.__in_a_burst = False
def addEvent(self, event):
burst_event = None
if self.__last_event is not None:
old_time = make_time(self.__last_event.start)
time = make_time(event.start)
delta_seconds = (time - old_time).total_seconds()
if self.isQueueFull():
total_time = 0
#Calculate the total of time intervals
for t in self.__time_deltas:
total_time += t
if total_time < QUEUE_INTERVAL:
if not self.__in_a_burst:
self.__print_burst(old_time)
burst_event = self.__last_event
self.__in_a_burst = True
else:
self.__in_a_burst = False
#remove first one since queue is full
self.__time_deltas = self.__time_deltas[1:]
#add to queue: difference between current and previous time
self.__time_deltas.append(delta_seconds)
else:
#Add 0 seconds to queue since this is the first event
self.__time_deltas.append(0)
self.__last_event = event
return burst_event
def getQueueSize(self):
return len(self.__time_deltas)
def isQueueFull(self):
return self.getQueueSize() == MAX_QUEUE_SIZE
def getLastEvent(self):
return self.__last_event
def __print_burst(self, time):
dbg(str(MAX_QUEUE_SIZE)+' events in less than '+
str(QUEUE_INTERVAL)+' second(s) around: '+str(time))
class LogFileHandler(xml.sax.handler.ContentHandler):
def __init__(self, file):
self.inchild = ''
self.events_map = {}
self.current_event = None
self.__parse(file)
def __parse(self, file):
'''
Parses the log file passed in the constructor
return: None
'''
parser = xml.sax.make_parser()
parser.setContentHandler(self)
try:
parser.parse(file)
except xml.sax.SAXParseException as error:
print("Error: {0}".format(error))
def readAttribute(self, name, attributes):
'''
Checks if an attribute with the given name is in the attributes
dictionary and if present returns it
Return: a string or None
'''
if name in attributes.keys():
return attributes[name]
else:
return None
def startElement(self, name, attributes):
'''
SAX Parsing: A new element is found.
return: None
'''
self.inchild = '' #Reset child marker
if name == ROOT_ELEMENT_NAME:
print('------ STARTED PARSING ------')
elif name == 'event':
#Create a new event and populate its fields
self.current_event = LogFileEvent()
curr_id = self.readAttribute('id', attributes)
self.current_event.id = curr_id.rjust(10, '0')
self.current_event.type = self.readAttribute('type', attributes)
self.current_event.desc = self.readAttribute('desc', attributes)
self.current_event.action = self.readAttribute('action', attributes)
self.current_event.name = self.readAttribute('name', attributes)
self.current_event.protocol = self.readAttribute('protocol', attributes)
self.current_event.severity = self.readAttribute('severity', attributes)
elif name == 'client':
self.current_event.domain = self.readAttribute('domain', attributes)
self.current_event.client_ip = self.readAttribute('ip', attributes)
self.current_event.client_port = self.readAttribute('port', attributes)
elif name == 'host':
self.current_event.server_ip = self.readAttribute('ip', attributes)
self.current_event.bindip = self.readAttribute('bindip', attributes)
self.current_event.server_port = self.readAttribute('port', attributes)
elif name == 'connection':
self.current_event.closedby = self.readAttribute('closedby', attributes)
else:
self.inchild = name #keep track of child element name
def characters(self, data):
'''
SAX Parsing: Text is found
return: None
'''
if data is None:
return
if self.inchild == 'start':
self.current_event.start = append(self.current_event.start, data)
#ignore characters after the 23rd
self.current_event.start = self.current_event.start[:23]
elif self.inchild == 'recBytes':
self.current_event.recbytes = append(self.current_event.recbytes, data)
elif self.inchild == 'received':
self.current_event.received = append(self.current_event.received, data)
def endElement(self, name):
'''
SAX Parsing: Element closing
return: None
'''
if name == ROOT_ELEMENT_NAME:
print('------ FINISHED PARSING ------')
elif name == 'event':
self.events_map[self.current_event.id] = self.current_event
self.current_event = None
self.inchild = '' #Reset child marker
def __getSortedKeys(self):
'''
Returns the event ids sorted
return: a list of strings representing the ids of the events
'''
return sorted(self.events_map)
def getAllEvents(self):
'''
Returns all the events in the log file, ordered by ID
return: a list of LogFileEvent objects
'''
all_events_sorted = []
for key in self.__getSortedKeys():
event = self.events_map[key]
if len(event.start) == 23:
all_events_sorted.append(event)
else:
print('Warning: skipping event', event.id, 'with date(', event.start,')')
return all_events_sorted
def filterEvents(self, f):
'''
Returns all the events in the log file, after the filter (f) has been
applied. For an event to match the filter all the fields need to match.
return: a list of LogFileEvent objects, ordered by ID.
'''
filtered = []
for key in self.__getSortedKeys():
event = self.events_map[key]
if ((f.id is not None and event.id != f.id) or
(f.type is not None and event.type != f.type) or
(f.desc is not None and event.desc != f.desc) or
(f.action is not None and event.action != f.action) or
(f.name is not None and event.name != f.name) or
(f.protocol is not None and event.protocol != f.protocol) or
(f.severity is not None and event.severity != f.severity) or
(f.domain is not None and event.domain != f.domain) or
(f.client_ip is not None and event.client_ip != f.client_ip) or
(f.client_port is not None and event.client_port != f.client_port) or
(f.host_ip is not None and event.host_ip != f.host_ip) or
(f.bindip is not None and event.bindip != f.bindip) or
(f.host_port is not None and event.host_port != f.host_port) or
(f.closedby is not None and event.closedby != f.closedby) or
(f.start is not None and event.start != f.start) or
(f.recbytes is not None and event.recbytes != f.recbytes)):
continue
filtered.append(event)
return filtered
def getBursts(self, events, all_ips=True):
'''
Goes through a list of events and filters out only the events that
happened in a burst. A burst is defined by a number of events
(MAX_QUEUE_SIZE) in given number of seconds (QUEUE_INTERVAL).
Basically the algorithm is to keep a fixed size queue with every entry
being the time difference between 2 events. Once the queue is filled
the times are added and if the total is larger than QUEUE_INTERVAL the
current event is added to the list that will be returned.
return: a list of LogFileEvent objects, ordered by ID.
'''
all_queues = {}
burst_events = []
for event in events:
queue = self.__get_event_queue(all_queues, event, all_ips)
#print('adding Event', event.id, event.start)
burst_event = queue.addEvent(event)
if burst_event is not None:
burst_events.append(burst_event)
return burst_events
def __get_event_queue(self, queues, event, all_ips):
if all_ips == True:
if len(queues) == 0:
queues['all_ips'] = EventQueue()
return queues['all_ips']
else:
queue = queues.get(event.client_ip)
if queue is not None:
return queue
new_queue = EventQueue()
queues[event.client_ip] = new_queue
return new_queue
def getBurstsOld(self, events):
'''
Goes through a list of events and filters out only the events that
happened in a burst. A burst is defined by a number of events
(MAX_QUEUE_SIZE) in given number of seconds (QUEUE_INTERVAL).
Basically the algorithm is to keep a fixed size queue with every entry
being the time difference between 2 events. Once the queue is filled
the times are added and if the total is larger than QUEUE_INTERVAL the
current event is added to the list that will be returned.
return: a list of LogFileEvent objects, ordered by ID.
'''
old_time = None
queue = []
burst_events = []
previous_event = None
for event in events:
timestamp = event.start+'000' #pad for microseconds
time = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S:%f')
if old_time is not None:
if len(queue) == MAX_QUEUE_SIZE: #Queue is full
total_time = 0
#Calculate the total of time intervals
for t in queue:
total_time += t
if total_time < QUEUE_INTERVAL:
if time != old_time:
burst_events.append(previous_event)
#remove first one since queue is full
queue = queue [1:]
#add to queue: difference between current and previous time
queue.append((time - old_time).total_seconds())
else:
#Add 0 seconds to queue since this is the first event
queue.append(0)
#update old time
old_time = time
previous_event = event
return burst_events
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n# Amara, universalsubtitles.org\n# \n# Copyright (C) 2013 Participatory Culture Foundation\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n# Amara, universalsubtitles.org\n# \n# Copyright (C) 2013 Participatory Culture Foundation\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Pu... | ```python
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.conf.urls.defaults import *
from search.views import rpc_router
urlpatterns = patterns('search.views',
url(r'^$', 'index', name='index'),
url(r'^router/$', rpc_router, name='rpc_router'),
url(r'^router/api/$', rpc_router.api, name='rpc_api'),
)
``` |
[
{
"content": "Replicate the source code:\n```python\n\"\"\"Tools for processing configuration files.\"\"\"\n\nfrom ConfigParser import ConfigParser\n\nfrom pyfusion.conf.exceptions import DisallowedSectionType, \\\n ConfigSectionSyntaxError, UnknownVariableTypeError\n\n# This list contains allowed section t... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n\"\"\"Tools for processing configuration files.\"\"\"\n\nfrom ConfigParser import ConfigParser\n\nfrom pyfusion.conf.exceptions import DisallowedSectionType, \\\n ConfigSectionSyntaxError, UnknownVariableTypeError\n\n# This list contains a... | ```python
"""Tools for processing configuration files."""
from ConfigParser import ConfigParser
from pyfusion.conf.exceptions import DisallowedSectionType, \
ConfigSectionSyntaxError, UnknownVariableTypeError
# This list contains allowed section types, i.e. [SectionType:Name] in
# config files. Be sure to update the documentation (config.rst) when
# adding to this list
allowed_section_types = ['Device', 'Diagnostic', 'Acquisition', 'CoordTransform']
## sections which don't follow the [SectionType:Name] syntax
special_section_names = ['variabletypes', 'global', 'Plots']
class PyfusionConfigParser(ConfigParser):
"""Customised parser to facilitate [Type:Name] config sections.
Inherited ConfigParser methods are extended, and prefixed with pf_
to allow separate arguments for section type and section name,
for example:
ConfigParser.has_section(sectionname) ->
PyfusionConfigParser.pf_has_section(sectiontype, name)
The inherited ConfigParser methods are still available, so the
following are identical:
PyfusionConfigParser.has_section('Device:TJII')
PyfusionConfigParser.pf_has_section('Device','TJII')
"""
#####################################################
## Extensions to ConfigParser methods (incomplete) ##
#####################################################
def pf_has_section(self, sectiontype, sectionname):
return self.has_section("%s:%s"%(sectiontype, sectionname))
def pf_get(self, sectiontype, sectionname, option):
if self.has_option('variabletypes',
"%s__%s"%(sectiontype, option)):
output_type = self.get('variabletypes',
"%s__%s"%(sectiontype, option))
if output_type == 'float':
return self.getfloat("%s:%s"%(sectiontype, sectionname), option)
elif output_type == 'int':
return self.getint("%s:%s"%(sectiontype, sectionname), option)
elif output_type == 'bool':
return self.getboolean("%s:%s"%(sectiontype, sectionname), option)
else:
raise UnknownVariableTypeError
else:
return self.get("%s:%s"%(sectiontype, sectionname), option)
def pf_options(self, sectiontype, sectionname):
return self.options("%s:%s"%(sectiontype, sectionname))
def pf_has_option(self, sectiontype, sectionname, option):
return self.has_option("%s:%s"%(sectiontype, sectionname), option)
#########################################
## Custom PyfusionConfigParser methods ##
#########################################
def check_section_syntax(self):
"""Make sure config file sections follow [Type:Name] syntax."""
for section in self.sections():
if not section in special_section_names:
split_name = section.split(':')
if not len(split_name)==2:
raise ConfigSectionSyntaxError, section
if not (len(split_name[0])>0 and len(split_name[1])>0):
raise ConfigSectionSyntaxError, section
def check_section_types(self, type_list):
"""Make sure section types listed in config file are allowed."""
self.check_section_syntax()
for section in self.sections():
if not section in special_section_names:
section_name = section.split(':')[0]
if not section_name in type_list:
raise DisallowedSectionType, section_name
#config = PyfusionConfigParser()
``` |
[
{
"content": "```python\nfrom __future__ import print_function\n\nimport time\nimport logging\nfrom py2neo import Graph,Node\nfrom py2neo.ext import ogm\nfrom py2neo.packages.httpstream.http import SocketError\n\nlog = logging.getLogger('flask.neo4j')\nlogging.basicConfig()\n\n# Find the stack on which we want ... | [
{
"content": "<|memory_start|>```python\nfrom __future__ import print_function\n\nimport time\nimport logging\nfrom py2neo import Graph,Node\nfrom py2neo.ext import ogm\nfrom py2neo.packages.httpstream.http import SocketError\n\nlog = logging.getLogger('flask.neo4j')\nlogging.basicConfig()\n\n# Find the stack o... | ```python
from __future__ import print_function
import time
import logging
from py2neo import Graph,Node
from py2neo.ext import ogm
from py2neo.packages.httpstream.http import SocketError
log = logging.getLogger('flask.neo4j')
logging.basicConfig()
# Find the stack on which we want to store the GraphDatabaseService instance.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
class Neo4j(object):
"""Automatically connects to Neo4j graph database using parameters defined
in Flask configuration.
One can use this extension by providing the Flask app on instantiation or
by calling the :meth:`init_app` method on an instance object of `Neo4j`. An example
of providing the application on instantiation: ::
app = Flask(__name__)
n4j = Neo4j(app)
...and an example calling the :meth:`init_app` method instead: ::
n4j = Neo4j()
def init_app():
app = Flask(__name__)
n4j.init_app(app)
return app
One can also providing a dict of indexes that will be used to automatically
get or create indexes in the graph database ::
app = Flask(__name__)
graph_indexes = {'Species': neo4j.Node}
n4j = Neo4j(app, graph_indexes)
print n4j.gdb.neo4j_version
species_index = n4j.index['Species']
...
"""
def __init__(self, app=None, indexes=None):
self.app = app
self._indexes = indexes
if app is not None:
self.init_app(app)
print ("flask.ext.Neo4j init_app called")
def init_app(self, app):
"""Initialize the `app` for use with this :class:`~Neo4j`. This is
called automatically if `app` is passed to :meth:`~Neo4j.__init__`.
The app is configured according to these configuration variables
``CONNECTION_RETRY``
``RETRY_INTERVAL``
:param flask.Flask app: the application configured for use with
this :class:`~Neo4j`
"""
self.app = app
app.n4j = self
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['neo4j'] = self
# Use the newstyle teardown_appcontext if it's available,
# otherwise fall back to the request context
if hasattr(app, 'teardown_appcontext'):
app.teardown_appcontext(self.teardown)
else:
app.teardown_request(self.teardown)
def teardown(self, exception):
ctx = stack.top # TODO clean up teardown related to graph_db behavior
if hasattr(ctx, 'graph_db'):
# py2neo does not have an 'open' connection that needs closing
ctx.graph_db = None
@property
def gdb(self):
"""The graph database service instance as a property, for convenience.
Note: The property will use these configuration variables
``CONNECTION_RETRY``
``RETRY_INTERVAL``
:return: the graph database service as a property
"""
retry = False
if 'CONNECTION_RETRY' in self.app.config:
retry = self.app.config['CONNECTION_RETRY']
retry_interval = 5
if 'RETRY_INTERVAL' in self.app.config:
retry_interval = self.app.config['RETRY_INTERVAL']
retry_count = 0
try:
self.graph_db = Graph(self.app.config['GRAPH_DATABASE'])
except SocketError as se:
log.error('SocketError: {0}'.format(se.message))
if retry:
while retry_count < 3:
log.debug('Waiting {0}secs before Connection Retry to GraphDatabaseService'.format(
retry_interval
))
time.sleep(retry_interval)
#time.sleep(1)
retry_count += 1
try:
self.graph_db = Graph(self.app.config['GRAPH_DATABASE'])
except SocketError as sse:
log.error('SocketError: {0}'.format(sse.message))
if not hasattr(self, 'index'):
self.index = {}
# add all the indexes as app attributes
if self._indexes is not None:
for i, i_type in self._indexes.iteritems():
log.debug('getting or creating graph index:{0} {1}'.format(
i, i_type
))
self.index[i] = \
self.graph_db.legacy.get_or_create_index(i_type, i)
return self.graph_db
@property
def store(self):
"""
The object graph mapping store available as a property.
Note: The property will use these configuration variables
``CONNECTION_RETRY``
``RETRY_INTERVAL``
:return: the object graph mapping store property
"""
store = ogm.Store(self.gdb)
return store
def delete_index(self, index_name):
"""
Simple delete index capability that takes only a name.
Note: uses the index_types as remembered from indexes variable given at
initialization.
:param index_name: the name of the index to delete from the database
"""
i_type = self._indexes[index_name]
self.graph_db.legacy.delete_index(content_type=i_type, index_name=index_name)
if __name__ == '__main__':
from flask import Flask
app = Flask(__name__)
app.config['GRAPH_DATABASE'] = 'http://localhost:7474/db/data/'
graph_indexes = {'Species': Node}
flask4j = Neo4j(app, graph_indexes)
print (flask4j.gdb.neo4j_version)
species_index = flask4j.index['Species']
print ('species index:', species_index)
flask4j.delete_index('Species')
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\nimport os\nimport sys\nimport time\nimport logging\n\nfrom exabgp.logger.handler import getLogger\nfrom exabgp.logger.format import formater\n\n\ndef echo(_):\n return _\n\n\nclass option(object):\n logger = None\n formater = ... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nimport os\nimport sys\nimport time\nimport logging\n\nfrom exabgp.logger.handler import getLogger\nfrom exabgp.logger.format import formater\n\n\ndef echo(_):\n return _\n\n\nclass option(object):\n logger = None\... | ```python
import os
import sys
import time
import logging
from exabgp.logger.handler import getLogger
from exabgp.logger.format import formater
def echo(_):
return _
class option(object):
logger = None
formater = echo
short = False
level = 'WARNING'
logit = {}
cwd = ''
# where the log should go, stdout, stderr, file, syslog, ...
destination = ''
enabled = {
'pdb': False,
'reactor': False,
'daemon': False,
'processes': False,
'configuration': False,
'network': False,
'wire': False,
'message': False,
'rib': False,
'timer': False,
'routes': False,
'parser': False,
}
@classmethod
def _set_level(cls, level):
cls.level = level
levels = 'FATAL CRITICAL ERROR WARNING INFO DEBUG NOTSET'
index = levels.index(level)
for level in levels.split():
cls.logit[level] = levels.index(level) <= index
@classmethod
def log_enabled(cls, source, level):
return cls.enabled.get(source, True) and cls.logit.get(level, False)
@classmethod
def load(cls, env):
cls.pid = os.getpid()
cls.cwd = os.getcwd()
cls.short = env.log.short
cls._set_level(env.log.level)
cls.option = {
'pdb': env.debug.pdb,
'reactor': env.log.enable and (env.log.all or env.log.reactor),
'daemon': env.log.enable and (env.log.all or env.log.daemon),
'processes': env.log.enable and (env.log.all or env.log.processes),
'configuration': env.log.enable and (env.log.all or env.log.configuration),
'network': env.log.enable and (env.log.all or env.log.network),
'wire': env.log.enable and (env.log.all or env.log.packets),
'message': env.log.enable and (env.log.all or env.log.message),
'rib': env.log.enable and (env.log.all or env.log.rib),
'timer': env.log.enable and (env.log.all or env.log.timers),
'routes': env.log.enable and (env.log.all or env.log.routes),
'parser': env.log.enable and (env.log.all or env.log.parser),
}
destination = env.log.destination
if destination in ('stdout', 'stderr', 'syslog'):
cls.destination = destination
elif destination.startwith('file:'):
cls.destination = destination[5:]
else:
cls.destination = 'stdout'
@classmethod
def setup(cls, env):
cls.load(env)
# the time is used as we will need to re-init the logger once
# we have dropped root privileges so that any permission issues
# can be noticed at start time (and not once we try to rotate file for example)
now = str(time.time())
if cls.destination == 'stdout':
cls.logger = getLogger(
f'ExaBGP stdout {now}',
format='%(message)s',
stream=sys.stderr,
level=cls.level,
)
cls.formater = formater(env.log.short, 'stdout')
return
if cls.destination == 'stdout':
cls.logger = getLogger(
f'ExaBGP stderr {now}',
format='%(message)s',
stream=sys.stderr,
level=cls.level,
)
cls.formater = formater(env.log.short, 'stderr')
return
# if cls.destination == 'file':
# os.path.realpath(os.path.normpath(os.path.join(cls._cwd, destination)))
# _logger = getLogger('ExaBGP file', filename='')
# _format = formater(cls.enabled, 'stderr')
if cls.destination == 'syslog':
cls.logger = getLogger(
f'ExaBGP syslog {now}',
format='%(message)s',
address='/var/run/syslog' if sys.platform == 'darwin' else '/dev/log',
level=cls.level,
)
cls.formater = formater(env.log.short, 'syslog')
# need to re-add remote syslog
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n# F. Giorgi R. Francisco\n# Uncertainties in regional climate change prediction: a regional analysis\n# of ensemble simulations with the HADCM2 coupled AOGCM\n\noutlines = dict()\noutlines[1] = ((110, -45), (155, -45), (15... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# F. Giorgi R. Francisco\n# Uncertainties in regional climate change prediction: a regional analysis\n# of ensemble simulations with the HADCM2 coupled AOGCM\n\noutlines = dict()\noutlines[1] = ((110, -45),... | ```python
# F. Giorgi R. Francisco
# Uncertainties in regional climate change prediction: a regional analysis
# of ensemble simulations with the HADCM2 coupled AOGCM
outlines = dict()
outlines[1] = ((110, -45), (155, -45), (155, -11), (110, -11))
outlines[2] = ((-82, -20), (-34, -20), (-34, 12), (-82, 12))
outlines[3] = ((-76, -56), (-40, -56), (-40, -20), (-76, -20))
outlines[4] = ((-116, 10), (-83, 10), (-83, 25), (-85, 25), (-85, 30), (-116, 30))
outlines[5] = ((-130, 30), (-103, 30), (-103, 60), (-130, 60))
outlines[6] = ((-103, 30), (-85, 30), (-85, 50), (-103, 50))
# Polygon vertices (lon, lat) outlining each Giorgi region.
# NOTE(review): entries 1-6 and the creation of the `outlines`, `abbrevs`,
# and `names` dicts appear above this chunk — confirm against the full file.
outlines[7] = ((-85, 25), (-60, 25), (-60, 50), (-85, 50))
outlines[8] = ((-170, 60), (-103, 60), (-103, 72), (-170, 72))
outlines[9] = ((-103, 50), (-10, 50), (-10, 85), (-103, 85))
outlines[10] = ((-10, 30), (40, 30), (40, 48), (-10, 48))
outlines[11] = ((-10, 48), (40, 48), (40, 75), (-10, 75))
outlines[12] = ((-20, -12), (22, -12), (22, 18), (-20, 18))
outlines[13] = ((22, -12), (52, -12), (52, 18), (22, 18))
outlines[14] = ((-10, -35), (52, -35), (52, -12), (-10, -12))
outlines[15] = ((-20, 18), (65, 18), (65, 30), (-20, 30))
# Region 16 (Southeast Asia) is the only non-rectangular outline.
outlines[16] = ((95, -11), (155, -11), (155, 20), (100, 20), (100, 5), (95, 5))
outlines[17] = ((100, 20), (145, 20), (145, 50), (100, 50))
outlines[18] = ((65, 5), (100, 5), (100, 30), (65, 30))
outlines[19] = ((40, 30), (75, 30), (75, 50), (40, 50))
outlines[20] = ((75, 30), (100, 30), (100, 50), (75, 50))
outlines[21] = ((40, 50), (180, 50), (180, 70), (40, 70))
# Three-letter abbreviation for each region, keyed by region number.
abbrevs = dict()
abbrevs[1] = "AUS"
abbrevs[2] = "AMZ"
abbrevs[3] = "SSA"
abbrevs[4] = "CAM"
abbrevs[5] = "WNA"
abbrevs[6] = "CNA"
abbrevs[7] = "ENA"
abbrevs[8] = "ALA"
abbrevs[9] = "GRL"
abbrevs[10] = "MED"
abbrevs[11] = "NEU"
abbrevs[12] = "WAF"
abbrevs[13] = "EAF"
abbrevs[14] = "SAF"
abbrevs[15] = "SAH"
abbrevs[16] = "SEA"
abbrevs[17] = "EAS"
abbrevs[18] = "SAS"
abbrevs[19] = "CAS"
abbrevs[20] = "TIB"
abbrevs[21] = "NAS"
# Human-readable name for each region, keyed by region number.
names = dict()
names[1] = "Australia"
names[2] = "Amazon Basin"
names[3] = "Southern South America"
names[4] = "Central America"
names[5] = "Western North America"
names[6] = "Central North America"
names[7] = "Eastern North America"
names[8] = "Alaska"
names[9] = "Greenland"
names[10] = "Mediterranean Basin"
names[11] = "Northern Europe"
names[12] = "Western Africa"
names[13] = "Eastern Africa"
names[14] = "Southern Africa"
names[15] = "Sahara"
names[16] = "Southeast Asia"
names[17] = "East Asia"
names[18] = "South Asia"
names[19] = "Central Asia"
names[20] = "Tibet"
names[21] = "North Asia"
# =============================================================================
from ..core.regions import Regions
# Region numbers run 1..21 inclusive; `source` cites the defining paper.
numbers = range(1, 22)
source = (
    "Giorgi and Franciso, 2000 " "(http://link.springer.com/article/10.1007/PL00013733)"
)
# Assemble the module-level Regions object exposing the Giorgi regions.
giorgi = Regions(
    outlines,
    numbers=numbers,
    names=names,
    abbrevs=abbrevs,
    name="Giorgi",
    source=source,
)
``` |
[
{
"content": "```python\nfrom numpy import *\n\ndef metropolis(data, model, nlinks, beta=1., keepchain=True, startlink=0):\n\t'''\n\tThe \"model\" object must implement:\n\t\n\tp = model.get_params()\n\t-- this must return an *independent copy* of the parameters.\n\n\tmodel.set_params(p)\n\n\tp = model.propose_... | [
{
"content": "<|memory_start|>```python\nfrom numpy import *\n\ndef metropolis(data, model, nlinks, beta=1., keepchain=True, startlink=0):\n\t'''\n\tThe \"model\" object must implement:\n\t\n\tp = model.get_params()\n\t-- this must return an *independent copy* of the parameters.\n\n\tmodel.set_params(p)\n\n\tp ... | ```python
from numpy import *
def metropolis(data, model, nlinks, beta=1., keepchain=True, startlink=0):
    '''
    Run a Metropolis MCMC sampler for links [startlink, nlinks).

    The "model" object must implement:

    p = model.get_params()
    -- must return an *independent copy* of the parameters.

    model.set_params(p)

    p = model.propose_params()

    model.tally(accept, linknumber)   accept: boolean

    lnp = model.lnposterior(data)

    The "data" object is opaque and is only passed through to the model.

    Returns: (bestlnp, bestparams, chain)

    where chain is a list of every MCMC step (empty when keepchain=False):
    [ (lnp, params), (lnp, params), ... ]
    '''
    current_params = model.get_params()
    current_lnp = model.lnposterior(data)
    best_params, best_lnp = current_params, current_lnp
    history = []
    for step in range(startlink, nlinks):
        # Propose from the model's current state and evaluate it.
        proposal = model.propose_params()
        model.set_params(proposal)
        proposal_lnp = model.lnposterior(data)
        # Metropolis acceptance test (tempered by beta).
        u = random.uniform()
        accepted = (beta * (proposal_lnp - current_lnp)) > log(u)
        model.tally(accepted, step)
        if accepted:
            # Adopt the proposal; the model already holds it.
            if keepchain:
                history.append((proposal_lnp, proposal))
            current_params, current_lnp = proposal, proposal_lnp
            if proposal_lnp > best_lnp:
                best_lnp, best_params = proposal_lnp, proposal
        else:
            # Reject: record the old state and restore it on the model.
            if keepchain:
                history.append((current_lnp, current_params))
            model.set_params(current_params)
    return (best_lnp, best_params, history)
# NOTE(review): dead code — a module-level string literal holding two
# commented-out helpers (cycle_proposals, quadratic_to_linear). It is never
# executed or referenced; kept as-is pending confirmation it can be deleted.
'''
def cycle_proposals(oldparams, stepinfo):
    (stepnum, sigmas, lastip) = stepinfo
    NZ = array([i for i,s in enumerate(sigmas) if s != 0])
    np = len(NZ)
    ip = NZ[stepnum % np]
    params = oldparams[:]
    params[ip] += random.normal() * sigmas[ip]
    return (params, (stepnum+1, sigmas, ip))
# roll-off a quadratic error function to linear at the given 'linsig'.
# the curve and its slope are continuous.
def quadratic_to_linear(sig2, linsig):
    lin = (sig2 > linsig**2)
    sig2[lin] = -linsig**2 + 2.*linsig * sqrt(sig2[lin])
    return sig2
'''
``` |
[
{
"content": "```python\n# Copyright (c) 2016 Ultimaker B.V.\r\n# Cura is released under the terms of the AGPLv3 or higher.\r\n\r\n# This collects a lot of quality and quality changes related code which was split between ContainerManager\r\n# and the MachineManager and really needs to usable from both.\r\nfrom ... | [
{
"content": "<|memory_start|>```python\n# Copyright (c) 2016 Ultimaker B.V.\r\n# Cura is released under the terms of the AGPLv3 or higher.\r\n\r\n# This collects a lot of quality and quality changes related code which was split between ContainerManager\r\n# and the MachineManager and really needs to usable fro... | ```python
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
# This collects a lot of quality and quality changes related code which was split between ContainerManager
# and the MachineManager and really needs to usable from both.
from typing import List
from UM.Application import Application
from UM.Settings.ContainerRegistry import ContainerRegistry
from UM.Settings.DefinitionContainer import DefinitionContainer
from UM.Settings.InstanceContainer import InstanceContainer
from cura.Settings.ExtruderManager import ExtruderManager
class QualityManager:
    """Lookup helpers for quality and quality_changes profiles.

    Collects quality-related code that was previously split between
    ContainerManager and MachineManager so it is usable from both.
    All lookups go through the ContainerRegistry; results are
    InstanceContainer objects.
    """

    ##  Get the singleton instance for this class.
    @classmethod
    def getInstance(cls) -> "QualityManager":
        # Note: Explicit use of class name to prevent issues with inheritance.
        if not QualityManager.__instance:
            QualityManager.__instance = cls()
        return QualityManager.__instance

    __instance = None   # type: "QualityManager"  # lazily-created singleton

    ##  Find a quality by name for a specific machine definition and materials.
    #
    #   \param quality_name
    #   \param machine_definition (Optional) \type{ContainerInstance} If nothing is
    #                               specified then the currently selected machine definition is used.
    #   \param material_containers (Optional) \type{List[ContainerInstance]} If nothing is specified then
    #                               the current set of selected materials is used.
    #   \return the matching quality container \type{ContainerInstance}
    def findQualityByName(self, quality_name, machine_definition=None, material_containers=None):
        criteria = {"type": "quality", "name": quality_name}
        result = self._getFilteredContainersForStack(machine_definition, material_containers, **criteria)

        # Fall back to using generic materials and qualities if nothing could be found.
        if not result and material_containers and len(material_containers) == 1:
            basic_materials = self._getBasicMaterials(material_containers[0])
            result = self._getFilteredContainersForStack(machine_definition, basic_materials, **criteria)

        return result[0] if result else None

    ##  Find a quality changes container by name.
    #
    #   \param quality_changes_name \type{str} the name of the quality changes container.
    #   \param machine_definition (Optional) \type{ContainerInstance} If nothing is
    #                               specified then the currently selected machine definition is used.
    #   \return the matching quality changes containers \type{List[ContainerInstance]}
    def findQualityChangesByName(self, quality_changes_name, machine_definition=None):
        criteria = {"type": "quality_changes", "name": quality_changes_name}
        # Note: no material filtering — an empty material list is passed.
        result = self._getFilteredContainersForStack(machine_definition, [], **criteria)
        return result

    ##  Fetch the list of available quality types for this combination of machine definition and materials.
    #
    #   \param machine_definition \type{DefinitionContainer}
    #   \param material_containers \type{List[InstanceContainer]}
    #   \return \type{List[str]}
    def findAllQualityTypesForMachineAndMaterials(self, machine_definition, material_containers):
        # Determine the common set of quality types which can be
        # applied to all of the materials for this machine.
        quality_type_dict = self.__fetchQualityTypeDictForMaterial(machine_definition, material_containers[0])
        common_quality_types = set(quality_type_dict.keys())
        for material_container in material_containers[1:]:
            next_quality_type_dict = self.__fetchQualityTypeDictForMaterial(machine_definition, material_container)
            common_quality_types.intersection_update(set(next_quality_type_dict.keys()))

        return list(common_quality_types)

    ##  Fetches a dict of quality types names to quality profiles for a combination of machine and material.
    #
    #   \param machine_definition \type{DefinitionContainer} the machine definition.
    #   \param material \type{ContainerInstance} the material.
    #   \return \type{Dict[str, ContainerInstance]} the dict of suitable quality type names mapping to qualities.
    def __fetchQualityTypeDictForMaterial(self, machine_definition, material):
        qualities = self.findAllQualitiesForMachineMaterial(machine_definition, material)
        quality_type_dict = {}
        for quality in qualities:
            quality_type_dict[quality.getMetaDataEntry("quality_type")] = quality
        return quality_type_dict

    ##  Find a quality container by quality type.
    #
    #   \param quality_type \type{str} the name of the quality type to search for.
    #   \param machine_definition (Optional) \type{ContainerInstance} If nothing is
    #                               specified then the currently selected machine definition is used.
    #   \param material_containers (Optional) \type{List[ContainerInstance]} If nothing is specified then
    #                               the current set of selected materials is used.
    #   \return the matching quality container \type{ContainerInstance}
    def findQualityByQualityType(self, quality_type, machine_definition=None, material_containers=None, **kwargs):
        # Extra keyword arguments become additional registry search criteria.
        criteria = kwargs
        criteria["type"] = "quality"
        if quality_type:
            criteria["quality_type"] = quality_type
        result = self._getFilteredContainersForStack(machine_definition, material_containers, **criteria)

        # Fall back to using generic materials and qualities if nothing could be found.
        if not result and material_containers and len(material_containers) == 1:
            basic_materials = self._getBasicMaterials(material_containers[0])
            result = self._getFilteredContainersForStack(machine_definition, basic_materials, **criteria)

        return result[0] if result else None

    ##  Find all suitable qualities for a combination of machine and material.
    #
    #   \param machine_definition \type{DefinitionContainer} the machine definition.
    #   \param material_container \type{ContainerInstance} the material.
    #   \return \type{List[ContainerInstance]} the list of suitable qualities.
    def findAllQualitiesForMachineMaterial(self, machine_definition, material_container):
        criteria = {"type": "quality" }
        result = self._getFilteredContainersForStack(machine_definition, [material_container], **criteria)
        if not result:
            # Fall back to the basic (generic) versions of this material.
            basic_materials = self._getBasicMaterials(material_container)
            result = self._getFilteredContainersForStack(machine_definition, basic_materials, **criteria)

        return result

    ##  Find all quality changes for a machine.
    #
    #   \param machine_definition \type{DefinitionContainer} the machine definition.
    #   \return \type{List[InstanceContainer]} the list of quality changes
    def findAllQualityChangesForMachine(self, machine_definition: DefinitionContainer) -> List[InstanceContainer]:
        if machine_definition.getMetaDataEntry("has_machine_quality"):
            definition_id = machine_definition.getId()
        else:
            # Machine has no machine-specific qualities; use the generic printer.
            definition_id = "fdmprinter"

        filter_dict = { "type": "quality_changes", "extruder": None, "definition": definition_id }
        quality_changes_list = ContainerRegistry.getInstance().findInstanceContainers(**filter_dict)
        return quality_changes_list

    ##  Find all usable qualities for a machine and extruders.
    #
    #   Finds all of the qualities for this combination of machine and extruders.
    #   Only one quality per quality type is returned. i.e. if there are 2 qualities with quality_type=normal
    #   then only one of them is returned (at random).
    #
    #   \param global_container_stack \type{ContainerStack} the global machine definition
    #   \param extruder_stacks \type{List[ContainerStack]} the list of extruder stacks
    #   \return \type{List[InstanceContainer]} the list of the matching qualities. The quality profiles
    #       returned come from the first extruder in the given list of extruders.
    def findAllUsableQualitiesForMachineAndExtruders(self, global_container_stack, extruder_stacks):
        global_machine_definition = global_container_stack.getBottom()

        if extruder_stacks:
            # Multi-extruder machine detected.
            materials = [stack.findContainer(type="material") for stack in extruder_stacks]
        else:
            # Machine with one extruder.
            materials = [global_container_stack.findContainer(type="material")]

        quality_types = self.findAllQualityTypesForMachineAndMaterials(global_machine_definition, materials)

        # Map the list of quality_types to InstanceContainers
        qualities = self.findAllQualitiesForMachineMaterial(global_machine_definition, materials[0])
        quality_type_dict = {}
        for quality in qualities:
            quality_type_dict[quality.getMetaDataEntry("quality_type")] = quality

        return [quality_type_dict[quality_type] for quality_type in quality_types]

    ##  Fetch more basic versions of a material.
    #
    #   This tries to find a generic or basic version of the given material.
    #   \param material_container \type{InstanceContainer} the material
    #   \return \type{List[InstanceContainer]} a list of the basic materials or an empty list if one could not be found.
    def _getBasicMaterials(self, material_container):
        base_material = material_container.getMetaDataEntry("material")
        material_container_definition = material_container.getDefinition()
        if material_container_definition and material_container_definition.getMetaDataEntry("has_machine_quality"):
            definition_id = material_container.getDefinition().getMetaDataEntry("quality_definition", material_container.getDefinition().getId())
        else:
            definition_id = "fdmprinter"
        if base_material:
            # There is a basic material specified
            criteria = { "type": "material", "name": base_material, "definition": definition_id }
            containers = ContainerRegistry.getInstance().findInstanceContainers(**criteria)
            # Only keep basic materials for the same variant as the input material.
            containers = [basic_material for basic_material in containers if
                          basic_material.getMetaDataEntry("variant") == material_container.getMetaDataEntry(
                              "variant")]
            return containers

        return []

    # Convenience wrapper: filtered lookup using all current defaults.
    def _getFilteredContainers(self, **kwargs):
        return self._getFilteredContainersForStack(None, None, **kwargs)

    ##  Core filtered registry lookup for a machine definition + materials.
    #
    #   \param machine_definition (Optional) defaults to the active machine's definition.
    #   \param material_containers (Optional) defaults to the active materials.
    #   \param kwargs extra metadata criteria passed to the registry search.
    #   \return \type{List[InstanceContainer]} containers matching the criteria.
    def _getFilteredContainersForStack(self, machine_definition=None, material_containers=None, **kwargs):
        # Fill in any default values.
        if machine_definition is None:
            machine_definition = Application.getInstance().getGlobalContainerStack().getBottom()
            quality_definition_id = machine_definition.getMetaDataEntry("quality_definition")
            if quality_definition_id is not None:
                # Redirect to the definition that actually owns the qualities.
                machine_definition = ContainerRegistry.getInstance().findDefinitionContainers(id=quality_definition_id)[0]

        if material_containers is None:
            active_stacks = ExtruderManager.getInstance().getActiveGlobalAndExtruderStacks()
            material_containers = [stack.findContainer(type="material") for stack in active_stacks]

        criteria = kwargs
        filter_by_material = False

        machine_definition = self.getParentMachineDefinition(machine_definition)
        whole_machine_definition = self.getWholeMachineDefinition(machine_definition)
        if whole_machine_definition.getMetaDataEntry("has_machine_quality"):
            definition_id = machine_definition.getMetaDataEntry("quality_definition", whole_machine_definition.getId())
            criteria["definition"] = definition_id

            filter_by_material = whole_machine_definition.getMetaDataEntry("has_materials")
        else:
            criteria["definition"] = "fdmprinter"

        # Stick the material IDs in a set
        if material_containers is None or len(material_containers) == 0:
            filter_by_material = False
        else:
            material_ids = set()
            for material_instance in material_containers:
                if material_instance is not None:
                    # Add the parent material too.
                    for basic_material in self._getBasicMaterials(material_instance):
                        material_ids.add(basic_material.getId())
                    material_ids.add(material_instance.getId())

        containers = ContainerRegistry.getInstance().findInstanceContainers(**criteria)

        result = []
        for container in containers:
            # If the machine specifies we should filter by material, exclude containers that do not match any active material.
            if filter_by_material and container.getMetaDataEntry("material") not in material_ids and not "global_quality" in kwargs:
                continue
            result.append(container)
        return result

    ##  Get the parent machine definition of a machine definition.
    #
    #   \param machine_definition \type{DefinitionContainer} This may be a normal machine definition or
    #               an extruder definition.
    #   \return \type{DefinitionContainer} the parent machine definition. If the given machine
    #               definition doesn't have a parent then it is simply returned.
    def getParentMachineDefinition(self, machine_definition: DefinitionContainer) -> DefinitionContainer:
        container_registry = ContainerRegistry.getInstance()

        machine_entry = machine_definition.getMetaDataEntry("machine")
        if machine_entry is None:
            # We have a normal (whole) machine definition
            quality_definition = machine_definition.getMetaDataEntry("quality_definition")
            if quality_definition is not None:
                # Recurse: the quality_definition points at the parent machine.
                parent_machine_definition = container_registry.findDefinitionContainers(id=quality_definition)[0]
                return self.getParentMachineDefinition(parent_machine_definition)
            else:
                return machine_definition
        else:
            # This looks like an extruder. Find the rest of the machine.
            whole_machine = container_registry.findDefinitionContainers(id=machine_entry)[0]
            parent_machine = self.getParentMachineDefinition(whole_machine)
            if whole_machine is parent_machine:
                # This extruder already belongs to a 'parent' machine def.
                return machine_definition
            else:
                # Look up the corresponding extruder definition in the parent machine definition.
                extruder_position = machine_definition.getMetaDataEntry("position")
                parent_extruder_id = parent_machine.getMetaDataEntry("machine_extruder_trains")[extruder_position]
                return container_registry.findDefinitionContainers(id=parent_extruder_id)[0]

    ##  Get the whole/global machine definition from an extruder definition.
    #
    #    \param machine_definition \type{DefinitionContainer} This may be a normal machine definition or
    #               an extruder definition.
    #    \return \type{DefinitionContainer}
    def getWholeMachineDefinition(self, machine_definition):
        machine_entry = machine_definition.getMetaDataEntry("machine")
        if machine_entry is None:
            # This already is a 'global' machine definition.
            return machine_definition
        else:
            container_registry = ContainerRegistry.getInstance()
            whole_machine = container_registry.findDefinitionContainers(id=machine_entry)[0]
            return whole_machine
return whole_machine
``` |
[
{
"content": "Here is the snippet:\n```python\n# import packages\nimport matplotlib; matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_selection import chi2\nfrom sklearn.metrics import roc_auc_score, roc_curve, auc, precision_score, f1_score... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# import packages\nimport matplotlib; matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_selection import chi2\nfrom sklearn.metrics import roc_auc_score, roc_curve, auc, precision... | ```python
# import packages
import matplotlib; matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.feature_selection import chi2
from sklearn.metrics import roc_auc_score, roc_curve, auc, precision_score, f1_score, mean_squared_error, accuracy_score
# report coefficients
def coef(model, X, X_train, y_train):
    """Tabulate model coefficients with chi2 p-values and derived odds.

    Builds a DataFrame with one row per feature: coefficient, p-value,
    intercept, log-odds (intercept + coefficient), odds, and probability,
    sorted by probability descending.

    NOTE(review): assumes `model` is a fitted sklearn-style logistic model
    exposing `coef_` (shape (1, n_features)) and `intercept_` — confirm
    against callers.
    """
    df_coef = pd.DataFrame(list(zip(X.columns, np.transpose(model.coef_))))
    score, pvalues = chi2(X_train, y_train)
    # zip() wraps each p-value in a 1-tuple; pandas aligns the resulting
    # single-column frame by index when assigning the column.
    df_coef['p-value'] = pd.DataFrame(list(zip(np.transpose(pvalues))))
    df_coef = df_coef.rename(columns = {0:'feature', 1:'coefficient'})
    # Each coefficient arrives as a length-1 array; unwrap to a scalar.
    df_coef['coefficient'] = df_coef['coefficient'].str[0]
    # intercept
    df_intercept = pd.DataFrame(data=model.intercept_,
                                index=[0],
                                columns=['coefficient'])
    df_intercept['feature'] = 'Intercept'
    df_intercept = df_intercept[['feature', 'coefficient']]
    # Overwrite row 0 with the intercept, copy it into its own column,
    # then drop the intercept row so only real features remain.
    df_coef.update(df_intercept)
    df_coef['intercept'] = df_coef.iloc[0,1]
    df_coef = df_coef[df_coef['feature'] != 'Intercept']
    # Per-feature log-odds -> odds -> probability.
    df_coef['log_odds'] = df_coef['intercept'] + df_coef['coefficient']
    df_coef['odds'] = np.exp(df_coef['log_odds'])
    df_coef['probability'] = df_coef['odds'] / (1 + df_coef['odds'])
    df_coef.sort_values('probability', ascending=False, inplace=True)
    return df_coef
# report predictions
def pred(model, X, y, df_offenses):
    """Attach model predictions, actual labels, and SPN ids to the feature frame.

    Note: mutates X in place (the returned frame is the same object),
    adding 'predicted', 'actual', and 'spn' columns.
    """
    predictions = X
    predictions['predicted'] = model.predict(X)
    predictions['actual'] = y
    predictions['spn'] = df_offenses['SPN']
    return predictions
# report accuracy
def accuracy(model, X_test, y_test):
    """Score a fitted binary classifier on held-out data.

    Returns a one-row DataFrame with baseline accuracy, model accuracy,
    their difference, and ROC-AUC / precision / F1 / MSE / accuracy scores
    (all rounded to 2 decimals).

    Side effect: draws the ROC curve and saves it to 'plot_roc.png'.
    """
    accuracy_model = model.score(X_test, y_test)
    # Baseline: always predict the negative class. Assumes 0/1 labels, so
    # y_test.mean() is the positive rate — TODO confirm with callers.
    accuracy_baseline = 1-y_test.mean()
    accuracy_change = accuracy_model - accuracy_baseline
    df_accuracy = pd.DataFrame({'Baseline Accuracy': [accuracy_baseline],
                                'Model Accuracy': [accuracy_model],
                                'Change in Accuracy': [accuracy_change]})
    df_accuracy['Baseline Accuracy'] = round(df_accuracy['Baseline Accuracy'],2)
    df_accuracy['Model Accuracy'] = round(df_accuracy['Model Accuracy'],2)
    df_accuracy['Change in Accuracy'] = round(df_accuracy['Change in Accuracy'],2)
    # ROC
    y_true = y_test
    y_pred = model.predict(X_test)
    df_accuracy['roc_auc_score'] = round(
        roc_auc_score(y_true, y_pred)
        ,2)
    fpr, tpr, threshold = roc_curve(y_true, y_pred)
    roc_auc = auc(fpr, tpr)
    # NOTE(review): the figure is never cleared (no plt.clf()), so repeated
    # calls overlay curves in the saved image — confirm this is intended.
    plt.title('Receiver Operating Characteristic')
    plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
    plt.legend(loc = 'lower right')
    plt.plot([0, 1], [0, 1],'r--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.savefig('plot_roc.png')
    # precision score
    df_accuracy['precision_score'] = round(
        precision_score(y_true, y_pred)
        ,2)
    # f1 score
    df_accuracy['f1_score'] = round(
        f1_score(y_true, y_pred)
        ,2)
    # mean squared error
    df_accuracy['mean_squared_error'] = round(
        mean_squared_error(y_true, y_pred)
        ,2)
    # accuracy score
    df_accuracy['accuracy_score'] = round(
        accuracy_score(y_true, y_pred)
        ,2)
    return df_accuracy
``` |
[
{
"content": "Here is the snippet:\n```python\n#\n# @file TestReadMathML.py\n# @brief Read MathML unit tests\n#\n# @author Akiya Jouraku (Python conversion)\n# @author Ben Bornstein \n#\n# $Id: TestReadMathML.py 11441 2010-07-09 02:22:23Z mhucka $\n# $HeadURL: https://sbml.svn.sourceforge.net/svnroot/sbm... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n#\n# @file TestReadMathML.py\n# @brief Read MathML unit tests\n#\n# @author Akiya Jouraku (Python conversion)\n# @author Ben Bornstein \n#\n# $Id: TestReadMathML.py 11441 2010-07-09 02:22:23Z mhucka $\n# $HeadURL: https://sbml.svn.sourceforge... | ```python
#
# @file TestReadMathML.py
# @brief Read MathML unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id: TestReadMathML.py 11441 2010-07-09 02:22:23Z mhucka $
# $HeadURL: https://sbml.svn.sourceforge.net/svnroot/sbml/trunk/libsbml/src/bindings/python/test/math/TestReadMathML.py $
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/math/test/TestReadMathML.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def util_isInf(*x):
    """Return True when the first argument equals +inf or -inf."""
    return x[0] in (util_PosInf(), util_NegInf())
def util_NaN():
    """Return a float NaN, manufactured as inf - inf."""
    huge = 1e300 * 1e300   # overflows to +inf
    return huge - huge     # inf - inf is NaN
def util_PosInf():
    """Return float positive infinity via double overflow."""
    return 1e300 * 1e300
def util_NegInf():
    """Return float negative infinity via double overflow."""
    return -(1e300 * 1e300)
def wrapString(s):
    """Return s unchanged (identity wrapper)."""
    return s
pass
def MATHML_FOOTER():
    """Closing tag of a MathML document."""
    footer = "</math>"
    return footer
pass
def MATHML_HEADER():
    """Opening <math> tag declaring the MathML namespace."""
    header = "<math xmlns='http://www.w3.org/1998/Math/MathML'>\n"
    return header
pass
def MATHML_HEADER_UNITS():
    """First half of a <math> tag left open for extra namespace attributes."""
    partial_header = "<math xmlns='http://www.w3.org/1998/Math/MathML'\n"
    return partial_header
pass
def MATHML_HEADER_UNITS2():
    """Second half of the units header: sbml namespace plus closing '>'."""
    tail = " xmlns:sbml='http://www.sbml.org/sbml/level3/version1/core'>\n"
    return tail
pass
def XML_HEADER():
    """Standard XML declaration line."""
    declaration = "<?xml version='1.0' encoding='UTF-8'?>\n"
    return declaration
pass
def isnan(x):
    """True iff x is NaN (NaN is the only value not equal to itself)."""
    return not (x == x)
pass
def wrapMathML(s):
    """Wrap s in the XML declaration plus MathML open/close tags."""
    return "".join((XML_HEADER(), MATHML_HEADER(), s, MATHML_FOOTER()))
pass
def wrapMathMLUnits(s):
    """Wrap s in XML + MathML headers that also declare the sbml namespace."""
    return "".join((XML_HEADER(),
                    MATHML_HEADER_UNITS(),
                    MATHML_HEADER_UNITS2(),
                    s,
                    MATHML_FOOTER()))
pass
def wrapXML(s):
    """Prefix s with the XML declaration."""
    return XML_HEADER() + s
pass
class TestReadMathML(unittest.TestCase):
global F
F = None
global N
N = None
def setUp(self):
self.N = None
self.F = None
pass
def tearDown(self):
self.N = None
self.F = None
pass
def test_element_abs(self):
s = wrapMathML("<apply><abs/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "abs(x)" == self.F ))
pass
def test_element_and(self):
s = wrapMathML("<apply> <and/> <ci>a</ci> <ci>b</ci> <ci>c</ci> </apply>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "and(a, b, c)" == self.F ))
pass
def test_element_arccos(self):
s = wrapMathML("<apply><arccos/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "acos(x)" == self.F ))
pass
def test_element_arccosh(self):
s = wrapMathML("<apply><arccosh/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "arccosh(x)" == self.F ))
pass
def test_element_arccot(self):
s = wrapMathML("<apply><arccot/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "arccot(x)" == self.F ))
pass
def test_element_arccoth(self):
s = wrapMathML("<apply><arccoth/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "arccoth(x)" == self.F ))
pass
def test_element_arccsc(self):
s = wrapMathML("<apply><arccsc/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "arccsc(x)" == self.F ))
pass
def test_element_arccsch(self):
s = wrapMathML("<apply><arccsch/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "arccsch(x)" == self.F ))
pass
def test_element_arcsec(self):
s = wrapMathML("<apply><arcsec/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "arcsec(x)" == self.F ))
pass
def test_element_arcsech(self):
s = wrapMathML("<apply><arcsech/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "arcsech(x)" == self.F ))
pass
def test_element_arcsin(self):
s = wrapMathML("<apply><arcsin/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "asin(x)" == self.F ))
pass
def test_element_arcsinh(self):
s = wrapMathML("<apply><arcsinh/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "arcsinh(x)" == self.F ))
pass
def test_element_arctan(self):
s = wrapMathML("<apply><arctan/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "atan(x)" == self.F ))
pass
def test_element_arctanh(self):
s = wrapMathML("<apply><arctanh/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "arctanh(x)" == self.F ))
pass
def test_element_bug_apply_ci_1(self):
s = wrapMathML("<apply>" +
" <ci> Y </ci>" +
" <cn> 1 </cn>" +
"</apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_FUNCTION )
self.assert_(( "Y" == self.N.getName() ))
self.assert_( self.N.getNumChildren() == 1 )
c = self.N.getLeftChild()
self.assert_( c != None )
self.assert_( c.getType() == libsbml.AST_REAL )
self.assert_( c.getReal() == 1 )
self.assert_( c.getNumChildren() == 0 )
pass
def test_element_bug_apply_ci_2(self):
s = wrapMathML("<apply>" +
" <ci> Y </ci>" +
" <csymbol encoding='text' " +
" definitionURL='http://www.sbml.org/sbml/symbols/time'> t </csymbol>" +
"</apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_FUNCTION )
self.assert_(( "Y" == self.N.getName() ))
self.assert_( self.N.getNumChildren() == 1 )
c = self.N.getLeftChild()
self.assert_( c != None )
self.assert_( c.getType() == libsbml.AST_NAME_TIME )
self.assert_(( "t" == c.getName() ))
self.assert_( c.getNumChildren() == 0 )
pass
def test_element_bug_cn_e_notation_1(self):
s = wrapMathML("<cn type='e-notation'> 2 <sep/> -8 </cn>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_REAL_E )
self.assert_( self.N.getMantissa() == 2.0 )
self.assert_( self.N.getExponent() == -8.0 )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_bug_cn_e_notation_2(self):
s = wrapMathML("<cn type='e-notation'> -3 <sep/> 4 </cn>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_REAL_E )
self.assert_( self.N.getMantissa() == -3.0 )
self.assert_( self.N.getExponent() == 4.0 )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_bug_cn_e_notation_3(self):
s = wrapMathML("<cn type='e-notation'> -6 <sep/> -1 </cn>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_REAL_E )
self.assert_( self.N.getMantissa() == -6.0 )
self.assert_( self.N.getExponent() == -1.0 )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_bug_cn_integer_negative(self):
s = wrapMathML("<cn type='integer'> -7 </cn>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_INTEGER )
self.assert_( self.N.getInteger() == -7 )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_bug_csymbol_1(self):
s = wrapMathML("<apply>" +
" <gt/>" +
" <csymbol encoding='text' " +
" definitionURL='http://www.sbml.org/sbml/symbols/time'>time</csymbol>" +
" <cn>5000</cn>" +
"</apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_RELATIONAL_GT )
self.assert_( self.N.getNumChildren() == 2 )
c = self.N.getLeftChild()
self.assert_( c != None )
self.assert_( c.getType() == libsbml.AST_NAME_TIME )
self.assert_(( "time" == c.getName() ))
self.assert_( c.getNumChildren() == 0 )
c = self.N.getRightChild()
self.assert_( c != None )
self.assert_( c.getType() == libsbml.AST_REAL )
self.assert_( c.getReal() == 5000 )
self.assert_( c.getNumChildren() == 0 )
pass
def test_element_bug_csymbol_delay_1(self):
s = wrapMathML("<apply>" +
" <csymbol encoding='text' definitionURL='http://www.sbml.org/sbml/" +
"symbols/delay'> my_delay </csymbol>" +
" <ci> x </ci>" +
" <cn> 0.1 </cn>" +
"</apply>\n")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_FUNCTION_DELAY )
self.assert_(( "my_delay" == self.N.getName() ))
self.assert_( self.N.getNumChildren() == 2 )
c = self.N.getLeftChild()
self.assert_( c != None )
self.assert_( c.getType() == libsbml.AST_NAME )
self.assert_(( "x" == c.getName() ))
self.assert_( c.getNumChildren() == 0 )
c = self.N.getRightChild()
self.assert_( c != None )
self.assert_( c.getType() == libsbml.AST_REAL )
self.assert_( c.getReal() == 0.1 )
self.assert_( c.getNumChildren() == 0 )
pass
def test_element_bug_math_xmlns(self):
s = wrapXML("<foo:math xmlns:foo='http://www.w3.org/1998/Math/MathML'>" +
" <foo:apply>" +
" <foo:plus/> <foo:cn>1</foo:cn> <foo:cn>2</foo:cn>" +
" </foo:apply>" +
"</foo:math>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "1 + 2" == self.F ))
pass
def test_element_ceiling(self):
s = wrapMathML("<apply><ceiling/><cn> 1.6 </cn></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "ceil(1.6)" == self.F ))
pass
def test_element_ci(self):
s = wrapMathML("<ci> x </ci>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_NAME )
self.assert_(( "x" == self.N.getName() ))
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_ci_definitionURL(self):
s = wrapMathML("<ci definitionURL=\"foobar\"> x </ci>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_NAME )
self.assert_(( "x" == self.N.getName() ))
self.assert_( self.N.getNumChildren() == 0 )
self.assert_( self.N.getDefinitionURL().getValue(0) == "foobar" )
pass
def test_element_ci_surrounding_spaces_bug(self):
s = wrapMathML(" <ci> s </ci> ")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_NAME )
self.assert_(( "s" == self.N.getName() ))
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_cn_default(self):
s = wrapMathML("<cn> 12345.7 </cn>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_REAL )
self.assert_( self.N.getReal() == 12345.7 )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_cn_e_notation(self):
s = wrapMathML("<cn type='e-notation'> 12.3 <sep/> 5 </cn>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_REAL_E )
self.assert_( self.N.getMantissa() == 12.3 )
self.assert_( self.N.getExponent() == 5 )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_cn_integer(self):
s = wrapMathML("<cn type='integer'> 12345 </cn>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_INTEGER )
self.assert_( self.N.getInteger() == 12345 )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_cn_rational(self):
s = wrapMathML("<cn type='rational'> 12342 <sep/> 2342342 </cn>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_RATIONAL )
self.assert_( self.N.getNumerator() == 12342 )
self.assert_( self.N.getDenominator() == 2342342 )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_cn_real(self):
s = wrapMathML("<cn type='real'> 12345.7 </cn>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_REAL )
self.assert_( self.N.getReal() == 12345.7 )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_cn_units(self):
s = wrapMathMLUnits("<cn sbml:units=\"mole\"> 12345.7 </cn>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_REAL )
self.assert_( self.N.getReal() == 12345.7 )
self.assert_( self.N.getUnits() == "mole" )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_constants_exponentiale(self):
s = wrapMathML("<exponentiale/>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_CONSTANT_E )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_constants_false(self):
s = wrapMathML("<false/>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_CONSTANT_FALSE )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_constants_infinity(self):
s = wrapMathML("<infinity/>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_REAL )
self.assert_( util_isInf(self.N.getReal()) == True )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_constants_notanumber(self):
s = wrapMathML("<notanumber/>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_REAL )
self.assertEqual( True, isnan(self.N.getReal()) )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_constants_pi(self):
s = wrapMathML("<pi/>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_CONSTANT_PI )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_constants_true(self):
s = wrapMathML("<true/>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_CONSTANT_TRUE )
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_cos(self):
s = wrapMathML("<apply><cos/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "cos(x)" == self.F ))
pass
def test_element_cosh(self):
s = wrapMathML("<apply><cosh/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "cosh(x)" == self.F ))
pass
def test_element_cot(self):
s = wrapMathML("<apply><cot/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "cot(x)" == self.F ))
pass
def test_element_coth(self):
s = wrapMathML("<apply><coth/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "coth(x)" == self.F ))
pass
def test_element_csc(self):
s = wrapMathML("<apply><csc/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "csc(x)" == self.F ))
pass
def test_element_csch(self):
s = wrapMathML("<apply><csch/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "csch(x)" == self.F ))
pass
def test_element_csymbol_avogadro(self):
s = wrapMathML("<csymbol encoding='text' " + "definitionURL='http://www.sbml.org/sbml/symbols/avogadro'> NA </csymbol>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_NAME_AVOGADRO )
self.assert_(( "NA" == self.N.getName() ))
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_csymbol_delay_1(self):
s = wrapMathML("<csymbol encoding='text' " + "definitionURL='http://www.sbml.org/sbml/symbols/delay'> delay </csymbol>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_FUNCTION_DELAY )
self.assert_(( "delay" == self.N.getName() ))
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_csymbol_delay_2(self):
s = wrapMathML("<apply>" +
" <csymbol encoding='text' definitionURL='http://www.sbml.org/sbml/" +
"symbols/delay'> my_delay </csymbol>" +
" <ci> x </ci>" +
" <cn> 0.1 </cn>" +
"</apply>\n")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "my_delay(x, 0.1)" == self.F ))
pass
def test_element_csymbol_delay_3(self):
s = wrapMathML("<apply>" +
" <power/>" +
" <apply>" +
" <csymbol encoding='text' definitionURL='http://www.sbml.org/sbml/" +
"symbols/delay'> delay </csymbol>" +
" <ci> P </ci>" +
" <ci> delta_t </ci>" +
" </apply>\n" +
" <ci> q </ci>" +
"</apply>\n")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "pow(delay(P, delta_t), q)" == self.F ))
pass
def test_element_csymbol_time(self):
s = wrapMathML("<csymbol encoding='text' " + "definitionURL='http://www.sbml.org/sbml/symbols/time'> t </csymbol>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_NAME_TIME )
self.assert_(( "t" == self.N.getName() ))
self.assert_( self.N.getNumChildren() == 0 )
pass
def test_element_eq(self):
s = wrapMathML("<apply> <eq/> <ci>a</ci> <ci>b</ci> <ci>c</ci> </apply>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "eq(a, b, c)" == self.F ))
pass
def test_element_exp(self):
s = wrapMathML("<apply><exp/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "exp(x)" == self.F ))
pass
def test_element_factorial(self):
s = wrapMathML("<apply><factorial/><cn> 5 </cn></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "factorial(5)" == self.F ))
pass
def test_element_floor(self):
s = wrapMathML("<apply><floor/><cn> 1.2 </cn></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "floor(1.2)" == self.F ))
pass
def test_element_function_call_1(self):
s = wrapMathML("<apply> <ci> foo </ci> <ci> x </ci> </apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "foo(x)" == self.F ))
pass
def test_element_function_call_2(self):
s = wrapMathML("<apply> <plus/> <cn> 1 </cn>" +
" <apply> <ci> f </ci> <ci> x </ci> </apply>" +
"</apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "1 + f(x)" == self.F ))
pass
def test_element_geq(self):
s = wrapMathML("<apply> <geq/> <cn>1</cn> <ci>x</ci> <cn>0</cn> </apply>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "geq(1, x, 0)" == self.F ))
pass
def test_element_gt(self):
s = wrapMathML("<apply> <gt/> <infinity/>" +
" <apply> <minus/> <infinity/> <cn>1</cn> </apply>" +
"</apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "gt(INF, INF - 1)" == self.F ))
pass
def test_element_invalid_mathml(self):
invalid = wrapMathML("<lambda definitionURL=\"http://biomodels.net/SBO/#SBO:0000065\">" +
"<bvar>" +
"<ci>c</ci>" +
"</bvar>" +
"<apply>" +
" <ci>c</ci>" +
"</apply>" +
"</lambda>\n")
self.N = libsbml.readMathMLFromString(None)
self.assert_( self.N == None )
self.N = libsbml.readMathMLFromString(invalid)
self.assert_( self.N == None )
pass
def test_element_lambda(self):
s = wrapMathML("<lambda>" +
" <bvar> <ci>x</ci> </bvar>" +
" <apply> <sin/>" +
" <apply> <plus/> <ci>x</ci> <cn>1</cn> </apply>" +
" </apply>" +
"</lambda>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "lambda(x, sin(x + 1))" == self.F ))
pass
def test_element_leq(self):
s = wrapMathML("<apply> <leq/> <cn>0</cn> <ci>x</ci> <cn>1</cn> </apply>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "leq(0, x, 1)" == self.F ))
pass
def test_element_ln(self):
s = wrapMathML("<apply><ln/><ci> a </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "log(a)" == self.F ))
pass
def test_element_log_1(self):
s = wrapMathML("<apply> <log/> <logbase> <cn type='integer'> 3 </cn> </logbase>" +
" <ci> x </ci>" +
"</apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "log(3, x)" == self.F ))
pass
def test_element_log_2(self):
s = wrapMathML("<apply> <log/> <ci> x </ci> </apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "log10(x)" == self.F ))
pass
def test_element_lt(self):
s = wrapMathML("<apply> <lt/> <apply> <minus/> <infinity/> <infinity/> </apply>" +
" <cn>1</cn>" +
"</apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "lt(INF - INF, 1)" == self.F ))
pass
def test_element_math(self):
s = wrapXML("<math xmlns='http://www.w3.org/1998/Math/MathML'/>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.assert_( self.N.getType() == libsbml.AST_UNKNOWN )
pass
def test_element_neq(self):
s = wrapMathML("<apply> <neq/> <notanumber/> <notanumber/> </apply>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "neq(NaN, NaN)" == self.F ))
pass
def test_element_not(self):
s = wrapMathML("<apply> <not/> <ci> TooShabby </ci> </apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "not(TooShabby)" == self.F ))
pass
def test_element_operator_plus(self):
s = wrapMathML("<apply> <plus/> <cn> 1 </cn> <cn> 2 </cn> <cn> 3 </cn> </apply>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "1 + 2 + 3" == self.F ))
pass
def test_element_operator_times(self):
s = wrapMathML("<apply> <times/> <ci> x </ci> <ci> y </ci> <ci> z </ci> </apply>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "x * y * z" == self.F ))
pass
def test_element_or(self):
s = wrapMathML("<apply> <or/> <ci>a</ci> <ci>b</ci> <ci>c</ci> <ci>d</ci> </apply>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "or(a, b, c, d)" == self.F ))
pass
def test_element_piecewise(self):
s = wrapMathML("<piecewise>" +
" <piece>" +
" <apply> <minus/> <ci>x</ci> </apply>" +
" <apply> <lt/> <ci>x</ci> <cn>0</cn> </apply>" +
" </piece>" +
" <piece>" +
" <cn>0</cn>" +
" <apply> <eq/> <ci>x</ci> <cn>0</cn> </apply>" +
" </piece>" +
" <piece>" +
" <ci>x</ci>" +
" <apply> <gt/> <ci>x</ci> <cn>0</cn> </apply>" +
" </piece>" +
"</piecewise>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "piecewise(-x, lt(x, 0), 0, eq(x, 0), x, gt(x, 0))" == self.F ))
pass
def test_element_piecewise_otherwise(self):
s = wrapMathML("<piecewise>" +
" <piece>" +
" <cn>0</cn>" +
" <apply> <lt/> <ci>x</ci> <cn>0</cn> </apply>" +
" </piece>" +
" <otherwise>" +
" <ci>x</ci>" +
" </otherwise>" +
"</piecewise>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "piecewise(0, lt(x, 0), x)" == self.F ))
pass
def test_element_power(self):
s = wrapMathML("<apply><power/> <ci>x</ci> <cn>3</cn> </apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "pow(x, 3)" == self.F ))
pass
def test_element_root_1(self):
s = wrapMathML("<apply> <root/> <degree> <cn type='integer'> 3 </cn> </degree>" +
" <ci> a </ci>" +
"</apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "root(3, a)" == self.F ))
pass
def test_element_root_2(self):
s = wrapMathML("<apply> <root/> <ci> a </ci> </apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "sqrt(a)" == self.F ))
pass
def test_element_sec(self):
s = wrapMathML("<apply><sec/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "sec(x)" == self.F ))
pass
def test_element_sech(self):
s = wrapMathML("<apply><sech/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "sech(x)" == self.F ))
pass
def test_element_sin(self):
s = wrapMathML("<apply><sin/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "sin(x)" == self.F ))
pass
def test_element_sinh(self):
s = wrapMathML("<apply><sinh/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "sinh(x)" == self.F ))
pass
def test_element_tan(self):
s = wrapMathML("<apply><tan/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "tan(x)" == self.F ))
pass
def test_element_tanh(self):
s = wrapMathML("<apply><tanh/><ci> x </ci></apply>")
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "tanh(x)" == self.F ))
pass
def test_element_xor(self):
s = wrapMathML("<apply> <xor/> <ci>a</ci> <ci>b</ci> <ci>b</ci> <ci>a</ci> </apply>"
)
self.N = libsbml.readMathMLFromString(s)
self.assert_( self.N != None )
self.F = libsbml.formulaToString(self.N)
self.assert_(( "xor(a, b, b, a)" == self.F ))
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestReadMathML))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\"\"\"\n@script : model.py\n@created : 2012-11-04 01:48:15.090\n@changed : 2012-11-08 10:26:47.237\n@creator : mkpy.py --version 0.0.27\n@author : Igor A.Vetrov <qprostu... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\"\"\"\n@script : model.py\n@created : 2012-11-04 01:48:15.090\n@changed : 2012-11-08 10:26:47.237\n@creator : mkpy.py --version 0.0.27\n@author : Igor A... | ```python
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
@script : model.py
@created : 2012-11-04 01:48:15.090
@changed : 2012-11-08 10:26:47.237
@creator : mkpy.py --version 0.0.27
@author : Igor A.Vetrov <qprostu@gmail.com>
@about : model of TODO application
"""
from __future__ import print_function
from argparse import ArgumentParser
from .sqlite import Table, Field
__revision__ = 11
__project__ = "Todo"
def getRevision():
"""Callback method for -r/--revision option"""
return str(__revision__)
class Priority(Table):
"""Priority model class"""
_fields = [
( "code" , Field(fieldtype="integer", notnull=True, primary=True) ),
( "name" , Field(notnull=True) ),
( "created", Field(fieldtype="timestamp", default="(datetime('now', 'localtime'))") ),
]
def __init__(self, db):
self.__class__._tableName = __project__ + self.__class__.__name__
super(Priority, self).__init__(db)
def setDefaults(self):
self.exec( "insert into {} (code, name) values(?, ?)".format(self._tableName), (1, "Low") )
self.exec( "insert into {} (code, name) values(?, ?)".format(self._tableName), (2, "Medium") )
self.exec( "insert into {} (code, name) values(?, ?)".format(self._tableName), (3, "High") )
self.db.commit()
def getCode(self, name):
row = self.select( "select code from {} where name=?;".format(self._tableName), (name,) )[0]
return row["code"]
def getName(self, _id):
return self.getValue(_id, "name")[0]
def listNames(self):
rows = self.select( "select name from {};".format(self._tableName) )
return [row["name"] for row in rows]
class Task(Table):
"""Task model class"""
_fields = [
( "name" , Field(notnull=True) ),
( "priority" , Field(fieldtype="integer", default=2, foreignkey="TodoPriority(code)") ),
( "deadline" , Field(fieldtype="date", notnull=True, default="(date('now', 'localtime'))") ),
# status may be 0 or 1, if 1 - task completed
( "status" , Field(fieldtype="integer", default=0, index=True) ),
( "completed", Field(fieldtype="timestamp") ),
( "created" , Field(fieldtype="timestamp", default="(datetime('now', 'localtime'))") ),
]
def __init__(self, db):
self.__class__._tableName = __project__ + self.__class__.__name__
super(Task, self).__init__(db)
if __name__ == '__main__':
# setup global parser
parser = ArgumentParser(description='Program description goes here...')
parser.add_argument('-r', '--revision', action='version', version='%(prog)s revision: ' + getRevision())
args = parser.parse_args()
# end of model.py
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\nimport sys, atexit\nimport pickle\nimport bluetooth as bt\nfrom cmd import Cmd\nfrom service import SerialPortService\nimport socket\n\nbanner = '''Welcome to btsdk shell.\n\\tType 'help()' for help\n'''\n\nclass Shell(Cmd):\n p... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\nimport sys, atexit\nimport pickle\nimport bluetooth as bt\nfrom cmd import Cmd\nfrom service import SerialPortService\nimport socket\n\nbanner = '''Welcome to btsdk shell.\n\\tType 'help()' for help\n'''\n\nclass Sh... | ```python
import sys, atexit
import pickle
import bluetooth as bt
from cmd import Cmd
from service import SerialPortService
import socket
banner = '''Welcome to btsdk shell.
\tType 'help()' for help
'''
class Shell(Cmd):
prompt = 'btsdk> '
intro = "btsdk shell"
def __init__(self):
Cmd.__init__(self)
self.device = None
self.port = None
self.service = None
socket.setdefaulttimeout(0.1)
atexit.register(self.on_exit)
print 'loading saved data',
try:
f = open('save.p', 'rb')
saved = pickle.load(f)
self.device = saved['device']
print 'OK'
except (IOError):
print 'FAILED'
if self.device:
print 'DUT:', self.device
else:
print 'No DUT. please scan and select.'
def on_exit(self):
saved = {'device': self.device}
pickle.dump( saved, open( "save.p", "wb" ) )
def do_scan(self, arg):
'scan for bluetooth devices'
self.devices = bt.discover_devices(lookup_names = True)
print '%s\t%s\t\t\t%s' %('Index', 'Address', 'Name')
print
for i in range(len(self.devices)):
d = self.devices[i]
print '%d\t%s\t%s' %(i, d[0], d[1])
print 'please select one as DUT'
def do_select(self, line):
'''select [index]
select the device'''
if line == '':
print 'missing parameter'
return
i = int(line)
if i >= len(self.devices):
print 'Index %d is out of range 0..%d' %(i, len(self.devices) - 1)
return
d = self.devices[i]
self.device = d[0]
print 'selected <%d> %s %s' %(i,d[0], d[1])
def do_conn(self, line):
'connect to DUT'
if self.port:
print 'already connected'
else:
print 'connecting ...'
records = bt.find_service(uuid=bt.SERIAL_PORT_CLASS,
address=self.device)
if len(records) == 0:
print "port not found"
return
portnum = records[0]['port']
print 'SPP port is', portnum
self.port = bt.BluetoothSocket(bt.RFCOMM)
self.port.connect((self.device, portnum))
self.service = SerialPortService(self.port)
self.service.start()
print 'done'
def do_disc(self, line):
'disconnect'
if self.port:
print 'disconnecting ...'
self.service.end()
self.port = None
print 'done'
else:
print 'not connected'
def do_led(self, line):
'set led color r,g,b,w'
self.service.send("1234")
def do_q(self, line):
'quit'
print 'bye'
return True
def do_EOF(self, line):
'quit the system'
print 'bye'
return True
shell = Shell()
shell.cmdloop()
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"\nHelper functions used in views.\n\"\"\"\n\nimport csv\nimport urllib2\nimport time\nimport threading\nimport locale\nfrom json import dumps\nfrom functools import wraps\nfrom datetime i... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nHelper functions used in views.\n\"\"\"\n\nimport csv\nimport urllib2\nimport time\nimport threading\nimport locale\nfrom json import dumps\nfrom functools import wraps\... | ```python
# -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
import csv
import urllib2
import time
import threading
import locale
from json import dumps
from functools import wraps
from datetime import datetime
from lxml import etree
from functools import wraps
from flask import Response
from presence_analyzer.main import app
import logging
log = logging.getLogger(__name__) # pylint: disable-msg=C0103
CACHE = {}
TIMESTAMPS = {}
LOCK = threading.Lock()
def memorize(key, period):
"""
Memorizing decorator. Returning cached data
if its validity period is not expired
"""
def _decoration_wrapper(func):
@wraps(func)
def _caching_wrapper(*args, **kwargs):
cache_key = key
now = time.time()
if TIMESTAMPS.get(cache_key, now) > now:
return CACHE[cache_key]
ret = func(*args, **kwargs)
CACHE[cache_key] = ret
TIMESTAMPS[cache_key] = now + period
return ret
return _caching_wrapper
return _decoration_wrapper
def locker(func):
"""
Global thread locking decorator.
"""
@wraps(func)
def _lock_wrapper(*args, **kwargs):
with LOCK:
ret = func(*args, **kwargs)
return ret
return _lock_wrapper
def jsonify(function):
"""
Creates a response with the JSON representation of wrapped function result.
"""
@wraps(function)
def inner(*args, **kwargs):
return Response(dumps(function(*args, **kwargs)),
mimetype='application/json')
return inner
def refresh_xml():
"""
Download user XML data file from sargo server and save it as
current config file.
"""
req = urllib2.urlopen(app.config['XML_URL'])
with open(app.config['USER_DATA_XML'], 'wb') as xmlfile:
while True:
chunk = req.read(16 * 1024)
if not chunk:
break
xmlfile.write(chunk)
def get_user_data():
"""
Extracts user data from file specified in config.
"""
data = {}
with open(app.config['USER_DATA_XML'], 'r') as xmlfile:
tree = etree.parse(xmlfile)
root = tree.getroot()
config = root[0]
server = {
u'host': unicode(config.findtext('host')),
u'port': unicode(config.findtext('port')),
u'protocol': unicode(config.findtext('protocol')),
}
data['server'] = "%(protocol)s://%(host)s:%(port)s" % server
users = root[1]
locale.setlocale(locale.LC_ALL, 'pl_PL.UTF-8')
data['users'] = [
{
u'id': int(user.attrib['id']),
u'name': unicode(user.findtext('name')),
u'avatar': unicode(user.findtext('avatar'))
}
for user in sorted(
users,
key=lambda user: user.findtext('name'),
cmp=locale.strcoll
)
]
locale.setlocale(locale.LC_ALL, (None, None))
return data
@locker
@memorize('get_data', 30)
def get_data():
"""
Extracts presence data from CSV file and groups it by user_id.
It creates structure like this:
data = {
'user_id': {
datetime.date(2013, 10, 1): {
'start': datetime.time(9, 0, 0),
'end': datetime.time(17, 30, 0),
},
datetime.date(2013, 10, 2): {
'start': datetime.time(8, 30, 0),
'end': datetime.time(16, 45, 0),
},
}
}
"""
data = {}
with open(app.config['DATA_CSV'], 'r') as csvfile:
presence_reader = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(presence_reader):
if len(row) != 4:
# ignore header and footer lines
continue
try:
user_id = int(row[0])
date = datetime.strptime(row[1], '%Y-%m-%d').date()
start = datetime.strptime(row[2], '%H:%M:%S').time()
end = datetime.strptime(row[3], '%H:%M:%S').time()
except (ValueError, TypeError):
log.debug('Problem with line %d: ', i, exc_info=True)
data.setdefault(user_id, {})[date] = {'start': start, 'end': end}
return data
def group_by_weekday(items):
"""
Groups presence entries by weekday.
"""
result = {i: [] for i in range(7)}
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()].append(interval(start, end))
return result
def group_by_weekday_start_end(items):
"""
Groups presence entries by weekday start end.
"""
result = {i: {'starts': [], 'ends': []} for i in range(7)}
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()]['starts'].append(
seconds_since_midnight(start))
result[date.weekday()]['ends'].append(
seconds_since_midnight(end))
return result
def seconds_since_midnight(time):
"""
Calculates amount of seconds since midnight.
"""
return time.hour * 3600 + time.minute * 60 + time.second
def interval(start, end):
"""
Calculates inverval in seconds between two datetime.time objects.
"""
return seconds_since_midnight(end) - seconds_since_midnight(start)
def mean(items):
"""
Calculates arithmetic mean. Returns zero for empty lists.
"""
return float(sum(items)) / len(items) if len(items) > 0 else 0
``` |
[
{
"content": "```python\n#!/usr/bin/pytho\nimport sys\nimport warnings\n\ndef help_message():\n print ' '\n print 'sht11'\n print ' --help Displays this usage message '\n print ' --detect Displays if the sensor is connected on Raspberry Pi'\n prin... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/pytho\nimport sys\nimport warnings\n\ndef help_message():\n print ' '\n print 'sht11'\n print ' --help Displays this usage message '\n print ' --detect Displays if the sensor is connected on Raspber... | ```python
#!/usr/bin/pytho
import sys
import warnings
def help_message():
print ' '
print 'sht11'
print ' --help Displays this usage message '
print ' --detect Displays if the sensor is connected on Raspberry Pi'
print ' -h , --humidity Displays the Humidity '
print ' -t , --temperature Displays the Temperature'
def sht11( sensor ):
try:
warnings.filterwarnings("ignore")
from sht1x.Sht1x import Sht1x as SHT1x
dataPin = 5
clkPin = 3
sht1x = SHT1x(dataPin, clkPin, SHT1x.GPIO_BOARD)
if (sensor == "humidity"):
mesurement = sht1x.read_humidity()
elif (sensor == "temperature"):
mesurement = sht1x.read_temperature_C()
return mesurement
except:
return "false"
def detect():
var = sht11("temperature")
if (type(var) == int or type(var) == float):
print 'sht11'
if __name__ == '__main__':
args = len(sys.argv)
while ( args > 1):
args -= 1
if(sys.argv[args] == "--help"):
help_message()
elif(sys.argv[args] == "--detect"):
detect()
elif(sys.argv[args] == "-t" or sys.argv[args] == "--temperature"):
temperature = sht11("temperature")
print ("temperature %.1f" % temperature)
elif(sys.argv[args] == "-h" or sys.argv[args] == "--humidity"):
humidity = sht11("humidity")
print ("humidity %.1f" % humidity)
``` |
[
{
"content": "```python\n# import nnpy\n# import time\n\n# s=nnpy.Socket(nnpy.AF_SP,nnpy.REP)\n#\n#\n# s.bind('tcp://127.0.0.1:5555')\n#\n# # s.setsockopt(option=nnpy.RCVBUF,level=nnpy.SOL_SOCKET,value=1024*1024)\n# # s.getsockopt(option=nnpy.RCVBUF,level=nnpy.SOL_SOCKET)\n#\n# counter=0\n# while True:\n# t... | [
{
"content": "<|memory_start|>```python\n# import nnpy\n# import time\n\n# s=nnpy.Socket(nnpy.AF_SP,nnpy.REP)\n#\n#\n# s.bind('tcp://127.0.0.1:5555')\n#\n# # s.setsockopt(option=nnpy.RCVBUF,level=nnpy.SOL_SOCKET,value=1024*1024)\n# # s.getsockopt(option=nnpy.RCVBUF,level=nnpy.SOL_SOCKET)\n#\n# counter=0\n# whil... | ```python
# import nnpy
# import time
# s=nnpy.Socket(nnpy.AF_SP,nnpy.REP)
#
#
# s.bind('tcp://127.0.0.1:5555')
#
# # s.setsockopt(option=nnpy.RCVBUF,level=nnpy.SOL_SOCKET,value=1024*1024)
# # s.getsockopt(option=nnpy.RCVBUF,level=nnpy.SOL_SOCKET)
#
# counter=0
# while True:
# try:
# res=s.recv(flags=nnpy.DONTWAIT)
# counter+=1
# except Exception as e:
# if not str(e)=='Resource temporarily unavailable':
# raise(e)
# from IPython import embed
# print ("DEBUG NOW 9")
# embed()
# raise RuntimeError("stop debug here")
# time.sleep(1)
# print(counter)
# continue
#
# s.send("ok")
# # print(res)
from JumpScale import j
def MyMethod(hello):
import time
counter=0
while True:
time.sleep(1)
counter+=1
print("%s:%s"%(hello,counter))
import asyncio
import logging
import aionn
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
counter=0
async def reader(socket,counter):
while True:
# print('receiving...')
name = await socket.recv()
# print('received:', value)
p = j.core.processmanager.startProcess(method=MyMethod,args={"hello":name.decode()},name=name.decode())
counter+=1
print(counter)
async def logger():
counter=0
while True:
for key,p in j.core.processmanager.processes.items():
p.sync()
print(p.new_stdout)
counter+=1
await asyncio.sleep(1)
print("logger:%s"%counter)
async def main(loop):
await asyncio.wait([reader(socket,counter),logger()]),
socket = aionn.Socket(aionn.AF_SP, aionn.PULL)
socket.bind('tcp://*:5555')
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n\"\"\"Graphical user interface.\"\"\"\n\nimport collections\nimport ctypes\n\nimport sdl2\n\nimport hienoi.renderer\nfrom hienoi._common import GLProfile, GraphicsAPI, ParticleDisplay, UserData\nfrom hienoi._vectors import Vector2i, Vector2f, V... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n\"\"\"Graphical user interface.\"\"\"\n\nimport collections\nimport ctypes\n\nimport sdl2\n\nimport hienoi.renderer\nfrom hienoi._common import GLProfile, GraphicsAPI, ParticleDisplay, UserData\nfrom hienoi._vectors import Vecto... | ```python
"""Graphical user interface."""
import collections
import ctypes
import sdl2
import hienoi.renderer
from hienoi._common import GLProfile, GraphicsAPI, ParticleDisplay, UserData
from hienoi._vectors import Vector2i, Vector2f, Vector4f
class NavigationAction(object):
"""Enumerator for the current nagivation action.
Attributes
----------
NONE
MOVE
ZOOM
"""
NONE = 0
MOVE = 1
ZOOM = 2
_Handles = collections.namedtuple(
'_Handles', (
'window',
'renderer',
))
_GLHandles = collections.namedtuple(
'_GLHandles', (
'context',
))
_RGBMasks = collections.namedtuple(
'_RGBMasks', (
'red',
'green',
'blue',
))
_FIT_VIEW_REL_PADDING = 2.0
if sdl2.SDL_BYTEORDER == sdl2.SDL_LIL_ENDIAN:
_RGB_MASKS = _RGBMasks(red=0x000000FF, green=0x0000FF00, blue=0x00FF0000)
else:
_RGB_MASKS = _RGBMasks(red=0x00FF0000, green=0x0000FF00, blue=0x000000FF)
class GUI(object):
"""GUI.
Parameters
----------
window_title : str
Title for the window.
window_position : hienoi.Vector2i
Initial window position.
window_size : hienoi.Vector2i
Initial window size.
window_flags : int
SDL2 window flags.
view_aperture_x : float
Initial length in world units to be shown on the X axis.
view_zoom_range : hienoi.Vector2f
Zoom value range for the view.
mouse_wheel_step : float
Coefficient value for each mouse wheel step.
grid_density : float
See :attr:`GUI.grid_density`.
grid_adaptive_threshold : float
See :attr:`GUI.grid_adaptive_threshold`.
show_grid : bool
See :attr:`GUI.show_grid`.
background_color : hienoi.Vector4f
See :attr:`GUI.background_color`.
grid_color : hienoi.Vector4f
See :attr:`GUI.grid_color`.
grid_origin_color : hienoi.Vector4f
See :attr:`GUI.grid_origin_color`.
particle_display : int
See :attr:`GUI.particle_display`.
point_size : int
See :attr:`GUI.point_size`.
edge_feather : float
See :attr:`GUI.edge_feather`.
stroke_width : float
See :attr:`GUI.stroke_width`.
initialize_callback : function
Callback function to initialize any GUI state.
It takes a single argument ``gui``, an instance of this class.
on_event_callback : function
Callback function ran during the event polling.
It takes 3 arguments: ``gui``, an instance of this class,
``data``, some data to pass back and forth between the caller and this
callback function, and ``event``, the event fired.
renderer : dict
Keyword arguments for the configuration of the renderer. See the
parameters for the class :class:`hienoi.renderer.Renderer`.
Attributes
----------
view_position : hienoi.Vector2f
Position of the view (camera).
view_zoom : float
Current zoom value for the view.
grid_density : float
Density of the grid.
A density of 10.0 means that there are around 10 grid divisions
displayed on the X axis. A grid division unit represents a fixed length
in world units, meaning that the actual grid density changes depending
on the view's zoom.
show_grid : bool
True to show the grid.
background_color : hienoi.Vector4f
Color for the background.
grid_color : hienoi.Vector4f
Color for the grid.
grid_origin_color : hienoi.Vector4f
Color for the origin axis of the grid.
particle_display : int
Display mode for the particles. Available values are enumerated in the
:class:`~hienoi.ParticleDisplay` class.
point_size : int
Size of the particles in pixels when the display mode is set to
:attr:`~hienoi.ParticleDisplay.POINT`.
edge_feather : float
Feather fall-off in pixels to apply to objects drawn with displays such
as :attr:`~hienoi.ParticleDisplay.CIRCLE` or
:attr:`~hienoi.ParticleDisplay.DISC`.
stroke_width : float
Width of the stroke in pixels to apply to objects drawn with displays
such as :attr:`~hienoi.ParticleDisplay.CIRCLE`.
quit : bool
``True`` to signal to the application that it should quit.
has_view_changed : bool
``True`` if the view state has just been changed following an event. It
is reset to ``False`` whenever :meth:`poll_events` is called.
user_data : object
Attribute reserved for any user data.
"""
def __init__(self,
window_title='hienoi',
window_position=Vector2i(sdl2.SDL_WINDOWPOS_CENTERED,
sdl2.SDL_WINDOWPOS_CENTERED),
window_size=Vector2i(800, 600),
window_flags=sdl2.SDL_WINDOW_RESIZABLE,
view_aperture_x=100.0,
view_zoom_range=Vector2f(1e-6, 1e+6),
mouse_wheel_step=0.01,
grid_density=10.0,
grid_adaptive_threshold=3.0,
show_grid=True,
background_color=Vector4f(0.15, 0.15, 0.15, 1.0),
grid_color=Vector4f(0.85, 0.85, 0.85, 0.05),
grid_origin_color=Vector4f(0.85, 0.25, 0.25, 0.25),
particle_display=ParticleDisplay.DISC,
point_size=4,
edge_feather=2.0,
stroke_width=0.0,
initialize_callback=None,
on_event_callback=None,
renderer=None):
renderer = {} if renderer is None else renderer
if sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO) != 0:
raise RuntimeError(sdl2.SDL_GetError().decode())
renderer_info = hienoi.renderer.get_info()
if renderer_info.api == GraphicsAPI.OPENGL:
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MAJOR_VERSION,
renderer_info.major_version)
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MINOR_VERSION,
renderer_info.minor_version)
if renderer_info.profile == GLProfile.CORE:
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK,
sdl2.SDL_GL_CONTEXT_PROFILE_CORE)
self._handles = _create_handles(window_title, window_position,
window_size, window_flags,
renderer_info)
self._renderer = hienoi.renderer.Renderer(**renderer)
self._initial_view_aperture_x = view_aperture_x
self._view_zoom_range = view_zoom_range
self._mouse_wheel_step = mouse_wheel_step
self._grid_adaptive_threshold = grid_adaptive_threshold
self._on_event_callback = on_event_callback
self._listen_for_navigation = False
self._is_view_manipulated = False
self.view_position = Vector2f(0.0, 0.0)
self._view_zoom = 1.0
self.grid_density = grid_density
self.show_grid = show_grid
self.background_color = background_color
self.grid_color = grid_color
self.grid_origin_color = grid_origin_color
self.particle_display = particle_display
self.point_size = point_size
self.edge_feather = edge_feather
self.stroke_width = stroke_width
self._navigation_action = NavigationAction.NONE
self.quit = False
self.user_data = UserData()
if initialize_callback:
initialize_callback(self)
@property
def view_zoom(self):
return self._view_zoom
@view_zoom.setter
def view_zoom(self, value):
self._view_zoom = max(self._view_zoom_range[0],
min(self._view_zoom_range[1], value))
@property
def navigation_action(self):
return self._navigation_action
@property
def has_view_changed(self):
return self._has_view_changed
def poll_events(self, scene_state, data=None):
"""Process each event in the queue.
Parameters
----------
scene_state : hienoi.renderer.SceneState
Scene state.
data : object
Data to pass back and forth between the caller and the function set
for the 'on event' callback.
"""
self._has_view_changed = False
event = sdl2.SDL_Event()
while sdl2.SDL_PollEvent(ctypes.byref(event)) != 0:
event_type = event.type
if event_type == sdl2.SDL_QUIT:
self._on_quit_event(event.quit)
elif event_type == sdl2.SDL_WINDOWEVENT:
self._on_window_event(event.window)
elif event_type == sdl2.SDL_KEYDOWN:
self._on_key_down_event(event.key, scene_state)
elif event_type == sdl2.SDL_KEYUP:
self._on_key_up_event(event.key)
elif event_type == sdl2.SDL_MOUSEBUTTONDOWN:
self._on_mouse_button_down_event(event.button)
elif event_type == sdl2.SDL_MOUSEBUTTONUP:
self._on_mouse_button_up_event(event.button)
elif event_type == sdl2.SDL_MOUSEWHEEL:
self._on_mouse_wheel_event(event.wheel)
elif event_type == sdl2.SDL_MOUSEMOTION:
self._on_mouse_motion_event(event.motion)
if self._on_event_callback:
self._on_event_callback(self, data, event)
if self.quit:
break
def render(self, scene_state):
"""Render a new frame.
Parameters
----------
scene_state : hienoi.renderer.SceneState
Scene state.
"""
renderer_state = hienoi.renderer.State(
window_size=self.get_window_size(),
view_position=self.view_position,
view_zoom=self._view_zoom,
origin=self.world_to_screen(Vector2f(0.0, 0.0)),
initial_view_aperture_x=self._initial_view_aperture_x,
view_aperture=self.get_view_aperture(),
grid_density=self.grid_density,
grid_adaptive_threshold=self._grid_adaptive_threshold,
background_color=self.background_color,
grid_color=self.grid_color,
grid_origin_color=self.grid_origin_color,
show_grid=self.show_grid,
particle_display=self.particle_display,
point_size=self.point_size,
edge_feather=self.edge_feather,
stroke_width=self.stroke_width,
)
self._renderer.render(renderer_state, scene_state)
if hienoi.renderer.get_info().api == GraphicsAPI.OPENGL:
sdl2.SDL_GL_SwapWindow(self._handles.window)
def terminate(self):
"""Cleanup the GUI resources."""
self._renderer.cleanup()
if hienoi.renderer.get_info().api == GraphicsAPI.OPENGL:
sdl2.SDL_GL_DeleteContext(self._handles.renderer.context)
sdl2.SDL_DestroyWindow(self._handles.window)
sdl2.SDL_Quit()
def get_window_size(self):
"""Retrieve the window size.
Returns
-------
hienoi.Vector2i
The window size.
"""
window_size_x = ctypes.c_int()
window_size_y = ctypes.c_int()
sdl2.SDL_GetWindowSize(self._handles.window,
ctypes.byref(window_size_x),
ctypes.byref(window_size_y))
return Vector2i(window_size_x.value, window_size_y.value)
def get_view_aperture(self):
"""Retrieve the view aperture.
It represents the area in world units covered by the view.
Returns
-------
hienoi.Vector2f
The view aperture.
"""
window_size = self.get_window_size()
aperture_x = self._initial_view_aperture_x / self._view_zoom
return Vector2f(aperture_x, aperture_x * window_size.y / window_size.x)
def get_mouse_position(self):
"""Retrieve the mouse position in screen space.
Returns
-------
hienoi.Vector2i
The mouse position.
"""
position_x = ctypes.c_int()
position_y = ctypes.c_int()
sdl2.SDL_GetMouseState(ctypes.byref(position_x),
ctypes.byref(position_y))
return Vector2i(position_x.value, position_y.value)
def get_screen_to_world_ratio(self):
"""Retrieve the ratio to convert a sreen unit into a world unit.
Returns
-------
float
The screen to world ratio.
"""
window_size = self.get_window_size()
aperture_x = self._initial_view_aperture_x / self._view_zoom
return aperture_x / window_size.x
def screen_to_world(self, point):
"""Convert a point from screen space to world space coordinates.
Parameters
----------
point : hienoi.Vector2i
Point in screen space coordinates.
Returns
-------
hienoi.Vector2f
The point in world space coordinates.
"""
window_size = self.get_window_size()
view_aperture = self.get_view_aperture()
return Vector2f(
(self.view_position.x
+ (point.x - window_size.x / 2.0)
* view_aperture.x / window_size.x),
(self.view_position.y
- (point.y - window_size.y / 2.0)
* view_aperture.y / window_size.y))
def world_to_screen(self, point):
"""Convert a point from world space to screen space coordinates.
Parameters
----------
point : hienoi.Vector2f
Point in world space coordinates.
Returns
-------
hienoi.Vector2i
The point in screen space coordinates.
"""
window_size = self.get_window_size()
view_aperture = self.get_view_aperture()
return Vector2i(
int(round(
(window_size.x / view_aperture.x)
* (-self.view_position.x + point.x + view_aperture.x / 2.0))),
int(round(
(window_size.y / view_aperture.y)
* (self.view_position.y - point.y + view_aperture.y / 2.0))))
def write_snapshot(self, filename):
"""Take a snapshot of the view and write it as a BMP image.
Parameters
----------
filename : str
Destination filename.
"""
pixel_size = 4
pixels = self._renderer.read_pixels()
surface = sdl2.SDL_CreateRGBSurfaceFrom(
pixels.data, pixels.width, pixels.height,
8 * pixel_size, pixels.width * pixel_size,
_RGB_MASKS.red, _RGB_MASKS.green, _RGB_MASKS.blue, 0)
sdl2.SDL_SaveBMP(surface, filename)
sdl2.SDL_FreeSurface(surface)
def _reset_view(self):
"""Reset the view position and zoom."""
self.view_position = Vector2f(0.0, 0.0)
self.view_zoom = 1.0
self._has_view_changed = True
def _fit_view(self, scene_state):
"""Fit the view to the scene."""
if len(scene_state.particles) > 1:
window_size = self.get_window_size()
initial_size = Vector2f(
self._initial_view_aperture_x,
self._initial_view_aperture_x * window_size.y / window_size.x)
lower_bounds = scene_state.lower_bounds
upper_bounds = scene_state.upper_bounds
required_size = (upper_bounds - lower_bounds).iscale(
_FIT_VIEW_REL_PADDING)
required_size = Vector2f(
max(required_size.x,
initial_size.x * self._view_zoom_range[0]),
max(required_size.y,
initial_size.y * self._view_zoom_range[0]))
self.view_position = (lower_bounds + upper_bounds).iscale(0.5)
self.view_zoom = min(initial_size.x / required_size.x,
initial_size.y / required_size.y)
elif len(scene_state.particles) == 1:
self.view_position = Vector2f(
*scene_state.particles['position'][0])
self.view_zoom = 1.0
else:
self._reset_view()
self._has_view_changed = True
def _on_quit_event(self, event):
"""Event 'on quit'."""
self.quit = True
def _on_window_event(self, event):
"""Event 'on window'."""
if event.event == sdl2.SDL_WINDOWEVENT_SIZE_CHANGED:
self._renderer.resize(event.data1, event.data2)
def _on_key_down_event(self, event, scene_state):
"""Event 'on key down'."""
code = event.keysym.sym
modifier = event.keysym.mod
if modifier == sdl2.KMOD_NONE:
if code == sdl2.SDLK_SPACE:
self._listen_for_navigation = True
elif code == sdl2.SDLK_d:
self.particle_display = (
(self.particle_display + 1) % (ParticleDisplay._LAST + 1))
elif code == sdl2.SDLK_f:
self._fit_view(scene_state)
elif code == sdl2.SDLK_g:
self.show_grid = not self.show_grid
elif code == sdl2.SDLK_r:
self._reset_view()
def _on_key_up_event(self, event):
"""Event 'on key up'."""
code = event.keysym.sym
if code == sdl2.SDLK_SPACE:
self._listen_for_navigation = False
def _on_mouse_button_down_event(self, event):
"""Event 'on mouse button down'."""
if self._listen_for_navigation:
if event.button == sdl2.SDL_BUTTON_LEFT:
self._navigation_action = NavigationAction.MOVE
elif event.button == sdl2.SDL_BUTTON_RIGHT:
self._navigation_action = NavigationAction.ZOOM
def _on_mouse_button_up_event(self, event):
"""Event 'on mouse button up'."""
if (event.button == sdl2.SDL_BUTTON_LEFT
or event.button == sdl2.SDL_BUTTON_RIGHT):
self._navigation_action = NavigationAction.NONE
def _on_mouse_wheel_event(self, event):
"""Event 'on mouse wheel'."""
scale = 1.0 + self._mouse_wheel_step * event.y
self.view_zoom *= scale
self._has_view_changed = True
def _on_mouse_motion_event(self, event):
"""Event 'on mouse motion'."""
window_size = self.get_window_size()
view_aperture = self.get_view_aperture()
if self._navigation_action == NavigationAction.MOVE:
self.view_position.set(
(self.view_position.x
- event.xrel * view_aperture.x / window_size.x),
(self.view_position.y
+ event.yrel * view_aperture.y / window_size.y))
self._has_view_changed = True
elif self._navigation_action == NavigationAction.ZOOM:
scale = (1.0
+ float(event.xrel) / window_size.x
- float(event.yrel) / window_size.y)
self.view_zoom *= scale
self._has_view_changed = True
def _create_handles(window_title, window_position, window_size, window_flags,
renderer_info):
"""Create the SDL2 handles."""
window_flags = sdl2.SDL_WINDOW_SHOWN | window_flags
if renderer_info.api == GraphicsAPI.OPENGL:
window_flags |= sdl2.SDL_WINDOW_OPENGL
window = sdl2.SDL_CreateWindow(
window_title.encode(),
window_position.x, window_position.y,
window_size.x, window_size.y,
window_flags)
if not window:
raise RuntimeError(sdl2.SDL_GetError().decode())
context = sdl2.SDL_GL_CreateContext(window)
if not context:
raise RuntimeError(sdl2.SDL_GetError().decode())
# Try to disable the vertical synchronization. It applies to the active
# context and thus needs to be called after `SDL_GL_CreateContext`.
sdl2.SDL_GL_SetSwapInterval(0)
return _Handles(
window=window,
renderer=_GLHandles(context=context))
``` |
[
{
"content": "Repeat the following code:\n```python\n\"\"\"Tests for maintenance.py\"\"\"\nimport unittest\nimport random\nimport time\nimport datetime\nfrom string import ascii_letters\n\nimport webtest\n\nfrom google.appengine.ext import testbed\nfrom google.appengine.ext import ndb\n\nimport maintenance\nimp... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n\"\"\"Tests for maintenance.py\"\"\"\nimport unittest\nimport random\nimport time\nimport datetime\nfrom string import ascii_letters\n\nimport webtest\n\nfrom google.appengine.ext import testbed\nfrom google.appengine.ext import ndb\n\nimport ... | ```python
"""Tests for maintenance.py"""
import unittest
import random
import time
import datetime
from string import ascii_letters
import webtest
from google.appengine.ext import testbed
from google.appengine.ext import ndb
import maintenance
import models
def random_string(length=10, chars=ascii_letters + ' '):
"""Generate a string of gibberish"""
return ''.join([random.choice(chars) for _ in range(length)])
def random_record(timestamp=time.time()):
"""Generate a random record with the given time
Args:
timestamp (optional[float]) - the timestamp for the record. Default is
the current time.
Returns:
models.Message with random data and the specified timestamp.
"""
return models.Message(
username=random_string(),
text=random_string(50),
timestamp=datetime.datetime.fromtimestamp(timestamp)
)
class MaintenanceTest(unittest.TestCase):
"""Does tests"""
def setUp(self):
"""gets ready to go"""
reload(maintenance)
self.testapp = webtest.TestApp(maintenance.app)
self.testbed = testbed.Testbed()
self.testbed.activate()
# set up testbed stubs for ndb
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub()
ndb.get_context().clear_cache()
def tearDown(self):
"""turns off testbed"""
self.testbed.deactivate()
def trim_test(self):
"""Ensures it trims the correct number of records
and trims the correct ones"""
# first we are going to have to fill up the datastore with
# some stuff
for _ in range(10):
random_record().put()
# should be 10 in store
assert models.Message.query().count() == 10
maintenance.MAX_RECORDS = 5
maintenance.trim_records(5)
assert models.Message.query().count() == 5
def endpoint_test(self):
"""Make sure that the actual handler does the job it is supposed to.
At this stage this is only trim excess items.
"""
for _ in range(10):
random_record().put()
# should be 10 in store
assert models.Message.query().count() == 10
maintenance.MAX_RECORDS = 5
self.testapp.get('/_maintain')
assert models.Message.query().count() == 5
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Train the CTC model (CSJ corpus).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os.path import j... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Train the CTC model (CSJ corpus).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom ... | ```python
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Train the CTC model (CSJ corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile, abspath
import sys
import time
import tensorflow as tf
from setproctitle import setproctitle
import yaml
import shutil
sys.path.append(abspath('../../../'))
from experiments.csj.data.load_dataset_ctc import Dataset
from experiments.csj.metrics.ctc import do_eval_cer
from utils.io.labels.sparsetensor import list2sparsetensor
from utils.training.learning_rate_controller import Controller
from utils.training.plot import plot_loss, plot_ler
from utils.training.multi_gpu import average_gradients
from utils.directory import mkdir_join, mkdir
from utils.parameter import count_total_parameters
from models.ctc.ctc import CTC
def do_train(model, params, gpu_indices):
"""Run training.
Args:
model: the model to train
params (dict): A dictionary of parameters
gpu_indices (list): GPU indices
"""
# Load dataset
train_data = Dataset(
data_type='train', train_data_size=params['train_data_size'],
label_type=params['label_type'],
batch_size=params['batch_size'], max_epoch=params['num_epoch'],
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=True, sort_stop_epoch=params['sort_stop_epoch'],
num_gpu=len(gpu_indices))
dev_data = Dataset(
data_type='dev', train_data_size=params['train_data_size'],
label_type=params['label_type'],
batch_size=params['batch_size'], splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=False, num_gpu=len(gpu_indices))
# Tell TensorFlow that the model will be built into the default graph
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to track the global step
global_step = tf.Variable(0, name='global_step', trainable=False)
# Set optimizer
learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate')
optimizer = model._set_optimizer(
params['optimizer'], learning_rate_pl)
# Calculate the gradients for each model tower
total_grads_and_vars, total_losses = [], []
decode_ops, ler_ops = [], []
all_devices = ['/gpu:%d' % i_gpu for i_gpu in range(len(gpu_indices))]
# NOTE: /cpu:0 is prepared for evaluation
with tf.variable_scope(tf.get_variable_scope()):
for i_gpu in range(len(all_devices)):
with tf.device(all_devices[i_gpu]):
with tf.name_scope('tower_gpu%d' % i_gpu) as scope:
# Define placeholders in each tower
model.create_placeholders()
# Calculate the total loss for the current tower of the
# model. This function constructs the entire model but
# shares the variables across all towers.
tower_loss, tower_logits = model.compute_loss(
model.inputs_pl_list[i_gpu],
model.labels_pl_list[i_gpu],
model.inputs_seq_len_pl_list[i_gpu],
model.keep_prob_pl_list[i_gpu],
scope)
tower_loss = tf.expand_dims(tower_loss, axis=0)
total_losses.append(tower_loss)
# Reuse variables for the next tower
tf.get_variable_scope().reuse_variables()
# Calculate the gradients for the batch of data on this
# tower
tower_grads_and_vars = optimizer.compute_gradients(
tower_loss)
# Gradient clipping
tower_grads_and_vars = model._clip_gradients(
tower_grads_and_vars)
# TODO: Optionally add gradient noise
# Keep track of the gradients across all towers
total_grads_and_vars.append(tower_grads_and_vars)
# Add to the graph each operation per tower
decode_op_tower = model.decoder(
tower_logits,
model.inputs_seq_len_pl_list[i_gpu],
beam_width=params['beam_width'])
decode_ops.append(decode_op_tower)
ler_op_tower = model.compute_ler(
decode_op_tower, model.labels_pl_list[i_gpu])
ler_op_tower = tf.expand_dims(ler_op_tower, axis=0)
ler_ops.append(ler_op_tower)
# Aggregate losses, then calculate average loss
total_losses = tf.concat(axis=0, values=total_losses)
loss_op = tf.reduce_mean(total_losses, axis=0)
ler_ops = tf.concat(axis=0, values=ler_ops)
ler_op = tf.reduce_mean(ler_ops, axis=0)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers
average_grads_and_vars = average_gradients(total_grads_and_vars)
# Apply the gradients to adjust the shared variables.
train_op = optimizer.apply_gradients(average_grads_and_vars,
global_step=global_step)
# Define learning rate controller
lr_controller = Controller(
learning_rate_init=params['learning_rate'],
decay_start_epoch=params['decay_start_epoch'],
decay_rate=params['decay_rate'],
decay_patient_epoch=params['decay_patient_epoch'],
lower_better=True)
# Build the summary tensor based on the TensorFlow collection of
# summaries
summary_train = tf.summary.merge(model.summaries_train)
summary_dev = tf.summary.merge(model.summaries_dev)
# Add the variable initializer operation
init_op = tf.global_variables_initializer()
# Create a saver for writing training checkpoints
saver = tf.train.Saver(max_to_keep=None)
# Count total parameters
parameters_dict, total_parameters = count_total_parameters(
tf.trainable_variables())
for parameter_name in sorted(parameters_dict.keys()):
print("%s %d" % (parameter_name, parameters_dict[parameter_name]))
print("Total %d variables, %s M parameters" %
(len(parameters_dict.keys()),
"{:,}".format(total_parameters / 1000000)))
csv_steps, csv_loss_train, csv_loss_dev = [], [], []
csv_ler_train, csv_ler_dev = [], []
# Create a session for running operation on the graph
# NOTE: Start running operations on the Graph. allow_soft_placement
# must be set to True to build towers on GPU, as some of the ops do not
# have GPU implementations.
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)) as sess:
# Instantiate a SummaryWriter to output summaries and the graph
summary_writer = tf.summary.FileWriter(
model.save_path, sess.graph)
# Initialize parameters
sess.run(init_op)
# Train model
start_time_train = time.time()
start_time_epoch = time.time()
start_time_step = time.time()
cer_dev_best = 1
not_improved_epoch = 0
learning_rate = float(params['learning_rate'])
for step, (data, is_new_epoch) in enumerate(train_data):
# Create feed dictionary for next mini batch (train)
inputs, labels, inputs_seq_len, _ = data
feed_dict_train = {}
for i_gpu in range(len(gpu_indices)):
feed_dict_train[model.inputs_pl_list[i_gpu]
] = inputs[i_gpu]
feed_dict_train[model.labels_pl_list[i_gpu]] = list2sparsetensor(
labels[i_gpu], padded_value=train_data.padded_value)
feed_dict_train[model.inputs_seq_len_pl_list[i_gpu]
] = inputs_seq_len[i_gpu]
feed_dict_train[model.keep_prob_pl_list[i_gpu]
] = 1 - float(params['dropout'])
feed_dict_train[learning_rate_pl] = learning_rate
# Update parameters
sess.run(train_op, feed_dict=feed_dict_train)
if (step + 1) % int(params['print_step'] / len(gpu_indices)) == 0:
# Create feed dictionary for next mini batch (dev)
inputs, labels, inputs_seq_len, _ = dev_data.next()[0]
feed_dict_dev = {}
for i_gpu in range(len(gpu_indices)):
feed_dict_dev[model.inputs_pl_list[i_gpu]
] = inputs[i_gpu]
feed_dict_dev[model.labels_pl_list[i_gpu]] = list2sparsetensor(
labels[i_gpu], padded_value=dev_data.padded_value)
feed_dict_dev[model.inputs_seq_len_pl_list[i_gpu]
] = inputs_seq_len[i_gpu]
feed_dict_dev[model.keep_prob_pl_list[i_gpu]] = 1.0
# Compute loss
loss_train = sess.run(loss_op, feed_dict=feed_dict_train)
loss_dev = sess.run(loss_op, feed_dict=feed_dict_dev)
csv_steps.append(step)
csv_loss_train.append(loss_train)
csv_loss_dev.append(loss_dev)
# Change to evaluation mode
for i_gpu in range(len(gpu_indices)):
feed_dict_train[model.keep_prob_pl_list[i_gpu]] = 1.0
# Compute accuracy & update event files
ler_train, summary_str_train = sess.run(
[ler_op, summary_train], feed_dict=feed_dict_train)
ler_dev, summary_str_dev = sess.run(
[ler_op, summary_dev], feed_dict=feed_dict_dev)
csv_ler_train.append(ler_train)
csv_ler_dev.append(ler_dev)
summary_writer.add_summary(summary_str_train, step + 1)
summary_writer.add_summary(summary_str_dev, step + 1)
summary_writer.flush()
duration_step = time.time() - start_time_step
print("Step %d (epoch: %.3f): loss = %.3f (%.3f) / ler = %.3f (%.3f) / lr = %.5f (%.3f min)" %
(step + 1, train_data.epoch_detail, loss_train, loss_dev, ler_train, ler_dev,
learning_rate, duration_step / 60))
sys.stdout.flush()
start_time_step = time.time()
# Save checkpoint and evaluate model per epoch
if is_new_epoch:
duration_epoch = time.time() - start_time_epoch
print('-----EPOCH:%d (%.3f min)-----' %
(train_data.epoch, duration_epoch / 60))
# Save fugure of loss & ler
plot_loss(csv_loss_train, csv_loss_dev, csv_steps,
save_path=model.save_path)
plot_ler(csv_ler_train, csv_ler_dev, csv_steps,
label_type=params['label_type'],
save_path=model.save_path)
if train_data.epoch >= params['eval_start_epoch']:
start_time_eval = time.time()
print('=== Dev Data Evaluation ===')
cer_dev_epoch = do_eval_cer(
session=sess,
decode_ops=decode_ops,
model=model,
dataset=dev_data,
label_type=params['label_type'],
train_data_size=params['train_data_size'],
eval_batch_size=1)
print(' CER: %f %%' % (cer_dev_epoch * 100))
if cer_dev_epoch < cer_dev_best:
cer_dev_best = cer_dev_epoch
not_improved_epoch = 0
print('■■■ ↑Best Score (CER)↑ ■■■')
# Save model only (check point)
checkpoint_file = join(
model.save_path, 'model.ckpt')
save_path = saver.save(
sess, checkpoint_file, global_step=train_data.epoch)
print("Model saved in file: %s" % save_path)
else:
not_improved_epoch += 1
duration_eval = time.time() - start_time_eval
print('Evaluation time: %.3f min' %
(duration_eval / 60))
# Early stopping
if not_improved_epoch == params['not_improved_patient_epoch']:
break
# Update learning rate
learning_rate = lr_controller.decay_lr(
learning_rate=learning_rate,
epoch=train_data.epoch,
value=cer_dev_epoch)
start_time_epoch = time.time()
duration_train = time.time() - start_time_train
print('Total time: %.3f hour' % (duration_train / 3600))
# Training was finished correctly
with open(join(model.model_dir, 'complete.txt'), 'w') as f:
f.write('')
def main(config_path, model_save_path, gpu_indices):
# Load a config file (.yml)
with open(config_path, "r") as f:
config = yaml.load(f)
params = config['param']
# Except for a blank label
if params['label_type'] == 'kana':
params['num_classes'] = 146
elif params['label_type'] == 'kana_divide':
params['num_classes'] = 147
elif params['label_type'] == 'kanji':
if params['train_data_size'] == 'train_subset':
params['num_classes'] = 2981
elif params['train_data_size'] == 'train_fullset':
params['num_classes'] = 3385
elif params['label_type'] == 'kanji_divide':
if params['train_data_size'] == 'train_subset':
params['num_classes'] = 2982
elif params['train_data_size'] == 'train_fullset':
params['num_classes'] = 3386
else:
raise TypeError
# Model setting
model = CTC(encoder_type=params['encoder_type'],
input_size=params['input_size'],
splice=params['splice'],
num_stack=params['num_stack'],
num_units=params['num_units'],
num_layers=params['num_layers'],
num_classes=params['num_classes'],
lstm_impl=params['lstm_impl'],
use_peephole=params['use_peephole'],
parameter_init=params['weight_init'],
clip_grad_norm=params['clip_grad_norm'],
clip_activation=params['clip_activation'],
num_proj=params['num_proj'],
weight_decay=params['weight_decay'])
# Set process name
setproctitle(
'tf_csj_' + model.name + '_' + params['train_data_size'] + '_' + params['label_type'])
model.name += '_' + str(params['num_units'])
model.name += '_' + str(params['num_layers'])
model.name += '_' + params['optimizer']
model.name += '_lr' + str(params['learning_rate'])
if params['num_proj'] != 0:
model.name += '_proj' + str(params['num_proj'])
if params['dropout'] != 0:
model.name += '_drop' + str(params['dropout'])
if params['num_stack'] != 1:
model.name += '_stack' + str(params['num_stack'])
if params['weight_decay'] != 0:
model.name += '_wd' + str(params['weight_decay'])
if params['bottleneck_dim'] != 0:
model.name += '_bottle' + str(params['bottleneck_dim'])
if len(gpu_indices) >= 2:
model.name += '_gpu' + str(len(gpu_indices))
# Set save path
model.save_path = mkdir_join(
model_save_path, 'ctc', params['label_type'],
params['train_data_size'], model.name)
# Reset model directory
model_index = 0
new_model_path = model.save_path
while True:
if isfile(join(new_model_path, 'complete.txt')):
# Training of the first model have been finished
model_index += 1
new_model_path = model.save_path + '_' + str(model_index)
elif isfile(join(new_model_path, 'config.yml')):
# Training of the first model have not been finished yet
model_index += 1
new_model_path = model.save_path + '_' + str(model_index)
else:
break
model.save_path = mkdir(new_model_path)
# Save config file
shutil.copyfile(config_path, join(model.save_path, 'config.yml'))
sys.stdout = open(join(model.save_path, 'train.log'), 'w')
# TODO(hirofumi): change to logger
do_train(model=model, params=params, gpu_indices=gpu_indices)
if __name__ == '__main__':
args = sys.argv
if len(args) != 3 and len(args) != 4:
raise ValueError
main(config_path=args[1], model_save_path=args[2],
gpu_indices=list(map(int, args[3].split(','))))
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ndirect PAS\nPython Application Services\n----------------------------------------------------------------------------\n(C) direct Netware Group - All rights reserved\nhttps://www.direct-netware.de/r... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ndirect PAS\nPython Application Services\n----------------------------------------------------------------------------\n(C) direct Netware Group - All rights reserved\nhttps://www.dir... | ```python
# -*- coding: utf-8 -*-
"""
direct PAS
Python Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?pas;upnp
The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
#echo(pasUPnPVersion)#
#echo(__FILEPATH__)#
"""
from dNG.data.settings import Settings
from dNG.data.supports_mixin import SupportsMixin
from dNG.data.upnp.client_settings_mixin import ClientSettingsMixin
from dNG.module.named_loader import NamedLoader
from dNG.plugins.hook import Hook
from dNG.runtime.exception_log_trap import ExceptionLogTrap
from dNG.runtime.io_exception import IOException
from dNG.runtime.type_exception import TypeException
from dNG.runtime.value_exception import ValueException
from .criteria_definition import CriteriaDefinition
class Resources(ClientSettingsMixin, SupportsMixin):
"""
The "Resources" search instance is used to execute UPnP searches.
:author: direct Netware Group et al.
:copyright: direct Netware Group - All rights reserved
:package: pas
:subpackage: upnp
:since: v0.2.00
:license: https://www.direct-netware.de/redirect?licenses;gpl
GNU General Public License 2
"""
SORT_ASCENDING = "+"
"""
Ascending sort direction
"""
SORT_DESCENDING = "-"
"""
Descending sort direction
"""
def __init__(self):
"""
Constructor __init__(Resources)
:since: v0.2.00
"""
ClientSettingsMixin.__init__(self)
SupportsMixin.__init__(self)
self.criteria_definition = None
"""
UPnP search criteria definition instance
"""
self.executed = False
"""
True if search has been executed and results are ready
"""
self.limit = 50
"""
UPnP resource search results limit
"""
self.limit_max = int(Settings.get("pas_upnp_resource_search_limit_max", 50))
"""
UPnP resource search results limit
"""
self.log_handler = NamedLoader.get_singleton("dNG.data.logging.LogHandler", False)
"""
The LogHandler is called whenever debug messages should be logged or errors
happened.
"""
self.offset = 0
"""
UPnP resource search results offset
"""
self.resources = [ ]
"""
UPnP resources matching the criteria definition
"""
self.total = 0
"""
UPnP resource search results count from all segments
"""
self.root_resource = None
"""
UPnP root resource for searching matches
"""
self.sort_tuples = [ ]
"""
Sort list to be applied
"""
self.supported_features['sortable'] = self._supports_sortable
#
def add_sort_definition(self, _property, direction):
"""
Adds a sort definition.
:param _property: UPnP property to sort
:param direction: Sort direction
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.add_sort_definition({1}, {2})- (#echo(__LINE__)#)", self, _property, direction, context = "pas_upnp")
if (direction not in ( Resources.SORT_ASCENDING, Resources.SORT_DESCENDING )): raise TypeException("Sort direction given is invalid")
self.sort_tuples.append(( _property, direction ))
#
def _execute(self):
"""
Executes the search to calculate the total number of matches and the UPnP
resource search results list as defined by "offset" and "limit".
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}._execute()- (#echo(__LINE__)#)", self, context = "pas_upnp")
if (self.criteria_definition is None): raise ValueException("UPnP search criteria instance is not defined")
if (self.executed): raise IOException("UPnP resource search should not be executed twice")
segments = self._get_segments()
self.executed = True
if (type(segments) is list and len(segments) > 0):
offset = self.offset
limit = self.limit
for segment in segments:
with ExceptionLogTrap("pas_upnp"):
segment.set_criteria_definition(self.criteria_definition)
resources_count = segment.get_count()
self.total += resources_count
if (offset >= resources_count): offset -= resources_count
elif (resources_count > 0
and (limit is None or len(self.resources) < self.limit)
):
segment.set_sort_tuples(self.sort_tuples)
segment.set_offset(offset)
if (limit is not None): segment.set_limit(limit)
resources_list = segment.get_list()
resources_list_count = len(resources_list)
if (offset > 0):
if (offset > resources_list_count): offset -= resources_list_count
else: offset = 0
#
if (limit is not None): limit -= resources_list_count
self.resources += resources_list
#
#
#
#
#
def get(self, position):
"""
Returns the UPnP resource search result at the given position.
:param position: Position of the UPnP resource search result to be returned
:return: (object) UPnP resource; None if position is undefined
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.get({1:d})- (#echo(__LINE__)#)", self, position, context = "pas_upnp")
results_list = self.get_list()
return (results_list[position]
if (position >= 0 and len(results_list) > position) else
None
)
#
def get_count(self):
"""
Returns the total number of matches.
:return: (int) Number of matches
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.get_count()- (#echo(__LINE__)#)", self, context = "pas_upnp")
if (not self.executed): self._execute()
return self.total
#
def get_list(self):
"""
Returns the list of UPnP resource search results as defined by "offset" and
"limit".
:return: (list) List of search results
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.get_list()- (#echo(__LINE__)#)", self, context = "pas_upnp")
if (not self.executed): self._execute()
return self.resources
#
def _get_segments(self):
"""
Returns the search segment instances.
:return: (list) List of search segments; None if not registered
:since: v0.2.00
"""
return (Hook.call("dNG.pas.upnp.Resource.getSearchSegments",
criteria_definition = self.criteria_definition
)
if (self.root_resource is None) else
Hook.call("dNG.pas.upnp.Resource.getSearchSegments",
id = self.root_resource.get_resource_id(),
criteria_definition = self.criteria_definition
)
)
#
def set_criteria_definition(self, criteria_definition):
"""
Sets the UPnP search criteria definition used.
:param criteria_definition: Criteria definition instance
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.set_criteria_definition()- (#echo(__LINE__)#)", self, context = "pas_upnp")
if (not isinstance(criteria_definition, CriteriaDefinition)): raise TypeException("UPnP search criteria instance given is invalid")
if (self.executed): raise IOException("UPnP resource search can not be modified after execution")
self.criteria_definition = criteria_definition
#
def set_limit(self, limit):
"""
Sets the UPnP resource search results limit.
:param limit: Results limit
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.set_limit({1:d})- (#echo(__LINE__)#)", self, limit, context = "pas_upnp")
if (self.executed): raise IOException("UPnP resource search can not be modified after execution")
self.limit = (limit if (limit < self.limit_max) else self.limit_max)
#
def set_offset(self, offset):
"""
Sets the UPnP resource search results offset.
:param offset: Results offset
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.set_offset({1:d})- (#echo(__LINE__)#)", self, offset, context = "pas_upnp")
if (self.executed): raise IOException("UPnP resource search can not be modified after execution")
self.offset = offset
#
def set_root_resource(self, resource):
"""
Sets the UPnP root resource for searching matches.
:param resource: UPnP search root resource
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.set_root_resource()- (#echo(__LINE__)#)", self, context = "pas_upnp")
if (self.executed): raise IOException("UPnP resource search can not be modified after execution")
self.root_resource = resource
#
def set_sort_criteria(self, criteria_list):
"""
Sets the UPnP sort criteria for search matches.
:param criteria_list: UPnP search sort criteria list
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.set_sort_criteria()- (#echo(__LINE__)#)", self, context = "pas_upnp")
self.sort_tuples = [ ]
for criteria in criteria_list:
criteria_first_char = criteria[:1]
if (criteria_first_char == "+" or criteria_first_char == "-"): criteria = criteria[1:]
direction = (Resources.SORT_ASCENDING if (criteria_first_char == "+") else Resources.SORT_DESCENDING)
self.sort_tuples.append(( criteria, direction ))
#
#
def _supports_sortable(self):
"""
Returns false if sorting is not supported.
:return: (bool) True if sorting of search results is supported
:since: v0.2.00
"""
segments = self._get_segments()
return (type(segments) is list and len(segments) == 1)
#
#
``` |
[
{
"content": "```python\n# Copyright 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\nimport telemetry.timeline.async_slice as async_slice_module\nimport telemetry.timeline.event_container as event_container\... | [
{
"content": "<|memory_start|>```python\n# Copyright 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\nimport telemetry.timeline.async_slice as async_slice_module\nimport telemetry.timeline.event_container as ... | ```python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.async_slice as async_slice_module
import telemetry.timeline.event_container as event_container
import telemetry.timeline.flow_event as flow_event_module
import telemetry.timeline.sample as sample_module
import telemetry.timeline.slice as slice_module
class Thread(event_container.TimelineEventContainer):
"""A Thread stores all the trace events collected for a particular
thread. We organize the synchronous slices on a thread by "subrows," where
subrow 0 has all the root slices, subrow 1 those nested 1 deep, and so on.
The asynchronous slices are stored in an AsyncSliceGroup object.
"""
def __init__(self, process, tid):
super(Thread, self).__init__('thread %s' % tid, parent=process)
self.tid = tid
self._async_slices = []
self._flow_events = []
self._samples = []
self._toplevel_slices = []
self._all_slices = []
# State only valid during import.
self._open_slices = []
self._newly_added_slices = []
@property
def toplevel_slices(self):
return self._toplevel_slices
@property
def all_slices(self):
return self._all_slices
@property
def samples(self):
return self._samples
@property
def async_slices(self):
return self._async_slices
@property
def open_slice_count(self):
return len(self._open_slices)
def IterChildContainers(self):
return
yield # pylint: disable=unreachable
def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
if event_type_predicate(slice_module.Slice):
for s in self._newly_added_slices:
if event_predicate(s):
yield s
for s in self._all_slices:
if event_predicate(s):
yield s
if event_type_predicate(async_slice_module.AsyncSlice):
for async_slice in self._async_slices:
if event_predicate(async_slice):
yield async_slice
for sub_slice in async_slice.IterEventsInThisContainerRecrusively():
if event_predicate(sub_slice):
yield sub_slice
if event_type_predicate(flow_event_module.FlowEvent):
for flow_event in self._flow_events:
if event_predicate(flow_event):
yield flow_event
if event_type_predicate(sample_module.Sample):
for sample in self._samples:
if event_predicate(sample):
yield sample
def AddSample(self, category, name, timestamp, args=None):
if len(self._samples) and timestamp < self._samples[-1].start:
raise ValueError(
'Samples must be added in increasing timestamp order')
sample = sample_module.Sample(
self, category, name, timestamp, args=args)
self._samples.append(sample)
def AddAsyncSlice(self, async_slice):
self._async_slices.append(async_slice)
def AddFlowEvent(self, flow_event):
self._flow_events.append(flow_event)
def BeginSlice(self, category, name, timestamp, thread_timestamp=None,
args=None):
"""Opens a new slice for the thread.
Calls to beginSlice and endSlice must be made with
non-monotonically-decreasing timestamps.
* category: Category to which the slice belongs.
* name: Name of the slice to add.
* timestamp: The timetsamp of the slice, in milliseconds.
* thread_timestamp: Thread specific clock (scheduled) timestamp of the
slice, in milliseconds.
* args: Arguments associated with
Returns newly opened slice
"""
if len(self._open_slices) > 0 and timestamp < self._open_slices[-1].start:
raise ValueError(
'Slices must be added in increasing timestamp order')
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
self._open_slices.append(new_slice)
new_slice.did_not_finish = True
self.PushSlice(new_slice)
return new_slice
def EndSlice(self, end_timestamp, end_thread_timestamp=None):
""" Ends the last begun slice in this group and pushes it onto the slice
array.
* end_timestamp: Timestamp when the slice ended in milliseconds
* end_thread_timestamp: Timestamp when the scheduled time of the slice ended
in milliseconds
returns completed slice.
"""
if not len(self._open_slices):
raise ValueError(
'EndSlice called without an open slice')
curr_slice = self._open_slices.pop()
if end_timestamp < curr_slice.start:
raise ValueError(
'Slice %s end time is before its start.' % curr_slice.name)
curr_slice.duration = end_timestamp - curr_slice.start
# On Windows, it is possible to have a value for |end_thread_timestamp|
# but not for |curr_slice.thread_start|, because it takes some time to
# initialize the thread time timer.
if curr_slice.thread_start != None and end_thread_timestamp != None:
curr_slice.thread_duration = (end_thread_timestamp -
curr_slice.thread_start)
curr_slice.did_not_finish = False
return curr_slice
def PushCompleteSlice(self, category, name, timestamp, duration,
thread_timestamp, thread_duration, args=None):
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
if duration is None:
new_slice.did_not_finish = True
else:
new_slice.duration = duration
new_slice.thread_duration = thread_duration
self.PushSlice(new_slice)
return new_slice
def PushMarkSlice(
self, category, name, timestamp, thread_timestamp, args=None):
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
self.PushSlice(new_slice)
return new_slice
def PushSlice(self, new_slice):
self._newly_added_slices.append(new_slice)
return new_slice
def AutoCloseOpenSlices(self, max_timestamp, max_thread_timestamp):
for s in self._newly_added_slices:
if s.did_not_finish:
s.duration = max_timestamp - s.start
assert s.duration >= 0
if s.thread_start != None:
s.thread_duration = max_thread_timestamp - s.thread_start
assert s.thread_duration >= 0
self._open_slices = []
def IsTimestampValidForBeginOrEnd(self, timestamp):
if not len(self._open_slices):
return True
return timestamp >= self._open_slices[-1].start
def FinalizeImport(self):
self._BuildSliceSubRows()
def _BuildSliceSubRows(self):
"""This function works by walking through slices by start time.
The basic idea here is to insert each slice as deep into the subrow
list as it can go such that every subslice is fully contained by its
parent slice.
Visually, if we start with this:
0: [ a ]
1: [ b ]
2: [c][d]
To place this slice:
[e]
We first check row 2's last item, [d]. [e] wont fit into [d] (they dont
even intersect). So we go to row 1. That gives us [b], and [d] wont fit
into that either. So, we go to row 0 and its last slice, [a]. That can
completely contain [e], so that means we should add [e] as a subslice
of [a]. That puts it on row 1, yielding:
0: [ a ]
1: [ b ][e]
2: [c][d]
If we then get this slice:
[f]
We do the same deepest-to-shallowest walk of the subrows trying to fit
it. This time, it doesn't fit in any open slice. So, we simply append
it to row 0 (a root slice):
0: [ a ] [f]
1: [ b ][e]
"""
def CompareSlices(s1, s2):
if s1.start == s2.start:
# Break ties by having the slice with the greatest
# end timestamp come first.
return cmp(s2.end, s1.end)
return cmp(s1.start, s2.start)
assert len(self._toplevel_slices) == 0
assert len(self._all_slices) == 0
if not len(self._newly_added_slices):
return
self._all_slices.extend(self._newly_added_slices)
sorted_slices = sorted(self._newly_added_slices, cmp=CompareSlices)
root_slice = sorted_slices[0]
self._toplevel_slices.append(root_slice)
for s in sorted_slices[1:]:
if not self._AddSliceIfBounds(root_slice, s):
root_slice = s
self._toplevel_slices.append(root_slice)
self._newly_added_slices = []
def _AddSliceIfBounds(self, root, child):
"""Adds a child slice to a root slice its proper row.
Return False if the child slice is not in the bounds
of the root slice.
Because we know that the start time of child is >= the start time
of all other slices seen so far, we can just check the last slice
of each row for bounding.
"""
# The source trace data is in microseconds but we store it as milliseconds
# in floating-point. Since we can't represent micros as millis perfectly,
# two end=start+duration combos that should be the same will be slightly
# different. Round back to micros to ensure equality below.
child_end_micros = round(child.end * 1000)
root_end_micros = round(root.end * 1000)
if child.start >= root.start and child_end_micros <= root_end_micros:
if len(root.sub_slices) > 0:
if self._AddSliceIfBounds(root.sub_slices[-1], child):
return True
child.parent_slice = root
root.AddSubSlice(child)
return True
return False
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n# -*- coding: utf8 -*-\n\nfrom qstatpretty.ttyutil.unicode import unicode, ulen\n\nDELIMITERS_FULL = {\n\n 'header_tl': u\"┌\",\n 'header_t': u\"─\",\n 'header_tr': u\"┐\",\n 'header_b': u\"─\",\n 'header_bl': u\"├\",\n... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n# -*- coding: utf8 -*-\n\nfrom qstatpretty.ttyutil.unicode import unicode, ulen\n\nDELIMITERS_FULL = {\n\n 'header_tl': u\"┌\",\n 'header_t': u\"─\",\n 'header_tr': u\"┐\",\n 'header_b': u\"─\",\n 'header_bl... | ```python
# -*- coding: utf8 -*-
from qstatpretty.ttyutil.unicode import unicode, ulen
DELIMITERS_FULL = {
'header_tl': u"┌",
'header_t': u"─",
'header_tr': u"┐",
'header_b': u"─",
'header_bl': u"├",
'header_l': u"│",
'header_r': u"│",
'header_br': u"┤",
'header_csep_t': u"┬",
'header_csep_m': u"│",
'header_csep_b': u"┼",
'body_r': u"│",
'body_br': u"┘",
'body_l': u"│",
'body_bl': u"└",
'body_b': u"─",
'body_csep_m': u"│",
'body_csep_b': u"┴",
}
DELIMITERS_COLS = {
'header_tl': u"",
'header_t': u"",
'header_tr': u"",
'header_b': u"─",
'header_bl': u"",
'header_l': u"",
'header_r': u"",
'header_br': u"",
'header_csep_t': u"",
'header_csep_m': u"│",
'header_csep_b': u"┼",
'body_r': u"",
'body_br': u"",
'body_l': u"",
'body_bl': u"",
'body_b': u"",
'body_csep_m': u"│",
'body_csep_b': u"",
}
DELIMITERS_MINIMAL = {
'header_tl': u"",
'header_t': u"",
'header_tr': u"",
'header_b': u"─",
'header_bl': u"",
'header_l': u"",
'header_r': u"",
'header_br': u"",
'header_csep_t': u"",
'header_csep_m': u" ",
'header_csep_b': u" ",
'body_r': u"",
'body_br': u"",
'body_l': u"",
'body_bl': u"",
'body_b': u"",
'body_csep_m': u" ",
'body_csep_b': u"",
}
DELIMITERS = {
'full': DELIMITERS_FULL,
'cols': DELIMITERS_COLS,
'minimal': DELIMITERS_MINIMAL,
}
def pretty_table(tbl, colordef, header_row=True, delimiters=DELIMITERS_FULL):
from .color import COLOR_BLACK, colortext
d = delimiters
max_widths = [max(ulen(c) for c in col) for col in zip(*tbl)]
tjust = [[u"{0:{1}s}".format(c, w)
for w, c in zip(max_widths, row)] for row in tbl]
pretty_top = d['header_tl'] + d['header_csep_t'].join(
d['header_t'] * w for w in max_widths) + d['header_tr'] + "\n"
pretty_bottom = d[
'body_bl'] + d['body_csep_b'].join(d['body_b'] * w for w in max_widths) + d['body_br']
if header_row:
header = tjust.pop(0)
header = [colortext(h, COLOR_BLACK, True) for h in header]
pretty_colheader = d['header_l'] + \
d['header_csep_m'].join(header) + d['header_r']
pretty_underline = d[
'header_bl'] + d['header_csep_b'].join(d['header_b'] * w for w in max_widths) + d['header_br']
pretty_header = pretty_colheader + "\n" + pretty_underline + "\n"
pretty_top = pretty_top + pretty_header
tjust = [[colortext(t, fmt['color'](t))
for fmt, t in zip(colordef, row)] for row in tjust]
return pretty_top + "".join(d['body_l'] + d['body_csep_m'].join(row) + d['body_r'] + "\n" for row in tjust) + pretty_bottom
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n#!/usr/bin/env python\n\n#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#\n#\n# Name: generate_gaussian_samples.py \n#\n# Author: Constantin Weisser (weisser@mit.edu)\n#\n# Purpose: This is a python script to write a f... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#\n#\n# Name: generate_gaussian_samples.py \n#\n# Author: Constantin Weisser (weisser@mit.edu)\n#\n# Purpose: This is a python scr... | ```python
#!/usr/bin/env python
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# Name: generate_gaussian_samples.py
#
# Author: Constantin Weisser (weisser@mit.edu)
#
# Purpose: This is a python script to write a file containing 10000 data points
# sampled from a 2D Gaussian
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#Constantin Weisser
from __future__ import print_function
from random import gauss
import sys
import numpy as np
import json #needed to read in means and stdev as numpy arrays
import random
no_points=10000
original_mean1 = 0.2
original_mean2 = 0.8
original_std = 0.05
label_no = 1
args = str(sys.argv)
#print ("Args list: %s " % args)
#The first argument is the name of this python file
total = len(sys.argv)
verbose=True
if(total==8):
no_points = int(sys.argv[1])
#mean = np.array(json.loads(sys.argv[2]))
#std = np.array(json.loads(sys.argv[3]))
original_mean1 = float(sys.argv[2])
original_mean2 = float(sys.argv[3])
original_std = float(sys.argv[4])
distance_to_original = float(sys.argv[5])
no_dim = int(sys.argv[6])
label_no =float(sys.argv[7])
else:
print("Using standard arguments")
if verbose:
print("original_mean1 : ", original_mean1)
print("original_mean2 : ", original_mean2)
print("original_std : ",original_std)
#print(mean.shape[0])
for dim in range(no_dim):
values = np.zeros((no_points,1))
for i in range(no_points):
if bool(random.getrandbits(1)):
values[i] = gauss(original_mean1+distance_to_original,original_std)
else:
values[i] = gauss(original_mean2-distance_to_original,original_std)
#print(values)
if dim==0:
full_cords=values
else:
full_cords=np.column_stack((full_cords,values))
print(full_cords)
np.savetxt("gauss_data/data_double_high{0}Dgauss_".format(int(no_dim))+str(int(no_points))+"_"+str(original_mean1)+"_"+str(original_mean2)+"_"+str(original_std)+"_"+str(distance_to_original)+"_"+str(int(label_no))+ ".txt",full_cords)
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n/***************************************************************************\r\n A QGIS plugin\r\n CLUZ for QGIS\r\n -------------------... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n/***************************************************************************\r\n A QGIS plugin\r\n CLUZ for QGIS\r\n ---... | ```python
# -*- coding: utf-8 -*-
"""
/***************************************************************************
A QGIS plugin
CLUZ for QGIS
-------------------
begin : 2016-23-02
copyright : (C) 2016 by Bob Smith, DICE
email : r.j.smith@kent.ac.uk
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import qgis
from qgis.gui import *
import os
import copy
import cluz_mpfunctions
import cluz_mpoutputs
import cluz_mpsetup
import cluz_functions2
import cluz_display
def runMinPatch(setupObject, minpatchObject, minpatchDataDict):
marxanNameString = minpatchObject.marxanFileName + "_r"
finalNameString = "mp_" + marxanNameString
marxanSolFileList = cluz_mpsetup.makeMarxanFileList(setupObject, marxanNameString)
preMarxanUnitDict = minpatchDataDict["initialUnitDictionary"]
summedSolDict = cluz_mpoutputs.produceSummedDict(preMarxanUnitDict)
patchResultsDict = {}
zoneStatsDict = {}
zoneFeaturePropStatsDict = {}
bestPortfolioCost = -1
continueBool = True
for marxanSolFilePath in marxanSolFileList:
runningUnitDict = createRunningUnitDictionary(minpatchDataDict, marxanSolFilePath)
patchDict = cluz_mpfunctions.makePatchDict(runningUnitDict, minpatchDataDict)
qgis.utils.iface.mainWindow().statusBar().showMessage("Processing file " + marxanSolFilePath + ".")
if minpatchDataDict["patch_stats"] and continueBool:
beforePatchStatsDict = cluz_mpoutputs.makePatchStatsDict(patchDict, minpatchDataDict)
if minpatchDataDict["rem_small_patch"] and continueBool:
runningUnitDict = cluz_mpfunctions.remSmallPatchesFromUnitDict(minpatchDataDict,runningUnitDict, patchDict)
qgis.utils.iface.mainWindow().statusBar().showMessage("Processing file " + marxanSolFilePath + ". Removing patches that are smaller than the specified thresholds...")
if minpatchDataDict["add_patches"] and continueBool:
runningUnitDict, continueBool = cluz_mpfunctions.addPatches(minpatchDataDict, runningUnitDict)
qgis.utils.iface.mainWindow().statusBar().showMessage("Processing file " + marxanSolFilePath + ". Adding new patches...")
if minpatchDataDict["whittle_polish"] and continueBool:
runningUnitDict = cluz_mpfunctions.runSimWhittle(runningUnitDict, minpatchDataDict)
qgis.utils.iface.mainWindow().statusBar().showMessage("Processing file " + marxanSolFilePath + ". Simulated whittling...")
runningUnitDict = addConservedPUs(runningUnitDict,minpatchDataDict)
if minpatchDataDict["patch_stats"] and continueBool:
patchDict = cluz_mpfunctions.makePatchDict(runningUnitDict, minpatchDataDict)
afterPatchStatsDict = cluz_mpoutputs.makePatchStatsDict(patchDict, minpatchDataDict)
if continueBool:
outputFilePath = marxanSolFilePath.replace(marxanNameString, finalNameString)
cluz_mpoutputs.printRunResults(minpatchDataDict, runningUnitDict, outputFilePath)
costDict = makeCostDict(minpatchDataDict, runningUnitDict)
totalCost = costDict['totalBoundaryCost'] + costDict['totalUnitCost']
if minpatchDataDict["patch_stats"]:
patchResultsDict = cluz_mpoutputs.producePatchResultsDict(patchResultsDict, marxanSolFilePath, beforePatchStatsDict, afterPatchStatsDict, costDict)
if minpatchDataDict["zone_stats"]:
zoneNameString = os.path.basename(marxanSolFilePath)
zoneStatsDict[zoneNameString] = cluz_mpoutputs.makeRunZoneStatsDict(minpatchDataDict, runningUnitDict, zoneStatsDict)
zoneFeaturePropStatsDict[zoneNameString] = cluz_mpoutputs.makeRunZoneFeaturePropStatsDict(minpatchDataDict, runningUnitDict)
if bestPortfolioCost == -1:
bestPortfolioCost = totalCost
bestPortfolio = copy.deepcopy(runningUnitDict)
if bestPortfolioCost <> -1 and totalCost < bestPortfolioCost:
bestPortfolioCost = totalCost
bestPortfolio = copy.deepcopy(runningUnitDict)
summedDict = cluz_mpoutputs.updateSummedDict(summedSolDict,runningUnitDict)
if continueBool:
bestFileName = setupObject.outputPath + os.sep + 'mp_' + minpatchObject.marxanFileName + '_best.txt'
cluz_mpoutputs.printRunResults(minpatchDataDict, bestPortfolio, bestFileName)
summedFileName = setupObject.outputPath + os.sep + 'mp_' + minpatchObject.marxanFileName + '_summed.txt'
cluz_mpoutputs.printSummedResults(summedDict, summedFileName)
if minpatchDataDict["patch_stats"]:
patchstatsFileName = setupObject.outputPath + os.sep + 'mp_' + minpatchObject.marxanFileName + '_patchstats.csv'
cluz_mpoutputs.printPatchStats(patchResultsDict, patchstatsFileName)
if minpatchDataDict["zone_stats"]:
zoneStatsBaseFileName = setupObject.outputPath + os.sep + 'mp_' + minpatchObject.marxanFileName
cluz_mpoutputs.printZoneStats(minpatchDataDict, zoneStatsDict, zoneStatsBaseFileName)
cluz_mpoutputs.printZoneFeaturePropStats(minpatchDataDict, zoneFeaturePropStatsDict, zoneStatsBaseFileName)
cluz_functions2.addBestMarxanOutputToPUShapefile(setupObject, bestFileName, "MP_Best")
cluz_functions2.addSummedMarxanOutputToPUShapefile(setupObject, summedFileName, "MP_SF_Scr")
cluz_display.reloadPULayer(setupObject)
cluz_display.removePreviousMinPatchLayers()
bestLayerName = "MP Best (" + minpatchObject.marxanFileName + ")"
summedLayerName = "MP SF_Score (" + minpatchObject.marxanFileName + ")"
cluz_display.displayBestOutput(setupObject, "MP_Best", bestLayerName)
cluz_display.displayGraduatedLayer(setupObject, "MP_SF_Scr", summedLayerName, 1) #1 is SF legend code
qgis.utils.iface.mainWindow().statusBar().showMessage("")
qgis.utils.iface.messageBar().pushMessage("MinPatch results", "MinPatch has completed the analysis and the results files are in the specified output folder.", QgsMessageBar.INFO, 3)
def createRunningUnitDictionary(minpatchDataDict, marxanSolLocationString):
    """Build the working planning-unit dict for one Marxan solution file.

    Deep-copies the pristine unit dictionary so repeated runs never see an
    earlier run's selections, then flags the units chosen in the solution.
    """
    baseUnits = copy.deepcopy(minpatchDataDict["initialUnitDictionary"])
    # First tuple element (an OK flag) is deliberately ignored here.
    _, solutionDict = cluz_mpsetup.makeMarxanSolDict(marxanSolLocationString)
    return makeStartUnitDict(baseUnits, solutionDict)
def makeStartUnitDict(unitDictionary, marxanSolDictionary):
    """Mark every planning unit selected (status 1) in the Marxan solution.

    Each unitDictionary value is a [cost, status] pair; the dict is mutated
    in place and also returned for convenience.
    """
    selectedIDs = (pu for pu, status in marxanSolDictionary.items() if status == 1)
    for puID in selectedIDs:
        unitDictionary[puID][1] = 1
    return unitDictionary
def addConservedPUs(runningUnitDict, minpatchDataDict):
    """Re-apply conserved status (2) from the initial unit dictionary.

    Units that were conserved at the start stay conserved regardless of what
    the Marxan solution says; runningUnitDict is mutated and returned.
    """
    initialUnits = minpatchDataDict["initialUnitDictionary"]
    for puID in runningUnitDict:
        if initialUnits[puID][1] == 2:
            runningUnitDict[puID][1] = 2
    return runningUnitDict
def makeCostDict(minpatchDataDict, puDict):
    """Assemble the full cost summary for the current portfolio.

    Returns a dict containing per-status feature abundances, the active-PU
    count, the unit cost/count of the selection, per-feature conserved
    amounts, the total target-shortfall penalty, and boundary length/cost.
    """
    costDict = {}
    abundanceMatrixDict = minpatchDataDict["abundanceMatrixDictionary"]
    targetDict = minpatchDataDict["targetDictionary"]
    boundaryMatrixDict = minpatchDataDict["boundaryMatrixDictionary"]
    # sorted() replaces keys() + list.sort(): on Python 3, dict.keys()
    # returns a view with no sort() method; sorted() is equivalent on
    # Python 2 as well, so this works on both.
    targetList = sorted(targetDict.keys())
    abundValuesDict, numActivePUs = makeAbundValuesDict_numActivePUs(targetList, abundanceMatrixDict, puDict)
    costDict["abundanceValuesDictionary"] = abundValuesDict
    costDict["numberActivePUs"] = numActivePUs
    totalUnitCost, conUnitCount = calcUnitCosts(puDict)
    costDict["totalUnitCost"] = totalUnitCost
    costDict["conservedUnitCount"] = conUnitCount
    amountConservedDict = makeAmountConservedDictionary(targetList, abundanceMatrixDict, puDict)
    costDict["amountConservedDictionary"] = amountConservedDict
    costDict["totalTargetCost"] = makeTotalTargetCost(amountConservedDict, targetDict)
    totalBoundLength, totalBoundaryCost = makeBoundCosts(minpatchDataDict, boundaryMatrixDict, puDict)
    costDict["totalBoundaryLength"] = totalBoundLength
    costDict["totalBoundaryCost"] = totalBoundaryCost
    return costDict
def makeAbundValuesDict_numActivePUs(targetList, abundanceMatrixDict, puDict):
    """Tally feature amounts per planning-unit status and count active PUs.

    Returns (abundValuesDict, numActivePUs): abundValuesDict maps each
    feature ID to a four-slot list indexed by unit status (0-3), and
    numActivePUs counts units whose status is 0 (available) or 1 (selected).
    """
    abundValuesDict = {featID: [0, 0, 0, 0] for featID in targetList}
    numActivePUs = 0
    for unitID, featAmounts in abundanceMatrixDict.items():
        status = puDict[unitID][1]
        # Only available/selected units can change during the iteration phase.
        if status in (0, 1):
            numActivePUs += 1
        for featID, amount in featAmounts.items():
            abundValuesDict[featID][status] += amount
    return abundValuesDict, numActivePUs
def calcUnitCosts(puDict):
    """Sum the cost of, and count, all selected (1) or conserved (2) units.

    puDict maps unit ID -> [cost, status]; returns (totalCost, unitCount).
    """
    totalUnitCost = 0
    conUnitCount = 0
    for unitValue, unitStatus in puDict.values():
        if unitStatus in (1, 2):
            totalUnitCost += unitValue
            conUnitCount += 1
    return totalUnitCost, conUnitCount
def makeAmountConservedDictionary(targetList, abundanceMatrixDictionary, unitDictionary):
    """Total, per feature, the amount held in selected (1) or conserved (2) units."""
    amountConservedDict = dict.fromkeys(targetList, 0)
    for puID, featAmounts in abundanceMatrixDictionary.items():
        if unitDictionary[puID][1] not in (1, 2):
            continue
        for featID, featAmount in featAmounts.items():
            amountConservedDict[featID] += featAmount
    return amountConservedDict
def makeTotalTargetCost(amountConservedDictionary, targetDictionary):
    """Sum the penalties of every feature still short of its target.

    targetDictionary values are lists where index 1 is the target amount and
    index 2 the penalty applied when the target is unmet.
    """
    totalTargetCost = 0
    for featureID, amountConserved in amountConservedDictionary.items():
        target, penalty = targetDictionary[featureID][1:3]
        if amountConserved < target:
            totalTargetCost += penalty
    return totalTargetCost
def makeBoundCosts(minpatchDataDict, boundaryMatrixDict, puDict):
    """Return the portfolio's total boundary length and its BLM-weighted cost."""
    boundLength = cluz_functions2.calcTotalBoundLength(boundaryMatrixDict, puDict)
    # "bound_cost" is the boundary-length modifier (BLM) weighting.
    return boundLength, boundLength * minpatchDataDict["bound_cost"]
``` |
[
{
"content": "```python\nfrom sentinels import NOTHING\n\nfrom .api_object import APIObject\nfrom .lazy_query import LazyQuery\n\nclass Test(APIObject):\n\n def report_end(self, duration=NOTHING):\n self.client.api.call_function('report_test_end', {'id': self.id, 'duration': duration})\n\n def mark... | [
{
"content": "<|memory_start|>```python\nfrom sentinels import NOTHING\n\nfrom .api_object import APIObject\nfrom .lazy_query import LazyQuery\n\nclass Test(APIObject):\n\n def report_end(self, duration=NOTHING):\n self.client.api.call_function('report_test_end', {'id': self.id, 'duration': duration})... | ```python
from sentinels import NOTHING
from .api_object import APIObject
from .lazy_query import LazyQuery
class Test(APIObject):
    """Client-side handle for a single test reported through the server API."""

    def report_end(self, duration=NOTHING):
        payload = {'id': self.id, 'duration': duration}
        self.client.api.call_function('report_test_end', payload)

    def mark_skipped(self):
        self.client.api.call_function('report_test_skipped', {'id': self.id})

    def mark_interrupted(self):
        self.client.api.call_function('report_test_interrupted', {'id': self.id})

    def add_error(self):
        return self.client.api.call_function('add_test_error', {'id': self.id})

    def add_failure(self):
        return self.client.api.call_function('add_test_failure', {'id': self.id})

    def add_metadata(self, metadata):
        payload = {'id': self.id, 'metadata': metadata}
        return self.client.api.call_function('add_test_metadata', payload)

    def set_conclusion(self, conclusion):
        payload = {'id': self.id, 'conclusion': conclusion}
        return self.client.api.call_function('set_test_conclusion', payload)

    def add_error_data(self, exception, exception_type, traceback, timestamp=NOTHING):
        payload = {
            'id': self.id,
            'exception': exception,
            'exception_type': exception_type,
            'traceback': traceback,
            'timestamp': timestamp,
        }
        return self.client.api.call_function('add_test_error_data', payload)

    def edit_status(self, status):
        return self.client.api.call_function('edit_test_status', {'id': self.id, 'status': status})

    def query_errors(self):
        """Query the errors recorded for this test.

        :rtype: A lazy query object
        """
        return LazyQuery(self.client, '/rest/errors', query_params={'test_id': self.id})
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you un... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this f... | ```python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""Unit tests for various models and operators"""
from time import time
import os
import sys
from scipy.stats import t as tdistr
import numpy as np
import torch
import torchvision
from torch.nn import Module
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.contrib.nvcc import have_fp16
import tvm.testing
from packaging import version as package_version
# Deeply nested relay expressions can blow past CPython's default recursion
# limit (1000) during tracing/conversion, so raise it for this test module.
sys.setrecursionlimit(10000)
def list_ops(expr):
    """Return the distinct relay ops referenced in *expr*, in first-visit order.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The expression to scan.

    Returns
    -------
    list
        Each op appears at most once.
    """
    class OpLister(tvm.relay.ExprVisitor):
        """Visitor that records each op node the first time it is seen."""

        def visit_op(self, expr):
            # Record the op only on first sight. The original code tested
            # membership against a container that was never populated, so the
            # dedup guard was a no-op; add to node_set to make it effective.
            if expr not in self.node_set:
                self.node_set.add(expr)
                self.node_list.append(expr)
            return super().visit_op(expr)

        def list_nodes(self, expr):
            self.node_set = set()
            self.node_list = []
            self.visit(expr)
            return self.node_list

    return OpLister().list_nodes(expr)
def assert_shapes_match(tru, est):
    """Raise AssertionError unless the two arrays have identical shapes."""
    if tru.shape == est.shape:
        return
    raise AssertionError("Output shapes {} and {} don't match".format(tru.shape, est.shape))
def load_torchvision(model_name):
    """Return a pretrained torchvision model in eval mode plus one sample input.

    Inception-family models use 299x299 inputs with 0.5/0.5 normalization;
    everything else gets 224x224 with the standard ImageNet statistics.
    """
    with torch.no_grad():
        if model_name.startswith("inception"):
            height = width = 299
            mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
        else:
            height = width = 224
            mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
        input_data = torch.randn([1, 3, height, width]).float()
        # Normalize channel-wise, mirroring torchvision's transform pipeline.
        for channel in range(3):
            input_data[:, channel] = (input_data[:, channel] - mean[channel]) / std[channel]
        kwargs = {"pretrained": True}
        if model_name.startswith("googlenet"):
            # googlenet needs aux_logits enabled to match the pretrained weights.
            kwargs["aux_logits"] = True
        model = getattr(torchvision.models, model_name)(**kwargs)
        return model.float().eval(), [input_data]
def load_pretrainedmodels(model_name):
    """Return a pretrainedmodels.pytorch model in eval mode plus one sample input."""
    import pretrainedmodels  # https://github.com/Cadene/pretrained-models.pytorch

    model = getattr(pretrainedmodels, model_name)().float().eval()
    input_data = torch.rand([1] + list(model.input_size)).float() * 256
    # Normalize channel-wise using the statistics the model ships with.
    for channel in range(3):
        input_data[:, channel] = (input_data[:, channel] - model.mean[channel]) / model.std[channel]
    return model, [input_data]
def load_model(model_name):
    """Resolve *model_name* against torchvision first, then pretrainedmodels.

    Raises ModuleNotFoundError if pretrainedmodels is needed but missing, and
    RuntimeError if neither package knows the model.
    """
    if hasattr(torchvision.models, model_name):
        return load_torchvision(model_name)
    try:
        import pretrainedmodels
    except ModuleNotFoundError:
        raise ModuleNotFoundError("Please install pretrainedmodels.pytorch")
    if hasattr(pretrainedmodels, model_name):
        return load_pretrainedmodels(model_name)
    raise RuntimeError("Model not supported")
def confidence_interval(mean, stdev, count, alpha=0.01):
    """Return (lower, upper) bounds of the confidence interval of a random
    variable. Confidence is 1 - alpha (default confidence is 99%)."""
    critical = tdistr.ppf(1 - alpha / 2, count - 1)
    margin = critical * stdev / np.sqrt(count)
    return mean - margin, mean + margin
def measure_latency(model, input_shapes, output_shapes, thresh, dryruns=40):
    """Compute the latency of the given model"""
    # Times single inference runs until the half-width of the 99% confidence
    # interval around the mean latency drops below *thresh*, then returns the
    # interval midpoint (seconds). Two execution paths:
    #   - torch.nn.Module: run directly (moved to CUDA when available);
    #   - anything else: assumed to be a TVM graph-runtime module driven via
    #     set_input/run/get_output.
    # NOTE(review): only the TVM path skips the first *dryruns* warm-up
    # iterations; the torch path records every run — confirm if intentional.
    latencies = []
    count = 0
    while True:
        if isinstance(model, Module):
            input_data = [torch.rand(shape).float() for shape in input_shapes]
            if torch.cuda.is_available():
                input_data = list(map(lambda x: x.cuda(), input_data))
                model = model.cuda()
            t_start = time()
            with torch.no_grad():
                model(*input_data)
            t_end = time()
            latencies.append(t_end - t_start)
        else:
            input_data = {}
            for i, shape in enumerate(input_shapes):
                name = "input" + str(i)
                arr = np.random.random(shape).astype("float32")
                input_data[name] = tvm.nd.array(arr)
            t_start = time()
            model.set_input(**input_data)
            model.run()
            for i, shape in enumerate(output_shapes):
                arr = np.zeros(shape).astype("float32")
                model.get_output(i, tvm.nd.array(arr))
            t_end = time()
            count += 1
            if count < dryruns:
                # Warm-up iteration: discard the timing.
                continue
            latencies.append(t_end - t_start)
        mean = np.mean(latencies)
        stdev = np.std(latencies)
        sample_size = len(latencies)
        if sample_size > dryruns:
            # Stop once the interval is tight enough; est is the midpoint,
            # err the half-width.
            lower, upper = confidence_interval(mean, stdev, sample_size)
            est = (upper + lower) / 2
            err = (upper - lower) / 2
            if err < thresh:
                return est
def verify_model(model_name, input_data=None, custom_convert_map=None, rtol=1e-5, atol=1e-5):
    """Assert that the output of a compiled model matches with that of its
    baseline.

    Parameters
    ----------
    model_name : str or torch.nn.Module
        Either a model name resolvable by load_model, or the module itself.
    input_data : list of torch.Tensor or torch.Tensor, optional
        Inputs fed to both the baseline and the compiled model.
    custom_convert_map : dict, optional
        Extra operator converters forwarded to relay.frontend.from_pytorch.
    rtol, atol : float
        Tolerances for the numeric comparison of outputs.
    """
    # Avoid the mutable-default-argument pitfall: the previous defaults
    # ([] and {}) were single objects shared across every call.
    if input_data is None:
        input_data = []
    if custom_convert_map is None:
        custom_convert_map = {}
    if isinstance(model_name, str):
        baseline_model, baseline_input = load_model(model_name)
    elif isinstance(input_data, list):
        baseline_model = model_name
        baseline_input = input_data
    elif isinstance(input_data, torch.Tensor) or len(input_data.shape) == 0:
        baseline_model = model_name
        baseline_input = [input_data]
    else:
        assert False, "Unexpected input format"
    if torch.cuda.is_available():
        if isinstance(baseline_model, torch.nn.Module):
            baseline_model = baseline_model.cuda()
        baseline_input = [inp.cuda() for inp in baseline_input]
    # Baseline: run the PyTorch model and normalize outputs to a tuple of
    # numpy arrays.
    with torch.no_grad():
        baseline_outputs = baseline_model(*baseline_input)
    if isinstance(baseline_outputs, tuple):
        baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs)
    else:
        baseline_outputs = (baseline_outputs.cpu().numpy(),)
    trace = torch.jit.trace(baseline_model, baseline_input)
    if isinstance(baseline_model, torch.nn.Module):
        trace = trace.float().eval()
        if torch.cuda.is_available():
            trace = trace.cuda()
        else:
            trace = trace.cpu()
    input_names = ["input{}".format(idx) for idx, inp in enumerate(baseline_input)]
    input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))
    mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)
    compiled_input = dict(zip(input_names, [inp.cpu().numpy() for inp in baseline_input]))
    # Compile and run with TVM on every enabled target, comparing each output
    # against the baseline.
    with tvm.transform.PassContext(opt_level=3):
        for target, ctx in tvm.testing.enabled_targets():
            relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
            relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)
            relay_model.set_input(**relay_params)
            for name, inp in compiled_input.items():
                relay_model.set_input(name, inp)
            relay_model.run()
            for i, baseline_output in enumerate(baseline_outputs):
                compiled_output = relay_model.get_output(i).asnumpy()
                assert_shapes_match(baseline_output, compiled_output)
                tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol)
    # Release references and GPU memory between test cases.
    del model_name
    del baseline_model
    torch.cuda.empty_cache()
# Single operator tests
@tvm.testing.uses_gpu
def test_forward_pixel_shuffle():
    """nn.PixelShuffle with several upscale factors."""
    torch.set_grad_enabled(False)
    input_data = torch.rand([1, 144, 16, 16]).float()
    for upscale_factor in (2, 3, 4):
        verify_model(torch.nn.PixelShuffle(upscale_factor).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_add():
    """Elementwise add: self+self, +scalar, +full tensor, +0-d tensor."""
    torch.set_grad_enabled(False)
    input_shape = [10]

    class Add1(Module):
        def forward(self, *args):
            return args[0] + args[0]

    class Add2(Module):
        def forward(self, *args):
            return args[0] + 1

    class Add3(Module):
        def forward(self, *args):
            ones = torch.ones(input_shape, dtype=torch.float)
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] + ones

    class Add4(Module):
        def forward(self, *args):
            ones = torch.ones([], dtype=torch.float)
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] + ones

    input_data = torch.rand(input_shape).float()
    for variant in (Add1, Add2, Add3, Add4):
        verify_model(variant().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_subtract():
    """Elementwise subtract: self-self, -scalar, -full tensor, -0-d tensor."""
    torch.set_grad_enabled(False)
    input_shape = [10]

    class Subtract1(Module):
        def forward(self, *args):
            return args[0] - args[0]

    class Subtract2(Module):
        def forward(self, *args):
            return args[0] - 1

    class Subtract3(Module):
        def forward(self, *args):
            ones = torch.ones(input_shape)
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] - ones

    class Subtract4(Module):
        def forward(self, *args):
            ones = torch.ones([])
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] - ones

    input_data = torch.rand(input_shape).float()
    for variant in (Subtract1, Subtract2, Subtract3, Subtract4):
        verify_model(variant().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_multiply():
    """Elementwise multiply: self*self, *scalar, *full tensor, *0-d tensor."""
    torch.set_grad_enabled(False)
    input_shape = [10]

    class Multiply1(Module):
        def forward(self, *args):
            return args[0] * args[0]

    class Multiply2(Module):
        def forward(self, *args):
            return args[0] * 1.0

    class Multiply3(Module):
        def forward(self, *args):
            ones = torch.ones(input_shape)
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] * ones

    class Multiply4(Module):
        def forward(self, *args):
            ones = torch.ones([])
            if torch.cuda.is_available():
                ones = ones.cuda()
            return args[0] * ones

    input_data = torch.rand(input_shape).float()
    for variant in (Multiply1, Multiply2, Multiply3, Multiply4):
        verify_model(variant().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_min_max():
    """torch.min/torch.max in reduction, keepdim, and binary-elementwise forms."""

    class Max(Module):
        def forward(self, inp):
            return torch.max(inp)

    class Min(Module):
        def forward(self, inp):
            return torch.min(inp)

    class Max2(Module):
        def forward(self, inp):
            out, _ = torch.max(inp, 1, keepdim=True)
            return out

    class Min2(Module):
        def forward(self, inp):
            out, _ = torch.min(inp, 0, keepdim=False)
            return out

    class Max3(Module):
        def forward(self, lhs, rhs):
            return torch.max(lhs, rhs)

    class Min3(Module):
        def forward(self, lhs, rhs):
            return torch.min(lhs, rhs)

    input_data = [torch.rand((10, 10)), torch.rand((10, 10))]
    # Unary variants see only the first tensor; binary variants see both.
    for unary in (Max(), Min(), Max2(), Min2()):
        verify_model(unary, input_data=input_data[0])
    for binary in (Max3(), Min3()):
        verify_model(binary, input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reciprocal():
    """Tensor.reciprocal on a 5-D input with singleton dims."""
    torch.set_grad_enabled(False)

    class Reciprocal1(Module):
        def forward(self, *args):
            return args[0].reciprocal()

    verify_model(Reciprocal1().float().eval(), input_data=torch.rand([2, 1, 10, 1, 10]).float())
@tvm.testing.uses_gpu
def test_forward_repeat():
    """Tensor.repeat with identity, same-rank, and rank-expanding counts."""
    torch.set_grad_enabled(False)

    class Repeat1(Module):
        def forward(self, *args):
            return args[0].repeat(1, 1)

    class Repeat2(Module):
        def forward(self, *args):
            return args[0].repeat(4, 2)

    class Repeat3(Module):
        def forward(self, *args):
            return args[0].repeat(4, 2, 1)

    input_data = torch.rand([1, 3]).float()
    for variant in (Repeat1, Repeat2, Repeat3):
        verify_model(variant().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_repeat_interleave():
    """Tensor.repeat_interleave flattened and along each of the three dims."""
    torch.set_grad_enabled(False)

    class RepeatInterleave1(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(2)

    class RepeatInterleave2(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(3, dim=0)

    class RepeatInterleave3(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(2, dim=1)

    class RepeatInterleave4(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(4, dim=2)

    input_data = torch.rand([2, 2, 3]).float()
    for variant in (RepeatInterleave1, RepeatInterleave2, RepeatInterleave3, RepeatInterleave4):
        verify_model(variant().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_unsqueeze():
    """Tensor.unsqueeze inserting a new trailing axis."""
    torch.set_grad_enabled(False)

    class Unsqueeze1(Module):
        def forward(self, *args):
            return args[0].unsqueeze(2)

    verify_model(Unsqueeze1().float().eval(), input_data=torch.rand([10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_squeeze():
    """Tensor.squeeze with and without an explicit dimension."""
    torch.set_grad_enabled(False)

    class Squeeze1(Module):
        def forward(self, *args):
            return args[0].squeeze()

    class Squeeze2(Module):
        def forward(self, *args):
            return args[0].squeeze(1)

    input_data = torch.rand([2, 1, 10, 1, 10]).float()
    for variant in (Squeeze1, Squeeze2):
        verify_model(variant().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_arange():
    """torch.arange across literal, dtype-forced, and tensor-derived bounds."""
    torch.set_grad_enabled(False)

    class Arange1(Module):
        def forward(self, *args):
            return torch.arange(5)

    class Arange2(Module):
        def forward(self, *args):
            return torch.arange(2.5)

    class Arange3(Module):
        def forward(self, *args):
            return torch.arange(1, 4)

    class Arange4(Module):
        def forward(self, *args):
            return torch.arange(1, 2.5, 0.5)

    class Arange5(Module):
        def forward(self, *args):
            return torch.arange(1, 2, 1, dtype=torch.int32)

    class Arange6(Module):
        def forward(self, *args):
            return torch.arange(start=1, end=6, step=2)

    class Arange7(Module):
        def forward(self, *args):
            return torch.arange(1, 4, dtype=torch.float32)

    class Arange8(Module):
        def forward(self, *args):
            return torch.arange(1, 2, 1, dtype=torch.int16)

    class Arange9(Module):
        def forward(self, *args):
            end = torch.add(torch.tensor(4), 1)
            return torch.arange(end) + torch.ones((5,), dtype=torch.int64)

    class Arange10(Module):
        def forward(self, *args):
            end = torch.add(torch.tensor(4.0), torch.tensor(1.0))
            return torch.arange(end) + torch.ones((5,), dtype=torch.float)

    class Arange11(Module):
        def forward(self, *args):
            start = torch.add(torch.tensor(1), 1)
            end = torch.add(torch.tensor(4), 1)
            step = torch.add(torch.tensor(2), 1)
            out = torch.arange(start, end, step)
            return out + torch.ones((3,), dtype=torch.int64)

    class Arange12(Module):
        def forward(self, *args):
            start = torch.add(torch.tensor(1), 1)
            end = torch.add(torch.tensor(4), 1)
            step = torch.add(torch.tensor(2.5), torch.tensor(4.1))
            out = torch.arange(start, end, step)
            return out + torch.ones((3,), dtype=torch.float)

    variants = (
        Arange1, Arange2, Arange3, Arange4, Arange5, Arange6,
        Arange7, Arange8, Arange9, Arange10, Arange11, Arange12,
    )
    for variant in variants:
        verify_model(variant().float().eval())
@tvm.testing.uses_gpu
def test_forward_mesh_grid():
    """torch.meshgrid from constant tensors and from a derived scalar."""
    torch.set_grad_enabled(False)

    class MeshGrid1(Module):
        def forward(self, *args):
            x = torch.tensor([1, 2, 3])
            y = torch.tensor([4, 5, 6])
            grid_x, grid_y = torch.meshgrid([x, y])
            return grid_x, grid_y

    class MeshGrid2(Module):
        def forward(self, *args):
            x = torch.tensor([1, 2, 3], dtype=torch.float32)
            y = torch.add(torch.tensor(5, dtype=torch.float32), 1)
            grid_x, grid_y = torch.meshgrid([x, y])
            return grid_x, grid_y

    for variant in (MeshGrid1, MeshGrid2):
        verify_model(variant().float().eval())
@tvm.testing.uses_gpu
def test_forward_abs():
    """Tensor.abs on a 5-D input."""
    torch.set_grad_enabled(False)

    class Abs1(Module):
        def forward(self, *args):
            return args[0].abs()

    verify_model(Abs1().float().eval(), input_data=torch.rand([2, 1, 10, 1, 10]).float())
@tvm.testing.uses_gpu
def test_forward_concatenate():
    """torch.cat over unsqueezed channel slices and computed sub-tensors."""
    torch.set_grad_enabled(False)

    class Concatenate1(Module):
        def forward(self, *args):
            return torch.cat([args[0][:, 0].unsqueeze(1), args[0][:, 1].unsqueeze(1)], 1)

    class Concatenate2(Module):
        def forward(self, *args):
            a = (args[0][:, :, 0] + 2) * 7
            b = (args[0][:, :, 1] + 3) * 11
            c = (args[0][:, :, 2] + 5) * 13
            return torch.cat([t.unsqueeze(2) for t in [a, b, c]], 2)

    input_data = torch.rand([1, 3, 10, 10]).float()
    for variant in (Concatenate1, Concatenate2):
        verify_model(variant().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_relu():
    """nn.ReLU on a 2-D input."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.ReLU().eval(), input_data=torch.rand([10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_prelu():
    """nn.PReLU with one learnable slope per channel."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.PReLU(num_parameters=3).eval(), input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_leakyrelu():
    """nn.LeakyReLU across default, small, and >1 slopes, incl. inplace mode."""
    torch.set_grad_enabled(False)
    input_data = torch.rand([1, 3, 10, 10]).float()
    variants = [
        torch.nn.LeakyReLU(),
        torch.nn.LeakyReLU(negative_slope=0.05),
        torch.nn.LeakyReLU(negative_slope=1.0, inplace=True),
        torch.nn.LeakyReLU(negative_slope=1.25, inplace=True),
    ]
    for layer in variants:
        verify_model(layer.eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_elu():
    """nn.ELU with the default and several explicit alpha values."""
    torch.set_grad_enabled(False)
    input_data = torch.rand([1, 3, 10, 10]).float()
    for layer in (
        torch.nn.ELU(),
        torch.nn.ELU(alpha=0.3),
        torch.nn.ELU(alpha=1.0),
        torch.nn.ELU(alpha=1.3),
    ):
        verify_model(layer.eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_celu():
    """nn.CELU with the default and several explicit alpha values."""
    torch.set_grad_enabled(False)
    input_data = torch.rand([1, 3, 10, 10]).float()
    for layer in (
        torch.nn.CELU(),
        torch.nn.CELU(alpha=0.3),
        torch.nn.CELU(alpha=1.0),
        torch.nn.CELU(alpha=1.3),
    ):
        verify_model(layer.eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_gelu():
    """nn.GELU on a 4-D input."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.GELU().eval(), input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_selu():
    """nn.SELU on a 4-D input."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.SELU().eval(), input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_softplus():
    """nn.Softplus with default and custom beta/threshold settings."""
    torch.set_grad_enabled(False)
    input_data = torch.rand([1, 3, 10, 10]).float()
    for layer in (
        torch.nn.Softplus(),
        torch.nn.Softplus(beta=1.5, threshold=20),
        torch.nn.Softplus(beta=5, threshold=10),
    ):
        verify_model(layer.eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_softsign():
    """nn.Softsign on a 4-D input."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.Softsign().eval(), input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_log_sigmoid():
    """nn.LogSigmoid on a 2-D input."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.LogSigmoid().eval(), input_data=torch.rand([10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_adaptiveavgpool():
    """nn.AdaptiveAvgPool2d to global (1x1) and identity (10x10) sizes."""
    torch.set_grad_enabled(False)
    input_data = torch.rand([1, 3, 10, 10]).float()
    for output_size in ([1, 1], [10, 10]):
        verify_model(torch.nn.AdaptiveAvgPool2d(output_size).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool2d():
    """nn.MaxPool2d variants, a functional call, and return_indices handling."""
    torch.set_grad_enabled(False)
    input_data = torch.rand([1, 3, 10, 10]).float()
    for pool in (
        torch.nn.MaxPool2d(kernel_size=[1, 1]),
        torch.nn.MaxPool2d(kernel_size=[10, 10]),
        torch.nn.MaxPool2d(kernel_size=[4, 4], padding=2, stride=2),
    ):
        verify_model(pool.eval(), input_data)

    class MaxPool2D(Module):
        # Functional variant exercises the default strides=None path.
        def forward(self, *args):
            return torch.nn.functional.max_pool2d(args[0], kernel_size=[10, 10])

    verify_model(MaxPool2D(), input_data=input_data)

    class MaxPool2DWithIndices(Module):
        def __init__(self):
            super(MaxPool2DWithIndices, self).__init__()
            self.pool = torch.nn.MaxPool2d(kernel_size=[1, 1], return_indices=True)

        def forward(self, *args):
            # Indices output is dropped; only the pooled values are compared.
            output, indices = self.pool(args[0])
            return output

    verify_model(MaxPool2DWithIndices().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool1d():
    """nn.MaxPool1d variants plus the functional default-stride path."""
    torch.set_grad_enabled(False)
    input_data = torch.rand([1, 3, 10]).float()
    for pool in (
        torch.nn.MaxPool1d(kernel_size=1),
        torch.nn.MaxPool1d(kernel_size=10),
        torch.nn.MaxPool1d(kernel_size=4, padding=2, stride=2),
    ):
        verify_model(pool.eval(), input_data)

    class MaxPool1D(Module):
        # Functional variant exercises the default strides=None path.
        def forward(self, *args):
            return torch.nn.functional.max_pool1d(args[0], kernel_size=10)

    verify_model(MaxPool1D(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool3d():
    """nn.MaxPool3d variants plus the functional default-stride path."""
    torch.set_grad_enabled(False)
    input_data = torch.rand([1, 3, 10, 10, 10]).float()
    for pool in (
        torch.nn.MaxPool3d(kernel_size=[1, 1, 1]),
        torch.nn.MaxPool3d(kernel_size=[10, 10, 10]),
        torch.nn.MaxPool3d(kernel_size=[4, 4, 4], padding=2, stride=2),
    ):
        verify_model(pool.eval(), input_data)

    class MaxPool3D(Module):
        # Functional variant exercises the default strides=None path.
        def forward(self, *args):
            return torch.nn.functional.max_pool3d(args[0], kernel_size=[10, 10, 10])

    verify_model(MaxPool3D(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_split():
    """torch.split by fixed size and explicit section lists, over both dims."""
    torch.set_grad_enabled(False)

    class Split(Module):
        def __init__(self, split_size_or_sections, dim):
            super(Split, self).__init__()
            self.split_size_or_sections = split_size_or_sections
            self.dim = dim

        def forward(self, *args):
            return torch.split(args[0], self.split_size_or_sections, self.dim)

    input_data = torch.rand([4, 10]).float()
    for sections, dim in ((2, 0), (3, 1), (4, 1), ([2, 3, 5], 1)):
        verify_model(Split(sections, dim).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_avgpool():
    """nn.AvgPool2d and its functional counterpart with a full-size kernel."""
    torch.set_grad_enabled(False)

    class AvgPool2D2(Module):
        def forward(self, *args):
            return torch.nn.functional.avg_pool2d(args[0], kernel_size=[10, 10])

    input_data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.AvgPool2d(kernel_size=[10, 10]).eval(), input_data=input_data)
    verify_model(AvgPool2D2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_avgpool3d():
    """nn.AvgPool3d and its functional counterpart with a full-size kernel."""
    torch.set_grad_enabled(False)

    class AvgPool3D1(Module):
        def forward(self, *args):
            return torch.nn.functional.avg_pool3d(args[0], kernel_size=[10, 10, 10])

    input_data = torch.rand([1, 3, 10, 10, 10]).float()
    verify_model(torch.nn.AvgPool3d(kernel_size=[10, 10, 10]).eval(), input_data=input_data)
    verify_model(AvgPool3D1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_hardtanh():
    """nn.Hardtanh with the default clamping range on a 1-D input."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.Hardtanh().eval(), input_data=torch.rand([10]).float())
@tvm.testing.uses_gpu
def test_forward_conv():
    """1-D/2-D convolution: with bias, without bias, depthwise, and grouped."""
    torch.set_grad_enabled(False)

    class ConvSoftmax(Module):
        # Shared wrapper (conv followed by Softmax) replacing the six
        # near-identical ConvXDn classes this test originally declared.
        def __init__(self, conv):
            super(ConvSoftmax, self).__init__()
            self.conv = conv
            self.softmax = torch.nn.Softmax()

        def forward(self, *args):
            return self.softmax(self.conv(args[0]))

    conv2d_input_data = torch.rand([1, 3, 10, 10]).float()
    for conv in (
        torch.nn.Conv2d(3, 6, 7, bias=True),
        torch.nn.Conv2d(3, 6, 7, bias=False),
        # depth wise conv with channel mult 2
        torch.nn.Conv2d(3, 6, 7, groups=3, bias=False),
    ):
        verify_model(ConvSoftmax(conv).float().eval(), input_data=conv2d_input_data)
    # group conv
    verify_model(
        torch.nn.Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), groups=2).eval(),
        input_data=torch.randn((1, 8, 16, 16)),
    )
    conv1d_input_data = torch.rand([1, 3, 10]).float()
    for conv in (
        torch.nn.Conv1d(3, 6, 7),
        torch.nn.Conv1d(3, 6, 7, bias=False),
        torch.nn.Conv1d(3, 6, 7, groups=3, bias=False),
    ):
        verify_model(ConvSoftmax(conv).float().eval(), input_data=conv1d_input_data)
@tvm.testing.uses_gpu
def test_forward_conv_transpose():
    """Check 1D and 2D transposed convolution, with and without bias."""
    torch.set_grad_enabled(False)

    data_2d = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.ConvTranspose2d(3, 6, 7, bias=True), input_data=data_2d)
    verify_model(torch.nn.ConvTranspose2d(3, 12, 3, bias=False), input_data=data_2d)

    data_1d = torch.rand([1, 3, 10]).float()
    verify_model(torch.nn.ConvTranspose1d(3, 6, 7, bias=True), input_data=data_1d)
    verify_model(torch.nn.ConvTranspose1d(3, 12, 3, bias=False), input_data=data_1d)
@tvm.testing.uses_gpu
def test_forward_threshold():
    """Check torch.nn.Threshold (threshold 0, replacement value 0)."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3]).float()
    verify_model(torch.nn.Threshold(0, 0).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_contiguous():
    """Check aten::contiguous conversion (a no-op for the frontend)."""
    torch.set_grad_enabled(False)

    class ContiguousModule(Module):
        def forward(self, *args):
            return args[0].contiguous()

    data = torch.rand([10]).float()
    verify_model(ContiguousModule().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_batchnorm():
    """Check BatchNorm2d/3d with randomly initialised affine parameters."""

    def _randomize(norm):
        torch.nn.init.normal_(norm.weight, 0, 0.01)
        torch.nn.init.normal_(norm.bias)
        return norm

    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))
    for norm, inp in [(torch.nn.BatchNorm2d(16), inp_2d), (torch.nn.BatchNorm3d(16), inp_3d)]:
        verify_model(_randomize(norm.eval()).eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_instancenorm():
    """Check InstanceNorm2d/3d conversion."""
    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))
    norms_and_inputs = [
        (torch.nn.InstanceNorm2d(16), inp_2d),
        (torch.nn.InstanceNorm3d(16), inp_3d),
    ]
    for norm, inp in norms_and_inputs:
        verify_model(norm.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_layernorm():
    """Check LayerNorm (normalised over the last dim) for 4D and 5D inputs."""

    def _randomize(norm):
        torch.nn.init.normal_(norm.weight, 0, 0.01)
        torch.nn.init.normal_(norm.bias, 0.02)
        return norm

    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))
    for norm, inp in [(torch.nn.LayerNorm(10), inp_2d), (torch.nn.LayerNorm(10), inp_3d)]:
        verify_model(_randomize(norm.eval()).eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_groupnorm():
    """Check GroupNorm with several group counts, including the LayerNorm
    (one group) and InstanceNorm (groups == channels) special cases."""
    input_shape = [10, 6, 5, 5]
    input_data = torch.rand(input_shape).float()
    # Separate 6 channels into 3 groups
    verify_model(torch.nn.GroupNorm(3, 6).eval(), input_data=input_data)
    # Put all 6 channels into a single group (equivalent with LayerNorm)
    verify_model(torch.nn.GroupNorm(1, 6).eval(), input_data=input_data)
    # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
    verify_model(torch.nn.GroupNorm(6, 6).eval(), input_data=input_data)
    # Repeat with a different channel count and batch of one.
    input_shape = [1, 10, 4, 7]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.GroupNorm(1, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(2, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(5, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(10, 10).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reshape():
    """Check aten::reshape with static shapes, -1, and input-derived shapes."""
    torch.set_grad_enabled(False)
    input_shape = [2, 1, 10, 1, 10]
    new_shape = [2, 1, 10, 10]
    class Reshape1(Module):
        def forward(self, *args):
            return args[0].reshape(new_shape)
    class Reshape2(Module):
        def forward(self, *args):
            return args[0].reshape([-1])
    class Reshape3(torch.nn.Module):
        # Target shape computed from the runtime input shape.
        def forward(self, x):
            x_shape = x.shape
            return x.reshape((x_shape[0] * x_shape[1], x_shape[2]))
    input_data = torch.rand(input_shape).float()
    verify_model(Reshape1(), input_data=input_data)
    verify_model(Reshape2(), input_data=input_data)
    verify_model(Reshape3(), input_data=torch.randn(2, 3, 4))
@tvm.testing.uses_gpu
def test_flatten():
    """Check full flatten and batch-preserving flatten (start_dim=1)."""

    class Flatten(Module):
        def forward(self, x):
            return torch.flatten(x)

    class BatchFlatten(Module):
        def forward(self, x):
            return torch.flatten(x, start_dim=1)

    data = torch.rand((5, 2, 2))
    verify_model(Flatten(), input_data=data)
    verify_model(BatchFlatten(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_transpose():
    """Check aten::transpose (positive and negative dims) and aten::permute."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Transpose1(Module):
        def forward(self, *args):
            return args[0].transpose(2, 3)
    class Transpose2(Module):
        def forward(self, *args):
            return args[0].transpose(-2, -1)
    class Transpose3(Module):
        def forward(self, *args):
            return args[0].permute(0, 2, 3, 1)
    input_data = torch.rand(input_shape).float()
    verify_model(Transpose1().float().eval(), input_data=input_data)
    verify_model(Transpose2().float().eval(), input_data=input_data)
    verify_model(Transpose3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_size():
    """Check aten::size used as a scalar inside arithmetic."""
    torch.set_grad_enabled(False)

    class SizeAsScalar(Module):
        def forward(self, *args):
            return float(args[0].size(0)) * args[0]

    data = torch.rand([1, 3]).float()
    verify_model(SizeAsScalar().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_type_as():
    """Check aten::type_as conversion for a range of target dtypes."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3]

    def _create_module(dtype):
        # Build a module whose output is the input cast to *dtype*.
        class TypeAs(Module):
            def forward(self, *args):
                expected_type_tensor = torch.zeros(1, 3, dtype=dtype)
                return args[0].type_as(expected_type_tensor)

        return TypeAs()

    input_data = torch.randn(input_shape).float()
    verify_model(_create_module(torch.float64), input_data=input_data)
    verify_model(_create_module(torch.float32), input_data=input_data)
    verify_model(_create_module(torch.int64), input_data=input_data)
    verify_model(_create_module(torch.int32), input_data=input_data)
    verify_model(_create_module(torch.int16), input_data=input_data)
    verify_model(_create_module(torch.int8), input_data=input_data)

    if torch.cuda.is_available():
        check_fp16 = False
        try:
            # Only check half precision on supported hardwares.
            if have_fp16(tvm.gpu(0).compute_version):
                check_fp16 = True
        except Exception:  # unused "as e" binding removed
            # If GPU is not enabled in TVM, skip the fp16 test.
            pass

        # Temporary disable fp16 test
        check_fp16 = False

        if check_fp16:
            verify_model(_create_module(torch.float16), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_view():
    """Check aten::view with literal shapes, -1, and tensor-valued dims."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class View1(Module):
        def forward(self, *args):
            return args[0].view((1, 3 * 10 * 10))
    class View2(Module):
        def forward(self, *args):
            return args[0].view(args[0].shape[0], -1)
    class View3(Module):
        # Target dim computed from tensors at runtime.
        def forward(self, *args):
            d1 = torch.tensor(3) * torch.tensor(10) * torch.tensor(10)
            return args[0].view(args[0].shape[0], d1)
    input_data = torch.rand(input_shape).float()
    verify_model(View1().float().eval(), input_data=input_data)
    verify_model(View2().float().eval(), input_data=input_data)
    verify_model(View3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_select():
    """Check aten::select and aten::index_select (index tensor as input)."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Select1(Module):
        def forward(self, *args):
            return args[0].select(1, 1)
    class IndexedSelect(Module):
        # Holds the source tensor; the forward input is the index tensor.
        def __init__(self, inp, dim):
            super().__init__()
            self.inp = inp
            self.dim = dim
            if torch.cuda.is_available():
                self.inp = self.inp.cuda()
        def forward(self, index):
            return torch.index_select(self.inp, self.dim, index)
    input_data = torch.rand(input_shape).float()
    verify_model(Select1().float().eval(), input_data=input_data)
    x = torch.randn(3, 4)
    indices = torch.tensor([0, 2])
    verify_model(IndexedSelect(x, 0).eval(), input_data=indices)
    verify_model(IndexedSelect(x, 1).eval(), input_data=indices)
@tvm.testing.uses_gpu
def test_forward_clone():
    """Check aten::clone conversion."""
    torch.set_grad_enabled(False)

    class CloneModule(Module):
        def forward(self, *args):
            return args[0].clone()

    data = torch.rand([10]).float()
    verify_model(CloneModule().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_gather():
    """Check aten::gather along dims 0, 1 and 2 for 1D/2D/3D inputs."""
    torch.set_grad_enabled(False)
    class Gather1(Module):
        def forward(self, *args):
            return torch.gather(args[0], 0, args[1])
    class Gather2(Module):
        def forward(self, *args):
            return torch.gather(args[0], 1, args[1])
    class Gather3(Module):
        def forward(self, *args):
            return torch.gather(args[0], 2, args[1])
    input_data = torch.rand((4,)).float()
    index = torch.tensor([1])
    verify_model(Gather1().float().eval(), input_data=[input_data, index])
    input_data = torch.rand((2, 2)).float()
    index = torch.tensor([[1, 0], [0, 1]])
    verify_model(Gather1().float().eval(), input_data=[input_data, index])
    # Integer source tensor.
    input_data = torch.tensor([[1, 2], [3, 4]])
    index = torch.tensor([[0, 0], [1, 0]])
    verify_model(Gather2().float().eval(), input_data=[input_data, index])
    input_data = torch.rand((2, 2)).float()
    index = torch.tensor([[1, 0], [0, 1]])
    verify_model(Gather2().float().eval(), input_data=[input_data, index])
    input_data = torch.rand((3, 3, 3)).float()
    index = torch.tensor(
        [
            [[1, 0, 0], [1, 0, 1], [0, 1, 1]],
            [[1, 1, 1], [1, 2, 1], [1, 0, 1]],
            [[1, 2, 1], [1, 2, 1], [1, 2, 1]],
        ]
    )
    verify_model(Gather3().float().eval(), input_data=[input_data, index])
@tvm.testing.uses_gpu
def test_forward_logsoftmax():
    """Check LogSoftmax applied to a 2D slice of the input."""
    torch.set_grad_enabled(False)

    class LogSoftmaxOnSlice(Module):
        def forward(self, *args):
            return torch.nn.LogSoftmax(dim=1)(args[0][0, 0])

    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(LogSoftmaxOnSlice().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_norm():
    """Check aten::norm for inf/-inf and fractional/negative p values,
    with and without dim and keepdim."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Norm1(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=None, keepdim=False)
    class Norm2(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=False)
    class Norm3(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=True)
    class Norm4(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=(1, 2), keepdim=False)
    class Norm5(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=(1), keepdim=True)
    class Norm6(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(0.5), dim=(1), keepdim=True)
    class Norm7(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(1), dim=None, keepdim=False)
    class Norm8(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(2.0), dim=(1), keepdim=True)
    class Norm9(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(-0.5), dim=(1, 2), keepdim=True)
    class Norm10(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(-2), dim=(1), keepdim=False)
    input_data = torch.rand(input_shape).float()
    verify_model(Norm1().float().eval(), input_data=input_data)
    verify_model(Norm2().float().eval(), input_data=input_data)
    verify_model(Norm3().float().eval(), input_data=input_data)
    verify_model(Norm4().float().eval(), input_data=input_data)
    verify_model(Norm5().float().eval(), input_data=input_data)
    verify_model(Norm6().float().eval(), input_data=input_data)
    verify_model(Norm7().float().eval(), input_data=input_data)
    verify_model(Norm8().float().eval(), input_data=input_data)
    verify_model(Norm9().float().eval(), input_data=input_data)
    verify_model(Norm10().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_frobenius_norm():
    """Check aten::frobenius_norm (explicit p="fro" and the default form)."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class FroNorm1(Module):
        def forward(self, *args):
            return torch.norm(args[0])
    class FroNorm2(Module):
        def forward(self, *args):
            return torch.norm(args[0], p="fro", dim=None, keepdim=True)
    class FroNorm3(Module):
        def forward(self, *args):
            return torch.norm(args[0], p="fro", dim=(1), keepdim=True)
    class FroNorm4(Module):
        def forward(self, *args):
            return torch.norm(args[0], dim=None, keepdim=False)
    input_data = torch.rand(input_shape).float()
    verify_model(FroNorm1().float().eval(), input_data=input_data)
    verify_model(FroNorm2().float().eval(), input_data=input_data)
    verify_model(FroNorm3().float().eval(), input_data=input_data)
    verify_model(FroNorm4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_sigmoid():
    """Check torch.nn.Sigmoid conversion."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.Sigmoid().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_dense():
    """Check torch.nn.Linear (with/without bias) and assert the converted
    Relay graph contains no explicit multiply op."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Dense1(Module):
        # Linear layer with bias, applied to a 2D slice of the input.
        def __init__(self):
            super(Dense1, self).__init__()
            self.linear = torch.nn.Linear(10, 7, bias=True)
        def forward(self, *args):
            return self.linear(args[0][0, 0])
    class Dense2(Module):
        # Linear layer without bias.
        def __init__(self):
            super(Dense2, self).__init__()
            self.linear = torch.nn.Linear(10, 7, bias=False)
        def forward(self, *args):
            return self.linear(args[0][0, 0])
    input_data = torch.rand(input_shape).float()
    verify_model(Dense1().float().eval(), input_data=input_data)
    verify_model(Dense2().float().eval(), input_data=input_data)
    trace = torch.jit.trace(Dense1(), [input_data])
    mod, params = relay.frontend.from_pytorch(
        trace,
        [("input", input_shape)],
    )
    # The linear layer must not be lowered to an elementwise multiply.
    assert not any([op.name == "multiply" for op in list_ops(mod["main"])])
@tvm.testing.uses_gpu
def test_forward_dropout():
    """Check that all dropout variants convert (identity in eval mode)."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.Dropout(p=0.5).eval(), input_data=data[0, 0])
    verify_model(torch.nn.Dropout2d(p=0.5).eval(), input_data=data[0])
    verify_model(torch.nn.Dropout3d(p=0.5).eval(), input_data=data)
    verify_model(torch.nn.AlphaDropout(p=0.5).eval(), input_data=data[0, 0])
@tvm.testing.uses_gpu
def test_forward_slice():
    """Check aten::slice: basic ranges, negative bounds, dynamic (tensor)
    bounds, and strided slicing."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Slice1(Module):
        def forward(self, *args):
            return args[0][:, :, :, :3]
    class Slice2(Module):
        def forward(self, *args):
            return args[0][0, :, :-3, :]
    class Slice3(Module):
        # Slice bounds computed from tensors at runtime.
        def forward(self, *args):
            x0 = torch.tensor(2) - torch.tensor(1)
            x1 = torch.tensor(3) + torch.tensor(1)
            return args[0][:, x0:, 1:x1, :]
    class SliceWithStride(torch.nn.Module):
        def forward(self, x):
            return x[..., 0::2] + x[..., 1::2]
    class SliceWithStride2(torch.nn.Module):
        def forward(self, x):
            return x[0::2, 0::2] + x[1::2, 1::2]
    input_data = torch.rand(input_shape).float()
    verify_model(Slice1(), input_data=input_data)
    verify_model(Slice2(), input_data=input_data)
    verify_model(Slice3(), input_data=input_data)
    verify_model(SliceWithStride(), input_data=torch.randn(1, 4))
    verify_model(SliceWithStride2(), input_data=torch.randn(4, 4))
@tvm.testing.uses_gpu
def test_forward_mean():
    """Check aten::mean along a single dimension."""
    torch.set_grad_enabled(False)

    class MeanAlongDim2(Module):
        def forward(self, *args):
            return args[0].mean(2)

    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(MeanAlongDim2().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_expand():
    """Check aten::expand along new and existing dimensions."""
    torch.set_grad_enabled(False)
    class Expand1(Module):
        # -1 keeps the original extent of a dimension.
        def forward(self, *args):
            return args[0].expand((3, -1, -1, -1))
    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(Expand1().float().eval(), input_data=input_data)
    class Expand2(Module):
        def forward(self, *args):
            return args[0].expand((3, 3, 3, 1))
    input_shape = [3, 1]
    input_data = torch.rand(input_shape).float()
    verify_model(Expand2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_pow():
    """Check aten::pow with an integer scalar exponent."""
    torch.set_grad_enabled(False)

    class Square(Module):
        def forward(self, *args):
            return args[0] ** 2

    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(Square().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_chunk():
    """Check aten::chunk by splitting dim 2 into 7 pieces and concatenating."""
    torch.set_grad_enabled(False)

    class ChunkAndCat(Module):
        def forward(self, *args):
            pieces = args[0].chunk(7, 2)
            return torch.cat(pieces, 2)

    data = torch.rand([1, 3, 14, 14]).float()
    verify_model(ChunkAndCat().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_upsample():
    """Check F.interpolate for nearest/bilinear modes with size or scale."""
    class Upsample(Module):
        # Thin wrapper so interpolate parameters can be varied per case.
        def __init__(self, size=None, scale=None, mode="nearest", align_corners=None):
            super().__init__()
            self.size = size
            self.scale = scale
            self.mode = mode
            self.align_corners = align_corners
        def forward(self, x):
            return torch.nn.functional.interpolate(
                x,
                size=self.size,
                scale_factor=self.scale,
                mode=self.mode,
                align_corners=self.align_corners,
            )
    inp = torch.rand((1, 3, 32, 32))
    verify_model(Upsample(size=(64, 64), mode="nearest"), inp)
    verify_model(Upsample(scale=2, mode="nearest"), inp)
    verify_model(Upsample(size=(50, 50), mode="nearest"), inp)
    verify_model(Upsample(size=(64, 64), mode="bilinear", align_corners=True), inp)
    verify_model(Upsample(scale=2, mode="bilinear", align_corners=True), inp)
    verify_model(Upsample(size=(50, 50), mode="bilinear", align_corners=True), inp)
@tvm.testing.uses_gpu
def test_to():
    """ test for aten::to(...) """
    class ToCPU(Module):
        def forward(self, x):
            return x.to("cpu")
    class ToFloat(Module):
        def forward(self, x):
            return x.float()
    class ToInt(Module):
        def forward(self, x):
            return x.int()
    class ToLong(Module):
        def forward(self, x):
            return x.long()
    class ToDouble(Module):
        def forward(self, x):
            return x.double()
    class ToFloat16(Module):
        def forward(self, x):
            return x.to(torch.float16)
    # Device move plus dtype casts on both tensor and scalar inputs.
    verify_model(ToCPU().eval(), torch.rand((1, 3, 32, 32)))
    verify_model(ToFloat().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))
    verify_model(ToFloat().eval(), torch.tensor(2, dtype=torch.int))
    verify_model(ToInt().eval(), torch.zeros((1, 3, 32, 32)))
    verify_model(ToInt().eval(), torch.tensor(0.8))
    verify_model(ToLong().eval(), torch.tensor(0.8))
    verify_model(ToDouble().eval(), torch.tensor(0.8))
    verify_model(ToFloat16().eval(), torch.tensor(2, dtype=torch.float32))
    verify_model(ToFloat16().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))
@tvm.testing.uses_gpu
def test_adaptive_pool3d():
    """Check adaptive max/avg 3D pooling for several output sizes."""
    for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
        inp = torch.rand(ishape)
        pools = [
            torch.nn.AdaptiveMaxPool3d((1, 1, 1)),
            torch.nn.AdaptiveMaxPool3d((2, 2, 2)),
            torch.nn.AdaptiveAvgPool3d((1, 1, 1)),
            torch.nn.AdaptiveAvgPool3d((2, 2, 2)),
            torch.nn.AdaptiveAvgPool3d((4, 8, 8)),
            torch.nn.AdaptiveMaxPool3d((7, 8, 9)),
        ]
        for pool in pools:
            verify_model(pool.eval(), inp)
@tvm.testing.uses_gpu
def test_forward_functional_pad():
    """Check torch.nn.functional.pad with 1D, 2D and 3D padding tuples.

    ``Pad1.forward`` closes over the enclosing ``pad`` variable, so rebinding
    ``pad`` before each verify_model call changes the padding that the traced
    module uses.
    """
    torch.set_grad_enabled(False)
    pad = (0, 0)
    class Pad1(Module):
        def forward(self, *args):
            return torch.nn.functional.pad(args[0], pad, "constant", 0)
    input_data = torch.rand((3, 3, 4, 2))
    pad = (1, 1)
    verify_model(Pad1().float().eval(), input_data=input_data)
    pad = (1, 1, 2, 2)
    verify_model(Pad1().float().eval(), input_data=input_data)
    pad = (0, 1, 2, 1, 3, 3)
    verify_model(Pad1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_zero_pad2d():
    """Check ZeroPad2d with uniform and per-side padding."""
    data = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ZeroPad2d(2).eval(), data)
    verify_model(torch.nn.ZeroPad2d((1, 1, 2, 0)).eval(), data)
@tvm.testing.uses_gpu
def test_forward_constant_pad1d():
    """Check ConstantPad1d with uniform and per-side padding.

    Fixed to instantiate ConstantPad1d: the test previously used
    ConstantPad2d (with a 2-element padding tuple), so the 1D constant-pad
    conversion was never exercised.
    """
    inp = torch.rand((1, 2, 4))
    verify_model(torch.nn.ConstantPad1d(2, 3.5).eval(), inp)
    inp = torch.rand((1, 2, 3))
    verify_model(torch.nn.ConstantPad1d((3, 1), 3.5).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_constant_pad2d():
    """Check ConstantPad2d with uniform and per-side padding."""
    data = torch.rand((1, 2, 2, 2))
    verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), data)
    verify_model(torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5).eval(), data)
@tvm.testing.uses_gpu
def test_forward_constant_pad3d():
    """Check ConstantPad3d with uniform and per-side padding."""
    data = torch.rand((1, 3, 2, 2, 2))
    verify_model(torch.nn.ConstantPad3d(3, 3.5).eval(), data)
    verify_model(torch.nn.ConstantPad3d((3, 4, 5, 6, 0, 1), 3.5).eval(), data)
@tvm.testing.uses_gpu
def test_forward_reflection_pad1d():
    """Check ReflectionPad1d with uniform and per-side padding."""
    data = torch.rand((1, 2, 4))
    verify_model(torch.nn.ReflectionPad1d(2).eval(), data)
    verify_model(torch.nn.ReflectionPad1d((3, 1)).eval(), data)
    data = torch.rand((2, 4, 5))
    verify_model(torch.nn.ReflectionPad1d((2, 3)).eval(), data)
@tvm.testing.uses_gpu
def test_forward_reflection_pad2d():
    """Check ReflectionPad2d with uniform and per-side padding."""
    data = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ReflectionPad2d(2).eval(), data)
    verify_model(torch.nn.ReflectionPad2d((1, 1, 2, 0)).eval(), data)
    data = torch.rand((2, 4, 5, 6))
    verify_model(torch.nn.ReflectionPad2d((1, 3, 2, 4)).eval(), data)
@tvm.testing.uses_gpu
def test_forward_replication_pad1d():
    """Check ReplicationPad1d with uniform and per-side padding."""
    data = torch.rand((1, 2, 4))
    verify_model(torch.nn.ReplicationPad1d(2).eval(), data)
    verify_model(torch.nn.ReplicationPad1d((3, 1)).eval(), data)
    data = torch.rand((2, 4, 5))
    verify_model(torch.nn.ReplicationPad1d((2, 3)).eval(), data)
@tvm.testing.uses_gpu
def test_forward_replication_pad2d():
    """Check ReplicationPad2d with uniform and per-side padding."""
    data = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ReplicationPad2d(2).eval(), data)
    verify_model(torch.nn.ReplicationPad2d((1, 1, 2, 0)).eval(), data)
    data = torch.rand((2, 4, 5, 6))
    verify_model(torch.nn.ReplicationPad2d((1, 3, 2, 4)).eval(), data)
@tvm.testing.uses_gpu
def test_forward_replication_pad3d():
    """Check ReplicationPad3d with uniform and per-side padding."""
    data = torch.rand((1, 1, 3, 3, 3))
    verify_model(torch.nn.ReplicationPad3d(3).eval(), data)
    verify_model(torch.nn.ReplicationPad3d((1, 1, 2, 2, 1, 1)).eval(), data)
    data = torch.rand((7, 5, 4, 5, 6))
    verify_model(torch.nn.ReplicationPad3d((2, 3, 2, 5, 1, 4)).eval(), data)
@tvm.testing.uses_gpu
def test_forward_upsample3d():
    """Check 3D upsampling in nearest and trilinear modes."""
    data = torch.arange(1, 9, dtype=torch.float32).view(1, 1, 2, 2, 2)
    verify_model(torch.nn.Upsample(scale_factor=2, mode="nearest").eval(), data)
    verify_model(torch.nn.Upsample(scale_factor=2, mode="trilinear").eval(), data)
    aligned = torch.nn.Upsample(scale_factor=2, mode="trilinear", align_corners=True)
    verify_model(aligned.eval(), data)
def test_forward_nms():
    """dynamic Non-Maximum Suppression"""
    torch.set_grad_enabled(False)
    class NonMaxSupression(Module):
        def __init__(self, iou_thres):
            super().__init__()
            self.iou_threshold = iou_thres
        def forward(self, *args):
            return torchvision.ops.nms(args[0], args[1], self.iou_threshold)
    # Generate random input data
    def _gen_rand_inputs(num_boxes):
        box_len = 4
        boxes = torch.rand(num_boxes, box_len, dtype=torch.float) * 0.5
        # Make (x1, y1, x2, y2) boxes valid: x2 >= x1 and y2 >= y1.
        boxes[:, 2] += boxes[:, 0]
        boxes[:, 3] += boxes[:, 1]
        scores = torch.rand(num_boxes, dtype=torch.float)
        return boxes, scores
    targets = ["llvm"]  # dynamic nms does not work on gpu
    for num_boxes, iou_thres in [(10, 0.3), (100, 0.5), (500, 0.9)]:
        in_boxes, in_scores = _gen_rand_inputs(num_boxes)
        verify_trace_model(NonMaxSupression(iou_thres), [in_boxes, in_scores], targets)
def test_forward_roi_align():
    """ROI align"""
    torch.set_grad_enabled(False)
    class ROIAlgin(Module):
        # NOTE(review): class name misspells "ROIAlign"; kept since it is
        # local to this test.
        def __init__(self, output_sizes, spatial_scale=1.0, sampling_ratio=-1):
            super().__init__()
            self.spatial_scale = spatial_scale
            self.sampling_ratio = sampling_ratio
            self.output_sizes = output_sizes
        def forward(self, *args):
            return torchvision.ops.roi_align(
                args[0],
                args[1],
                self.output_sizes,
                self.spatial_scale,
                self.sampling_ratio,
            )
    in_data = torch.Tensor(np.random.uniform(size=(1, 8, 100, 100)))
    in_boxes = torch.Tensor(np.random.uniform(0.0, 100.0, size=(35, 4)))
    # Prepend the batch index column expected by torchvision's roi_align.
    in_batch = torch.zeros((35, 1), dtype=torch.float)
    in_boxes = torch.cat([in_batch, in_boxes], dim=1)
    verify_model(ROIAlgin(7), [in_data, in_boxes])
    verify_model(ROIAlgin((10, 10), 0.7, 5), [in_data, in_boxes])
    verify_model(ROIAlgin(15, 0.9, 3), [in_data, in_boxes])
@tvm.testing.uses_gpu
def test_conv3d():
    """Check 3D convolution for several input shapes, kernels and strides."""
    for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
        inp = torch.rand(ishape)
        # Stray trailing commas previously turned the first two statements
        # into throwaway one-element tuples; removed.
        verify_model(torch.nn.Conv3d(32, 16, (3, 3, 3), padding=(1, 1, 1)).eval(), inp)
        verify_model(torch.nn.Conv3d(32, 16, (5, 5, 5), padding=(2, 2, 2)).eval(), inp)
        verify_model(torch.nn.Conv3d(32, 16, kernel_size=1).eval(), inp)
        # downsample
        verify_model(torch.nn.Conv3d(32, 16, kernel_size=1, stride=2).eval(), inp)
@tvm.testing.uses_gpu
def test_conv3d_transpose():
    """Check 3D transposed convolution for several shapes, kernels, strides."""
    for ishape in [(1, 8, 10, 5, 10), (1, 8, 5, 8, 8), (1, 8, 13, 7, 7)]:
        inp = torch.rand(ishape)
        # Stray trailing commas previously turned the first two statements
        # into throwaway one-element tuples; removed.
        verify_model(
            torch.nn.ConvTranspose3d(
                in_channels=8, out_channels=33, kernel_size=3, stride=2
            ).eval(),
            inp,
        )
        verify_model(
            torch.nn.ConvTranspose3d(
                in_channels=8,
                out_channels=20,
                kernel_size=(3, 5, 2),
                stride=(2, 1, 1),
                padding=(0, 4, 2),
            ).eval(),
            inp,
        )
        verify_model(
            torch.nn.ConvTranspose3d(in_channels=8, out_channels=20, kernel_size=1).eval(), inp
        )
        verify_model(
            torch.nn.ConvTranspose3d(in_channels=8, out_channels=5, kernel_size=1, stride=2).eval(),
            inp,
        )
# Model tests
@tvm.testing.uses_gpu
def test_resnet18():
    """End-to-end check of torchvision's resnet18."""
    torch.set_grad_enabled(False)
    verify_model("resnet18", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_squeezenet1_0():
    """End-to-end check of torchvision's squeezenet1_0."""
    torch.set_grad_enabled(False)
    verify_model("squeezenet1_0", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_squeezenet1_1():
    """End-to-end check of torchvision's squeezenet1_1."""
    torch.set_grad_enabled(False)
    verify_model("squeezenet1_1", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_densenet121():
    """End-to-end check of torchvision's densenet121."""
    torch.set_grad_enabled(False)
    verify_model("densenet121", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_inception_v3():
    """End-to-end check of torchvision's inception_v3."""
    torch.set_grad_enabled(False)
    verify_model("inception_v3", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_googlenet():
    """End-to-end check of torchvision's googlenet."""
    torch.set_grad_enabled(False)
    verify_model("googlenet", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_mnasnet0_5():
    """End-to-end check of torchvision's mnasnet0_5."""
    torch.set_grad_enabled(False)
    verify_model("mnasnet0_5", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_mobilenet_v2():
    """End-to-end check of torchvision's mobilenet_v2."""
    torch.set_grad_enabled(False)
    verify_model("mobilenet_v2", rtol=1e-4, atol=1e-4)
"""
#TODO: Fix VGG and AlexNet issues (probably due to pooling)
@tvm.testing.uses_gpu
def test_alexnet():
torch.set_grad_enabled(False)
verify_model("alexnet")
@tvm.testing.uses_gpu
def test_vgg11():
torch.set_grad_enabled(False)
verify_model("vgg11")
@tvm.testing.uses_gpu
def test_vgg11_bn():
torch.set_grad_enabled(False)
verify_model("vgg11_bn")
"""
@tvm.testing.uses_gpu
def test_custom_conversion_map():
    """Check overriding op conversion with a user-supplied map
    (torchvision::roi_align -> relay.op.vision.roi_align)."""
    def get_roi_align():
        # Build an RoIAlign module plus matching (features, rois) inputs.
        pool_size = 5
        n_channels = 2 * (pool_size ** 2)
        x = torch.rand(2, n_channels, 10, 10)
        rois = torch.tensor(
            [
                [0, 0, 0, 9, 9],  # format is (xyxy)
                [0, 0, 5, 4, 9],
                [0, 5, 5, 9, 9],
                [1, 0, 0, 9, 9],
            ],
            dtype=torch.float,
        )
        roi_align = torchvision.ops.RoIAlign(pool_size, spatial_scale=1, sampling_ratio=-1)
        return roi_align.eval(), [x, rois]
    def convert_roi_align():
        # Custom converter mapping the TorchScript op inputs onto Relay.
        def _impl(inputs, input_types):
            spatial_scale = inputs[2]
            pooled_size = (inputs[3], inputs[4])
            sampling_ratio = inputs[5]
            return relay.op.vision.roi_align(
                inputs[0], inputs[1], pooled_size, spatial_scale, sampling_ratio
            )
        return _impl
    custom_map = {"torchvision::roi_align": convert_roi_align()}
    model, inputs = get_roi_align()
    verify_model(model, inputs, custom_map)
@tvm.testing.uses_gpu
def test_segmentaton_models():
    """Check FCN and DeepLabV3 segmentation models end to end."""
    # NOTE(review): function name misspells "segmentation"; renaming would
    # change the collected test id, so it is kept as-is.
    class SegmentationModelWrapper(Module):
        # Unwraps the "out" entry of the model's dict output so verify_model
        # compares a plain tensor.
        def __init__(self, model):
            super().__init__()
            self.model = model
        def forward(self, inp):
            out = self.model(inp)
            return out["out"]
    fcn = torchvision.models.segmentation.fcn_resnet101(pretrained=True)
    deeplab = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True)
    inp = [torch.rand((1, 3, 300, 300), dtype=torch.float)]
    verify_model(SegmentationModelWrapper(fcn.eval()), inp, atol=1e-4, rtol=1e-4)
    verify_model(SegmentationModelWrapper(deeplab.eval()), inp, atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_3d_models():
    """Check the torchvision r3d_18 video model end to end."""
    ishape = (1, 3, 4, 56, 56)
    model = torchvision.models.video.r3d_18(pretrained=True).eval()
    verify_model(model, [torch.rand(ishape)], atol=1e-4, rtol=1e-4)
def _get_default_vm_targets():
    """Return the target strings of all targets enabled for testing."""
    targets = []
    for tgt, _ in tvm.testing.enabled_targets():
        targets.append(tgt)
    return targets
def verify_script_model(pt_model, ishapes, targets):
    """Script *pt_model* and run it through the VM-based checker."""
    scripted = torch.jit.script(pt_model)
    verify_model_vm(scripted, ishapes, targets=targets)
def verify_trace_model(pt_model, idata, targets):
    """Trace *pt_model* on *idata* and run it through the VM-based checker."""
    traced = torch.jit.trace(pt_model, idata)
    shapes = [tensor.shape for tensor in idata]
    verify_model_vm(traced, shapes, idata=idata, targets=targets)
def verify_model_vm(input_model, ishapes, idtype=torch.float, idata=None, targets=None):
    """Compile *input_model* with the Relay VM and compare against PyTorch.

    Parameters
    ----------
    input_model : torch.jit.ScriptModule
        Traced or scripted model to convert.
    ishapes : list
        Shape of each model input; also used to generate random data.
    idtype : torch.dtype
        Dtype used when random inputs have to be generated.
    idata : list of torch.Tensor, optional
        Concrete inputs; random data is generated when omitted.
    targets : list of str, optional
        TVM targets to run on; defaults to ["llvm"]. (Previously a mutable
        default argument.)
    """
    targets = ["llvm"] if targets is None else targets
    input_names = ["i{}".format(idx) for idx in range(len(ishapes))]
    input_shapes = list(zip(input_names, ishapes))
    # Explicit None check: an explicitly-passed empty idata is respected.
    input_data = (
        idata if idata is not None else [torch.randn(shape, dtype=idtype) for shape in ishapes]
    )

    # Compile via VM
    mod, params = relay.frontend.from_pytorch(input_model, input_shapes)

    for tgt in targets:
        print("Running on target", tgt)
        ctx = tvm.context(tgt, 0)

        executor = relay.create_executor("vm", mod=mod, ctx=ctx, target=tgt)
        evaluator = executor.evaluate()

        # Inference
        for name, inp in zip(input_names, input_data):
            params[name] = inp.numpy()
        vm_res = evaluator(**params)

        # Baseline result
        with torch.no_grad():
            pt_result = input_model(*input_data)

        # Verify the accuracy
        if not isinstance(pt_result, torch.Tensor):
            # Scalar result (e.g. a Python int returned by a scripted loop).
            tvm_res = vm_res.asnumpy().item()
            assert pt_result == tvm_res
        else:
            tvm.testing.assert_allclose(vm_res.asnumpy(), pt_result.numpy(), rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_control_flow():
    """Check conversion of scripted models using if/else, for and while.

    All models are converted via torch.jit.script (see verify_script_model),
    so the Python control flow survives into the TorchScript IR.
    """
    class SimpleIf(torch.nn.Module):
        # Single data-dependent branch.
        def __init__(self, N, M):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(N, M))
        def forward(self, inp):
            if inp.sum() > 0.0:
                output = self.weight + inp
            else:
                output = self.weight - inp
            return output
    class NestedIf(torch.nn.Module):
        # Branch inside a branch.
        def __init__(self, N, M):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(N, M))
        def forward(self, inp):
            if inp.sum() > 0.0:
                if inp.mean() > 0.0:
                    output = self.weight + inp
                else:
                    output = self.weight - inp
            else:
                if inp.mean() >= 0.0:
                    output = self.weight * inp
                else:
                    output = self.weight / inp
            return output
    class ScalarLoop(torch.nn.Module):
        # Loop accumulating a Python scalar, followed by a branch.
        def forward(self, inp):
            a = 0
            for i in range(inp.size(0)):
                b = i * i
                b = b + 1
                a += b
            if a != 0:
                a += 1
            else:
                a += 2
            return a
    class SimpleLoop(torch.nn.Module):
        # Loop accumulating a tensor.
        def forward(self, inp):
            a = inp
            for i in range(inp.size(0)):
                b = a * 2.0
                c = a + b
                a += c
            return a
    class LoopWithIf(torch.nn.Module):
        # Branch nested inside a loop body.
        def forward(self, inp):
            a = inp
            for i in range(inp.size(0)):
                b = a * 2.0
                b = a + b
                if b.sum() > 0.0:
                    a += b
                else:
                    a -= b
            return a
    class NestedLoop(torch.nn.Module):
        # Loop nested inside a loop.
        def forward(self, inp):
            a = inp
            for i in range(inp.size(0)):
                b = a * float(i)
                for j in range(inp.size(1)):
                    a += b * float(j)
            return a
    class SimpleScalarWhileLoop(torch.nn.Module):
        # While loops over Python scalars.
        def forward(self, inp):
            a = 1
            i = 0
            while i <= inp.size(0):
                a += i
                i += 2
            i = 0
            # also test constant init cond
            while i < 10:
                a += i
                i += 3
            return a
    class SimpleWhileLoop(torch.nn.Module):
        # While loop over a tensor accumulator.
        def forward(self, inp):
            a = inp
            i = 0
            while i < inp.size(0):
                a += a * float(i) * 2.0
                i += 1
            return a
    models = [
        SimpleIf(10, 20),
        NestedIf(10, 20),
        ScalarLoop(),
        SimpleLoop(),
        LoopWithIf(),
        SimpleScalarWhileLoop(),
        SimpleWhileLoop(),
        NestedLoop(),
    ]
    for pt_model in models:
        verify_script_model(pt_model.eval(), [(10, 20)], _get_default_vm_targets())
@tvm.testing.uses_gpu
def test_simple_rnn():
    """Check a scripted RNN loop that wraps a traced cell."""
    # The mixed tracing and scripting example from
    # https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#mixing-scripting-and-tracing
    class DecisionGate(torch.nn.Module):
        def forward(self, x):
            if x.sum() > 0:
                return x
            else:
                return -x
    class Cell(torch.nn.Module):
        def __init__(self, dg):
            super(Cell, self).__init__()
            self.dg = dg
            self.linear = torch.nn.Linear(4, 4)
        def forward(self, x, h):
            new_h = torch.tanh(self.dg(self.linear(x)) + h)
            return new_h, new_h
    class RNNLoop(torch.nn.Module):
        # Scripted loop over time steps; the cell itself is traced.
        def __init__(self):
            super().__init__()
            x = torch.rand(10, 4, dtype=torch.float)
            h = torch.rand(10, 4, dtype=torch.float)
            self.cell = torch.jit.trace(Cell(DecisionGate()), (x, h))
        def forward(self, xs):
            h = torch.zeros(10, 4, dtype=torch.float)
            y = torch.zeros(10, 4, dtype=torch.float)
            for i in range(xs.size(0)):
                y, h = self.cell(xs[i], h)
            return y
    verify_script_model(RNNLoop().eval(), [(10, 10, 4)], _get_default_vm_targets())
@tvm.testing.uses_gpu
def test_forward_reduce_sum():
    """Test conversion of Tensor.sum across dim/keepdim argument combinations."""
    torch.set_grad_enabled(False)

    class ReduceSum(Module):
        # Fixes the reduction arguments at construction so tracing records
        # them as constants, exactly like the original one-class-per-case setup.
        def __init__(self, *red_args, **red_kwargs):
            super().__init__()
            self._red_args = red_args
            self._red_kwargs = red_kwargs

        def forward(self, inp):
            return inp.sum(*self._red_args, **self._red_kwargs)

    inp = torch.rand([1, 3, 10, 10]).float()
    for red_args, red_kwargs in [
        ((1,), {}),
        ((), dict(dim=1, keepdim=False)),
        ((), dict(dim=2, keepdim=True)),
        ((), dict(dim=(2, 3), keepdim=True)),
        ((), dict(dim=(2, 3), keepdim=False)),
    ]:
        verify_model(ReduceSum(*red_args, **red_kwargs).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_reduce_prod():
    """Test conversion of Tensor.prod with positional and keyword dim forms."""
    torch.set_grad_enabled(False)

    class ReduceProd(Module):
        # Reduction arguments fixed at construction; traced as constants.
        def __init__(self, *red_args, **red_kwargs):
            super().__init__()
            self._red_args = red_args
            self._red_kwargs = red_kwargs

        def forward(self, inp):
            return inp.prod(*self._red_args, **self._red_kwargs)

    inp = torch.rand([1, 3, 10, 10]).float()
    for red_args, red_kwargs in [
        ((1,), {}),
        ((), dict(dim=1, keepdim=False)),
        ((), dict(dim=2, keepdim=True)),
    ]:
        verify_model(ReduceProd(*red_args, **red_kwargs).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_argmin():
    """Test conversion of Tensor.argmin with positional and keyword dim forms."""
    torch.set_grad_enabled(False)

    class ArgMin(Module):
        # Arguments fixed at construction; traced as constants.
        def __init__(self, *am_args, **am_kwargs):
            super().__init__()
            self._am_args = am_args
            self._am_kwargs = am_kwargs

        def forward(self, inp):
            return inp.argmin(*self._am_args, **self._am_kwargs)

    inp = torch.rand([1, 3, 10, 10]).float()
    for am_args, am_kwargs in [
        ((1,), {}),
        ((), dict(dim=1, keepdim=False)),
        ((), dict(dim=2, keepdim=True)),
    ]:
        verify_model(ArgMin(*am_args, **am_kwargs).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_argmax():
    """Test conversion of Tensor.argmax with positional and keyword dim forms."""
    torch.set_grad_enabled(False)

    class ArgMax(Module):
        # Arguments fixed at construction; traced as constants.
        def __init__(self, *am_args, **am_kwargs):
            super().__init__()
            self._am_args = am_args
            self._am_kwargs = am_kwargs

        def forward(self, inp):
            return inp.argmax(*self._am_args, **self._am_kwargs)

    inp = torch.rand([1, 3, 10, 10]).float()
    for am_args, am_kwargs in [
        ((1,), {}),
        ((), dict(dim=1, keepdim=False)),
        ((), dict(dim=2, keepdim=True)),
    ]:
        verify_model(ArgMax(*am_args, **am_kwargs).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_std():
    """Test conversion of Tensor.std across dim/keepdim/unbiased combinations."""
    torch.set_grad_enabled(False)

    class Std(Module):
        # Wraps Tensor.std with the reduction arguments fixed at construction,
        # so each traced graph matches the original per-case modules.
        def __init__(self, *std_args, **std_kwargs):
            super().__init__()
            self._std_args = std_args
            self._std_kwargs = std_kwargs

        def forward(self, inp):
            return inp.std(*self._std_args, **self._std_kwargs)

    cases = [
        ((1,), dict(unbiased=False)),
        ((), dict(dim=1, keepdim=False, unbiased=False)),
        ((), dict(dim=2, keepdim=True, unbiased=False)),
        ((), dict(dim=(2, 3), keepdim=True, unbiased=False)),
        ((), dict(dim=(2, 3), keepdim=False, unbiased=False)),
        ((), dict(unbiased=False)),
        ((), dict(dim=1, keepdim=False, unbiased=True)),
        ((), dict(dim=(2, 3), keepdim=True, unbiased=True)),
        ((), dict(unbiased=True)),
    ]
    inp = torch.rand([1, 3, 10, 10]).float()
    for std_args, std_kwargs in cases:
        verify_model(Std(*std_args, **std_kwargs).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_variance():
    """Test conversion of Tensor.var across dim/keepdim/unbiased combinations."""
    torch.set_grad_enabled(False)

    class Variance(Module):
        # Wraps Tensor.var with the reduction arguments fixed at construction,
        # so each traced graph matches the original per-case modules.
        def __init__(self, *var_args, **var_kwargs):
            super().__init__()
            self._var_args = var_args
            self._var_kwargs = var_kwargs

        def forward(self, inp):
            return inp.var(*self._var_args, **self._var_kwargs)

    cases = [
        ((1,), dict(unbiased=False)),
        ((), dict(dim=1, keepdim=False, unbiased=False)),
        ((), dict(dim=2, keepdim=True, unbiased=False)),
        ((), dict(dim=(2, 3), keepdim=True, unbiased=False)),
        ((), dict(dim=(2, 3), keepdim=False, unbiased=False)),
        ((), dict(unbiased=False)),
        ((), dict(dim=1, keepdim=False, unbiased=True)),
        ((), dict(dim=(2, 3), keepdim=True, unbiased=True)),
        ((), dict(unbiased=True)),
    ]
    inp = torch.rand([1, 3, 10, 10]).float()
    for var_args, var_kwargs in cases:
        verify_model(Variance(*var_args, **var_kwargs).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_rsub():
    """Test conversion of torch.rsub, with and without alpha, on mixed dtypes."""
    torch.set_grad_enabled(False)

    class Rsub1(Module):
        def forward(self, *args):
            return torch.rsub(args[0], args[1])

    class Rsub2(Module):
        def forward(self, *args):
            return torch.rsub(args[0], args[1], alpha=0.5)

    lhs = torch.rand([1, 3]).float()
    rhs_float = torch.rand([1, 3]).float()
    rhs_int = torch.rand([1, 3]).int()
    # Each variant runs with both a float and an int right-hand side.
    for model in (Rsub1(), Rsub2()):
        for rhs in (rhs_float, rhs_int):
            verify_model(model.float().eval(), input_data=[lhs, rhs])
@tvm.testing.uses_gpu
def test_forward_embedding():
    """Test conversion of torch.nn.Embedding lookups (default, dense, sparse)."""
    torch.set_grad_enabled(False)
    # (embedding kwargs beyond (num, dim), num_embeddings, dim, index high, index shape)
    configs = [
        ({}, 10, 3, 10, [2, 4]),
        (dict(sparse=False), 4, 5, 4, [2, 3, 4]),
        (dict(sparse=True), 4, 5, 4, [2, 3, 4]),
    ]
    for extra_kwargs, num_emb, dim, high, ishape in configs:
        indices = torch.randint(0, high, ishape).long()
        model = torch.nn.Embedding(num_emb, dim, **extra_kwargs)
        verify_model(model.float().eval(), input_data=indices)
@tvm.testing.uses_gpu
def test_forward_onehot():
    """Test conversion of torch.nn.functional.one_hot with explicit num_classes."""
    torch.set_grad_enabled(False)

    class OneHot(Module):
        # num_classes is fixed at construction and traced as a constant.
        def __init__(self, num_classes):
            super().__init__()
            self.num_classes = num_classes

        def forward(self, inp):
            return torch.nn.functional.one_hot(inp, num_classes=self.num_classes)

    verify_model(OneHot(3).float().eval(), input_data=torch.arange(0, 5) % 3)
    verify_model(OneHot(5).float().eval(), input_data=torch.arange(0, 5) % 4)
@tvm.testing.uses_gpu
def test_forward_isfinite():
    """Test conversion of torch.isfinite on a tensor containing inf/-inf/nan."""
    torch.set_grad_enabled(False)
    class IsFinite1(Module):
        def forward(self, *args):
            return torch.isfinite(args[0])
    input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
    verify_model(IsFinite1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isnan():
    """Test conversion of torch.isnan on a tensor containing inf/-inf/nan."""
    torch.set_grad_enabled(False)
    class IsNan1(Module):
        def forward(self, *args):
            return torch.isnan(args[0])
    input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
    verify_model(IsNan1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isinf():
    """Test conversion of torch.isinf on a tensor containing inf/-inf/nan."""
    torch.set_grad_enabled(False)
    class IsInf1(Module):
        def forward(self, *args):
            return torch.isinf(args[0])
    input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
    verify_model(IsInf1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_clamp():
    """Test conversion of torch.clamp with both bounds, min only, and max only."""
    torch.set_grad_enabled(False)

    class ClampBoth(Module):
        def forward(self, inp):
            return torch.clamp(inp, min=-0.5, max=0.5)

    class ClampMinOnly(Module):
        def forward(self, inp):
            return torch.clamp(inp, min=-0.3)

    class ClampMaxOnly(Module):
        def forward(self, inp):
            return torch.clamp(inp, max=1.0)

    inp = torch.rand([1, 3, 10, 10]).float()
    for model in (ClampBoth(), ClampMinOnly(), ClampMaxOnly()):
        verify_model(model.float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_clamp_():
    """Test conversion of the in-place variant torch.clamp_."""
    torch.set_grad_enabled(False)
    class ClampInPlace(Module):
        def __init__(self, min, max):
            super(ClampInPlace, self).__init__()
            self.min = min
            self.max = max
        def forward(self, *args):
            # In-place op: mutates args[0]; the frontend must still convert it.
            return torch.clamp_(args[0], self.min, self.max)
    for ishape, min, max in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 0.5)):
        input_data = torch.rand(ishape).float()
        verify_model(ClampInPlace(min, max).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_ones():
    """Test conversion of the torch.ones factory (no inputs, constant output)."""
    torch.set_grad_enabled(False)
    class Ones1(Module):
        def forward(self, *args):
            return torch.ones(2, 3)
    verify_model(Ones1().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_ones_like():
    """Test conversion of torch.ones_like with default and explicit dtypes."""
    torch.set_grad_enabled(False)

    class OnesLike(Module):
        # dtype=None reproduces the no-dtype call of the original first case.
        def __init__(self, dtype=None):
            super().__init__()
            self.dtype = dtype

        def forward(self, inp):
            return torch.ones_like(inp, dtype=self.dtype)

    inp = torch.rand([1, 3, 10, 10]).float()
    for dtype in (None, torch.int8, torch.float):
        verify_model(OnesLike(dtype).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_zeros():
    """Test conversion of the torch.zeros factory (no inputs, constant output)."""
    torch.set_grad_enabled(False)
    class Zeros1(Module):
        def forward(self, *args):
            return torch.zeros(2, 3)
    verify_model(Zeros1().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_zeros_like():
    """Test conversion of torch.zeros_like with default and explicit dtypes."""
    torch.set_grad_enabled(False)

    class ZerosLike(Module):
        # dtype=None reproduces the no-dtype call of the original first case.
        def __init__(self, dtype=None):
            super().__init__()
            self.dtype = dtype

        def forward(self, inp):
            return torch.zeros_like(inp, dtype=self.dtype)

    inp = torch.rand([1, 3, 10, 10]).float()
    for dtype in (None, torch.int32, torch.float):
        verify_model(ZerosLike(dtype).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_full():
    """Test conversion of the torch.full factory with default and int32 dtype."""
    torch.set_grad_enabled(False)
    class Full1(Module):
        def forward(self, *args):
            return torch.full((2, 3), 3.14)
    class Full2(Module):
        def forward(self, *args):
            # Float fill value cast to int32 via explicit dtype.
            return torch.full((1, 2, 3), 1.0, dtype=torch.int32)
    verify_model(Full1().float().eval(), input_data=[])
    verify_model(Full2().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_full_like():
    """Test conversion of torch.full_like with default and explicit dtypes."""
    torch.set_grad_enabled(False)

    class FullLike(Module):
        # Fill value and optional dtype are fixed at construction; dtype=None
        # reproduces the no-dtype call of the original first case.
        def __init__(self, fill_value, dtype=None):
            super().__init__()
            self.fill_value = fill_value
            self.dtype = dtype

        def forward(self, inp):
            return torch.full_like(inp, self.fill_value, dtype=self.dtype)

    inp = torch.rand([1, 3, 10, 10]).float()
    for fill_value, dtype in [(3.14, None), (22.22, torch.int32), (1.4, torch.float)]:
        verify_model(FullLike(fill_value, dtype).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_linspace():
    """Test conversion of torch.linspace across steps/dtype argument forms."""
    # NOTE(review): Linspace1 and Linspace7 rely on the implicit default for
    # `steps`; newer PyTorch releases made `steps` mandatory — confirm the
    # minimum torch version these cases are expected to run against.
    torch.set_grad_enabled(False)
    class Linspace1(Module):
        def forward(self, *args):
            return torch.linspace(5, 10)
    class Linspace2(Module):
        def forward(self, *args):
            return torch.linspace(-10, 10, steps=5)
    class Linspace3(Module):
        def forward(self, *args):
            return torch.linspace(start=-10, end=10, steps=5)
    class Linspace4(Module):
        def forward(self, *args):
            # Single-step edge case.
            return torch.linspace(start=-10, end=10, steps=1)
    class Linspace5(Module):
        def forward(self, *args):
            return torch.linspace(1, 2, 1, dtype=torch.int32)
    class Linspace6(Module):
        def forward(self, *args):
            return torch.linspace(start=1, end=6, steps=2)
    class Linspace7(Module):
        def forward(self, *args):
            return torch.linspace(1, 4, dtype=torch.float32)
    class Linspace8(Module):
        def forward(self, *args):
            return torch.linspace(1, 2, 1, dtype=torch.int16)
    verify_model(Linspace1().float().eval())
    verify_model(Linspace2().float().eval())
    verify_model(Linspace3().float().eval())
    verify_model(Linspace4().float().eval())
    verify_model(Linspace5().float().eval())
    verify_model(Linspace6().float().eval())
    verify_model(Linspace7().float().eval())
    verify_model(Linspace8().float().eval())
@tvm.testing.uses_gpu
def test_forward_take():
    """Test conversion of torch.take with constant and input-supplied indices."""
    torch.set_grad_enabled(False)
    class Take1(Module):
        def forward(self, *args):
            # Indices baked into the graph as a constant; moved to GPU so the
            # traced op's device matches the input when CUDA is available.
            indices = torch.tensor([[0, 0], [1, 0]])
            if torch.cuda.is_available():
                indices = indices.cuda()
            return torch.take(args[0], indices)
    class Take2(Module):
        def forward(self, *args):
            # Indices supplied as a second graph input.
            return torch.take(args[0], args[1])
    input_data = torch.tensor([[1, 2], [3, 4]])
    verify_model(Take1().float().eval(), input_data=input_data)
    indices = torch.tensor([[0, 0], [1, 0]])
    verify_model(Take2().float().eval(), input_data=[input_data, indices])
@tvm.testing.uses_gpu
def test_forward_topk():
    """Test conversion of torch.topk across dim/largest/sorted combinations."""
    torch.set_grad_enabled(False)

    class Topk(Module):
        # k is fixed at 3 as in the original cases; the remaining keyword
        # arguments are fixed at construction and traced as constants.
        def __init__(self, **topk_kwargs):
            super().__init__()
            self._topk_kwargs = topk_kwargs

        def forward(self, inp):
            return torch.topk(inp, k=3, **self._topk_kwargs)

    inp = torch.rand([1, 3, 10, 10]).float()
    for topk_kwargs in [
        {},
        dict(dim=-2),
        dict(dim=3),
        dict(largest=True),
        dict(largest=False),
        dict(sorted=True),
    ]:
        verify_model(Topk(**topk_kwargs).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_logical_not():
    """Test conversion of torch.logical_not on bool, int and float inputs."""
    torch.set_grad_enabled(False)

    class LogicalNot1(Module):
        def forward(self, *args):
            return torch.logical_not(args[0])

    # bool, int8, double and int32 inputs all lower to the same logical-not op.
    test_inputs = [
        torch.tensor([True, False]),
        torch.tensor([0, 1, -10], dtype=torch.int8),
        torch.tensor([0.0, 1.5, -10.0], dtype=torch.double),
        torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32),
    ]
    for inp in test_inputs:
        verify_model(LogicalNot1().float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_bitwise_not():
    """Test conversion of torch.bitwise_not on int8, int32 and bool inputs."""
    torch.set_grad_enabled(False)

    class BitwiseNot1(Module):
        def forward(self, *args):
            return torch.bitwise_not(args[0])

    test_inputs = [
        torch.tensor([0, 1, -10], dtype=torch.int8),
        torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32),
        torch.tensor([True, False]),
    ]
    for inp in test_inputs:
        verify_model(BitwiseNot1().float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_bitwise_xor():
    """Test conversion of torch.bitwise_xor with tensor and constant operands."""
    torch.set_grad_enabled(False)
    class BitwiseXor1(Module):
        def forward(self, *args):
            return torch.bitwise_xor(args[0], args[1])
    class BitwiseXor2(Module):
        def forward(self, *args):
            # Right-hand side baked into the graph as a constant; moved to GPU
            # so the traced op's device matches the input when CUDA is available.
            rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
            if torch.cuda.is_available():
                rhs = rhs.cuda()
            return torch.bitwise_xor(args[0], rhs)
    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
    verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])
    lhs = torch.tensor([True, True, False])
    rhs = torch.tensor([False, True, False])
    verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])
    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    verify_model(BitwiseXor2().float().eval(), input_data=[lhs])
@tvm.testing.uses_gpu
def test_forward_logical_xor():
    """Test conversion of torch.logical_xor with tensor and constant operands."""
    torch.set_grad_enabled(False)
    class LogicalXor1(Module):
        def forward(self, *args):
            return torch.logical_xor(args[0], args[1])
    class LogicalXor2(Module):
        def forward(self, *args):
            # Right-hand side baked into the graph as a constant; moved to GPU
            # so the traced op's device matches the input when CUDA is available.
            rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
            if torch.cuda.is_available():
                rhs = rhs.cuda()
            return torch.logical_xor(args[0], rhs)
    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
    verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])
    lhs = torch.tensor([True, True, False])
    rhs = torch.tensor([False, True, False])
    verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])
    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    verify_model(LogicalXor2().float().eval(), input_data=[lhs])
@tvm.testing.uses_gpu
def test_forward_unary():
    """Test conversion of elementwise unary ops (sqrt, trig, log family, etc.)."""
    torch.set_grad_enabled(False)

    class UnaryOp(Module):
        # Wraps one elementwise torch function; tracing records the underlying
        # aten op exactly as the original one-class-per-op modules did.
        def __init__(self, op):
            super().__init__()
            self.op = op

        def forward(self, inp):
            return self.op(inp)

    inp = torch.rand([1, 3, 10, 10]).float()
    # Same ops, same order, as the original per-op verify_model calls.
    unary_ops = [
        torch.sqrt,
        torch.rsqrt,
        torch.ceil,
        torch.floor,
        torch.round,
        torch.cos,
        torch.cosh,
        torch.sin,
        torch.sinh,
        torch.tan,
        torch.tanh,
        torch.acos,
        torch.asin,
        torch.atan,
        torch.log,
        torch.log2,
        torch.log10,
        torch.log1p,
        torch.exp,
        torch.erf,
        torch.trunc,
        torch.sign,
        torch.neg,
    ]
    for op in unary_ops:
        verify_model(UnaryOp(op).float().eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_where():
    """Test conversion of torch.where: ternary form and single-argument form."""
    torch.set_grad_enabled(False)
    class Where1(Module):
        def forward(self, *args):
            # One branch baked in as a constant; moved to GPU so the traced
            # op's device matches the input when CUDA is available.
            y = torch.ones([3, 2])
            if torch.cuda.is_available():
                y = y.cuda()
            return torch.where(args[0] > 0, args[0], y)
    class Where2(Module):
        def forward(self, *args):
            return torch.where(args[0] > 0, args[0], args[1])
    class Where3(Module):
        def forward(self, *args):
            return torch.where(args[0])[0]
    x = torch.rand([3, 2]).float()
    verify_model(Where1(), input_data=[x])
    y = torch.rand([3, 2])
    verify_model(Where2(), input_data=[x, y])
    # a single argument variant, equivalent to torch.nonzero(..., as_tuple=True)
    inp = torch.rand([10])
    inp[3:8] = 0
    verify_trace_model(Where3(), [inp], ["llvm"])
@tvm.testing.uses_gpu
def test_forward_addcdiv():
    """Test conversion of torch.addcdiv with constant and input tensors."""
    # NOTE(review): the positional scalar `value` form (addcdiv(x, 0.1, t1, t2))
    # is deprecated in newer PyTorch in favor of value= keyword — confirm the
    # torch versions this test targets.
    torch.set_grad_enabled(False)
    class Addcdiv1(Module):
        def forward(self, *args):
            # t1/t2 baked in as constants; broadcast (3,1) / (1,3) -> (3,3).
            t1 = torch.ones([3, 1])
            t2 = torch.ones([1, 3])
            if torch.cuda.is_available():
                t1 = t1.cuda()
                t2 = t2.cuda()
            return torch.addcdiv(args[0], 0.1, t1, t2)
    class Addcdiv2(Module):
        def forward(self, *args):
            return torch.addcdiv(args[0], 0.5, args[1], args[2])
    input_data = torch.rand([1, 3]).float()
    verify_model(Addcdiv1().float().eval(), input_data=input_data)
    t1 = torch.rand([3, 1]).float()
    t2 = torch.rand([1, 3]).float()
    verify_model(Addcdiv2().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_addcmul():
    """Test conversion of torch.addcmul with constant and input tensors."""
    # NOTE(review): the positional scalar `value` form (addcmul(x, 0.1, t1, t2))
    # is deprecated in newer PyTorch in favor of value= keyword — confirm the
    # torch versions this test targets.
    torch.set_grad_enabled(False)
    class Addcmul1(Module):
        def forward(self, *args):
            # t1/t2 baked in as constants; broadcast (3,1) / (1,3) -> (3,3).
            t1 = torch.ones([3, 1])
            t2 = torch.ones([1, 3])
            if torch.cuda.is_available():
                t1 = t1.cuda()
                t2 = t2.cuda()
            return torch.addcmul(args[0], 0.1, t1, t2)
    class Addcmul2(Module):
        def forward(self, *args):
            return torch.addcmul(args[0], 0.5, args[1], args[2])
    input_data = torch.rand([1, 3]).float()
    verify_model(Addcmul1().float().eval(), input_data=input_data)
    t1 = torch.rand([3, 1]).float()
    t2 = torch.rand([1, 3]).float()
    verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_true_divide():
    """Test conversion of torch.true_divide with tensor and scalar divisors."""
    # torch.true_divide was introduced in PyTorch 1.5; skip on older versions.
    if package_version.parse(torch.__version__) < package_version.parse("1.5.0"):
        return
    torch.set_grad_enabled(False)
    class TrueDivide(Module):
        def forward(self, *args):
            return torch.true_divide(args[0], args[1])
    dividend = torch.rand([5, 3]).float()
    # divisor could be either tensor or scalar
    divisor_tensor = torch.rand([5, 3]).float() + 0.5
    divisor_scalar = torch.tensor(1.0, dtype=torch.float32)
    verify_model(
        TrueDivide().float().eval(), input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4
    )
    verify_model(
        TrueDivide().float().eval(), input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4
    )
@tvm.testing.uses_gpu
def test_forward_traced_function():
    """Trace a plain Python function (not a Module) and verify conversion."""

    def fn(t1, t2):
        return t1 + t2

    lhs, rhs = torch.randn(3, 4), torch.randn(3, 4)
    verify_model(fn, input_data=[lhs, rhs])
@tvm.testing.uses_gpu
def test_forward_dtypes():
    """Test conversion with non-float32 dtypes: int32/int64/double inputs and
    integer (Long) module parameters."""
    def fn(t1, t2):
        return 2.5 * t1 + t2
    for dt in [torch.int32, torch.int64, torch.double]:
        tensor1 = torch.randn(3, 4).to(dtype=dt)
        tensor2 = torch.randn(3, 4).to(dtype=dt)
        verify_model(fn, input_data=[tensor1, tensor2])
    class ModuleWithIntParameters(Module):
        def __init__(self, arr):
            super().__init__()
            # Integer parameter: requires_grad=False since Long tensors
            # cannot carry gradients.
            self.param = torch.nn.Parameter(torch.LongTensor(arr), requires_grad=False)
        def forward(self, x):
            return x.long() + self.param
    shape = (10, 10)
    param = torch.ones(shape, dtype=torch.long)
    inp = torch.ones(shape, dtype=torch.int)
    verify_model(ModuleWithIntParameters(param), input_data=inp)
@tvm.testing.uses_gpu
def test_weight_names():
    """The converted param dict should use the names from named_parameters()."""
    traced = torch.jit.trace(torch.nn.Linear(3, 4), [torch.randn(2, 3)])
    mod, params = relay.frontend.from_pytorch(traced, [("input", (2, 3))])
    expected_names = {name for name, _ in traced.named_parameters()}
    assert set(params.keys()) == expected_names
@tvm.testing.uses_gpu
def test_duplicate_weight_use():
    """A weight tensor used by two different ops must convert correctly."""
    # The test cases doesn't make any sense as a neural network,
    # the issue popped up in shared input/output embeddings of bert,
    # but this is quicker
    class Test(Module):
        def __init__(self):
            super().__init__()
            self.lin = torch.nn.Linear(5, 3)
        def forward(self, x):
            # lin.weight is consumed both inside self.lin and by the matmul.
            x = self.lin(x)
            x = x @ self.lin.weight
            return x
    verify_model(Test(), input_data=[torch.randn(5, 5)])
@tvm.testing.uses_gpu
def test_forward_matmul():
    """Test conversion of torch.matmul across vector/matrix/batched shapes."""
    torch.set_grad_enabled(False)

    class MatMul1(Module):
        def forward(self, *args):
            return torch.matmul(args[0], args[1])

    shape_pairs = [
        ((3, 4), (4,)),  # matrix x vector
        ((10, 4), (4, 10)),  # matrix x matrix
        ((10, 3, 4), (10, 4, 5)),  # batched matrix x batched matrix
        ((10, 3, 4), (4, 5)),  # batched matrix x broadcasted matrix
        ((1, 12, 14, 64), (1, 12, 64, 14)),  # 4-D batched matrix x batched matrix
    ]
    for lhs_shape, rhs_shape in shape_pairs:
        lhs = torch.randn(*lhs_shape)
        rhs = torch.randn(*rhs_shape)
        verify_model(MatMul1().float().eval(), input_data=[lhs, rhs])
def test_forward_index():
    """Test conversion of advanced (fancy) indexing with mixed index kinds."""
    torch.set_grad_enabled(False)
    input_shape = [3, 4, 5, 6]
    class Index0(Module):
        def forward(self, x):
            # Mix of list indices, a slice, and a scalar index.
            return x[[0, 1], [0, 2], :2, 4]
    input_data = torch.rand(input_shape).float()
    verify_model(Index0().eval(), input_data=input_data)
    class Index1(Module):
        def forward(self, x):
            # Full advanced indexing: one index list per dimension (broadcast).
            return x[[0], [1, 2, 3, 0], [3, 1, 2, 2], [4, 2, 1, 0]]
    input_data = torch.rand(input_shape).float()
    verify_model(Index1().eval(), input_data=input_data)
def test_logsumexp():
    """Test conversion of torch.logsumexp over different dims, keepdim, dtypes."""
    class Logsumexp(Module):
        def __init__(self, dim, keepdim=False):
            super().__init__()
            self.dim = dim
            self.keepdim = keepdim
        def forward(self, x):
            return torch.logsumexp(x, self.dim, self.keepdim)
    input_shape = (100, 100)
    input_data = torch.rand(input_shape)
    verify_model(Logsumexp(0), input_data=input_data)
    verify_model(Logsumexp(0, keepdim=True), input_data=input_data)
    # Also test on double
    verify_model(Logsumexp(1, keepdim=True), input_data=input_data.double())
def test_stack():
    """Test conversion of torch.stack with positive and negative axes."""
    class Stack(torch.nn.Module):
        def __init__(self, axis=0):
            super().__init__()
            self.axis = axis
        def forward(self, x):
            return torch.stack((x, x), dim=self.axis)
    inp = torch.randn(8, 8, 8)
    verify_model(Stack(), input_data=inp)
    verify_model(Stack(axis=-1), input_data=inp)
    verify_model(Stack(axis=3), input_data=inp)
    verify_model(Stack(axis=-4), input_data=inp)
def test_stack_dynamic():
    """Test conversion of torch.stack over a dynamically built tensor list
    (scripted, lowered via the relay tensor array)."""
    class Stack(torch.nn.Module):
        def forward(self, x):
            tensor_list = []
            for i in range(x.size(0)):
                # this is a workaround to avoid generating impure aten::append op
                tensor_list += [x[i]]
            # relay tensor array only supports stacking on the first axis
            return torch.stack(tensor_list, dim=0)
    verify_script_model(Stack(), [(8, 8, 8)], _get_default_vm_targets())
def test_forward_unbind():
    """Test conversion of torch.unbind along every axis of a 3-D tensor."""

    class Unbind(torch.nn.Module):
        def __init__(self, axis=0):
            super().__init__()
            self.axis = axis

        def forward(self, x):
            return torch.unbind(x, self.axis)

    inp = torch.randn(8, 8, 8)
    for axis in range(3):
        verify_model(Unbind(axis), input_data=inp)
def test_forward_nonzero():
    """Test conversion of torch.nonzero (dynamic output shape; traced on llvm)."""
    class Nonzero(Module):
        def __init__(self, as_tuple=False):
            super().__init__()
            self.as_tuple = as_tuple
        def forward(self, data):
            return torch.nonzero(data, as_tuple=self.as_tuple)
    inp = torch.Tensor(np.array([[0, 1, 0], [2, 0, 9], [-1, -1, 0]]).astype("float32"))
    verify_trace_model(Nonzero(), [inp], ["llvm"])
def test_forward_scatter():
    """Test conversion of torch.scatter along dim 0 and dim 1."""
    class Scatter(Module):
        def __init__(self, dim=0):
            super().__init__()
            self.dim = dim
        def forward(self, data, index, src):
            return torch.scatter(data, dim=self.dim, index=index, src=src)
    in_data = torch.zeros(3, 5)
    in_index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])
    in_src = torch.rand(2, 5)
    # TODO: add scatter gpu schedule to enable gpu test.
    verify_trace_model(Scatter(), [in_data, in_index, in_src], ["llvm"])
    in_data = torch.zeros(2, 4)
    in_index = torch.tensor([[2], [3]])
    in_src = torch.rand(2, 1)
    # TODO: add scatter gpu schedule to enable gpu test.
    verify_trace_model(Scatter(1), [in_data, in_index, in_src], ["llvm"])
def test_numel():
    """Test conversion of torch.numel via scripting on tensors of several ranks."""

    class Numel(Module):
        def forward(self, data):
            return torch.tensor(torch.numel(data))

    targets = _get_default_vm_targets()
    for shape in [(1,), (3, 5), (3, 5, 8)]:
        verify_script_model(Numel(), [shape], targets)
def test_forward_pretrained_bert_base_uncased():
######################################################################
# This is an example how to run BERT models using TVM
# ---------------------------------------------------
"""
Refer the bert example given in https://pypi.org/project/pytorch-pretrained-bert
# To get started, pretrained bert package needs to be installed as prerequisite.
.. code-block:: bash
# install bert package
pip install pytorch_pretrained_bert==0.6.2 --user
"""
try:
from pytorch_pretrained_bert import BertTokenizer, BertForMaskedLM
except:
print("Torch pretrained bert package must be installed to run this script.")
return
######################################################################
# Load the tokenizer and tokenize the input
# -----------------------------------------
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# Tokenized input
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = tokenizer.tokenize(text)
# Mask a token that we will try to predict back with `BertForMaskedLM`
masked_index = 8
tokenized_text[masked_index] = "[MASK]"
assert tokenized_text == [
"[CLS]",
"who",
"was",
"jim",
"henson",
"?",
"[SEP]",
"jim",
"[MASK]",
"was",
"a",
"puppet",
"##eer",
"[SEP]",
]
# Convert token to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
######################################################################
# Load a pretrained PyTorch model bert-base-uncased
# -------------------------------------------------
# Bert Model with a language modeling
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()
######################################################################
# Predict all tokens with pytorch
# -------------------------------
with torch.no_grad():
torch_preds = model(tokens_tensor, segments_tensors)
######################################################################
# Make TorchScripted model via jit trace
# --------------------------------------
scripted_model = torch.jit.trace(model, (tokens_tensor, segments_tensors)).eval()
######################################################################
# Import the graph to Relay
# -------------------------
# Convert PyTorch graph to Relay graph. The input name can be arbitrary.
input_1 = "input_ids"
input_2 = "input.2"
shape_list = [(input_1, list(tokens_tensor.shape)), (input_2, list(segments_tensors.shape))]
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
######################################################################
# Compile the model with relay
# ----------------------------
target = "llvm"
with tvm.transform.PassContext(opt_level=3):
relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
######################################################################
# Execute on TVM
# --------------
ctx = tvm.context(target, 0)
relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)
relay_model.set_input(**relay_params)
relay_model.set_input(input_1, tokens_tensor)
relay_model.set_input(input_2, segments_tensors)
relay_model.run()
compiled_output = relay_model.get_output(0).asnumpy()
######################################################################
# Validate the outputs
# --------------------
# Compare the torch and tvm outputs
tvm.testing.assert_allclose(torch_preds, compiled_output, rtol=1e-3, atol=1e-3)
######################################################################
# Process the output
# ------------------
# Process the model output to token.
# Torch output to token
torch_pred_idx = torch.argmax(torch_preds[0, masked_index]).item()
torch_pred_token = tokenizer.convert_ids_to_tokens([torch_pred_idx])[0]
# TVM output to token
tvm_pred_idx = compiled_output[0, masked_index].argmax()
tvm_pred_token = tokenizer.convert_ids_to_tokens([tvm_pred_idx])[0]
assert torch_pred_idx == tvm_pred_idx
assert torch_pred_token == tvm_pred_token
# Print the outputs
print("Torch top-1 id: {}, token: {}".format(torch_pred_idx, torch_pred_token))
print("TVM top-1 id: {}, token: {}".format(tvm_pred_idx, tvm_pred_token))
def test_convert_torch_script_with_input_types():
def model_fn(x, y):
x = x.to(dtype=torch.int32)
y = x + y
return y
ishape = (4, 5)
input_x = torch.rand(ishape, dtype=torch.float32)
input_y = torch.randint(low=0, high=100, size=ishape, dtype=torch.int32)
inputs = [input_x, input_y]
script_module = torch.jit.trace(model_fn, inputs)
fname = "tmp.pt"
torch.jit.save(script_module, fname)
loaded = torch.jit.load(fname)
os.remove(fname)
verify_model(loaded.eval(), input_data=inputs)
def expected(x_shape, y_shape):
# use a fixed order of args so alpha equal check can pass
x = relay.var("x", shape=x_shape, dtype="float32")
y = relay.var("y", shape=y_shape, dtype="int32")
args = [x, y]
x1 = relay.cast(x, "int32")
y1 = relay.add(x1, y)
mod = tvm.IRModule.from_expr(relay.Function(args, y1))
return mod["main"]
input_infos = [("input0", (ishape, "float")), ("input1", (ishape, "int"))]
mod, params = relay.frontend.from_pytorch(loaded, input_infos)
expected_mod = expected(ishape, ishape)
assert tvm.ir.structural_equal(expected_mod, mod["main"], map_free_vars=True)
if __name__ == "__main__":
# some structural tests
test_forward_traced_function()
test_forward_dtypes()
test_weight_names()
test_duplicate_weight_use()
# Single operator tests
test_forward_pixel_shuffle()
test_forward_add()
test_forward_subtract()
test_forward_multiply()
test_forward_matmul()
test_forward_rsub()
test_forward_onehot()
test_forward_embedding()
test_forward_reshape()
test_forward_reciprocal()
test_forward_repeat()
test_forward_repeat_interleave()
test_forward_squeeze()
test_forward_unsqueeze()
test_forward_concatenate()
test_forward_reduce_sum()
test_forward_reduce_prod()
test_forward_argmin()
test_forward_argmax()
test_forward_norm()
test_forward_frobenius_norm()
test_forward_std()
test_forward_variance()
test_forward_relu()
test_forward_prelu()
test_forward_leakyrelu()
test_forward_elu()
test_forward_celu()
test_forward_gelu()
test_forward_selu()
test_forward_log_sigmoid()
test_forward_adaptiveavgpool()
test_forward_maxpool2d()
test_forward_maxpool1d()
test_forward_maxpool3d()
test_forward_hardtanh()
test_forward_conv()
test_forward_conv_transpose()
test_forward_threshold()
test_forward_contiguous()
test_forward_batchnorm()
test_forward_instancenorm()
test_forward_layernorm()
test_forward_groupnorm()
test_forward_transpose()
test_forward_size()
test_forward_view()
test_forward_select()
test_forward_take()
test_forward_topk()
test_forward_where()
test_forward_addcdiv()
test_forward_addcmul()
test_forward_true_divide()
test_forward_clone()
test_forward_softplus()
test_forward_softsign()
test_forward_logsoftmax()
test_forward_sigmoid()
test_forward_dense()
test_forward_avgpool()
test_forward_avgpool3d()
test_forward_dropout()
test_forward_slice()
test_forward_mean()
test_forward_expand()
test_forward_pow()
test_forward_unary()
test_forward_clamp()
test_forward_clamp_()
test_forward_logical_not()
test_forward_bitwise_not()
test_forward_bitwise_xor()
test_forward_logical_xor()
test_forward_isfinite()
test_forward_isnan()
test_forward_isinf()
test_forward_ones()
test_forward_ones_like()
test_forward_zeros()
test_forward_zeros_like()
test_forward_full()
test_forward_full_like()
test_forward_linspace()
test_forward_arange()
test_forward_mesh_grid()
test_forward_chunk()
test_forward_split()
test_forward_gather()
test_upsample()
test_forward_upsample3d()
test_forward_nms()
test_forward_roi_align()
test_to()
test_flatten()
test_type_as()
test_forward_functional_pad()
test_forward_zero_pad2d()
test_forward_constant_pad1d()
test_forward_constant_pad2d()
test_forward_constant_pad3d()
test_forward_reflection_pad1d()
test_forward_reflection_pad2d()
test_forward_replication_pad1d()
test_forward_replication_pad2d()
test_forward_replication_pad3d()
test_adaptive_pool3d()
test_conv3d()
test_conv3d_transpose()
test_forward_index()
test_min_max()
test_logsumexp()
test_stack()
test_stack_dynamic()
test_forward_unbind()
test_forward_nonzero()
test_forward_scatter()
test_numel()
# Model tests
test_resnet18()
test_squeezenet1_0()
test_squeezenet1_1()
test_densenet121()
# disable inception test for now, since loading it takes ~5min on torchvision-0.5 due to scipy bug
# See https://discuss.pytorch.org/t/torchvisions-inception-v3-takes-much-longer-to-load-than-other-models/68756
# test_inception_v3()
test_googlenet()
test_mnasnet0_5()
test_mobilenet_v2()
test_custom_conversion_map()
test_segmentaton_models()
test_3d_models()
# Quantization test
from qnn_test import test_quantized_imagenet, test_quantized_modules
test_quantized_modules()
test_quantized_imagenet()
# Test simple conditionals and loop
test_control_flow()
test_simple_rnn()
# More complex recurrent models
from test_lstm import test_custom_lstm
test_custom_lstm()
# Test bert model
test_forward_pretrained_bert_base_uncased()
# Test convert torch script(jit) with specific inputs' types
test_convert_torch_script_with_input_types()
``` |
[
{
"content": "Here is the snippet:\n```python\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import fetch_olivetti_faces\nfrom sklearn.utils.validation import check_random_state\n\nfrom linear_regression import *\nfrom knn import *\n\n\ndef main():\n # Load the faces datasets\n... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import fetch_olivetti_faces\nfrom sklearn.utils.validation import check_random_state\n\nfrom linear_regression import *\nfrom knn import *\n\n\ndef main():\n # Load the ... | ```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from linear_regression import *
from knn import *
def main():
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train, y_train = np.split(train, [int(0.5 * n_pixels)], axis=1)
X_test , y_test = np.split(test , [int(0.5 * n_pixels)], axis=1)
# Fit estimators
ESTIMATORS = {
"LeastSquareRegression": LeastSquareRegression(),
"RidgeRegression" : RidgeRegression(incr=0.3, min_change=0.1),
"knn" : KNN(k=5)
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
if __name__ == "__main__":
main()
``` |
[
{
"content": "Here is a code snippet:\n```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file exce... | ```python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from quantum.db import model_base
class OFPServer(model_base.BASEV2):
"""Openflow Server/API address."""
__tablename__ = 'ofp_server'
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
address = sa.Column(sa.String(64)) # netloc <host ip address>:<port>
host_type = sa.Column(sa.String(255)) # server type
# Controller, REST_API
def __repr__(self):
return "<OFPServer(%s,%s,%s)>" % (self.id, self.address,
self.host_type)
class TunnelKeyLast(model_base.BASEV2):
"""Lastly allocated Tunnel key. The next key allocation will be started
from this value + 1
"""
last_key = sa.Column(sa.Integer, primary_key=True)
def __repr__(self):
return "<TunnelKeyLast(%x)>" % self.last_key
class TunnelKey(model_base.BASEV2):
"""Netowrk ID <-> tunnel key mapping."""
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
nullable=False)
tunnel_key = sa.Column(sa.Integer, primary_key=True,
nullable=False, autoincrement=False)
def __repr__(self):
return "<TunnelKey(%s,%x)>" % (self.network_id, self.tunnel_key)
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n\"\"\"\nTest collection used only for testing purpose\n\"\"\"\nfrom booby import fields\nfrom ot_api.base import OpentopicModel\n\nfrom .parsers import test_parser\n\n\ndef _generate_test_object(i):\n \"\"\"\n :return: dist with ... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n\"\"\"\nTest collection used only for testing purpose\n\"\"\"\nfrom booby import fields\nfrom ot_api.base import OpentopicModel\n\nfrom .parsers import test_parser\n\n\ndef _generate_test_object(i):\n \"\"\"\n :re... | ```python
"""
Test collection used only for testing purpose
"""
from booby import fields
from ot_api.base import OpentopicModel
from .parsers import test_parser
def _generate_test_object(i):
"""
:return: dist with test object
"""
return {
'id': i,
'name': 'test_name{0}'.format(str(i))
}
def _generate_test_objects(offset, limit):
"""
generate list with test objects
:return: list iwth generated objects
"""
return [_generate_test_object(i) for i in range(offset, limit)]
def _generate_test_json(offset, limit, next_page=None):
"""
Generat test json
:return: dict
"""
results = list(_generate_test_objects(offset, limit))
json_response = {
'results': results,
'next': next_page,
'count': len(results),
'previous': None
}
return json_response
class TestCollectionModel(OpentopicModel):
"""
Represent recommendation object.
TODO: link to documentation
"""
pk = fields.Integer()
name = fields.String()
parser = test_parser
@property
def id(self):
return self.pk
def __str__(self):
return self.name
``` |
[
{
"content": "Repeat the code exactly:\n```python\n# coding: utf8\n# Auntefication Library\nfrom ColorAndSayLib import PrintWC, SayPrint\n\n# Variables of userdata\nlogpass = {\"hacker\": \"12345678\"}\n######################\nPostLoginTrue = False\n\n\n######################\n# def SayPrint(text,speech = None,... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n# coding: utf8\n# Auntefication Library\nfrom ColorAndSayLib import PrintWC, SayPrint\n\n# Variables of userdata\nlogpass = {\"hacker\": \"12345678\"}\n######################\nPostLoginTrue = False\n\n\n######################\n# def SayPrint(tex... | ```python
# coding: utf8
# Auntefication Library
from ColorAndSayLib import PrintWC, SayPrint
# Variables of userdata
logpass = {"hacker": "12345678"}
######################
PostLoginTrue = False
######################
# def SayPrint(text,speech = None,color= None,ttsd = "plyer"):
# if color == None:
# color = ""
# if speech == None:
# print color + text
#
# if ttsd == "plyer":
# import plyer
# plyer.tts.speak(text)Неиспользуемые функции перенесеные в ColorAndSayLib
# else:
# print color + text
# if ttsd == "plyer":
# import plyer
# plyer.tts.speak(speech)
# def PrintWC(text,color = "WHITE"):
# import colorama
# if color == "WHITE":
# c =
# elif color == "GREEN":
# c = Fore.GREEN
# elif color == "RED":
# c = Fore.RED
# print c+text
def Register():
global logpass
global numreg
global PostLoginTrue
PrintWC('OK, starting register...\n', "WHITE", styling="BOLD", backs="D_GRAY")
PrintWC("", backs="D_GRAY")
while True:
givenlogin = str(input("Select your login: "))
givenpassword = str(input("Select your password: "))
try:
x = logpass[givenlogin]
PrintWC("Sorry, login don't aviable!")
PostLoginTrue = False
except KeyError:
logpass[givenlogin] = givenpassword
PostLoginTrue = True
break
return givenlogin
def Menu(num=0):
while True:
PrintWC("Register - 1 \nLogin - 0\n")
ch = str(input())
if ch == "0":
username = Login()
return username
elif ch == "1":
username = Register()
return username
def Login():
passTrue = None
global logpass
global PostLoginTrue
while True:
if PostLoginTrue == True:
return givenlogin
givenlogin, givenpassword = PLEnter()
try:
if logpass[givenlogin] == givenpassword:
passTrue = 1
PrintWC("Hello, " + givenlogin, "GREEN")
PostLoginTrue = True
else:
PrintWC("Password not found.", "RED")
PostLoginTrue = False
except KeyError:
PrintWC("Login not found.", "RED")
PostLoginTrue = False
def PLEnter():
login = str(input("Enter login: \n"))
password = str(input("Enter password: \n"))
return login, password
``` |
[
{
"content": "```python\nimport pyquaternion\n\n\n# Create a quaternion representing a rotation of +90 degrees about positive y axis.\nmy_quaternion = pyquaternion.Quaternion(axis=[0, 1, 0], degrees=90)\n\nmy_vector = [0, 0, 4]\nmy_rotated_vector = my_quaternion.rotate(my_vector)\n\nprint('\\nBasic Rotation')\n... | [
{
"content": "<|memory_start|>```python\nimport pyquaternion\n\n\n# Create a quaternion representing a rotation of +90 degrees about positive y axis.\nmy_quaternion = pyquaternion.Quaternion(axis=[0, 1, 0], degrees=90)\n\nmy_vector = [0, 0, 4]\nmy_rotated_vector = my_quaternion.rotate(my_vector)\n\nprint('\\nBa... | ```python
import pyquaternion
# Create a quaternion representing a rotation of +90 degrees about positive y axis.
my_quaternion = pyquaternion.Quaternion(axis=[0, 1, 0], degrees=90)
my_vector = [0, 0, 4]
my_rotated_vector = my_quaternion.rotate(my_vector)
print('\nBasic Rotation')
print('--------------')
print('My Vector: {}'.format(my_vector))
print('Performing rotation of {angle} deg about {axis}'.format(angle=my_quaternion.degrees, axis=my_quaternion.axis))
print('My Rotated Vector: {}'.format(my_rotated_vector))
# Create another quaternion representing no rotation at all
null_quaternion = pyquaternion.Quaternion(axis=[0, 1, 0], angle=0)
print('\nInterpolated Rotation')
print('---------------------')
# The following will create a sequence of 9 intermediate quaternion rotation objects
for q in pyquaternion.Quaternion.intermediates(null_quaternion, my_quaternion, 9, include_endpoints=True):
my_interpolated_point = q.rotate(my_vector)
print('My Interpolated Point: {point}\t(after rotation of {angle} deg about {axis})'.format(
point=my_interpolated_point, angle=round(q.degrees, 4), axis=q.axis
))
print('Done!')
``` |
[
{
"content": "Here is some code:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nTest the IO routines - based on the MSAT test cases\n\"\"\"\nimport unittest\n\nimport numpy as np\n\nimport pytasa.anisotropy_index\n\n\nclass TestAnisotropyIndex(unittest.TestCase):\n\n def setUp(self):\n ... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nTest the IO routines - based on the MSAT test cases\n\"\"\"\nimport unittest\n\nimport numpy as np\n\nimport pytasa.anisotropy_index\n\n\nclass TestAnisotropyIndex(unittest.TestCase):\n\n def ... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test the IO routines - based on the MSAT test cases
"""
import unittest
import numpy as np
import pytasa.anisotropy_index
class TestAnisotropyIndex(unittest.TestCase):
def setUp(self):
"""Some useful matricies for testing"""
self.olivine = np.array([[320.5, 68.1, 71.6, 0.0, 0.0, 0.0],
[68.1, 196.5, 76.8, 0.0, 0.0, 0.0],
[71.6, 76.8, 233.5, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 64.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 77.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 78.7]])
self.isotropic = np.array([[166.6667, 66.6667, 66.6667, 0.0, 0.0, 0.0],
[66.6667, 166.6667, 66.6667, 0.0, 0.0, 0.0],
[66.6667, 66.6667, 166.6667, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 50.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 50.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 50.0]])
def test_isotropic_zenner(self):
"""Test from MSAT - are isotropic results isotropic"""
np.testing.assert_almost_equal(pytasa.anisotropy_index.zenerAniso(
self.isotropic), [1.0, 0.0])
def test_isotropic_universal(self):
"""Test from MSAT - are isotropic results isotropic"""
np.testing.assert_almost_equal(pytasa.anisotropy_index.uAniso(
self.isotropic), [0.0, 0.0])
def suite():
return unittest.makeSuite(TestCijStability, 'test')
if __name__ == '__main__':
unittest.main(defaultTest='suite')
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n#!/usr/bin/env python2\r\n\r\n# Copyright 2015 Dejan D. M. Milosavljevic\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python2\r\n\r\n# Copyright 2015 Dejan D. M. Milosavljevic\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may ... | ```python
#!/usr/bin/env python2
# Copyright 2015 Dejan D. M. Milosavljevic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import nucleotide
import nucleotide.component
import nucleotide.component.windows
import nucleotide.component.windows.cygwingcc.atom
import nucleotide.component.windows.cygwingcc.atom.blank
##Detect GCC on CYgWin
class Translator:
m_list = []
def __init__(self):
self.m_list = []
def get(self):
return self.m_list
@staticmethod
def check():
return True
@staticmethod
def extend(P_options):
nucleotide.component.windows.cygwingcc.atom.blank.Blank.extend(P_options)
nucleotide.component.linux.gcc.translator.Translator.extend(P_options)
@staticmethod
def _exists( key, sub_key ):
return False
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n\"\"\"\nCopyright (2010-2014) INCUBAID BVBA\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n h... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n\"\"\"\nCopyright (2010-2014) INCUBAID BVBA\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the Lic... | ```python
"""
Copyright (2010-2014) INCUBAID BVBA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from Compat import X
import os
import logging
import subprocess
def test_client_lib():
my_temp = '/tmp/client_lib_test'
OCAML_LIBDIR = X.subprocess.check_output('ocamlfind printconf destdir',
shell=True)
OCAML_LIBDIR = OCAML_LIBDIR.strip()
env = os.environ.copy()
env['OCAML_LIBDIR'] = OCAML_LIBDIR
cmds = [
(['make', 'uninstall_client'], None),
(['make', 'install'], None),
(['mkdir', '-p', my_temp], None),
(['cp', './examples/ocaml/demo.ml', my_temp], None),
(['ocamlbuild', '-use-ocamlfind', '-package','lwt' ,
'-package','arakoon_client',
'-tags', 'annot,debug,thread',
'demo.native'], my_temp),
(['make', 'uninstall_client'], None),
]
for cmd, cwd in cmds:
if cwd == None:
cwd = '../..'
print cmd
try:
r = X.subprocess.check_output(cmd,
cwd = cwd,
env = env,
stderr= X.subprocess.STDOUT
)
print r
except subprocess.CalledProcessError as ex:
logging.info("ex:%s" % ex)
logging.info("output=%s" % ex.output)
raise ex
``` |
[
{
"content": "```python\nimport pipes\r\nimport os\r\nimport string\r\nimport unittest\r\nfrom test.support import TESTFN, run_unittest, unlink, reap_children\r\n\r\nif os.name != 'posix':\r\n raise unittest.SkipTest('pipes module only works on posix')\r\n\r\nTESTFN2 = TESTFN + \"2\"\r\n\r\n# tr a-z A-Z is n... | [
{
"content": "<|memory_start|>```python\nimport pipes\r\nimport os\r\nimport string\r\nimport unittest\r\nfrom test.support import TESTFN, run_unittest, unlink, reap_children\r\n\r\nif os.name != 'posix':\r\n raise unittest.SkipTest('pipes module only works on posix')\r\n\r\nTESTFN2 = TESTFN + \"2\"\r\n\r\n#... | ```python
import pipes
import os
import string
import unittest
from test.support import TESTFN, run_unittest, unlink, reap_children
if os.name != 'posix':
raise unittest.SkipTest('pipes module only works on posix')
TESTFN2 = TESTFN + "2"
# tr a-z A-Z is not portable, so make the ranges explicit
s_command = 'tr %s %s' % (string.ascii_lowercase, string.ascii_uppercase)
class SimplePipeTests(unittest.TestCase):
def tearDown(self):
for f in (TESTFN, TESTFN2):
unlink(f)
def testSimplePipe1(self):
t = pipes.Template()
t.append(s_command, pipes.STDIN_STDOUT)
f = t.open(TESTFN, 'w')
f.write('hello world #1')
f.close()
with open(TESTFN) as f:
self.assertEqual(f.read(), 'HELLO WORLD #1')
def testSimplePipe2(self):
with open(TESTFN, 'w') as f:
f.write('hello world #2')
t = pipes.Template()
t.append(s_command + ' < $IN > $OUT', pipes.FILEIN_FILEOUT)
t.copy(TESTFN, TESTFN2)
with open(TESTFN2) as f:
self.assertEqual(f.read(), 'HELLO WORLD #2')
def testSimplePipe3(self):
with open(TESTFN, 'w') as f:
f.write('hello world #2')
t = pipes.Template()
t.append(s_command + ' < $IN', pipes.FILEIN_STDOUT)
f = t.open(TESTFN, 'r')
try:
self.assertEqual(f.read(), 'HELLO WORLD #2')
finally:
f.close()
def testEmptyPipeline1(self):
# copy through empty pipe
d = 'empty pipeline test COPY'
with open(TESTFN, 'w') as f:
f.write(d)
with open(TESTFN2, 'w') as f:
f.write('')
t=pipes.Template()
t.copy(TESTFN, TESTFN2)
with open(TESTFN2) as f:
self.assertEqual(f.read(), d)
def testEmptyPipeline2(self):
# read through empty pipe
d = 'empty pipeline test READ'
with open(TESTFN, 'w') as f:
f.write(d)
t=pipes.Template()
f = t.open(TESTFN, 'r')
try:
self.assertEqual(f.read(), d)
finally:
f.close()
def testEmptyPipeline3(self):
# write through empty pipe
d = 'empty pipeline test WRITE'
t = pipes.Template()
with t.open(TESTFN, 'w') as f:
f.write(d)
with open(TESTFN) as f:
self.assertEqual(f.read(), d)
def testQuoting(self):
safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./'
unsafe = '"`$\\!'
self.assertEqual(pipes.quote(''), "''")
self.assertEqual(pipes.quote(safeunquoted), safeunquoted)
self.assertEqual(pipes.quote('test file name'), "'test file name'")
for u in unsafe:
self.assertEqual(pipes.quote('test%sname' % u),
"'test%sname'" % u)
for u in unsafe:
self.assertEqual(pipes.quote("test%s'name'" % u),
"'test%s'\"'\"'name'\"'\"''" % u)
def testRepr(self):
t = pipes.Template()
self.assertEqual(repr(t), "<Template instance, steps=[]>")
t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
self.assertEqual(repr(t),
"<Template instance, steps=[('tr a-z A-Z', '--')]>")
def testSetDebug(self):
t = pipes.Template()
t.debug(False)
self.assertEqual(t.debugging, False)
t.debug(True)
self.assertEqual(t.debugging, True)
def testReadOpenSink(self):
# check calling open('r') on a pipe ending with
# a sink raises ValueError
t = pipes.Template()
t.append('boguscmd', pipes.SINK)
self.assertRaises(ValueError, t.open, 'bogusfile', 'r')
def testWriteOpenSource(self):
# check calling open('w') on a pipe ending with
# a source raises ValueError
t = pipes.Template()
t.prepend('boguscmd', pipes.SOURCE)
self.assertRaises(ValueError, t.open, 'bogusfile', 'w')
def testBadAppendOptions(self):
t = pipes.Template()
# try a non-string command
self.assertRaises(TypeError, t.append, 7, pipes.STDIN_STDOUT)
# try a type that isn't recognized
self.assertRaises(ValueError, t.append, 'boguscmd', 'xx')
# shouldn't be able to append a source
self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SOURCE)
# check appending two sinks
t = pipes.Template()
t.append('boguscmd', pipes.SINK)
self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SINK)
# command needing file input but with no $IN
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd $OUT',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd',
pipes.FILEIN_STDOUT)
# command needing file output but with no $OUT
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd $IN',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.append, 'boguscmd',
pipes.STDIN_FILEOUT)
def testBadPrependOptions(self):
t = pipes.Template()
# try a non-string command
self.assertRaises(TypeError, t.prepend, 7, pipes.STDIN_STDOUT)
# try a type that isn't recognized
self.assertRaises(ValueError, t.prepend, 'tr a-z A-Z', 'xx')
# shouldn't be able to prepend a sink
self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SINK)
# check prepending two sources
t = pipes.Template()
t.prepend('boguscmd', pipes.SOURCE)
self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SOURCE)
# command needing file input but with no $IN
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd $OUT',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd',
pipes.FILEIN_STDOUT)
# command needing file output but with no $OUT
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd $IN',
pipes.FILEIN_FILEOUT)
t = pipes.Template()
self.assertRaises(ValueError, t.prepend, 'boguscmd',
pipes.STDIN_FILEOUT)
def testBadOpenMode(self):
t = pipes.Template()
self.assertRaises(ValueError, t.open, 'bogusfile', 'x')
def testClone(self):
t = pipes.Template()
t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
u = t.clone()
self.assertNotEqual(id(t), id(u))
self.assertEqual(t.steps, u.steps)
self.assertNotEqual(id(t.steps), id(u.steps))
self.assertEqual(t.debugging, u.debugging)
def test_main():
run_unittest(SimplePipeTests)
reap_children()
if __name__ == "__main__":
test_main()
``` |
[
{
"content": "Here is the script:\n```python\n#creer l'url avec son checksum\n\nimport hashlib\n\ndef checksumUrl(queryString,action,secret):\n\t#1. Create the entire query string for your API call without the checksum parameter \"name=Test+Meeting&meetingID=abc123&attendeePW=111222&moderatorPW=333444\"\n\t#2. ... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n#creer l'url avec son checksum\n\nimport hashlib\n\ndef checksumUrl(queryString,action,secret):\n\t#1. Create the entire query string for your API call without the checksum parameter \"name=Test+Meeting&meetingID=abc123&attendeePW=111222&moderatorPW=... | ```python
#creer l'url avec son checksum
import hashlib
def checksumUrl(queryString, action, secret):
    """Build an API call URL carrying its SHA-1 checksum.

    Implements the BigBlueButton-style signing scheme: the checksum is the
    SHA-1 hex digest of ``action + queryString + secret``, appended to the
    query string as a ``checksum`` parameter.

    Parameters
    ----------
    queryString : str
        Full query string of the call, WITHOUT the checksum parameter,
        e.g. ``"name=Test+Meeting&meetingID=abc123"``.
    action : str
        Name of the API call, e.g. ``"create"``.
    secret : str
        Shared secret appended to the string before hashing.

    Returns
    -------
    str
        ``"<action>?<queryString>&checksum=<sha1-hex>"``
    """
    # 1-3. Concatenate action name, query string and shared secret.
    raw = action + queryString + secret
    # 4. SHA-1 of that string. hashlib requires bytes on Python 3; the
    #    original passed a str, which raises TypeError there.
    sha1Hex = hashlib.sha1(raw.encode('utf-8')).hexdigest()
    # 5. Append the checksum parameter to the final URL.
    return action + "?" + queryString + "&checksum=" + sha1Hex
``` |
[
{
"content": "Here is the source code:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import, unicode_literals\nfrom qtpy.QtCore import *\n# from qtpy.QtWidgets import *\nimport re\nimport logging\nfrom sphinx_explorer.util.commander import ... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import, unicode_literals\nfrom qtpy.QtCore import *\n# from qtpy.QtWidgets import *\nimport re\nimport logging\nfrom sphinx_explorer.util.c... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
from qtpy.QtCore import *
# from qtpy.QtWidgets import *
import re
import logging
from sphinx_explorer.util.commander import Commander
logger = logging.getLogger(__name__)
class PipInstallTask(QObject):
    """Qt task that installs packages via ``pip`` and reports each result.

    Emits ``finished(ok, package, version)`` once per package after the
    install attempt and a ``pip show`` version lookup.
    """
    finished = Signal(bool, str, str)

    def __init__(self, packages, update_flag=False, commander=Commander(), callback=None, parent=None):
        # packages: iterable of package names to install (None -> empty list).
        # update_flag: when True, pass -U to pip to upgrade existing installs.
        # commander: command-runner abstraction. NOTE(review): the default
        #   Commander() is created once at definition time and shared by all
        #   callers that omit the argument -- confirm this sharing is intended.
        super(PipInstallTask, self).__init__(parent)
        self.packages = packages or []
        self.callback = callback
        self.commander = commander
        self.update_flag = update_flag

    def run(self):
        # Install each package, then query its installed version and
        # announce completion via the `finished` signal.
        for package in self.packages:
            update_flag = "-U" if self.update_flag else ""
            result = self.commander.call("pip install -q {} {}".format(update_flag, package), shell=True)
            if not result:
                logger.warning("pip failed.")
            package_info = self.commander.check_output("pip show {}".format(package), shell=True)
            version = self.get_version(package_info)
            self.finished.emit(True, package, version)

    @staticmethod
    def get_version(msg):
        # Extract the version string from `pip show` output; returns None
        # when the output is empty or contains no "Version: " line.
        if not msg:
            return None
        for line in msg.splitlines():
            if line.startswith("Version: "):
                version = line[len("Version: "):].strip()
                break
        else:
            version = None
        return version
class PipListTask(QObject):
    """Qt task that runs ``pip list`` and collects installed packages.

    Emits ``finished(packages)`` where packages is a list of
    ``(name, version, latest)`` tuples (latest is always None here).
    """
    finished = Signal(list)
    # Matches legacy `pip list` output lines of the form "name (version)".
    PARSE_RE = re.compile(r"([^\s]+)\s+\(([^\s]+)\)")

    def __init__(self, commander=Commander(), callback=None, parent=None):
        # NOTE(review): the default Commander() instance is shared across
        # all constructions that omit the argument (mutable-default pattern).
        super(PipListTask, self).__init__(parent)
        self.packages = []
        self.callback = callback
        self.commander = commander

    @staticmethod
    def filter(output):
        # Yield (package, version, latest=None) for each parsable line.
        for line in output.splitlines():
            g = PipListTask.PARSE_RE.match(line)
            if g:
                package, version = g.groups()
                yield package, version, None

    def run(self):
        self._run()
        self.finished.emit(self.packages)

    def _run(self):
        # `--format=legacy` keeps the "name (version)" layout PARSE_RE expects.
        output = self.commander.check_output("pip list --format=legacy", shell=True)
        if not output:
            logger.warning("pip failed.")
        else:
            for package, version, latest in PipListTask.filter(output):
                self.packages.append((package, version, latest))
class PipListOutDateTask(PipListTask):
    """Variant of PipListTask that lists outdated packages (``pip list -o``)."""
    # Matches `--format=columns` rows: name, version, latest, package type.
    OUTDATE_PARSE_RE = re.compile(r"([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)")

    @staticmethod
    def outdate_filter(output):
        # Yield (package, version, latest, type) rows, skipping the header
        # row ("Package") and the separator row of dashes.
        if not output:
            return
        for line in output.splitlines():
            g = PipListOutDateTask.OUTDATE_PARSE_RE.match(line)
            if g:
                package, version, latest, pack_type = g.groups()
                if not package or package[0] == "-" or package == "Package":
                    continue
                yield package, version, latest, pack_type

    def run(self):
        # noinspection PyBroadException
        try:
            output = self.commander.check_output("pip list -o --format=columns", shell=True)
            if not output:
                logger.warning("pip failed.")
        except:
            # pip may fail entirely (network, broken env); still emit what
            # has been collected so listeners are not left hanging.
            self.finished.emit(self.packages)
            return
        for package, version, latest, _ in self.outdate_filter(output):
            self.packages.append((package, version, latest))
        self.finished.emit(self.packages)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Copyright © 2014 German Neuroinformatics Node (G-Node)\n\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted unde... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Copyright © 2014 German Neuroinformatics Node (G-Node)\n\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, ar... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright © 2014 German Neuroinformatics Node (G-Node)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted under the terms of the BSD License. See
LICENSE file in the root of the Project.
Author: Jan Grewe <jan.grewe@g-node.org>
This tutorial shows how irregularly sampled data is stored in nix-files.
See https://github.com/G-node/nix/wiki for more information.
"""
import nixio as nix
import numpy as np
import matplotlib.pylab as plt
def create_data(duration, interval):
    """Create fake, irregularly sampled event times and sine amplitudes.

    Event intervals are drawn from a Poisson distribution with mean
    ``interval`` (in seconds), accumulated into event times, and the value
    of a sine wave is sampled at each event time.

    Parameters
    ----------
    duration : float
        Total duration in seconds; events beyond it are discarded.
    interval : float
        Mean inter-event interval in seconds.

    Returns
    -------
    (times, amplitudes) : tuple of np.ndarray
        Event times (s, rounded to ms) and the sine value at each time.
    """
    # np.random.poisson requires an integer sample count; the original
    # passed the float 1.5*duration/interval, which raises TypeError on
    # modern NumPy. Oversample by 1.5x so enough events reach `duration`.
    n_events = int(1.5 * duration / interval)
    times = np.around(np.cumsum(np.random.poisson(interval * 1000, n_events)) / 1000., 3)
    times = times[times <= duration]
    x = np.arange(0, times[-1] * 2 * np.pi, 0.001)
    y = np.sin(5 * x)
    # Map each event time onto the index of the corresponding sine sample.
    return times, y[np.asarray(times / 0.001 * 2 * np.pi, dtype=int)]
def plot_data(data_array):
    """Plot a nix DataArray whose first dimension is a RangeDimension.

    Parameters
    ----------
    data_array : nix DataArray
        Must carry a RangeDimension (ticks) as its first dimension and
        ``label``/``unit``/``name`` metadata used for axis annotation.
    """
    x_axis = data_array.dimensions[0]
    x = list(x_axis.ticks)
    y = data_array.data
    plt.plot(x, y, marker='o', color='dodgerblue')
    plt.xlabel(x_axis.label + " [" + x_axis.unit + "]")
    plt.ylabel(data_array.label + " [" + data_array.unit + "]")
    plt.title(data_array.name)
    # Use the local tick values for the x-limit; the original read the
    # module-level `times`, which raised NameError whenever this function
    # was called outside the demo script below.
    plt.xlim([0, x[-1]])
    plt.ylim(np.min(y) * 1.1, np.max(y) * 1.1)
    plt.show()
if __name__ == "__main__":
    # fake some data
    times, y = create_data(1.0, 0.02)
    # create a new file overwriting any existing content
    file_name = 'irregular_data_example.h5'
    file = nix.File.open(file_name, nix.FileMode.Overwrite)
    # create a 'Block' that represents a grouping object. Here, the recording session.
    # it gets a name and a type
    block = file.create_block("block name", "nix.session")
    # create a 'DataArray' to take the data, add some information about the signal
    data = block.create_data_array("sinewave", "nix.irregular_sampled", data=y)
    data.unit = "mV"
    data.label = "voltage"
    # add a descriptor for the xaxis: a RangeDimension carrying the event times
    dim = data.append_range_dimension(times)
    dim.unit = "s"
    dim.label = "time"
    # let's plot the data from the stored information
    plot_data(data)
    # close the file to flush everything to disk
    file.close()
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n#!/usr/bin/env python\n#\n# Copyright 2017 Anil Thomas\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy ... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n#!/usr/bin/env python\n#\n# Copyright 2017 Anil Thomas\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You ma... | ```python
#!/usr/bin/env python
#
# Copyright 2017 Anil Thomas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Train and validate a model
Usage:
./run.py -w </path/to/data> -e 8 -r 0
"""
import os
import numpy as np

from data0 import ChunkLoader
import settings
import video

# Parse the command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--s", help="source data folder")
parser.add_argument("--d", help="destination data folder")
parser.add_argument("--tm", help="test mode")
parser.add_argument("--set_name", help="set name")
args = parser.parse_args()

# Setup data provider
repo_dir = args.s
save_folder = args.d
common = dict(datum_dtype=np.uint8, repo_dir=repo_dir, test_mode=args.tm)
set_name= args.set_name
test = ChunkLoader(set_name=set_name, **common)

# One "uid chunk-index label" line per chunk of every video.
labels_file = open(save_folder+"labels.txt",'w')
# Export each video's chunks, locations and labels as .npy files.
# (Python 2 script: uses print statements and `print >>file`.)
for i in range(test.data_size):
    c,s,t = test.next_video(i)
    if i%100==0:
        print "procedding ",i
    # print np.array(c).shape,np.array(s).shape,np.array(t).shape
    np.save(save_folder+"locaition_"+test.current_uid,np.array(s))
    # np.save(save_folder+"label_"+test.current_uid,np.array(t))
    np.save(save_folder+"chunk_"+test.current_uid,np.array(c))
    # NOTE(review): this inner `i` shadows the outer video index; harmless
    # because the outer loop rebinds `i` each iteration, but confusing.
    for i,l in enumerate(t):
        print >>labels_file,test.current_uid,i,l
labels_file.close()

# data = ChunkLoader(set_name=set_name, augment=not args.test_mode, **common)
# repo_dir = '/Users/chen.liu/nfs03/share_data/Intelligence/Scoupon/items/luna_new_vids/'
# Setup data provider
# repo_dir = args.data_dir
# common = dict(datum_dtype=np.uint8, repo_dir=repo_dir, test_mode=args.test_mode)
# test = ChunkLoader(set_name='test', augment=not args.test_mode, **common)
# print "# batches", test.nbatches
#
# for uid, data, targets, starts in test:
#     print uid
#     print data.get()
#     print targets.get()
#     print starts.get()
``` |
[
{
"content": "Repeat the following code:\n```python\n\"\"\"\nFitPanel class contains fields allowing to fit models and data\n\n:note: For Fit to be performed the user should check at least one parameter\n on fit Panel window.\n\n\"\"\"\nimport wx\nimport wx.lib.newevent\nfrom wx.aui import AuiNotebook as N... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n\"\"\"\nFitPanel class contains fields allowing to fit models and data\n\n:note: For Fit to be performed the user should check at least one parameter\n on fit Panel window.\n\n\"\"\"\nimport wx\nimport wx.lib.newevent\nfrom wx.aui import ... | ```python
"""
FitPanel class contains fields allowing to fit models and data
:note: For Fit to be performed the user should check at least one parameter
on fit Panel window.
"""
import wx
import wx.lib.newevent
from wx.aui import AuiNotebook as Notebook
import datetime
from bumps.gui.convergence_view import ConvergenceView
from bumps.gui.uncertainty_view import UncertaintyView, CorrelationView, TraceView
from bumps.dream.stats import var_stats, format_vars
from sas.sasgui.guiframe.panel_base import PanelBase
from sas.sasgui.guiframe.events import StatusEvent
(PlotResultEvent, EVT_PLOT_RESULT) = wx.lib.newevent.NewEvent()
class ResultPanel(Notebook, PanelBase):
    """
    Tabbed panel that displays fit results (convergence, uncertainty,
    correlation and trace views) as notebook pages.

    :note: For Fit to be performed the user should check at least one parameter
        on fit Panel window.
    """
    ## Internal name for the AUI manager
    window_name = "Result panel"
    ## Title to appear on top of the window
    window_caption = "Result Panel"
    CENTER_PANE = True

    def __init__(self, parent, manager=None, *args, **kwargs):
        """Create the notebook and wire the plot-result and close handlers."""
        style = ((wx.aui.AUI_NB_WINDOWLIST_BUTTON
                  | wx.aui.AUI_NB_DEFAULT_STYLE
                  | wx.CLIP_CHILDREN)
                 & ~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
        Notebook.__init__(self, parent, wx.ID_ANY, style=style)
        PanelBase.__init__(self, parent)
        self.frame = parent
        self.Bind(EVT_PLOT_RESULT, self.on_plot_results)
        self.frame.Bind(wx.EVT_CLOSE, self.on_close)
        self._manager = None

    def on_close(self, event):
        # Hide instead of destroying when the close can be vetoed, so the
        # panel (and its pages) survive for the next result.
        if event.CanVeto():
            self.frame.Hide()
            event.Veto()
        else:
            event.Skip()

    def on_plot_results(self, event):
        # Populate/refresh the result pages from a finished fit result.
        self.frame.Show(True)
        result = event.result[0][0]
        filename = result.data.sas_data.filename
        current_time = datetime.datetime.now().strftime("%I:%M%p, %B %d, %Y")
        self.parent.SetTitle(self.window_name + " - " + filename + " - " + current_time)
        # Convergence page only when the result carries convergence history.
        if hasattr(result, 'convergence'):
            best, pop = result.convergence[:, 0], result.convergence[:, 1:]
            self._get_view(ConvergenceView).update(best, pop)
        else:
            self._del_view(ConvergenceView)
        # Uncertainty-related pages only for results with an uncertainty state
        # (e.g. DREAM fits).
        if hasattr(result, 'uncertainty_state'):
            stats = var_stats(result.uncertainty_state.draw())
            msg = format_vars(stats)
            self._get_view(CorrelationView).update(result.uncertainty_state)
            self._get_view(UncertaintyView).update((result.uncertainty_state, stats))
            self._get_view(TraceView).update(result.uncertainty_state)
            # TODO: stats should be stored in result rather than computed in bumps UncertaintyView
            wx.PostEvent(self.frame.parent,
                         StatusEvent(status=msg, info="info"))
        else:
            for view in (CorrelationView, UncertaintyView, TraceView):
                self._del_view(view)

    def get_frame(self):
        # Accessor for the hosting frame.
        return self.frame

    def _get_view(self, view_class):
        # Return the existing page for view_class, creating and adding it
        # (titled view_class.title) when no such page exists yet.
        for idx in range(self.PageCount):
            if self.GetPageText(idx) == view_class.title:
                return self.GetPage(idx)
        else:
            panel = view_class(self)
            self.AddPage(panel, panel.title)
            return panel

    def _del_view(self, view_class):
        # Remove the page for view_class if present.
        for idx in range(self.PageCount):
            if self.GetPageText(idx) == view_class.title:
                self.DeletePage(idx)
``` |
[
{
"content": "```python\n# Copyright (c) 2015 Ultimaker B.V.\n# Cura is released under the terms of the AGPLv3 or higher.\n\nfrom UM.Backend.Backend import Backend\nfrom UM.Application import Application\nfrom UM.Scene.SceneNode import SceneNode\nfrom UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterat... | [
{
"content": "<|memory_start|>```python\n# Copyright (c) 2015 Ultimaker B.V.\n# Cura is released under the terms of the AGPLv3 or higher.\n\nfrom UM.Backend.Backend import Backend\nfrom UM.Application import Application\nfrom UM.Scene.SceneNode import SceneNode\nfrom UM.Scene.Iterator.DepthFirstIterator import ... | ```python
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.Backend.Backend import Backend
from UM.Application import Application
from UM.Scene.SceneNode import SceneNode
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator
from UM.Preferences import Preferences
from UM.Math.Vector import Vector
from UM.Signal import Signal
from UM.Logger import Logger
from UM.Resources import Resources
from UM.Settings.SettingOverrideDecorator import SettingOverrideDecorator
from UM.Message import Message
from cura.OneAtATimeIterator import OneAtATimeIterator
from . import Cura_pb2
from . import ProcessSlicedObjectListJob
from . import ProcessGCodeJob
from . import StartSliceJob
import os
import sys
import numpy
from PyQt5.QtCore import QTimer
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
class CuraEngineBackend(Backend):
    ## Backend that drives the external CuraEngine process and translates
    #  its protobuf messages into scene, layer and g-code updates.
    def __init__(self):
        super().__init__()

        # Find out where the engine is located, and how it is called. This depends on how Cura is packaged and which OS we are running on.
        default_engine_location = os.path.join(Application.getInstallPrefix(), "bin", "CuraEngine")
        if hasattr(sys, "frozen"):
            default_engine_location = os.path.join(os.path.dirname(os.path.abspath(sys.executable)), "CuraEngine")
        if sys.platform == "win32":
            default_engine_location += ".exe"
        default_engine_location = os.path.abspath(default_engine_location)
        Preferences.getInstance().addPreference("backend/location", default_engine_location)

        self._scene = Application.getInstance().getController().getScene()
        self._scene.sceneChanged.connect(self._onSceneChanged)

        # Workaround to disable layer view processing if layer view is not active.
        self._layer_view_active = False
        Application.getInstance().getController().activeViewChanged.connect(self._onActiveViewChanged)
        self._onActiveViewChanged()
        self._stored_layer_data = None

        Application.getInstance().getMachineManager().activeMachineInstanceChanged.connect(self._onChanged)

        self._profile = None
        Application.getInstance().getMachineManager().activeProfileChanged.connect(self._onActiveProfileChanged)
        self._onActiveProfileChanged()

        # Debounce timer: bursts of scene/setting changes collapse into a
        # single slice() call half a second after the last change.
        self._change_timer = QTimer()
        self._change_timer.setInterval(500)
        self._change_timer.setSingleShot(True)
        self._change_timer.timeout.connect(self.slice)

        # Dispatch table from protobuf message type to handler.
        self._message_handlers[Cura_pb2.SlicedObjectList] = self._onSlicedObjectListMessage
        self._message_handlers[Cura_pb2.Progress] = self._onProgressMessage
        self._message_handlers[Cura_pb2.GCodeLayer] = self._onGCodeLayerMessage
        self._message_handlers[Cura_pb2.GCodePrefix] = self._onGCodePrefixMessage
        self._message_handlers[Cura_pb2.ObjectPrintTime] = self._onObjectPrintTimeMessage

        self._slicing = False        # True while a slice is in progress
        self._restart = False        # re-slice once the backend reconnects
        self._enabled = True         # suppressed while a tool is active
        self._always_restart = True  # restart engine after every slice

        self._message = None         # currently shown progress Message, if any

        self.backendConnected.connect(self._onBackendConnected)

        Application.getInstance().getController().toolOperationStarted.connect(self._onToolOperationStarted)
        Application.getInstance().getController().toolOperationStopped.connect(self._onToolOperationStopped)

        Application.getInstance().getMachineManager().activeMachineInstanceChanged.connect(self._onInstanceChanged)

    ## Get the command that is used to call the engine.
    #  This is useful for debugging and used to actually start the engine.
    #  \return list of commands and args / parameters.
    def getEngineCommand(self):
        active_machine = Application.getInstance().getMachineManager().getActiveMachineInstance()
        if not active_machine:
            return None

        return [Preferences.getInstance().getValue("backend/location"), "connect", "127.0.0.1:{0}".format(self._port), "-j", active_machine.getMachineDefinition().getPath(), "-vv"]

    ## Emitted when we get a message containing print duration and material amount. This also implies the slicing has finished.
    #  \param time The amount of time the print will take.
    #  \param material_amount The amount of material the print will use.
    printDurationMessage = Signal()

    ## Emitted when the slicing process starts.
    slicingStarted = Signal()

    ## Emitted whne the slicing process is aborted forcefully.
    slicingCancelled = Signal()

    ## Perform a slice of the scene.
    def slice(self):
        if not self._enabled:
            return

        # A slice is already running: kill it and flag for restart instead.
        if self._slicing:
            self._slicing = False
            self._restart = True
            if self._process is not None:
                Logger.log("d", "Killing engine process")
                try:
                    self._process.terminate()
                except: # terminating a process that is already terminating causes an exception, silently ignore this.
                    pass
            if self._message:
                self._message.hide()
                self._message = None

            self.slicingCancelled.emit()
            return

        if self._profile.hasErrorValue():
            Logger.log('w', "Profile has error values. Aborting slicing")
            if self._message:
                self._message.hide()
                self._message = None
            self._message = Message(catalog.i18nc("@info:status", "Unable to slice. Please check your setting values for errors."))
            self._message.show()
            return #No slicing if we have error values since those are by definition illegal values.

        self.processingProgress.emit(0.0)
        if not self._message:
            self._message = Message(catalog.i18nc("@info:status", "Slicing..."), 0, False, -1)
            self._message.show()
        else:
            self._message.setProgress(-1)

        self._scene.gcode_list = []
        self._slicing = True

        # Serialising the scene/settings happens in a background job.
        job = StartSliceJob.StartSliceJob(self._profile, self._socket)
        job.start()
        job.finished.connect(self._onStartSliceCompleted)

    def _onStartSliceCompleted(self, job):
        # Abort quietly when the start-slice job failed.
        if job.getError() or job.getResult() != True:
            if self._message:
                self._message.hide()
                self._message = None
            return

    def _onSceneChanged(self, source):
        # Only mesh-bearing scene nodes trigger a re-slice.
        if type(source) is not SceneNode:
            return

        if source is self._scene.getRoot():
            return

        if source.getMeshData() is None:
            return

        if source.getMeshData().getVertices() is None:
            return

        self._onChanged()

    def _onActiveProfileChanged(self):
        # Re-wire the setting-change listener to the newly active profile.
        if self._profile:
            self._profile.settingValueChanged.disconnect(self._onSettingChanged)

        self._profile = Application.getInstance().getMachineManager().getActiveProfile()
        if self._profile:
            self._profile.settingValueChanged.connect(self._onSettingChanged)
            self._onChanged()

    def _onSettingChanged(self, setting):
        self._onChanged()

    def _onSlicedObjectListMessage(self, message):
        # Only build layer data when the layer view is visible; otherwise
        # stash the message for later (see _onActiveViewChanged).
        if self._layer_view_active:
            job = ProcessSlicedObjectListJob.ProcessSlicedObjectListJob(message)
            job.start()
        else :
            self._stored_layer_data = message

    def _onProgressMessage(self, message):
        # message.amount is a fraction in [0, 1].
        if self._message:
            self._message.setProgress(round(message.amount * 100))

        self.processingProgress.emit(message.amount)

    def _onGCodeLayerMessage(self, message):
        self._scene.gcode_list.append(message.data.decode("utf-8", "replace"))

    def _onGCodePrefixMessage(self, message):
        # Prefix (start g-code) goes before all layer g-code.
        self._scene.gcode_list.insert(0, message.data.decode("utf-8", "replace"))

    def _onObjectPrintTimeMessage(self, message):
        # Arrival of the print-time message marks the end of slicing.
        self.printDurationMessage.emit(message.time, message.material_amount)
        self.processingProgress.emit(1.0)

        self._slicing = False

        if self._message:
            self._message.setProgress(100)
            self._message.hide()
            self._message = None

        if self._always_restart:
            try:
                self._process.terminate()
                self._createSocket()
            except: # terminating a process that is already terminating causes an exception, silently ignore this.
                pass

    def _createSocket(self):
        super()._createSocket()

        # Message-type ids must match the engine's registration order.
        self._socket.registerMessageType(1, Cura_pb2.Slice)
        self._socket.registerMessageType(2, Cura_pb2.SlicedObjectList)
        self._socket.registerMessageType(3, Cura_pb2.Progress)
        self._socket.registerMessageType(4, Cura_pb2.GCodeLayer)
        self._socket.registerMessageType(5, Cura_pb2.ObjectPrintTime)
        self._socket.registerMessageType(6, Cura_pb2.SettingList)
        self._socket.registerMessageType(7, Cura_pb2.GCodePrefix)

    ## Manually triggers a reslice
    def forceSlice(self):
        self._change_timer.start()

    def _onChanged(self):
        if not self._profile:
            return

        self._change_timer.start()

    def _onBackendConnected(self):
        # A slice was cancelled for restart; run it now that we reconnected.
        if self._restart:
            self._onChanged()
            self._restart = False

    def _onToolOperationStarted(self, tool):
        self._enabled = False # Do not reslice when a tool is doing it's 'thing'

    def _onToolOperationStopped(self, tool):
        self._enabled = True # Tool stop, start listening for changes again.
        self._onChanged()

    def _onActiveViewChanged(self):
        if Application.getInstance().getController().getActiveView():
            view = Application.getInstance().getController().getActiveView()
            if view.getPluginId() == "LayerView":
                self._layer_view_active = True
                # Process any layer data that arrived while the view was inactive.
                if self._stored_layer_data:
                    job = ProcessSlicedObjectListJob.ProcessSlicedObjectListJob(self._stored_layer_data)
                    job.start()
                    self._stored_layer_data = None
            else:
                self._layer_view_active = False

    def _onInstanceChanged(self):
        # Active machine changed: abort any running slice and flag restart.
        self._slicing = False
        self._restart = True
        if self._process is not None:
            Logger.log("d", "Killing engine process")
            try:
                self._process.terminate()
            except: # terminating a process that is already terminating causes an exception, silently ignore this.
                pass
        self.slicingCancelled.emit()
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nfrom datetime import datetime\n\nfrom dateutil.relativedelta import relativedelta\n\nfrom feedrsub.database import db\nfrom feedrsub.models.period import PERIOD, Period\nfrom feedrsub.models.populate_db import populate_periods\n\n\ndef test_pop... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nfrom datetime import datetime\n\nfrom dateutil.relativedelta import relativedelta\n\nfrom feedrsub.database import db\nfrom feedrsub.models.period import PERIOD, Period\nfrom feedrsub.models.populate_db import populate_periods\n... | ```python
from datetime import datetime
from dateutil.relativedelta import relativedelta
from feedrsub.database import db
from feedrsub.models.period import PERIOD, Period
from feedrsub.models.populate_db import populate_periods
def test_populate_periods(session):
    """populate_periods() must create one Period row per predefined name."""
    populate_periods()
    for expected in (PERIOD.DAILY, PERIOD.IMMEDIATE, PERIOD.WEEKLY, PERIOD.MONTHLY):
        row = Period.query.filter_by(name=expected).first()
        assert row.name == expected
def test_period_creation(session):
    """A newly committed Period can be queried back by name with its description."""
    name, desc = "YEARLY", "A Yearly period"
    db.session.add(Period(name, desc))
    db.session.commit()

    fetched = Period.query.filter_by(name=name).first()
    assert fetched.name == name
    assert fetched.description == desc
def test_get_from_date_with_name(session):
    """get_from_date accepts a period *name* and subtracts one day for DAILY."""
    reference = datetime.utcnow()
    expected = reference - relativedelta(days=1)
    assert Period.get_from_date(PERIOD.DAILY, reference) == expected
def test_get_from_date_with_period(session):
    """get_from_date accepts a Period *instance* and subtracts one day for DAILY."""
    reference = datetime.utcnow()
    expected = reference - relativedelta(days=1)
    daily = Period(name=PERIOD.DAILY)
    assert Period.get_from_date(daily, reference) == expected
``` |
[
{
"content": "Here is the snippet:\n```python\n# This file is part of PyEMMA.\n#\n# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)\n#\n# PyEMMA is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License ... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# This file is part of PyEMMA.\n#\n# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)\n#\n# PyEMMA is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General... | ```python
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 19.01.2015
@author: marscher
'''
from __future__ import absolute_import
import numpy as np
from pyemma._base.serialization.serialization import SerializableMixIn
from pyemma._ext.variational.solvers.direct import eig_corr
from pyemma._ext.variational.util import ZeroRankError
from pyemma.coordinates.estimation.covariance import LaggedCovariance
from pyemma.coordinates.transform._tica_base import TICABase, TICAModelBase
from pyemma.util.annotators import fix_docs
import warnings
__all__ = ['TICA']
@fix_docs
class TICA(TICABase, SerializableMixIn):
r""" Time-lagged independent component analysis (TICA)"""
__serialize_version = 0
    def __init__(self, lag, dim=-1, var_cutoff=0.95, kinetic_map=True, commute_map=False, epsilon=1e-6,
                 stride=1, skip=0, reversible=True, weights=None, ncov_max=float('inf')):
        r""" Time-lagged independent component analysis (TICA) [1]_, [2]_, [3]_.

        Parameters
        ----------
        lag : int
            lag time
        dim : int, optional, default -1
            Maximum number of significant independent components to use to reduce dimension of input data. -1 means
            all numerically available dimensions (see epsilon) will be used unless reduced by var_cutoff.
            Setting dim to a positive value is exclusive with var_cutoff.
        var_cutoff : float in the range [0,1], optional, default 0.95
            Determines the number of output dimensions by including dimensions until their cumulative kinetic variance
            exceeds the fraction subspace_variance. var_cutoff=1.0 means all numerically available dimensions
            (see epsilon) will be used, unless set by dim. Setting var_cutoff smaller than 1.0 is exclusive with dim
        kinetic_map : bool, optional, default True
            Eigenvectors will be scaled by eigenvalues. As a result, Euclidean distances in the transformed data
            approximate kinetic distances [4]_. This is a good choice when the data is further processed by clustering.
        commute_map : bool, optional, default False
            Eigenvector_i will be scaled by sqrt(timescale_i / 2). As a result, Euclidean distances in the transformed
            data will approximate commute distances [5]_.
        epsilon : float
            eigenvalue norm cutoff. Eigenvalues of C0 with norms <= epsilon will be
            cut off. The remaining number of eigenvalues define the size
            of the output.
        stride: int, optional, default = 1
            Use only every stride-th time step. By default, every time step is used.
        skip : int, default=0
            skip the first initial n frames per trajectory.
        reversible: bool, default=True
            symmetrize correlation matrices C_0, C_{\tau}.
        weights: object or list of ndarrays, optional, default = None
            * An object that allows to compute re-weighting factors to estimate equilibrium means and correlations from
              off-equilibrium data. The only requirement is that weights possesses a method weights(X), that accepts a
              trajectory X (np.ndarray(T, n)) and returns a vector of re-weighting factors (np.ndarray(T,)).
            * A list of ndarrays (ndim=1) specifies the weights for each frame of each trajectory.

        Notes
        -----
        Given a sequence of multivariate data :math:`X_t`, computes the mean-free
        covariance and time-lagged covariance matrix:

        .. math::

            C_0 &= (X_t - \mu)^T (X_t - \mu) \\
            C_{\tau} &= (X_t - \mu)^T (X_{t + \tau} - \mu)

        and solves the eigenvalue problem

        .. math:: C_{\tau} r_i = C_0 \lambda_i(tau) r_i,

        where :math:`r_i` are the independent components and :math:`\lambda_i(tau)` are
        their respective normalized time-autocorrelations. The eigenvalues are
        related to the relaxation timescale by

        .. math:: t_i(tau) = -\tau / \ln |\lambda_i|.

        When used as a dimension reduction method, the input data is projected
        onto the dominant independent components.

        References
        ----------
        .. [1] Perez-Hernandez G, F Paul, T Giorgino, G De Fabritiis and F Noe. 2013.
           Identification of slow molecular order parameters for Markov model construction
           J. Chem. Phys. 139, 015102. doi:10.1063/1.4811489
        .. [2] Schwantes C, V S Pande. 2013.
           Improvements in Markov State Model Construction Reveal Many Non-Native Interactions in the Folding of NTL9
           J. Chem. Theory. Comput. 9, 2000-2009. doi:10.1021/ct300878a
        .. [3] L. Molgedey and H. G. Schuster. 1994.
           Separation of a mixture of independent signals using time delayed correlations
           Phys. Rev. Lett. 72, 3634.
        .. [4] Noe, F. and Clementi, C. 2015. Kinetic distance and kinetic maps from molecular dynamics simulation.
           J. Chem. Theory. Comput. doi:10.1021/acs.jctc.5b00553
        .. [5] Noe, F., Banisch, R., Clementi, C. 2016. Commute maps: separating slowly-mixing molecular configurations
           for kinetic modeling. J. Chem. Theory. Comput. doi:10.1021/acs.jctc.6b00762
        """
        super(TICA, self).__init__()

        # kinetic_map and commute_map are mutually exclusive scalings.
        if kinetic_map and commute_map:
            raise ValueError('Trying to use both kinetic_map and commute_map. Use either or.')
        if (kinetic_map or commute_map) and not reversible:
            kinetic_map = False
            commute_map = False
            # NOTE(review): the two string parts concatenate to "set toFalse."
            # (missing space); message text deliberately left untouched here.
            warnings.warn("Cannot use kinetic_map or commute_map for non-reversible processes, both will be set to"
                          "False.")

        # this instance will be set by partial fit.
        self._covar = None
        # dim/var_cutoff are also passed through set_params below; the direct
        # assignments keep the attributes populated before set_params runs.
        self.dim = dim
        self.var_cutoff = var_cutoff
        self.set_params(lag=lag, dim=dim, var_cutoff=var_cutoff, kinetic_map=kinetic_map, commute_map=commute_map,
                        epsilon=epsilon, reversible=reversible, stride=stride, skip=skip, weights=weights, ncov_max=ncov_max)
@property
def model(self):
if not hasattr(self, '_model') or self._model is None:
self._model = TICAModelBase()
return self._model
def describe(self):
try:
dim = self.dimension()
except RuntimeError:
dim = self.dim
return "[TICA, lag = %i; max. output dim. = %i]" % (self._lag, dim)
def estimate(self, X, **kwargs):
    r"""
    Chunk-based parameterization of TICA. Iterates over all data and estimates
    the mean, covariance and time lagged covariance. Finally, the
    generalized eigenvalue problem is solved to determine
    the independent components.
    """
    # Delegates to the streaming-estimator base class, which wraps X in a
    # data source and ultimately drives _estimate() below.
    return super(TICA, self).estimate(X, **kwargs)
def partial_fit(self, X):
    """ incrementally update the covariances and mean.

    Parameters
    ----------
    X: array, list of arrays, PyEMMA reader
        input data.

    Notes
    -----
    The projection matrix is first being calculated upon its first access.
    """
    # Wrap the input in a data source so arrays, lists of arrays and
    # readers are all handled uniformly and chunk-wise.
    from pyemma.coordinates import source
    iterable = source(X, chunksize=self.chunksize)

    # Requesting more output dimensions than the data has input features
    # is a configuration error; fail before touching any statistics.
    indim = iterable.dimension()
    if not self.dim <= indim:
        raise RuntimeError("requested more output dimensions (%i) than dimension"
                           " of input data (%i)" % (self.dim, indim))

    # Lazily create the running covariance estimator on the first call so
    # that repeated partial_fit() calls accumulate into the same statistics.
    if self._covar is None:
        self._covar = LaggedCovariance(c00=True, c0t=True, ctt=False, remove_data_mean=True, reversible=self.reversible,
                                       lag=self.lag, bessel=False, stride=self.stride, skip=self.skip,
                                       weights=self.weights, ncov_max=self.ncov_max)
    self._covar.partial_fit(iterable)
    self.model.update_model_params(mean=self._covar.mean,  # TODO: inefficient, fixme
                                   cov=self._covar.C00_,
                                   cov_tau=self._covar.C0t_)

    # Mark the model stale so the eigendecomposition is redone lazily on
    # the next access to the transform.
    self._estimated = False

    return self
def _estimate(self, iterable, **kw):
    """One-shot estimation: accumulate covariances over *iterable*, then
    solve the TICA eigenvalue problem and return the fitted model."""
    # Fresh covariance estimator (unlike partial_fit, no state is kept):
    # instantaneous (C00) and time-lagged (C0t) covariances with the data
    # mean removed; Bessel's correction disabled.
    covar = LaggedCovariance(c00=True, c0t=True, ctt=False, remove_data_mean=True, reversible=self.reversible,
                             lag=self.lag, bessel=False, stride=self.stride, skip=self.skip,
                             weights=self.weights, ncov_max=self.ncov_max)
    indim = iterable.dimension()
    # Validate the requested output dimension against the input dimension.
    if not self.dim <= indim:
        raise RuntimeError("requested more output dimensions (%i) than dimension"
                           " of input data (%i)" % (self.dim, indim))
    if self._logger_is_active(self._loglevel_DEBUG):
        self.logger.debug("Running TICA with tau=%i; Estimating two covariance matrices"
                          " with dimension (%i, %i)", self._lag, indim, indim)
    covar.estimate(iterable, chunksize=self.chunksize, **kw)
    self.model.update_model_params(mean=covar.mean,
                                   cov=covar.C00_,
                                   cov_tau=covar.C0t_)
    # Solve the generalized eigenvalue problem now; _estimate returns a
    # fully usable model.
    self._diagonalize()

    return self.model
def _diagonalize(self):
    """Solve the generalized eigenvalue problem C0t v = lambda C00 v and
    store eigenvalues/eigenvectors (optionally rescaled) on the model."""
    # diagonalize with low rank approximation
    self.logger.debug("diagonalize Cov and Cov_tau.")
    try:
        # epsilon truncates near-singular directions of C00.
        eigenvalues, eigenvectors = eig_corr(self.cov, self.cov_tau, self.epsilon, sign_maxelement=True)
    except ZeroRankError:
        raise ZeroRankError('All input features are constant in all time steps. No dimension would be left after dimension reduction.')
    if self.kinetic_map and self.commute_map:
        raise ValueError('Trying to use both kinetic_map and commute_map. Use either or.')
    if self.kinetic_map:  # scale by eigenvalues
        eigenvectors *= eigenvalues[None, :]
    if self.commute_map:  # scale by (regularized) timescales
        # NOTE(review): this evaluates as 1 + (-lag/log|l|), i.e. the implied
        # timescale shifted by one; confirm against ref. [5] whether
        # '-self.lag / np.log(...)' (the plain implied timescale) was intended.
        timescales = 1-self.lag / np.log(np.abs(eigenvalues))
        # dampen timescales smaller than the lag time, as in section 2.5 of ref. [5]
        regularized_timescales = 0.5 * timescales * np.maximum(np.tanh(np.pi * ((timescales - self.lag) / self.lag) + 1), 0)

        eigenvectors *= np.sqrt(regularized_timescales / 2)
    self.logger.debug("finished diagonalisation.")

    # compute cumulative variance (normalized squared eigenvalue spectrum)
    cumvar = np.cumsum(np.abs(eigenvalues) ** 2)
    cumvar /= cumvar[-1]

    self.model.update_model_params(cumvar=cumvar,
                                   eigenvalues=eigenvalues,
                                   eigenvectors=eigenvectors)
    self._estimated = True
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#!/usr/bin/python\n# Mario Hernandez\n# github.com/mariofix\n# Licences under GPL v3\n#\n# For use with nginx\n# uwsgi -s /tmp/ad-login.sock -w adlogin:app --enable-threads --chmod-socket=666\n#\nimport bender\n\nfrom flask import Fl... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#!/usr/bin/python\n# Mario Hernandez\n# github.com/mariofix\n# Licences under GPL v3\n#\n# For use with nginx\n# uwsgi -s /tmp/ad-login.sock -w adlogin:app --enable-threads --chmod-socket=666\n#\nimport bender\n\nfrom... | ```python
#!/usr/bin/python
# Mario Hernandez
# github.com/mariofix
# Licences under GPL v3
#
# For use with nginx
# uwsgi -s /tmp/ad-login.sock -w adlogin:app --enable-threads --chmod-socket=666
#
import bender
from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route("/auth", methods=["GET", "POST"])
def auth():
if request.method == "GET":
#I show an error message so the data does not go directly in the URL
salida = {'msg':"Debe usar POST", 'status':'ERROR'}
return jsonify(salida)
else:
salida = {'status':'ERROR','msg':'Debe ingresar datos para autentificacion'}
try:
resultado = bender.autentifica(request.form['username'],request.form['password'])
if resultado == 'OK':
salida = {'status':'success','msg':'usuario autentificado'}
else:
salida = {'status':'ERROR','msg':resultado}
except e:
print e
return jsonify(salida)
if __name__ == "__main__":
    # Development entry point only — disable debug and use the uwsgi socket
    # described in the header comment when deploying behind nginx.
    app.debug = True
    app.run(host='0.0.0.0')
``` |
[
{
"content": "Repeat the following code:\n```python\nimport re\nimport os\nimport commands\nimport logging\nfrom autotest.client.shared import error\nfrom virttest import virsh, utils_misc, xml_utils, libvirt_xml\nfrom virttest.libvirt_xml import vm_xml, xcepts\n\n\ndef xml_recover(vmxml):\n \"\"\"\n Reco... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\nimport re\nimport os\nimport commands\nimport logging\nfrom autotest.client.shared import error\nfrom virttest import virsh, utils_misc, xml_utils, libvirt_xml\nfrom virttest.libvirt_xml import vm_xml, xcepts\n\n\ndef xml_recover(vmxml):\n ... | ```python
import re
import os
import commands
import logging
from autotest.client.shared import error
from virttest import virsh, utils_misc, xml_utils, libvirt_xml
from virttest.libvirt_xml import vm_xml, xcepts
def xml_recover(vmxml):
    """
    Recover older xml config with backup vmxml.

    :params: vmxml: VMXML object
    :return: True on success, False when the undefine/define cycle failed.
    """
    try:
        # Drop the current definition (keeping snapshot metadata out of the
        # way) and re-register the backed-up XML.
        options = "--snapshots-metadata"
        vmxml.undefine(options)
        vmxml.define()
        return True
    except xcepts.LibvirtXMLError, detail:
        # Python 2 except syntax (file-wide convention); report, don't raise.
        logging.error("Recover older xml failed:%s.", detail)
        return False
def check_snap_in_image(vm_name, snap_name):
    """
    Look for *snap_name* in the qemu-img metadata of the domain's first disk.

    :params: vm_name: VM name
    :params: snap_name: Snapshot name
    :return: True when the snapshot shows up in 'qemu-img info', else False.
    """
    dom_xml = virsh.dumpxml(vm_name).stdout.strip()
    dom_tree = xml_utils.XMLTreeFile(dom_xml)
    disk_path = dom_tree.find("devices/disk/source").get("file")

    img_info = commands.getoutput("qemu-img info " + disk_path).strip()
    if re.search(snap_name, img_info) is None:
        return False
    logging.info("Find snapshot info in image")
    return True
def compose_disk_options(test, params, opt_names):
    """
    Compose the {disk,mem}spec options

    The diskspec file need to add suitable dir with the name which is configed
    individually, The 'value' after 'file=' is a parameter which also need to
    get from cfg

    :params: test & params: system parameters
    :params: opt_names: params get from cfg of {disk,mem}spec options
    :return: the spec string with the file path expanded, or None when
        opt_names carries no 'file=' element (callers rely on the None).
    """
    if opt_names.find("file=") >= 0:
        opt_disk = opt_names.split("file=")
        opt_list = opt_disk[1].split(",")

        if len(opt_list) > 1:
            # BUG FIX: the original kept only opt_list[1], silently dropping
            # any further comma-separated options AND the separating comma
            # itself, mangling specs like 'vda,file=disk,snapshot=external'.
            left_opt = "," + ",".join(opt_list[1:])
        else:
            left_opt = ""

        # For negative/external tests the real file name comes from cfg.
        if params.get("bad_disk") is not None or \
           params.get("external_disk") is not None:
            spec_disk = os.path.join(test.virtdir, params.get(opt_list[0]))
        else:
            spec_disk = os.path.join(test.virtdir, opt_list[0])

        return opt_disk[0] + "file=" + spec_disk + left_opt
def check_snapslist(vm_name, options, option_dict, output,
                    snaps_before, snaps_list):
    """
    Check the result of a snapshot-create-as run against the requested
    options: snapshot list membership, snapshot/domain XML contents,
    disk-only files, halt state, and diskspec/memspec details.

    :params: vm_name: VM name
    :params: options: the option string passed to snapshot-create-as
    :params: option_dict: the same options parsed into a dict
    :params: output: stdout of the snapshot-create-as command
    :params: snaps_before: snapshot list recorded before the command ran
    :params: snaps_list: snapshot list recorded after the command ran
    """
    no_metadata = options.find("--no-metadata")
    fdisks = "disks"

    # command with print-xml will not really create snapshot
    if options.find("print-xml") >= 0:
        xtf = xml_utils.XMLTreeFile(output)

        # With --print-xml there isn't new snapshot created
        if len(snaps_before) != len(snaps_list):
            raise error.TestFail("--print-xml create new snapshot")
    else:
        # The following does not check with print-xml
        get_sname = output.split()[2]

        # check domain/snapshot xml depends on if have metadata
        if no_metadata < 0:
            output_dump = virsh.snapshot_dumpxml(vm_name,
                                                 get_sname).stdout.strip()
        else:
            output_dump = virsh.dumpxml(vm_name).stdout.strip()
            fdisks = "devices"

        xtf = xml_utils.XMLTreeFile(output_dump)

        find = 0
        for snap in snaps_list:
            if snap == get_sname:
                find = 1
                break

        # Should find snap in snaplist without --no-metadata
        if (find == 0 and no_metadata < 0):
            raise error.TestFail("Can not find snapshot %s!"
                                 % get_sname)
        # Should not find snap in list without metadata
        elif (find == 1 and no_metadata >= 0):
            raise error.TestFail("Can find snapshot metadata even "
                                 "if have --no-metadata")
        elif (find == 0 and no_metadata >= 0):
            logging.info("Can not find snapshot %s as no-metadata "
                         "is given" % get_sname)

            # Check snapshot only in qemu-img
            if (options.find("--disk-only") < 0 and
                    options.find("--memspec") < 0):
                ret = check_snap_in_image(vm_name, get_sname)

                if ret is False:
                    raise error.TestFail("No snap info in image")
        else:
            logging.info("Find snapshot %s in snapshot list."
                         % get_sname)

        # Check if the disk file exist when disk-only is given
        if options.find("disk-only") >= 0:
            for disk in xtf.find(fdisks).findall('disk'):
                diskpath = disk.find('source').get('file')
                if os.path.isfile(diskpath):
                    logging.info("disk file %s exist" % diskpath)
                    os.remove(diskpath)
                else:
                    # Didn't find <source file="path to disk"/>
                    # in output - this could leave a file around
                    # wherever the main OS image file is found
                    logging.debug("output_dump=%s", output_dump)
                    raise error.TestFail("Can not find disk %s"
                                         % diskpath)

        # Check if the guest is halted when 'halt' is given
        if options.find("halt") >= 0:
            domstate = virsh.domstate(vm_name)
            if re.match("shut off", domstate.stdout):
                logging.info("Domain is halted after create "
                             "snapshot")
            else:
                raise error.TestFail("Domain is not halted after "
                                     "snapshot created")

    # Check the snapshot xml regardless of having print-xml or not
    if (options.find("name") >= 0 and no_metadata < 0):
        if xtf.findtext('name') == option_dict["name"]:
            logging.info("get snapshot name same as set")
        else:
            raise error.TestFail("Get wrong snapshot name %s" %
                                 xtf.findtext('name'))

    if (options.find("description") >= 0 and no_metadata < 0):
        desc = xtf.findtext('description')
        if desc == option_dict["description"]:
            logging.info("get snapshot description same as set")
        else:
            raise error.TestFail("Get wrong description on xml")

    if options.find("diskspec") >= 0:
        if isinstance(option_dict['diskspec'], list):
            index = len(option_dict['diskspec'])
        else:
            index = 1

        disks = xtf.find(fdisks).findall('disk')

        for num in range(index):
            if isinstance(option_dict['diskspec'], list):
                option_disk = option_dict['diskspec'][num]
            else:
                option_disk = option_dict['diskspec']

            option_disk = "name=" + option_disk
            disk_dict = utils_misc.valued_option_dict(option_disk,
                                                      ",", 0, "=")
            logging.debug("disk_dict is %s", disk_dict)

            # For no metadata snapshot do not check name and
            # snapshot
            if no_metadata < 0:
                dname = disks[num].get('name')
                logging.debug("dname is %s", dname)
                if dname == disk_dict['name']:
                    logging.info("get disk%d name same as set in "
                                 "diskspec", num)
                else:
                    # BUG FIX: '% num, dname' applied % to num alone and
                    # passed dname as a second TestFail argument, raising
                    # TypeError on the format itself; use a tuple.
                    raise error.TestFail("Get wrong disk%d name %s"
                                         % (num, dname))

                if option_disk.find('snapshot=') >= 0:
                    dsnap = disks[num].get('snapshot')
                    logging.debug("dsnap is %s", dsnap)
                    if dsnap == disk_dict['snapshot']:
                        logging.info("get disk%d snapshot type same"
                                     " as set in diskspec", num)
                    else:
                        # BUG FIX: tuple for the two format arguments.
                        raise error.TestFail("Get wrong disk%d "
                                             "snapshot type %s" %
                                             (num, dsnap))

            if option_disk.find('driver=') >= 0:
                dtype = disks[num].find('driver').get('type')
                if dtype == disk_dict['driver']:
                    logging.info("get disk%d driver type same as "
                                 "set in diskspec", num)
                else:
                    # BUG FIX: tuple for the two format arguments.
                    raise error.TestFail("Get wrong disk%d driver "
                                         "type %s" % (num, dtype))

            if option_disk.find('file=') >= 0:
                sfile = disks[num].find('source').get('file')
                if sfile == disk_dict['file']:
                    logging.info("get disk%d source file same as "
                                 "set in diskspec", num)
                else:
                    # BUG FIX: tuple for the two format arguments.
                    raise error.TestFail("Get wrong disk%d source "
                                         "file %s" % (num, sfile))

    # For memspec check if the xml is same as setting
    # Also check if the mem file exists
    if options.find("memspec") >= 0:
        memspec = option_dict['memspec']
        # BUG FIX: re.search() returns a match object or None, never a
        # number; comparing it with '< 0' only "worked" by accident on
        # Python 2. Test for "no match" explicitly.
        if re.search('file=', option_dict['memspec']) is None:
            memspec = 'file=' + option_dict['memspec']
        mem_dict = utils_misc.valued_option_dict(memspec, ",", 0,
                                                 "=")
        logging.debug("mem_dict is %s", mem_dict)

        if no_metadata < 0:
            if memspec.find('snapshot=') >= 0:
                snap = xtf.find('memory').get('snapshot')
                if snap == mem_dict['snapshot']:
                    logging.info("get memory snapshot type same as"
                                 " set in diskspec")
                else:
                    raise error.TestFail("Get wrong memory snapshot"
                                         " type on print xml")

            memfile = xtf.find('memory').get('file')
            if memfile == mem_dict['file']:
                logging.info("get memory file same as set in "
                             "diskspec")
            else:
                # BUG FIX: TestFail does not %-format its arguments the
                # way logging does; interpolate explicitly.
                raise error.TestFail("Get wrong memory file on "
                                     "print xml %s" % memfile)

        if options.find("print-xml") < 0:
            if os.path.isfile(mem_dict['file']):
                logging.info("memory file generated")
                os.remove(mem_dict['file'])
            else:
                # BUG FIX: interpolate explicitly (see above).
                raise error.TestFail("Fail to generate memory file"
                                     " %s" % mem_dict['file'])
def run_virsh_snapshot_create_as(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that the clean repo can be used because qemu-guest-agent need to
    be installed in guest

    The command create a snapshot (disk and RAM) from arguments which including
    the following point
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """
    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    # Read the test configuration.
    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    external_disk = params.get("external_disk")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    diskspec_opts = params.get("diskspec_opts")

    # locals() snapshot is used as a lookup table for diskopts_N below.
    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # if the parameters have the disk without "file=" then we only need to
        # add testdir for it.
        if mem_options is None:
            mem_options = os.path.join(test.virtdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    # Append one --diskspec per configured disk.
    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    # NOTE(review): os.open() leaks the file descriptor; it is never closed.
    if bad_disk is not None:
        bad_disk = os.path.join(test.virtdir, bad_disk)
        os.open(bad_disk, os.O_RDWR | os.O_CREAT)

    # Generate external disk
    if external_disk is not None:
        external_disk = os.path.join(test.virtdir, external_disk)
        commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk)

    try:
        # Start qemu-ga on guest if have --quiesce
        if options.find("quiesce") >= 0:
            if vm.is_alive():
                vm.destroy()
            virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
            virt_xml_obj.set_agent_channel(vm_name)
            vm.start()
            if start_ga == "yes":
                session = vm.wait_for_login()

                # Check if qemu-ga already started automatically
                cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
                stat_install = session.cmd_status(cmd, 300)
                if stat_install != 0:
                    raise error.TestFail("Fail to install qemu-guest-agent, make"
                                         "sure that you have usable repo in guest")

                # Check if qemu-ga already started
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if stat_ps != 0:
                    session.cmd("qemu-ga -d")
                    # Check if the qemu-ga really started
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if stat_ps != 0:
                        raise error.TestFail("Fail to run qemu-ga in guest")

        if domain_state == "paused":
            virsh.suspend(vm_name)

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True, debug=True)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    raise error.TestFail("Run successfully with wrong command!")
                else:
                    # Check memspec file should be removed if failed
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            raise error.TestFail("Run failed but file %s exist"
                                                 % option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec file"
                                         " already beed removed")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    raise error.TestFail("Run failed with right command: %s"
                                         % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)

                    check_snapslist(vm_name, options, option_dict, output,
                                    snaps_before, snaps_list)

    finally:
        # Environment clean
        # NOTE(review): 'session' is only bound when the quiesce path ran to
        # wait_for_login(); an early failure before that raises NameError here.
        if options.find("quiesce") >= 0 and start_ga == "yes":
            session.cmd("rpm -e qemu-guest-agent")

        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            raise error.TestFail("Still can find snapshot metadata")

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.backends import ModelBackend\n\nfrom google.appengine.api import users\n\n\nclass GoogleAccountBackend(ModelBackend):\... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.backends import ModelBackend\n\nfrom google.appengine.api import users\n\n\nclass GoogleAccountBackend... | ```python
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.backends import ModelBackend
from google.appengine.api import users
class GoogleAccountBackend(ModelBackend):
    """
    backend for authentication via Google Accounts on Google
    App Engine

    A Django auth.contrib.models.User object is linked to
    a Google Account via the password field, that stores
    the unique Google Account ID

    The Django User object is created the first time a user logs
    in with his Google Account.
    """

    def authenticate(self, **credentials):
        """Return the Django User for the currently signed-in Google Account.

        Returns None when nobody is signed in, or when the account is not
        listed in settings.ALLOWED_USERS (if that setting exists).
        """
        g_user = users.get_current_user()
        if g_user is None:
            return None

        username = g_user.email().split('@')[0]

        # Optional allow-list: reject accounts not explicitly whitelisted.
        if hasattr(settings, 'ALLOWED_USERS'):
            if username not in settings.ALLOWED_USERS:
                return None

        try:
            # The unique Google Account ID is stored in the password field.
            user = User.objects.get(password=g_user.user_id())
            # BUG FIX: the original used 'is not' here, which compares string
            # identity, not equality — it was effectively always True and
            # rewrote the row on every login. Compare values instead.
            if user.email != g_user.email():
                user.email = g_user.email()
                user.username = username
                user.save()
            return user
        except User.DoesNotExist:
            # First login: create the Django user and link it to the Google
            # Account via the password field.
            user = User.objects.create_user(username, g_user.email())
            user.password = g_user.user_id()
            if users.is_current_user_admin():
                user.is_staff = True
                user.is_superuser = True
            # Persist the password-field link for admins and non-admins alike.
            user.save()
            return user
``` |
[
{
"content": "Replicate the source code:\n```python\nimport win32pipe\r\nimport win32console\r\nimport win32process\r\nimport time\r\nimport win32con\r\nimport codecs\r\nimport ctypes \r\nuser32 = ctypes.windll.user32\r\n\r\nCONQUE_WINDOWS_VK = {\r\n '3' : win32con.VK_CANCEL,\r\n '8' : win32con.VK_BACK,... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nimport win32pipe\r\nimport win32console\r\nimport win32process\r\nimport time\r\nimport win32con\r\nimport codecs\r\nimport ctypes \r\nuser32 = ctypes.windll.user32\r\n\r\nCONQUE_WINDOWS_VK = {\r\n '3' : win32con.VK_CANCEL,\r\n '8' : w... | ```python
import win32pipe
import win32console
import win32process
import time
import win32con
import codecs
import ctypes
user32 = ctypes.windll.user32
CONQUE_WINDOWS_VK = {
'3' : win32con.VK_CANCEL,
'8' : win32con.VK_BACK,
'9' : win32con.VK_TAB,
'12' : win32con.VK_CLEAR,
'13' : win32con.VK_RETURN,
'17' : win32con.VK_CONTROL,
'20' : win32con.VK_CAPITAL,
'27' : win32con.VK_ESCAPE,
'28' : win32con.VK_CONVERT,
'35' : win32con.VK_END,
'36' : win32con.VK_HOME,
'37' : win32con.VK_LEFT,
'38' : win32con.VK_UP,
'39' : win32con.VK_RIGHT,
'40' : win32con.VK_DOWN,
'45' : win32con.VK_INSERT,
'46' : win32con.VK_DELETE,
'47' : win32con.VK_HELP
}
def make_input_key(c, control_key_state=None):
    """Translate one received character into a console key-down input record.

    :param c: a one-character string.
    :param control_key_state: unused; kept for interface compatibility.
    :return: a PyINPUT_RECORDType key event, or None for Ctrl-C (ASCII 3),
        which is delivered as a real console control event instead.
    """
    kc = win32console.PyINPUT_RECORDType (win32console.KEY_EVENT)
    kc.KeyDown = True
    kc.RepeatCount = 1

    cnum = ord(c)

    if cnum == 3:
        # Ctrl-C cannot be faked as a keystroke: raise the genuine console
        # control event for the attached process group.
        # NOTE(review): pid_list is unused, and this branch returns None —
        # callers must not blindly write the result to the console input.
        pid_list = win32console.GetConsoleProcessList()
        win32console.GenerateConsoleCtrlEvent(win32con.CTRL_C_EVENT, 0)
        return
    else:
        kc.Char = unicode(c)  # Python 2 API: the console wants a unicode char
        if str(cnum) in CONQUE_WINDOWS_VK:
            # Navigation/control characters map to fixed virtual key codes.
            kc.VirtualKeyCode = CONQUE_WINDOWS_VK[str(cnum)]
        else:
            # Printable characters: let Windows derive the VK code.
            kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum)
            #kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum+96)
            #kc.ControlKeyState = win32con.LEFT_CTRL_PRESSED
    return kc
#win32console.AttachConsole()
coord = win32console.PyCOORDType

con_stdout = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
con_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)

# Spawn a hidden cmd.exe whose console we drive with synthesized key events.
flags = win32process.NORMAL_PRIORITY_CLASS
si = win32process.STARTUPINFO()
si.dwFlags |= win32con.STARTF_USESHOWWINDOW
(handle1, handle2, i1, i2) = win32process.CreateProcess(None, "cmd.exe", None, None, 0, flags, None, '.', si)
time.sleep(1)

#size = con_stdout.GetConsoleScreenBufferInfo()['Window']
# with codecs.open("log.txt", "w", "utf8") as f:
#     for i in xrange(0, size.Bottom):
#         f.write(con_stdout.ReadConsoleOutputCharacter(size.Right+1, coord(0, i)))
#         f.write("\n")

import socket

# Accept a single local client and relay each received byte as a keystroke.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
HOST = "127.0.0.1"
PORT = 5554
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(1)
(sc, scname) = s.accept()

while True:
    msg = sc.recv(1)
    # BUG FIX: recv() returns '' when the peer disconnects; ord('') raises
    # TypeError. Treat disconnect like the explicit NUL terminator.
    if not msg or ord(msg) == 0:
        break
    # BUG FIX: make_input_key() returns None for Ctrl-C (it raises the
    # console control event itself). The original wrapped the result in a
    # list, so 'if keys:' was always truthy and None got written to the
    # console input buffer.
    key = make_input_key(msg)
    if key is not None:
        con_stdin.WriteConsoleInput([key])

win32process.TerminateProcess(handle1, 0)
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\nimport tornado.websocket\n\nfrom Server.Chat.SimpleTypes import Participant\nfrom Server.Tools.Response.Json import *\n\nclass ChatWebSocketManager(tornado.websocket.WebSocketHandler):\n \"\"\"\n The main chat socket... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\nimport tornado.websocket\n\nfrom Server.Chat.SimpleTypes import Participant\nfrom Server.Tools.Response.Json import *\n\nclass ChatWebSocketManager(tornado.websocket.WebSocketHandler):\n \"\"\"\n The ... | ```python
import tornado.websocket
from Server.Chat.SimpleTypes import Participant
from Server.Tools.Response.Json import *
class ChatWebSocketManager(tornado.websocket.WebSocketHandler):
    """
    The main chat socket manager, which handles both server commands
    from the user as well as chat messages themselves.
    """

    def initialize(self, appManager):
        # Tornado calls this once per connection; cache the shared managers.
        self._participantsManager = appManager.getParticipantsManager()
        self._roomManager = appManager.getRoomManager()

    def _handleServerCommand(self, command, args):
        # Machine-issued commands (JSON payloads with 'serverCommand' set).
        if command == "getNicks":
            responseData = {
                "roomName": args["roomName"],
                "nicks": self._roomManager.getRoom(args["roomName"]).getParticipantNicks()
            }
            self.write_message(getServerCommandResponseJson(command, responseData))

    def _handleUserCommand(self, command, commandArgument):
        """
        Handle server commands such as for joining rooms, connecting
        to the server etc.

        :param command: The command string itself, e.g. 'join'
        :param commandArgument: The argument to the command.
        :return: void
        """
        if command == "/join":
            participant = self._participantsManager.getParticipantBySocket(self)
            if self._roomManager.roomExists(commandArgument):
                room = self._roomManager.getRoom(commandArgument)
                # Don't let them join again if they're already in the room.
                if room.hasParticipant(self):
                    self.write_message(getServerMessageJson("alreadyInRoom", "error"))
                else:
                    room.addParticipant(participant)
                    self.write_message(getServerMessageJson("roomJoinSuccess"))
            else:
                # Joining an unknown room implicitly creates it.
                self._roomManager.createRoom(commandArgument, [participant])
                self.write_message(getServerMessageJson("roomCreateSuccess"))
        elif command == "/leave" and self._roomManager.roomExists(commandArgument):
            room = self._roomManager.getRoom(commandArgument)
            room.removeParticipant(self)
            self.write_message(getServerMessageJson("roomLeaveSuccess"))
        elif command == "/connect":
            # Register this socket under the requested nick.
            self._participantsManager.addParticipant(Participant(nick = commandArgument, socket = self))
            self.write_message(getServerMessageJson("connectSuccess"))
            print(str.format("New participant, adding to participants manager. Total participants: {0}", self._participantsManager.participantsCount()))
        else:
            self.write_message(getServerMessageJson("Unknown command", "error"))

    def _handleChatMessage(self, room, message):
        # Relay a plain chat line to everyone in the room.
        self._roomManager.getRoom(room).broadcastMessage(message, self._participantsManager.getParticipantBySocket(self))

    def open(self):
        print("New client connection.")

    def on_message(self, message):
        """
        Called when data is received from the client on this socket.

        :param message: The data received (should be JSON in our case).
        :return: void
        """
        incomingData = json.loads(message)

        # Check for a server command, e.g. get nick list for room.
        if incomingData.get("serverCommand"):
            self._handleServerCommand(incomingData["command"], incomingData["args"])
        # Check if it's a user command.
        elif incomingData["message"][0] == "/":
            messageParts = incomingData["message"].split(" ")
            self._handleUserCommand(messageParts[0], messageParts[1])
        else:
            # Regular message, broadcast it to the room.
            self._handleChatMessage(incomingData["room"], incomingData["message"])

    def on_close(self):
        # Remove the participant from every room and from the registry.
        for room in self._roomManager.getRooms().keys():
            self._roomManager.getRooms()[room].removeParticipant(self)
        self._participantsManager.removeParticipant(self)
        print(str.format("Participant left. Total participants: {0}", self._participantsManager.participantsCount()))

    def check_origin(self, origin):
        # TODO: If you use this for anything serious then change this
        # to check against a real origin for your domain.
        return True
``` |
[
{
"content": "Repeat the code exactly:\n```python\n# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:\n# conveyor/src/main/python/conveyor/client/__init__.py\n#\n# conveyor - Printing dispatch engine for 3D objects and their friends.\n# Copyright © 2012 Matthew W. Samsonoff <matthew.samsonoff@makerbot.com>\n#\n#... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:\n# conveyor/src/main/python/conveyor/client/__init__.py\n#\n# conveyor - Printing dispatch engine for 3D objects and their friends.\n# Copyright © 2012 Matthew W. Samsonoff <matthew.samsonoff@mak... | ```python
# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
# conveyor/src/main/python/conveyor/client/__init__.py
#
# conveyor - Printing dispatch engine for 3D objects and their friends.
# Copyright © 2012 Matthew W. Samsonoff <matthew.samsonoff@makerbot.com>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, print_function, unicode_literals)
import itertools
import json
import logging
import os.path
import socket
import sys
import tempfile
import textwrap
import time
import conveyor.arg
import conveyor.domain
import conveyor.job
import conveyor.jsonrpc
import conveyor.main
import conveyor.slicer
import conveyor.machine.port
import conveyor.main
import conveyor.task
from conveyor.decorator import args, command
class _ClientCommand(conveyor.main.Command):
    '''Base class for client-side commands.

    Provides helpers that resolve the driver and profile names, preferring
    the command line and falling back to the configuration file.
    '''

    def _get_driver_name(self):
        # Command-line value wins; otherwise fall back to the config file.
        from_args = self._parsed_args.driver_name
        if from_args is not None:
            return from_args
        return self._config.get('client', 'driver')

    def _get_profile_name(self):
        # Same precedence as _get_driver_name: arguments before config.
        from_args = self._parsed_args.profile_name
        if from_args is not None:
            return from_args
        return self._config.get('client', 'profile')
class _JsonRpcCommand(_ClientCommand):
'''
A client command that requires a JSON-RPC connection to the conveyor
service.
'''
def __init__(self, parsed_args, config):
_ClientCommand.__init__(self, parsed_args, config)
self._jsonrpc = None
self._stop = False
self._code = 0
def run(self):
address = self._config.get('common', 'address')
try:
self._connection = address.connect()
except EnvironmentError as e:
self._code = 1
self._log.critical(
'failed to connect to address: %s: %s',
address, e.strerror, exc_info=True)
if not self._pid_file_exists():
self._log.critical(
'pid file missing; is the conveyor service running?')
else:
self._jsonrpc = conveyor.jsonrpc.JsonRpc(
self._connection, self._connection)
self._export_methods()
hello_task = self._jsonrpc.request('hello', {})
hello_task.stoppedevent.attach(
self._guard_callback(self._hello_callback))
hello_task.start()
self._jsonrpc.run()
return self._code
def _pid_file_exists(self):
pid_file = self._config.get('common', 'pid_file')
result = os.path.exists(pid_file)
return result
def _export_methods(self):
'''
Export JSON-RPC methods to the conveyor service. The default
implementation does not export any methods.
'''
def _guard_callback(self, callback):
'''
Creates a new callback that invokes `_check_task` and then invokes
`callback` only if `_check_task` returns `True`. This reduces some
repetitive code.
'''
def guard(task):
if self._check_task(task):
def func():
try:
callback(task)
except:
self._stop_jsonrpc()
raise
conveyor.error.guard(self._log, func)
return guard
def _check_task(self, task):
'''
Returns whether or not a task ended successfully. It terminates the
client if the task failed or was canceled.
'''
if conveyor.task.TaskConclusion.ENDED == task.conclusion:
result = True
elif conveyor.task.TaskConclusion.FAILED == task.conclusion:
self._code = 1
self._log.error('%s', task.failure)
self._stop_jsonrpc()
result = False
elif conveyor.task.TaskConclusion.CANCELED == task.conclusion:
self._code = 1
self._log.warning('canceled')
self._stop_jsonrpc()
result = False
else:
self._stop_jsonrpc()
raise ValueError(task.conclusion)
return result
    def _stop_jsonrpc(self):
        '''Stop the JSON-RPC connection. This will end the client.'''
        # Set the flag before stopping: in-flight notifications (e.g. the
        # 'jobchanged' handler) check self._stop and must see it first.
        self._stop = True
        self._jsonrpc.stop()
    def _hello_callback(self, hello_task):
        '''
        A callback invoked after the command successfully invokes `hello` on
        the conveyor service. This callback can be used to invoke additional
        methods on the conveyor service.

        Subclasses must override this method.
        '''
        raise NotImplementedError
class _MethodCommand(_JsonRpcCommand):
    '''
    A client command that invokes a single JSON-RPC request on the
    conveyor service once the 'hello' handshake has succeeded.
    '''
    def _hello_callback(self, hello_task):
        # Handshake done; fire the actual request and route its completion
        # through the guarded method callback.
        task = self._create_method_task()
        task.stoppedevent.attach(
            self._guard_callback(self._method_callback))
        task.start()

    def _create_method_task(self):
        '''
        Create the task for the request to invoke on the conveyor service.
        Subclasses must override this method.
        '''
        raise NotImplementedError

    def _method_callback(self, method_task):
        '''
        Invoked when the request returns. Used to handle the result of the
        request, handle errors, and invoke additional service methods.
        Subclasses must override this method.
        '''
        raise NotImplementedError
class _QueryCommand(_MethodCommand):
    '''
    A client command that issues one JSON-RPC request and processes its
    result before shutting the client down.
    '''
    def _method_callback(self, method_task):
        # Hand the payload to the subclass, then end the client.
        self._handle_result(method_task.result)
        self._stop_jsonrpc()

    def _handle_result(self, result):
        '''Handle the result of the query. Subclasses must override.'''
        raise NotImplementedError
@args(conveyor.arg.json)
class _JsonCommand(_QueryCommand):
    '''
    A client command that issues one JSON-RPC request and prints the
    result, either raw (when --json was passed) or in a human-readable
    form chosen by the subclass.
    '''
    def _handle_result(self, result):
        if not self._parsed_args.json:
            self._handle_result_default(result)
        else:
            self._handle_result_json(result)

    def _handle_result_json(self, result):
        '''Print the result as raw JSON, followed by a newline.'''
        json.dump(result, sys.stdout)
        print()

    def _handle_result_default(self, result):
        '''
        Print the result in a form other than raw JSON. Subclasses must
        override this method.
        '''
        raise NotImplementedError
class _MonitorCommand(_MethodCommand):
    '''
    A client command that invokes a JSON-RPC request on the conveyor service
    and waits for a job to complete. The request must return a job id.
    '''
    def __init__(self, parsed_args, config):
        _MethodCommand.__init__(self, parsed_args, config)
        # Id of the job we are monitoring; set in _method_callback. Until
        # then _job_changed ignores all job events.
        self._job_id = None
    def _export_methods(self):
        # Let the service push job-change notifications to us.
        self._jsonrpc.addmethod('jobchanged', self._job_changed)
    def _job_changed(self, *args, **kwargs):
        '''
        Invoked by the conveyor service to inform the client that a job has
        changed.
        '''
        job = conveyor.job.JobInfo.from_dict(kwargs)
        job_id = job.id
        # Only react to events for our own job, and only while the client
        # has not already started shutting down.
        if (not self._stop and None is not self._job_id
            and self._job_id == job_id):
            # Only a STOPPED job is terminal; intermediate states are ignored.
            if conveyor.task.TaskState.STOPPED == job.state:
                if conveyor.task.TaskConclusion.ENDED == job.conclusion:
                    self._code = 0
                    self._log.info('job ended')
                elif conveyor.task.TaskConclusion.FAILED == job.conclusion:
                    self._code = 1
                    self._log.error('job failed: %s', job.failure)
                elif conveyor.task.TaskConclusion.CANCELED == job.conclusion:
                    self._code = 1
                    self._log.warning('job canceled')
                else:
                    raise ValueError(job.conclusion)
                self._stop_jsonrpc()
    def _method_callback(self, method_task):
        # The request must have returned a dict containing the job id;
        # anything else is treated as a protocol error.
        if (None is not method_task.result
            and isinstance(method_task.result, dict)
            and 'id' in method_task.result):
            self._job_id = method_task.result['id']
        else:
            self._code = 1
            self._log.error(
                'the conveyor service returned invalid job information')
            self._stop_jsonrpc()
@args(conveyor.arg.driver)
@args(conveyor.arg.machine)
@args(conveyor.arg.port)
@args(conveyor.arg.profile)
class _ConnectedCommand(_MonitorCommand):
    '''
    A client command that connects a machine, invokes a JSON-RPC request on
    the conveyor service, and waits for the resulting job to complete. The
    request must return a job id.

    This is a `_MonitorCommand` that calls `connect` on the conveyor
    service before invoking the job-related method. `connect` must return a
    `MachineInfo` object with a `name` field; the machine's name is stored
    in the `_machine_name` instance field for use by subclasses.
    '''
    def __init__(self, parsed_args, config):
        _MonitorCommand.__init__(self, parsed_args, config)
        self._machine_name = None

    def _hello_callback(self, hello_task):
        # NOTE: the driver/profile names are passed straight through rather
        # than via `_get_driver_name`/`_get_profile_name`, as they can often
        # be detected automatically by the service.
        cli = self._parsed_args
        connect_task = self._jsonrpc.request('connect', {
            'machine_name': cli.machine_name,
            'port_name': cli.port_name,
            'driver_name': cli.driver_name,
            'profile_name': cli.profile_name,
            'persistent': False,
        })
        connect_task.stoppedevent.attach(
            self._guard_callback(self._connect_callback))
        connect_task.start()

    def _connect_callback(self, connect_task):
        # Remember which machine we connected to, then fire the real job.
        self._machine_name = connect_task.result['name']
        task = self._create_method_task()
        task.stoppedevent.attach(
            self._guard_callback(self._method_callback))
        task.start()
@args(conveyor.arg.positional_job)
class CancelCommand(_MethodCommand):
    '''Ask the conveyor service to cancel a running job.'''
    name = 'cancel'
    help = 'cancel a job'

    def _create_method_task(self):
        # The service identifies the job to cancel by its id.
        return self._jsonrpc.request(
            'canceljob', {'id': self._parsed_args.job_id})

    def _method_callback(self, method_task):
        # Nothing to report; a successful cancel simply ends the client.
        self._stop_jsonrpc()
@args(conveyor.arg.driver)
@args(conveyor.arg.positional_firmware_version)
class CompatibleFirmware(_QueryCommand):
    '''Check whether a firmware version is compatible with a driver.'''
    name = 'compatiblefirmware'
    # Bug fix: the user-visible help text misspelled "version" and
    # "compatible" ("verison is comatible").
    help = 'determine if a firmware version is compatible with the MakerBot driver'
    def _create_method_task(self):
        params = {
            'driver_name': self._get_driver_name(),
            'firmware_version': self._parsed_args.firmware_version,
        }
        method_task = self._jsonrpc.request('compatiblefirmware', params)
        return method_task
    def _handle_result(self, result):
        # The service answers with a truthy/falsy compatibility flag.
        print('Your firmware version is compatible: %r' % (result,))
@args(conveyor.arg.driver)
@args(conveyor.arg.machine)
@args(conveyor.arg.port)
@args(conveyor.arg.profile)
class ConnectCommand(_MethodCommand):
    '''Open a persistent connection to a machine.'''
    name = 'connect'
    help = 'connect to a machine'

    def _create_method_task(self):
        cli = self._parsed_args
        return self._jsonrpc.request('connect', {
            'machine_name': cli.machine_name,
            'port_name': cli.port_name,
            'driver_name': self._get_driver_name(),
            'profile_name': self._get_profile_name(),
            'persistent': True,
        })

    def _method_callback(self, method_task):
        # Success needs no output; just end the client.
        self._stop_jsonrpc()
@args(conveyor.arg.positional_output_file_optional)
class DefaultConfigCommand(_ClientCommand):
    '''Emit the platform's default conveyor configuration.'''
    name = 'defaultconfig'
    help = 'print the platform\'s default conveyor configuration'

    def run(self):
        output_file = self._parsed_args.output_file
        if output_file is not None:
            # Write the default configuration to the requested file.
            with open(output_file, 'w') as fp:
                conveyor.config.format_default(fp)
        else:
            # No output file requested: print to standard output.
            conveyor.config.format_default(sys.stdout)
        return 0
class DirCommand(_JsonCommand):
    '''List and describe the methods exposed by the conveyor service.'''
    name = 'dir'
    help = 'list the methods available from the conveyor service'

    def _create_method_task(self):
        return self._jsonrpc.request('dir', {})

    def _handle_result_default(self, result):
        def blank(line):
            return 0 == len(line) or line.isspace()
        for method_name, description in result.items():
            raw_lines = textwrap.dedent(description).splitlines()
            # Strip blank lines from the end of the description; this pass
            # leaves the lines in reverse order.
            trimmed = list(itertools.dropwhile(blank, reversed(raw_lines)))
            # Strip blank lines from the start; reversing again also
            # restores the original forward order.
            trimmed = list(itertools.dropwhile(blank, reversed(trimmed)))
            self._log.info('%s:', method_name)
            for line in trimmed:
                self._log.info(' %s', line)
@args(conveyor.arg.machine)
class DisconnectCommand(_MethodCommand):
    '''Disconnect from a machine.'''
    name = 'disconnect'
    help = 'disconnect from a machine'

    def _create_method_task(self):
        return self._jsonrpc.request(
            'disconnect', {'machine_name': self._parsed_args.machine_name})

    def _method_callback(self, method_task):
        # Nothing to report on success.
        self._stop_jsonrpc()
@args(conveyor.arg.driver)
@args(conveyor.arg.machine_type)
@args(conveyor.arg.firmware_version)
class DownloadFirmware(_QueryCommand):
    '''Download a firmware image for a machine type.'''
    name = 'downloadfirmware'
    help = 'download firmware'

    def _create_method_task(self):
        cli = self._parsed_args
        return self._jsonrpc.request('downloadfirmware', {
            'driver_name': self._get_driver_name(),
            'machine_type': cli.machine_type,
            'firmware_version': cli.firmware_version,
        })

    def _handle_result(self, result):
        # The service answers with the local path of the downloaded file.
        self._log.info('firmware downloaded to: %s', result)
@args(conveyor.arg.positional_driver)
class DriverCommand(_JsonCommand):
    '''Show the details for a single driver.'''
    name = 'driver'
    help = 'get the details for a driver'

    def _create_method_task(self):
        return self._jsonrpc.request(
            'get_driver', {'driver_name': self._get_driver_name()})

    def _handle_result_default(self, result):
        # The shared printer expects a list of drivers.
        _print_driver_profiles(self._log, [result])
class DriversCommand(_JsonCommand):
    '''List every driver known to the conveyor service.'''
    name = 'drivers'
    help = 'list the available drivers'

    def _create_method_task(self):
        return self._jsonrpc.request('get_drivers', {})

    def _handle_result_default(self, result):
        _print_driver_profiles(self._log, result)
@args(conveyor.arg.driver)
@args(conveyor.arg.machine_type)
class GetMachineVersions(_QueryCommand):
    '''Query the firmware versions available for a machine type.'''
    name = 'getmachineversions'
    help = 'get the firmware versions available for a machine'

    def _create_method_task(self):
        return self._jsonrpc.request('getmachineversions', {
            'driver_name': self._get_driver_name(),
            'machine_type': self._parsed_args.machine_type,
        })

    def _handle_result(self, result):
        self._log.info('%s', result)
@args(conveyor.arg.driver)
class GetUploadableMachines(_QueryCommand):
    '''List machines to which conveyor can upload firmware.'''
    name = 'getuploadablemachines'
    help = 'list the machines to which conveyor can upload firmware'

    def _create_method_task(self):
        return self._jsonrpc.request(
            'getuploadablemachines',
            {'driver_name': self._get_driver_name()})

    def _handle_result(self, result):
        print(result)
@args(conveyor.arg.positional_job)
class JobCommand(_JsonCommand):
    '''Show the details of a single job.'''
    name = 'job'
    help = 'get the details for a job'

    def _create_method_task(self):
        # The job id arrives as a string from the command line.
        return self._jsonrpc.request(
            'getjob', {'id': int(self._parsed_args.job_id)})

    def _handle_result_default(self, result):
        self._log.info('%s', result)
class JobsCommand(_JsonCommand):
    '''Show the details of every job known to the service.'''
    name = 'jobs'
    help = 'get the details for all jobs'

    def _create_method_task(self):
        return self._jsonrpc.request('getjobs', {})

    def _handle_result_default(self, result):
        self._log.info('%s', result)
class PauseCommand(_ConnectedCommand):
    '''Pause the connected machine.'''
    name = 'pause'
    help = 'pause a machine'

    def _create_method_task(self):
        cli = self._parsed_args
        return self._jsonrpc.request('pause', {
            'machine_name': cli.machine_name,
            'port_name': cli.port_name,
            'driver_name': self._get_driver_name(),
            'profile_name': self._get_profile_name(),
        })
class PortsCommand(_JsonCommand):
    '''List the ports known to the conveyor service.'''
    name = 'ports'
    help = 'list the available ports'

    def _create_method_task(self):
        return self._jsonrpc.request('getports', {})

    def _handle_result_default(self, result):
        for port in result:
            # Serial is currently the only recognized port type.
            if conveyor.machine.port.PortType.SERIAL != port['type']:
                raise ValueError(port['type'])
            self._handle_serial(port)

    def _handle_serial(self, port):
        info = self._log.info
        info('Serial port:')
        info(' name - %s', port['name'])
        info(' path - %s', port['path'])
        info(' iSerial - %s', port['iserial'])
        info(' VID:PID - %04X:%04X', port['vid'], port['pid'])
@args(conveyor.arg.extruder)
@args(conveyor.arg.gcode_processor)
@args(conveyor.arg.has_start_end)
@args(conveyor.arg.material)
@args(conveyor.arg.slicer)
@args(conveyor.arg.slicer_settings)
@args(conveyor.arg.positional_input_file)
class PrintCommand(_ConnectedCommand):
    '''Print an object on the machine connected by _ConnectedCommand.'''
    name = 'print'
    help = 'print an object'

    def _create_method_task(self):
        cli = self._parsed_args
        settings = _create_slicer_settings(cli, self._config)
        settings.path = cli.slicer_settings_path
        return self._jsonrpc.request('print', {
            # _machine_name was recorded by the connect step.
            'machine_name': self._machine_name,
            'input_file': cli.input_file,
            'extruder_name': _fix_extruder_name(cli.extruder_name),
            'gcode_processor_name': cli.gcode_processor_name,
            'has_start_end': cli.has_start_end,
            'material_name': cli.material_name,
            'slicer_name': cli.slicer_name,
            'slicer_settings': settings.to_dict(),
        })
@args(conveyor.arg.driver)
@args(conveyor.arg.extruder)
@args(conveyor.arg.gcode_processor)
@args(conveyor.arg.file_type)
@args(conveyor.arg.has_start_end)
@args(conveyor.arg.material)
@args(conveyor.arg.profile)
@args(conveyor.arg.slicer)
@args(conveyor.arg.slicer_settings)
@args(conveyor.arg.positional_input_file)
@args(conveyor.arg.positional_output_file)
class PrintToFileCommand(_MonitorCommand):
    '''Print an object to an .s3g/.x3g file instead of a real machine.'''
    name = 'printtofile'
    help = 'print an object to an .s3g or .x3g file'

    def _create_method_task(self):
        cli = self._parsed_args
        settings = _create_slicer_settings(cli, self._config)
        settings.path = cli.slicer_settings_path
        return self._jsonrpc.request('print_to_file', {
            'driver_name': self._get_driver_name(),
            'profile_name': self._get_profile_name(),
            'input_file': cli.input_file,
            'output_file': cli.output_file,
            'extruder_name': _fix_extruder_name(cli.extruder_name),
            'file_type': cli.file_type,
            'gcode_processor_name': cli.gcode_processor_name,
            'has_start_end': cli.has_start_end,
            'material_name': cli.material_name,
            'slicer_name': cli.slicer_name,
            'slicer_settings': settings.to_dict(),
        })
class PrintersCommand(_JsonCommand):
    '''List the printers currently connected to the service.'''
    name = 'printers'
    help = 'list connected printers'

    def _create_method_task(self):
        return self._jsonrpc.request('getprinters', {})

    def _handle_result_default(self, result):
        # TODO: stop being lazy and add the rest of the fields.
        info = self._log.info
        for machine in result:
            info('Printer:')
            info(' name - %s', machine['name'])
            info(' state - %s', machine['state'])
            info(' temperature - %s', machine['temperature'])
            info(' firmware - %s', machine['firmware_version'])
@args(conveyor.arg.positional_driver)
@args(conveyor.arg.positional_profile)
class ProfileCommand(_JsonCommand):
    '''Show the details for a single profile of a driver.'''
    name = 'profile'
    help = 'get the details for a profile'

    def _create_method_task(self):
        return self._jsonrpc.request('get_profile', {
            'driver_name': self._get_driver_name(),
            'profile_name': self._get_profile_name(),
        })

    def _handle_result_default(self, result):
        # Wrap the single profile in the driver/profiles shape expected by
        # the shared printer helper.
        wrapper = {
            'name': self._parsed_args.driver_name,
            'profiles': [result],
        }
        _print_driver_profiles(self._log, [wrapper])
@args(conveyor.arg.positional_driver)
class ProfilesCommand(_JsonCommand):
    '''List the profiles provided by a driver.'''
    name = 'profiles'
    help = 'list the available profiles'

    def _create_method_task(self):
        return self._jsonrpc.request(
            'get_profiles', {'driver_name': self._get_driver_name()})

    def _handle_result_default(self, result):
        # Wrap the profile list in the driver/profiles shape expected by
        # the shared printer helper.
        wrapper = {
            'name': self._parsed_args.driver_name,
            'profiles': result,
        }
        _print_driver_profiles(self._log, [wrapper])
@args(conveyor.arg.machine)
@args(conveyor.arg.positional_output_file)
class ReadEepromCommand(_QueryCommand):
    '''Read a machine's EEPROM and save it as JSON.'''
    name = 'readeeprom'
    help = 'read a machine EEPROM'

    def _create_method_task(self):
        return self._jsonrpc.request(
            'readeeprom', {'printername': self._parsed_args.machine_name})

    def _handle_result(self, result):
        # Persist the EEPROM map in a stable, human-diffable JSON form.
        path = os.path.abspath(self._parsed_args.output_file)
        with open(path, 'w') as fp:
            json.dump(result, fp, sort_keys=True, indent=2)
class ResetToFactoryCommand(_QueryCommand):
    '''Reset a machine EEPROM to its factory settings.'''
    name = 'resettofactory'
    help = 'reset a machine EEPROM to factory settings'

    def _create_method_task(self):
        # printername is None here — presumably the service then targets
        # the default/only printer; confirm against the service side.
        return self._jsonrpc.request(
            'resettofactory', {'printername': None})

    def _handle_result(self, result):
        # Nothing useful comes back on success.
        pass
@args(conveyor.arg.add_start_end)
@args(conveyor.arg.driver)
@args(conveyor.arg.extruder)
@args(conveyor.arg.gcode_processor)
@args(conveyor.arg.material)
@args(conveyor.arg.profile)
@args(conveyor.arg.slicer)
@args(conveyor.arg.slicer_settings)
@args(conveyor.arg.positional_input_file)
@args(conveyor.arg.positional_output_file)
class SliceCommand(_MonitorCommand):
    '''Slice an object into a .gcode file.'''
    name = 'slice'
    help = 'slice an object to a .gcode file'

    def _create_method_task(self):
        cli = self._parsed_args
        settings = _create_slicer_settings(cli, self._config)
        settings.path = cli.slicer_settings_path
        return self._jsonrpc.request('slice', {
            'driver_name': self._get_driver_name(),
            'profile_name': self._get_profile_name(),
            'input_file': cli.input_file,
            'output_file': cli.output_file,
            'add_start_end': cli.add_start_end,
            'extruder_name': _fix_extruder_name(cli.extruder_name),
            'gcode_processor_name': cli.gcode_processor_name,
            'material_name': cli.material_name,
            'slicer_name': cli.slicer_name,
            'slicer_settings': settings.to_dict(),
        })
class UnpauseCommand(_ConnectedCommand):
    '''Resume a paused machine.'''
    name = 'unpause'
    help = 'unpause a machine'

    def _create_method_task(self):
        cli = self._parsed_args
        return self._jsonrpc.request('unpause', {
            'machine_name': cli.machine_name,
            'port_name': cli.port_name,
            'driver_name': self._get_driver_name(),
            'profile_name': self._get_profile_name(),
        })
@args(conveyor.arg.machine_type)
@args(conveyor.arg.positional_input_file)
class UploadFirmwareCommand(_QueryCommand):
    '''Upload a firmware image to a machine.'''
    name = 'uploadfirmware'
    help = 'upload firmware'

    def _create_method_task(self):
        # NOTE(review): this request uses 'machinetype'/'filename' keys,
        # unlike the snake_case keys used by the other requests —
        # presumably the service expects these exact names; confirm.
        cli = self._parsed_args
        return self._jsonrpc.request('uploadfirmware', {
            'machine_name': None,
            'machinetype': cli.machine_type,
            'filename': cli.input_file,
        })

    def _handle_result(self, result):
        # Nothing useful comes back on success.
        pass
@args(conveyor.arg.positional_input_file)
class VerifyS3gCommand(_QueryCommand):
    '''Verify that an .s3g/.x3g file is well formed.'''
    name = 'verifys3g'
    help = 'verify an s3g/x3g file.'
    def _create_method_task(self):
        params = {'s3gpath': self._parsed_args.input_file}
        method_task = self._jsonrpc.request('verifys3g', params)
        return method_task
    def _handle_result(self, result):
        # Bug fix: the old single template ('Your s3g file is %s valid')
        # printed a double space ("is  valid") for valid files because the
        # placeholder was filled with an empty string.
        if result is False:
            print('Your s3g file is NOT valid')
        else:
            print('Your s3g file is valid')
class WaitForServiceCommand(_ClientCommand):
    '''Poll the conveyor service address until it accepts a connection.'''
    name = 'waitforservice'
    help = 'wait for the conveyor service to start'

    def run(self):
        address = self._config.get('common', 'address')
        # Retry roughly once per second for up to 30 seconds.
        deadline = time.time() + 30.0
        while True:
            try:
                address.connect()
            except:
                if time.time() < deadline:
                    time.sleep(1.0)
                    continue
                self._log.error('failed to connect to conveyor service')
                return 1
            else:
                self._log.info('connected')
                return 0
@args(conveyor.arg.positional_input_file)
class WriteEepromCommand(_QueryCommand):
    '''Write an EEPROM map (read from a JSON file) to a machine.'''
    name = 'writeeeprom'
    help = 'write a machine EEPROM'

    def _create_method_task(self):
        # Load the EEPROM map from the JSON input file before sending it.
        path = os.path.abspath(self._parsed_args.input_file)
        with open(path) as fp:
            eeprommap = json.load(fp)
        return self._jsonrpc.request('writeeeprommap', {
            'printername': None,
            'eeprommap': eeprommap,
        })

    def _handle_result(self, result):
        # Nothing useful comes back on success.
        pass
def _fix_extruder_name(extruder_name):
if 'right' == extruder_name:
result = '0'
elif 'left' == extruder_name:
result = '1'
elif 'both' == extruder_name:
result = '0,1'
else:
raise ValueError(extruder_name)
return result
def _create_slicer_settings(parsed_args, config):
    '''Build a SlicerConfiguration from CLI arguments and client config.

    Raises ValueError for an unknown slicer or extruder name.
    '''
    known_slicers = {
        'miraclegrue': conveyor.slicer.Slicer.MIRACLEGRUE,
        'skeinforge': conveyor.slicer.Slicer.SKEINFORGE,
    }
    try:
        slicer = known_slicers[parsed_args.slicer_name]
    except KeyError:
        raise ValueError(parsed_args.slicer_name)
    def slicing(option):
        # All slicing defaults live under [client]/slicing in the config.
        return config.get('client', 'slicing', option)
    return conveyor.domain.SlicerConfiguration(
        slicer=slicer,
        extruder=_fix_extruder_name(parsed_args.extruder_name),
        raft=bool(slicing('raft')),
        support=bool(slicing('support')),
        infill=float(slicing('infill')),
        layer_height=float(slicing('layer_height')),
        shells=int(slicing('shells')),
        extruder_temperature=float(slicing('extruder_temperature')),
        platform_temperature=float(slicing('platform_temperature')),
        print_speed=float(slicing('print_speed')),
        travel_speed=float(slicing('travel_speed')),
    )
def _print_driver_profiles(log, drivers):
    '''Log a human-readable summary of drivers and their profiles.'''
    log.info('drivers:')
    for driver in drivers:
        log.info(' %s:', driver['name'])
        for profile in driver['profiles']:
            log.info(' %s:', profile['name'])
            # Fixed-order (format, value) rows for the profile details.
            rows = (
                (' X axis size - %s', profile['xsize']),
                (' Y axis size - %s', profile['ysize']),
                (' Z axis size - %s', profile['zsize']),
                (' can print - %s', profile['can_print']),
                (' can print to file - %s', profile['can_print_to_file']),
                (' heated platform - %s', profile['has_heated_platform']),
                (' number of tools - %d', profile['number_of_tools']),
            )
            for fmt, value in rows:
                log.info(fmt, value)
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n''' pydevd - a debugging daemon\nThis is the daemon you launch for python remote debugging.\n\nProtocol:\neach command has a format:\n id\\tsequence-num\\ttext\n id: protocol command number\n sequence-num: each request ... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n''' pydevd - a debugging daemon\nThis is the daemon you launch for python remote debugging.\n\nProtocol:\neach command has a format:\n id\\tsequence-num\\ttext\n id: protocol command number\n sequence-nu... | ```python
''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
payload: it is protocol dependent. When response is a complex structure, it
is returned as XML. Each attribute value is urlencoded, and then the whole
payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings
Commands:
NUMBER NAME FROM* ARGUMENTS RESPONSE NOTE
100 series: program execution
101 RUN JAVA - -
102 LIST_THREADS JAVA RETURN with XML listing of all threads
103 THREAD_CREATE PYDB - XML with thread information
104 THREAD_KILL JAVA id (or * to exit) kills the thread
PYDB id notifies JAVA that thread was killed
105 THREAD_SUSPEND JAVA XML of the stack, suspends the thread
reason for suspension
PYDB id notifies JAVA that thread was suspended
106 CMD_THREAD_RUN JAVA id resume the thread
PYDB id \t reason notifies JAVA that thread was resumed
107 STEP_INTO JAVA thread_id
108 STEP_OVER JAVA thread_id
109 STEP_RETURN JAVA thread_id
110 GET_VARIABLE JAVA thread_id \t frame_id \t GET_VARIABLE with XML of var content
FRAME|GLOBAL \t attributes*
111 SET_BREAK JAVA file/line of the breakpoint
112 REMOVE_BREAK JAVA file/line of the return
113 CMD_EVALUATE_EXPRESSION JAVA expression result of evaluating the expression
114 CMD_GET_FRAME JAVA request for frame contents
115 CMD_EXEC_EXPRESSION JAVA
116 CMD_WRITE_TO_CONSOLE PYDB
117 CMD_CHANGE_VARIABLE
118 CMD_RUN_TO_LINE
119 CMD_RELOAD_CODE
120 CMD_GET_COMPLETIONS JAVA
500 series diagnostics/ok
501 VERSION either Version string (1.0) Currently just used at startup
502 RETURN either Depends on caller -
900 series: errors
901 ERROR either - This is reserved for unexpected errors.
* JAVA - remote debugger, the java end
* PYDB - pydevd, the python end
'''
import os
from _pydev_bundle.pydev_imports import _queue
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import socket
from socket import socket, AF_INET, SOCK_STREAM, SHUT_RD, SHUT_WR, SOL_SOCKET, SO_REUSEADDR, SHUT_RDWR, timeout
from _pydevd_bundle.pydevd_constants import DebugInfoHolder, dict_contains, get_thread_id, IS_JYTHON, IS_PY2, IS_PY3K, IS_PY36_OR_GREATER, \
STATE_RUN
try:
from urllib import quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
import pydevconsole
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle import pydevd_tracing
from _pydevd_bundle import pydevd_vm_type
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER, norm_file_to_client
import sys
import traceback
from _pydevd_bundle.pydevd_utils import quote_smart as quote, compare_object_attrs, cmp_to_key, to_string
from _pydev_bundle import pydev_log
from _pydev_bundle import _pydev_completer
from _pydevd_bundle.pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle import pydevd_console
from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules
# Wire-protocol command ids (see the module docstring for the protocol
# description). The numeric value is what travels on the wire, so these
# must stay in sync with the IDE (Java) side.
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_GET_CONCURRENCY_EVENT = 145
CMD_SHOW_RETURN_VALUES = 146
CMD_INPUT_REQUESTED = 147
CMD_GET_DESCRIPTION = 148
CMD_PROCESS_CREATED = 149
CMD_SHOW_CYTHON_WARNING = 150
# 500 series: diagnostics/ok; 900 series: errors (see module docstring).
CMD_VERSION = 501
CMD_RETURN = 502
CMD_ERROR = 901
# Reverse map: command id (as a string) -> symbolic name. Must mirror the
# CMD_* constants above.
ID_TO_MEANING = {
    '101': 'CMD_RUN',
    '102': 'CMD_LIST_THREADS',
    '103': 'CMD_THREAD_CREATE',
    '104': 'CMD_THREAD_KILL',
    '105': 'CMD_THREAD_SUSPEND',
    '106': 'CMD_THREAD_RUN',
    '107': 'CMD_STEP_INTO',
    '108': 'CMD_STEP_OVER',
    '109': 'CMD_STEP_RETURN',
    '110': 'CMD_GET_VARIABLE',
    '111': 'CMD_SET_BREAK',
    '112': 'CMD_REMOVE_BREAK',
    '113': 'CMD_EVALUATE_EXPRESSION',
    '114': 'CMD_GET_FRAME',
    '115': 'CMD_EXEC_EXPRESSION',
    '116': 'CMD_WRITE_TO_CONSOLE',
    '117': 'CMD_CHANGE_VARIABLE',
    '118': 'CMD_RUN_TO_LINE',
    '119': 'CMD_RELOAD_CODE',
    '120': 'CMD_GET_COMPLETIONS',
    '121': 'CMD_CONSOLE_EXEC',
    '122': 'CMD_ADD_EXCEPTION_BREAK',
    '123': 'CMD_REMOVE_EXCEPTION_BREAK',
    '124': 'CMD_LOAD_SOURCE',
    '125': 'CMD_ADD_DJANGO_EXCEPTION_BREAK',
    '126': 'CMD_REMOVE_DJANGO_EXCEPTION_BREAK',
    '127': 'CMD_SET_NEXT_STATEMENT',
    '128': 'CMD_SMART_STEP_INTO',
    '129': 'CMD_EXIT',
    '130': 'CMD_SIGNATURE_CALL_TRACE',
    '131': 'CMD_SET_PY_EXCEPTION',
    '132': 'CMD_GET_FILE_CONTENTS',
    '133': 'CMD_SET_PROPERTY_TRACE',
    '134': 'CMD_EVALUATE_CONSOLE_EXPRESSION',
    '135': 'CMD_RUN_CUSTOM_OPERATION',
    '136': 'CMD_GET_BREAKPOINT_EXCEPTION',
    '137': 'CMD_STEP_CAUGHT_EXCEPTION',
    '138': 'CMD_SEND_CURR_EXCEPTION_TRACE',
    '139': 'CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED',
    '140': 'CMD_IGNORE_THROWN_EXCEPTION_AT',
    '141': 'CMD_ENABLE_DONT_TRACE',
    '142': 'CMD_SHOW_CONSOLE',
    '143': 'CMD_GET_ARRAY',
    '144': 'CMD_STEP_INTO_MY_CODE',
    '145': 'CMD_GET_CONCURRENCY_EVENT',
    '146': 'CMD_SHOW_RETURN_VALUES',
    '147': 'CMD_INPUT_REQUESTED',
    '148': 'CMD_GET_DESCRIPTION',
    '149': 'CMD_PROCESS_CREATED',
    '150': 'CMD_SHOW_CYTHON_WARNING',
    '501': 'CMD_VERSION',
    '502': 'CMD_RETURN',
    '901': 'CMD_ERROR',
    }
MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive)
#this number can be changed if there's need to do so
# Placeholder — presumably substituted with the real build number at build
# time; confirm against the build scripts.
VERSION_STRING = "@@BUILD_NUMBER@@"
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
file_system_encoding = getfilesystemencoding()
#--------------------------------------------------------------------------------------------------- UTILITIES
#=======================================================================================================================
# pydevd_log
#=======================================================================================================================
def pydevd_log(level, *args):
    """Write a trace message to stderr when `level` is within the
    configured DebugInfoHolder.DEBUG_TRACE_LEVEL.

    levels are:
        0 most serious warnings/errors
        1 warnings/significant events
        2 informational trace
    """
    if level > DebugInfoHolder.DEBUG_TRACE_LEVEL:
        return
    # Printing can fail once the program's console has been closed, so any
    # error here is deliberately swallowed.
    try:
        sys.stderr.write('%s\n' % (args,))
    except:
        pass
#=======================================================================================================================
# GlobalDebuggerHolder
#=======================================================================================================================
class GlobalDebuggerHolder:
    '''
    Holder for the global debugger.
    '''
    # Note: don't rename (the name is used in our attach to process)
    global_dbg = None


#=======================================================================================================================
# get_global_debugger
#=======================================================================================================================
def get_global_debugger():
    '''Return the process-wide debugger instance (or None).'''
    return GlobalDebuggerHolder.global_dbg


# Backward-compatibility alias for the old camel-case name.
GetGlobalDebugger = get_global_debugger


#=======================================================================================================================
# set_global_debugger
#=======================================================================================================================
def set_global_debugger(dbg):
    '''Install `dbg` as the process-wide debugger instance.'''
    GlobalDebuggerHolder.global_dbg = dbg
#------------------------------------------------------------------- ACTUAL COMM
#=======================================================================================================================
# PyDBDaemonThread
#=======================================================================================================================
class PyDBDaemonThread(threading.Thread):
    '''Base class for pydevd's own helper threads.

    Threads are daemonic so they never keep the debugged program alive;
    run() wraps the subclass's _on_run() with Jython state setup and
    last-resort error reporting.
    '''
    # Registry of live pydevd daemon threads (thread -> 1).
    created_pydb_daemon_threads = {}
    def __init__(self):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        # Set by do_kill_pydev_thread(); subclasses poll it to exit loops.
        self.killReceived = False
        # When True, _stop_trace() disables tracing on this thread.
        self.pydev_do_not_trace = True
        self.is_pydev_daemon_thread = True
    def run(self):
        created_pydb_daemon = self.created_pydb_daemon_threads
        created_pydb_daemon[self] = 1
        try:
            try:
                if IS_JYTHON and not isinstance(threading.currentThread(), threading._MainThread):
                    # we shouldn't update sys.modules for the main thread, cause it leads to the second importing 'threading'
                    # module, and the new instance of main thread is created
                    import org.python.core as PyCore #@UnresolvedImport
                    ss = PyCore.PySystemState()
                    # Note: Py.setSystemState() affects only the current thread.
                    PyCore.Py.setSystemState(ss)
                self._on_run()
            except:
                # The None checks likely guard against interpreter shutdown,
                # when module globals may already be cleared — confirm.
                if sys is not None and traceback is not None:
                    traceback.print_exc()
        finally:
            del created_pydb_daemon[self]
    def _on_run(self):
        '''Thread body; subclasses must override.'''
        raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)
    def do_kill_pydev_thread(self):
        #that was not working very well because jython gave some socket errors
        self.killReceived = True
    def _stop_trace(self):
        # Disable sys.settrace-based tracing for this helper thread so the
        # debugger does not trace its own machinery.
        if self.pydev_do_not_trace:
            disable_tracing = True
            if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
                # don't run untraced threads if we're in jython 2.2.1 or lower
                # jython bug: if we start a thread and another thread changes the tracing facility
                # it affects other threads (it's not set only for the thread but globally)
                # Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
                disable_tracing = False
            if disable_tracing:
                pydevd_tracing.SetTrace(None) # no debugging on this thread
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(PyDBDaemonThread):
    """ reader thread reads and dispatches commands in an infinite loop """
    def __init__(self, sock):
        # sock: connected socket to the IDE. The wire protocol is '\n'-terminated
        # lines of tab-separated fields: cmd_id '\t' seq '\t' text, utf-8 encoded.
        PyDBDaemonThread.__init__(self)
        self.sock = sock
        self.setName("pydevd.Reader")
        from _pydevd_bundle.pydevd_process_net_command import process_net_command
        self.process_net_command = process_net_command
        self.global_debugger_holder = GlobalDebuggerHolder
    def do_kill_pydev_thread(self):
        #We must close the socket so that it doesn't stay halted there.
        self.killReceived = True
        try:
            self.sock.shutdown(SHUT_RD) #shutdown the socket for read
        except:
            #just ignore that
            pass
    def _on_run(self):
        # Main receive loop: accumulate raw bytes into read_buffer, then split on
        # '\n' and dispatch each complete command. Order of operations matters:
        # the empty-buffer check below is how a closed peer (recv() == '') is
        # detected, so don't reorder these steps.
        self._stop_trace()
        read_buffer = ""
        try:
            while not self.killReceived:
                try:
                    r = self.sock.recv(1024)
                except:
                    # recv failing after a kill request is expected (socket was shut down).
                    if not self.killReceived:
                        traceback.print_exc()
                        self.handle_except()
                    return #Finished communication.
                #Note: the java backend is always expected to pass utf-8 encoded strings. We now work with unicode
                #internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames
                #on python 2 may need to be converted to the filesystem encoding).
                if hasattr(r, 'decode'):
                    r = r.decode('utf-8')
                read_buffer += r
                if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
                    sys.stderr.write('debugger: received >>%s<<\n' % (read_buffer,))
                    sys.stderr.flush()
                if len(read_buffer) == 0:
                    # recv() returned '' and nothing is pending: the peer closed the connection.
                    self.handle_except()
                    break
                while read_buffer.find('\n') != -1:
                    command, read_buffer = read_buffer.split('\n', 1)
                    args = command.split('\t', 2)
                    try:
                        cmd_id = int(args[0])
                        pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), command,))
                        self.process_command(cmd_id, int(args[1]), args[2])
                    except:
                        # A single malformed command must not kill the reader: report and keep going.
                        traceback.print_exc()
                        sys.stderr.write("Can't process net command: %s\n" % command)
                        sys.stderr.flush()
        except:
            traceback.print_exc()
            self.handle_except()
    def handle_except(self):
        # Communication with the IDE is broken: tear down the debugging session.
        self.global_debugger_holder.global_dbg.finish_debugging_session()
    def process_command(self, cmd_id, seq, text):
        # Delegates to pydevd_process_net_command with the current global debugger.
        self.process_net_command(self.global_debugger_holder.global_dbg, cmd_id, seq, text)
#----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER
#=======================================================================================================================
# WriterThread
#=======================================================================================================================
class WriterThread(PyDBDaemonThread):
    """ writer thread writes out the commands in an infinite loop """
    def __init__(self, sock):
        # sock: connected socket to the IDE. Commands (NetCommand) are queued via
        # add_command() and drained by this thread.
        PyDBDaemonThread.__init__(self)
        self.sock = sock
        self.setName("pydevd.Writer")
        self.cmdQueue = _queue.Queue()
        if pydevd_vm_type.get_vm_type() == 'python':
            self.timeout = 0
        else:
            # Non-CPython VMs get a small sleep between writes (see _on_run).
            self.timeout = 0.1
    def add_command(self, cmd):
        """ cmd is NetCommand """
        if not self.killReceived: #we don't take new data after everybody die
            self.cmdQueue.put(cmd)
    def _on_run(self):
        """ just loop and write responses """
        self._stop_trace()
        get_has_timeout = sys.hexversion >= 0x02030000 # 2.3 onwards have it.
        try:
            while True:
                try:
                    try:
                        if get_has_timeout:
                            # Block for at most 0.1s so killReceived is re-checked periodically.
                            cmd = self.cmdQueue.get(1, 0.1)
                        else:
                            time.sleep(.01)
                            cmd = self.cmdQueue.get(0)
                    except _queue.Empty:
                        if self.killReceived:
                            # Flush point: queue drained after a kill request, so close the socket.
                            try:
                                self.sock.shutdown(SHUT_WR)
                                self.sock.close()
                            except:
                                pass
                            return #break if queue is empty and killReceived
                        else:
                            continue
                except:
                    #pydevd_log(0, 'Finishing debug communication...(1)')
                    #when liberating the thread here, we could have errors because we were shutting down
                    #but the thread was still not liberated
                    return
                out = cmd.outgoing
                if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
                    out_message = 'sending cmd --> '
                    out_message += "%20s" % ID_TO_MEANING.get(out[:3], 'UNKNOWN')
                    out_message += ' '
                    out_message += unquote(unquote(out)).replace('\n', ' ')
                    try:
                        sys.stderr.write('%s\n' % (out_message,))
                    except:
                        pass
                if IS_PY3K:
                    out = bytearray(out, 'utf-8')
                self.sock.send(out) #TODO: this does not guarantee that all message are sent (and jython does not have a send all)
                if cmd.id == CMD_EXIT:
                    break
                if time is None:
                    # Interpreter shutdown already cleared this module's globals.
                    break #interpreter shutdown
                time.sleep(self.timeout)
        except Exception:
            GlobalDebuggerHolder.global_dbg.finish_debugging_session()
            if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
                traceback.print_exc()
    def empty(self):
        # True when there are no queued commands left to send.
        return self.cmdQueue.empty()
#--------------------------------------------------- CREATING THE SOCKET THREADS
#=======================================================================================================================
# start_server
#=======================================================================================================================
def start_server(port):
    ''' binds to a port, waits for the debugger to connect '''
    server_sock = socket(AF_INET, SOCK_STREAM)
    server_sock.settimeout(None)

    # Prefer SO_REUSEPORT where the platform exposes it, otherwise SO_REUSEADDR,
    # so a quick restart can rebind the same port.
    try:
        from socket import SO_REUSEPORT
    except ImportError:
        server_sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    else:
        server_sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)

    server_sock.bind(('', port))
    pydevd_log(1, "Bound to port ", str(port))

    try:
        server_sock.listen(1)
        new_sock, _addr = server_sock.accept()
        pydevd_log(1, "Connection accepted")
        # closing server socket is not necessary but we don't need it
        server_sock.shutdown(SHUT_RDWR)
        server_sock.close()
        return new_sock
    except:
        sys.stderr.write("Could not bind to port: %s\n" % (port,))
        sys.stderr.flush()
        traceback.print_exc()
        sys.exit(1) #TODO: is it safe?
#=======================================================================================================================
# start_client
#=======================================================================================================================
def start_client(host, port):
    ''' connects to a host/port

    Retries for up to MAX_TRIES * 0.2s, then reports the failure and exits.
    '''
    pydevd_log(1, "Connecting to ", host, ":", str(port))

    MAX_TRIES = 100
    i = 0
    while i < MAX_TRIES:
        # Create a fresh socket per attempt: a socket whose connect() failed
        # cannot reliably be reused for another connect (notably on Windows),
        # and closing it avoids accumulating dead handles across retries.
        s = socket(AF_INET, SOCK_STREAM)
        try:
            s.connect((host, port))
        except:
            s.close()
            i += 1
            time.sleep(0.2)
            continue
        pydevd_log(1, "Connected.")
        return s

    sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
    sys.stderr.flush()
    traceback.print_exc()
    sys.exit(1) #TODO: is it safe?
#------------------------------------------------------------------------------------ MANY COMMUNICATION STUFF
#=======================================================================================================================
# NetCommand
#=======================================================================================================================
class NetCommand:
    """ Commands received/sent over the network.

    Command can represent command received from the debugger,
    or one to be sent by daemon.
    """

    next_seq = 0 # sequence numbers

    def __init__(self, id, seq, text):
        """ smart handling of parameters
        if sequence is 0, new sequence will be generated
        if text has carriage returns they'll be replaced"""
        if seq == 0:
            # Locally-originated commands get the next (even-stepped) sequence number.
            NetCommand.next_seq += 2
            seq = NetCommand.next_seq

        self.id = id
        self.seq = seq
        self.text = text
        # Wire format: id '\t' seq '\t' quoted-text '\n'.
        self.outgoing = '%s\t%s\t%s\n' % (id, seq, quote(to_string(text), '/<>_=" \t'))
#=======================================================================================================================
# NetCommandFactory
#=======================================================================================================================
class NetCommandFactory:
    '''Builds the NetCommand instances the debugger sends to the IDE.

    Builder methods never let an exception escape: any failure while building
    a command is converted into an error command (make_error_message) so the
    wire protocol stays consistent.
    '''

    def _thread_to_xml(self, thread):
        """ thread information as XML """
        name = pydevd_xml.make_valid_xml_value(thread.getName())
        cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
        return cmdText

    def make_error_message(self, seq, text):
        cmd = NetCommand(CMD_ERROR, seq, text)
        if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
            sys.stderr.write("Error: %s" % (text,))
        return cmd

    def make_thread_created_message(self, thread):
        cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>"
        return NetCommand(CMD_THREAD_CREATE, 0, cmdText)

    def make_process_created_message(self):
        cmdText = '<process/>'
        return NetCommand(CMD_PROCESS_CREATED, 0, cmdText)

    def make_show_cython_warning_message(self):
        try:
            return NetCommand(CMD_SHOW_CYTHON_WARNING, 0, '')
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_custom_frame_created_message(self, frameId, frameDescription):
        frameDescription = pydevd_xml.make_valid_xml_value(frameDescription)
        cmdText = '<xml><thread name="%s" id="%s"/></xml>' % (frameDescription, frameId)
        return NetCommand(CMD_THREAD_CREATE, 0, cmdText)

    def make_list_threads_message(self, seq):
        """ returns thread listing as XML """
        try:
            threads = threading.enumerate()
            cmd_text = ["<xml>"]
            append = cmd_text.append
            for t in threads:
                # Bug fix: the liveness check must be made on each thread `t`, not
                # on the list returned by threading.enumerate() (the old code called
                # isAlive() on the list, always raised AttributeError, and answered
                # with an error command instead of the thread listing).
                # isAlive (not is_alive) is kept for old-interpreter compatibility.
                if t.isAlive():
                    append(self._thread_to_xml(t))
            append("</xml>")
            return NetCommand(CMD_RETURN, seq, ''.join(cmd_text))
        except:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_variable_changed_message(self, seq, payload):
        # notify debugger that value was changed successfully
        return NetCommand(CMD_RETURN, seq, payload)

    def make_io_message(self, v, ctx, dbg=None):
        '''
        @param v: the message to pass to the debug server
        @param ctx: 1 for stdio 2 for stderr
        @param dbg: If not none, add to the writer
        '''
        try:
            # Cap console output so a single huge write can't flood the channel.
            if len(v) > MAX_IO_MSG_SIZE:
                v = v[0:MAX_IO_MSG_SIZE]
                v += '...'

            v = pydevd_xml.make_valid_xml_value(quote(v, '/>_= \t'))
            net = NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (v, ctx))
        except:
            net = self.make_error_message(0, get_exception_traceback_str())

        if dbg:
            dbg.writer.add_command(net)

        return net

    def make_version_message(self, seq):
        try:
            return NetCommand(CMD_VERSION, seq, VERSION_STRING)
        except:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_thread_killed_message(self, id):
        try:
            return NetCommand(CMD_THREAD_KILL, 0, str(id))
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_thread_suspend_str(self, thread_id, frame, stop_reason, message):
        """ Builds the suspend payload:
        <xml>
            <thread id="id" stop_reason="reason">
                <frame id="id" name="functionName " file="file" line="line">
                </frame>
            </thread>
        </xml>
        """
        cmd_text_list = ["<xml>"]
        append = cmd_text_list.append
        make_valid_xml_value = pydevd_xml.make_valid_xml_value

        if message:
            message = make_valid_xml_value(message)

        append('<thread id="%s" stop_reason="%s" message="%s">' % (thread_id, stop_reason, message))

        curr_frame = frame
        try:
            while curr_frame:
                my_id = id(curr_frame)

                if curr_frame.f_code is None:
                    break #Iron Python sometimes does not have it!

                my_name = curr_frame.f_code.co_name #method name (if in method) or ? if global
                if my_name is None:
                    break #Iron Python sometimes does not have it!

                abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(curr_frame)
                myFile = norm_file_to_client(abs_path_real_path_and_base[0])
                if file_system_encoding.lower() != "utf-8" and hasattr(myFile, "decode"):
                    # myFile is a byte string encoded using the file system encoding
                    # convert it to utf8
                    myFile = myFile.decode(file_system_encoding).encode("utf-8")

                myLine = str(curr_frame.f_lineno)

                # The variables are all gotten 'on-demand': only the frame skeleton is sent here.
                variables = ''
                append('<frame id="%s" name="%s" ' % (my_id, make_valid_xml_value(my_name)))
                append('file="%s" line="%s">' % (quote(myFile, '/>_= \t'), myLine))
                append(variables)
                append("</frame>")
                curr_frame = curr_frame.f_back
        except:
            traceback.print_exc()

        append("</thread></xml>")

        return ''.join(cmd_text_list)

    def make_thread_suspend_message(self, thread_id, frame, stop_reason, message):
        try:
            return NetCommand(CMD_THREAD_SUSPEND, 0, self.make_thread_suspend_str(thread_id, frame, stop_reason, message))
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_thread_run_message(self, id, reason):
        try:
            return NetCommand(CMD_THREAD_RUN, 0, str(id) + "\t" + str(reason))
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_get_variable_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_VARIABLE, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_array_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_ARRAY, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_description_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_DESCRIPTION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_frame_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_FRAME, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_evaluate_expression_message(self, seq, payload):
        try:
            return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_completions_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_COMPLETIONS, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_get_file_contents(self, seq, payload):
        try:
            return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_send_breakpoint_exception_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_send_curr_exception_trace_message(self, seq, thread_id, curr_frame_id, exc_type, exc_desc, trace_obj):
        try:
            # Walk to the innermost traceback entry (where the exception was raised).
            while trace_obj.tb_next is not None:
                trace_obj = trace_obj.tb_next

            exc_type = pydevd_xml.make_valid_xml_value(str(exc_type)).replace('\t', '  ') or 'exception: type unknown'
            exc_desc = pydevd_xml.make_valid_xml_value(str(exc_desc)).replace('\t', '  ') or 'exception: no description'

            payload = str(curr_frame_id) + '\t' + exc_type + "\t" + exc_desc + "\t" + \
                self.make_thread_suspend_str(thread_id, trace_obj.tb_frame, CMD_SEND_CURR_EXCEPTION_TRACE, '')

            return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id):
        try:
            return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id))
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_send_console_message(self, seq, payload):
        try:
            return NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_custom_operation_message(self, seq, payload):
        try:
            return NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())

    def make_load_source_message(self, seq, source, dbg=None):
        try:
            net = NetCommand(CMD_LOAD_SOURCE, seq, '%s' % source)
        except:
            net = self.make_error_message(0, get_exception_traceback_str())

        if dbg:
            dbg.writer.add_command(net)
        return net

    def make_show_console_message(self, thread_id, frame):
        try:
            return NetCommand(CMD_SHOW_CONSOLE, 0, self.make_thread_suspend_str(thread_id, frame, CMD_SHOW_CONSOLE, ''))
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_input_requested_message(self, started):
        try:
            return NetCommand(CMD_INPUT_REQUESTED, 0, started)
        except:
            return self.make_error_message(0, get_exception_traceback_str())

    def make_exit_message(self):
        try:
            net = NetCommand(CMD_EXIT, 0, '')
        except:
            net = self.make_error_message(0, get_exception_traceback_str())

        return net
# NOTE(review): appear to tag internal (in-process) commands, distinct from the
# wire-protocol CMD_* ids; their usage is not visible in this chunk — confirm.
INTERNAL_TERMINATE_THREAD = 1
INTERNAL_SUSPEND_THREAD = 2
#=======================================================================================================================
# InternalThreadCommand
#=======================================================================================================================
class InternalThreadCommand:
    """ internal commands are generated/executed by the debugger.

    The reason for their existence is that some commands have to be executed
    on specific threads. These are the InternalThreadCommands that get
    get posted to PyDB.cmdQueue.
    """

    def can_be_executed_by(self, thread_id):
        '''By default a command may only run on the thread it targets
        (or on a composite id ending with "|<thread_id>").
        '''
        if self.thread_id == thread_id:
            return True
        return self.thread_id.endswith('|' + thread_id)

    def do_it(self, dbg):
        '''Execute the command; subclasses must override.'''
        raise NotImplementedError("you have to override do_it")
class ReloadCodeCommand(InternalThreadCommand):
    '''Reloads a module in-place (pydevd_reload.xreload); runs at most once,
    on any thread when thread_id is '*'.
    '''

    def __init__(self, module_name, thread_id):
        self.thread_id = thread_id
        self.module_name = module_name
        self.executed = False
        self.lock = thread.allocate_lock()

    def can_be_executed_by(self, thread_id):
        if self.thread_id == '*':
            return True #Any thread can execute it!
        return InternalThreadCommand.can_be_executed_by(self, thread_id)

    def do_it(self, dbg):
        # Atomically claim execution so the reload happens only once even if
        # this command was queued for several threads.
        self.lock.acquire()
        try:
            already_executed, self.executed = self.executed, True
        finally:
            self.lock.release()
        if already_executed:
            return

        module_name = self.module_name
        if not dict_contains(sys.modules, module_name):
            # Fall back to the last dotted component (e.g. pkg.mod -> mod).
            if '.' in module_name:
                new_module_name = module_name.split('.')[-1]
                if dict_contains(sys.modules, new_module_name):
                    module_name = new_module_name

        if not dict_contains(sys.modules, module_name):
            sys.stderr.write('pydev debugger: Unable to find module to reload: "' + module_name + '".\n')
            # Too much info...
            # sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')
        else:
            sys.stderr.write('pydev debugger: Start reloading module: "' + module_name + '" ... \n')
            from _pydevd_bundle import pydevd_reload
            if pydevd_reload.xreload(sys.modules[module_name]):
                sys.stderr.write('pydev debugger: reload finished\n')
            else:
                sys.stderr.write('pydev debugger: reload finished without applying any change\n')
#=======================================================================================================================
# InternalTerminateThread
#=======================================================================================================================
class InternalTerminateThread(InternalThreadCommand):
    '''Notifies the IDE that the target thread was killed.'''

    def __init__(self, thread_id):
        self.thread_id = thread_id

    def do_it(self, dbg):
        pydevd_log(1, "killing ", str(self.thread_id))
        dbg.writer.add_command(dbg.cmd_factory.make_thread_killed_message(self.thread_id))
#=======================================================================================================================
# InternalRunThread
#=======================================================================================================================
class InternalRunThread(InternalThreadCommand):
    '''Resumes the target thread, clearing any pending step command.'''

    def __init__(self, thread_id):
        self.thread_id = thread_id

    def do_it(self, dbg):
        target = pydevd_find_thread_by_id(self.thread_id)
        if target is None:
            return
        info = target.additional_info
        info.pydev_step_cmd = -1
        info.pydev_step_stop = None
        info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalStepThread
#=======================================================================================================================
class InternalStepThread(InternalThreadCommand):
    '''Schedules a step command (cmd_id) on the target thread and resumes it.'''

    def __init__(self, thread_id, cmd_id):
        self.thread_id = thread_id
        self.cmd_id = cmd_id

    def do_it(self, dbg):
        target = pydevd_find_thread_by_id(self.thread_id)
        if target is not None:
            info = target.additional_info
            info.pydev_step_cmd = self.cmd_id
            info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalSetNextStatementThread
#=======================================================================================================================
class InternalSetNextStatementThread(InternalThreadCommand):
    '''Moves execution of a suspended thread to another line (set-next-statement).'''

    def __init__(self, thread_id, cmd_id, line, func_name):
        self.thread_id = thread_id
        self.cmd_id = cmd_id
        self.line = line

        # `unicode` is only looked up when IS_PY2 is true (short-circuit), so
        # this is safe on Python 3 where the name does not exist.
        if IS_PY2 and isinstance(func_name, unicode):
            # On cython with python 2.X it requires an str, not unicode (but on python 3.3 it should be a str, not bytes).
            func_name = func_name.encode('utf-8')
        self.func_name = func_name

    def do_it(self, dbg):
        target = pydevd_find_thread_by_id(self.thread_id)
        if target is not None:
            info = target.additional_info
            info.pydev_step_cmd = self.cmd_id
            info.pydev_next_line = int(self.line)
            info.pydev_func_name = self.func_name
            info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalGetVariable
#=======================================================================================================================
class InternalGetVariable(InternalThreadCommand):
    """ gets the value of a variable """
    def __init__(self, seq, thread_id, frame_id, scope, attrs):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attributes = attrs
    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            xml = "<xml>"
            _typeName, valDict = pydevd_vars.resolve_compound_variable(self.thread_id, self.frame_id, self.scope, self.attributes)
            if valDict is None:
                valDict = {}
            keys = valDict.keys()
            # Sort the children for display, except for OrderedDict (keeps its own
            # order) and Python 3.6+ (plain dicts are insertion-ordered). The three
            # branches below cover interpreter differences: list.sort(cmp_func) on
            # old CPython/Jython, sorted(key=cmp_to_key(...)) on Python 3, and
            # sorted(cmp=...) where keys() is not a list but cmp= is still accepted.
            if _typeName != "OrderedDict" and not IS_PY36_OR_GREATER:
                if hasattr(keys, 'sort'):
                    keys.sort(compare_object_attrs) #Python 3.0 does not have it
                else:
                    if IS_PY3K:
                        keys = sorted(keys, key=cmp_to_key(compare_object_attrs)) #Jython 2.1 does not have it (and all must be compared as strings).
                    else:
                        keys = sorted(keys, cmp=compare_object_attrs) #Jython 2.1 does not have it (and all must be compared as strings).
            for k in keys:
                xml += pydevd_xml.var_to_xml(valDict[k], to_string(k))
            xml += "</xml>"
            cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml)
            dbg.writer.add_command(cmd)
        except Exception:
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving variables " + get_exception_traceback_str())
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetArray
#=======================================================================================================================
class InternalGetArray(InternalThreadCommand):
    '''Renders a rows x cols slice of an array-like variable as XML.'''

    def __init__(self, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        # The variable name is the last tab-separated component of attrs.
        self.name = attrs.split("\t")[-1]
        self.attrs = attrs
        self.roffset = int(roffset)
        self.coffset = int(coffset)
        self.rows = int(rows)
        self.cols = int(cols)
        self.format = format

    def do_it(self, dbg):
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            var = pydevd_vars.eval_in_context(self.name, frame.f_globals, frame.f_locals)
            xml = pydevd_vars.table_like_struct_to_xml(
                var, self.name, self.roffset, self.coffset, self.rows, self.cols, self.format)
            dbg.writer.add_command(dbg.cmd_factory.make_get_array_message(self.sequence, xml))
        except:
            dbg.writer.add_command(dbg.cmd_factory.make_error_message(
                self.sequence, "Error resolving array: " + get_exception_traceback_str()))
#=======================================================================================================================
# InternalChangeVariable
#=======================================================================================================================
class InternalChangeVariable(InternalThreadCommand):
    """ changes the value of a variable """

    def __init__(self, seq, thread_id, frame_id, scope, attr, expression):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attr = attr
        self.expression = expression

    def do_it(self, dbg):
        """ Applies the new value and reports the result back as XML. """
        try:
            result = pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.attr, self.expression, dbg)
            xml = "<xml>%s</xml>" % pydevd_xml.var_to_xml(result, "")
            dbg.writer.add_command(dbg.cmd_factory.make_variable_changed_message(self.sequence, xml))
        except Exception:
            msg = "Error changing variable attr:%s expression:%s traceback:%s" % (
                self.attr, self.expression, get_exception_traceback_str())
            dbg.writer.add_command(dbg.cmd_factory.make_error_message(self.sequence, msg))
#=======================================================================================================================
# InternalGetFrame
#=======================================================================================================================
class InternalGetFrame(InternalThreadCommand):
    """ gets the value of a variable """

    def __init__(self, seq, thread_id, frame_id):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id

    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is None:
                # don't print this error: frame not found: means that the client is not synchronized (but that's ok)
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
            else:
                hidden_ns = pydevconsole.get_ipython_hidden_vars()
                xml = "<xml>" + pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns) + "</xml>"
                del frame  # don't keep the frame alive longer than needed
                cmd = dbg.cmd_factory.make_get_frame_message(self.sequence, xml)
            dbg.writer.add_command(cmd)
        except:
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id))
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalEvaluateExpression
#=======================================================================================================================
class InternalEvaluateExpression(InternalThreadCommand):
    """ gets the value of a variable """

    def __init__(self, seq, thread_id, frame_id, expression, doExec, doTrim, temp_name):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression
        self.doExec = doExec
        self.doTrim = doTrim
        self.temp_name = temp_name

    def do_it(self, dbg):
        """ Evaluates the expression in the target frame and sends the XML result. """
        try:
            result = pydevd_vars.evaluate_expression(self.thread_id, self.frame_id, self.expression, self.doExec)
            if self.temp_name != "":
                # Also bind the result to a temporary name in the frame.
                pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.temp_name, self.expression, dbg, result)
            xml = "<xml>%s</xml>" % pydevd_xml.var_to_xml(result, self.expression, self.doTrim)
            dbg.writer.add_command(dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml))
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            dbg.writer.add_command(dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc))
#=======================================================================================================================
# InternalGetCompletions
#=======================================================================================================================
class InternalGetCompletions(InternalThreadCommand):
    """ Gets the completions in a given scope """

    def __init__(self, seq, thread_id, frame_id, act_tok):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.act_tok = act_tok

    def do_it(self, dbg):
        """ Converts request into completions

        Note: the old implementation carried a dead `remove_path` variable
        (always None, never assigned) plus a try/finally that only existed to
        check it — both removed; behavior is unchanged.
        """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is not None:
                msg = _pydev_completer.generate_completions_as_xml(frame, self.act_tok)
                cmd = dbg.cmd_factory.make_get_completions_message(self.sequence, msg)
            else:
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "InternalGetCompletions: Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
            dbg.writer.add_command(cmd)
# =======================================================================================================================
# InternalGetDescription
# =======================================================================================================================
class InternalGetDescription(InternalThreadCommand):
    """ Fetch the variable description stub from the debug console
    """

    def __init__(self, seq, thread_id, frame_id, expression):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression

    def do_it(self, dbg):
        """ Get completions and write back to the client
        """
        try:
            suspended_frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            description = pydevd_console.get_description(suspended_frame, self.thread_id, self.frame_id, self.expression)
            description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
            description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
            dbg.writer.add_command(dbg.cmd_factory.make_get_description_message(self.sequence, description_xml))
        except:
            dbg.writer.add_command(dbg.cmd_factory.make_error_message(
                self.sequence, "Error in fetching description" + get_exception_traceback_str()))
#=======================================================================================================================
# InternalGetBreakpointException
#=======================================================================================================================
class InternalGetBreakpointException(InternalThreadCommand):
    """ Send details of exception raised while evaluating conditional breakpoint """

    def __init__(self, thread_id, exc_type, stacktrace):
        self.sequence = 0
        self.thread_id = thread_id
        self.stacktrace = stacktrace
        self.exc_type = exc_type

    def do_it(self, dbg):
        try:
            make_valid = pydevd_xml.make_valid_xml_value
            parts = ["<xml>"]
            for filename, line, methodname, methodobj in self.stacktrace:
                if file_system_encoding.lower() != "utf-8" and hasattr(filename, "decode"):
                    # filename is a byte string encoded using the file system encoding
                    # convert it to utf8
                    filename = filename.decode(file_system_encoding).encode("utf-8")

                parts.append('<frame thread_id = "%s" file="%s" line="%s" name="%s" obj="%s" />'
                             % (self.thread_id, make_valid(filename), line, make_valid(methodname), make_valid(methodobj)))
            parts.append("</xml>")
            callstack = ''.join(parts)

            cmd = dbg.cmd_factory.make_send_breakpoint_exception_message(self.sequence, self.exc_type + "\t" + callstack)
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Exception: " + exc)
            dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTrace
#=======================================================================================================================
class InternalSendCurrExceptionTrace(InternalThreadCommand):
    """Send details of the exception that was caught and where we've broken in."""

    def __init__(self, thread_id, arg, curr_frame_id):
        # arg: (exception type, description, traceback object) tuple
        self.sequence = 0
        self.thread_id = thread_id
        self.curr_frame_id = curr_frame_id
        self.arg = arg

    def do_it(self, dbg):
        try:
            cmd = dbg.cmd_factory.make_send_curr_exception_trace_message(
                self.sequence, self.thread_id, self.curr_frame_id, *self.arg)
            # release the traceback reference promptly to avoid reference cycles
            del self.arg
            dbg.writer.add_command(cmd)
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            dbg.writer.add_command(
                dbg.cmd_factory.make_error_message(
                    self.sequence, "Error Sending Current Exception Trace: " + exc))
#=======================================================================================================================
# InternalSendCurrExceptionTraceProceeded
#=======================================================================================================================
class InternalSendCurrExceptionTraceProceeded(InternalThreadCommand):
    """Notify the client that handling of the current caught exception proceeded."""

    def __init__(self, thread_id):
        self.sequence = 0
        self.thread_id = thread_id

    def do_it(self, dbg):
        try:
            dbg.writer.add_command(
                dbg.cmd_factory.make_send_curr_exception_trace_proceeded_message(
                    self.sequence, self.thread_id))
        except:
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            dbg.writer.add_command(
                dbg.cmd_factory.make_error_message(
                    self.sequence,
                    "Error Sending Current Exception Trace Proceeded: " + exc))
#=======================================================================================================================
# InternalEvaluateConsoleExpression
#=======================================================================================================================
class InternalEvaluateConsoleExpression(InternalThreadCommand):
    """Execute the given command in the debug console."""

    def __init__(self, seq, thread_id, frame_id, line, buffer_output=True):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.line = line
        self.buffer_output = buffer_output

    def do_it(self, dbg):
        """Build and send an XML reply describing console output, error and more:

        <xml>
        <output message=output_message></output>
        <error message=error_message></error>
        <more>true/false</more>
        </xml>
        """
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            if frame is None:
                # No valid frame selected: report the problem as a console error.
                from _pydevd_bundle.pydevd_console import ConsoleMessage
                console_message = ConsoleMessage()
                console_message.add_console_message(
                    pydevd_console.CONSOLE_ERROR,
                    "Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id),
                )
                cmd = dbg.cmd_factory.make_error_message(
                    self.sequence, console_message.to_xml())
            else:
                console_message = pydevd_console.execute_console_command(
                    frame, self.thread_id, self.frame_id, self.line, self.buffer_output)
                cmd = dbg.cmd_factory.make_send_console_message(
                    self.sequence, console_message.to_xml())
        except:
            exc = get_exception_traceback_str()
            cmd = dbg.cmd_factory.make_error_message(
                self.sequence, "Error evaluating expression " + exc)
        dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalRunCustomOperation
#=======================================================================================================================
class InternalRunCustomOperation(InternalThreadCommand):
    """Run a custom command on an expression."""

    def __init__(self, seq, thread_id, frame_id, scope, attrs, style, encoded_code_or_file, fnname):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.scope = scope
        self.attrs = attrs
        self.style = style
        # the code/file argument arrives URL-encoded on the wire
        self.code_or_file = unquote_plus(encoded_code_or_file)
        self.fnname = fnname

    def do_it(self, dbg):
        try:
            result = pydevd_vars.custom_operation(
                self.thread_id, self.frame_id, self.scope, self.attrs,
                self.style, self.code_or_file, self.fnname)
            encoded_result = quote_plus(result)
            dbg.writer.add_command(
                dbg.cmd_factory.make_custom_operation_message(
                    self.sequence, encoded_result))
        except:
            exc = get_exception_traceback_str()
            dbg.writer.add_command(
                dbg.cmd_factory.make_error_message(
                    self.sequence, "Error in running custom operation" + exc))
#=======================================================================================================================
# InternalConsoleGetCompletions
#=======================================================================================================================
class InternalConsoleGetCompletions(InternalThreadCommand):
    """Fetch the completions in the debug console."""

    def __init__(self, seq, thread_id, frame_id, act_tok):
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.act_tok = act_tok

    def do_it(self, dbg):
        """Get completions for the activation token and send them to the client."""
        try:
            frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
            completions_xml = pydevd_console.get_completions(frame, self.act_tok)
            dbg.writer.add_command(
                dbg.cmd_factory.make_send_console_message(
                    self.sequence, completions_xml))
        except:
            exc = get_exception_traceback_str()
            dbg.writer.add_command(
                dbg.cmd_factory.make_error_message(
                    self.sequence, "Error in fetching completions" + exc))
#=======================================================================================================================
# InternalConsoleExec
#=======================================================================================================================
class InternalConsoleExec(InternalThreadCommand):
    """Execute a console expression in a frame and report its value to the client.

    Note: the class docstring in the original ("gets the value of a variable")
    is narrower than what do_it actually does — it runs pydevconsole.console_exec.
    """
    def __init__(self, seq, thread_id, frame_id, expression):
        # seq: request sequence id echoed back in the reply
        self.sequence = seq
        self.thread_id = thread_id
        self.frame_id = frame_id
        self.expression = expression
    def do_it(self, dbg):
        """ Converts request into python variable """
        try:
            try:
                #don't trace new threads created by console command
                disable_trace_thread_modules()
                result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
                # Serialize the result as <xml><var .../></xml> for the wire protocol.
                xml = "<xml>"
                xml += pydevd_xml.var_to_xml(result, "")
                xml += "</xml>"
                cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
                dbg.writer.add_command(cmd)
            except:
                # Any failure (including in serialization) is reported to the client.
                exc = get_exception_traceback_str()
                sys.stderr.write('%s\n' % (exc,))
                cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
                dbg.writer.add_command(cmd)
        finally:
            # Always restore tracing of thread modules, then flush both streams
            # so console output reaches the client promptly.
            enable_trace_thread_modules()
            sys.stderr.flush()
            sys.stdout.flush()
#=======================================================================================================================
# pydevd_find_thread_by_id
#=======================================================================================================================
def pydevd_find_thread_by_id(thread_id):
    """Return the live thread whose id matches thread_id, or None if absent."""
    try:
        # NOTE: a deadlock was once observed here when the tracing function
        # was not removed from a dead thread before enumerating.
        threads = threading.enumerate()
        for candidate in threads:
            tid = get_thread_id(candidate)
            if thread_id == tid or thread_id.endswith('|' + tid):
                return candidate
        sys.stderr.write("Could not find thread %s\n" % thread_id)
        sys.stderr.write("Available: %s\n" % [get_thread_id(t) for t in threads])
        sys.stderr.flush()
    except:
        traceback.print_exc()
    return None
``` |
[
{
"content": "```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by Microsoft (R) Au... | [
{
"content": "<|memory_start|>```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by ... | ```python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NatGatewaysOperations:
"""NatGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def get(
self,
resource_group_name: str,
nat_gateway_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.NatGateway":
"""Gets the specified nat gateway in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.NatGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs
) -> "_models.NatGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NatGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NatGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NatGateway', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs
) -> AsyncLROPoller["_models.NatGateway"]:
"""Creates or updates a nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param parameters: Parameters supplied to the create or update nat gateway operation.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.NatGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NatGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.NatGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.NatGateway":
"""Updates nat gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param parameters: Parameters supplied to update nat gateway tags.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.NatGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.NatGatewayListResult"]:
"""Gets all the Nat Gateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NatGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.NatGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/natGateways'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.NatGatewayListResult"]:
"""Gets all nat gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NatGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.NatGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways'} # type: ignore
``` |
[
{
"content": "```python\nimport curses\nimport datetime\nimport sys\n\nfrom writelightly.calendar import Calendar\nfrom writelightly.conf import Config\nfrom writelightly.edit import edit_date, get_edits, clean_tmp, show_edits\nfrom writelightly.metadata import Metadata\nfrom writelightly.screen import ScreenMa... | [
{
"content": "<|memory_start|>```python\nimport curses\nimport datetime\nimport sys\n\nfrom writelightly.calendar import Calendar\nfrom writelightly.conf import Config\nfrom writelightly.edit import edit_date, get_edits, clean_tmp, show_edits\nfrom writelightly.metadata import Metadata\nfrom writelightly.screen... | ```python
import curses
import datetime
import sys
from writelightly.calendar import Calendar
from writelightly.conf import Config
from writelightly.edit import edit_date, get_edits, clean_tmp, show_edits
from writelightly.metadata import Metadata
from writelightly.screen import ScreenManager, TextArea
from writelightly.tags import show_tags, show_tag
from writelightly.utils import entry_exists, parse_date, WLError, WLQuit
import locale
locale.setlocale(locale.LC_ALL, ('en_US', 'UTF-8'))
def show_calendar():
    """Show an interactive calendar.
    Show the calendar on the left side of the screen and some metadata about
    the selected date on the right. Any entry can be edited in external editor.
    """
    # Start on today's date.
    today = datetime.date.today()
    year, month = today.year, today.month
    cal = Calendar(year, month, today.day, entry_exists)
    metadata = Metadata.get(year, month)
    text_area = TextArea()
    ScreenManager.draw_all()
    # Show metadata for the initially selected day.
    d = cal.get_current_date()
    text_area.show_text(metadata.text(d.day))
    keys = Config.calendar_keys
    # Main key-dispatch loop; runs until a quit key or Ctrl-C.
    while 1:
        try:
            kn = curses.keyname(cal.window.getch())
        except KeyboardInterrupt:
            break
        except ValueError:
            # getch() returned something keyname() cannot translate; ignore.
            continue
        if kn in Config.general_keys['quit']:
            raise WLQuit
        if kn in Config.general_keys['quit_mode']:
            break
        if kn == 'KEY_RESIZE':
            ScreenManager.resize()
            if cal.hidden:
                # Screen too small to draw the calendar; wait for a resize.
                continue
        if kn in keys['left']:
            # Falling off the left edge rolls over to the previous month.
            moved = cal.move_left()
            if not moved:
                cal = cal.get_previous_calendar()
                cal.draw()
                metadata = Metadata.get(cal.year, cal.month)
            text_area.show_text(metadata.text(cal.get_current_day()))
        elif kn in keys['right']:
            # Falling off the right edge rolls over to the next month.
            moved = cal.move_right()
            if not moved:
                cal = cal.get_next_calendar()
                cal.draw()
                metadata = Metadata.get(cal.year, cal.month)
            text_area.show_text(metadata.text(cal.get_current_day()))
        elif kn in keys['down']:
            cal.move_down()
            text_area.show_text(metadata.text(cal.get_current_day()))
        elif kn in keys['up']:
            cal.move_up()
            text_area.show_text(metadata.text(cal.get_current_day()))
        elif kn in keys['edit']:
            # Open the selected day's entry in the external editor, then
            # refresh its metadata and active/inactive marker.
            date = cal.get_current_date()
            edit_date(date)
            metadata.load_day(date.day)
            cal.set_active(entry_exists(date))
            text_area.show_text(metadata.text(date.day))
        elif kn in keys['tags']:
            # Switch to the tags view, then restore and redraw the calendar.
            show_tags(cal.area_id, text_area)
            ScreenManager.restore_area(cal.area_id)
            cal.reinit()
            text_area.set_title()
            text_area.show_text(metadata.text(cal.get_current_day()))
        elif kn in keys['edits']:
            # Show the edit history of the selected day, if any exists.
            date = cal.get_current_date()
            edits = get_edits(date)
            if edits:
                show_edits(date, edits, text_area.area_id)
                ScreenManager.restore_area(text_area.area_id)
                text_area.show_text(metadata.text(date.day))
        elif kn in keys['prev_month']:
            # Jump a whole month back, keeping the selected day number.
            cal = cal.get_previous_calendar(cal.get_current_day())
            cal.draw()
            metadata = Metadata.get(cal.year, cal.month)
            text_area.show_text(metadata.text(cal.get_current_day()))
        elif kn in keys['next_month']:
            # Jump a whole month forward, keeping the selected day number.
            cal = cal.get_next_calendar(cal.get_current_day())
            cal.draw()
            metadata = Metadata.get(cal.year, cal.month)
            text_area.show_text(metadata.text(cal.get_current_day()))
    # Persist any metadata changes and remove editor temp files on exit.
    Metadata.write_all()
    clean_tmp()
def edit_single_date(date):
    """Edit the entry for one date in the external editor.

    Parses *date*, opens the editor for it, and refreshes the stored
    metadata for that day -- all without initializing the curses screen.
    Raises WLError when the date string cannot be parsed.
    """
    parsed = parse_date(date)
    if not parsed:
        raise WLError('Unrecognised date format\n')
    edit_date(parsed)
    day_meta = Metadata(parsed.year, parsed.month)
    day_meta.load_day(parsed.day)
    day_meta.write()
usage = '''Usage:
%(name)s
%(name)s ( <date> | today | yesterday )
%(name)s -t [<tag>]
''' % {'name': sys.argv[0]}
def wrapper(func, with_screen=False):
    """Run *func*, optionally inside an initialized curses screen.

    WLQuit is treated as a clean exit.  A WLError is remembered and
    written to stderr only after the screen (if any) has been torn
    down, so the message is not swallowed by curses.
    """
    failure = None
    if with_screen:
        ScreenManager.init()
    try:
        try:
            func()
        except WLQuit:
            pass
        except WLError as exc:
            failure = exc
    finally:
        if with_screen:
            ScreenManager.quit()
        if failure is not None:
            sys.stderr.write('%s\n' % failure)
def main():
    """Parse command-line options and dispatch to the right mode.

    No arguments: show the interactive calendar.  A date argument:
    edit that single entry without the curses UI.  -t: show all tags,
    or a single tag when one is named.  -h/--help prints usage.
    """
    from getopt import getopt, GetoptError
    from functools import partial
    try:
        options, args = getopt(sys.argv[1:], 'th', ['help'])
    except GetoptError as exc:
        sys.stderr.write('%s\nTry `%s -h` for help\n' % (exc, sys.argv[0]))
        sys.exit(1)
    init_screen = True
    option_names = [o[0] for o in options]
    if '-h' in option_names or '--help' in option_names:
        # BUG FIX: this was the Python 2-only statement `print usage`;
        # print(usage) behaves identically under Python 2 and also works
        # under Python 3.
        print(usage)
        sys.exit()
    if options:
        # -t given: one tag if named on the command line, else all tags.
        if args:
            func = partial(show_tag, args[0])
        else:
            func = show_tags
    else:
        if args:
            # A date argument: edit that entry directly, no screen needed.
            func = partial(edit_single_date, args[0])
            init_screen = False
        else:
            func = show_calendar
    wrapper(func, init_screen)
``` |
[
{
"content": "Here is the code content:\n```python\nimport logging\nimport posixpath\nimport requests\nimport arrow\nfrom functools import partial\nfrom errbot import BotPlugin, botcmd\nfrom apscheduler.scheduler import Scheduler\nfrom BeautifulSoup import BeautifulSoup\n\n\nSTOPS = {\n 'xmassteps': 'bstgajt... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\nimport logging\nimport posixpath\nimport requests\nimport arrow\nfrom functools import partial\nfrom errbot import BotPlugin, botcmd\nfrom apscheduler.scheduler import Scheduler\nfrom BeautifulSoup import BeautifulSoup\n\n\nSTOPS = {\n 'xmas... | ```python
import logging
import posixpath
import requests
import arrow
from functools import partial
from errbot import BotPlugin, botcmd
from apscheduler.scheduler import Scheduler
from BeautifulSoup import BeautifulSoup
# Friendly stop names mapped to the stop codes used by nextbuses.mobi.
# NOTE(review): the codes look like NaPTAN-style stop identifiers -- confirm.
STOPS = {
    'xmassteps': 'bstgajt',
    'centre': 'bstgajp',
    'hippodrome': 'bstdwmd',
    'kennave': 'bstjawt',
    'peache': 'sglgdgd',
    'rupert' : 'bstpaga'
}
class Bus(BotPlugin):
    """Errbot plugin reporting upcoming bus departures, scraped from the
    nextbuses.mobi mobile site."""
    def activate(self):
        # Start an APScheduler instance used by the bus_remind command.
        super(Bus, self).activate()
        self.sched = Scheduler(coalesce=True)
        self.sched.start()
    @botcmd(split_args_with=' ')
    def bus(self, mess, args):
        # !bus <stop> [<route>] -- yield one line per upcoming departure.
        argsLength = len(args)
        if argsLength < 2 :
            # No route given: default to route 49.
            route = 49
        else :
            route = args[1]
        now = arrow.now()
        t = self.next_buses(*args)
        buses = []
        if t:
            for bus in t:
                buses.append( 'No. %s bus leaves from %s %s' % (
                        route,
                        args[0],
                        bus.humanize(now)
                    )
                )
        for s in buses:
            yield s
    @botcmd(split_args_with=' ')
    def bus_remind(self, mess, args):
        # !bus_remind <stop> [<route>] -- schedule a reminder 10 minutes
        # before the next departure.
        t = self.next_bus(*args)
        reminder = t.replace(minutes=-10)
        remind = partial(self.remind, mess, args)
        self.sched.add_date_job(remind, reminder.naive)
        return "%s: you'll be reminded %s" % (
            mess.getMuckNick(),
            reminder.humanize()
        )
    def remind(self, mess, args):
        # Scheduler callback: re-query the timetable and announce the next
        # departure back on the channel the request came from.
        now = arrow.now()
        t = self.next_bus(args[0], args[1])
        if t:
            self.send(
                mess.getFrom(),
                '%s: the next no. %s bus leaves from %s %s' % (
                    mess.getMuckNick(),
                    args[1],
                    args[0],
                    t.humanize(now)
                ),
                message_type=mess.getType()
            )
    def parse_timetable(self, stop, route):
        # Fetch and parse the departure list for a stop/route.  Friendly
        # stop names are first mapped through STOPS to site codes.
        if stop in STOPS:
            stop = STOPS[stop]
        url = posixpath.join(
            "http://www.nextbuses.mobi",
            "WebView/BusStopSearch/BusStopSearchResults/",
            stop
        )
        res = requests.get(
            url,
            params={'searchType': 'route', 'searchFilter': route}
        )
        soup = BeautifulSoup(res.text)
        bus_stops = soup.findAll('table', {'class': 'BusStops'})
        times = bus_stops[0].findAll('p', {'class': 'Stops'}) #should loop instead of return one
        return times
    def next_bus(self, stop, route=49, time=0):
        # Return the first departure later than `time` minutes from now,
        # or False when none can be parsed.
        times = self.parse_timetable(stop, route)
        now = arrow.now()
        then = now.replace(minutes=+int(time))
        nextbuses = []
        for i in times:
            logging.info(i.text)
            if 'DUE' in i.text:
                # "DUE" entries carry no usable time; skip them.
                continue
            elif ';at ' in i.text:
                # Absolute "at HH:MM" form.
                t = i.text.split('at ')[-1].strip().split(':')
                next = now.replace(hour=int(t[0]), minute=int(t[1]))
                logging.info(next)
            else:
                # Relative "in N mins" form.
                t = i.text.split('in ')[-1].strip().split()
                next = now.replace(minutes=int(t[0]))
                logging.info(next)
            if next > then:
                return next
        return False
    def next_buses(self,stop, route=49,time=0):
        # Like next_bus, but collect every parseable departure time and
        # return the whole list (or False when nothing was parsed).
        times = self.parse_timetable(stop,route)
        now = arrow.now()
        then = now.replace(minutes=+int(time))
        buses = []
        for i in times:
            logging.info(i.text)
            if 'DUE' in i.text:
                continue
            elif ';at ' in i.text:
                t = i.text.split('at ')[-1].strip().split(':')
                if t[1].find('(') == -1:
                    # Skip annotated times like "12:(est)"; parse plain HH:MM.
                    logging.info("replacing hour with %s and minute with %s" % (t[0], t[1]))
                    next = now.replace(hour=int(t[0]), minute=int(t[1]))
                    buses.append(next)
                    logging.info("Next bus parsed is %s" % next)
            else:
                t = i.text.split('in ')[-1].strip().split()
                next = now.replace(minutes=int(t[0]))
                buses.append(next)
                logging.info(next)
        if len(buses) != 0:
            return buses;
        return False
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\nplt.ion()\nfrom astropy.io import fits\nfrom fitstools import common_header\nfrom html_plot import plotter\n\ndef unpack_xy(use_args='all', preserve=False):\n d... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\nplt.ion()\nfrom astropy.io import fits\nfrom fitstools import common_header\nfrom html_plot import plotter\n\ndef unpack_xy(use_args='all', preserv... | ```python
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
plt.ion()
from astropy.io import fits
from fitstools import common_header
from html_plot import plotter
def unpack_xy(use_args='all', preserve=False):
    """Decorator factory that coerces (x, y[, yerr]) tuples into curve objects.

    use_args: 'all' or a list of positional-argument indices to coerce.
    preserve: when True, the wrapped function's returned tuple is re-packed
    into the same type as the last coerced argument (currently only curve).
    """
    def decorator(f):
        def wrapper(use_args, *args, **kwargs):
            args = list(args)
            if use_args == 'all':
                # Consider every positional argument for coercion.
                use_args = [i for i in range(len(args))]
            dtype = None
            for i in use_args:
                #if isinstance(args[i], spectrum):
                #    if d<2:
                #        dtype = lambda w, f, ferr=None, header=None: spectrum(w, f, ferr, header) #Tried adding spectrum object to
                #        d = 2
                if isinstance(args[i], curve):
                    # Remember the constructor so the result can be re-packed.
                    dtype = lambda x, y, yerr=None: curve(x, y, yerr)
                else:
                    try:
                        iter(args[i])
                    except TypeError:
                        # Not iterable (e.g. a scalar): leave it untouched.
                        continue
                    if len(args[i]) == 3:
                        # (x, y, yerr) triple.
                        x, y, yerr = args[i]
                        args[i] = curve(x, y, yerr)
                    elif len(args[i]) == 2:
                        # (x, y) pair, no errors.
                        x, y = args[i]
                        args[i] = curve(x, y)
                    else:
                        continue
            res = f(*args, **kwargs)
            if preserve and dtype != None:
                # Re-pack the raw returned tuple into the input's type.
                res = dtype(*res)
            return res
        return lambda *args, **kwargs: wrapper(use_args, *args, **kwargs)
    return decorator
class curve:
    """A sorted (x, y) data series with optional per-point y errors.

    Arithmetic between curves interpolates both operands onto a common
    x grid (via math_helper) and combines the y errors.
    """
    def __init__(self, x, y, yerr=None):
        sort_i = np.argsort(x) #Sort data by x.
        self.x = np.asarray(x)[sort_i]
        self.y = np.asarray(y)[sort_i]
        if type(yerr) == type(None):
            # No errors supplied: store zeros of matching shape.
            self.yerr = np.zeros_like(y)[sort_i]
        else:
            self.yerr = np.asarray(yerr)[sort_i]
    def get_x(self):
        return self.x
    def get_y(self):
        return self.y
    def get_yerr(self):
        return self.yerr
    def get_data(self):
        # The full (x, y, yerr) triple.
        return self.x, self.y, self.yerr
    @unpack_xy()
    def math_helper(c1, c2, **kwargs):
        # Interpolate the two operands onto a shared x grid.  A scalar c2
        # is broadcast to a constant curve over c1's grid.
        if isinstance(c2, curve):
            x_interp = get_x_interp([c1.x, c2.x], **kwargs)
            c1_y_interp = interp1d(c1.x, c1.y)(x_interp)
            c1_yerr_interp = interp1d(c1.x, c1.yerr)(x_interp)
            c1_interp = curve(x_interp, c1_y_interp, c1_yerr_interp)
            c2_y_interp = interp1d(c2.x, c2.y)(x_interp)
            c2_yerr_interp = interp1d(c2.x, c2.yerr)(x_interp)
            c2_interp = curve(x_interp, c2_y_interp, c2_yerr_interp)
            return c1_interp, c2_interp
        else:
            return c1, curve(c1.x, c2*np.ones_like(c1.y))
    def __add__(self, other, **kwargs):
        self_interp, other_interp = curve.math_helper(self, other, **kwargs)
        x_interp = self_interp.x
        y_interp = self_interp.y+other_interp.y
        # Errors add in quadrature.
        yerr_interp = (self_interp.yerr**2+other_interp.yerr**2)**0.5
        return curve(x_interp, y_interp, yerr_interp)
    def __sub__(self, other, **kwargs):
        self_interp, other_interp = curve.math_helper(self, other, **kwargs)
        x_interp = self_interp.x
        y_interp = self_interp.y-other_interp.y
        yerr_interp = (self_interp.yerr**2+other_interp.yerr**2)**0.5
        return curve(x_interp, y_interp, yerr_interp)
    def __mul__(self, other, **kwargs):
        self_interp, other_interp = curve.math_helper(self, other, **kwargs)
        x_interp = self_interp.x
        y_interp = self_interp.y*other_interp.y
        # NOTE(review): both terms below scale by other_interp.y; standard
        # product error propagation would scale the second term by
        # self_interp.y instead -- confirm whether this is intended.
        yerr_interp = ((self_interp.yerr*other_interp.y)**2 + (other_interp.yerr*other_interp.y)**2)**0.5
        return curve(x_interp, y_interp, yerr_interp)
    def __div__(self, other, **kwargs):
        # Python 2 division hook; same error formula as __mul__ (see note).
        self_interp, other_interp = curve.math_helper(self, other, **kwargs)
        x_interp = self_interp.x
        y_interp = self_interp.y/other_interp.y
        yerr_interp = ((self_interp.yerr*other_interp.y)**2 + (other_interp.yerr*other_interp.y)**2)**0.5
        return curve(x_interp, y_interp, yerr_interp)
def get_x_interp(x_arrs, x_interp=None, x_interp_i=None, dx=None, **kwargs):
    """Build a common x grid for interpolating several curves.

    Resolution order:
    1. an explicit ``x_interp`` sequence is returned unchanged;
    2. ``x_interp_i`` selects one of ``x_arrs`` as the grid;
    3. ``dx`` produces a uniform grid over the overlapping x range;
    4. otherwise an adaptive grid is grown, stepping by the average local
       spacing of the input arrays around the current position.

    Parameters
    ----------
    x_arrs : sequence of sequences
        The x arrays of the curves being combined (each sorted ascending).
    x_interp : sequence, optional
        Explicit grid to use verbatim.
    x_interp_i : int, optional
        Index into ``x_arrs`` of the array to adopt as the grid.
    dx : float, optional
        Fixed spacing for a uniform grid.

    Returns
    -------
    The x grid (list or ndarray depending on the branch taken).
    """
    if x_interp is None:
        try:
            x_interp = x_arrs[x_interp_i]
        # BUG FIX: this was `except TypeError, IndexError` -- the Python 2
        # form that catches ONLY TypeError and binds it to the name
        # IndexError, so an out-of-range x_interp_i escaped.  The tuple
        # form catches both as intended.
        except (TypeError, IndexError):
            low = max([min(x_arr) for x_arr in x_arrs]) #Find the lowest x value
            high = min([max(x_arr) for x_arr in x_arrs]) #Find the highest x value
            if dx is not None:
                x_interp = np.arange(low, high, dx)
            else:
                x_interp = []
                num_x = len(x_arrs)
                # Per-array cursor: index of the first x >= current_x.
                x_i_list = [0]*num_x
                current_x = low
                while current_x < high:
                    x_interp.append(current_x)
                    avg_dx = 0
                    n = 0
                    for i, x in enumerate(x_arrs):
                        indx = x_i_list[i]
                        # Advance this array's cursor up to current_x.
                        while indx < len(x) and x[indx] < current_x:
                            indx += 1
                        x_i_list[i] = int(indx)
                        try:
                            avg_dx += abs(x[indx+1] - x[indx])
                            n += 1
                        except IndexError:
                            # Cursor ran off this array's end; it no longer
                            # contributes to the local spacing estimate.
                            # (Was a bare `except:`; only IndexError is the
                            # expected failure here.)
                            pass
                    # Assumes n > 0 on the first pass (some array has two
                    # points past `low`); otherwise last_dx is unbound -- a
                    # pre-existing limitation, kept for compatibility.
                    avg_dx = avg_dx/n if n > 0 else last_dx
                    current_x += avg_dx
                    last_dx = avg_dx
    return x_interp
@unpack_xy()
def interp_helper(*xy_curves, **kwargs):
    """Interpolate every input curve onto one shared x grid.

    Returns (x_interp, y_interp_arrs, yerr_interp_arrs) where the two
    arrays have one row per input curve.  kwargs are forwarded to
    get_x_interp to control how the shared grid is built.

    NOTE(review): fill_value is given but bounds_error is left at its
    default (True), so out-of-range points raise rather than become
    NaN -- confirm whether NaN padding was intended.
    """
    x_arrs = [c.get_x() for c in xy_curves]
    y_arrs = [c.get_y() for c in xy_curves]
    yerr_arrs = [c.get_yerr() for c in xy_curves]
    x_interp = get_x_interp(x_arrs=x_arrs, **kwargs)
    y_interp_arrs = np.zeros((len(y_arrs), len(x_interp)))
    for i in range(len(x_arrs)):
        y_interp_arrs[i,:] = interp1d(x_arrs[i], y_arrs[i], fill_value=(np.nan, np.nan))(x_interp)
    yerr_interp_arrs = np.zeros((len(yerr_arrs), len(x_interp)))
    for i in range(len(x_arrs)):
        yerr_interp_arrs[i,:] = interp1d(x_arrs[i], yerr_arrs[i], fill_value=(np.nan, np.nan))(x_interp)
    return x_interp, y_interp_arrs, yerr_interp_arrs
@unpack_xy(preserve=True)
def interp_add(*spectra, **kwargs):
    """Interpolate curves/spectra onto a common grid and sum them.

    Returns (x, y, yerr): the NaN-aware sum and the quadrature sum of
    the individual uncertainties.
    """
    x_interp, y_interp_arrs, yerr_interp_arrs = interp_helper(*spectra, **kwargs)
    y_interp = np.nansum(y_interp_arrs, axis=0)
    # Propagate uncertainties in quadrature.
    yerr_interp = np.nansum([yerr**2 for yerr in yerr_interp_arrs], axis=0)**0.5
    # BUG FIX: yerr_interp was computed but dropped from the return value,
    # unlike interp_median/interp_rmean; include it so summed curves keep
    # their uncertainties (curve/spectrum accept yerr as an optional third
    # argument, so this stays backward-compatible).
    return x_interp, y_interp, yerr_interp
@unpack_xy(preserve=True)
def interp_mean(*spectra, **kwargs):
    """Interpolate curves/spectra onto a common grid and average them.

    Returns (x, y, yerr): the NaN-aware mean and the quadrature-summed
    uncertainty divided by the number of curves (standard-error form,
    matching interp_median).
    """
    x_interp, y_interp_arrs, yerr_interp_arrs = interp_helper(*spectra, **kwargs)
    y_interp = np.nanmean(y_interp_arrs, axis=0)
    # BUG FIX: N was never defined here, so this function always raised
    # NameError at runtime; define it as interp_median does, and return
    # the propagated error alongside x and y for consistency.
    N = len(y_interp_arrs)
    yerr_interp = np.nansum([yerr**2 for yerr in yerr_interp_arrs], axis=0)**0.5/N
    return x_interp, y_interp, yerr_interp
def robust_mean(y_vals, y_errs, m=5):
    """Error-weighted mean that ignores outliers.

    Values farther than ``m`` times their own uncertainty from the median
    are discarded; the survivors are averaged with weights 1/err.  If the
    weighting fails (ZeroDivisionError from zero errors) the plain mean of
    the survivors is used, and if nothing survives the clipping the plain
    mean of everything is returned.
    """
    values = np.array(y_vals)
    errors = np.array(y_errs)
    center = np.nanmedian(values)
    mask = abs(values - center) < m * errors
    kept = values[mask]
    if len(kept) == 0:
        # Clipping removed everything: fall back to the unweighted mean.
        return np.nanmean(values)
    try:
        return np.average(kept, weights=1/errors[mask])
    except ZeroDivisionError:
        return np.nanmean(kept)
@unpack_xy(preserve=True)
def interp_rmean(*spectra, **kwargs):
    """Combine curves/spectra with an outlier-resistant weighted mean.

    Each grid point is averaged across inputs with robust_mean; the
    errors are the quadrature sum of the inputs' errors.  Returns
    (x, y, yerr).
    """
    x_interp, y_interp_arrs, yerr_interp_arrs = interp_helper(*spectra, **kwargs)
    # Per-point robust mean across all input curves.
    y_interp = [robust_mean([y[i] for y in y_interp_arrs], [yerr[i] for yerr in yerr_interp_arrs]) for i in range(len(x_interp))]
    yerr_interp = np.nansum([yerr**2 for yerr in yerr_interp_arrs], axis=0)**0.5
    return x_interp, y_interp, yerr_interp
@unpack_xy(preserve=True)
def interp_median(*spectra, **kwargs):
    """Combine curves/spectra by taking the per-point median.

    Returns (x, y, yerr); the error is the quadrature sum scaled by
    1.253/N (1.253 is presumably sqrt(pi/2), the usual standard-error
    penalty of a median relative to a mean -- confirm).
    """
    x_interp, y_interp_arrs, yerr_interp_arrs = interp_helper(*spectra, **kwargs)
    y_interp = np.nanmedian(y_interp_arrs, axis=0)
    N = len(y_interp_arrs)
    yerr_interp = 1.253*np.nansum([yerr**2 for yerr in yerr_interp_arrs], axis=0)**0.5/N
    return x_interp, y_interp, yerr_interp
class spectrum(curve):
    """A curve specialised for spectra: x is wavelength, y is flux.

    Carries an optional FITS header, merges headers on arithmetic via
    common_header, and can persist itself to a multi-extension FITS file.
    """
    def __init__(self, wavelength, flux=None, flux_err=None, header=None):
        if type(flux) == type(None) and isinstance(wavelength, curve):
            # Allow construction straight from an existing curve object.
            input_curve = wavelength
            curve.__init__(self, *input_curve.get_data())
        else:
            curve.__init__(self, wavelength, flux, flux_err)
        self.header = header
    def set_header(self, new_header):
        self.header = new_header
    def get_wavelength(self):
        return self.x
    def get_flux(self):
        return self.y
    def get_flux_err(self):
        return self.yerr
    def get_data(self):
        # Returned as a list here (curve.get_data returns a tuple).
        return [self.x, self.y, self.yerr]
    def get_header(self):
        return self.header
    def __add__(self, other, header_i=None):
        # NOTE(review): if header_i is ever not None, `header` is unbound
        # below and this raises NameError -- confirm header_i is vestigial.
        if header_i == None:
            try:
                headers = [self.header, other.header]
                header = common_header(headers)
            except AttributeError:
                # `other` has no header (plain curve or scalar).
                header = self.header
        return spectrum(curve.__add__(self, other), header=header)
    def __sub__(self, other, header_i=None):
        if header_i == None:
            try:
                headers = [self.header, other.header]
                header = common_header(headers)
            except AttributeError:
                header = self.header
        return spectrum(curve.__sub__(self, other), header=header) #None is temp, REMOVE SOON
    def __mul__(self, other, header_i=None):
        if header_i == None:
            try:
                headers = [self.header, other.header]
                header = common_header(headers)
            except AttributeError:
                header = self.header
        return spectrum(curve.__mul__(self, other), header=header)
    def __div__(self, other, header_i=None):
        # Python 2 division hook.
        if header_i == None:
            try:
                headers = [self.header, other.header]
                header = common_header(headers)
            except AttributeError:
                header = self.header
        return spectrum(curve.__div__(self, other), header=header)
    def save(self, savepath):
        # Write a FITS file with FLUX as the primary HDU plus WAVELENGTH
        # and FLUX_ERR image extensions.  clobber=True overwrites.
        flux = fits.PrimaryHDU(self.get_flux(), self.get_header())
        flux.header['EXTNAME'] = 'FLUX'
        wavelength = fits.ImageHDU(self.get_wavelength())
        wavelength.header['EXTNAME'] = 'WAVELENGTH'
        flux_err = fits.ImageHDU(self.get_flux_err())
        flux_err.header['EXTNAME'] = 'FLUX_ERR'
        f = fits.HDUList([flux, wavelength, flux_err])
        f.writeto(savepath, clobber=True)
    def plot(self, p=None, **kwargs):
        '''
        #Old matplotlib method.
        if ax == None:
            fig, ax = plt.subplots()
            ax.set_xlabel('Wavelength ($\AA$)')
            ax.set_ylabel('Flux')
        ax.plot(self.x, self.y, **kwargs)
        if type(self.yerr) != type(None):
            ax.fill_between(self.x, self.y-self.yerr, self.y+self.yerr, facecolor='cornflowerblue', linewidth=0.0)
        return ax
        '''
        # Plot flux vs wavelength on an html_plot plotter, shading the
        # +/- yerr band around the line.  A new plotter is created (with
        # axis labels) only when none is passed in.
        if p == None:
            p = plotter()
            p.set_xlabel('Wavelength (Ang)')
            p.set_ylabel('Flux')
        p.line(self.x, self.y, **kwargs)
        if type(self.yerr) != type(None):
            # Match the error band's color to the line color when given.
            if 'line_color' in kwargs:
                color = kwargs['line_color']
            else:
                color = 'blue'
            p.fill_between(self.x, self.y-self.yerr, self.y+self.yerr, line_width=0.0, fill_color=color, line_color=color, fill_alpha=0.2, line_alpha=0.2)
        return p
def sum_spectra(spectra, header=None, **kwargs):
    """Sum a sequence of spectra on a common wavelength grid.

    Keyword arguments are forwarded to the grid construction (see
    get_x_interp).  TODO: derive a combined header when none is given.
    """
    if header == None:
        # Combine headers somehow
        pass
    combined = interp_add(*spectra, **kwargs)
    return spectrum(combined, header=header)
def median_spectra(spectra, header=None, **kwargs):
    """Median-combine a sequence of spectra on a common wavelength grid.

    Keyword arguments are forwarded to the grid construction (see
    get_x_interp).  TODO: derive a combined header when none is given.
    """
    if header == None:
        # Combine headers somehow
        pass
    combined = interp_median(*spectra, **kwargs)
    return spectrum(combined, header=header)
def mean_spectra(spectra, header=None, **kwargs):
    """Mean-combine a sequence of spectra on a common wavelength grid.

    Keyword arguments are forwarded to the grid construction (see
    get_x_interp).  TODO: derive a combined header when none is given.
    """
    if header == None:
        # Combine headers somehow
        pass
    combined = interp_mean(*spectra, **kwargs)
    return spectrum(combined, header=header)
def rmean_spectra(spectra, header=None, **kwargs):
    """Robust-mean-combine a sequence of spectra on a common grid.

    Keyword arguments are forwarded to the grid construction (see
    get_x_interp).  TODO: derive a combined header when none is given.
    """
    if header == None:
        # Combine headers somehow
        pass
    combined = interp_rmean(*spectra, **kwargs)
    return spectrum(combined, header=header)
def scale_spectra(spectra, method='median'):
    """Rescale each spectrum so its flux statistic matches the group's.

    With method='median' (the only method implemented), every spectrum is
    multiplied so that its median flux equals the median of the per-spectrum
    median fluxes.  Returns a new list; the inputs are not modified.
    """
    if method == 'median':
        statistic = np.nanmedian
    # Common target level: the statistic of the per-spectrum statistics.
    target = statistic([statistic(sp.get_flux()) for sp in spectra])
    return [sp * (target / statistic(sp.get_flux())) for sp in spectra]
``` |
[
{
"content": "Write the code verbatim:\n```python\n# -*- coding: utf-8 -*-\nimport unittest\nimport pathlib2 as pathlib\nfrom refmanage import BibFile\nfrom refmanage.ref_exceptions import UnparseableBibtexError\nfrom pybtex.database import BibliographyData\n\n\n# Base classes\n# ============\nclass Base(unitte... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport unittest\nimport pathlib2 as pathlib\nfrom refmanage import BibFile\nfrom refmanage.ref_exceptions import UnparseableBibtexError\nfrom pybtex.database import BibliographyData\n\n\n# Base classes\n# ============\nc... | ```python
# -*- coding: utf-8 -*-
import unittest
import pathlib2 as pathlib
from refmanage import BibFile
from refmanage.ref_exceptions import UnparseableBibtexError
from pybtex.database import BibliographyData
# Base classes
# ============
class Base(unittest.TestCase):
    """
    Base class for tests
    This class is intended to be subclassed so that the same `setUp` method does not have to be rewritten for each class containing tests.
    """
    def setUp(self):
        """
        Create `Path`s to various control data
        """
        # Fixture files: an empty file, files containing one and two valid
        # BibTeX entries, an unparseable file, and a file mixing one valid
        # entry with one invalid one.
        self.empty = pathlib.Path("test/controls/empty.bib")
        self.one = pathlib.Path("test/controls/one.bib")
        self.two = pathlib.Path("test/controls/two.bib")
        self.invalid = pathlib.Path("test/controls/invalid.bib")
        self.one_valid_one_invalid = pathlib.Path("test/controls/one_valid_one_invalid.bib")
class Instantiation(Base):
    """
    Test all aspects of instantiating an object
    Includes input of wrong type, input outside of a bound, etc.
    """
    def test_no_input(self):
        """
        refmanage.BibFile should raise TypeError if instantiated with no input
        """
        with self.assertRaises(TypeError):
            BibFile()
    def test_invalid_bibtex(self):
        """
        refmanage.BibFile should raise UnparseableBibtexError if instantiated with a path to an unparseable file.
        """
        with self.assertRaises(UnparseableBibtexError):
            BibFile(self.invalid)
    def test_one_valid_one_invalid_bib_type(self):
        """
        refmanage.BibFile should raise UnparseableBibtexError if instantiated with a path to a file containing both valid and invalid BibTeX
        """
        # Even a single bad entry makes the whole file unparseable.
        with self.assertRaises(UnparseableBibtexError):
            BibFile(self.one_valid_one_invalid)
class Attributes(Base):
    """
    Test attributes of BibFile
    These tests include type checks, setting immutable attributes, etc.
    """
    # Type checking
    # =============
    def test_path_type(self):
        """
        refmanage.BibFile.path should be of type `pathlib.Path`
        """
        b = BibFile(self.empty)
        self.assertIsInstance(b.path, pathlib.Path)
    def test_src_txt_type(self):
        """
        refmanage.BibFile.src_txt should be of type unicode
        """
        # NOTE(review): `unicode` is Python 2-only; under Python 3 this
        # would be `str`.
        b = BibFile(self.empty)
        self.assertIsInstance(b.src_txt, unicode)
    def test_bib_type(self):
        """
        refmanage.BibFile.bib should be of type `pybtex.database.BibliographyData`
        """
        b = BibFile(self.two)
        self.assertIsInstance(b.bib, BibliographyData)
    # Immutability
    # ============
    # The `path`, `bib`, and `src_txt` should be immutable once the `BibFile` object has been created. In other words, these attributes should not be changeable after the fact.
    def test_path_immutability(self):
        """
        Attempting to set `refmanage.BibFile.path` should raise AttributeError
        """
        b = BibFile(self.one)
        try:
            b.path = self.empty
        except AttributeError:
            # Attempting to set `path` attribute raises an error; test passed!
            pass
        else:
            self.fail("BibFile.path can be set after instantiation")
    def test_bib_immutability(self):
        """
        Attempting to set `refmanage.BibFile.bib` should raise AttributeError
        """
        b = BibFile(self.one)
        bib = b.bib
        try:
            b.bib = bib
        except AttributeError:
            # Attempting to set `path` attribute raises an error; test passed!
            pass
        else:
            self.fail("BibFile.bib can be set after instantiation")
    def test_src_txt_immutability(self):
        """
        Attempting to set `refmanage.BibFile.src_txt` should raise AttributeError
        """
        b = BibFile(self.one)
        try:
            b.src_txt = "legitimate text string"
        except AttributeError:
            # Attempting to set `path` attribute raises an error; test passed!
            pass
        else:
            self.fail("BibFile.src_txt can be set after instantiation")
    # Value checking
    # ==============
    def test_empty_file_bib_length(self):
        """
        refmanage.BibFile.bib should contain zero entries if instantiated with an empty file
        """
        b = BibFile(self.empty)
        self.assertEqual(len(b.bib.entries), 0)
    def test_one_entry_bibtex_file_bib_length(self):
        """
        refmanage.BibFile.bib should contain one entry if instantiated with a file containing valid BibTeX with a single entry
        """
        b = BibFile(self.one)
        self.assertEqual(len(b.bib.entries), 1)
    def test_two_entries_bibtex_file_bib_length(self):
        """
        refmanage.BibFile.bib should contain two entries if instantiated with a file containing valid BibTeX with two entries
        """
        b = BibFile(self.two)
        self.assertEqual(len(b.bib.entries), 2)
class MethodsInput(unittest.TestCase):
    """
    Tests methods which take input parameters
    Tests include: passing invalid input, etc.
    """
    # Placeholder: no such methods are exercised yet.
    pass
class MethodsReturnType(Base):
    """
    Tests methods' output types
    """
    # NOTE(review): `unicode` throughout is Python 2-only (str in Python 3).
    def test_terse_msg(self):
        """
        refmanage.BibFile.terse_msg() should return a unicode
        """
        b = BibFile(self.empty)
        self.assertIsInstance(b.terse_msg(), unicode)
    def test_verbose_msg(self):
        """
        refmanage.BibFile.verbose_msg() should return a unicode
        """
        b = BibFile(self.empty)
        self.assertIsInstance(b.verbose_msg(), unicode)
    def test_test_msg_verbose_false(self):
        """
        refmanage.BibFile.test_msg(verbose=False) should return a unicode
        """
        b = BibFile(self.empty)
        self.assertIsInstance(b.test_msg(False), unicode)
    def test_test_msg_verbose_true(self):
        """
        refmanage.BibFile.test_msg(verbose=True) should return a unicode
        """
        b = BibFile(self.empty)
        self.assertIsInstance(b.test_msg(True), unicode)
class MethodsReturnValues(Base):
    """
    Tests values of methods against known values
    """
    def test_verbose_msg_valid_bibtex(self):
        """
        refmanage.BibFile.verbose_msg() should return a str of zero length for an argument pointing to valid BibTeX.
        """
        # Valid BibTeX produces no diagnostics, hence an empty message.
        b = BibFile(self.two)
        self.assertEqual(len(b.verbose_msg()), 0)
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nA cbz writter\n'''\n\nfrom mgdpck import actions\nimport os\nimport mimetypes\nimport zipfile\n\nclass CbzWritter(actions.AbsWritter):\n @classmethod\n def get_name(cls):\n return 'cbz'\n\n\n ... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nA cbz writter\n'''\n\nfrom mgdpck import actions\nimport os\nimport mimetypes\nimport zipfile\n\nclass CbzWritter(actions.AbsWritter):\n @classmethod\n def get_name(cls):\n ret... | ```python
#! /usr/bin/python
# -*- coding: utf-8 -*-
'''
A cbz writter
'''
from mgdpck import actions
import os
import mimetypes
import zipfile
class CbzWritter(actions.AbsWritter):
    """Writter that packs an exported book into a .cbz (zip) comic archive."""
    @classmethod
    def get_name(cls):
        # Registry key under which this writter is selected.
        return 'cbz'
    def __init__(self, outdir):
        # outdir: directory the .cbz archives are written into.
        # self.out holds the ZipFile of the book currently being exported.
        self.outdir = outdir
        self.out = None
    def done(self):
        # Close the archive opened by export_book, if any.
        if self.out:
            self.out.close()
    def export_book(self, lsb, chapter_min, chapter_max):
        # One archive per exported range, named
        # <short_name>_<first chapter>_<last chapter>.cbz
        self.out_file = os.path.join(self.outdir, "{0.book.short_name}_{1.num:>03}_{2.num:>03}.cbz".format(lsb, chapter_min, chapter_max))
        self.out = zipfile.ZipFile(self.out_file, "w", compression=zipfile.ZIP_DEFLATED)
    def export_cover(self, lsb):
        # Cover entry; the 000_000 prefix sorts it before every page.
        cv_path = "{0:>03}_{0:>03}_{1}{2}".format(0, 'cover',
            mimetypes.guess_extension(lsb.image.mimetype))
        self.out.writestr(cv_path, lsb.image.content)
    def export_chapter(self, ch):
        # Chapters need no archive entry of their own; pages carry the number.
        pass
    def export_page(self, pa):
        # Page entry named <chapter>_<page><ext> so the archive sorts naturally.
        pa_path = "{0.chapter.num:>03}_{0.num:>03}{1}".format(pa,
            mimetypes.guess_extension(pa.image.mimetype))
        self.out.writestr(pa_path, pa.image.content)
# Make the cbz writter available to the actions registry at import time.
actions.register_writter(CbzWritter)
``` |
[
{
"content": "Here is the code content:\n```python\n#\n# Copyright (C) 2009, 2010 UNINETT AS\n#\n# This file is part of Network Administration Visualized (NAV).\n#\n# NAV is free software: you can redistribute it and/or modify it under the\n# terms of the GNU General Public License version 2 as published by the... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n#\n# Copyright (C) 2009, 2010 UNINETT AS\n#\n# This file is part of Network Administration Visualized (NAV).\n#\n# NAV is free software: you can redistribute it and/or modify it under the\n# terms of the GNU General Public License version 2 as ... | ```python
#
# Copyright (C) 2009, 2010 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License version 2 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Coordinate transformation.
Functions for converting between UTM and longitude/latitude, and for
parsing a string representation of UTM.
Derived from code available under GPL from http://pygps.org/
(http://pygps.org/LatLongUTMconversion-1.2.tar.gz)
"""
from math import pi, sin, cos, tan, sqrt
import re
_deg2rad = pi / 180.0
_rad2deg = 180.0 / pi
# Column indices into the _ellipsoid rows below.
_equatorial_radius = 2
_eccentricity_squared = 3
_ellipsoid = [
    # id, Ellipsoid name, Equatorial Radius, square of eccentricity
    # first one is a placeholder only, to allow array indices to match id numbers
    [ -1, "Placeholder", 0, 0],
    [ 1, "Airy", 6377563, 0.00667054],
    [ 2, "Australian National", 6378160, 0.006694542],
    [ 3, "Bessel 1841", 6377397, 0.006674372],
    [ 4, "Bessel 1841 (Nambia] ", 6377484, 0.006674372],
    [ 5, "Clarke 1866", 6378206, 0.006768658],
    [ 6, "Clarke 1880", 6378249, 0.006803511],
    [ 7, "Everest", 6377276, 0.006637847],
    [ 8, "Fischer 1960 (Mercury] ", 6378166, 0.006693422],
    [ 9, "Fischer 1968", 6378150, 0.006693422],
    [ 10, "GRS 1967", 6378160, 0.006694605],
    [ 11, "GRS 1980", 6378137, 0.00669438],
    [ 12, "Helmert 1906", 6378200, 0.006693422],
    [ 13, "Hough", 6378270, 0.00672267],
    [ 14, "International", 6378388, 0.00672267],
    [ 15, "Krassovsky", 6378245, 0.006693422],
    [ 16, "Modified Airy", 6377340, 0.00667054],
    [ 17, "Modified Everest", 6377304, 0.006637847],
    [ 18, "Modified Fischer 1960", 6378155, 0.006693422],
    [ 19, "South American 1969", 6378160, 0.006694542],
    [ 20, "WGS 60", 6378165, 0.006693422],
    [ 21, "WGS 66", 6378145, 0.006694542],
    [ 22, "WGS-72", 6378135, 0.006694318],
    [ 23, "WGS-84", 6378137, 0.00669438]
]
#Reference ellipsoids derived from Peter H. Dana's website-
#http://www.utexas.edu/depts/grg/gcraft/notes/datum/elist.html
#Department of Geography, University of Texas at Austin
#Internet: pdana@mail.utexas.edu
#3/22/95
#Source
#Defense Mapping Agency. 1987b. DMA Technical Report: Supplement to Department of Defense World Geodetic System
#1984 Technical Report. Part I and II. Washington, DC: Defense Mapping Agency
def ll_to_utm(reference_ellipsoid, lat, lon, zone = None):
    """converts lat/long to UTM coords. Equations from USGS Bulletin 1532
    East Longitudes are positive, West longitudes are negative.
    North latitudes are positive, South latitudes are negative
    lat and Long are in decimal degrees
    Written by Chuck Gantz- chuck.gantz@globalstar.com"""
    a = _ellipsoid[reference_ellipsoid][_equatorial_radius]
    ecc_squared = _ellipsoid[reference_ellipsoid][_eccentricity_squared]
    # k0: UTM scale factor at the central meridian.
    k0 = 0.9996
    #Make sure the longitude is between -180.00 .. 179.9
    lon_tmp = (lon+180)-int((lon+180)/360)*360-180 # -180.00 .. 179.9
    lat_rad = lat*_deg2rad
    lon_rad = lon_tmp*_deg2rad
    if zone is None:
        # Standard 6-degree zones, numbered 1..60 eastward from 180W.
        zone_number = int((lon_tmp + 180)/6) + 1
    else:
        zone_number = zone
    # Zone 32 is widened over south-west Norway.
    if lat >= 56.0 and lat < 64.0 and lon_tmp >= 3.0 and lon_tmp < 12.0:
        zone_number = 32
    # Special zones for Svalbard
    if lat >= 72.0 and lat < 84.0:
        if lon_tmp >= 0.0 and lon_tmp < 9.0:
            zone_number = 31
        elif lon_tmp >= 9.0 and lon_tmp < 21.0:
            zone_number = 33
        elif lon_tmp >= 21.0 and lon_tmp < 33.0:
            zone_number = 35
        elif lon_tmp >= 33.0 and lon_tmp < 42.0:
            zone_number = 37
    lon_origin = (zone_number - 1)*6 - 180 + 3 #+3 puts origin in middle of zone
    lon_origin_rad = lon_origin * _deg2rad
    #compute the UTM Zone from the latitude and longitude
    utm_zone = "%d%c" % (zone_number, _utm_letter_designator(lat))
    ecc_prime_squared = (ecc_squared)/(1-ecc_squared)
    # Transverse Mercator series terms (N, T, C, A) from USGS Bulletin 1532.
    N = a/sqrt(1-ecc_squared*sin(lat_rad)*sin(lat_rad))
    T = tan(lat_rad)*tan(lat_rad)
    C = ecc_prime_squared*cos(lat_rad)*cos(lat_rad)
    A = cos(lat_rad)*(lon_rad-lon_origin_rad)
    # M: meridional arc length from the equator to this latitude.
    M = a*((1
            - ecc_squared/4
            - 3*ecc_squared*ecc_squared/64
            - 5*ecc_squared*ecc_squared*ecc_squared/256)*lat_rad
           - (3*ecc_squared/8
              + 3*ecc_squared*ecc_squared/32
              + 45*ecc_squared*ecc_squared*ecc_squared/1024)*sin(2*lat_rad)
           + (15*ecc_squared*ecc_squared/256 + 45*ecc_squared*ecc_squared*ecc_squared/1024)*sin(4*lat_rad)
           - (35*ecc_squared*ecc_squared*ecc_squared/3072)*sin(6*lat_rad))
    # 500000.0: false easting placing the central meridian at 500 km.
    utm_easting = (k0*N*(A+(1-T+C)*A*A*A/6
                        + (5-18*T+T*T+72*C-58*ecc_prime_squared)*A*A*A*A*A/120)
                   + 500000.0)
    utm_northing = (k0*(M+N*tan(lat_rad)*(A*A/2+(5-T+9*C+4*C*C)*A*A*A*A/24
                                          + (61
                                             -58*T
                                             +T*T
                                             +600*C
                                             -330*ecc_prime_squared)*A*A*A*A*A*A/720)))
    if lat < 0:
        utm_northing = utm_northing + 10000000.0; #10000000 meter offset for southern hemisphere
    return (utm_zone, utm_easting, utm_northing)
def _utm_letter_designator(lat):
"""This routine determines the correct UTM letter designator for the given latitude
returns 'Z' if latitude is outside the UTM limits of 84N to 80S
Written by Chuck Gantz- chuck.gantz@globalstar.com"""
if 84 >= lat >= 72: return 'X'
elif 72 > lat >= 64: return 'W'
elif 64 > lat >= 56: return 'V'
elif 56 > lat >= 48: return 'U'
elif 48 > lat >= 40: return 'T'
elif 40 > lat >= 32: return 'S'
elif 32 > lat >= 24: return 'R'
elif 24 > lat >= 16: return 'Q'
elif 16 > lat >= 8: return 'P'
elif 8 > lat >= 0: return 'N'
elif 0 > lat >= -8: return 'M'
elif -8 > lat >= -16: return 'L'
elif -16 > lat >= -24: return 'K'
elif -24 > lat >= -32: return 'J'
elif -32 > lat >= -40: return 'H'
elif -40 > lat >= -48: return 'G'
elif -48 > lat >= -56: return 'F'
elif -56 > lat >= -64: return 'E'
elif -64 > lat >= -72: return 'D'
elif -72 > lat >= -80: return 'C'
else: return 'Z' # if the Latitude is outside the UTM limits
def utm_to_ll(reference_ellipsoid, northing, easting, zone):
"""converts UTM coords to lat/long. Equations from USGS Bulletin 1532
East Longitudes are positive, West longitudes are negative.
North latitudes are positive, South latitudes are negative
lat and lon are in decimal degrees.
Written by Chuck Gantz- chuck.gantz@globalstar.com
Converted to Python by Russ Nelson <nelson@crynwr.com>"""
k0 = 0.9996
a = _ellipsoid[reference_ellipsoid][_equatorial_radius]
ecc_squared = _ellipsoid[reference_ellipsoid][_eccentricity_squared]
e1 = (1-sqrt(1-ecc_squared))/(1+sqrt(1-ecc_squared))
#northern_hemisphere; //1 for northern hemispher, 0 for southern
x = easting - 500000.0 #remove 500,000 meter offset for longitude
y = northing
zone_letter = zone[-1]
zone_number = int(zone[:-1])
if zone_letter >= 'N':
northern_hemisphere = 1 # point is in northern hemisphere
else:
northern_hemisphere = 0 # point is in southern hemisphere
y -= 10000000.0 # remove 10,000,000 meter offset used for southern hemisphere
lon_origin = (zone_number - 1)*6 - 180 + 3 # +3 puts origin in middle of zone
ecc_prime_squared = (ecc_squared)/(1-ecc_squared)
M = y / k0
mu = M/(a*(1-ecc_squared/4-3*ecc_squared*ecc_squared/64-5*ecc_squared*ecc_squared*ecc_squared/256))
phi1_rad = (mu + (3*e1/2-27*e1*e1*e1/32)*sin(2*mu)
+ (21*e1*e1/16-55*e1*e1*e1*e1/32)*sin(4*mu)
+(151*e1*e1*e1/96)*sin(6*mu))
phi1 = phi1_rad*_rad2deg;
N1 = a/sqrt(1-ecc_squared*sin(phi1_rad)*sin(phi1_rad))
T1 = tan(phi1_rad)*tan(phi1_rad)
C1 = ecc_prime_squared*cos(phi1_rad)*cos(phi1_rad)
R1 = a*(1-ecc_squared)/pow(1-ecc_squared*sin(phi1_rad)*sin(phi1_rad), 1.5)
D = x/(N1*k0)
lat = phi1_rad - (N1*tan(phi1_rad)/R1)*(D*D/2-(5+3*T1+10*C1-4*C1*C1-9*ecc_prime_squared)*D*D*D*D/24
+(61+90*T1+298*C1+45*T1*T1-252*ecc_prime_squared-3*C1*C1)*D*D*D*D*D*D/720)
lat = lat * _rad2deg
lon = (D-(1+2*T1+C1)*D*D*D/6+(5-2*C1+28*T1-3*C1*C1+8*ecc_prime_squared+24*T1*T1)
*D*D*D*D*D/120)/cos(phi1_rad)
lon = lon_origin + lon * _rad2deg
return (lat, lon)
def parse_utm(utm_str):
"""Parse UTM coordinates from a string.
utm_str should be a string of the form 'zh n e', where z is a zone
number, h a hemisphere identifier ('N' or 'S') and n and e the
northing and easting. h may be omitted, in which case 'N' is
assumed.
Return value: dictionary with keys (zone, hemisphere, n, e).
"""
default_hemisphere = 'N'
utm_re = (r'^\W*([0-9][0-9])([NS]?)\W+([0-9]*[.]?[0-9]+)\W+'
r'([0-9]*[.]?[0-9]+)\W*$')
m = re.match(utm_re, utm_str)
if m is None:
raise Exception('incorrectly formatted UTM string "' + utm_str)
utm = {}
utm['zone'] = int(m.group(1))
utm['hemisphere'] = m.group(2)
if utm['hemisphere'] == '':
utm['hemisphere'] = default_hemisphere
utm['n'] = float(m.group(3))
utm['e'] = float(m.group(4))
return utm
def utm_str_to_lonlat(utm_str):
"""Convert UTM coordinates in string form (see parse_utm) to a
(longitude,latitude) pair.
"""
utm = parse_utm(utm_str)
(lat, lon) = utm_to_ll(23, utm['n'], utm['e'],
'%d%s'%(utm['zone'], utm['hemisphere']))
return (lon, lat)
``` |
[
{
"content": "Here is a code file:\n```python\n\"\"\"\nInternal tasks are tasks that are started from the teuthology infrastructure.\nNote that there is no corresponding task defined for this module. All of\nthe calls are made from other modules, most notably teuthology/run.py\n\"\"\"\nfrom cStringIO import St... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n\"\"\"\nInternal tasks are tasks that are started from the teuthology infrastructure.\nNote that there is no corresponding task defined for this module. All of\nthe calls are made from other modules, most notably teuthology/run.py\n\"\"\"\nfrom cSt... | ```python
"""
Internal tasks are tasks that are started from the teuthology infrastructure.
Note that there is no corresponding task defined for this module. All of
the calls are made from other modules, most notably teuthology/run.py
"""
from cStringIO import StringIO
import contextlib
import logging
import os
import time
import yaml
import subprocess
from teuthology import lockstatus
from teuthology import lock
from teuthology import misc
from teuthology import provision
from teuthology.job_status import get_status, set_status
from teuthology.config import config as teuth_config
from teuthology.parallel import parallel
from teuthology.suite import has_packages_for_distro
from ..orchestra import cluster, remote, run
from .. import report
log = logging.getLogger(__name__)
@contextlib.contextmanager
def base(ctx, config):
"""
Create the test directory that we will be using on the remote system
"""
log.info('Creating test directory...')
testdir = misc.get_testdir(ctx)
run.wait(
ctx.cluster.run(
args=[
'mkdir', '-m0755', '--',
testdir,
],
wait=False,
)
)
try:
yield
finally:
log.info('Tidying up after the test...')
# if this fails, one of the earlier cleanups is flawed; don't
# just cram an rm -rf here
run.wait(
ctx.cluster.run(
args=[
'rmdir',
'--',
testdir,
],
wait=False,
),
)
@contextlib.contextmanager
def lock_machines(ctx, config):
"""
Lock machines. Called when the teuthology run finds and locks
new machines. This is not called if the one has teuthology-locked
machines and placed those keys in the Targets section of a yaml file.
"""
# It's OK for os_type and os_version to be None here. If we're trying
# to lock a bare metal machine, we'll take whatever is available. If
# we want a vps, defaults will be provided by misc.get_distro and
# misc.get_distro_version in provision.create_if_vm
os_type = ctx.config.get("os_type")
os_version = ctx.config.get("os_version")
arch = ctx.config.get('arch')
log.info('Locking machines...')
assert isinstance(config[0], int), 'config[0] must be an integer'
machine_type = config[1]
how_many = config[0]
# We want to make sure there are always this many machines available
to_reserve = 5
# change the status during the locking process
report.try_push_job_info(ctx.config, dict(status='waiting'))
while True:
# get a candidate list of machines
machines = lock.list_locks(machine_type=machine_type, up=True,
locked=False, count=how_many + to_reserve)
if machines is None:
if ctx.block:
log.error('Error listing machines, trying again')
time.sleep(20)
continue
else:
raise RuntimeError('Error listing machines')
# make sure there are machines for non-automated jobs to run
if len(machines) < to_reserve + how_many and ctx.owner.startswith('scheduled'):
if ctx.block:
log.info(
'waiting for more machines to be free (need %s + %s, have %s)...',
to_reserve,
how_many,
len(machines),
)
time.sleep(10)
continue
else:
assert 0, ('not enough machines free; need %s + %s, have %s' %
(to_reserve, how_many, len(machines)))
newly_locked = lock.lock_many(ctx, how_many, machine_type, ctx.owner,
ctx.archive, os_type, os_version, arch)
if not newly_locked and not isinstance(newly_locked, list):
raise RuntimeError('Invalid parameters specified')
if len(newly_locked) == how_many:
vmlist = []
for lmach in newly_locked:
if misc.is_vm(lmach):
vmlist.append(lmach)
if vmlist:
log.info('Waiting for virtual machines to come up')
keys_dict = dict()
loopcount = 0
while len(keys_dict) != len(vmlist):
loopcount += 1
time.sleep(10)
keys_dict = lock.ssh_keyscan(vmlist)
log.info('virtual machine is still unavailable')
if loopcount == 40:
loopcount = 0
log.info('virtual machine(s) still not up, ' +
'recreating unresponsive ones.')
for guest in vmlist:
if guest not in keys_dict.keys():
log.info('recreating: ' + guest)
full_name = misc.canonicalize_hostname(guest)
provision.destroy_if_vm(ctx, full_name)
provision.create_if_vm(ctx, full_name)
if lock.do_update_keys(keys_dict):
log.info("Error in virtual machine keys")
newscandict = {}
for dkey in newly_locked.iterkeys():
stats = lockstatus.get_status(dkey)
newscandict[dkey] = stats['ssh_pub_key']
ctx.config['targets'] = newscandict
else:
ctx.config['targets'] = newly_locked
locked_targets = yaml.safe_dump(
ctx.config['targets'],
default_flow_style=False
).splitlines()
log.info('\n '.join(['Locked targets:', ] + locked_targets))
# successfully locked machines, change status back to running
report.try_push_job_info(ctx.config, dict(status='running'))
break
elif not ctx.block:
assert 0, 'not enough machines are available'
log.warn('Could not lock enough machines, waiting...')
time.sleep(10)
try:
yield
finally:
if ctx.config.get('unlock_on_failure', False) or \
get_status(ctx.summary) == 'pass':
log.info('Unlocking machines...')
for machine in ctx.config['targets'].iterkeys():
lock.unlock_one(ctx, machine, ctx.owner)
def save_config(ctx, config):
"""
Store the config in a yaml file
"""
log.info('Saving configuration')
if ctx.archive is not None:
with file(os.path.join(ctx.archive, 'config.yaml'), 'w') as f:
yaml.safe_dump(ctx.config, f, default_flow_style=False)
def check_lock(ctx, config):
"""
Check lock status of remote machines.
"""
if not teuth_config.lock_server or ctx.config.get('check-locks') is False:
log.info('Lock checking disabled.')
return
log.info('Checking locks...')
for machine in ctx.config['targets'].iterkeys():
status = lockstatus.get_status(machine)
log.debug('machine status is %s', repr(status))
assert status is not None, \
'could not read lock status for {name}'.format(name=machine)
assert status['up'], 'machine {name} is marked down'.format(name=machine)
assert status['locked'], \
'machine {name} is not locked'.format(name=machine)
assert status['locked_by'] == ctx.owner, \
'machine {name} is locked by {user}, not {owner}'.format(
name=machine,
user=status['locked_by'],
owner=ctx.owner,
)
def check_packages(ctx, config):
"""
Checks gitbuilder to determine if there are missing packages for this job.
If there are missing packages, fail the job.
"""
log.info("Checking packages...")
os_type = ctx.config.get("os_type", None)
sha1 = ctx.config.get("sha1", None)
# We can only do this check if there are a defined sha1 and os_type
# in the job config.
if os_type and sha1:
log.info(
"Checking packages for os_type '{os}' and ceph hash '{ver}'".format(
os=os_type,
ver=sha1,
)
)
if not has_packages_for_distro(sha1, os_type):
msg = "Packages for os_type '{os}' and ceph hash '{ver}' not found"
msg = msg.format(
os=os_type,
ver=sha1,
)
log.error(msg)
# set the failure message and update paddles with the status
ctx.summary["failure_reason"] = msg
set_status(ctx.summary, "dead")
report.try_push_job_info(ctx.config, dict(status='dead'))
raise RuntimeError(msg)
else:
log.info(
"Checking packages skipped, missing os_type '{os}' or ceph hash '{ver}'".format(
os=os_type,
ver=sha1,
)
)
@contextlib.contextmanager
def timer(ctx, config):
"""
Start the timer used by teuthology
"""
log.info('Starting timer...')
start = time.time()
try:
yield
finally:
duration = time.time() - start
log.info('Duration was %f seconds', duration)
ctx.summary['duration'] = duration
def connect(ctx, config):
"""
Open a connection to a remote host.
"""
log.info('Opening connections...')
remotes = []
machs = []
for name in ctx.config['targets'].iterkeys():
machs.append(name)
for t, key in ctx.config['targets'].iteritems():
t = misc.canonicalize_hostname(t)
log.debug('connecting to %s', t)
try:
if ctx.config['sshkeys'] == 'ignore':
key = None
except (AttributeError, KeyError):
pass
remotes.append(
remote.Remote(name=t, host_key=key, keep_alive=True, console=None))
ctx.cluster = cluster.Cluster()
if 'roles' in ctx.config:
for rem, roles in zip(remotes, ctx.config['roles']):
assert all(isinstance(role, str) for role in roles), \
"Roles in config must be strings: %r" % roles
ctx.cluster.add(rem, roles)
log.info('roles: %s - %s' % (rem, roles))
else:
for rem in remotes:
ctx.cluster.add(rem, rem.name)
def push_inventory(ctx, config):
if not teuth_config.lock_server:
return
def push():
for rem in ctx.cluster.remotes.keys():
info = rem.inventory_info
lock.update_inventory(info)
try:
push()
except Exception:
log.exception("Error pushing inventory")
def serialize_remote_roles(ctx, config):
"""
Provides an explicit mapping for which remotes have been assigned what roles
So that other software can be loosely coupled to teuthology
"""
if ctx.archive is not None:
with file(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
info_yaml = yaml.safe_load(info_file)
info_file.seek(0)
info_yaml['cluster'] = dict([(rem.name, {'roles': roles}) for rem, roles in ctx.cluster.remotes.iteritems()])
yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
def check_ceph_data(ctx, config):
"""
Check for old /var/lib/ceph directories and detect staleness.
"""
log.info('Checking for old /var/lib/ceph...')
processes = ctx.cluster.run(
args=[
'test', '!', '-e', '/var/lib/ceph',
],
wait=False,
)
failed = False
for proc in processes:
try:
proc.wait()
except run.CommandFailedError:
log.error('Host %s has stale /var/lib/ceph, check lock and nuke/cleanup.', proc.remote.shortname)
failed = True
if failed:
raise RuntimeError('Stale /var/lib/ceph detected, aborting.')
def check_conflict(ctx, config):
"""
Note directory use conflicts and stale directories.
"""
log.info('Checking for old test directory...')
testdir = misc.get_testdir(ctx)
processes = ctx.cluster.run(
args=[
'test', '!', '-e', testdir,
],
wait=False,
)
failed = False
for proc in processes:
try:
proc.wait()
except run.CommandFailedError:
log.error('Host %s has stale test directory %s, check lock and cleanup.', proc.remote.shortname, testdir)
failed = True
if failed:
raise RuntimeError('Stale jobs detected, aborting.')
@contextlib.contextmanager
def archive(ctx, config):
"""
Handle the creation and deletion of the archive directory.
"""
log.info('Creating archive directory...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--', archive_dir,
],
wait=False,
)
)
try:
yield
except Exception:
# we need to know this below
set_status(ctx.summary, 'fail')
raise
finally:
passed = get_status(ctx.summary) == 'pass'
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and passed):
log.info('Transferring archived files...')
logdir = os.path.join(ctx.archive, 'remote')
if (not os.path.exists(logdir)):
os.mkdir(logdir)
for rem in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, rem.shortname)
misc.pull_directory(rem, archive_dir, path)
log.info('Removing archive directory...')
run.wait(
ctx.cluster.run(
args=[
'rm',
'-rf',
'--',
archive_dir,
],
wait=False,
),
)
@contextlib.contextmanager
def sudo(ctx, config):
"""
Enable use of sudo
"""
log.info('Configuring sudo...')
sudoers_file = '/etc/sudoers'
backup_ext = '.orig.teuthology'
tty_expr = r's/^\([^#]*\) \(requiretty\)/\1 !\2/g'
pw_expr = r's/^\([^#]*\) !\(visiblepw\)/\1 \2/g'
run.wait(
ctx.cluster.run(
args="sudo sed -i{ext} -e '{tty}' -e '{pw}' {path}".format(
ext=backup_ext, tty=tty_expr, pw=pw_expr,
path=sudoers_file
),
wait=False,
)
)
try:
yield
finally:
log.info('Restoring {0}...'.format(sudoers_file))
ctx.cluster.run(
args="sudo mv -f {path}{ext} {path}".format(
path=sudoers_file, ext=backup_ext
)
)
@contextlib.contextmanager
def coredump(ctx, config):
"""
Stash a coredump of this system if an error occurs.
"""
log.info('Enabling coredump saving...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
'{adir}/coredump'.format(adir=archive_dir),
run.Raw('&&'),
'sudo', 'sysctl', '-w', 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir),
],
wait=False,
)
)
try:
yield
finally:
run.wait(
ctx.cluster.run(
args=[
'sudo', 'sysctl', '-w', 'kernel.core_pattern=core',
run.Raw('&&'),
# don't litter the archive dir if there were no cores dumped
'rmdir',
'--ignore-fail-on-non-empty',
'--',
'{adir}/coredump'.format(adir=archive_dir),
],
wait=False,
)
)
# set status = 'fail' if the dir is still there = coredumps were
# seen
for rem in ctx.cluster.remotes.iterkeys():
r = rem.run(
args=[
'if', 'test', '!', '-e', '{adir}/coredump'.format(adir=archive_dir), run.Raw(';'), 'then',
'echo', 'OK', run.Raw(';'),
'fi',
],
stdout=StringIO(),
)
if r.stdout.getvalue() != 'OK\n':
log.warning('Found coredumps on %s, flagging run as failed', rem)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
'Found coredumps on {rem}'.format(rem=rem)
@contextlib.contextmanager
def syslog(ctx, config):
"""
start syslog / stop syslog on exit.
"""
if ctx.archive is None:
# disable this whole feature if we're not going to archive the data anyway
yield
return
log.info('Starting syslog monitoring...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'mkdir', '-m0755', '--',
'{adir}/syslog'.format(adir=archive_dir),
],
wait=False,
)
)
CONF = '/etc/rsyslog.d/80-cephtest.conf'
conf_fp = StringIO('''
kern.* -{adir}/syslog/kern.log;RSYSLOG_FileFormat
*.*;kern.none -{adir}/syslog/misc.log;RSYSLOG_FileFormat
'''.format(adir=archive_dir))
try:
for rem in ctx.cluster.remotes.iterkeys():
misc.sudo_write_file(
remote=rem,
path=CONF,
data=conf_fp,
)
conf_fp.seek(0)
run.wait(
ctx.cluster.run(
args=[
'sudo',
'service',
# a mere reload (SIGHUP) doesn't seem to make
# rsyslog open the files
'rsyslog',
'restart',
],
wait=False,
),
)
yield
finally:
log.info('Shutting down syslog monitoring...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm',
'-f',
'--',
CONF,
run.Raw('&&'),
'sudo',
'service',
'rsyslog',
'restart',
],
wait=False,
),
)
# race condition: nothing actually says rsyslog had time to
# flush the file fully. oh well.
log.info('Checking logs for errors...')
for rem in ctx.cluster.remotes.iterkeys():
log.debug('Checking %s', rem.name)
r = rem.run(
args=[
'egrep', '--binary-files=text',
'\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b',
run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),
run.Raw('|'),
'grep', '-v', 'task .* blocked for more than .* seconds',
run.Raw('|'),
'grep', '-v', 'lockdep is turned off',
run.Raw('|'),
'grep', '-v', 'trying to register non-static key',
run.Raw('|'),
'grep', '-v', 'DEBUG: fsize', # xfs_fsr
run.Raw('|'),
'grep', '-v', 'CRON', # ignore cron noise
run.Raw('|'),
'grep', '-v', 'BUG: bad unlock balance detected', # #6097
run.Raw('|'),
'grep', '-v', 'inconsistent lock state', # FIXME see #2523
run.Raw('|'),
'grep', '-v', '*** DEADLOCK ***', # part of lockdep output
run.Raw('|'),
'grep', '-v', 'INFO: possible irq lock inversion dependency detected', # FIXME see #2590 and #147
run.Raw('|'),
'grep', '-v', 'INFO: NMI handler (perf_event_nmi_handler) took too long to run',
run.Raw('|'),
'grep', '-v', 'INFO: recovery required on readonly',
run.Raw('|'),
'head', '-n', '1',
],
stdout=StringIO(),
)
stdout = r.stdout.getvalue()
if stdout != '':
log.error('Error in syslog on %s: %s', rem.name, stdout)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
"'{error}' in syslog".format(error=stdout)
log.info('Compressing syslogs...')
run.wait(
ctx.cluster.run(
args=[
'find',
'{adir}/syslog'.format(adir=archive_dir),
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
def vm_setup(ctx, config):
"""
Look for virtual machines and handle their initialization
"""
all_tasks = [x.keys()[0] for x in ctx.config['tasks']]
need_chef = False
if 'chef' in all_tasks or 'kernel' in all_tasks:
need_chef = True
with parallel() as p:
editinfo = os.path.join(os.path.dirname(__file__),'edit_sudoers.sh')
for rem in ctx.cluster.remotes.iterkeys():
mname = rem.shortname
if misc.is_vm(mname):
r = rem.run(args=['test', '-e', '/ceph-qa-ready',],
stdout=StringIO(),
check_status=False,)
if r.returncode != 0:
p1 = subprocess.Popen(['cat', editinfo], stdout=subprocess.PIPE)
p2 = subprocess.Popen(
[
'ssh',
'-o', 'StrictHostKeyChecking=no',
'-t', '-t',
str(rem),
'sudo',
'sh'
],
stdin=p1.stdout, stdout=subprocess.PIPE
)
_, err = p2.communicate()
if err:
log.info("Edit of /etc/sudoers failed: %s", err)
if need_chef:
p.spawn(_download_and_run_chef, rem)
def _download_and_run_chef(remote_):
"""
Run ceph_qa_chef.
"""
log.info('Running ceph_qa_chef on %s', remote_)
remote_.run(
args=[
'wget', '-q', '-O-',
'http://ceph.com/git/?p=ceph-qa-chef.git;a=blob_plain;f=solo/solo-from-scratch;hb=HEAD',
run.Raw('|'),
'sh',
],
label="run chef solo-from-scratch"
)
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport applications.delivery2.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('delivery2', '0001_initial'),\n... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport applications.delivery2.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('delivery2', '0... | ```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import applications.delivery2.models
class Migration(migrations.Migration):
dependencies = [
('delivery2', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EmailImageTemplate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(max_length=256, verbose_name='\u041f\u0443\u0442\u044c')),
('image', models.ImageField(upload_to=applications.delivery2.models.upload_to, null=True, verbose_name='\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435', blank=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True)),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True)),
],
options={
'ordering': ['-created_at'],
'db_table': 'Delivery2_EmailImageTemplate',
'verbose_name': '\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435 \u0432 \u043f\u0438\u0441\u044c\u043c\u0435',
'verbose_name_plural': '\u0418\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u044f \u0432 \u043f\u0438\u0441\u044c\u043c\u0435',
},
),
migrations.CreateModel(
name='EmailSubject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subject', models.CharField(default='\u0422\u0435\u043c\u0430', max_length=256, verbose_name='\u0422\u0435\u043c\u0430 \u043f\u0438\u0441\u044c\u043c\u0430')),
('chance', models.DecimalField(default=1, verbose_name='\u0412\u0435\u0440\u043e\u044f\u0442\u043d\u043e\u0441\u0442\u044c', max_digits=4, decimal_places=2)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True)),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True)),
],
options={
'ordering': ['-created_at'],
'db_table': 'Delivery2_EmailSubject',
'verbose_name': '\u0422\u0435\u043c\u0430',
'verbose_name_plural': '\u0422\u0435\u043c\u044b',
},
),
migrations.RemoveField(
model_name='subject',
name='delivery',
),
migrations.RemoveField(
model_name='delivery',
name='template',
),
migrations.AddField(
model_name='emailtemplate',
name='name',
field=models.CharField(null=True, default=b'<built-in method now of type object at 0x83c4c20>', max_length=64, blank=True, unique=True, verbose_name='\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435'),
),
migrations.AlterField(
model_name='delivery',
name='task_id',
field=models.CharField(max_length=255, null=True, verbose_name='task id', blank=True),
),
migrations.AlterField(
model_name='emailtemplate',
name='template',
field=models.FileField(upload_to=applications.delivery2.models.upload_to, null=True, verbose_name='\u0428\u0430\u0431\u043b\u043e\u043d', blank=True),
),
migrations.AlterField(
model_name='message',
name='subject',
field=models.ForeignKey(verbose_name='\u0423\u043a\u0430\u0437\u0430\u0442\u0435\u043b\u044c \u043d\u0430 subject', blank=True, to='delivery2.EmailSubject', null=True),
),
migrations.AlterModelTable(
name='emailtemplate',
table='Delivery2_EmailTemplate',
),
migrations.DeleteModel(
name='Subject',
),
migrations.AddField(
model_name='emailsubject',
name='delivery',
field=models.ForeignKey(to='delivery2.Delivery'),
),
migrations.AddField(
model_name='emailimagetemplate',
name='template',
field=models.ForeignKey(related_name='images', verbose_name='\u0428\u0430\u0431\u043b\u043e\u043d', to='delivery2.EmailTemplate'),
),
]
``` |
[
{
"content": "Here is the snippet:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport PyV8\nimport urllib\nfrom urlparse import urlparse\n\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\n\nclass Wappalyzer(object):\n\n def __init__(self, ... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport PyV8\nimport urllib\nfrom urlparse import urlparse\n\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\n\nclass Wappalyzer(object):\n\n def... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import PyV8
import urllib
from urlparse import urlparse
try:
import json
except ImportError:
import simplejson as json
class Wappalyzer(object):
def __init__(self, url):
self.file_dir = os.path.dirname(__file__)
f = open(os.path.join(self.file_dir, '../../share/apps.json'))
data = json.loads(f.read())
f.close()
self.categories = data['categories']
self.apps = data['apps']
self.url = url
def analyze(self):
ctxt = PyV8.JSContext()
ctxt.enter()
f1 = open(os.path.join(self.file_dir, '../php/js/wappalyzer.js'))
f2 = open(os.path.join(self.file_dir, '../php/js/driver.js'))
ctxt.eval(f1.read())
ctxt.eval(f2.read())
f1.close()
f2.close()
host = urlparse(self.url).hostname
html = urllib.urlopen(self.url).read()
data = {'host': host, 'url': self.url, 'html': html, 'headers': {}}
apps = json.dumps(self.apps)
categories = json.dumps(self.categories)
return ctxt.eval("w.apps = %s; w.categories = %s; w.driver.data = %s; w.driver.init();" % (apps, categories, json.dumps(data)))
if __name__ == '__main__':
try:
w = Wappalyzer(sys.argv[1])
print w.analyze()
except IndexError:
print ('Usage: python %s <url>' % sys.argv[0])
``` |
[
{
"content": "```python\n#/*##########################################################################\n# Copyright (C) 2004-2012 European Synchrotron Radiation Facility\n#\n# This file is part of the PyMca X-ray Fluorescence Toolkit developed at\n# the ESRF by the Software group.\n#\n# This toolkit is free sof... | [
{
"content": "<|memory_start|>```python\n#/*##########################################################################\n# Copyright (C) 2004-2012 European Synchrotron Radiation Facility\n#\n# This file is part of the PyMca X-ray Fluorescence Toolkit developed at\n# the ESRF by the Software group.\n#\n# This too... | ```python
#/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This toolkit is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# PyMca is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyMca; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# PyMca follows the dual licensing model of Riverbank's PyQt and cannot be
# used as a free plugin for a non-free program.
#
# Please contact the ESRF industrial unit (industry@esrf.fr) if this license
# is a problem for you.
#############################################################################*/
import types
from PyMca import DataObject
from PyMca import spswrap as sps
DEBUG = 0
SOURCE_TYPE = 'SPS'
class SpsDataSource(object):
def __init__(self, name):
if type(name) not in types.StringTypes:
raise TypeError("Constructor needs string as first argument")
self.name = name
self.sourceName = name
self.sourceType = SOURCE_TYPE
def refresh(self):
pass
def getSourceInfo(self):
"""
Returns information about the Spec version in self.name
to give application possibility to know about it before loading.
Returns a dictionary with the key "KeyList" (list of all available keys
in this source). Each element in "KeyList" is an shared memory
array name.
"""
return self.__getSourceInfo()
def getKeyInfo(self, key):
if key in self.getSourceInfo()['KeyList']:
return self.__getArrayInfo(key)
else:
return {}
def getDataObject(self, key_list, selection=None):
if type(key_list) != types.ListType:
nolist = True
key_list = [key_list]
else:
output = []
nolist = False
if self.name in sps.getspeclist():
sourcekeys = self.getSourceInfo()['KeyList']
for key in key_list:
#a key corresponds to an array name
if key not in sourcekeys:
raise KeyError("Key %s not in source keys" % key)
#array = key
#create data object
data = DataObject.DataObject()
data.info = self.__getArrayInfo(key)
data.info['selection'] = selection
data.data = sps.getdata(self.name, key)
if nolist:
if selection is not None:
scantest = (data.info['flag'] &
sps.TAG_SCAN) == sps.TAG_SCAN
if ((key in ["SCAN_D"]) or scantest) \
and 'cntlist' in selection:
data.x = None
data.y = None
data.m = None
if 'nopts' in data.info['envdict']:
nopts = int(data.info['envdict']['nopts']) + 1
else:
nopts = data.info['rows']
if not 'LabelNames' in data.info:
data.info['LabelNames'] =\
selection['cntlist'] * 1
if 'x' in selection:
for labelindex in selection['x']:
label = data.info['LabelNames'][labelindex]
if label not in data.info['LabelNames']:
raise ValueError("Label %s not in scan labels" % label)
index = data.info['LabelNames'].index(label)
if data.x is None: data.x = []
data.x.append(data.data[:nopts, index])
if 'y' in selection:
for labelindex in selection['y']:
label = data.info['LabelNames'][labelindex]
if label not in data.info['LabelNames']:
raise ValueError("Label %s not in scan labels" % label)
index = data.info['LabelNames'].index(label)
if data.y is None: data.y = []
data.y.append(data.data[:nopts, index])
if 'm' in selection:
for labelindex in selection['m']:
label = data.info['LabelNames'][labelindex]
if label not in data.info['LabelNames']:
raise ValueError("Label %s not in scan labels" % label)
index = data.info['LabelNames'].index(label)
if data.m is None: data.m = []
data.m.append(data.data[:nopts, index])
data.info['selectiontype'] = "1D"
data.info['scanselection'] = True
data.data = None
return data
if (key in ["XIA_DATA"]) and 'XIA' in selection:
if selection["XIA"]:
if 'Detectors' in data.info:
for i in range(len(selection['rows']['y'])):
selection['rows']['y'][i] = \
data.info['Detectors'].index(selection['rows']['y'][i]) + 1
del selection['XIA']
return data.select(selection)
else:
if data.data is not None:
data.info['selectiontype'] = "%dD" % len(data.data.shape)
if data.info['selectiontype'] == "2D":
data.info["imageselection"] = True
return data
else:
output.append(data.select(selection))
return output
else:
return None
def __getSourceInfo(self):
arraylist = []
sourcename = self.name
for array in sps.getarraylist(sourcename):
arrayinfo = sps.getarrayinfo(sourcename, array)
arraytype = arrayinfo[2]
arrayflag = arrayinfo[3]
if arraytype != sps.STRING:
if (arrayflag & sps.TAG_ARRAY) == sps.TAG_ARRAY:
arraylist.append(array)
continue
if DEBUG:
print("array not added %s" % array)
source_info = {}
source_info["Size"] = len(arraylist)
source_info["KeyList"] = arraylist
return source_info
def __getArrayInfo(self, array):
info = {}
info["SourceType"] = SOURCE_TYPE
info["SourceName"] = self.name
info["Key"] = array
arrayinfo = sps.getarrayinfo(self.name, array)
info["rows"] = arrayinfo[0]
info["cols"] = arrayinfo[1]
info["type"] = arrayinfo[2]
info["flag"] = arrayinfo[3]
counter = sps.updatecounter(self.name, array)
info["updatecounter"] = counter
envdict = {}
keylist = sps.getkeylist(self.name, array + "_ENV")
for i in keylist:
val = sps.getenv(self.name, array + "_ENV", i)
envdict[i] = val
info["envdict"] = envdict
scantest = (info['flag'] & sps.TAG_SCAN) == sps.TAG_SCAN
if (array in ["SCAN_D"]) or scantest:
if 'axistitles' in info["envdict"]:
info["LabelNames"] = self._buildLabelsList(info['envdict']['axistitles'])
if 'H' in info["envdict"]:
if 'K' in info["envdict"]:
if 'L' in info["envdict"]:
info['hkl'] = [envdict['H'],
envdict['K'],
envdict['L']]
calibarray = array + "_PARAM"
if calibarray in sps.getarraylist(self.name):
try:
data = sps.getdata(self.name, calibarray)
updc = sps.updatecounter(self.name, calibarray)
info["EnvKey"] = calibarray
# data is an array
info["McaCalib"] = data.tolist()[0]
info["env_updatecounter"] = updc
except:
# Some of our C modules return NULL without setting
# an exception ...
pass
if array in ["XIA_DATA", "XIA_BASELINE"]:
envarray = "XIA_DET"
if envarray in sps.getarraylist(self.name):
try:
data = sps.getdata(self.name, envarray)
updc = sps.updatecounter(self.name, envarray)
info["EnvKey"] = envarray
info["Detectors"] = data.tolist()[0]
info["env_updatecounter"] = updc
except:
pass
return info
def _buildLabelsList(self, instr):
if DEBUG:
print('SpsDataSource : building counter list')
state = 0
llist = ['']
for letter in instr:
if state == 0:
if letter == ' ':
state = 1
elif letter == '{':
state = 2
else:
llist[-1] = llist[-1] + letter
elif state == 1:
if letter == ' ':
pass
elif letter == '{':
state = 2
llist.append('')
else:
llist.append(letter)
state = 0
elif state == 2:
if letter == '}':
state = 0
else:
llist[-1] = llist[-1] + letter
try:
llist.remove('')
except ValueError:
pass
return llist
def isUpdated(self, sourceName, key):
if sps.specrunning(sourceName):
if sps.isupdated(sourceName, key):
return True
#return True if its environment is updated
envkey = key + "_ENV"
if envkey in sps.getarraylist(sourceName):
if sps.isupdated(sourceName, envkey):
return True
return False
source_types = {SOURCE_TYPE: SpsDataSource}
# TODO object is a builtins
def DataSource(name="", object=None, copy=True, source_type=SOURCE_TYPE):
try:
sourceClass = source_types[source_type]
except KeyError:
# ERROR invalid source type
raise TypeError("Invalid Source Type, source type should be one of %s" % source_types.keys())
return sourceClass(name, object, copy)
def main():
import sys
try:
specname = sys.argv[1]
arrayname = sys.argv[2]
obj = DataSource(specname)
data = obj.getData(arrayname)
print("info = ", data.info)
except:
# give usage instructions
print("Usage: SpsDataSource <specversion> <arrayname>")
sys.exit()
if __name__ == "__main__":
main()
``` |
[
{
"content": "```python\n# update_hi - receive binary and i) parse it, ii) update json tally as needed, iii) store .hi file for later\n# get_hi -- fetch a bin for the emu\n# get_json_tally - dump highscore table as json (for fancy frontend to display, say)\n# get_html_tally - dump highscore in vaguely readable ... | [
{
"content": "<|memory_start|>```python\n# update_hi - receive binary and i) parse it, ii) update json tally as needed, iii) store .hi file for later\n# get_hi -- fetch a bin for the emu\n# get_json_tally - dump highscore table as json (for fancy frontend to display, say)\n# get_html_tally - dump highscore in v... | ```python
# update_hi - receive binary and i) parse it, ii) update json tally as needed, iii) store .hi file for later
# get_hi -- fetch a bin for the emu
# get_json_tally - dump highscore table as json (for fancy frontend to display, say)
# get_html_tally - dump highscore in vaguely readable html table (for web browser quickies)
# get_last_modify_epoch - get epoch-time of last tally modify
import logging
import json
import array
import os
import pprint
import time
import traceback
import profile
from paths import _basepath
import modulemap
import activity_log
SCOREBOARD_MAX=500
logging.info ( "LOADING: singlescore_handler" )
# "score" should not be supplied, unless its multiscore sending its shit here
def update_hi ( req, score_int=None ):
#pp = pprint.PrettyPrinter ( indent=4 )
# base game path
writepath = _basepath ( req )
try:
logging.debug ( "Attempt to create dirs %s" % ( writepath ) )
os.makedirs ( writepath )
except:
pass
# pull up existing tally file
#
tally = _read_tally ( req )
sb = tally [ 'scoreboard' ]
# parse new hi buffer
#
if score_int:
hi = score_int
else:
hi = parse_hi_bin ( req, req [ '_bindata' ] )
# is any of this new buffer better than existing tally?
# if so, update tally file and record it
# if not, we're done
# new tally update? great ..
# .. store hi-file
# .. store new tally file
# -------
# does this score factor into the high score table, or too low to count?
if False and hi < sb [ SCOREBOARD_MAX - 1 ][ 'score' ]:
logging.info ( "hidb - %s - submitter score of %d is NOT sufficient to enter scoreboard (lowest %d, highest %d)" % ( req [ 'gamename' ], hi, sb [ SCOREBOARD_MAX - 1 ][ 'score' ], sb [ 0 ][ 'score' ] ) )
return
# is score same as existing top .. if so, its just resubmitting the score they pulled down, likely, so.. discard
if False and hi == sb [ 0 ][ 'score' ]:
logging.info ( "hidb - %s - submitter score of %d is same as highest score .. probably just looping. (lowest %d, highest %d)" % ( req [ 'gamename' ], hi, sb [ SCOREBOARD_MAX - 1 ][ 'score' ], sb [ 0 ][ 'score' ] ) )
return
# okay, so the guys score is at least better than one of them.. start at top, pushing the way down
if False:
logging.info ( "hidb - %s - submitter score of %d IS sufficient to enter scoreboard (lowest %d, highest %d)" % ( req [ 'gamename' ], hi, sb [ SCOREBOARD_MAX - 1 ][ 'score' ], sb [ 0 ][ 'score' ] ) )
# determine desired sort order
order = 'highest-first'
try:
_order = modulemap.gamemap [ req [ 'gamename' ] ] [ '_general'] [ 'ordering' ]
logging.info ( 'hidb - %s - ordering from conf is %s' % ( req [ 'gamename' ], _order ) )
if _order in ( 'highest-first' ,'lowest-first' ):
order = _order
else:
order = 'highest-first'
except:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint ( modulemap.gamemap [ req [ 'gamename' ] ] )
print modulemap.gamemap [ req [ 'gamename' ] ]
traceback.print_exc()
logging.info ( 'hidb - %s - ordering -> exception .. assuming highest-first' % ( req [ 'gamename' ] ) )
order = 'highest-first'
logging.info ( 'hidb - %s - ordering to use is %s' % ( req [ 'gamename' ], order ) )
# create new score entry
d = dict()
d [ 'prid' ] = req [ 'prid' ]
d [ 'score' ] = hi
d [ 'time' ] = int ( time.time() )
# old: insert with manual assumed-ascending sort order
if False:
for i in range ( SCOREBOARD_MAX ):
if hi > sb [ i ][ 'score' ]:
# log the activity
activity_log.log_entry ( req, d, i )
# insert
sb.insert ( i, d )
# drop off last guy
sb.pop()
# if we updated the first entry, the very highest score, spit out a new .hi file
# (mspacman only has a single high score, so we only update it for the highest score.. not a whole table)
if i == 0 and score_int == None:
f = open ( writepath + req [ 'gamename' ] + ".hi", "w" )
f.write ( build_hi_bin ( req, sb [ 0 ][ 'score' ] ) )
f.close()
break
# insert at first, assuming a post-sort; we can drop the 'worst' entry after sort
if True:
sb.insert ( 0, d )
# update activity log.. try to find the entry match and publish it
if True:
for i in range ( SCOREBOARD_MAX ):
if d [ 'prid' ] == sb [ i ] [ 'prid' ] and d [ 'score' ] == sb [ i ] [ 'score' ] and d [ 'time' ] == sb [ i ] [ 'time' ]:
activity_log.log_entry ( req, d, i )
break
# post-sort to games desired sort order
# reverse=False -> ascending (lowest first), lowest is best
# reverse=True -> descending (highest first), highest is best -> most typical case
def _sortvalue ( entry ):
if entry [ 'score' ] == 0:
if order == 'lowest-first':
return 999999999999
else:
return -1
else:
return entry [ 'score' ]
if True:
reversify = True
if order == 'lowest-first':
reversify = False
try:
sb.sort ( key=_sortvalue, reverse=reversify )
except:
traceback.print_exc()
# drop 'worst' (last, since we sorted) entry
if True:
sb.pop()
#logging.info ( 'hidb - %s - sorted ' % ( req [ 'gamename' ] ) )
# update stats and write out the updated tally file
tally [ 'hi' ] = sb [ 0 ][ 'score' ]
tally [ 'prid' ] = sb [ 0 ][ 'prid' ]
tallyfile = json.dumps ( tally )
f = open ( writepath + req [ 'gamename' ] + ".json", "w" )
f.write ( tallyfile )
f.close()
#logging.debug ( "received len %d" % ( req [ '_binlen' ] ) )
return
def get_hi ( req ):
req [ '_bindata' ] = build_hi_bin ( req, 0 )
req [ '_binlen' ] = len ( req [ '_bindata' ] )
logging.info ( "%s - pulled generated zero-score hi file (len %s)" % ( req [ 'gamename' ], req [ '_binlen' ] ) )
'''
writepath = _basepath ( req )
try:
f = open ( writepath + req [ 'gamename' ] + ".hi", "r" )
bindata = f.read()
f.close()
req [ '_bindata' ] = bindata
req [ '_binlen' ] = len ( bindata )
logging.info ( "%s - pulled existant hi file (len %s)" % ( req [ 'gamename' ], req [ '_binlen' ] ) )
except:
req [ '_bindata' ] = build_hi_bin ( req, 270 )
req [ '_binlen' ] = len ( req [ '_bindata' ] )
logging.info ( "%s - pulled generated zero-score hi file (len %s)" % ( req [ 'gamename' ], req [ '_binlen' ] ) )
'''
return
def get_json_tally ( req, raw=False ):
tally = _read_tally ( req )
for ent in tally [ 'scoreboard' ]:
prident = profile.fetch_pridfile_as_dict ( ent [ 'prid' ] )
if prident == None:
prident = profile.NULL_PROFILE
ent [ 'shortname' ] = prident [ 'shortname' ]
ent [ 'longname' ] = prident [ 'longname' ]
if '_general' in modulemap.gamemap [ req [ 'gamename' ] ]:
if 'dispunit' in modulemap.gamemap [ req [ 'gamename' ] ][ '_general' ]:
ent [ 'dispunit' ] = modulemap.gamemap [ req [ 'gamename' ] ] [ '_general' ][ 'dispunit' ]
del ent [ 'prid' ]
if raw:
req [ '_bindata' ] = tally
else:
req [ '_bindata' ] = json.dumps ( tally )
req [ '_binlen' ] = len ( req [ '_bindata' ] )
return
def get_html_tally ( req ):
tally = _read_tally ( req )
if '_backdate' in req:
if req [ '_backdate' ].isdigit():
timeframe = 'Specific Month: ' + req [ '_backdate' ]
else:
timeframe = 'All Time'
else:
timeframe = 'Current Month'
html = ''
html += "<h2>" + req [ 'gamename' ] + "</h2>\n"
html += "<h3>" + timeframe + "</h3>\n"
html += "<table>\n"
html += '<tr>\n'
html += ' <td style="padding:0 15px 0 15px;"><b>Rank</b></td>\n'
html += ' <td style="padding:0 15px 0 15px;"><b>Initial</b></td>\n'
html += ' <td style="padding:0 15px 0 15px;"><b>Name</b></td>\n'
html += ' <td style="padding:0 15px 0 15px;"><b>Score</b></td>\n'
html += ' <td style="padding:0 15px 0 15px;"><b>When</b></td>\n'
html += '</tr>\n'
i = 1
pridcache = dict()
lastprident = None
lastrun = 0 # for an RLE-like run count
for ent in tally [ 'scoreboard' ]:
prident = None
if ent [ 'prid' ]:
try:
prident = pridcache [ ent [ 'prid' ] ]
except:
prident = profile.fetch_pridfile_as_dict ( ent [ 'prid' ] )
pridcache [ ent [ 'prid' ] ] = prident
if prident == None:
prident = profile.NULL_PROFILE
tlocal = time.localtime ( ent [ 'time' ] )
tdisplay = time.strftime ( '%d-%b-%Y', tlocal )
# units
unit = ''
if '_general' in modulemap.gamemap [ req [ 'gamename' ] ]:
if 'dispunit' in modulemap.gamemap [ req [ 'gamename' ] ][ '_general' ]:
unit = ' ' + str ( modulemap.gamemap [ req [ 'gamename' ] ][ '_general' ][ 'dispunit' ] )
showrow = 1 # 0 no, 1 yes, 2 ellipses
if False: # True -> force to full length display
lastprident = None # if uncommented, forces full display .. no ellipses hidden entries
if lastprident == prident:
showrow = 0
lastrun += 1
else:
# if not first row, and the RLE is significant .. show an ellipses
if lastprident != None and lastrun > 0:
showrow = 2
else:
showrow = 1
# last and current are not the same, so RLE is back to zero
lastrun = 0
if showrow == 0:
pass # suppress
else:
if showrow == 2:
# so our last row is not same as this row, and last guy was not also the first
# row.. so show "..."
html += '<tr>\n'
html += ' <td style="padding:0 15px 0 15px;">' + "" + "</td>\n"
html += ' <td style="padding:0 15px 0 15px;">' + "" + "</td>\n"
html += ' <td style="padding:0 15px 0 15px;">' + "..." + "</td>\n"
html += ' <td style="padding:0 15px 0 15px;"></td>\n'
html += ' <td style="padding:0 15px 0 15px;"></td>\n'
html += '</tr>\n'
# showrow == 1, or showrow == 2 .. show this line
html += '<tr>\n'
html += ' <td style="padding:0 15px 0 15px;">' + str ( i ) + "</td>\n"
html += ' <td style="padding:0 15px 0 15px;">' + prident [ 'shortname' ] + "</td>\n"
html += ' <td style="padding:0 15px 0 15px;">' + prident [ 'longname' ] + "</td>\n"
if ent [ 'score' ] > 0:
html += ' <td style="padding:0 15px 0 15px;">' + str ( ent [ 'score' ] ) + unit + "</td>\n"
else:
html += ' <td style="padding:0 15px 0 15px;">-</td>\n'
if ent [ 'time' ] > 0:
html += ' <td style="padding:0 15px 0 15px;">' + tdisplay + "</td>\n"
else:
html += ' <td style="padding:0 15px 0 15px;"></td>\n'
html += '</tr>\n'
lastprident = prident
i += 1
html += "</table>\n"
html += "<p>%d unique profiles in the leaderboard</p>\n" % ( len ( pridcache ) )
req [ '_bindata' ] = html
req [ '_binlen' ] = len ( req [ '_bindata' ] )
return
def get_last_modify_epoch ( req ):
try:
filename = _basepath ( req ) + req [ 'gamename' ] + ".json"
return int ( os.path.getmtime ( filename ) )
except:
return 0
# ---------------
def _read_tally ( req ):
writepath = _basepath ( req )
try:
f = open ( writepath + req [ 'gamename' ] + ".json", "r" )
tallyfile = f.read()
f.close()
tally = json.loads ( tallyfile )
except:
logging.warning ( "%s - assuming new score file (all zeroes)" % ( req [ 'gamename' ] ) )
tally = dict()
tally [ 'hi' ] = 0
tally [ 'prid' ] = '_default_'
scoreboard = list()
for i in range ( SCOREBOARD_MAX ):
scoreboard.append ( { 'prid': '_default_', 'score': 0, 'time': 0 } )
tally [ 'scoreboard' ] = scoreboard
return tally
def parse_hi_bin ( req, bindata ):
return modulemap.gamemap [ req [ 'gamename' ] ][ 'module' ].parse_hi_bin ( req, bindata )
def build_hi_bin ( req, hiscore ):
return modulemap.gamemap [ req [ 'gamename' ] ][ 'module' ].build_hi_bin ( req, hiscore )
def done ( req ):
pass
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n#SBaaS base\nfrom SBaaS_base.postgresql_orm_base import *\nclass data_stage03_quantification_dG0_f(Base):\n __tablename__ = 'data_stage03_quantification_dG0_f'\n id = Column(Integer, Sequence('data_stage03_quantification_dG0_f_id_seq'), primary_key=T... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n#SBaaS base\nfrom SBaaS_base.postgresql_orm_base import *\nclass data_stage03_quantification_dG0_f(Base):\n __tablename__ = 'data_stage03_quantification_dG0_f'\n id = Column(Integer, Sequence('data_stage03_quantification_dG0_f_id_seq'... | ```python
#SBaaS base
from SBaaS_base.postgresql_orm_base import *
class data_stage03_quantification_dG0_f(Base):
__tablename__ = 'data_stage03_quantification_dG0_f'
id = Column(Integer, Sequence('data_stage03_quantification_dG0_f_id_seq'), primary_key=True)
reference_id = Column(String(100))
met_name = Column(String(500))
met_id = Column(String(100))
KEGG_id = Column(String(20))
priority = Column(Integer);
dG0_f = Column(Float);
dG0_f_var = Column(Float);
dG0_f_units = Column(String(50));
temperature = Column(Float, default=298.15);
temperature_units = Column(String(50), default='K');
ionic_strength = Column(Float, default=0.0);
ionic_strength_units = Column(String(50),default='M');
pH = Column(Float, default=0.0);
pH_units = Column(String(50));
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (UniqueConstraint('reference_id','KEGG_id','priority'),
)
def __init__(self,
row_dict_I,
):
self.dG0_f_units=row_dict_I['dG0_f_units'];
self.dG0_f_var=row_dict_I['dG0_f_var'];
self.dG0_f=row_dict_I['dG0_f'];
self.priority=row_dict_I['priority'];
self.KEGG_id=row_dict_I['KEGG_id'];
self.met_id=row_dict_I['met_id'];
self.met_name=row_dict_I['met_name'];
self.reference_id=row_dict_I['reference_id'];
self.ionic_strength=row_dict_I['ionic_strength'];
self.ionic_strength_units=row_dict_I['ionic_strength_units'];
self.pH=row_dict_I['pH'];
self.pH_units=row_dict_I['pH_units'];
self.used_=row_dict_I['used_'];
self.comment_=row_dict_I['comment_'];
self.temperature_units=row_dict_I['temperature_units'];
self.temperature=row_dict_I['temperature'];
def __set__row__(self, reference_id_I, met_name_I, met_id_I, KEGG_id_I, priority_I,
dG0_f_I, dG0_f_var_I, dG0_f_units_I, temperature_I, temperature_units_I, ionic_strength_I, ionic_strength_units_I,
pH_I, pH_units_I, used_I, comment_I):
self.reference_id = reference_id_I;
self.met_name = met_name_I;
self.met_id = met_id_I;
self.KEGG_id = KEGG_id_I;
self.priority = priority_I;
self.dG0_f = dG0_f_I;
self.dG0_f_var = dG0_f_var_I;
self.dG0_f_units = dG0_f_units_I;
self.temperature = temperature_I;
self.temperature_units = temperature_units_I;
self.ionic_strength = ionic_strength_I;
self.ionic_strength_units = ionic_strength_units_I;
self.pH = pH_I;
self.pH_units = pH_units_I;
self.used_ = used_I;
self.comment_ = comment_I;
def __repr__dict__(self):
return {'id':self.id,
'reference_id':self.reference_id,
'met_name':self.met_name,
'met_id':self.met_id,
'KEGG_ID':self.KEGG_id,
'priority':self.priority,
'dG0_f':self.dG0_f,
'dG0_f_var':self.dG0_f_var,
'dG0_f_units':self.dG0_f_units,
'temperature':self.temperature,
'temperature_units':self.temperature_units,
'ionic_strength':self.ionic_strength,
'ionic_strength_units':self.ionic_strength_units,
'pH':self.pH,
'pH_units':self.pH_units,
'used_':self.used_,
'comments_':self.comments_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage03_quantification_dG_f(Base):
__tablename__ = 'data_stage03_quantification_dG_f'
id = Column(Integer, Sequence('data_stage03_quantification_dG_f_id_seq'), primary_key=True)
experiment_id = Column(String(100))
model_id = Column(String(50))
sample_name_abbreviation = Column(String(100))
time_point = Column(String(10))
met_name = Column(String(500))
met_id = Column(String(100))
dG_f = Column(Float);
dG_f_var = Column(Float);
dG_f_units = Column(String(50));
dG_f_lb = Column(Float);
dG_f_ub = Column(Float);
temperature = Column(Float);
temperature_units = Column(String(50));
ionic_strength = Column(Float);
ionic_strength_units = Column(String(50));
pH = Column(Float);
pH_units = Column(String(50));
measured = Column(Boolean);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (UniqueConstraint('experiment_id','model_id','sample_name_abbreviation','time_point','met_id'),
)
def __init__(self,
row_dict_I,
):
self.met_name=row_dict_I['met_name'];
self.time_point=row_dict_I['time_point'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.model_id=row_dict_I['model_id'];
self.experiment_id=row_dict_I['experiment_id'];
self.temperature=row_dict_I['temperature'];
self.used_=row_dict_I['used_'];
self.measured=row_dict_I['measured'];
self.pH_units=row_dict_I['pH_units'];
self.temperature_units=row_dict_I['temperature_units'];
self.ionic_strength=row_dict_I['ionic_strength'];
self.ionic_strength_units=row_dict_I['ionic_strength_units'];
self.pH=row_dict_I['pH'];
self.comment_=row_dict_I['comment_'];
self.dG_f_ub=row_dict_I['dG_f_ub'];
self.dG_f_lb=row_dict_I['dG_f_lb'];
self.dG_f_units=row_dict_I['dG_f_units'];
self.dG_f_var=row_dict_I['dG_f_var'];
self.dG_f=row_dict_I['dG_f'];
self.met_id=row_dict_I['met_id'];
def __set__row__(self, experiment_id_I,model_id_I,sample_name_abbreviation_I,
time_point_I, met_name_I, met_id_I,
dG_f_I, dG_f_var_I, dG_f_units_I,
dG_f_lb_I, dG_f_ub_I, temperature_I, temperature_units_I,
ionic_strength_I, ionic_strength_units_I,
pH_I, pH_units_I, measured_I, used_I, comment_I):
self.experiment_id = experiment_id_I;
self.model_id = model_id_I;
self.sample_name_abbreviation=sample_name_abbreviation_I
self.time_point=time_point_I
self.met_name = met_name_I;
self.met_id = met_id_I;
self.dG_f = dG_f_I;
self.dG_f_var = dG_f_var_I;
self.dG_f_units = dG_f_units_I;
self.dG_f_lb = dG_f_lb_I;
self.dG_f_ub = dG_f_ub_I;
self.temperature = temperature_I;
self.temperature_units = temperature_units_I;
self.ionic_strength = ionic_strength_I;
self.ionic_strength_units = ionic_strength_units_I;
self.pH = pH_I;
self.pH_units = pH_units_I;
self.measured = measured_I;
self.used_ = used_I;
self.comment_ = comment_I;
def __repr__dict__(self):
return {'id':self.id,
'experiment_id':self.experiment_id,
'model_id':self.model_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'time_point':self.time_point,
'met_name':self.met_name,
'met_id':self.met_id,
'dG_f':self.dG_f,
'dG_f_var':self.dG_f_var,
'dG_f_units':self.dG_f_units,
'dG_f_lb':self.dG_f_lb,
'dG_f_ub':self.dG_f_ub,
'temperature':self.temperature,
'temperature_units':self.temperature_units,
'ionic_strength':self.ionic_strength,
'ionic_strength_units':self.ionic_strength_units,
'pH':self.pH,
'pH_units':self.pH_units,
'measured':self.measured,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
``` |
[
{
"content": "Repeat the following code:\n```python\nimport torch\nimport torch.nn as nn\n\nimport torch.nn.functional as F\n\nclass KimCNN(nn.Module):\n def __init__(self, config):\n super(KimCNN, self).__init__()\n output_channel = config.output_channel\n target_class = config.target_c... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\nimport torch\nimport torch.nn as nn\n\nimport torch.nn.functional as F\n\nclass KimCNN(nn.Module):\n def __init__(self, config):\n super(KimCNN, self).__init__()\n output_channel = config.output_channel\n target_class =... | ```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class KimCNN(nn.Module):
def __init__(self, config):
super(KimCNN, self).__init__()
output_channel = config.output_channel
target_class = config.target_class
words_num = config.words_num
words_dim = config.words_dim
embed_num = config.embed_num
embed_dim = config.embed_dim
self.mode = config.mode
Ks = 3 # There are three conv net here
if config.mode == 'multichannel':
input_channel = 2
else:
input_channel = 1
self.embed = nn.Embedding(words_num, words_dim)
self.static_embed = nn.Embedding(embed_num, embed_dim)
self.non_static_embed = nn.Embedding(embed_num, embed_dim)
self.static_embed.weight.requires_grad = False
self.conv1 = nn.Conv2d(input_channel, output_channel, (3, words_dim), padding=(2,0))
self.conv2 = nn.Conv2d(input_channel, output_channel, (4, words_dim), padding=(3,0))
self.conv3 = nn.Conv2d(input_channel, output_channel, (5, words_dim), padding=(4,0))
self.dropout = nn.Dropout(config.dropout)
self.fc1 = nn.Linear(Ks * output_channel, target_class)
def forward(self, x):
x = x.text
if self.mode == 'rand':
word_input = self.embed(x) # (batch, sent_len, embed_dim)
x = word_input.unsqueeze(1) # (batch, channel_input, sent_len, embed_dim)
elif self.mode == 'static':
static_input = self.static_embed(x)
x = static_input.unsqueeze(1) # (batch, channel_input, sent_len, embed_dim)
elif self.mode == 'non-static':
non_static_input = self.non_static_embed(x)
x = non_static_input.unsqueeze(1) # (batch, channel_input, sent_len, embed_dim)
elif self.mode == 'multichannel':
non_static_input = self.non_static_embed(x)
static_input = self.static_embed(x)
x = torch.stack([non_static_input, static_input], dim=1) # (batch, channel_input=2, sent_len, embed_dim)
else:
print("Unsupported Mode")
exit()
x = [F.relu(self.conv1(x)).squeeze(3), F.relu(self.conv2(x)).squeeze(3), F.relu(self.conv3(x)).squeeze(3)]
# (batch, channel_output, ~=sent_len) * Ks
x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # max-over-time pooling
# (batch, channel_output) * Ks
x = torch.cat(x, 1) # (batch, channel_output * Ks)
x = self.dropout(x)
logit = self.fc1(x) # (batch, target_size)
return logit
``` |
[
{
"content": "Here is a code snippet:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"\nSettings for the hello scripts.\n\nYou most likely need to edit a few of them, e.g. API_HOST and the OAuth\ncredentials.\n\"\"\"\n\nOUR_BANK = '00100'\n\nUSERNAME = '1000203893'\nPASSWORD = '1000203893'\nC... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nSettings for the hello scripts.\n\nYou most likely need to edit a few of them, e.g. API_HOST and the OAuth\ncredentials.\n\"\"\"\n\nOUR_BANK = '00100'\n\nUSERNAME = '1000203893'\nPASSWORD =... | ```python
# -*- coding: utf-8 -*-
"""
Settings for the hello scripts.
You most likely need to edit a few of them, e.g. API_HOST and the OAuth
credentials.
"""
OUR_BANK = '00100'
USERNAME = '1000203893'
PASSWORD = '1000203893'
CONSUMER_KEY = 'bvldezvlnqj4mtva4jfktke4xliep0bt1xm44yxi'
CONSUMER_SECRET = 'fgwo35uhkroebasxlqgzjjcc0cf1yaujuynkwodz'
# API server URL
BASE_URL = 'https://socgen2-k-api.openbankproject.com'
API_VERSION = "v2.1.0"
# API server will redirect your browser to this URL, should be non-functional
# You will paste the redirect location here when running the script
CALLBACK_URI = 'http://127.0.0.1/cb'
# Our COUNTERPARTY account id (of the same currency)
OUR_COUNTERPARTY = '3806441b-bbdf-3c60-b2b3-14e2f645635f'
COUNTERPARTY_BANK = '00100'
# this following two fields are just used in V210
OUR_COUNTERPARTY_ID = ''
OUR_COUNTERPARTY_IBAN = ''
# Our currency to use
OUR_CURRENCY = 'XAF'
# Our value to transfer
# values below 1000 do not requre challenge request
OUR_VALUE = '0.01'
OUR_VALUE_LARGE = '1000.00'
PAYMENT_DESCRIPTION = 'Hello Payments v2.1!'
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n# Author: Nic Wolfe <nic@wolfeden.ca>\n# URL: http://code.google.com/p/sickbeard/\n#\n# This file is part of Sick Beard.\n#\n# Sick Beard is free software: you can redistribute it and/or modify\n# it under the te... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n# Author: Nic Wolfe <nic@wolfeden.ca>\n# URL: http://code.google.com/p/sickbeard/\n#\n# This file is part of Sick Beard.\n#\n# Sick Beard is free software: you can redistribute it and/or modify\n#... | ```python
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import sickbeard
import os.path
from sickbeard import db, common, helpers, logger
from sickbeard.providers.generic import GenericProvider
from sickbeard import encodingKludge as ek
from sickbeard.name_parser.parser import NameParser, InvalidNameException
class MainSanityCheck(db.DBSanityCheck):
def check(self):
self.fix_duplicate_shows()
self.fix_duplicate_episodes()
self.fix_orphan_episodes()
def fix_duplicate_shows(self):
sqlResults = self.connection.select("SELECT show_id, tvdb_id, COUNT(tvdb_id) as count FROM tv_shows GROUP BY tvdb_id HAVING count > 1")
for cur_duplicate in sqlResults:
logger.log(u"Duplicate show detected! tvdb_id: " + str(cur_duplicate["tvdb_id"]) + u" count: " + str(cur_duplicate["count"]), logger.DEBUG)
cur_dupe_results = self.connection.select("SELECT show_id, tvdb_id FROM tv_shows WHERE tvdb_id = ? LIMIT ?",
[cur_duplicate["tvdb_id"], int(cur_duplicate["count"])-1]
)
for cur_dupe_id in cur_dupe_results:
logger.log(u"Deleting duplicate show with tvdb_id: " + str(cur_dupe_id["tvdb_id"]) + u" show_id: " + str(cur_dupe_id["show_id"]))
self.connection.action("DELETE FROM tv_shows WHERE show_id = ?", [cur_dupe_id["show_id"]])
else:
logger.log(u"No duplicate show, check passed")
def fix_duplicate_episodes(self):
sqlResults = self.connection.select("SELECT showid, season, episode, COUNT(showid) as count FROM tv_episodes GROUP BY showid, season, episode HAVING count > 1")
for cur_duplicate in sqlResults:
logger.log(u"Duplicate episode detected! showid: " + str(cur_duplicate["showid"]) + u" season: "+str(cur_duplicate["season"]) + u" episode: "+str(cur_duplicate["episode"]) + u" count: " + str(cur_duplicate["count"]), logger.DEBUG)
cur_dupe_results = self.connection.select("SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? and episode = ? ORDER BY episode_id DESC LIMIT ?",
[cur_duplicate["showid"], cur_duplicate["season"], cur_duplicate["episode"], int(cur_duplicate["count"])-1]
)
for cur_dupe_id in cur_dupe_results:
logger.log(u"Deleting duplicate episode with episode_id: " + str(cur_dupe_id["episode_id"]))
self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [cur_dupe_id["episode_id"]])
else:
logger.log(u"No duplicate episode, check passed")
def fix_orphan_episodes(self):
sqlResults = self.connection.select("SELECT episode_id, showid, tv_shows.tvdb_id FROM tv_episodes LEFT JOIN tv_shows ON tv_episodes.showid=tv_shows.tvdb_id WHERE tv_shows.tvdb_id is NULL")
for cur_orphan in sqlResults:
logger.log(u"Orphan episode detected! episode_id: " + str(cur_orphan["episode_id"]) + " showid: " + str(cur_orphan["showid"]), logger.DEBUG)
logger.log(u"Deleting orphan episode with episode_id: "+str(cur_orphan["episode_id"]))
self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [cur_orphan["episode_id"]])
else:
logger.log(u"No orphan episode, check passed")
def backupDatabase(version):
helpers.backupVersionedFile(db.dbFilename(), version)
# ======================
# = Main DB Migrations =
# ======================
# Add new migrations at the bottom of the list; subclass the previous migration.
class InitialSchema (db.SchemaUpgrade):
def test(self):
return self.hasTable("tv_shows")
def execute(self):
queries = [
"CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, location TEXT, show_name TEXT, tvdb_id NUMERIC, network TEXT, genre TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, seasonfolders NUMERIC, paused NUMERIC, startyear NUMERIC);",
"CREATE TABLE tv_episodes (episode_id INTEGER PRIMARY KEY, showid NUMERIC, tvdbid NUMERIC, name TEXT, season NUMERIC, episode NUMERIC, description TEXT, airdate NUMERIC, hasnfo NUMERIC, hastbn NUMERIC, status NUMERIC, location TEXT);",
"CREATE TABLE info (last_backlog NUMERIC, last_tvdb NUMERIC);",
"CREATE TABLE history (action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider NUMERIC);"
]
for query in queries:
self.connection.action(query)
class AddTvrId (InitialSchema):
def test(self):
return self.hasColumn("tv_shows", "tvr_id")
def execute(self):
self.addColumn("tv_shows", "tvr_id")
class AddTvrName (AddTvrId):
def test(self):
return self.hasColumn("tv_shows", "tvr_name")
def execute(self):
self.addColumn("tv_shows", "tvr_name", "TEXT", "")
class AddImdbId(InitialSchema):
    """Add an IMDb ID column to the tv_shows table."""

    def test(self):
        return self.hasColumn("tv_shows", "imdb_id")

    def execute(self):
        # Existing rows default to an empty string.
        table, column = "tv_shows", "imdb_id"
        self.addColumn(table, column, "TEXT", "")
class AddAirdateIndex(AddTvrName):
    """Create an index speeding up (showid, airdate) episode lookups."""

    def test(self):
        # NOTE(review): presumably hasTable() queries sqlite_master, which
        # lists indexes as well as tables -- confirm against db.SchemaUpgrade.
        return self.hasTable("idx_tv_episodes_showid_airdate")

    def execute(self):
        sql = "CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(showid,airdate);"
        self.connection.action(sql)
class NumericProviders (AddAirdateIndex):
    """Convert history.provider from legacy numeric IDs to provider name strings."""
    def test(self):
        # Migrated once the provider column type is TEXT.
        return self.connection.tableInfo("history")['provider']['type'] == 'TEXT'
    # Mapping of legacy numeric provider IDs to provider name strings.
    histMap = {-1: 'unknown',
               1: 'newzbin',
               2: 'tvbinz',
               3: 'nzbs',
               4: 'eztv',
               5: 'nzbmatrix',
               6: 'tvnzb',
               7: 'ezrss',
               8: 'thepiratebay',
               9: 'dtt',
               10: 'torrentleech' }
    def execute(self):
        # Recreate history with a TEXT provider column and copy rows over.
        self.connection.action("ALTER TABLE history RENAME TO history_old")
        self.connection.action("CREATE TABLE history (action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider TEXT);")
        # NOTE(review): upgradeHistory() ignores its arguments and copies every
        # history_old row on each call, so this loop appears to re-insert every
        # row once per histMap entry -- confirm intent before relying on it.
        for x in self.histMap.keys():
            self.upgradeHistory(x, self.histMap[x])
    def upgradeHistory(self, number, name):
        oldHistory = self.connection.action("SELECT * FROM history_old").fetchall()
        for curResult in oldHistory:
            sql = "INSERT INTO history (action, date, showid, season, episode, quality, resource, provider) VALUES (?,?,?,?,?,?,?,?)"
            # Translate a numeric provider ID; keep the stored value when it
            # is not parseable as an int (already a name).
            provider = 'unknown'
            try:
                provider = self.histMap[int(curResult["provider"])]
            except ValueError:
                provider = curResult["provider"]
            args = [curResult["action"], curResult["date"], curResult["showid"], curResult["season"], curResult["episode"], curResult["quality"], curResult["resource"], provider]
            self.connection.action(sql, args)
class NewQualitySettings (NumericProviders):
    """Migrate statuses/qualities to the composite encoding and create db_version.

    Bug fix: the history-update section used the pattern ``int(x == CONST)``,
    which compares *before* converting -- a TEXT-stored value such as ``"1"``
    would never match the numeric constant.  Rewritten as ``int(x) == CONST``
    (matching the show-quality section below), which is the clear intent.
    """
    def test(self):
        # Already upgraded once the db_version table exists.
        return self.hasTable("db_version")
    def execute(self):
        backupDatabase(0)
        # old stuff that's been removed from common but we need it to upgrade
        HD = 1
        SD = 3
        ANY = 2
        BEST = 4
        ACTION_SNATCHED = 1
        ACTION_PRESNATCHED = 2
        ACTION_DOWNLOADED = 3
        PREDOWNLOADED = 3
        MISSED = 6
        BACKLOG = 7
        DISCBACKLOG = 8
        SNATCHED_BACKLOG = 10
        ### Update default quality
        if sickbeard.QUALITY_DEFAULT == HD:
            sickbeard.QUALITY_DEFAULT = common.HD
        elif sickbeard.QUALITY_DEFAULT == SD:
            sickbeard.QUALITY_DEFAULT = common.SD
        elif sickbeard.QUALITY_DEFAULT == ANY:
            sickbeard.QUALITY_DEFAULT = common.ANY
        elif sickbeard.QUALITY_DEFAULT == BEST:
            sickbeard.QUALITY_DEFAULT = common.BEST
        ### Update episode statuses
        toUpdate = self.connection.select("SELECT episode_id, location, status FROM tv_episodes WHERE status IN (?, ?, ?, ?, ?, ?, ?)", [common.DOWNLOADED, common.SNATCHED, PREDOWNLOADED, MISSED, BACKLOG, DISCBACKLOG, SNATCHED_BACKLOG])
        didUpdate = False
        for curUpdate in toUpdate:
            # remember that we changed something
            didUpdate = True
            newStatus = None
            oldStatus = int(curUpdate["status"])
            if oldStatus == common.SNATCHED:
                newStatus = common.Quality.compositeStatus(common.SNATCHED, common.Quality.UNKNOWN)
            elif oldStatus == PREDOWNLOADED:
                newStatus = common.Quality.compositeStatus(common.DOWNLOADED, common.Quality.SDTV)
            elif oldStatus in (MISSED, BACKLOG, DISCBACKLOG):
                newStatus = common.WANTED
            elif oldStatus == SNATCHED_BACKLOG:
                newStatus = common.Quality.compositeStatus(common.SNATCHED, common.Quality.UNKNOWN)
            if newStatus is not None:
                self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ? ", [newStatus, curUpdate["episode_id"]])
                continue
            # if we get here status should be == DOWNLOADED
            if not curUpdate["location"]:
                continue
            newQuality = common.Quality.nameQuality(curUpdate["location"])
            if newQuality == common.Quality.UNKNOWN:
                newQuality = common.Quality.assumeQuality(curUpdate["location"])
            self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ?", [common.Quality.compositeStatus(common.DOWNLOADED, newQuality), curUpdate["episode_id"]])
        # if no updates were done then the backup is useless
        # NOTE(review): the guard removes the backup when updates WERE made,
        # which contradicts the comment above -- behavior preserved; confirm
        # intent before changing.
        if didUpdate:
            os.remove(db.dbFilename(suffix='v0'))
        ### Update show qualities
        toUpdate = self.connection.select("SELECT * FROM tv_shows")
        for curUpdate in toUpdate:
            if not curUpdate["quality"]:
                continue
            if int(curUpdate["quality"]) == HD:
                newQuality = common.HD
            elif int(curUpdate["quality"]) == SD:
                newQuality = common.SD
            elif int(curUpdate["quality"]) == ANY:
                newQuality = common.ANY
            elif int(curUpdate["quality"]) == BEST:
                newQuality = common.BEST
            else:
                logger.log(u"Unknown show quality: "+str(curUpdate["quality"]), logger.WARNING)
                newQuality = None
            if newQuality:
                self.connection.action("UPDATE tv_shows SET quality = ? WHERE show_id = ?", [newQuality, curUpdate["show_id"]])
        ### Update history
        toUpdate = self.connection.select("SELECT * FROM history")
        for curUpdate in toUpdate:
            newAction = None
            newStatus = None
            # BUGFIX: was int(curUpdate["action"] == CONST) -- convert first.
            if int(curUpdate["action"]) == ACTION_SNATCHED:
                newStatus = common.SNATCHED
            elif int(curUpdate["action"]) == ACTION_DOWNLOADED:
                newStatus = common.DOWNLOADED
            elif int(curUpdate["action"]) == ACTION_PRESNATCHED:
                newAction = common.Quality.compositeStatus(common.SNATCHED, common.Quality.SDTV)
            if newAction is None and newStatus is None:
                continue
            if not newAction:
                # BUGFIX: was int(curUpdate["quality"] == CONST) -- convert first.
                if int(curUpdate["quality"]) == HD:
                    newAction = common.Quality.compositeStatus(newStatus, common.Quality.HDTV)
                elif int(curUpdate["quality"]) == SD:
                    newAction = common.Quality.compositeStatus(newStatus, common.Quality.SDTV)
                else:
                    newAction = common.Quality.compositeStatus(newStatus, common.Quality.UNKNOWN)
            self.connection.action("UPDATE history SET action = ? WHERE date = ? AND showid = ?", [newAction, curUpdate["date"], curUpdate["showid"]])
        self.connection.action("CREATE TABLE db_version (db_version INTEGER);")
        self.connection.action("INSERT INTO db_version (db_version) VALUES (?)", [1])
class DropOldHistoryTable(NewQualitySettings):
    """Remove the history_old table left behind by NumericProviders."""

    def test(self):
        return self.checkDBVersion() >= 2

    def execute(self):
        sql = "DROP TABLE history_old"
        self.connection.action(sql)
        self.incDBVersion()
class UpgradeHistoryForGenericProviders(DropOldHistoryTable):
    """Rewrite legacy provider names in history to their canonical forms."""

    def test(self):
        return self.checkDBVersion() >= 3

    def execute(self):
        providerMap = {'NZBs': 'NZBs.org',
                       'BinReq': 'Bin-Req',
                       'NZBsRUS': "NZBs'R'US",
                       'EZTV': 'EZTV@BT-Chat'}
        for old_name, new_name in providerMap.items():
            self.connection.action("UPDATE history SET provider = ? WHERE provider = ?", [new_name, old_name])
        self.incDBVersion()
class AddAirByDateOption(UpgradeHistoryForGenericProviders):
    """Add the air_by_date flag to tv_shows."""

    def test(self):
        return self.checkDBVersion() >= 4

    def execute(self):
        sql = "ALTER TABLE tv_shows ADD air_by_date NUMERIC"
        self.connection.action(sql)
        self.incDBVersion()
class ChangeSabConfigFromIpToHost(AddAirByDateOption):
    """Turn the bare SABnzbd address setting into a full URL."""

    def test(self):
        return self.checkDBVersion() >= 5

    def execute(self):
        sickbeard.SAB_HOST = 'http://%s/sabnzbd/' % sickbeard.SAB_HOST
        self.incDBVersion()
class FixSabHostURL(ChangeSabConfigFromIpToHost):
    """Strip the /sabnzbd/ suffix the previous migration appended to SAB_HOST."""

    def test(self):
        return self.checkDBVersion() >= 6

    def execute(self):
        host = sickbeard.SAB_HOST
        if host.endswith('/sabnzbd/'):
            sickbeard.SAB_HOST = host.replace('/sabnzbd/', '/')
        sickbeard.save_config()
        self.incDBVersion()
class AddLang(FixSabHostURL):
    """Add a per-show language column, defaulting to English."""

    def test(self):
        return self.hasColumn("tv_shows", "lang")

    def execute(self):
        table, column = "tv_shows", "lang"
        self.addColumn(table, column, "TEXT", "en")
class PopulateRootDirs (AddLang):
    """Derive the ROOT_DIRS setting from the existing show locations."""
    def test(self):
        return self.checkDBVersion() >= 7
    def execute(self):
        dir_results = self.connection.select("SELECT location FROM tv_shows")
        # Count how many shows live under each parent directory.
        dir_counts = {}
        for cur_dir in dir_results:
            cur_root_dir = ek.ek(os.path.dirname, ek.ek(os.path.normpath, cur_dir["location"]))
            if cur_root_dir not in dir_counts:
                dir_counts[cur_root_dir] = 1
            else:
                dir_counts[cur_root_dir] += 1
        logger.log(u"Dir counts: "+str(dir_counts), logger.DEBUG)
        if not dir_counts:
            # No shows yet -- nothing to populate.
            self.incDBVersion()
            return
        # Pick the most common parent directory as the default root.
        # NOTE(review): relies on Python 2 dicts returning .keys() and
        # .values() in the same order, so the index of the max count is a
        # valid index into the joined keys list below; not Python 3 safe.
        default_root_dir = dir_counts.values().index(max(dir_counts.values()))
        # ROOT_DIRS format: "<default_index>|<dir1>|<dir2>|..."
        new_root_dirs = str(default_root_dir)+'|'+'|'.join(dir_counts.keys())
        logger.log(u"Setting ROOT_DIRS to: "+new_root_dirs, logger.DEBUG)
        sickbeard.ROOT_DIRS = new_root_dirs
        sickbeard.save_config()
        self.incDBVersion()
class SetNzbTorrentSettings(PopulateRootDirs):
    """Enable the global NZB/Torrent switches based on the enabled providers.

    Bug fix: the original loop ``break``-ed after the first enabled provider,
    so only one of USE_NZBS / USE_TORRENTS could ever be turned on even when
    both provider types were enabled. The loop now keeps scanning until both
    flags are resolved.
    """
    def test(self):
        return self.checkDBVersion() >= 8
    def execute(self):
        use_torrents = False
        use_nzbs = False
        for cur_provider in sickbeard.providers.sortedProviderList():
            if not cur_provider.isEnabled():
                continue
            if cur_provider.providerType == GenericProvider.NZB and not use_nzbs:
                use_nzbs = True
                logger.log(u"Provider "+cur_provider.name+" is enabled, enabling NZBs in the upgrade")
            elif cur_provider.providerType == GenericProvider.TORRENT and not use_torrents:
                use_torrents = True
                logger.log(u"Provider "+cur_provider.name+" is enabled, enabling Torrents in the upgrade")
            if use_nzbs and use_torrents:
                # Both flags decided -- nothing more to learn.
                break
        sickbeard.USE_TORRENTS = use_torrents
        sickbeard.USE_NZBS = use_nzbs
        sickbeard.save_config()
        self.incDBVersion()
class FixAirByDateSetting(SetNzbTorrentSettings):
    """Flag talk shows as air-by-date based on their genre string."""

    def test(self):
        return self.checkDBVersion() >= 9

    def execute(self):
        for cur_show in self.connection.select("SELECT * FROM tv_shows"):
            genre = cur_show["genre"]
            if genre and "talk show" in genre.lower():
                self.connection.action("UPDATE tv_shows SET air_by_date = ? WHERE tvdb_id = ?", [1, cur_show["tvdb_id"]])
        self.incDBVersion()
class AddSizeAndSceneNameFields(FixAirByDateSetting):
    """Backfill tv_episodes with file sizes and scene release names.

    Release names are recovered from snatch/download pairs in the history
    table and, failing that, from obviously scene-formatted filenames.
    """
    def test(self):
        return self.checkDBVersion() >= 10
    def execute(self):
        backupDatabase(11)
        if not self.hasColumn("tv_episodes", "file_size"):
            self.addColumn("tv_episodes", "file_size")
        if not self.hasColumn("tv_episodes", "release_name"):
            self.addColumn("tv_episodes", "release_name", "TEXT", "")
        ep_results = self.connection.select("SELECT episode_id, location, file_size FROM tv_episodes")
        logger.log(u"Adding file size to all episodes in DB, please be patient")
        for cur_ep in ep_results:
            if not cur_ep["location"]:
                continue
            # if there is no size yet then populate it for us
            if (not cur_ep["file_size"] or not int(cur_ep["file_size"])) and ek.ek(os.path.isfile, cur_ep["location"]):
                cur_size = ek.ek(os.path.getsize, cur_ep["location"])
                self.connection.action("UPDATE tv_episodes SET file_size = ? WHERE episode_id = ?", [cur_size, int(cur_ep["episode_id"])])
        # check each snatch to see if we can use it to get a release name from
        history_results = self.connection.select("SELECT * FROM history WHERE provider != -1 ORDER BY date ASC")
        logger.log(u"Adding release name to all episodes still in history")
        for cur_result in history_results:
            # find the associated download, if there isn't one then ignore it
            download_results = self.connection.select("SELECT resource FROM history WHERE provider = -1 AND showid = ? AND season = ? AND episode = ? AND date > ?",
                                                      [cur_result["showid"], cur_result["season"], cur_result["episode"], cur_result["date"]])
            if not download_results:
                logger.log(u"Found a snatch in the history for "+cur_result["resource"]+" but couldn't find the associated download, skipping it", logger.DEBUG)
                continue
            nzb_name = cur_result["resource"]
            file_name = ek.ek(os.path.basename, download_results[0]["resource"])
            # take the extension off the filename, it's not needed
            if '.' in file_name:
                file_name = file_name.rpartition('.')[0]
            # find the associated episode on disk
            ep_results = self.connection.select("SELECT episode_id, status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND location != ''",
                                                [cur_result["showid"], cur_result["season"], cur_result["episode"]])
            if not ep_results:
                logger.log(u"The episode "+nzb_name+" was found in history but doesn't exist on disk anymore, skipping", logger.DEBUG)
                continue
            # get the status/quality of the existing ep and make sure it's what we expect
            ep_status, ep_quality = common.Quality.splitCompositeStatus(int(ep_results[0]["status"]))
            if ep_status != common.DOWNLOADED:
                continue
            if ep_quality != int(cur_result["quality"]):
                continue
            # make sure this is actually a real release name and not a season pack or something
            for cur_name in (nzb_name, file_name):
                logger.log(u"Checking if "+cur_name+" is actually a good release name", logger.DEBUG)
                try:
                    np = NameParser(False)
                    parse_result = np.parse(cur_name)
                except InvalidNameException:
                    continue
                if parse_result.series_name and parse_result.season_number != None and parse_result.episode_numbers and parse_result.release_group:
                    # if all is well by this point we'll just put the release name into the database
                    self.connection.action("UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?", [cur_name, ep_results[0]["episode_id"]])
                    break
        # fall back to the on-disk filename for episodes still missing a name
        empty_results = self.connection.select("SELECT episode_id, location FROM tv_episodes WHERE release_name = ''")
        logger.log(u"Adding release name to all episodes with obvious scene filenames")
        for cur_result in empty_results:
            ep_file_name = ek.ek(os.path.basename, cur_result["location"])
            ep_file_name = os.path.splitext(ep_file_name)[0]
            # I only want to find real scene names here so anything with a space in it is out
            if ' ' in ep_file_name:
                continue
            try:
                np = NameParser(False)
                parse_result = np.parse(ep_file_name)
            except InvalidNameException:
                continue
            if not parse_result.release_group:
                continue
            logger.log(u"Name "+ep_file_name+" gave release group of "+parse_result.release_group+", seems valid", logger.DEBUG)
            self.connection.action("UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?", [ep_file_name, cur_result["episode_id"]])
        self.incDBVersion()
class RenameSeasonFolders(AddSizeAndSceneNameFields):
    """Rename the seasonfolders column to flatten_folders (with inverted meaning)."""
    def test(self):
        return self.checkDBVersion() >= 11
    def execute(self):
        # rename the column
        self.connection.action("ALTER TABLE tv_shows RENAME TO tmp_tv_shows")
        self.connection.action("CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, location TEXT, show_name TEXT, tvdb_id NUMERIC, network TEXT, genre TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, tvr_id NUMERIC, tvr_name TEXT, air_by_date NUMERIC, lang TEXT)")
        sql = "INSERT INTO tv_shows(show_id, location, show_name, tvdb_id, network, genre, runtime, quality, airs, status, flatten_folders, paused, startyear, tvr_id, tvr_name, air_by_date, lang) SELECT show_id, location, show_name, tvdb_id, network, genre, runtime, quality, airs, status, seasonfolders, paused, startyear, tvr_id, tvr_name, air_by_date, lang FROM tmp_tv_shows"
        self.connection.action(sql)
        # flip the values to be opposite of what they were before
        # (2 is used as a scratch value so the 1->0 and 0->1 swaps don't collide)
        self.connection.action("UPDATE tv_shows SET flatten_folders = 2 WHERE flatten_folders = 1")
        self.connection.action("UPDATE tv_shows SET flatten_folders = 1 WHERE flatten_folders = 0")
        self.connection.action("UPDATE tv_shows SET flatten_folders = 0 WHERE flatten_folders = 2")
        self.connection.action("DROP TABLE tmp_tv_shows")
        self.incDBVersion()
class AddSubtitlesSupport(RenameSeasonFolders):
    """Add the columns used to track subtitle downloads."""

    def test(self):
        return self.checkDBVersion() >= 12

    def execute(self):
        for args in (("tv_shows", "subtitles"),
                     ("tv_episodes", "subtitles", "TEXT", ""),
                     ("tv_episodes", "subtitles_searchcount"),
                     ("tv_episodes", "subtitles_lastsearch", "TIMESTAMP", str(datetime.datetime.min))):
            self.addColumn(*args)
        self.incDBVersion()
class AddIMDbInfo(RenameSeasonFolders):
    """Create the imdb_info cache table."""

    def test(self):
        return self.checkDBVersion() >= 13

    def execute(self):
        sql = "CREATE TABLE imdb_info (tvdb_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC)"
        self.connection.action(sql)
        self.incDBVersion()
class Add1080pAndRawHDQualities(AddIMDbInfo):
    """Add support for 1080p related qualities along with RawHD

    Quick overview of what the upgrade needs to do:

           quality   | old  | new
        --------------------------
        hdwebdl      | 1<<3 | 1<<5
        hdbluray     | 1<<4 | 1<<7
        fullhdbluray | 1<<5 | 1<<8
        --------------------------
        rawhdtv      |      | 1<<3
        fullhdtv     |      | 1<<4
        fullhdwebdl  |      | 1<<6
    """
    def test(self):
        return self.checkDBVersion() >= 14
    def _update_status(self, old_status):
        # Split the composite status, remap only the quality half.
        (status, quality) = common.Quality.splitCompositeStatus(old_status)
        return common.Quality.compositeStatus(status, self._update_quality(quality))
    def _update_quality(self, old_quality):
        """Update bitwise flags to reflect new quality values

        Check flag bits (clear old then set their new locations) starting
        with the highest bits so we dont overwrite data we need later on
        """
        result = old_quality
        # move fullhdbluray from 1<<5 to 1<<8 if set
        if(result & (1<<5)):
            result = result & ~(1<<5)
            result = result | (1<<8)
        # move hdbluray from 1<<4 to 1<<7 if set
        if(result & (1<<4)):
            result = result & ~(1<<4)
            result = result | (1<<7)
        # move hdwebdl from 1<<3 to 1<<5 if set
        if(result & (1<<3)):
            result = result & ~(1<<3)
            result = result | (1<<5)
        return result
    def _update_composite_qualities(self, status):
        """Unpack, Update, Return new quality values

        Unpack the composite archive/initial values.
        Update either qualities if needed.
        Then return the new compsite quality value.
        """
        # High 16 bits hold the "best/archive" qualities, low 16 the initial.
        best = (status & (0xffff << 16)) >> 16
        initial = status & (0xffff)
        best = self._update_quality(best)
        initial = self._update_quality(initial)
        result = ((best << 16) | initial)
        return result
    def execute(self):
        backupDatabase(self.checkDBVersion())
        # update the default quality so we dont grab the wrong qualities after migration
        sickbeard.QUALITY_DEFAULT = self._update_composite_qualities(sickbeard.QUALITY_DEFAULT)
        sickbeard.save_config()
        # upgrade previous HD to HD720p -- shift previous qualities to new placevalues
        old_hd = common.Quality.combineQualities([common.Quality.HDTV, common.Quality.HDWEBDL >> 2, common.Quality.HDBLURAY >> 3], [])
        new_hd = common.Quality.combineQualities([common.Quality.HDTV, common.Quality.HDWEBDL, common.Quality.HDBLURAY], [])
        # update ANY -- shift existing qualities and add new 1080p qualities, note that rawHD was not added to the ANY template
        old_any = common.Quality.combineQualities([common.Quality.SDTV, common.Quality.SDDVD, common.Quality.HDTV, common.Quality.HDWEBDL >> 2, common.Quality.HDBLURAY >> 3, common.Quality.UNKNOWN], [])
        new_any = common.Quality.combineQualities([common.Quality.SDTV, common.Quality.SDDVD, common.Quality.HDTV, common.Quality.FULLHDTV, common.Quality.HDWEBDL, common.Quality.FULLHDWEBDL, common.Quality.HDBLURAY, common.Quality.FULLHDBLURAY, common.Quality.UNKNOWN], [])
        # update qualities (including templates)
        shows = self.connection.select("SELECT * FROM tv_shows")
        for cur_show in shows:
            if cur_show["quality"] == old_hd:
                new_quality = new_hd
            elif cur_show["quality"] == old_any:
                new_quality = new_any
            else:
                new_quality = self._update_composite_qualities(cur_show["quality"])
            self.connection.action("UPDATE tv_shows SET quality = ? WHERE tvdb_id = ?", [new_quality, cur_show["tvdb_id"]])
        # update status that are are within the old hdwebdl (1<<3 which is 8) and better -- exclude unknown (1<<15 which is 32768)
        episodes = self.connection.select("SELECT * FROM tv_episodes WHERE status/100 < 32768 AND status/100 >= 8")
        for cur_episode in episodes:
            self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ?", [self._update_status(cur_episode["status"]), cur_episode["episode_id"]])
        # make two seperate passes through the history since snatched and downloaded (action & quality) may not always coordinate together
        # update previous history so it shows the correct action
        historyAction = self.connection.select("SELECT * FROM history WHERE action/100 < 32768 AND action/100 >= 8")
        for cur_entry in historyAction:
            self.connection.action("UPDATE history SET action = ? WHERE showid = ? AND date = ?", [self._update_status(cur_entry["action"]), cur_entry["showid"], cur_entry["date"]])
        # update previous history so it shows the correct quality
        historyQuality = self.connection.select("SELECT * FROM history WHERE quality < 32768 AND quality >= 8")
        for cur_entry in historyQuality:
            self.connection.action("UPDATE history SET quality = ? WHERE showid = ? AND date = ?", [self._update_quality(cur_entry["quality"]), cur_entry["showid"], cur_entry["date"]])
        self.incDBVersion()
class AddProperNamingSupport(AddIMDbInfo):
    """Track whether an episode file is a PROPER release."""

    def test(self):
        return self.checkDBVersion() >= 15

    def execute(self):
        table, column = "tv_episodes", "is_proper"
        self.addColumn(table, column)
        self.incDBVersion()
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n\n'''\nThis module contains some common routines used by cv2 samples and an abstract multiprocessing class\n'''\n\nimport numpy as np\nimport cv2\n\n# built-in modules\nimport os\nimport itertools as it\nfrom contextlib import contextmanager\n\n\nimport multiproce... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n\n'''\nThis module contains some common routines used by cv2 samples and an abstract multiprocessing class\n'''\n\nimport numpy as np\nimport cv2\n\n# built-in modules\nimport os\nimport itertools as it\nfrom contextlib import contextmanager\n\n\ni... | ```python
#!/usr/bin/env python
'''
This module contains some common routines used by cv2 samples and an abstract multiprocessing class
'''
import numpy as np
import cv2
# built-in modules
import os
import itertools as it
from contextlib import contextmanager
import multiprocessing
import Queue #needed separately for the Empty exception
import time, datetime
# A sleepable multiprocessing.Process
class SleepableCVProcess(multiprocessing.Process):
    """Worker process that pulls frames from an input queue, processes them
    via doWork(), and pushes (timestamp, result) pairs onto an output queue.

    Supports cooperative sleep/wake, reset, and shutdown via cross-process
    Events; all control methods may be called from the parent process.
    """
    def __init__(self, inputqueue, outputqueue):
        multiprocessing.Process.__init__(self)
        self.inputqueue = inputqueue
        self.outputqueue = outputqueue
        # Cross-process flags that drive the run() loop.
        self.exit = multiprocessing.Event()
        self.sleeping = multiprocessing.Event()
        self.reseting = multiprocessing.Event()
    def run(self):
        self.setup()
        while not self.exit.is_set():
            if self.sleeping.is_set():
                # Paused: poll cheaply instead of consuming input.
                time.sleep(0.1)
                continue
            if self.reseting.is_set():
                # Re-run setup once, then clear the request.
                self.setup()
                self.reseting.clear()
            try:
                tstamp, cv_img = self.inputqueue.get(False)
                if (cv_img is not None) and cv_img.data:
                    vis = self.doWork(cv_img)
                else:
                    continue
                # Stamp with processing-completion time, not capture time.
                tstamp = datetime.datetime.now()
                try:
                    self.outputqueue.put((tstamp, vis), False)
                except Queue.Full:
                    # Drop the result if the consumer is falling behind.
                    continue
            except Queue.Empty:
                continue
    #override with setup work; called on start and again on reset()
    def setup(self):
        pass
    #override with actual work to be done (cv_img ---> visualization_img)
    def doWork(self, cv_img):
        return cv_img
    def isAwake(self):
        return not self.sleeping.is_set()
    def shutdown(self):
        self.exit.set()
    def sleep(self):
        self.sleeping.set()
    def wake(self):
        self.sleeping.clear()
    def reset(self):
        self.reseting.set()
# Recognized raster image file extensions.
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
    """Lightweight attribute-access wrapper around keyword arguments."""
    def __init__(self, **kw):
        for key, value in kw.items():
            setattr(self, key, value)
    def __str__(self):
        return str(self.__dict__)
def splitfn(fn):
    """Split a path into (directory, basename-without-extension, extension)."""
    directory, basename = os.path.split(fn)
    name, ext = os.path.splitext(basename)
    return directory, name, ext
def anorm2(a):
    """Squared L2 norm along the last axis."""
    return np.square(a).sum(-1)
def anorm(a):
    """L2 norm along the last axis."""
    return np.sqrt((a * a).sum(-1))
def homotrans(H, x, y):
    """Map point(s) (x, y) through the 3x3 homography H and dehomogenize."""
    num_x = H[0, 0] * x + H[0, 1] * y + H[0, 2]
    num_y = H[1, 0] * x + H[1, 1] * y + H[1, 2]
    denom = H[2, 0] * x + H[2, 1] * y + H[2, 2]
    return num_x / denom, num_y / denom
def to_rect(a):
    """Normalize a size or rectangle spec to a 2x2 float array [[x0,y0],[x1,y1]]."""
    flat = np.ravel(a)
    if len(flat) == 2:
        # A bare (w, h) size means a rectangle anchored at the origin.
        flat = (0, 0, flat[0], flat[1])
    return np.array(flat, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
    """Affine 3x3 matrix mapping rectangle src onto rectangle dst."""
    src = to_rect(src)
    dst = to_rect(dst)
    scale = (dst[1] - dst[0]) / (src[1] - src[0])
    shift = dst[0] - src[0] * scale
    return np.float64([[scale[0], 0,        shift[0]],
                       [0,        scale[1], shift[1]],
                       [0,        0,        1]])
def lookat(eye, target, up = (0, 0, 1)):
    """Camera rotation R and translation tvec looking from eye toward target."""
    def _unit(v):
        return v / np.sqrt((v * v).sum(-1))
    fwd = _unit(np.asarray(target, np.float64) - eye)
    right = _unit(np.cross(fwd, up))
    down = np.cross(fwd, right)
    R = np.float64([right, down, fwd])
    tvec = -np.dot(R, eye)
    return R, tvec
def mtx2rvec(R):
    """Convert a rotation matrix to a Rodrigues rotation vector (axis * angle)."""
    # SVD of (R - I): the singular vectors span the rotation plane.
    w, u, vt = cv2.SVDecomp(R - np.eye(3))
    p = vt[0] + u[:,0]*w[0]    # same as np.dot(R, vt[0])
    c = np.dot(vt[0], p)
    s = np.dot(vt[1], p)
    axis = np.cross(vt[0], vt[1])
    return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
    # Draw white text with a 1px black drop shadow for readability.
    # NOTE: tuple parameter unpacking and the 16L literal are Python 2 only.
    LINE_AA = 16L
    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=LINE_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=LINE_AA)
class Sketcher:
    """Freehand drawing helper: paints on one or more destination images
    while the left mouse button is dragged over the named window."""
    def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        # colors_func() returns one color per destination image.
        self.colors_func = colors_func
        self.dirty = False
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
    def show(self):
        cv2.imshow(self.windowname, self.dests[0])
    def on_mouse(self, event, x, y, flags, param):
        pt = (x, y)
        if event == cv2.EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        elif event == cv2.EVENT_LBUTTONUP:
            self.prev_pt = None
        if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
            # Draw a segment from the previous drag point on every destination.
            for dst, color in zip(self.dests, self.colors_func()):
                cv2.line(dst, self.prev_pt, pt, color, 5)
            self.dirty = True
            self.prev_pt = pt
            self.show()
# palette data from matplotlib/_cm.py
# Each channel is a list of (position, value_below, value_above) breakpoints
# on [0, 1]; make_cmap() interpolates between them.
_jet_data =   {'red':   ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
                         (1, 0.5, 0.5)),
               'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
                         (0.91,0,0), (1, 0, 0)),
               'blue':  ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
                         (1, 0, 0))}

cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
    """Build an n-entry BGR uint8 colormap from the named palette spec."""
    data = cmap_data[name]
    xs = np.linspace(0.0, 1.0, n)
    eps = 1e-6
    channels = []
    for ch_name in ['blue', 'green', 'red']:
        # Duplicate each breakpoint (offset by eps) so step edges survive interp.
        xp = []
        yp = []
        for x, y1, y2 in data[ch_name]:
            xp.extend([x, x + eps])
            yp.extend([y1, y2])
        channels.append(np.interp(xs, xp, yp))
    return np.uint8(np.array(channels).T * 255)
def nothing(*arg, **kw):
    """Do-nothing callback (handy default for cv2 trackbars/events)."""
    return None
def clock():
    # Seconds since an arbitrary epoch, via OpenCV's high-resolution tick counter.
    return cv2.getTickCount() / cv2.getTickFrequency()
@contextmanager
def Timer(msg):
    # Context manager that prints msg, runs the body, then prints elapsed ms.
    # (Python 2 print statements.)
    print msg, '...',
    start = clock()
    try:
        yield
    finally:
        print "%.2f ms" % ((clock()-start)*1000)
class StatValue:
    """Exponentially-smoothed running value (first sample initializes it)."""
    def __init__(self, smooth_coef = 0.5):
        self.value = None
        self.smooth_coef = smooth_coef
    def update(self, v):
        prev = self.value
        if prev is None:
            self.value = v
        else:
            # Exponential moving average: keep `c` of the old, blend in the new.
            c = self.smooth_coef
            self.value = c * prev + (1.0 - c) * v
class RectSelector:
    """Lets the user drag out a rectangle with the mouse; invokes callback
    with (x0, y0, x1, y1) when the drag button is released."""
    def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None
        self.drag_rect = None
    def onmouse(self, event, x, y, flags, param):
        x, y = np.int16([x, y]) # BUG
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        if self.drag_start:
            if flags & cv2.EVENT_FLAG_LBUTTON:
                # Still dragging: normalize so (x0, y0) is the top-left corner.
                xo, yo = self.drag_start
                x0, y0 = np.minimum([xo, yo], [x, y])
                x1, y1 = np.maximum([xo, yo], [x, y])
                self.drag_rect = None
                if x1-x0 > 0 and y1-y0 > 0:
                    self.drag_rect = (x0, y0, x1, y1)
            else:
                # Button released: report the final rectangle, if any.
                rect = self.drag_rect
                self.drag_start = None
                self.drag_rect = None
                if rect:
                    self.callback(rect)
    def draw(self, vis):
        # Render the in-progress selection; returns True if something was drawn.
        if not self.drag_rect:
            return False
        x0, y0, x1, y1 = self.drag_rect
        cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
        return True
    @property
    def dragging(self):
        return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
    '''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
    # n references to the SAME iterator advance in lockstep, yielding chunks.
    chunks = [iter(iterable)] * n
    return it.izip_longest(fillvalue=fillvalue, *chunks)
def mosaic(w, imgs):
    '''Make a grid from images.

    w    -- number of grid columns
    imgs -- images (must have same size and format)
    '''
    imgs = iter(imgs)
    img0 = imgs.next()  # Python 2 iterator protocol (next(imgs) in Py3)
    # Pad the last row with black frames of the same shape.
    pad = np.zeros_like(img0)
    imgs = it.chain([img0], imgs)
    rows = grouper(w, imgs, pad)
    return np.vstack(map(np.hstack, rows))
def getsize(img):
    """Return (width, height) of an image array."""
    height, width = img.shape[:2]
    return width, height
def mdot(*args):
    # Chain matrix products: mdot(A, B, C) == A.dot(B).dot(C).
    # NOTE: relies on the Python 2 builtin `reduce`.
    return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
    """Mark every keypoint with a radius-2 circle of the given color."""
    for kp in keypoints:
        center = (int(kp.pt[0]), int(kp.pt[1]))
        cv2.circle(vis, center, 2, color)
``` |
[
{
"content": "Here is the script:\n```python\nimport numpy as np\nimport os.path as op\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\nfrom nose.tools import assert_true, assert_false, assert_equal, assert_raises\n\nimport mne\nfrom mne import io, Epochs, read_events, pick_types, creat... | [
{
"content": "Here is the script:\n<|memory_start|>```python\nimport numpy as np\nimport os.path as op\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\nfrom nose.tools import assert_true, assert_false, assert_equal, assert_raises\n\nimport mne\nfrom mne import io, Epochs, read_events, p... | ```python
import numpy as np
import os.path as op
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
import mne
from mne import io, Epochs, read_events, pick_types, create_info, EpochsArray
from mne.utils import (_TempDir, run_tests_if_main, slow_test, requires_h5py,
grand_average)
from mne.time_frequency import single_trial_power
from mne.time_frequency.tfr import (cwt_morlet, morlet, tfr_morlet,
_dpss_wavelet, tfr_multitaper,
AverageTFR, read_tfrs, write_tfrs,
combine_tfr)
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
event_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-eve.fif')
def test_morlet():
    """Test morlet with and without zero mean"""
    wav_zero = morlet(1000, [10], 2., zero_mean=True)
    wav_raw = morlet(1000, [10], 2., zero_mean=False)
    # The zero-mean wavelet has essentially no DC component; the raw one does.
    assert_true(np.abs(np.real(wav_zero[0]).mean()) < 1e-5)
    assert_true(np.abs(np.real(wav_raw[0]).mean()) > 1e-3)
def test_time_frequency():
    """Test time frequency transform (PSD and phase lock)
    """
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.5
    # Setup for reading the raw data
    raw = io.Raw(raw_fname)
    events = read_events(event_fname)
    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=include, exclude=exclude)
    # keep only two channels to keep the test fast
    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)
    epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax,
                            baseline=(None, 0))
    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.
    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    # Now compute evoked
    evoked = epochs.average()
    power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
                              return_itc=False)
    # ITC is undefined for an evoked (single average) -- must raise
    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    # Test picks argument
    power_picks, itc_picks = tfr_morlet(epochs_nopicks, freqs=freqs,
                                        n_cycles=n_cycles, use_fft=True,
                                        return_itc=True, picks=picks)
    # the actual data arrays here are equivalent, too...
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(itc.data, itc_picks.data)
    assert_array_almost_equal(power.data, power_evoked.data)
    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test add
    power.apply_baseline(baseline=(-0.1, 0), mode='logratio')
    # channel-type containment checks
    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)
    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    # ITC values must lie strictly inside (0, 1)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)
    # grand average
    itc2 = itc.copy()
    itc2.info['bads'] = [itc2.ch_names[0]]  # test channel drop
    gave = grand_average([itc2, itc])
    assert_equal(gave.data.shape, (itc2.data.shape[0] - 1,
                                   itc2.data.shape[1],
                                   itc2.data.shape[2]))
    assert_equal(itc2.ch_names[1:], gave.ch_names)
    assert_equal(gave.nave, 2)
    itc2.drop_channels(itc2.info["bads"])
    assert_array_almost_equal(gave.data, itc2.data)
    # combine_tfr: nave-weighted average of all-ones (nave=2) and all-zeros
    # (nave=1) should give 2/3 everywhere
    itc2.data = np.ones(itc2.data.shape)
    itc.data = np.zeros(itc.data.shape)
    itc2.nave = 2
    itc.nave = 1
    itc.drop_channels([itc.ch_names[0]])
    combined_itc = combine_tfr([itc2, itc])
    assert_array_almost_equal(combined_itc.data,
                              np.ones(combined_itc.data.shape) * 2 / 3)
    # more tests
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
                            return_itc=True)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)
    Fs = raw.info['sfreq']  # sampling in Hz
    tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))
    # single-trial power averaged over epochs must match the epochs TFR
    single_power = single_trial_power(data, Fs, freqs, use_fft=False,
                                      n_cycles=2)
    assert_array_almost_equal(np.mean(single_power), power.data)
    # channel pick/drop/equalize round-trips
    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))
    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)
def test_dpsswavelet():
"""Test DPSS wavelet"""
freqs = np.arange(5, 25, 3)
Ws = _dpss_wavelet(1000, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0, zero_mean=True)
assert_true(len(Ws) == 3) # 3 tapers expected
# Check that zero mean is true
assert_true(np.abs(np.mean(np.real(Ws[0][0]))) < 1e-5)
assert_true(len(Ws[0]) == len(freqs)) # As many wavelets as asked for
@slow_test
def test_tfr_multitaper():
"""Test tfr_multitaper"""
sfreq = 200.0
ch_names = ['SIM0001', 'SIM0002', 'SIM0003']
ch_types = ['grad', 'grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = int(sfreq) # Second long epochs
n_epochs = 3
seed = 42
rng = np.random.RandomState(seed)
noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
t = np.arange(n_times, dtype=np.float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
dat = noise + signal
reject = dict(grad=4000.)
events = np.empty((n_epochs, 3), int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
reject=reject)
freqs = np.arange(5, 100, 3, dtype=np.float)
power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0)
picks = np.arange(len(ch_names))
power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2.,
time_bandwidth=4.0, picks=picks)
power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False)
# test picks argument
assert_array_almost_equal(power.data, power_picks.data)
assert_array_almost_equal(itc.data, itc_picks.data)
# one is squared magnitude of the average (evoked) and
# the other is average of the squared magnitudes (epochs PSD)
# so values shouldn't match, but shapes should
assert_array_equal(power.data.shape, power_evoked.data.shape)
assert_raises(AssertionError, assert_array_almost_equal,
power.data, power_evoked.data)
tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
assert_true(tmax > 0.3 and tmax < 0.7)
assert_false(np.any(itc.data < 0.))
assert_true(fmax > 40 and fmax < 60)
def test_crop():
"""Test TFR cropping"""
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.crop(0.2, 0.3)
assert_array_equal(tfr.times, [0.2, 0.3])
assert_equal(tfr.data.shape[-1], 2)
@requires_h5py
def test_io():
"""Test TFR IO capacities"""
tempdir = _TempDir()
fname = op.join(tempdir, 'test-tfr.h5')
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.save(fname)
tfr2 = read_tfrs(fname, condition='test')
assert_array_equal(tfr.data, tfr2.data)
assert_array_equal(tfr.times, tfr2.times)
assert_array_equal(tfr.freqs, tfr2.freqs)
assert_equal(tfr.comment, tfr2.comment)
assert_equal(tfr.nave, tfr2.nave)
assert_raises(IOError, tfr.save, fname)
tfr.comment = None
tfr.save(fname, overwrite=True)
assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
tfr.comment = 'test-A'
tfr2.comment = 'test-B'
fname = op.join(tempdir, 'test2-tfr.h5')
write_tfrs(fname, [tfr, tfr2])
tfr3 = read_tfrs(fname, condition='test-A')
assert_equal(tfr.comment, tfr3.comment)
assert_true(isinstance(tfr.info, io.meas_info.Info))
tfrs = read_tfrs(fname, condition=None)
assert_equal(len(tfrs), 2)
tfr4 = tfrs[1]
assert_equal(tfr2.comment, tfr4.comment)
assert_raises(ValueError, read_tfrs, fname, condition='nonono')
def test_plot():
"""Test TFR plotting."""
import matplotlib.pyplot as plt
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.plot([1, 2], title='title')
plt.close('all')
ax = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (1, 1))
ax3 = plt.subplot2grid((2, 2), (0, 1))
tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3])
plt.close('all')
tfr.plot_topo(picks=[1, 2])
plt.close('all')
tfr.plot_topo(picks=[1, 2])
plt.close('all')
def test_add_channels():
"""Test tfr splitting / re-appending channel types
"""
data = np.zeros((6, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(
['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],
1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr_eeg = tfr.pick_types(meg=False, eeg=True, copy=True)
tfr_meg = tfr.pick_types(meg=True, copy=True)
tfr_stim = tfr.pick_types(meg=False, stim=True, copy=True)
tfr_eeg_meg = tfr.pick_types(meg=True, eeg=True, copy=True)
tfr_new = tfr_meg.add_channels([tfr_eeg, tfr_stim], copy=True)
assert_true(all(ch in tfr_new.ch_names
for ch in tfr_stim.ch_names + tfr_meg.ch_names))
tfr_new = tfr_meg.add_channels([tfr_eeg], copy=True)
assert_true(ch in tfr_new.ch_names for ch in tfr.ch_names)
assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
assert_true(all(ch not in tfr_new.ch_names
for ch in tfr_stim.ch_names))
# Now test errors
tfr_badsf = tfr_eeg.copy()
tfr_badsf.info['sfreq'] = 3.1415927
tfr_eeg = tfr_eeg.crop(-.1, .1)
assert_raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
assert_raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
assert_raises(ValueError, tfr_meg.add_channels, [tfr_meg])
assert_raises(AssertionError, tfr_meg.add_channels, tfr_badsf)
run_tests_if_main()
``` |
[
{
"content": "Recreate the original code text:\n```python\n# -*- coding:utf-8 -*-\n#\n# Copyright 2019 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the Licen... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\n# -*- coding:utf-8 -*-\n#\n# Copyright 2019 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a c... | ```python
# -*- coding:utf-8 -*-
#
# Copyright 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for the git_command.py module."""
from __future__ import print_function
import unittest
import git_command
class GitCallUnitTest(unittest.TestCase):
"""Tests the _GitCall class (via git_command.git)."""
def test_version_tuple(self):
"""Check git.version_tuple() handling."""
ver = git_command.git.version_tuple()
self.assertIsNotNone(ver)
# We don't dive too deep into the values here to avoid having to update
# whenever git versions change. We do check relative to this min version
# as this is what `repo` itself requires via MIN_GIT_VERSION.
MIN_GIT_VERSION = (1, 7, 2)
self.assertTrue(isinstance(ver.major, int))
self.assertTrue(isinstance(ver.minor, int))
self.assertTrue(isinstance(ver.micro, int))
self.assertGreater(ver.major, MIN_GIT_VERSION[0] - 1)
self.assertGreaterEqual(ver.micro, 0)
self.assertGreaterEqual(ver.major, 0)
self.assertGreaterEqual(ver, MIN_GIT_VERSION)
self.assertLess(ver, (9999, 9999, 9999))
self.assertNotEqual('', ver.full)
``` |
[
{
"content": "Write the code verbatim:\n```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, division, unicode_literals\n##\n## This is part of Pybble, a WMS (Whatever Management System) based on\n## Jinja2/Haml, Werkzeug, Flask, and Optimism.\n##\n## Py... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, division, unicode_literals\n##\n## This is part of Pybble, a WMS (Whatever Management System) based on\n## Jinja2/Haml, Werkzeug, Flask, and Opti... | ```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This is part of Pybble, a WMS (Whatever Management System) based on
## Jinja2/Haml, Werkzeug, Flask, and Optimism.
##
## Pybble is Copyright © 2009-2014 by Matthias Urlichs <matthias@urlichs.de>,
## it is licensed under the GPLv3. See the file `README.md` for details,
## including an optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
import pytest
from pybble.manager.main import RootManager
from pybble.core.models.site import Site
from .base import WebTC
from webunit.webunittest import WebTestCase
from .manager import run
def ap_test():
# set a class attribute on the invoking test context
run("mgr -Dt site add AppTest _test atest")
class AppRunTestCase(WebTC,WebTestCase):
# def setupData(self):
# super(AppRunTestCase,self).setupData()
# self.run_manager("mgr -Dt site new AppTest _test atest")
def test_one(self):
self.once(ap_test)
assert Site.q.get_by(name="AppTest").domain == "atest"
self.assertContent("http://atest/one","Number One")
def test_two(self):
self.once(ap_test)
self.assertContent("http://atest/two","Number Two")
def test_three(self):
self.once(ap_test)
self.assertContent("http://atest/three","Number Three")
``` |
[
{
"content": "Recreate the original code text:\n```python\nimport pytest\nimport time\nfrom anchore_engine.apis.oauth import merge_client_metadata\nfrom anchore_engine.apis.oauth import (\n setup_oauth_client,\n OAuth2Client,\n CLIENT_GRANT_KEY,\n ANONYMOUS_CLIENT_ID,\n)\n\n\n@pytest.mark.parametriz... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\nimport pytest\nimport time\nfrom anchore_engine.apis.oauth import merge_client_metadata\nfrom anchore_engine.apis.oauth import (\n setup_oauth_client,\n OAuth2Client,\n CLIENT_GRANT_KEY,\n ANONYMOUS_CLIENT_ID,\n)\n\n\n@pytest... | ```python
import pytest
import time
from anchore_engine.apis.oauth import merge_client_metadata
from anchore_engine.apis.oauth import (
setup_oauth_client,
OAuth2Client,
CLIENT_GRANT_KEY,
ANONYMOUS_CLIENT_ID,
)
@pytest.mark.parametrize(
"existing_metadata, meta_to_add, expected_output",
[
(
{"grant_types": []},
{"grant_types": ["password"]},
{"grant_types": ["password"]},
),
(
{"grant_types": ["password"]},
{"grant_types": ["password"]},
{"grant_types": ["password"]},
),
(
{"grant_types": ["password"]},
{"grant_types": []},
{"grant_types": ["password"]},
),
(
{"grant_types": ["password"]},
{"grant_types": ["password", "bearer"]},
{"grant_types": ["password", "bearer"]},
),
(
{"grant_types": ["password", "foobar"]},
{"grant_types": ["password", "bearer"]},
{"grant_types": ["password", "bearer", "foobar"]},
),
(
{},
{"grant_types": ["password"]},
{"grant_types": ["password"]},
),
(
{},
{"grant_types": []},
{"grant_types": []},
),
(
None,
{"grant_types": []},
{"grant_types": []},
),
(
None,
{"grant_types": ["password"]},
{"grant_types": ["password"]},
),
],
)
def test_merge_client_metadata(existing_metadata, meta_to_add, expected_output):
"""
Unit test for merging client metadata records for the OAuth2Client
:param existing_metadata:
:param meta_to_add:
:param expected_output:
:return:
"""
merged = merge_client_metadata(existing_metadata, meta_to_add)
check_metadata(merged, expected_output)
def check_metadata(candidate: dict, expected: dict):
for k, v in expected.items():
if type(v) == list:
assert sorted(candidate.get(k)) == sorted(v)
else:
assert (
candidate.get(k) == v
), "Key {} from candidate {} did not match expected {}".format(
k, candidate, v
)
def password_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
# These are no-ops effectively since the client isn't authenticated itself
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
c.set_client_metadata(
{
"token_endpoint_auth_method": "none", # This should be a function of the grant type input but all of our types are this currently
"client_name": ANONYMOUS_CLIENT_ID,
"grant_types": ["password"],
}
)
return c
def legacy_password_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
# These are no-ops effectively since the client isn't authenticated itself
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
c.set_client_metadata(
{
"grant_types": ["password"],
}
)
return c
def no_metadata_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
# These are no-ops effectively since the client isn't authenticated itself
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
return c
def empty_metadata_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
# These are no-ops effectively since the client isn't authenticated itself
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
c.set_client_metadata({})
return c
def authorization_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
c.set_client_metadata(
{
"token_endpoint_auth_method": "none", # This should be a function of the grant type input but all of our types are this currently
"client_name": ANONYMOUS_CLIENT_ID,
"grant_types": ["authorization"],
}
)
return c
def combined_oauth2_client():
c = OAuth2Client()
c.client_id = ANONYMOUS_CLIENT_ID
c.user_id = None
c.client_secret = None
c.client_id_issued_at = time.time() - 100
c.client_secret_expires_at = time.time() + 1000
c.set_client_metadata(
{
"token_endpoint_auth_method": "none", # This should be a function of the grant type input but all of our types are this currently
"client_name": ANONYMOUS_CLIENT_ID,
"grant_types": ["authorization", "password"],
}
)
return c
@pytest.mark.parametrize(
"found_client, add_client, expected_result",
[
(
password_oauth2_client(),
authorization_oauth2_client(),
combined_oauth2_client(),
),
(
legacy_password_oauth2_client(),
authorization_oauth2_client(),
combined_oauth2_client(),
),
(
no_metadata_oauth2_client(),
authorization_oauth2_client(),
authorization_oauth2_client(),
),
(
empty_metadata_oauth2_client(),
authorization_oauth2_client(),
authorization_oauth2_client(),
),
],
)
def test_setup_oauth_client(found_client, add_client, expected_result):
"""
:param found_client:
:param add_client:
:param expected_result:
:return:
"""
assert found_client.client_id == expected_result.client_id
result = setup_oauth_client(found_client, add_client)
assert result is not None
check_metadata(
result.client_metadata,
expected_result.client_metadata,
)
``` |
[
{
"content": "Repeat the following code:\n```python\n# Bezier - last updated for NodeBox 1.8.3\n# Author: Tom De Smedt <tomdesmedt@trapdoor.be>\n# Manual: http://nodebox.net/code/index.php/Bezier\n# Copyright (c) 2007 by Tom De Smedt.\n# Refer to the \"Use\" section on http://nodebox.net/code\n# Thanks to Dr. F... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# Bezier - last updated for NodeBox 1.8.3\n# Author: Tom De Smedt <tomdesmedt@trapdoor.be>\n# Manual: http://nodebox.net/code/index.php/Bezier\n# Copyright (c) 2007 by Tom De Smedt.\n# Refer to the \"Use\" section on http://nodebox.net/code\n#... | ```python
# Bezier - last updated for NodeBox 1.8.3
# Author: Tom De Smedt <tomdesmedt@trapdoor.be>
# Manual: http://nodebox.net/code/index.php/Bezier
# Copyright (c) 2007 by Tom De Smedt.
# Refer to the "Use" section on http://nodebox.net/code
# Thanks to Dr. Florimond De Smedt at the Free University of Brussels for the math routines.
from nodebox.graphics import BezierPath, PathElement, NodeBoxError, Point, MOVETO, LINETO, CURVETO, CLOSE
try:
from cPathmatics import linepoint, linelength, curvepoint, curvelength
except:
from pathmatics import linepoint, linelength, curvepoint, curvelength
def segment_lengths(path, relative=False, n=20):
"""Returns a list with the lengths of each segment in the path.
>>> path = BezierPath(None)
>>> segment_lengths(path)
[]
>>> path.moveto(0, 0)
>>> segment_lengths(path)
[]
>>> path.lineto(100, 0)
>>> segment_lengths(path)
[100.0]
>>> path.lineto(100, 300)
>>> segment_lengths(path)
[100.0, 300.0]
>>> segment_lengths(path, relative=True)
[0.25, 0.75]
>>> path = BezierPath(None)
>>> path.moveto(1, 2)
>>> path.curveto(3, 4, 5, 6, 7, 8)
>>> segment_lengths(path)
[8.4852813742385695]
"""
lengths = []
first = True
for el in path:
if first == True:
close_x, close_y = el.x, el.y
first = False
elif el.cmd == MOVETO:
close_x, close_y = el.x, el.y
lengths.append(0.0)
elif el.cmd == CLOSE:
lengths.append(linelength(x0, y0, close_x, close_y))
elif el.cmd == LINETO:
lengths.append(linelength(x0, y0, el.x, el.y))
elif el.cmd == CURVETO:
x3, y3, x1, y1, x2, y2 = el.x, el.y, el.ctrl1.x, el.ctrl1.y, el.ctrl2.x, el.ctrl2.y
lengths.append(curvelength(x0, y0, x1, y1, x2, y2, x3, y3, n))
if el.cmd != CLOSE:
x0 = el.x
y0 = el.y
if relative:
length = sum(lengths)
try:
return map(lambda l: l / length, lengths)
except ZeroDivisionError: # If the length is zero, just return zero for all segments
return [0.0] * len(lengths)
else:
return lengths
def length(path, segmented=False, n=20):
"""Returns the length of the path.
Calculates the length of each spline in the path,
using n as a number of points to measure.
When segmented is True, returns a list
containing the individual length of each spline
as values between 0.0 and 1.0,
defining the relative length of each spline
in relation to the total path length.
The length of an empty path is zero:
>>> path = BezierPath(None)
>>> length(path)
0.0
>>> path.moveto(0, 0)
>>> path.lineto(100, 0)
>>> length(path)
100.0
>>> path.lineto(100, 100)
>>> length(path)
200.0
# Segmented returns a list of each segment
>>> length(path, segmented=True)
[0.5, 0.5]
"""
if not segmented:
return sum(segment_lengths(path, n=n), 0.0)
else:
return segment_lengths(path, relative=True, n=n)
def _locate(path, t, segments=None):
"""Locates t on a specific segment in the path.
Returns (index, t, PathElement)
A path is a combination of lines and curves (segments).
The returned index indicates the start of the segment
that contains point t.
The returned t is the absolute time on that segment,
in contrast to the relative t on the whole of the path.
The returned point is the last MOVETO,
any subsequent CLOSETO after i closes to that point.
When you supply the list of segment lengths yourself,
as returned from length(path, segmented=True),
point() works about thirty times faster in a for-loop,
since it doesn't need to recalculate the length
during each iteration. Note that this has been deprecated:
the BezierPath now caches the segment lengths the moment you use
them.
>>> path = BezierPath(None)
>>> _locate(path, 0.0)
Traceback (most recent call last):
...
NodeBoxError: The given path is empty
>>> path.moveto(0,0)
>>> _locate(path, 0.0)
Traceback (most recent call last):
...
NodeBoxError: The given path is empty
>>> path.lineto(100, 100)
>>> _locate(path, 0.0)
(0, 0.0, Point(x=0.0, y=0.0))
>>> _locate(path, 1.0)
(0, 1.0, Point(x=0.0, y=0.0))
"""
if segments == None:
segments = path.segmentlengths(relative=True)
if len(segments) == 0:
raise NodeBoxError, "The given path is empty"
for i, el in enumerate(path):
if i == 0 or el.cmd == MOVETO:
closeto = Point(el.x, el.y)
if t <= segments[i] or i == len(segments)-1: break
else: t -= segments[i]
try: t /= segments[i]
except ZeroDivisionError: pass
if i == len(segments)-1 and segments[i] == 0: i -= 1
return (i, t, closeto)
def point(path, t, segments=None):
"""Returns coordinates for point at t on the path.
Gets the length of the path, based on the length
of each curve and line in the path.
Determines in what segment t falls.
Gets the point on that segment.
When you supply the list of segment lengths yourself,
as returned from length(path, segmented=True),
point() works about thirty times faster in a for-loop,
since it doesn't need to recalculate the length
during each iteration. Note that this has been deprecated:
the BezierPath now caches the segment lengths the moment you use
them.
>>> path = BezierPath(None)
>>> point(path, 0.0)
Traceback (most recent call last):
...
NodeBoxError: The given path is empty
>>> path.moveto(0, 0)
>>> point(path, 0.0)
Traceback (most recent call last):
...
NodeBoxError: The given path is empty
>>> path.lineto(100, 0)
>>> point(path, 0.0)
PathElement(LINETO, ((0.0, 0.0),))
>>> point(path, 0.1)
PathElement(LINETO, ((10.0, 0.0),))
"""
if len(path) == 0:
raise NodeBoxError, "The given path is empty"
i, t, closeto = _locate(path, t, segments=segments)
x0, y0 = path[i].x, path[i].y
p1 = path[i+1]
if p1.cmd == CLOSE:
x, y = linepoint(t, x0, y0, closeto.x, closeto.y)
return PathElement(LINETO, ((x, y),))
elif p1.cmd == LINETO:
x1, y1 = p1.x, p1.y
x, y = linepoint(t, x0, y0, x1, y1)
return PathElement(LINETO, ((x, y),))
elif p1.cmd == CURVETO:
x3, y3, x1, y1, x2, y2 = p1.x, p1.y, p1.ctrl1.x, p1.ctrl1.y, p1.ctrl2.x, p1.ctrl2.y
x, y, c1x, c1y, c2x, c2y = curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3)
return PathElement(CURVETO, ((c1x, c1y), (c2x, c2y), (x, y)))
else:
raise NodeBoxError, "Unknown cmd for p1 %s" % p1
def points(path, amount=100):
"""Returns an iterator with a list of calculated points for the path.
This method calls the point method <amount> times, increasing t,
distributing point spacing linearly.
>>> path = BezierPath(None)
>>> list(points(path))
Traceback (most recent call last):
...
NodeBoxError: The given path is empty
>>> path.moveto(0, 0)
>>> list(points(path))
Traceback (most recent call last):
...
NodeBoxError: The given path is empty
>>> path.lineto(100, 0)
>>> list(points(path, amount=4))
[PathElement(LINETO, ((0.0, 0.0),)), PathElement(LINETO, ((25.0, 0.0),)), PathElement(LINETO, ((50.0, 0.0),)), PathElement(LINETO, ((75.0, 0.0),))]
"""
if len(path) == 0:
raise NodeBoxError, "The given path is empty"
# The delta value is divided by amount - 1, because we also want the last point (t=1.0)
# If I wouldn't use amount - 1, I fall one point short of the end.
# E.g. if amount = 4, I want point at t 0.0, 0.33, 0.66 and 1.0,
# if amount = 2, I want point at t 0.0 and t 1.0
try:
delta = 1.0/(amount-1)
except ZeroDivisionError:
delta = 1.0
for i in xrange(amount):
yield point(path, delta*i)
def contours(path):
"""Returns a list of contours in the path.
A contour is a sequence of lines and curves
separated from the next contour by a MOVETO.
For example, the glyph "o" has two contours:
the inner circle and the outer circle.
>>> path = BezierPath(None)
>>> path.moveto(0, 0)
>>> path.lineto(100, 100)
>>> len(contours(path))
1
A new contour is defined as something that starts with a moveto:
>>> path.moveto(50, 50)
>>> path.curveto(150, 150, 50, 250, 80, 95)
>>> len(contours(path))
2
Empty moveto's don't do anything:
>>> path.moveto(50, 50)
>>> path.moveto(50, 50)
>>> len(contours(path))
2
It doesn't matter if the path is closed or open:
>>> path.closepath()
>>> len(contours(path))
2
"""
contours = []
current_contour = None
empty = True
for i, el in enumerate(path):
if el.cmd == MOVETO:
if not empty:
contours.append(current_contour)
current_contour = BezierPath(path._ctx)
current_contour.moveto(el.x, el.y)
empty = True
elif el.cmd == LINETO:
empty = False
current_contour.lineto(el.x, el.y)
elif el.cmd == CURVETO:
empty = False
current_contour.curveto(el.ctrl1.x, el.ctrl1.y,
el.ctrl2.x, el.ctrl2.y, el.x, el.y)
elif el.cmd == CLOSE:
current_contour.closepath()
if not empty:
contours.append(current_contour)
return contours
def findpath(points, curvature=1.0):
"""Constructs a path between the given list of points.
Interpolates the list of points and determines
a smooth bezier path betweem them.
The curvature parameter offers some control on
how separate segments are stitched together:
from straight angles to smooth curves.
Curvature is only useful if the path has more than three points.
"""
# The list of points consists of Point objects,
# but it shouldn't crash on something straightforward
# as someone supplying a list of (x,y)-tuples.
from types import TupleType
for i, pt in enumerate(points):
if type(pt) == TupleType:
points[i] = Point(pt[0], pt[1])
if len(points) == 0: return None
if len(points) == 1:
path = BezierPath(None)
path.moveto(points[0].x, points[0].y)
return path
if len(points) == 2:
path = BezierPath(None)
path.moveto(points[0].x, points[0].y)
path.lineto(points[1].x, points[1].y)
return path
# Zero curvature means straight lines.
curvature = max(0, min(1, curvature))
if curvature == 0:
path = BezierPath(None)
path.moveto(points[0].x, points[0].y)
for i in range(len(points)):
path.lineto(points[i].x, points[i].y)
return path
curvature = 4 + (1.0-curvature)*40
dx = {0: 0, len(points)-1: 0}
dy = {0: 0, len(points)-1: 0}
bi = {1: -0.25}
ax = {1: (points[2].x-points[0].x-dx[0]) / 4}
ay = {1: (points[2].y-points[0].y-dy[0]) / 4}
for i in range(2, len(points)-1):
bi[i] = -1 / (curvature + bi[i-1])
ax[i] = -(points[i+1].x-points[i-1].x-ax[i-1]) * bi[i]
ay[i] = -(points[i+1].y-points[i-1].y-ay[i-1]) * bi[i]
r = range(1, len(points)-1)
r.reverse()
for i in r:
dx[i] = ax[i] + dx[i+1] * bi[i]
dy[i] = ay[i] + dy[i+1] * bi[i]
path = BezierPath(None)
path.moveto(points[0].x, points[0].y)
for i in range(len(points)-1):
path.curveto(points[i].x + dx[i],
points[i].y + dy[i],
points[i+1].x - dx[i+1],
points[i+1].y - dy[i+1],
points[i+1].x,
points[i+1].y)
return path
def insert_point(path, t):
"""Returns a path copy with an extra point at t.
>>> path = BezierPath(None)
>>> path.moveto(0, 0)
>>> insert_point(path, 0.1)
Traceback (most recent call last):
...
NodeBoxError: The given path is empty
>>> path.moveto(0, 0)
>>> insert_point(path, 0.2)
Traceback (most recent call last):
...
NodeBoxError: The given path is empty
>>> path.lineto(100, 50)
>>> len(path)
2
>>> path = insert_point(path, 0.5)
>>> len(path)
3
>>> path[1]
PathElement(LINETO, ((50.0, 25.0),))
>>> path = BezierPath(None)
>>> path.moveto(0, 100)
>>> path.curveto(0, 50, 100, 50, 100, 100)
>>> path = insert_point(path, 0.5)
>>> path[1]
PathElement(LINETO, ((25.0, 62.5), (0.0, 75.0), (50.0, 62.5))
"""
i, t, closeto = _locate(path, t)
x0 = path[i].x
y0 = path[i].y
p1 = path[i+1]
p1cmd, x3, y3, x1, y1, x2, y2 = p1.cmd, p1.x, p1.y, p1.ctrl1.x, p1.ctrl1.y, p1.ctrl2.x, p1.ctrl2.y
if p1cmd == CLOSE:
pt_cmd = LINETO
pt_x, pt_y = linepoint(t, x0, y0, closeto.x, closeto.y)
elif p1cmd == LINETO:
pt_cmd = LINETO
pt_x, pt_y = linepoint(t, x0, y0, x3, y3)
elif p1cmd == CURVETO:
pt_cmd = CURVETO
pt_x, pt_y, pt_c1x, pt_c1y, pt_c2x, pt_c2y, pt_h1x, pt_h1y, pt_h2x, pt_h2y = \
curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3, True)
else:
raise NodeBoxError, "Locate should not return a MOVETO"
new_path = BezierPath(None)
new_path.moveto(path[0].x, path[0].y)
for j in range(1, len(path)):
if j == i+1:
if pt_cmd == CURVETO:
new_path.curveto(pt_h1x, pt_h1y,
pt_c1x, pt_c1y,
pt_x, pt_y)
new_path.curveto(pt_c2x, pt_c2y,
pt_h2x, pt_h2y,
path[j].x, path[j].y)
elif pt_cmd == LINETO:
new_path.lineto(pt_x, pt_y)
if path[j].cmd != CLOSE:
new_path.lineto(path[j].x, path[j].y)
else:
new_path.closepath()
else:
raise NodeBoxError, "Didn't expect pt_cmd %s here" % pt_cmd
else:
if path[j].cmd == MOVETO:
new_path.moveto(path[j].x, path[j].y)
if path[j].cmd == LINETO:
new_path.lineto(path[j].x, path[j].y)
if path[j].cmd == CURVETO:
new_path.curveto(path[j].ctrl1.x, path[j].ctrl1.y,
path[j].ctrl2.x, path[j].ctrl2.y,
path[j].x, path[j].y)
if path[j].cmd == CLOSE:
new_path.closepath()
return new_path
def _test():
import doctest, bezier
return doctest.testmod(bezier)
if __name__=='__main__':
_test()
``` |
[
{
"content": "```python\n#!/usr/bin/python3\n\"\"\"Post-process a GC roots dump.\n\"\"\"\n\nimport getopt\nimport os\nimport re\nimport sys\n\nimport script_utils as u\n\n# Input and output file (if not specified, defaults to stdin/stdout)\nflag_infile = None\nflag_outfile = None\n\n# Binary to analyze\nflag_mo... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/python3\n\"\"\"Post-process a GC roots dump.\n\"\"\"\n\nimport getopt\nimport os\nimport re\nimport sys\n\nimport script_utils as u\n\n# Input and output file (if not specified, defaults to stdin/stdout)\nflag_infile = None\nflag_outfile = None\n\n# Binary to ... | ```python
#!/usr/bin/python3
"""Post-process a GC roots dump.
"""
import getopt
import os
import re
import sys
import script_utils as u
# Input and output file (if not specified, defaults to stdin/stdout)
flag_infile = None
flag_outfile = None
# Binary to analyze
flag_module = None
#......................................................................
# Regular expressions to match:
# Start of root collection
rcstartre = re.compile(r"^root collection\s+(\d+)\s*$")
# Root list entry
rlere = re.compile(r"^\s*\d+\s+\:\s+0x(\S+)\s+(\d+)\s*$")
def perform_filt(inf, outf):
"""Read inf and emit summary to outf."""
ncollections = 0
nroots = 0
collections = []
elist = []
addrsize = {}
# Read input
while True:
line = inf.readline()
if not line:
break
u.verbose(3, "line is %s" % line)
# Root collection start?
m1 = rcstartre.match(line)
if m1:
collections.append(elist)
elist = []
ncollections += 1
continue
# Root list entry?
m2 = rlere.match(line)
if m2:
nroots += 1
hexaddr = m2.group(1)
siz = m2.group(2)
addrsize[hexaddr] = int(siz)
elist.append(hexaddr)
continue
if elist:
collections.append(elist)
# Now that we've read everything, write GDB script.
if os.path.exists("gdb-cmds.txt"):
os.unlink("gdb-cmds.txt")
if os.path.exists("gdb-out.txt"):
os.unlink("gdb-out.txt")
try:
gf = open("gdb-cmds.txt", "wb")
except IOError as e:
u.error("unable to open output file 'gdbcmds.txt': %s" % e.strerror)
gf.write("set height 0\n")
gf.write("set width 0\n")
gf.write("set pagination off\n")
gf.write("set logging file gdb-out.txt\n")
gf.write("set logging on\n")
gf.write("file %s\n" % flag_module)
ncol = 0
for el in collections:
gf.write("print \"collection %d\"\n" % ncol)
ncol += 1
for hexaddr in el:
gf.write("print \"0x%x size %d\"\n" % (int(hexaddr, 16), addrsize[hexaddr]))
gf.write("info sym 0x%s\n" % hexaddr)
gf.close()
# Invoke GDB
u.docmd("gdb -batch -nh -x gdb-cmds.txt")
# Emit
try:
rf = open("gdb-out.txt", "r")
except IOError as e:
u.error("unable to open output file 'gdb-out.txt': %s" % e.strerror)
lines = rf.readlines()
rf.close()
for line in lines:
outf.write(line)
outf.close()
u.verbose(0, "processed %d roots in %d collections" % (nroots, ncollections))
def perform():
    """Main driver routine.

    Streams default to stdin/stdout; -i/-o override them with real files,
    which are closed again after the filter has run.
    """
    inf, outf = sys.stdin, sys.stdout
    if flag_infile:
        try:
            inf = open(flag_infile, "rb")
        except IOError as e:
            u.error("unable to open input file %s: "
                    "%s" % (flag_infile, e.strerror))
    if flag_outfile:
        try:
            outf = open(flag_outfile, "wb")
        except IOError as e:
            u.error("unable to open output file %s: "
                    "%s" % (flag_outfile, e.strerror))
    perform_filt(inf, outf)
    # Only close the streams we opened ourselves.
    if flag_infile:
        inf.close()
    if flag_outfile:
        outf.close()
def usage(msgarg):
    """Print an optional error message plus usage help, then exit(1).

    msgarg: optional error string written to stderr before the help text.
    """
    me = os.path.basename(sys.argv[0])
    if msgarg:
        sys.stderr.write("error: %s\n" % msgarg)
    # Bug fix: the -o line previously described metavar 'G' as "file O".
    print("""\
    usage: %s [options] < input > output
    options:
    -d increase debug msg verbosity level
    -i F read from input file F
    -o G write to output file G
    -m M analyze load module M
    """ % me)
    sys.exit(1)
def parse_args():
    """Parse command-line options into the module-level flag_* globals."""
    global flag_infile, flag_outfile, flag_module
    try:
        opts, _ = getopt.getopt(sys.argv[1:], "di:o:m:")
    except getopt.GetoptError as err:
        # unrecognized option
        usage(str(err))
    for flag, value in opts:
        if flag == "-d":
            u.increment_verbosity()
        elif flag == "-i":
            flag_infile = value
        elif flag == "-o":
            flag_outfile = value
        elif flag == "-m":
            flag_module = value
# Script entry point: parse command-line flags, set locale, then run.
parse_args()
u.setdeflanglocale()
perform()
``` |
[
{
"content": "Here is a code file:\n```python\nfrom operator import attrgetter\n\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Min\n\nimport commonware.log\nfrom elasticsearch_dsl import F\nfrom elasticsearch_dsl.filter import Bool\n\nimport mkt\nfrom mkt.constants import APP_FEATU... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom operator import attrgetter\n\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Min\n\nimport commonware.log\nfrom elasticsearch_dsl import F\nfrom elasticsearch_dsl.filter import Bool\n\nimport mkt\nfrom mkt.constants ... | ```python
from operator import attrgetter
from django.core.urlresolvers import reverse
from django.db.models import Min
import commonware.log
from elasticsearch_dsl import F
from elasticsearch_dsl.filter import Bool
import mkt
from mkt.constants import APP_FEATURES
from mkt.constants.applications import DEVICE_GAIA
from mkt.prices.models import AddonPremium
from mkt.search.indexers import BaseIndexer
from mkt.search.utils import Search
from mkt.tags.models import attach_tags
from mkt.translations.models import attach_trans_dict
log = commonware.log.getLogger('z.addons')
class WebappIndexer(BaseIndexer):
    """
    Bunch of ES stuff for Webapp include mappings, indexing, search.
    """
    # Fields we don't need to expose in the results, only used for filtering
    # or sorting.
    hidden_fields = (
        '*.raw',
        '*_sort',
        'popularity_*',
        'trending_*',
        'boost',
        'owners',
        'features',
        # 'name' and 'description', as well as the locale variants, are only
        # used for filtering. The fields that are used by the API are
        # 'name_translations' and 'description_translations'.
        'name',
        'description',
        'name_l10n_*',
        'description_l10n_*',
    )

    @classmethod
    def search(cls, using=None):
        """
        Returns a `Search` object.

        We override this to use our patched version which adds statsd timing.
        """
        return (Search(
            using=using or cls.get_es(), index=cls.get_index(),
            doc_type=cls.get_mapping_type_name())
            .extra(_source={'exclude': cls.hidden_fields}))

    @classmethod
    def get_mapping_type_name(cls):
        """
        Returns mapping type name which is used as the key in ES_INDEXES to
        determine which index to use.

        We override this because Webapp is a proxy model to Addon.
        """
        return 'webapp'

    @classmethod
    def get_model(cls):
        # Imported lazily to avoid a circular import with mkt.webapps.models.
        from mkt.webapps.models import Webapp
        return Webapp

    @classmethod
    def get_mapping(cls):
        """Return the full ES mapping definition for the 'webapp' doc type,
        including boost/trending/popularity, translation and per-language
        analyzer fields attached by the BaseIndexer helpers."""
        doc_type = cls.get_mapping_type_name()
        mapping = {
            doc_type: {
                # Disable _all field to reduce index size.
                '_all': {'enabled': False},
                'properties': {
                    # App fields.
                    'id': {'type': 'long'},
                    'app_slug': {'type': 'string'},
                    'app_type': {'type': 'byte'},
                    'author': {
                        'type': 'string',
                        'analyzer': 'default_icu',
                        'fields': {
                            # For exact matches. The simple analyzer allows
                            # for case-insensitive matching.
                            'raw': {'type': 'string',
                                    'analyzer': 'exact_lowercase'},
                        },
                    },
                    'banner_regions': cls.string_not_indexed(),
                    'bayesian_rating': {'type': 'float', 'doc_values': True},
                    'category': cls.string_not_analyzed(),
                    'content_descriptors': cls.string_not_indexed(),
                    'content_ratings': {
                        'type': 'object',
                        'dynamic': 'true',
                    },
                    'created': {'format': 'dateOptionalTime', 'type': 'date',
                                'doc_values': True},
                    'current_version': cls.string_not_indexed(),
                    'default_locale': cls.string_not_indexed(),
                    'description': {'type': 'string',
                                    'analyzer': 'default_icu',
                                    'position_offset_gap': 100},
                    'device': {'type': 'byte'},
                    # The date this app was added to the escalation queue.
                    'escalation_date': {'format': 'dateOptionalTime',
                                        'type': 'date', 'doc_values': True},
                    'features': {
                        'type': 'object',
                        'properties': dict(
                            ('has_%s' % f.lower(), {'type': 'boolean'})
                            for f in APP_FEATURES)
                    },
                    'file_size': {'type': 'long'},
                    'guid': cls.string_not_analyzed(),
                    'has_public_stats': {'type': 'boolean'},
                    'icon_hash': cls.string_not_indexed(),
                    'interactive_elements': cls.string_not_indexed(),
                    'installs_allowed_from': cls.string_not_analyzed(),
                    'is_disabled': {'type': 'boolean'},
                    'is_escalated': {'type': 'boolean'},
                    'is_offline': {'type': 'boolean'},
                    'is_priority': {'type': 'boolean'},
                    'is_rereviewed': {'type': 'boolean'},
                    'last_updated': {'format': 'dateOptionalTime',
                                     'type': 'date'},
                    'latest_version': {
                        'type': 'object',
                        'properties': {
                            'status': {'type': 'byte'},
                            'is_privileged': {'type': 'boolean'},
                            'has_editor_comment': {'type': 'boolean'},
                            'has_info_request': {'type': 'boolean'},
                            'nomination_date': {'type': 'date',
                                                'format': 'dateOptionalTime'},
                            'created_date': {'type': 'date',
                                             'format': 'dateOptionalTime'},
                        },
                    },
                    'manifest_url': cls.string_not_analyzed(),
                    'modified': {'format': 'dateOptionalTime',
                                 'type': 'date'},
                    # Name for searching. This is a list of all the localized
                    # names for the app. We add "position_offset_gap" to work
                    # around the fact that ES stores the same list of tokens as
                    # if this were a single string. The offset gap adds 100
                    # positions between each name and ensures one string from
                    # one name and one string from another name won't both
                    # match with a phrase match query.
                    'name': {
                        'type': 'string',
                        'analyzer': 'default_icu',
                        'position_offset_gap': 100,
                        # For exact matches. Referenced as `name.raw`.
                        'fields': {
                            'raw': cls.string_not_analyzed(
                                position_offset_gap=100)
                        },
                    },
                    # Name for sorting.
                    'name_sort': cls.string_not_analyzed(doc_values=True),
                    # Name for suggestions.
                    'name_suggest': {'type': 'completion', 'payloads': True},
                    'owners': {'type': 'long'},
                    'package_path': cls.string_not_indexed(),
                    'premium_type': {'type': 'byte'},
                    'previews': {
                        'type': 'object',
                        'dynamic': 'true',
                    },
                    'price_tier': cls.string_not_indexed(),
                    'ratings': {
                        'type': 'object',
                        'properties': {
                            'average': {'type': 'float'},
                            'count': {'type': 'short'},
                        }
                    },
                    'region_exclusions': {'type': 'short'},
                    'reviewed': {'format': 'dateOptionalTime', 'type': 'date',
                                 'doc_values': True},
                    # The date this app was added to the re-review queue.
                    'rereview_date': {'format': 'dateOptionalTime',
                                      'type': 'date', 'doc_values': True},
                    'status': {'type': 'byte'},
                    'supported_locales': cls.string_not_analyzed(),
                    'tags': cls.string_not_analyzed(),
                    'upsell': {
                        'type': 'object',
                        'properties': {
                            'id': {'type': 'long'},
                            'app_slug': cls.string_not_indexed(),
                            'icon_url': cls.string_not_indexed(),
                            'name': cls.string_not_indexed(),
                            'region_exclusions': {'type': 'short'},
                        }
                    },
                    'uses_flash': {'type': 'boolean'},
                    'versions': {
                        'type': 'object',
                        'properties': {
                            'version': cls.string_not_indexed(),
                            'resource_uri': cls.string_not_indexed(),
                        }
                    },
                }
            }
        }

        # Attach boost field, because we are going to need search by relevancy.
        cls.attach_boost_mapping(mapping)

        # Attach popularity and trending.
        cls.attach_trending_and_popularity_mappings(mapping)

        # Add fields that we expect to return all translations.
        cls.attach_translation_mappings(
            mapping, ('banner_message', 'description', 'homepage',
                      'name', 'release_notes', 'support_email',
                      'support_url'))

        # Add language-specific analyzers.
        cls.attach_language_specific_analyzers(
            mapping, ('name', 'description'))

        return mapping

    @classmethod
    def extract_document(cls, pk=None, obj=None):
        """Extracts the ElasticSearch index document for this instance.

        Accepts either a primary key (the Webapp is then fetched) or an
        already-loaded object; returns a plain dict ready for indexing.
        """
        from mkt.webapps.models import (AppFeatures, attach_devices,
                                        attach_prices, attach_translations,
                                        RatingDescriptors, RatingInteractives)

        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)

        # Attach everything we need to index apps.
        for transform in (attach_devices, attach_prices, attach_tags,
                          attach_translations):
            transform([obj])

        latest_version = obj.latest_version
        version = obj.current_version
        geodata = obj.geodata
        features = (version.features.to_dict()
                    if version else AppFeatures().to_dict())

        try:
            status = latest_version.statuses[0][1] if latest_version else None
        except IndexError:
            status = None

        # Bulk-copy the simple scalar attributes straight off the object.
        attrs = ('app_slug', 'bayesian_rating', 'created', 'default_locale',
                 'guid', 'icon_hash', 'id', 'is_disabled', 'is_offline',
                 'file_size', 'last_updated', 'modified', 'premium_type',
                 'status', 'uses_flash')
        d = dict(zip(attrs, attrgetter(*attrs)(obj)))

        d['app_type'] = obj.app_type_id
        d['author'] = obj.developer_name
        d['banner_regions'] = geodata.banner_regions_slugs()
        d['category'] = obj.categories if obj.categories else []
        d['content_ratings'] = (obj.get_content_ratings_by_body(es=True) or
                                None)
        try:
            d['content_descriptors'] = obj.rating_descriptors.to_keys()
        except RatingDescriptors.DoesNotExist:
            d['content_descriptors'] = []
        d['current_version'] = version.version if version else None
        d['device'] = getattr(obj, 'device_ids', [])
        d['features'] = features
        d['has_public_stats'] = obj.public_stats
        try:
            d['interactive_elements'] = obj.rating_interactives.to_keys()
        except RatingInteractives.DoesNotExist:
            d['interactive_elements'] = []
        d['installs_allowed_from'] = (
            version.manifest.get('installs_allowed_from', ['*'])
            if version else ['*'])
        d['is_priority'] = obj.priority_review

        is_escalated = obj.escalationqueue_set.exists()
        d['is_escalated'] = is_escalated
        d['escalation_date'] = (obj.escalationqueue_set.get().created
                                if is_escalated else None)
        is_rereviewed = obj.rereviewqueue_set.exists()
        d['is_rereviewed'] = is_rereviewed
        d['rereview_date'] = (obj.rereviewqueue_set.get().created
                              if is_rereviewed else None)

        if latest_version:
            d['latest_version'] = {
                'status': status,
                'is_privileged': latest_version.is_privileged,
                'has_editor_comment': latest_version.has_editor_comment,
                'has_info_request': latest_version.has_info_request,
                'nomination_date': latest_version.nomination,
                'created_date': latest_version.created,
            }
        else:
            d['latest_version'] = {
                'status': None,
                'is_privileged': None,
                'has_editor_comment': None,
                'has_info_request': None,
                'nomination_date': None,
                'created_date': None,
            }
        d['manifest_url'] = obj.get_manifest_url()
        d['package_path'] = obj.get_package_path()
        d['name_sort'] = unicode(obj.name).lower()
        d['owners'] = [au.user.id for au in
                       obj.addonuser_set.filter(role=mkt.AUTHOR_ROLE_OWNER)]
        d['previews'] = [{'filetype': p.filetype, 'modified': p.modified,
                          'id': p.id, 'sizes': p.sizes}
                         for p in obj.previews.all()]
        try:
            p = obj.addonpremium.price
            d['price_tier'] = p.name
        except AddonPremium.DoesNotExist:
            d['price_tier'] = None

        d['ratings'] = {
            'average': obj.average_rating,
            'count': obj.total_reviews,
        }
        d['region_exclusions'] = obj.get_excluded_region_ids()
        d['reviewed'] = obj.versions.filter(
            deleted=False).aggregate(Min('reviewed')).get('reviewed__min')

        # The default locale of the app is considered "supported" by default.
        supported_locales = [obj.default_locale]
        other_locales = (filter(None, version.supported_locales.split(','))
                         if version else [])
        if other_locales:
            supported_locales.extend(other_locales)
        d['supported_locales'] = list(set(supported_locales))

        d['tags'] = getattr(obj, 'tags_list', [])

        if obj.upsell and obj.upsell.premium.is_published():
            upsell_obj = obj.upsell.premium
            d['upsell'] = {
                'id': upsell_obj.id,
                'app_slug': upsell_obj.app_slug,
                'icon_url': upsell_obj.get_icon_url(128),
                # TODO: Store all localizations of upsell.name.
                'name': unicode(upsell_obj.name),
                'region_exclusions': upsell_obj.get_excluded_region_ids()
            }

        d['versions'] = [dict(version=v.version,
                              resource_uri=reverse_version(v))
                         for v in obj.versions.all()]

        # Handle localized fields.
        # This adds both the field used for search and the one with
        # all translations for the API.
        for field in ('description', 'name'):
            d.update(cls.extract_field_translations(
                obj, field, include_field_for_search=True))
        # This adds only the field with all the translations for the API, we
        # don't need to search on those.
        for field in ('homepage', 'support_email', 'support_url'):
            d.update(cls.extract_field_translations(obj, field))

        if version:
            attach_trans_dict(version._meta.model, [version])
            d.update(cls.extract_field_translations(
                version, 'release_notes', db_field='releasenotes_id'))
        else:
            d['release_notes_translations'] = None
        attach_trans_dict(geodata._meta.model, [geodata])
        d.update(cls.extract_field_translations(geodata, 'banner_message'))

        # Add boost, popularity, trending values.
        d.update(cls.extract_popularity_trending_boost(obj))

        # If the app is compatible with Firefox OS, push suggestion data in the
        # index - This will be used by RocketbarView API, which is specific to
        # Firefox OS.
        if DEVICE_GAIA.id in d['device'] and obj.is_published():
            d['name_suggest'] = {
                'input': d['name'],
                'output': unicode(obj.id),  # We only care about the payload.
                'weight': int(d['boost']),
                'payload': {
                    'default_locale': d['default_locale'],
                    'icon_hash': d['icon_hash'],
                    'id': d['id'],
                    'manifest_url': d['manifest_url'],
                    'modified': d['modified'],
                    'name_translations': d['name_translations'],
                    'slug': d['app_slug'],
                }
            }

        for field in ('name', 'description'):
            d.update(cls.extract_field_analyzed_translations(obj, field))

        return d

    @classmethod
    def get_indexable(cls):
        """Returns the queryset of ids of all things to be indexed."""
        from mkt.webapps.models import Webapp
        # Deleted apps are indexed too (with_deleted).
        return Webapp.with_deleted.all()

    @classmethod
    def run_indexing(cls, ids, ES=None, index=None, **kw):
        """Override run_indexing to use app transformers."""
        from mkt.webapps.models import Webapp
        log.info('Indexing %s webapps' % len(ids))
        qs = Webapp.with_deleted.filter(id__in=ids)

        ES = ES or cls.get_es()
        docs = []
        for obj in list(qs):
            try:
                docs.append(cls.extract_document(obj.id, obj=obj))
            except Exception as e:
                # One bad app must not abort the whole batch; log and go on.
                log.error('Failed to index webapp {0}: {1}'
                          .format(obj.id, repr(e)),
                          # Trying to chase down a cache-machine problem.
                          exc_info="marketplace:" in str(e))
        cls.bulk_index(docs, es=ES, index=index or cls.get_index())

    @classmethod
    def filter_by_apps(cls, app_ids, queryset=None):
        """
        Filters the given queryset by the given app IDs.

        This uses a `should` filter, which is equivalent to an "OR".
        """
        queryset = queryset or cls.search()
        app_ids = list(set(app_ids))  # De-dupe.
        queryset = queryset.filter(Bool(should=[F('terms', id=app_ids)]))
        return queryset[0:len(app_ids)]
def reverse_version(version):
    """Return the API detail URL for *version*, or None for falsy input.

    The try/except AttributeError allows this to be used where the input is
    ambiguous, and could be either an already-reversed URL or a Version
    object (a plain string has no .pk, so it is returned unchanged).
    """
    if not version:
        return None
    try:
        return reverse('version-detail', kwargs={'pk': version.pk})
    except AttributeError:
        return version
``` |
[
{
"content": "Here is the script:\n```python\n# -*- coding: utf-8 -*-\n#---------------------------------------------------------------------\n'''\nCreated on 07 Nov. 2015\n\n@author: Seko\n@summary: Advanced Downloader\n\n'''\n#---------------------------------------------------------------------\n\n# ________... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#---------------------------------------------------------------------\n'''\nCreated on 07 Nov. 2015\n\n@author: Seko\n@summary: Advanced Downloader\n\n'''\n#-------------------------------------------------------------------... | ```python
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------
'''
Created on 07 Nov. 2015
@author: Seko
@summary: Advanced Downloader
'''
#---------------------------------------------------------------------
# ____________________ I M P O R T ____________________
import util
import xbmc
import xbmcaddon
import xbmcvfs
import xbmcgui
import StorageServer
import os
import re
import urllib2
import time
import zipfile
from contextlib import closing
from downloaderModule.pluginDownloaderTpl import downloaderTemplate
# ___ Initialize database
try:
from sqlite3 import dbapi2 as sqlite
xbmc.log("[AdvDownloader] Loading sqlite3 as DB engine", xbmc.LOGDEBUG)
except:
from pysqlite2 import dbapi2 as sqlite
xbmc.log("[AdvDownloader] Loading pysqlite2 as DB engine", xbmc.LOGDEBUG)
"""
AdvancedDownloader Class
"""
class AdvancedDownloader(downloaderTemplate):
    """Queue-based file downloader for Kodi with resume support and a
    background progress dialog."""

    # ___ HEADER CONFIGURATION FOR HTML REQUEST
    HEADER_CFG = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                   'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
                   'Accept-Encoding': 'none',
                   'Accept-Language': 'en-US,en;q=0.8',
                   'Connection': 'keep-alive'}
    # ___ DOWNLOAD STATUS (lifecycle states stored in item['dl_status'])
    STATUS_STARTING = 0
    STATUS_DOWNLOADING = 1
    STATUS_DOWNLOADING_READY_TO_PLAY = 2
    STATUS_STOPPED = 3
    STATUS_FINISHED = 4
    STATUS_ERROR = 5
    # ___ QUEUE TYPE (backend used to persist the pending-download queue)
    QUEUE_DB = 1
    QUEUE_CACHE = 2
    # ___ progressDialog (shared background progress dialog, lazily created)
    pDialog = None
    def __init__(self):
        """
        Constructor.

        Sets the downloader id, resolves the owning add-on and its install
        directory, and builds the pending-download queue backend
        (database-backed or cache-backed, depending on self.queueType).
        """
        self.ID = 2
        # ___ Various variables
        self.__addon__ = xbmcaddon.Addon(id='script.module.seko.downloader')
        self.__addonDir__ = xbmc.translatePath(self.__addon__.getAddonInfo('path'))
        # ___ Init queue
        # ___ @Todo : Add variable to settings
        self.queueType = 1
        # NOTE(review): if queueType is neither QUEUE_DB nor QUEUE_CACHE,
        # self.__queue__ is never set.
        if self.queueType == self.QUEUE_DB:
            self.__queue__ = DatabaseQueue()
        elif self.queueType == self.QUEUE_CACHE:
            self.__queue__ = CacheQueue()
        xbmc.log("[AdvDownloader] Init Advanced Downloader Done", xbmc.LOGDEBUG)
    def clearAll(self):
        # Intentionally a no-op (interface method from downloaderTemplate) —
        # presumably nothing needs clearing here; confirm against the template.
        pass
    def download(self, fileName, params, async=True):
        """
        Download method: normalize the request parameters, enqueue the item
        and start processing the queue (optionally in a separate thread).
        @param fileName: the name of the file to download
        @param params: the dictionnary with informations about the file to download
            The minimum format is :
            {
            'url':'<the url to download the file>',
            'title':'<Title of the movie>',
            'destinationFolder':'<the destination folder>',
            'webBrowserId': '<the web browser id>',
            'playAtEnd': playAtEnd, => Boolean which indicates if the download file should be play at the end of the download
            (-- No mandatory variable --)
            'useragent': the user-agent to use for download the file
            'incomplete_path': incomplete_path, => path where the temporary file is downloaded
            (-- Variable added after --)
            'fileName' : the file name
            'complete_path': complete_path, => path where the complete file is moved
            'totalsize': totalsize, => the total size of the downloaded file
            'current_dl_size': float(result[i][7]),
            'current_percent_dl': float(result[i][8]),
            'dl_status': int(result[i][9]),
            'async': int(result[i][10]),
            'queuePosition': the position in the queue, => calculate during insertion in the queue
            (-- Variable for streamer --)
            'cacheSize': cache Size, => the cache size in percent
            'percent': percent, => the current progress in percent of the download
            'oldpercent':oldpercent,
            'initialpercent': initialpercent, => the initial percent at the start of the download (used for resume download)
                Used for calculating the percent cache
            'percentcache': percentcache, => the percent to exceed for reaching the cache (initialpercent+cacheSize)
            'percentforcache': percentforcache => the percent to exceed for reaching the cache
            }
        @param async: Boolean which indicates if the download should be start in an other thread
        """
        # ___ Initialize all necessary variable
        # ___ Set the filename (slugified so it is filesystem-safe)
        params['fileName'] = self._slugify(fileName)
        # ___ Set the async value
        if async:
            params['async'] = 1
        else:
            params['async'] = 0
        # ___ Set the playAtEnd value (incoming value is the string 'True'/'False')
        if params['playAtEnd'] == 'True':
            params['playAtEnd'] = 1
        else:
            params['playAtEnd'] = 0
        # ___ Set the complete_path value
        params['complete_path'] = os.path.join(params['destinationFolder'], params['fileName'].encode("utf-8"))
        # ___ If incomplete path is not in params variable, the incomplete path will be the complete path.
        if 'incomplete_path' not in params :
            params['incomplete_path'] = params['complete_path']
        else:
            params['incomplete_path'] = os.path.join(params['incomplete_path'], params['fileName'].encode("utf-8"))
        params['totalsize'] = float(self._getTargetFileSize(params))
        params['current_dl_size'] = float(0)
        params['current_percent_dl'] = float(0)
        params['dl_status' ] = self.STATUS_STOPPED
        params['queuePosition'] = self.__queue__._getLastIndex() + 1
        # ___ Add element to the queue
        # NOTE(review): _clearQueue() is called before adding — confirm it only
        # prunes stale entries rather than emptying the whole queue.
        self.__queue__._clearQueue()
        self.__queue__._addToQueue(params['fileName'], params)
        if params['async'] == 1:
            xbmc.log("[AdvDownloader] Async", xbmc.LOGDEBUG)
            self._run_async(self._startDownload)
            xbmc.log("[AdvDownloader] Download added to the queue", xbmc.LOGDEBUG)
        else:
            xbmc.log("[AdvDownloader] Normal", xbmc.LOGDEBUG)
            self._startDownload()
            xbmc.log("[AdvDownloader] Download done", xbmc.LOGDEBUG)
    def _processQueueDownload(self):
        """Drain the download queue.

        Pops items one at a time, downloads each, then moves the finished
        file into place, optionally extracts archives and starts playback,
        and notifies the user. Stops early when a stop was requested
        (status 300) and closes the progress dialog when done.
        """
        item = self.__queue__._getNextItemFromQueue()
        if item:
            # __ If the progress dialog does not exist, we initialize it.
            if not self.pDialog :
                self.pDialog = xbmcgui.DialogProgressBG()
                self.pDialog.create("Progressbar", "")
        while item:
            # __ Default status = 500 (error) unless the download says otherwise
            status = 500
            self._setPaths(item)
            # __ Verify parameters in item
            if not "url" in item:
                xbmc.log("URL missing : %s" % repr(item), xbmc.LOGERROR)
            elif item["url"].find("ftp") > -1 or item["url"].find("http") > -1:
                # __ Start to download a new item if it is a http or ftp url :
                #        - Set the 'StopDownloaderQueue' to 'False'
                #        - Download the file
                self.__queue__._setStopFlag(False)
                status = self._downloadURL(item)
            else:
                # __ Bad URL
                xbmc.log("[AdvDownloader] Bad url : "+item["url"], xbmc.LOGERROR)
            # __ If status = 200, the download is complete
            if status == 200:
                if xbmcvfs.exists(item["incomplete_path"]):
                    # __ Move the file from temp directory to the complete path.
                    xbmc.log("[AdvDownloader] Moving %s to %s" % (repr(item["incomplete_path"]), repr(item["complete_path"])),xbmc.LOGDEBUG)
                    if repr(item["incomplete_path"]) != repr(item["complete_path"]):
                        xbmcvfs.rename(item["incomplete_path"], item["complete_path"])
                    # __ Extract file if necessary
                    # __ Extract rar file
                    if str(item["complete_path"]).endswith(".rar"):
                        xbmc.executebuiltin("XBMC.Extract("+str(item["complete_path"])+","+item["download_path"].decode("utf-8")+")")
                    # __ Extract all zip file
                    elif str(item["complete_path"]).endswith(".zip"):
                        with zipfile.ZipFile(str(item["complete_path"]), "r") as compressFile:
                            compressFile.extractall(item["download_path"].decode("utf-8"))
                    # __ Display complete message
                    self._showMessage("Download complete", item['fileName'])
                    # __ Launch video if it is asked
                    if 'playAtEnd' in item and int(item['playAtEnd'])==1:
                        if not str(item["complete_path"]).endswith(".rar") and not str(item["complete_path"]).endswith(".zip"):
                            try:
                                xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(str(item["complete_path"]))
                            except Exception:
                                xbmc.log("[AdvDownloader] Download complete, play movie failed",xbmc.LOGDEBUG)
                                self._showMessage("Play movie failed", "ERROR")
                else:
                    xbmc.log("[AdvDownloader] Download complete, but file %s not found" % repr(item["incomplete_path"]),xbmc.LOGERROR)
                    self._showMessage("Download failed", "ERROR")
            # __ Else if the status is neither 200 nor 300, the download failed
            elif status != 300:
                xbmc.log("[AdvDownloader] Failure: " + repr(item) + " - " + repr(status),xbmc.LOGERROR)
                self._showMessage("Download failed", "unknown error")
            # __ If status = 300, the download is just stopped.
            if status == 300:
                item = False
            # __ Else delete incomplete path, and remove the item in the queue.
            else:
                if xbmcvfs.exists(item["incomplete_path"]) and repr(item["incomplete_path"]) != repr(item["complete_path"]):
                    xbmcvfs.delete(item["incomplete_path"])
                self.__queue__._removeToQueue(item['fileName'])
                # __ Get the next download
                item = self.__queue__._getNextItemFromQueue()
        xbmc.log("[AdvDownloader] Finished download queue.",xbmc.LOGDEBUG)
        # __ Close the progress dialog at the end, if necessary
        if self.pDialog:
            self.pDialog.close()
            xbmc.log("[AdvDownloader] Closed dialog",xbmc.LOGDEBUG)
            self.pDialog = None
    def _downloadURL(self,item):
        """
        Method to download a file from an url
        @param item: the dictionnary with informations about the file to download
            {'url': url,'incomplete_path': incomplete_path,'complete_path': complete_path,'playAtEnd': playAtEnd }
        @note:
            -add 'old_percent' to item
            -add 'percentforcache' to item
        @return: 200 on success, 300 when the download was stopped by the
            user before completion, 500 on error.
        """
        xbmc.log('[AdvDownloader] '+item["fileName"],xbmc.LOGDEBUG)
        item["dl_status"]=self.STATUS_STARTING
        url = urllib2.Request(item["url"])
        shouldRestartDl = False
        # Transfer bookkeeping: bytes so far, timing marks for speed/UI
        # throttling, and the byte count at the previous UI update.
        params = {"bytes_so_far": 0, "mark": 0.0, "queue_mark": 0.0, "obytes_so_far": 0}
        item["current_percent_dl"] = 0.1
        item["old_percent"] = -1
        # __ Set the useragent in the header
        if "useragent" in item:
            url.add_header("User-Agent", item["useragent"])
        else:
            url.add_header("User-Agent", self.HEADER_CFG['User-Agent'])
        # __ Open the temporary file 'incomplete_path'
        if "current_dl_size" in item and float(item['current_dl_size']) > 0:
            # _ If we resume the download, we open the file with parameters 'ab' for appending bytes
            file = open(item["incomplete_path"], "ab")
        else:
            file = open(item["incomplete_path"], "wb")
            shouldRestartDl = True
        # __ If we should resume the download, add in the header "Range" with the file size
        if "current_dl_size" in item and float(item['current_dl_size']) > 0:
            xbmc.log("[AdvDownloader] Current size "+str(item['current_dl_size'])+" / Total size : "+str(item["totalsize"]),xbmc.LOGDEBUG)
            url.add_header("Range","bytes=%s-%s" % (item['current_dl_size'],item["totalsize"]))
            params["bytes_so_far"] = item['current_dl_size']
        # __ Start the connexion
        con = urllib2.urlopen(url)
        # __ Get headers informations, to know if we can resume the download
        responseHeader = con.info()
        if 'Accept-Ranges' in responseHeader:
            xbmc.log("[AdvDownloader] Accept-Ranges: "+responseHeader['Accept-Ranges'],xbmc.LOGINFO)
        if "current_dl_size" in item and 'Accept-Ranges' in responseHeader and responseHeader['Accept-Ranges'] == 'none':
            # ___ If we can't resume the download, we re start the download
            url = urllib2.Request(item["url"])
            # __ Set the useragent in the header
            if "useragent" in item:
                url.add_header("User-Agent", item["useragent"])
            else:
                url.add_header("User-Agent", self.HEADER_CFG["User-Agent"])
            # __ Delete the temporary file 'incomplete_path'
            xbmcvfs.delete(item["incomplete_path"])
            # __ Open the temporary file 'incomplete_path'
            file = open(item["incomplete_path"], "wb")
            # close the current connection
            con.close()
            # __ Restart the connection
            con = urllib2.urlopen(url)
            # __ Set shouldRestartDl to True
            shouldRestartDl = True
            item['current_dl_size'] = float(0)
        # __ Set the chunk_size
        chunk_size = 1024 * 8
        # __ If we should resume the download, calculate the percent
        if "current_dl_size" in item and float(item['current_dl_size']) > 0 :
            # __ Calculate the percent
            self._generatePercent(item, params)
        try:
            # __ Download the file until it is complete or until asking for stop
            item["dl_status"]=self.STATUS_DOWNLOADING
            while (not self.__queue__._shouldStop() ):
                # Read next 'chunk_size'
                chunk = con.read(chunk_size)
                # Write
                file.write(chunk)
                # Increase bytes_so_far
                params["bytes_so_far"] += len(chunk)
                if params["mark"] == 0.0 and params["bytes_so_far"] > 0:
                    params["mark"] = time.time()
                    xbmc.log("[AdvDownloader] Mark set",xbmc.LOGDEBUG)
                # __ Calculate the percent
                self._generatePercent(item, params)
                # xbmc.log("recieved chunk: %s - %s" % ( repr(item["percent"] > item["old_percent"]), repr(time.time() - params["queue_mark"])),xbmc.LOGDEBUG)
                if item["current_percent_dl"] > item["old_percent"] or time.time() - params["queue_mark"] > 30:
                    # __ Update the progress bar asynchronous if the download is not for streamer
                    # NOTE(review): _updateProgress is invoked synchronously
                    # here; its return value (None) is what _run_async gets.
                    self._run_async(self._updateProgress(item, params))
                    item["old_percent"] = item["current_percent_dl"]
                    params["obytes_so_far"] = params["bytes_so_far"]
                # _ Break when the chunk is None
                if not chunk:
                    break
            # __ Close connection and the file
            xbmc.log("[AdvDownloader] Loop done",xbmc.LOGDEBUG)
            con.close()
            file.close()
        except Exception, e:
            print str(e)
            xbmc.log("Error : "+repr(e),xbmc.LOGERROR)
            xbmc.log("Download failed.",xbmc.LOGERROR)
            # Best-effort cleanup: the stream/file may already be closed.
            try:
                con.close()
            except:
                xbmc.log("Failed to close download stream")
            try:
                file.close()
            except:
                xbmc.log("Failed to close file handle")
            self._showMessage("Download failed", "ERROR")
            # ___ Set status to error
            item['dl_status'] = self.STATUS_ERROR
            # __ Return 500 if the download is failed due to an error
            return 500
        if self.__queue__._shouldStop() and int(item["current_percent_dl"]) < 99 :
            # __ Return 300 if the download is aborted
            xbmc.log("[AdvDownloader] Download aborted : "+str(self.__queue__._shouldStop()),xbmc.LOGINFO)
            # ___ Set status to stopped
            item['dl_status'] = self.STATUS_STOPPED
            self.__queue__._updateQueueItem(item['fileName'],item)
            return 300
        # ___ Set status to finished
        item['dl_status'] = self.STATUS_FINISHED
        self.__queue__._updateQueueItem(item['fileName'],item)
        # __ Return 200 if the download is complete
        # NOTE(review): success message logged at LOGERROR — probably meant LOGDEBUG.
        xbmc.log("[AdvDownloader] _downloadURL Done",xbmc.LOGERROR)
        return 200
    def _setPaths(self, params):
        """
        _setPaths Method
        Method to set :
            -the 'incomplete_path' in the 'params' variable
            -the 'complete_path' in the 'params' variable
        Removes any pre-existing complete file, and for a pre-existing
        partial file asks the user whether to resume (recording the current
        size) or start over (deleting the partial file).
        @param params: the dictionnary with informations about the file to download
        """
        xbmc.log('[AdvDownloader] '+params['fileName'], xbmc.LOGDEBUG)
        # Check utf-8 stuff here
        xbmc.log("[AdvDownloader] Path incomplete: "+params["incomplete_path"], xbmc.LOGDEBUG)
        xbmc.log("[AdvDownloader] Path complete: "+params["complete_path"], xbmc.LOGDEBUG)
        # __ If the 'complete_path' already exists, delete it
        if xbmcvfs.exists(params["complete_path"]):
            xbmc.log("[AdvDownloader] Removing existing %s" % repr(params["complete_path"]), xbmc.LOGDEBUG)
            xbmcvfs.delete(params["complete_path"])
        # __ If the 'incomplete_path' already exists, resume or delete it
        if xbmcvfs.exists(params["incomplete_path"]):
            if self._confirmResume(self.__addon__.getLocalizedString(33207),self.__addon__.getLocalizedString(33208)+params['fileName']):
                xbmc.log("[AdvDownloader] Resuming incomplete %s" % repr(params["incomplete_path"]), xbmc.LOGDEBUG)
                params['current_dl_size']=self._getFileSize(params["incomplete_path"])
            else:
                xbmc.log("[AdvDownloader] Removing incomplete %s" % repr(params["incomplete_path"]), xbmc.LOGDEBUG)
                xbmcvfs.delete(params["incomplete_path"])
        xbmc.log("[AdvDownloader] _setPaths Done", xbmc.LOGDEBUG)
    def _generatePercent(self, item, params):
        """
        Method _generatePercent
        Recompute item['current_dl_size'] and item['current_percent_dl']
        from the transfer counters, rate-limited to one refresh per 200 ms.
        @param item: the item for updating the percent
        @param params: all parameters associated with the item
        """
        get = params.get
        iget = item.get
        new_delta = False
        # Throttle: only refresh if more than 0.2s elapsed since last update.
        if "last_delta" in item:
            if time.time() - item["last_delta"] > 0.2:
                new_delta = True
        else:
            item["last_delta"] = 0.0
            new_delta = True
        item['current_dl_size'] = get("bytes_so_far")
        if item["totalsize"] > 0 and new_delta:
            # Known total size: exact percent from bytes transferred.
            item["current_percent_dl"] = ( float(get("bytes_so_far")) * 100) / float(item["totalsize"])
        elif iget("duration") and get("mark") != 0.0 and new_delta:
            # No size but a known duration: estimate progress from elapsed time.
            time_spent = time.time() - get("mark")
            item["current_percent_dl"] = time_spent / int(iget("duration")) * 100
            xbmc.log("[AdvDownloader] Time spent: %s. Duration: %s. Time left: %s (%s)" % (int(time_spent), int(iget("duration")),
                     int(int(iget("duration")) - time_spent),
                     self._convertSecondsToHuman(int(iget("duration")) - time_spent)), xbmc.LOGDEBUG)
        elif new_delta:
            # Neither size nor duration known: advance the percent
            # heuristically and wrap around at 100.
            xbmc.log("[AdvDownloader] cycle - " + str(time.time() - item["last_delta"]), xbmc.LOGDEBUG)
            delta = time.time() - item["last_delta"]
            if delta > 10 or delta < 0:
                delta = 5
            item["current_percent_dl"] = iget("old_percent") + delta
            if item["current_percent_dl"] >= 100:
                item["current_percent_dl"] -= 100
            item["old_percent"] = item["current_percent_dl"]
        if new_delta:
            item["last_delta"] = time.time()
    def _updateProgress(self, item, params):
        """
        Method _updateProgress

        Compute the current download speed, persist the item's progress in
        the queue (at most every 1.5s), and refresh the Kodi background
        progress dialog (hidden while a video plays fullscreen).

        @param item: the current item
        @param params: the dictionary with information about the file to download
        """
        get = params.get
        iget = item.get
        # NOTE(review): 'queue' is assigned but never used in this method.
        queue = False
        new_mark = time.time()
        # __ Avoid a division by zero when no time has passed since 'mark'.
        if new_mark == get("mark"):
            speed = 0
        else:
            speed = int((get("bytes_so_far") / 1024) / (new_mark - get("mark")))
        # __ Persist progress to the queue backend at most every 1.5 seconds.
        if new_mark - get("queue_mark") > 1.5:
            heading = u"[%s] %sKb/s (%.2f%%)" % (self.__queue__._getLastIndex(), speed, item["current_percent_dl"])
            xbmc.log("[AdvDownloader] Updating %s - %s" % (heading, item['fileName'].encode("utf-8")), xbmc.LOGDEBUG)
            params["queue_mark"] = new_mark
            self.__queue__._updateQueueItem(item['fileName'],item)
        if xbmc.Player().isPlaying() and xbmc.getCondVisibility("VideoPlayer.IsFullscreen"):
            # __ Hide the progress dialog if we start to play a movie
            if self.pDialog:
                self.pDialog.close()
                self.pDialog = None
        else:
            # __ Initialize the progress dialog if it is closed
            if not self.pDialog:
                self.pDialog = xbmcgui.DialogProgressBG()
                self.pDialog.create("Preparing download", "")
            heading = u"[%s] %s - %.2f%% (%s Kb/s)" % (str(self.__queue__._getLastIndex()), "Downloading", float(item["current_percent_dl"]),speed)
            xbmc.log("[AdvDownloader] Heading : "+heading, xbmc.LOGDEBUG)
            # __ Update the progress dialog
            if iget("Title"):
                self.pDialog.update(int(item["current_percent_dl"]), heading, iget("Title"))
            else:
                xbmc.log("[AdvDownloader] Try to update the dialog",xbmc.LOGDEBUG)
                self.pDialog.update(int(item["current_percent_dl"]), heading, item['fileName'])
        xbmc.log("[AdvDownloader] _updateProgress Done", xbmc.LOGDEBUG)
    def _startDownload(self):
        """Start processing the queued downloads."""
        self._processQueueDownload()
    def _stopDownload(self):
        """Ask the queue backend to stop all running downloads."""
        self.__queue__._askStop()
    def _pauseDownload(self):
        """Pause downloads (not implemented for this downloader)."""
        pass
    def _resumeDownload(self):
        """Resume downloads (not implemented for this downloader)."""
        pass
    def getQueue(self):
        """Return the current download queue (delegates to the queue backend)."""
        return self.__queue__._getQueue()
def _run_async(self, func, *args, **kwargs):
"""
Method _run_async
@param func: the function to execute asynchronous
@param *args: the arguments passed into the function called
@param **kwargs: others arguments
@return the thread started
"""
from threading import Thread
worker = Thread(target=func, args=args, kwargs=kwargs)
worker.start()
return worker
    def _showMessage(self, heading, message):
        """
        Method to show a notification
        @param heading : the heading text
        @param message : the message of the notification
        """
        # NOTE(review): message and the whole builtin string are utf-8 encoded
        # here - presumably because Kodi's Python 2 executebuiltin expects
        # byte strings; confirm before porting to Python 3.
        xbmc.executebuiltin((u'XBMC.Notification("%s", "%s", %s)' % (heading, message.encode("utf-8"), 2000)).encode("utf-8"))
    def _showDialog(self, heading, message):
        """
        Method to show a "ok" dialog window

        Blocks until the user dismisses the dialog.

        @param heading : the heading text
        @param message : the message of the dialog window
        """
        dialog = xbmcgui.Dialog()
        dialog.ok(heading, message)
    def _confirmResume(self, heading, line1, line2="", line3=""):
        """
        Method to ask a confirmation for resuming the download
        @param heading: the heading text
        @param line1 : the first line of the confirmation dialog
        @param line2 : the second line of the confirmation dialog (optional)
        @param line3 : the third line of the confirmation dialog (optional)
        @return: true if the user confirm the resume of dialog
        """
        dialog = xbmcgui.Dialog()
        return dialog.yesno(heading, line1, line2, line3)
    def _getTargetFileSize(self, item):
        """
        Method to get the size of a file from an url
        @param item: dictionary which represents the file; must contain the
               download 'url' and may contain a 'useragent' override
        @return the size in bytes taken from the Content-Length header,
                or 0 when the header is empty
        """
        url = urllib2.Request(item["url"], headers=AdvancedDownloader.HEADER_CFG)
        # __ Set the useragent in the header
        if "useragent" in item:
            url.add_header("User-Agent", item["useragent"])
        # __ Start the connexion
        con = urllib2.urlopen(url)
        total_size = 0
        # __ Set the total size (Python 2 httplib API: info().getheader;
        # raises AttributeError when the header is absent entirely)
        if con.info().getheader("Content-Length").strip():
            total_size = int(con.info().getheader("Content-Length").strip())
        # __ Return the total size
        return total_size
    def _slugify(self, value):
        """
        Method to :
        Normalizes string, converts to lowercase, removes non-alpha characters,
        and converts spaces to hyphens.

        NOTE(review): despite the summary above, runs of hyphens/whitespace
        are actually collapsed to single spaces, not hyphens. The last 4
        characters are assumed to be the file extension (dot included) and
        are re-appended untouched. Python 2 only (str.decode / unicode).
        """
        import unicodedata
        # Split off the assumed 4-character extension before normalizing.
        extension = value[len(value) - 4:]
        value = value[:len(value) - 4]
        value = value.decode("utf-8")
        value = value.decode('unicode-escape','ignore')
        # Strip accents, then drop anything that is not word/space/dot/hyphen.
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
        value = unicode(re.sub('[^\w\s\.-]', ' ', value).strip().lower())
        value = re.sub('[-\s]+', ' ', value)
        return value + extension
def _convertSecondsToHuman(self, seconds):
"""
Method _convertSecondsToHuman
@param seconds:the number of seconds
@return a number of seconds if the input is inferior of 60 seconds, else return a number of minutes
"""
seconds = int(seconds)
if seconds < 60:
return "~%ss" % (seconds)
elif seconds < 3600:
return "~%sm" % (seconds / 60)
    def _isStarted(self):
        """Whether the downloader is running (not implemented)."""
        pass
class QueueClass():
    """
    Default queue class

    Abstract interface implemented by DatabaseQueue and CacheQueue; every
    method here is a no-op placeholder.
    """
    def _getQueue(self):
        # Return the whole queue.
        pass
    def _getNextItemFromQueue(self):
        # Return the next item to download.
        pass
    def _getLastIndex(self):
        # Return the last/maximum queue position.
        pass
    def _addToQueue(self, fileName, params):
        # Add a download to the queue.
        pass
    def _removeToQueue(self, fileName, params={}):
        # Remove a download from the queue.
        pass
    def _clearQueue(self):
        # Empty the queue.
        pass
    def _updateQueueItem(self,fileName,params):
        # Persist updated progress for a queued item.
        pass
    def _shouldStop(self):
        # Return True when a stop of all downloads was requested.
        pass
    def _askStop(self):
        # Request that all downloads stop.
        pass
    def _setStopFlag(self,shouldStop):
        # Force the stop flag to a given value.
        pass
class DatabaseQueue(QueueClass):
    """
    SQLite-backed download queue.

    Each row of the AdvDownloader table describes one queued download and
    the queue order is given by the queuePosition column. All statements
    now use parameterized queries ('?' placeholders): the original code
    interpolated values straight into the SQL text, which broke (and was
    injectable) as soon as a fileName or url contained a quote.
    """
    def __init__(self):
        self.__addon__ = xbmcaddon.Addon(id='script.module.seko.downloader')
        self.__addonDir__ = xbmc.translatePath(self.__addon__.getAddonInfo('path'))
        # ___ The database file lives inside the add-on directory.
        self.__dbFile__ = os.path.join(self.__addonDir__, 'AdvancedDownloader.db')
        try:
            self.createDatabase()
        except Exception:
            xbmc.log("[AdvDownloader] Error during connection to the downloader database", xbmc.LOGERROR)
    def createDatabase(self):
        """Create the AdvDownloader table if it does not exist yet."""
        # NOTE: the column is historically misspelled 'complet_path'. It is
        # kept as-is so existing database files stay compatible; all reads
        # use SELECT *, so the name is never referenced directly.
        sql_create_db = ("CREATE TABLE IF NOT EXISTS AdvDownloader ("
                         "queuePosition INTEGER, fileName TEXT, url TEXT, title TEXT, "
                         "incomplete_path TEXT, complet_path TEXT, total_size REAL, "
                         "current_dl_size REAL, current_percent_dl REAL, "
                         "dl_status INTEGER, async INTEGER, playAtEnd INTEGER);")
        with sqlite.connect(self.__dbFile__) as db:
            db.cursor().execute(sql_create_db)
            try:
                db.commit()
                xbmc.log("[AdvDownloader] Database created", xbmc.LOGINFO)
            except Exception:
                xbmc.log("[AdvDownloader] Error during creating database with query " + sql_create_db, xbmc.LOGERROR)
    def _rowToItem(self, row):
        """Map one raw 12-column AdvDownloader row to an item dict."""
        return {
            'queuePosition': row[0],
            'fileName': row[1],
            'url': row[2],
            'title': row[3],
            'destinationFolder': row[5],
            'webBrowserId': 0,
            'incomplete_path': row[4],
            'complete_path': row[5],
            'totalsize': float(row[6]),
            'current_dl_size': float(row[7]),
            'current_percent_dl': float(row[8]),
            'dl_status': int(row[9]),
            'async': int(row[10]),
            'playAtEnd': int(row[11]),
        }
    def _getQueue(self):
        """
        @return the whole queue as a list of item dicts (empty list when the
        queue is empty), or None on database error
        """
        sql_select = "SELECT * FROM AdvDownloader;"
        with sqlite.connect(self.__dbFile__) as db:
            try:
                rows = db.cursor().execute(sql_select).fetchall()
                resultList = [self._rowToItem(row) for row in rows]
                xbmc.log("[AdvDownloader] Get the queue list success in db", xbmc.LOGINFO)
                return resultList
            except Exception as e:
                xbmc.log(str(e))
                xbmc.log("[AdvDownloader] Error during select execution in db with query : " + sql_select, xbmc.LOGERROR)
                return None
    def _getNextItemFromQueue(self):
        """
        @return the item dict with the lowest queuePosition, or None when the
        queue is empty or on database error
        """
        sql_select = "SELECT * FROM AdvDownloader ORDER BY queuePosition ASC;"
        with sqlite.connect(self.__dbFile__) as db:
            try:
                rows = db.cursor().execute(sql_select).fetchall()
                if rows:
                    itemJSON = self._rowToItem(rows[0])
                    xbmc.log("[AdvDownloader] Find next element %s success in db" % (itemJSON['fileName']), xbmc.LOGINFO)
                    return itemJSON
            except Exception as e:
                xbmc.log(str(e), xbmc.LOGERROR)
                xbmc.log("[AdvDownloader] Error during select execution in db with query : " + sql_select, xbmc.LOGERROR)
            return None
    def _getLastIndex(self):
        """
        @return the highest queuePosition in the queue, 0 when the queue is
        empty or on database error
        """
        sql_select = "SELECT max(queuePosition) FROM AdvDownloader;"
        with sqlite.connect(self.__dbFile__) as db:
            try:
                rows = db.cursor().execute(sql_select).fetchall()
                if rows:
                    # max() returns a single row holding NULL for an empty table.
                    maxIndex = 0 if rows[0][0] is None else int(rows[0][0])
                    xbmc.log("[AdvDownloader] Find last index %s success in db" % (maxIndex), xbmc.LOGINFO)
                    return maxIndex
            except Exception as e:
                xbmc.log(str(e), xbmc.LOGERROR)
                xbmc.log("[AdvDownloader] Error during select execution in db with query : " + sql_select, xbmc.LOGERROR)
            return 0
    def _searchItem(self, item):
        """
        Method to search an item in the queue
        @param item: dict with at least a 'fileName' key
        @return the matching item dict, or None when absent or on error
        """
        sql_select = "SELECT * FROM AdvDownloader WHERE fileName = ?;"
        with sqlite.connect(self.__dbFile__) as db:
            try:
                rows = db.cursor().execute(sql_select, (item['fileName'],)).fetchall()
                if rows and rows[0][0] is not None:
                    itemJSON = self._rowToItem(rows[0])
                    xbmc.log("[AdvDownloader] Find element %s success in db" % (item['fileName']), xbmc.LOGINFO)
                    return itemJSON
                xbmc.log("[AdvDownloader] No element %s in db" % (item['fileName']), xbmc.LOGINFO)
            except Exception as e:
                xbmc.log(str(e), xbmc.LOGERROR)
                xbmc.log("[AdvDownloader] Error during select execution in db with query : " + sql_select, xbmc.LOGERROR)
            return None
    def _addToQueue(self, fileName, params):
        """
        Append a new download at the end of the queue; no-op when the item
        is already queued.
        @param fileName: the name of the file to download
        @param params: dict with 'url', 'title', 'incomplete_path',
               'complete_path', 'totalsize', 'async' and 'playAtEnd'
        """
        with sqlite.connect(self.__dbFile__) as db:
            if self._searchItem(params) is None:
                index = self._getLastIndex() + 1
                sql_insert = "INSERT INTO AdvDownloader VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);"
                values = (index, fileName, params['url'], params['title'],
                          params['incomplete_path'], params['complete_path'],
                          float(params['totalsize']), 0, 0, 0,
                          int(params['async']), int(params['playAtEnd']))
                db.cursor().execute(sql_insert, values)
                try:
                    db.commit()
                    xbmc.log("[AdvDownloader] Insert success in db", xbmc.LOGINFO)
                except Exception:
                    xbmc.log("[AdvDownloader] Error during insertion execution in db with query : " + sql_insert, xbmc.LOGERROR)
    def _removeToQueue(self, fileName, params={}):
        """
        Delete the row matching fileName (and url, when provided in params).
        """
        # 'params' is read-only here, so the mutable default is harmless.
        clause = "fileName = ?"
        values = [fileName]
        if 'url' in params:
            clause += " AND url = ?"
            values.append(params['url'])
        sql_delete = "DELETE FROM AdvDownloader WHERE %s ;" % (clause)
        with sqlite.connect(self.__dbFile__) as db:
            db.cursor().execute(sql_delete, tuple(values))
            try:
                db.commit()
                xbmc.log("[AdvDownloader] Delete success in db", xbmc.LOGINFO)
            except Exception:
                xbmc.log("[AdvDownloader] Error during delete execution in db with query : " + sql_delete, xbmc.LOGERROR)
    def _updateQueueItem(self, fileName, params):
        """
        Persist the current progress (size, percent, status) of an item.
        """
        clause = "fileName = ?"
        values = [params['current_dl_size'], params['current_percent_dl'], params['dl_status'], fileName]
        if 'url' in params:
            clause += " AND url = ?"
            values.append(params['url'])
        sql_update = ("UPDATE AdvDownloader SET current_dl_size = ?, "
                      "current_percent_dl = ?, dl_status = ? WHERE %s ;" % (clause))
        with sqlite.connect(self.__dbFile__) as db:
            db.cursor().execute(sql_update, tuple(values))
            try:
                db.commit()
                xbmc.log("[AdvDownloader] Update success in db", xbmc.LOGINFO)
            except Exception:
                xbmc.log("[AdvDownloader] Error during update execution in db with query : " + sql_update, xbmc.LOGERROR)
    def _clearQueue(self):
        """Delete every row of the queue table."""
        sql_delete = "DELETE FROM AdvDownloader;"
        with sqlite.connect(self.__dbFile__) as db:
            db.cursor().execute(sql_delete)
            try:
                db.commit()
                xbmc.log("[AdvDownloader] Clear success in db", xbmc.LOGINFO)
            except Exception:
                xbmc.log("[AdvDownloader] Error during delete execution in db with query : " + sql_delete, xbmc.LOGERROR)
    def __del__(self):
        # ___ Nothing to release: every method opens and closes its own
        # connection through the 'with' statement.
        pass
    def _shouldStop(self):
        """
        Method _shouldStop
        @return True if no download with dl_status < 3 remains (or on
        database error), else False
        """
        sql_select = "SELECT * FROM AdvDownloader WHERE dl_status < 3;"
        with sqlite.connect(self.__dbFile__) as db:
            try:
                rows = db.cursor().execute(sql_select).fetchall()
                if rows and rows[0][0] is not None:
                    xbmc.log("[AdvDownloader] Find element in download", xbmc.LOGINFO)
                    return False
                xbmc.log("[AdvDownloader] No download started in db", xbmc.LOGINFO)
            except Exception as e:
                xbmc.log(str(e), xbmc.LOGERROR)
                xbmc.log("[AdvDownloader] Error during select execution in db with query : " + sql_select, xbmc.LOGERROR)
            return True
    def _askStop(self):
        """
        Method _askStop
        Ask to stop all downloads by forcing dl_status to 3.
        """
        sql_update = "UPDATE AdvDownloader SET dl_status = 3 WHERE dl_status < 3;"
        with sqlite.connect(self.__dbFile__) as db:
            db.cursor().execute(sql_update)
            try:
                db.commit()
                xbmc.log("[AdvDownloader] Stop download - Update success in db", xbmc.LOGINFO)
            except Exception:
                xbmc.log("[AdvDownloader] Error during update execution in db with query : " + sql_update, xbmc.LOGERROR)
    def _setStopFlag(self, shouldStop):
        # The database backend derives the stop state from dl_status rows.
        pass
class CacheQueue(QueueClass):
    """
    Cache queue class

    Download queue persisted through StorageServer (falls back to the dummy
    storage when the cache add-on is unavailable). The queue is stored under
    "AdvDownloaderQueue" as repr() of a list of (fileName, params) tuples;
    access is serialised with the "AdvDownloaderQueueLock" lock.

    Fixes over the original implementation:
    - _addToQueue/_removeToQueue referenced an undefined name 'filename'
      (the parameter is 'fileName'), raising NameError on every call;
    - _getNextItemFromQueue set items to False on a corrupt cache and then
      crashed on len(items);
    - _clearQueue never released the lock, and _removeToQueue leaked it when
      the queue was empty;
    - the queuePosition renumbering loop mutated the list while iterating it.
    """
    def __init__(self):
        # Initialize 'cache' variables
        try:
            import StorageServer
        except Exception:
            import storageserverdummy as StorageServer
        self.cache = StorageServer.StorageServer("AdvDownloader")
    def _loadItems(self, default):
        """Read and eval the serialised queue; *default* when empty/corrupt."""
        queue = self.cache.get("AdvDownloaderQueue")
        xbmc.log("[AdvDownloader] Queue loaded : " + repr(queue), xbmc.LOGDEBUG)
        if not queue:
            return default
        try:
            return eval(queue)
        except Exception:
            return default
    def _getQueue(self):
        """
        Method to get the queue
        @return the queue
        @attention: Use this method, only for read the queue
        """
        items = self._loadItems({})
        xbmc.log("[AdvDownloader] _getQueue Done: " + str(len(items)), xbmc.LOGDEBUG)
        return items
    def moveItemToPosition(self, filename, position):
        """
        Method moveItemToPosition
        @param filename: The name of the file
        @param position: The new index of the item in the queue (must be > 0)
        """
        if position > 0 and self.cache.lock("AdvDownloaderQueueLock"):
            items = []
            if filename:
                items = self._loadItems([])
                xbmc.log("[AdvDownloader] Pre items: %s " % repr(items), xbmc.LOGDEBUG)
                # ___ Move the matching entry to the requested position.
                for index, (item_id, item) in enumerate(items):
                    if item_id == filename:
                        del items[index]
                        items = items[:position] + [(filename, item)] + items[position:]
                        break
                # ___ Renumber queuePosition sequentially. (The original
                # delete/re-insert loop skipped entries because it mutated
                # the very list it was enumerating.)
                for index, (item_id, item) in enumerate(items):
                    item['queuePosition'] = index
                xbmc.log("[AdvDownloader] Post items: %s " % repr(items), xbmc.LOGDEBUG)
            # NOTE: kept from the original structure - the queue is rewritten
            # even when filename is empty, which clears it.
            self.cache.set("AdvDownloaderQueue", repr(items))
            self.cache.unlock("AdvDownloaderQueueLock")
            xbmc.log("[AdvDownloader] moveItemToPosition Done", xbmc.LOGDEBUG)
        else:
            xbmc.log("[AdvDownloader] Couldn't lock AdvDownloaderQueueLock in the method _moveItemToPosition", xbmc.LOGDEBUG)
    def _getNextItemFromQueue(self):
        """
        _getNextItemFromQueue : Method to get the next item into the queue
        @return the params dict of the head item, False when the queue is
        empty, or None when the lock could not be acquired
        """
        if self.cache.lock("AdvDownloaderQueueLock"):
            # ___ A corrupt serialised queue is treated as empty.
            items = self._loadItems([])
            item = {}
            # ___ If the current queue is not empty, we get the next item
            if len(items) > 0:
                item = items[0]
                xbmc.log("[AdvDownloader] Returning : " + item[0], xbmc.LOGDEBUG)
            self.cache.unlock("AdvDownloaderQueueLock")
            if item:
                (fileName, item) = item
                return item
            else:
                return False
        else:
            xbmc.log("[AdvDownloader] Couldn't acquire lock on AdvDownloaderQueueLock in the method _getNextItemFromQueue", xbmc.LOGDEBUG)
    def _getLastIndex(self):
        """
        Method to return the last index of the queue
        """
        return len(self._getQueue())
    def _addToQueue(self, fileName, params):
        """
        Method _addToQueue
        @param fileName: the name of the file to download
        @param params: the dictionary with informations about the file to download
        """
        if self.cache.lock("AdvDownloaderQueueLock"):
            items = []
            if fileName:
                items = self._loadItems([])
                append = True
                # __ Verify if the item is already into the queue
                for index, (item_id, item) in enumerate(items):
                    if item_id == fileName:
                        # __ If the item is already into the queue, we will delete it
                        append = False
                        del items[index]
                        break
                # __ If we should add the item in the queue
                if append:
                    items.append((fileName, params))
                    xbmc.log("[AdvDownloader] Added: " + fileName.decode('utf-8') + " to queue - " + str(len(items)).decode('utf-8'), xbmc.LOGDEBUG)
                # __ Else we should insert the item in the head of the queue
                else:
                    items.insert(1, (fileName, params))  # 1 or 0?
                    xbmc.log("[AdvDownloader] Moved " + fileName.decode('utf-8') + " to front of queue. - " + str(len(items)).decode('utf-8'), xbmc.LOGDEBUG)
            # __ Set the new queue
            self.cache.set("AdvDownloaderQueue", repr(items))
            # __ Unlock the queue
            self.cache.unlock("AdvDownloaderQueueLock")
            xbmc.log("[AdvDownloader] _addItemToQueue Done", xbmc.LOGDEBUG)
        else:
            xbmc.log("[AdvDownloader] Couldn't lock AdvDownloaderQueueLock on _addItemToQueue", xbmc.LOGERROR)
    def _removeToQueue(self, fileName, params={}):
        """
        _removeToQueue Method
        @param fileName :the filename to remove of the download queue
        @param params : All associate parameters
        """
        if self.cache.lock("AdvDownloaderQueueLock"):
            items = self._loadItems([])
            if items:
                for index, (item_id, item) in enumerate(items):
                    if item_id == fileName:
                        # NOTE(review): _removeTempFile is not defined in this
                        # class; presumably provided elsewhere - confirm.
                        self._removeTempFile(fileName, item)
                        del items[index]
                        self.cache.set("AdvDownloaderQueue", repr(items))
                        xbmc.log("[AdvDownloader] Removed: " + fileName.decode('utf-8') + " from queue", xbmc.LOGDEBUG)
                        break
            else:
                xbmc.log("[AdvDownloader] Exception in _removeToQueue", xbmc.LOGDEBUG)
            # __ Always release the lock (the original leaked it when the
            # queue was empty).
            self.cache.unlock("AdvDownloaderQueueLock")
            xbmc.log("[AdvDownloader] Remove item from queue : Done")
        else:
            xbmc.log("[AdvDownloader] Couldn't lock AdvDownloaderQueueLock on _removeToQueue", xbmc.LOGERROR)
    def _clearQueue(self):
        """
        _clearQueue Method
        """
        if self.cache.lock("AdvDownloaderQueueLock"):
            self.cache.set("AdvDownloaderQueue", repr([]))
            # __ Release the lock (the original implementation never did).
            self.cache.unlock("AdvDownloaderQueueLock")
            xbmc.log("[AdvDownloader] Clear queue successful ", xbmc.LOGDEBUG)
        else:
            xbmc.log("[AdvDownloader] Couldn't lock AdvDownloaderQueueLock on _clearQueue", xbmc.LOGERROR)
    def _shouldStop(self):
        """
        Method _shouldStop
        @return True if we ask to stop all downloads, else return False
        @warning: this method read the cache for the value "AdvDownloaderStop"
        """
        # The flag is stored as repr(bool); compare as a string because
        # eval("") raises SyntaxError when the value was never set.
        return self.cache.get("AdvDownloaderStop") == "True"
    def _askStop(self):
        """
        Method _askStop
        Ask to stop all downloads
        @warning: this method read and set the cache for the value "AdvDownloaderStop"
        """
        # Only flip the flag when it is currently the serialised False.
        if self.cache.get("AdvDownloaderStop") == "False":
            self.cache.set("AdvDownloaderStop", repr(True))
    def _setStopFlag(self, shouldStop):
        """
        Method _setStopFlag
        Ask to set the flag AdvDownloaderStop
        @warning: this method read and set the cache for the value "AdvDownloaderStop"
        """
        self.cache.set("AdvDownloaderStop", repr(shouldStop))
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n# coding=utf-8\r\n'''\r\nCreated on 2016. 8. 14.\r\n\r\n@author: Jay\r\n'''\r\nfrom cppy.adaptor import CpRqRpClass\r\nimport win32com.client\r\n\r\n\r\n@CpRqRpClass('CpTrade.CpTd0311')\r\nclass StockOrderCash(object):\r\n '''\r\n ... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n# coding=utf-8\r\n'''\r\nCreated on 2016. 8. 14.\r\n\r\n@author: Jay\r\n'''\r\nfrom cppy.adaptor import CpRqRpClass\r\nimport win32com.client\r\n\r\n\r\n@CpRqRpClass('CpTrade.CpTd0311')\r\nclass StockOrderCash(object):\... | ```python
# coding=utf-8
'''
Created on 2016. 8. 14.
@author: Jay
'''
from cppy.adaptor import CpRqRpClass
import win32com.client
@CpRqRpClass('CpTrade.CpTd0311')
class StockOrderCash(object):
    '''
    Requests and receives order (cash order) data for exchange-listed
    stocks, KOSDAQ stocks and ELWs.
    '''
    def __init__(self):
        # COM helper used for trade initialisation and account lookup.
        self.instCpTdUtil = win32com.client.Dispatch("CpTrade.CpTdUtil")
    class InputType(enumerate):
        # NOTE(review): subclassing `enumerate` is unusual; these classes act
        # as plain namespaces of integer field codes.
        SellOrBuy = 0 # order type code (1: sell, 2: buy)
        AccountNumber = 1 # account number
        StockCode = 3 # stock code
        OrderNumber = 4 # order quantity
        OrderPrice = 5 # order unit price
    class OutputType(enumerate):
        AccountNumber = 1 # account number
        StockCode = 3 # stock code
        OrderNumber = 4 # order quantity
        OrderPrice = 5 # order unit price
    def setInputValue(self, inputTypes, inputValues):
        # Store the field codes and matching values to send with the request.
        self.inputTypes = inputTypes
        self.inputValues = inputValues
    def setOutputValue(self, outputTypes):
        # Store the field codes to read back from the response.
        self.outputTypes = outputTypes
    def request(self, com_obj):
        # Trade initialisation must run before sending an order request.
        self.instCpTdUtil.TradeInit()
        for i in range(len(self.inputTypes)) :
            com_obj.SetInputValue(self.inputTypes[i], self.inputValues[i])
        # account number (field code 1) comes from the first trade account
        accountNumber = self.instCpTdUtil.AccountNumber[0]
        com_obj.SetInputValue(1, accountNumber)
        com_obj.Request()
    def response(self, com_obj):
        # Concatenate every requested header value as "value; value; ...".
        result = ""
        for j in range(0, len(self.outputTypes)) :
            value = com_obj.GetHeaderValue(self.outputTypes[j])
            result += str(value) + "; "
        print (result)
``` |
[
{
"content": "Here is the source code:\n```python\nimport sys\n\ntry:\n from django.conf import settings\n\n settings.configure(\n DEBUG=True,\n USE_TZ=True,\n DATABASES={\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n }\n ... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\nimport sys\n\ntry:\n from django.conf import settings\n\n settings.configure(\n DEBUG=True,\n USE_TZ=True,\n DATABASES={\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n ... | ```python
import sys
try:
    from django.conf import settings
    # Minimal standalone configuration so the app's tests can run without a
    # host project: sqlite backend, the app itself plus its required
    # contrib apps, and nose passing stdout through (-s).
    settings.configure(
        DEBUG=True,
        USE_TZ=True,
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.sqlite3",
            }
        },
        ROOT_URLCONF="djangocms_owl.urls",
        INSTALLED_APPS=[
            "django.contrib.auth",
            "django.contrib.contenttypes",
            "django.contrib.sites",
            "djangocms_owl",
        ],
        SITE_ID=1,
        NOSE_ARGS=['-s'],
        MIDDLEWARE_CLASSES=(),
    )
    # django.setup() only exists on newer Django versions; call it when
    # available, otherwise silently continue.
    try:
        import django
        setup = django.setup
    except AttributeError:
        pass
    else:
        setup()
    from django_nose import NoseTestSuiteRunner
except ImportError:
    # Surface the original traceback, then point at the test requirements.
    import traceback
    traceback.print_exc()
    raise ImportError("To fix this error, run: pip install -r requirements-test.txt")
def run_tests(*test_args):
    """Run the django-nose test suite; exit non-zero when tests fail."""
    targets = list(test_args) if test_args else ['tests']
    # Run tests
    runner = NoseTestSuiteRunner(verbosity=1)
    failure_count = runner.run_tests(targets)
    if failure_count:
        sys.exit(failure_count)
if __name__ == '__main__':
    # Forward any CLI arguments as test labels.
    run_tests(*sys.argv[1:])
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------------\n# police-uum.py\n# Author: Maxim Dubinin (sim@gis-lab.info)\n# About: Grab 112.ru data on участковые, creates two tabl... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------------\n# police-uum.py\n# Author: Maxim Dubinin (sim@gis-lab.info)\n# About: Grab 112.ru data on участковые, ... | ```python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# police-uum.py
# Author: Maxim Dubinin (sim@gis-lab.info)
# About: Grab 112.ru data on участковые, creates two tables linked with unique id, policemen and locations they are responsible for.
# Created: 13:26 07.05.2013
# Usage example: python police-uum.py 45000000000
# ---------------------------------------------------------------------------
import urllib2
from bs4 import BeautifulSoup
import sys
import os
import ucsv as csv
from datetime import datetime
def download_list(link,cntry):
    """Download the participant listing for *cntry* into countries/<cntry>.html.

    Logs unreachable/failed URLs to the module-level f_errors file.
    Returns True on success, False on URLError. (Python 2 syntax.)
    """
    try:
        u = urllib2.urlopen(link)
    except urllib2.URLError, e:
        if hasattr(e, 'reason'):
            print 'We failed to reach a server.'
            print 'Reason: ', e.reason
        elif hasattr(e, 'code'):
            print 'The server couldn\'t fulfill the request.'
            print 'Error code: ', e.code
        # f_errors is a global opened in the __main__ block.
        f_errors.write(cntry + "," + link + "\n")
        success = False
    else:
        f = open("countries/" + cntry + ".html","wb")
        f.write(u.read())
        f.close()
        print("Listing for " + cntry + " was downloaded")
        success = True
    return success
def get_country_codes(link):
    """Return every non-empty country code from the page's countryId <select>."""
    page = urllib2.urlopen(link)
    soup = BeautifulSoup(''.join(page.read()))
    selector = soup.find("select", { "name" : "countryId" })
    return [option['value']
            for option in selector.findAll('option')
            if option['value'] != '']
def download_person(link,cntry,name):
    """Download one athlete page into peoples/<cntry>_<name>.html.

    Logs failures to the module-level f_errors file.
    Returns True on success, False on URLError. (Python 2 syntax.)
    """
    try:
        u = urllib2.urlopen(link)
    except urllib2.URLError, e:
        if hasattr(e, 'reason'):
            print 'We failed to reach a server.'
            print 'Reason: ', e.reason
        elif hasattr(e, 'code'):
            print 'The server couldn\'t fulfill the request.'
            print 'Error code: ', e.code
        # f_errors is a global opened in the __main__ block.
        f_errors.write(cntry + "," + link + "," + name + "\n")
        success = False
    else:
        f = open("peoples/" + cntry + "_" + name + ".html","wb")
        f.write(u.read())
        f.close()
        print("Listing for " + name.encode("utf-8") + " was downloaded")
        success = True
    return success
def parse_list(cntry):
    """Parse countries/<cntry>.html and write one CSV row per participant.

    For each row of the participant table, downloads the athlete's personal
    page (English variant) and merges its fields into the row written via
    the module-level csvwriter.
    """
    cntry_list = open("countries/" + cntry + ".html")
    soup = BeautifulSoup(''.join(cntry_list.read()))
    maintable = soup.find("table", { "class" : "participantList sortable" })
    trs = maintable.findAll('tr')
    # Drop the table header row.
    del trs[0]
    for tr in trs:
        tds = tr.findAll('td')
        name = list(tds[0].find("span", { "class" : "sortValue hidden" }).strings)[0]
        link = "http://kazan2013.ru" + tds[0].find('a')['href']
        nameru = list(tds[0].find('a').strings)[0]
        #if len(list(tds[1].strings)) != 0:
        #    gender = list(tds[1].strings)[0]
        #else:
        #    gender = "not set"
        if tds[2].find('a') != None:
            sports = list(tds[2].find('a').strings)[0]
            sports = sports.replace("\r\n","").strip()
            sportslink = "http://kazan2013.ru" + tds[2].find('a')['href']
        else:
            sports = ""
            sportslink = ""
        #cntry = list(tds[3].find('a').strings)[0]
        cntrylink = "http://kazan2013.ru" + tds[3].find('a')['href']
        # Fetch the per-athlete page; gender comes from parse_person.
        success = download_person(link.replace("/ru/","/en/"),cntry,name)
        if success == True:
            lastname,firstname,gender,dob,day_b,month_b,year_b,height,weight,uniname,unicity,team = parse_person(cntry,name)
        else:
            lastname = firstname = gender = dob = day_b = month_b = year_b = height = weight = uniname = unicity = team = "error"
        #write to man file (csvwriter is a global bound in the __main__ block)
        csvwriter.writerow(dict(NAME=name,
                                LINK=link,
                                NAMERU=nameru,
                                GENDER=gender,
                                SPORTS=sports,
                                SPORTSLINK=sportslink,
                                CNTRY=cntry,
                                CNTRYLINK=cntrylink,
                                LASTNAME=lastname,
                                FIRSTNAME=firstname,
                                DOB=dob,
                                DOB_DAY=day_b,
                                DOB_MNTH=month_b,
                                DOB_YEAR=year_b,
                                HEIGHT=height,
                                WEIGHT=weight,
                                UNINAME=uniname,
                                UNICITY=unicity,
                                TEAM=team))
def parse_person(cntry,name):
    """Parse a downloaded athlete page and extract personal details.

    @param cntry: country code used in the cached file name
    @param name: athlete name used in the cached file name
    @return tuple (lastname, firstname, gender, dob, day_b, month_b, year_b,
            height, weight, uniname, unicity, team); fields missing from the
            page are returned as empty strings
    """
    f_person = open("peoples/" + cntry + "_" + name + ".html",'rb')
    soup = BeautifulSoup(''.join(f_person.read()))
    f_person.close()  # the original leaked this file handle
    persinfotable = soup.findAll('table')[0]
    trs = persinfotable.findAll('tr')
    # Drop the table header row.
    del trs[0]
    # Initialise every output field. 'gender' was missing from this chain in
    # the original, raising UnboundLocalError when a page had no Gender row.
    lastname = firstname = gender = dob = day_b = month_b = year_b = height = weight = uniname = unicity = team = ""
    for tr in trs:
        tds = tr.findAll('td')
        trname = list(tds[0].strings)[0].strip()
        if trname == "Family name":
            lastname = list(tds[1].strings)[0].strip()
        elif trname == "Given name":
            firstname = list(tds[1].strings)[0].strip()
        elif trname == "Gender":
            gender = list(tds[1].find('div').strings)[0].strip()
        elif trname == "Birthdate":
            # Birthdate arrives as e.g. "13 May 1990"; split into components.
            dob = list(tds[1].findAll('div')[0].strings)[0].strip()
            date_object = datetime.strptime(dob, '%d %B %Y')
            day_b = date_object.day
            month_b = date_object.month
            year_b = date_object.year
        elif trname == "Height (cm)":
            height = list(tds[1].strings)[0].strip()
        elif trname == "Weight (kg)":
            weight = list(tds[1].strings)[0].strip()
        elif trname == "University":
            uniname = list(tds[1].strings)[0].strip()
        elif trname == "University City":
            unicity = list(tds[1].strings)[0].strip()
        elif trname == "Teams":
            team = list(tds[1].find("span").strings)[0].strip()
    return lastname,firstname,gender,dob,day_b,month_b,year_b,height,weight,uniname,unicity,team
if __name__ == '__main__':
    # Either scrape a single country passed on the CLI, or every country
    # listed in the site's countryId selector.
    args = sys.argv[1:]
    if len(args) == 1:
        country_codes = [args[0]] #use RUS for RUSSIA
    else:
        country_codes = get_country_codes(link = "http://kazan2013.ru/hide/ru/-240/Participant/List?isRelay=False&isAnimal=False&lastNameStarts=&sportId=&countryId=RUS")
    # f_errors and csvwriter are read as globals by the parse/download helpers.
    f_errors = open("errors.log","a")
    fieldnames_data = ("NAME","LINK","NAMERU","SPORTS","SPORTSLINK","CNTRY","CNTRYLINK","LASTNAME","FIRSTNAME","GENDER","DOB","DOB_DAY","DOB_MNTH","DOB_YEAR","HEIGHT","WEIGHT","UNINAME","UNICITY","TEAM")
    for cntry in country_codes:
        link = "http://kazan2013.ru/hide/ru/-240/Participant/List?isRelay=False&isAnimal=False&lastNameStarts=&sportId=&countryId=" + cntry
        data_name = cntry + ".csv"
        f_data = open("countries/" + data_name,"wb")
        csvwriter = csv.DictWriter(f_data, fieldnames=fieldnames_data)
        success = download_list(link,cntry)
        if success == True:
            parse_list(cntry)
        else:
            f_errors.write(cntry + "\n")
        f_data.close()
    f_errors.close()
``` |
[
{
"content": "```python\n\"\"\"\nJedi is a static analysis tool for Python that can be used in IDEs/editors. Its\nhistoric focus is autocompletion, but does static analysis for now as well.\nJedi is fast and is very well tested. It understands Python on a deeper level\nthan all other static analysis frameworks ... | [
{
"content": "<|memory_start|>```python\n\"\"\"\nJedi is a static analysis tool for Python that can be used in IDEs/editors. Its\nhistoric focus is autocompletion, but does static analysis for now as well.\nJedi is fast and is very well tested. It understands Python on a deeper level\nthan all other static anal... | ```python
"""
Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
historic focus is autocompletion, but does static analysis for now as well.
Jedi is fast and is very well tested. It understands Python on a deeper level
than all other static analysis frameworks for Python.
Jedi has support for two different goto functions. It's possible to search for
related names and to list all names in a Python file and infer them. Jedi
understands docstrings and you can use Jedi autocompletion in your REPL as
well.
Jedi uses a very simple API to connect with IDE's. There's a reference
implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
It's really easy.
To give you a simple example how you can use the Jedi library, here is an
example for the autocompletion feature:
>>> import jedi
>>> source = '''
... import datetime
... datetime.da'''
>>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
>>> script
<Script: 'example.py' ...>
>>> completions = script.completions()
>>> completions #doctest: +ELLIPSIS
[<Completion: date>, <Completion: datetime>, ...]
>>> print(completions[0].complete)
te
>>> print(completions[0].name)
date
As you see Jedi is pretty simple and allows you to concentrate on writing a
good text editor, while still having very good IDE features for Python.
"""
__version__ = '0.13.2'
from jedi.api import Script, Interpreter, set_debug_function, \
preload_module, names
from jedi import settings
from jedi.api.environment import find_virtualenvs, find_system_environments, \
get_default_environment, InvalidPythonEnvironment, create_environment, \
get_system_environment
from jedi.api.exceptions import InternalError
``` |
[
{
"content": "```python\nimport requests, logging, json\nfrom storitch import config, config_load, logger\n\ndef upload_multipart():\n r = requests.post(\n 'http://127.0.0.1:{}/store'.format(config['port']),\n files={'file': open('test1.txt', 'rb')}\n )\n logging.debug(r.text)\n loggin... | [
{
"content": "<|memory_start|>```python\nimport requests, logging, json\nfrom storitch import config, config_load, logger\n\ndef upload_multipart():\n r = requests.post(\n 'http://127.0.0.1:{}/store'.format(config['port']),\n files={'file': open('test1.txt', 'rb')}\n )\n logging.debug(r.t... | ```python
import requests, logging, json
from storitch import config, config_load, logger
def upload_multipart():
r = requests.post(
'http://127.0.0.1:{}/store'.format(config['port']),
files={'file': open('test1.txt', 'rb')}
)
logging.debug(r.text)
logging.debug(r.status_code)
r.raise_for_status()
assert r.status_code == 201
d = r.json()
assert d[0]['hash'] == 'f29bc64a9d3732b4b9035125fdb3285f5b6455778edca72414671e0ca3b2e0de'
assert d[0]['type'] == 'file'
def upload_stream():
session = ''
with open('test1.txt', 'rb') as f:
while True:
d = f.read(5)
r = requests.put(
'http://127.0.0.1:{}/store/session'.format(config['port']),
data=d,
headers={
'Content-Type': 'application/octet-stream',
'storitch-json': json.dumps({
'session': session,
'filename': 'testæøå.txt',
'finished': False if d else True
})
},
)
logging.debug(r.text)
logging.debug(r.status_code)
r.raise_for_status()
j = r.json()
logging.debug(j)
if 'session' in j:
session = j['session']
if not d:
break
logging.debug(j)
assert j['hash'] == 'f29bc64a9d3732b4b9035125fdb3285f5b6455778edca72414671e0ca3b2e0de'
assert j['type'] == 'file'
assert j['filename'] == 'testæøå.txt'
def thumbnail():
r = requests.post(
'http://127.0.0.1:{}/store'.format(config['port']),
files={'file': open('test.png', 'rb')}
)
logging.debug(r.text)
logging.debug(r.status_code)
r.raise_for_status()
assert r.status_code == 201
d = r.json()
assert d[0]['hash'] == '1171aad9f52efe4f577ccabec4aaeb063e28a80978f3853721381bca2b5fe501'
assert d[0]['type'] == 'image'
assert d[0]['width'] == 5
assert d[0]['height'] == 5
r = requests.get(
'http://127.0.0.1:{}/1171aad9f52efe4f577ccabec4aaeb063e28a80978f3853721381bca2b5fe501@.jpg'.format(config['port']),
)
logging.debug(r.text)
logging.debug(r.status_code)
assert r.status_code == 200
if __name__ == '__main__':
config_load()
logger.set_logger(None)
upload_multipart()
upload_stream()
thumbnail()
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\nfrom __future__ import unicode_literals\n__author__ = 'hz'\nimport nltk\n\nclass DataFormat:\n def __init__(self):\n pass\n\n def dataFormat( self, _sentence ):\n \"\"\"\n format the input data and save it in the tmp file\n ... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\nfrom __future__ import unicode_literals\n__author__ = 'hz'\nimport nltk\n\nclass DataFormat:\n def __init__(self):\n pass\n\n def dataFormat( self, _sentence ):\n \"\"\"\n format the input data and save it in the ... | ```python
from __future__ import unicode_literals
__author__ = 'hz'
import nltk
class DataFormat:
def __init__(self):
pass
def dataFormat( self, _sentence ):
"""
format the input data and save it in the tmp file
:param _sentence:
:return:
"""
if isinstance( _sentence, list ):
result = []
for sentence in _sentence:
result.extend( self.__dataFormat( sentence) )
result.append( '\n' )
self.__save( result)
return result
elif isinstance( _sentence, str ):
result = self.__dataFormat( _sentence )
self.__save( result )
return result
else:
return None
def __dataFormat( self, sentence ):
result = []
words = nltk.word_tokenize( sentence )
for word in words:
word = ' ' + word + '\n'
result.append( word )
return result
def __save( self, data ):
f = file( 'tmp', 'wb' )
f.writelines( data )
f.flush()
f.close()
if __name__ == '__main__':
testdata = ['I saw a dog chasing a cat.', 'I love you.']
data_format = DataFormat()
data_format.dataFormat( testdata )
print( "test" )
``` |
[
{
"content": "Repeat the code precisely:\n```python\nimport ntlm_auth.compute_hash as compute_hash\n\n\nclass TestComputeHash(object):\n\n def test_lmowfv1(self):\n # 4.2.2.1.1 - LMOWFv1()\n expected = b\"\\xe5\\x2c\\xac\\x67\\x41\\x9a\\x9a\\x22\" \\\n b\"\\x4a\\x3b\\x10\\x8f\... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\nimport ntlm_auth.compute_hash as compute_hash\n\n\nclass TestComputeHash(object):\n\n def test_lmowfv1(self):\n # 4.2.2.1.1 - LMOWFv1()\n expected = b\"\\xe5\\x2c\\xac\\x67\\x41\\x9a\\x9a\\x22\" \\\n b\"\\x4a... | ```python
import ntlm_auth.compute_hash as compute_hash
class TestComputeHash(object):
def test_lmowfv1(self):
# 4.2.2.1.1 - LMOWFv1()
expected = b"\xe5\x2c\xac\x67\x41\x9a\x9a\x22" \
b"\x4a\x3b\x10\x8f\x3f\xa6\xcb\x6d"
actual = compute_hash._lmowfv1("Password")
assert actual == expected
def test_ntowfv1(self):
# 4.2.2.1.2 - NTOWFv1()
expected = b"\xa4\xf4\x9c\x40\x65\x10\xbd\xca" \
b"\xb6\x82\x4e\xe7\xc3\x0f\xd8\x52"
actual = compute_hash._ntowfv1("Password")
assert actual == expected
def test_lmofv1_hash(self):
# 4.2.2.1.1 - LMOWFv1()
expected = b"\xe5\x2c\xac\x67\x41\x9a\x9a\x22" \
b"\x4a\x3b\x10\x8f\x3f\xa6\xcb\x6d"
password_hash = "e52cac67419a9a224a3b108f3fa6cb6d:" \
"a4f49c406510bdcab6824ee7c30fd852"
actual = compute_hash._lmowfv1(password_hash)
assert actual == expected
def test_ntowfv1_hash(self):
# 4.2.2.1.2 - NTOWFv1()
expected = b"\xa4\xf4\x9c\x40\x65\x10\xbd\xca" \
b"\xb6\x82\x4e\xe7\xc3\x0f\xd8\x52"
password_hash = "e52cac67419a9a224a3b108f3fa6cb6d:" \
"a4f49c406510bdcab6824ee7c30fd852"
actual = compute_hash._ntowfv1(password_hash)
assert actual == expected
def test_ntowfv2(self):
# 4.2.4.1.1 - NTOWFv2() and LMOWFv2()
expected = b"\x0c\x86\x8a\x40\x3b\xfd\x7a\x93" \
b"\xa3\x00\x1e\xf2\x2e\xf0\x2e\x3f"
actual = compute_hash._ntowfv2("User", "Password", "Domain")
assert actual == expected
``` |
[
{
"content": "Replicate the source code:\n```python\n# japanese_theme_base.py --- Traditional colors of Japan\n\ndef parse_csv(s):\n result = dict()\n\n def modifier(x):\n if x[0] == \"#\":\n return x[1:]\n else:\n return result[x]\n\n ls = s.splitlines()\n for l ... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n# japanese_theme_base.py --- Traditional colors of Japan\n\ndef parse_csv(s):\n result = dict()\n\n def modifier(x):\n if x[0] == \"#\":\n return x[1:]\n else:\n return result[x]\n\n ls = s.splitlin... | ```python
# japanese_theme_base.py --- Traditional colors of Japan
def parse_csv(s):
result = dict()
def modifier(x):
if x[0] == "#":
return x[1:]
else:
return result[x]
ls = s.splitlines()
for l in ls:
ss = l.split(",")
result[ss[0]] = modifier(ss[1])
return result
import sys
def replace_colors(csv, in_stream=sys.stdin, out_stream=sys.stdout):
s = in_stream.read()
t = s.format(**csv)
out_stream.write(t)
class MyString (str):
def __init__(self, s):
self.upcase = s.upper()
def print_colors(csv, fmt, out_stream=sys.stdout):
s = ""
for k, v in csv.items():
s += fmt.format(MyString(k), MyString(v),n='\n')
out_stream.write(s)
if __name__ == '__main__':
s = ""
fmt = ""
filename = sys.argv[1]
if len(sys.argv) > 2:
fmt = sys.argv[2]
with open(filename) as f:
s = f.read()
if s:
csv = parse_csv(s)
if fmt:
print_colors(csv, fmt)
else:
replace_colors(csv)
``` |
[
{
"content": "Here is the script:\n```python\nfrom strategy import Strategy\nimport gobject\n\nclass SimpleStrategy(Strategy):\n\tdef __init__(self):\n\t\tsuper(SimpleStrategy, self).__init__()\n\n\tdef do_raise_cash(self, target, hand):\n\t\traised = 0\n\n\t\t(monopolies, crap) = self.split_hand(hand)\n\n\t\t#... | [
{
"content": "Here is the script:\n<|memory_start|>```python\nfrom strategy import Strategy\nimport gobject\n\nclass SimpleStrategy(Strategy):\n\tdef __init__(self):\n\t\tsuper(SimpleStrategy, self).__init__()\n\n\tdef do_raise_cash(self, target, hand):\n\t\traised = 0\n\n\t\t(monopolies, crap) = self.split_han... | ```python
from strategy import Strategy
import gobject
class SimpleStrategy(Strategy):
def __init__(self):
super(SimpleStrategy, self).__init__()
def do_raise_cash(self, target, hand):
raised = 0
(monopolies, crap) = self.split_hand(hand)
# first try mortgage properties that are not
# part of monopolies
for e in crap:
if raised >= target or e.mortgaged or e.houses > 0:
continue
self.mortgage(e.estateid)
raised += e.mortgageprice
if raised >= target:
return raised
# now try mortgage undeveloped monopolies
monoplist = sum(monopolies, [])
for e in monoplist:
if raised >= target or e.mortgaged or e.houses > 0:
continue
self.unmortgage(e.estateid)
raised += e.mortgageprice
if raised >= target:
return raised
# now to sell houses, sell entire rows at once
# just to keep it simple
for g in monopolies:
if True in map(lambda x:x.mortgaged,g):
continue
if raised >= target:
break
for e in g:
if e.houses <= 0:
continue
self.sell_house(e.estateid)
# FIXME
e.houses -= 1
raised += e.sellhouseprice
# shouldn't really be possible, we're bust
return raised
def raise_cash(self, p, target):
hand = self.hand(p)
for e in hand:
self.msg('I own: [%d]: %s\n'%(e.estateid, e.name),
[e.mortgaged and 'red' or 'dark green'])
self.msg('must raise %d bucks!\n'%target)
raised = self.do_raise_cash(target, hand)
if raised < target:
self.msg('only raised %d bucks\n'%raised,
['bold','red'])
return False
self.msg('raised %d bucks\n'%raised, ['bold','dark green'])
return True
def handle_debt(self, p):
self.msg('handle debts\n')
e = self.s.estates[p.location]
due = self.due(p, e)
if due <= 0:
self.msg('not sure what to do\n')
due = 100
self.raise_cash(p, due)
def handle_purchase(self, p):
e = self.s.estates[p.location]
self.msg('price is %d, i gots %d\n'%(e.price, p.money))
if e.price > p.money:
can_afford = self.raise_cash(p, e.price - p.money)
else:
can_afford = True
if can_afford:
self.msg('BUYING IT, THEY HATIN\n', ['dark green'])
return True
else:
self.msg('CANNOT AFFORD, AUCTION\n', ['red'])
return False
def remain_in_jail(self, i):
# decide whether to pay, use card, or what
if i.money < 50:
self.raise_cash(i, 50 - i.money)
self.msg('BUYING OUT OF JAIL\n', ['red'])
return False
def pay_asset_tax(self, p):
self.msg('got %d bucks\n'%p.money)
e = self.s.estates[p.location]
pc = e.taxpercentage and e.taxpercentage or 10
fixed = e.tax and e.tax or 200
money = p.money
for e in self.hand(p):
self.msg('I own: %s (%d + %d)\n'%(e.name,
e.mortgageprice, e.houses * e.sellhouseprice),
[e.mortgaged and 'red' or 'dark green'])
if not e.mortgaged:
money += e.mortgageprice
money += e.houses * e.sellhouseprice
money = float(pc) * float(money) / 100.0
self.msg('fixed price is %d, assets is %d\n'%(fixed, money))
if money < fixed:
self.msg('PAYING PERCENTAGE\n', ['dark green'])
return True
else:
self.msg('PAYING FIXED\n', ['red'])
return False
def manage_estates(self, p):
money = p.money
hand = self.hand(p)
# unmortgage properties
reserve = 200
for e in hand:
if not e.mortgaged:
continue
if money < e.unmortgageprice + reserve:
continue
self.unmortgage(e.estateid)
money -= e.unmortgageprice
# buy houses
(monopolies, misc) = self.split_hand(hand)
for m in monopolies:
tc = sum(map(lambda x:x.houseprice, m))
if money < reserve + tc:
continue
if m[0].houses < 5:
self.msg('monopoly: buying a level on %s for %d\n'%\
(self.s.groups[m[0].group].name, tc),
['bold', 'dark blue'])
for e in m:
if e.houses >= 5:
continue
self.msg(' - %r\n'%e, ['bold', 'dark blue'])
self.buy_house(e.estateid)
e.houses += 1
money -= tc
gobject.type_register(SimpleStrategy)
``` |
[
{
"content": "Repeat the code exactly:\n```python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.misc as misc\nimport os\nimport time\nfrom collections import namedtuple\nfrom .ops import conv2d... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.misc as misc\nimport os\nimport time\nfrom collections import namedtuple\nfrom .o... | ```python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
import scipy.misc as misc
import os
import time
from collections import namedtuple
from .ops import conv2d, deconv2d, lrelu, fc, batch_norm, init_embedding, conditional_instance_norm
from .dataset import TrainDataProvider, InjectDataProvider, NeverEndingLoopingProvider
from .utils import scale_back, merge, save_concat_images
# Auxiliary wrapper classes
# Used to save handles(important nodes in computation graph) for later evaluation
LossHandle = namedtuple("LossHandle", ["d_loss", "g_loss", "const_loss", "l1_loss",
"category_loss", "cheat_loss", "tv_loss"])
InputHandle = namedtuple("InputHandle", ["real_data", "embedding_ids", "no_target_data", "no_target_ids"])
EvalHandle = namedtuple("EvalHandle", ["encoder", "generator", "target", "source", "embedding"])
SummaryHandle = namedtuple("SummaryHandle", ["d_merged", "g_merged"])
class UNet(object):
def __init__(self, experiment_dir=None, experiment_id=0, batch_size=16, input_width=256, output_width=256,
generator_dim=64, discriminator_dim=64, L1_penalty=100, Lconst_penalty=15, Ltv_penalty=0.0,
Lcategory_penalty=1.0, embedding_num=40, embedding_dim=128, input_filters=3, output_filters=3):
self.experiment_dir = experiment_dir
self.experiment_id = experiment_id
self.batch_size = batch_size
self.input_width = input_width
self.output_width = output_width
self.generator_dim = generator_dim
self.discriminator_dim = discriminator_dim
self.L1_penalty = L1_penalty
self.Lconst_penalty = Lconst_penalty
self.Ltv_penalty = Ltv_penalty
self.Lcategory_penalty = Lcategory_penalty
self.embedding_num = embedding_num
self.embedding_dim = embedding_dim
self.input_filters = input_filters
self.output_filters = output_filters
# init all the directories
self.sess = None
# experiment_dir is needed for training
if experiment_dir:
self.data_dir = os.path.join(self.experiment_dir, "data")
self.checkpoint_dir = os.path.join(self.experiment_dir, "checkpoint")
self.sample_dir = os.path.join(self.experiment_dir, "sample")
self.log_dir = os.path.join(self.experiment_dir, "logs")
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
print("create checkpoint directory")
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
print("create log directory")
if not os.path.exists(self.sample_dir):
os.makedirs(self.sample_dir)
print("create sample directory")
def encoder(self, images, is_training, reuse=False):
with tf.variable_scope("generator"):
if reuse:
tf.get_variable_scope().reuse_variables()
encode_layers = dict()
def encode_layer(x, output_filters, layer):
act = lrelu(x)
conv = conv2d(act, output_filters=output_filters, scope="g_e%d_conv" % layer)
enc = batch_norm(conv, is_training, scope="g_e%d_bn" % layer)
encode_layers["e%d" % layer] = enc
return enc
e1 = conv2d(images, self.generator_dim, scope="g_e1_conv")
encode_layers["e1"] = e1
e2 = encode_layer(e1, self.generator_dim * 2, 2)
e3 = encode_layer(e2, self.generator_dim * 4, 3)
e4 = encode_layer(e3, self.generator_dim * 8, 4)
e5 = encode_layer(e4, self.generator_dim * 8, 5)
e6 = encode_layer(e5, self.generator_dim * 8, 6)
e7 = encode_layer(e6, self.generator_dim * 8, 7)
e8 = encode_layer(e7, self.generator_dim * 8, 8)
return e8, encode_layers
def decoder(self, encoded, encoding_layers, ids, inst_norm, is_training, reuse=False):
with tf.variable_scope("generator"):
if reuse:
tf.get_variable_scope().reuse_variables()
s = self.output_width
s2, s4, s8, s16, s32, s64, s128 = int(s / 2), int(s / 4), int(s / 8), int(s / 16), int(s / 32), int(
s / 64), int(s / 128)
def decode_layer(x, output_width, output_filters, layer, enc_layer, dropout=False, do_concat=True):
dec = deconv2d(tf.nn.relu(x), [self.batch_size, output_width,
output_width, output_filters], scope="g_d%d_deconv" % layer)
if layer != 8:
# IMPORTANT: normalization for last layer
# Very important, otherwise GAN is unstable
# Trying conditional instance normalization to
# overcome the fact that batch normalization offers
# different train/test statistics
if inst_norm:
dec = conditional_instance_norm(dec, ids, self.embedding_num, scope="g_d%d_inst_norm" % layer)
else:
dec = batch_norm(dec, is_training, scope="g_d%d_bn" % layer)
if dropout:
dec = tf.nn.dropout(dec, 0.5)
if do_concat:
dec = tf.concat([dec, enc_layer], 3)
return dec
d1 = decode_layer(encoded, s128, self.generator_dim * 8, layer=1, enc_layer=encoding_layers["e7"],
dropout=True)
d2 = decode_layer(d1, s64, self.generator_dim * 8, layer=2, enc_layer=encoding_layers["e6"], dropout=True)
d3 = decode_layer(d2, s32, self.generator_dim * 8, layer=3, enc_layer=encoding_layers["e5"], dropout=True)
d4 = decode_layer(d3, s16, self.generator_dim * 8, layer=4, enc_layer=encoding_layers["e4"])
d5 = decode_layer(d4, s8, self.generator_dim * 4, layer=5, enc_layer=encoding_layers["e3"])
d6 = decode_layer(d5, s4, self.generator_dim * 2, layer=6, enc_layer=encoding_layers["e2"])
d7 = decode_layer(d6, s2, self.generator_dim, layer=7, enc_layer=encoding_layers["e1"])
d8 = decode_layer(d7, s, self.output_filters, layer=8, enc_layer=None, do_concat=False)
output = tf.nn.tanh(d8) # scale to (-1, 1)
return output
def generator(self, images, embeddings, embedding_ids, inst_norm, is_training, reuse=False):
e8, enc_layers = self.encoder(images, is_training=is_training, reuse=reuse)
local_embeddings = tf.nn.embedding_lookup(embeddings, ids=embedding_ids)
local_embeddings = tf.reshape(local_embeddings, [self.batch_size, 1, 1, self.embedding_dim])
embedded = tf.concat([e8, local_embeddings], 3)
output = self.decoder(embedded, enc_layers, embedding_ids, inst_norm, is_training=is_training, reuse=reuse)
return output, e8
def discriminator(self, image, is_training, reuse=False):
with tf.variable_scope("discriminator"):
if reuse:
tf.get_variable_scope().reuse_variables()
h0 = lrelu(conv2d(image, self.discriminator_dim, scope="d_h0_conv"))
h1 = lrelu(batch_norm(conv2d(h0, self.discriminator_dim * 2, scope="d_h1_conv"),
is_training, scope="d_bn_1"))
h2 = lrelu(batch_norm(conv2d(h1, self.discriminator_dim * 4, scope="d_h2_conv"),
is_training, scope="d_bn_2"))
h3 = lrelu(batch_norm(conv2d(h2, self.discriminator_dim * 8, sh=1, sw=1, scope="d_h3_conv"),
is_training, scope="d_bn_3"))
# real or fake binary loss
fc1 = fc(tf.reshape(h3, [self.batch_size, -1]), 1, scope="d_fc1")
# category loss
fc2 = fc(tf.reshape(h3, [self.batch_size, -1]), self.embedding_num, scope="d_fc2")
return tf.nn.sigmoid(fc1), fc1, fc2
def build_model(self, is_training=True, inst_norm=False, no_target_source=False):
real_data = tf.placeholder(tf.float32,
[self.batch_size, self.input_width, self.input_width,
self.input_filters + self.output_filters],
name='real_A_and_B_images')
embedding_ids = tf.placeholder(tf.int64, shape=None, name="embedding_ids")
no_target_data = tf.placeholder(tf.float32,
[self.batch_size, self.input_width, self.input_width,
self.input_filters + self.output_filters],
name='no_target_A_and_B_images')
no_target_ids = tf.placeholder(tf.int64, shape=None, name="no_target_embedding_ids")
# target images
real_B = real_data[:, :, :, :self.input_filters]
# source images
real_A = real_data[:, :, :, self.input_filters:self.input_filters + self.output_filters]
embedding = init_embedding(self.embedding_num, self.embedding_dim)
fake_B, encoded_real_A = self.generator(real_A, embedding, embedding_ids, is_training=is_training,
inst_norm=inst_norm)
real_AB = tf.concat([real_A, real_B], 3)
fake_AB = tf.concat([real_A, fake_B], 3)
# Note it is not possible to set reuse flag back to False
# initialize all variables before setting reuse to True
real_D, real_D_logits, real_category_logits = self.discriminator(real_AB, is_training=is_training, reuse=False)
fake_D, fake_D_logits, fake_category_logits = self.discriminator(fake_AB, is_training=is_training, reuse=True)
# encoding constant loss
# this loss assume that generated imaged and real image
# should reside in the same space and close to each other
encoded_fake_B = self.encoder(fake_B, is_training, reuse=True)[0]
const_loss = (tf.reduce_mean(tf.square(encoded_real_A - encoded_fake_B))) * self.Lconst_penalty
# category loss
true_labels = tf.reshape(tf.one_hot(indices=embedding_ids, depth=self.embedding_num),
shape=[self.batch_size, self.embedding_num])
real_category_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=real_category_logits,
labels=true_labels))
fake_category_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_category_logits,
labels=true_labels))
category_loss = self.Lcategory_penalty * (real_category_loss + fake_category_loss)
# binary real/fake loss
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=real_D_logits,
labels=tf.ones_like(real_D)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_D_logits,
labels=tf.zeros_like(fake_D)))
# L1 loss between real and generated images
l1_loss = self.L1_penalty * tf.reduce_mean(tf.abs(fake_B - real_B))
# total variation loss
width = self.output_width
tv_loss = (tf.nn.l2_loss(fake_B[:, 1:, :, :] - fake_B[:, :width - 1, :, :]) / width
+ tf.nn.l2_loss(fake_B[:, :, 1:, :] - fake_B[:, :, :width - 1, :]) / width) * self.Ltv_penalty
# maximize the chance generator fool the discriminator
cheat_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_D_logits,
labels=tf.ones_like(fake_D)))
d_loss = d_loss_real + d_loss_fake + category_loss / 2.0
g_loss = cheat_loss + l1_loss + self.Lcategory_penalty * fake_category_loss + const_loss + tv_loss
if no_target_source:
# no_target source are examples that don't have the corresponding target images
# however, except L1 loss, we can compute category loss, binary loss and constant losses with those examples
# it is useful when discriminator get saturated and d_loss drops to near zero
# those data could be used as additional source of losses to break the saturation
no_target_A = no_target_data[:, :, :, self.input_filters:self.input_filters + self.output_filters]
no_target_B, encoded_no_target_A = self.generator(no_target_A, embedding, no_target_ids,
is_training=is_training,
inst_norm=inst_norm, reuse=True)
no_target_labels = tf.reshape(tf.one_hot(indices=no_target_ids, depth=self.embedding_num),
shape=[self.batch_size, self.embedding_num])
no_target_AB = tf.concat([no_target_A, no_target_B], 3)
no_target_D, no_target_D_logits, no_target_category_logits = self.discriminator(no_target_AB,
is_training=is_training,
reuse=True)
encoded_no_target_B = self.encoder(no_target_B, is_training, reuse=True)[0]
no_target_const_loss = tf.reduce_mean(
tf.square(encoded_no_target_A - encoded_no_target_B)) * self.Lconst_penalty
no_target_category_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=no_target_category_logits,
labels=no_target_labels)) * self.Lcategory_penalty
d_loss_no_target = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=no_target_D_logits,
labels=tf.zeros_like(
no_target_D)))
cheat_loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=no_target_D_logits,
labels=tf.ones_like(no_target_D)))
d_loss = d_loss_real + d_loss_fake + d_loss_no_target + (category_loss + no_target_category_loss) / 3.0
g_loss = cheat_loss / 2.0 + l1_loss + \
(self.Lcategory_penalty * fake_category_loss + no_target_category_loss) / 2.0 + \
(const_loss + no_target_const_loss) / 2.0 + tv_loss
d_loss_real_summary = tf.summary.scalar("d_loss_real", d_loss_real)
d_loss_fake_summary = tf.summary.scalar("d_loss_fake", d_loss_fake)
category_loss_summary = tf.summary.scalar("category_loss", category_loss)
cheat_loss_summary = tf.summary.scalar("cheat_loss", cheat_loss)
l1_loss_summary = tf.summary.scalar("l1_loss", l1_loss)
fake_category_loss_summary = tf.summary.scalar("fake_category_loss", fake_category_loss)
const_loss_summary = tf.summary.scalar("const_loss", const_loss)
d_loss_summary = tf.summary.scalar("d_loss", d_loss)
g_loss_summary = tf.summary.scalar("g_loss", g_loss)
tv_loss_summary = tf.summary.scalar("tv_loss", tv_loss)
d_merged_summary = tf.summary.merge([d_loss_real_summary, d_loss_fake_summary,
category_loss_summary, d_loss_summary])
g_merged_summary = tf.summary.merge([cheat_loss_summary, l1_loss_summary,
fake_category_loss_summary,
const_loss_summary,
g_loss_summary, tv_loss_summary])
# expose useful nodes in the graph as handles globally
input_handle = InputHandle(real_data=real_data,
embedding_ids=embedding_ids,
no_target_data=no_target_data,
no_target_ids=no_target_ids)
loss_handle = LossHandle(d_loss=d_loss,
g_loss=g_loss,
const_loss=const_loss,
l1_loss=l1_loss,
category_loss=category_loss,
cheat_loss=cheat_loss,
tv_loss=tv_loss)
eval_handle = EvalHandle(encoder=encoded_real_A,
generator=fake_B,
target=real_B,
source=real_A,
embedding=embedding)
summary_handle = SummaryHandle(d_merged=d_merged_summary,
g_merged=g_merged_summary)
# those operations will be shared, so we need
# to make them visible globally
setattr(self, "input_handle", input_handle)
setattr(self, "loss_handle", loss_handle)
setattr(self, "eval_handle", eval_handle)
setattr(self, "summary_handle", summary_handle)
def register_session(self, sess):
self.sess = sess
def retrieve_trainable_vars(self, freeze_encoder=False):
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd_' in var.name]
g_vars = [var for var in t_vars if 'g_' in var.name]
if freeze_encoder:
# exclude encoder weights
print("freeze encoder weights")
g_vars = [var for var in g_vars if not ("g_e" in var.name)]
return g_vars, d_vars
def retrieve_generator_vars(self):
all_vars = tf.global_variables()
generate_vars = [var for var in all_vars if 'embedding' in var.name or "g_" in var.name]
return generate_vars
def retrieve_handles(self):
input_handle = getattr(self, "input_handle")
loss_handle = getattr(self, "loss_handle")
eval_handle = getattr(self, "eval_handle")
summary_handle = getattr(self, "summary_handle")
return input_handle, loss_handle, eval_handle, summary_handle
def get_model_id_and_dir(self):
model_id = "experiment_%d_batch_%d" % (self.experiment_id, self.batch_size)
model_dir = os.path.join(self.checkpoint_dir, model_id)
return model_id, model_dir
def checkpoint(self, saver, step):
model_name = "unet.model"
model_id, model_dir = self.get_model_id_and_dir()
if not os.path.exists(model_dir):
os.makedirs(model_dir)
saver.save(self.sess, os.path.join(model_dir, model_name), global_step=step)
def restore_model(self, saver, model_dir):
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt:
saver.restore(self.sess, ckpt.model_checkpoint_path)
print("restored model %s" % model_dir)
else:
print("fail to restore model %s" % model_dir)
def generate_fake_samples(self, input_images, embedding_ids):
input_handle, loss_handle, eval_handle, summary_handle = self.retrieve_handles()
fake_images, real_images, \
d_loss, g_loss, l1_loss = self.sess.run([eval_handle.generator,
eval_handle.target,
loss_handle.d_loss,
loss_handle.g_loss,
loss_handle.l1_loss],
feed_dict={
input_handle.real_data: input_images,
input_handle.embedding_ids: embedding_ids,
input_handle.no_target_data: input_images,
input_handle.no_target_ids: embedding_ids
})
return fake_images, real_images, d_loss, g_loss, l1_loss
def validate_model(self, val_iter, epoch, step):
labels, images = next(val_iter)
fake_imgs, real_imgs, d_loss, g_loss, l1_loss = self.generate_fake_samples(images, labels)
print("Sample: d_loss: %.5f, g_loss: %.5f, l1_loss: %.5f" % (d_loss, g_loss, l1_loss))
merged_fake_images = merge(scale_back(fake_imgs), [self.batch_size, 1])
merged_real_images = merge(scale_back(real_imgs), [self.batch_size, 1])
merged_pair = np.concatenate([merged_real_images, merged_fake_images], axis=1)
model_id, _ = self.get_model_id_and_dir()
model_sample_dir = os.path.join(self.sample_dir, model_id)
if not os.path.exists(model_sample_dir):
os.makedirs(model_sample_dir)
sample_img_path = os.path.join(model_sample_dir, "sample_%02d_%04d.png" % (epoch, step))
misc.imsave(sample_img_path, merged_pair)
def export_generator(self, save_dir, model_dir, model_name="gen_model"):
saver = tf.train.Saver()
self.restore_model(saver, model_dir)
gen_saver = tf.train.Saver(var_list=self.retrieve_generator_vars())
gen_saver.save(self.sess, os.path.join(save_dir, model_name), global_step=0)
def infer(self, source_obj, embedding_ids, model_dir, save_dir):
source_provider = InjectDataProvider(source_obj)
if isinstance(embedding_ids, int) or len(embedding_ids) == 1:
embedding_id = embedding_ids if isinstance(embedding_ids, int) else embedding_ids[0]
source_iter = source_provider.get_single_embedding_iter(self.batch_size, embedding_id)
else:
source_iter = source_provider.get_random_embedding_iter(self.batch_size, embedding_ids)
tf.global_variables_initializer().run()
saver = tf.train.Saver(var_list=self.retrieve_generator_vars())
self.restore_model(saver, model_dir)
def save_imgs(imgs, count):
p = os.path.join(save_dir, "inferred_%04d.png" % count)
save_concat_images(imgs, img_path=p)
print("generated images saved at %s" % p)
count = 0
batch_buffer = list()
for labels, source_imgs in source_iter:
fake_imgs = self.generate_fake_samples(source_imgs, labels)[0]
merged_fake_images = merge(scale_back(fake_imgs), [self.batch_size, 1])
batch_buffer.append(merged_fake_images)
if len(batch_buffer) == 10:
save_imgs(batch_buffer, count)
batch_buffer = list()
count += 1
if batch_buffer:
# last batch
save_imgs(batch_buffer, count)
def interpolate(self, source_obj, between, model_dir, save_dir, steps):
tf.global_variables_initializer().run()
saver = tf.train.Saver(var_list=self.retrieve_generator_vars())
self.restore_model(saver, model_dir)
# new interpolated dimension
new_x_dim = steps + 1
alphas = np.linspace(0.0, 1.0, new_x_dim)
def _interpolate_tensor(_tensor):
"""
Compute the interpolated tensor here
"""
x = _tensor[between[0]]
y = _tensor[between[1]]
interpolated = list()
for alpha in alphas:
interpolated.append(x * (1. - alpha) + alpha * y)
interpolated = np.asarray(interpolated, dtype=np.float32)
return interpolated
def filter_embedding_vars(var):
var_name = var.name
if var_name.find("embedding") != -1:
return True
if var_name.find("inst_norm/shift") != -1 or var_name.find("inst_norm/scale") != -1:
return True
return False
embedding_vars = filter(filter_embedding_vars, tf.trainable_variables())
# here comes the hack, we overwrite the original tensor
# with interpolated ones. Note, the shape might differ
# this is to restore the embedding at the end
embedding_snapshot = list()
for e_var in embedding_vars:
val = e_var.eval(session=self.sess)
embedding_snapshot.append((e_var, val))
t = _interpolate_tensor(val)
op = tf.assign(e_var, t, validate_shape=False)
print("overwrite %s tensor" % e_var.name, "old_shape ->", e_var.get_shape(), "new shape ->", t.shape)
self.sess.run(op)
source_provider = InjectDataProvider(source_obj)
input_handle, _, eval_handle, _ = self.retrieve_handles()
for step_idx in range(len(alphas)):
alpha = alphas[step_idx]
print("interpolate %d -> %.4f + %d -> %.4f" % (between[0], 1. - alpha, between[1], alpha))
source_iter = source_provider.get_single_embedding_iter(self.batch_size, 0)
batch_buffer = list()
count = 0
for _, source_imgs in source_iter:
count += 1
labels = [step_idx] * self.batch_size
generated, = self.sess.run([eval_handle.generator],
feed_dict={
input_handle.real_data: source_imgs,
input_handle.embedding_ids: labels
})
merged_fake_images = merge(scale_back(generated), [self.batch_size, 1])
batch_buffer.append(merged_fake_images)
if len(batch_buffer):
save_concat_images(batch_buffer,
os.path.join(save_dir, "frame_%02d_%02d_step_%02d.png" % (
between[0], between[1], step_idx)))
# restore the embedding variables
print("restore embedding values")
for var, val in embedding_snapshot:
op = tf.assign(var, val, validate_shape=False)
self.sess.run(op)
def train(self, lr=0.0002, epoch=100, schedule=10, resume=True, flip_labels=False,
          freeze_encoder=False, fine_tune=None, sample_steps=50, checkpoint_steps=500):
    """Run the adversarial training loop (TensorFlow 1.x graph mode).

    Args:
        lr: initial learning rate shared by both Adam optimizers.
        epoch: number of passes over the training data.
        schedule: halve the learning rate every `schedule` epochs.
        resume: if True, restore the latest checkpoint before training.
        flip_labels: if True, feed shuffled embedding ids as `no_target_ids`.
        freeze_encoder: forwarded to retrieve_trainable_vars to exclude
            encoder variables from the generator update.
        fine_tune: label filter forwarded to TrainDataProvider(filter_by=...).
        sample_steps: run validation sampling every `sample_steps` batches.
        checkpoint_steps: save a checkpoint every `checkpoint_steps` batches.

    Raises:
        Exception: if no TensorFlow session has been registered on self.sess.
    """
    g_vars, d_vars = self.retrieve_trainable_vars(freeze_encoder=freeze_encoder)
    input_handle, loss_handle, _, summary_handle = self.retrieve_handles()
    if not self.sess:
        raise Exception("no session registered")
    # Learning rate is a placeholder so it can be decayed between epochs
    # without rebuilding the optimizers.
    learning_rate = tf.placeholder(tf.float32, name="learning_rate")
    d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(loss_handle.d_loss, var_list=d_vars)
    g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(loss_handle.g_loss, var_list=g_vars)
    tf.global_variables_initializer().run()
    real_data = input_handle.real_data
    embedding_ids = input_handle.embedding_ids
    no_target_data = input_handle.no_target_data
    no_target_ids = input_handle.no_target_ids
    # filter by one type of labels
    data_provider = TrainDataProvider(self.data_dir, filter_by=fine_tune)
    total_batches = data_provider.compute_total_batch_num(self.batch_size)
    val_batch_iter = data_provider.get_val_iter(self.batch_size)
    saver = tf.train.Saver(max_to_keep=3)
    summary_writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)
    if resume:
        _, model_dir = self.get_model_id_and_dir()
        self.restore_model(saver, model_dir)
    current_lr = lr
    counter = 0
    start_time = time.time()
    for ei in range(epoch):
        train_batch_iter = data_provider.get_train_iter(self.batch_size)
        if (ei + 1) % schedule == 0:
            update_lr = current_lr / 2.0
            # minimum learning rate guarantee (with the default lr=0.0002 the
            # decay is therefore a no-op)
            update_lr = max(update_lr, 0.0002)
            print("decay learning rate from %.5f to %.5f" % (current_lr, update_lr))
            current_lr = update_lr
        for bid, batch in enumerate(train_batch_iter):
            counter += 1
            labels, batch_images = batch
            # copy before shuffling so the real labels stay intact
            shuffled_ids = labels[:]
            if flip_labels:
                np.random.shuffle(shuffled_ids)
            # Optimize D
            _, batch_d_loss, d_summary = self.sess.run([d_optimizer, loss_handle.d_loss,
                                                        summary_handle.d_merged],
                                                       feed_dict={
                                                           real_data: batch_images,
                                                           embedding_ids: labels,
                                                           learning_rate: current_lr,
                                                           no_target_data: batch_images,
                                                           no_target_ids: shuffled_ids
                                                       })
            # Optimize G
            _, batch_g_loss = self.sess.run([g_optimizer, loss_handle.g_loss],
                                            feed_dict={
                                                real_data: batch_images,
                                                embedding_ids: labels,
                                                learning_rate: current_lr,
                                                no_target_data: batch_images,
                                                no_target_ids: shuffled_ids
                                            })
            # magic move to Optimize G again
            # according to https://github.com/carpedm20/DCGAN-tensorflow
            # collect all the losses along the way
            _, batch_g_loss, category_loss, cheat_loss, \
                const_loss, l1_loss, tv_loss, g_summary = self.sess.run([g_optimizer,
                                                                         loss_handle.g_loss,
                                                                         loss_handle.category_loss,
                                                                         loss_handle.cheat_loss,
                                                                         loss_handle.const_loss,
                                                                         loss_handle.l1_loss,
                                                                         loss_handle.tv_loss,
                                                                         summary_handle.g_merged],
                                                                        feed_dict={
                                                                            real_data: batch_images,
                                                                            embedding_ids: labels,
                                                                            learning_rate: current_lr,
                                                                            no_target_data: batch_images,
                                                                            no_target_ids: shuffled_ids
                                                                        })
            passed = time.time() - start_time
            log_format = "Epoch: [%2d], [%4d/%4d] time: %4.4f, d_loss: %.5f, g_loss: %.5f, " + \
                         "category_loss: %.5f, cheat_loss: %.5f, const_loss: %.5f, l1_loss: %.5f, tv_loss: %.5f"
            print(log_format % (ei, bid, total_batches, passed, batch_d_loss, batch_g_loss,
                                category_loss, cheat_loss, const_loss, l1_loss, tv_loss))
            summary_writer.add_summary(d_summary, counter)
            summary_writer.add_summary(g_summary, counter)
            if counter % sample_steps == 0:
                # sample the current model states with val data
                self.validate_model(val_batch_iter, ei, counter)
            if counter % checkpoint_steps == 0:
                print("Checkpoint: save checkpoint step %d" % counter)
                self.checkpoint(saver, counter)
    # save the last checkpoint
    print("Checkpoint: last checkpoint step %d" % counter)
    self.checkpoint(saver, counter)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# Section 3: Algebraic simplification\n\n# This code implements a simple computer algebra system, which takes in an\n# expression made of nested sums and products, and simplifies it into a\n# single sum of products. Th... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# Section 3: Algebraic simplification\n\n# This code implements a simple computer algebra system, which takes in an\n# expression made of nested sums and products, and simplifies it into a\n# single sum... | ```python
# Section 3: Algebraic simplification
# This code implements a simple computer algebra system, which takes in an
# expression made of nested sums and products, and simplifies it into a
# single sum of products. The goal is described in more detail in the
# problem set writeup.
# Much of this code is already implemented. We provide you with a
# representation for sums and products, and a top-level simplify() function
# which applies the associative law in obvious cases. For example, it
# turns both (a + (b + c)) and ((a + b) + c) into the simpler expression
# (a + b + c).
# However, the code has a gap in it: it cannot simplify expressions that are
# multiplied together. In interesting cases of this, you will need to apply
# the distributive law.
# Your goal is to fill in the do_multiply() function so that multiplication
# can be simplified as intended.
# Testing will be mathematical: If you return a flat list that
# evaluates to the same value as the original expression, you will
# get full credit.
# We've already defined the data structures that you'll use to symbolically
# represent these expressions, as two classes called Sum and Product,
# defined below. These classes both descend from the abstract Expression class.
#
# The top level function that will be called is the .simplify() method of an
# Expression.
#
# >>> expr = Sum([1, Sum([2, 3])])
# >>> expr.simplify()
# Sum([1, 2, 3])
### Expression classes _____________________________________________________
# Expressions will be represented as "Sum()" and "Product()" objects.
# These objects can be treated just like lists (they inherit from the
# "list" class), but you can test for their type using the "isinstance()"
# function. For example:
#
# >>> isinstance(Sum([1,2,3]), Sum)
# True
# >>> isinstance(Product([1,2,3]), Product)
# True
# >>> isinstance(Sum([1,2,3]), Expression) # Sums and Products are both Expressions
# True
class Expression:
    """Abstract marker base class; it carries no behaviour of its own."""
    pass
class Sum(list, Expression):
    """
    A sum of terms.

    A Sum is an ordinary list (it inherits from list) that the rest of the
    code can recognize via isinstance(expr, Sum).  Index and iterate it
    exactly like a list; convert with list(the_sum) and Sum(the_list).
    """

    def __repr__(self):
        return "Sum(%s)" % list.__repr__(self)

    def flatten(self):
        """Splice the terms of any directly nested Sums into this one."""
        merged = []
        for term in self:
            if isinstance(term, Sum):
                merged.extend(term)
            else:
                merged.append(term)
        return Sum(merged)

    def simplify(self):
        """Simplify every term and apply the associative law.

        This is the top-level entry point: it removes unnecessary nesting
        and collapses a one-term sum to the (simplified) term itself.
        """
        terms = self.flatten()
        if len(terms) == 1:
            return simplify_if_possible(terms[0])
        return Sum([simplify_if_possible(t) for t in terms]).flatten()
class Product(list, Expression):
    """
    A product of factors.  Like Sum, this is a list subclass that can be
    detected with isinstance() and converted to/from a plain list.
    """

    def __repr__(self):
        return "Product(%s)" % list.__repr__(self)

    def flatten(self):
        """Splice the factors of any directly nested Products into this one."""
        merged = []
        for factor in self:
            if isinstance(factor, Product):
                merged.extend(factor)
            else:
                merged.append(factor)
        return Product(merged)

    def simplify(self):
        """Multiply all the factors together, distributing where necessary.

        Starts from the multiplicative identity Product([1]) and folds each
        simplified factor in through multiply(), which applies the
        distributive law when a factor is a Sum.
        """
        product = Product([1])
        for factor in self.flatten():
            product = multiply(product, simplify_if_possible(factor))
        return product.flatten()
def simplify_if_possible(expr):
    """Call .simplify() on Expressions; pass anything else through unchanged."""
    return expr.simplify() if isinstance(expr, Expression) else expr
# You may find the following helper functions to be useful.
# "multiply" is provided for you; but you will need to write "do_multiply"
# if you would like to use it.
def multiply(expr1, expr2):
    """Coerce both operands to Expressions, then hand off to do_multiply.

    A bare atom (number or symbol) behaves exactly like a one-factor
    Product, so do_multiply only ever has to handle Sums and Products.
    """
    def as_expression(e):
        return e if isinstance(e, Expression) else Product([e])
    return do_multiply(as_expression(expr1), as_expression(expr2))
def do_multiply(expr1, expr2):
    """Multiply two Expressions (Sums or Products) into a simplified result.

    Both arguments are guaranteed by multiply() to be Sums or Products.
    The distributive law is applied whenever either operand is a Sum:
    (a + b) * x == a*x + b*x.  Multiplying two Products is simply the
    concatenation of their factor lists.

    Returns a flattened Sum (when distribution happened) or a flattened
    Product that evaluates to the same value as the original product.
    """
    if isinstance(expr1, Sum) and isinstance(expr2, Sum):
        # Distribute fully: every term of expr1 times every term of expr2.
        return Sum([multiply(a, b) for a in expr1 for b in expr2]).flatten()
    if isinstance(expr1, Sum):
        # (a + b) * P  ->  a*P + b*P
        return Sum([multiply(term, expr2) for term in expr1]).flatten()
    if isinstance(expr2, Sum):
        # P * (a + b)  ->  P*a + P*b
        return Sum([multiply(expr1, term) for term in expr2]).flatten()
    # Product * Product: concatenate the factor lists.
    return Product(list(expr1) + list(expr2)).flatten()
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport unittest\n\nfrom utils.read_data_file import read_int_array\nfrom sorting.insertion_sort import sort\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass InsertionSortTester(unittest.TestCase):\n\n #... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport unittest\n\nfrom utils.read_data_file import read_int_array\nfrom sorting.insertion_sort import sort\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass InsertionSortTester(unittest.Tes... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from utils.read_data_file import read_int_array
from sorting.insertion_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class InsertionSortTester(unittest.TestCase):
    """Exercises sorting.insertion_sort.sort against the data1.data fixture."""

    # The fixture's fifteen integers in ascending order.
    ASCENDING = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]

    def _load(self):
        """Read the shared integer fixture from disk."""
        return read_int_array(os.path.join(BASE_DIR, 'data1.data'))

    # Test sort in default order, i.e., in ascending order.
    def test_sort_default(self):
        self.assertEqual(self.ASCENDING, sort(self._load()))

    # Test sort in ascending order.
    def test_sort_ascending(self):
        self.assertEqual(self.ASCENDING, sort(self._load(), 'asc'))

    # Test sort in descending order.
    def test_sort_descending(self):
        self.assertEqual(list(reversed(self.ASCENDING)), sort(self._load(), 'desc'))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
``` |
[
{
"content": "Here is the script:\n```python\n\"\"\" sample variance decay of two level QG system \"\"\"\n\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nfrom firedrake import *\n\nfrom quasi_geostrophic_model import *\n\nimport numpy as np\n\nimport matplotlib.pyplot as plot\n\n\n#... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n\"\"\" sample variance decay of two level QG system \"\"\"\n\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nfrom firedrake import *\n\nfrom quasi_geostrophic_model import *\n\nimport numpy as np\n\nimport matplotlib.pyplo... | ```python
""" sample variance decay of two level QG system """
from __future__ import division
from __future__ import absolute_import
from firedrake import *
from quasi_geostrophic_model import *
import numpy as np
import matplotlib.pyplot as plot
# define mesh hierarchy: L nested refinements of a coarse 5x5 unit-square mesh
mesh = UnitSquareMesh(5, 5)
L = 4  # number of coarse/fine level pairs examined below
mesh_hierarchy = MeshHierarchy(mesh, L)
# define sample size (Monte Carlo samples per level)
n = 10
# define variance (parameter handed to the QG model; semantics defined there)
variance = 0.125
# define initial condition function
def ic(mesh, xp):
    """Return a UFL expression for the initial condition: two Gaussian bumps
    of opposite sign centred at (0.5 - xp, 0.7) and (0.5 - xp, 0.3).

    *xp* shifts both bumps horizontally; widths are 0.25 in x and 0.1 in y.
    """
    x = SpatialCoordinate(mesh)
    ufl_expression = (exp(-(pow(x[0] - 0.5 + xp, 2) / (2 * pow(0.25, 2)) +
                            pow(x[1] - 0.7, 2) / (2 * pow(0.1, 2)))) -
                      exp(-(pow(x[0] - 0.5 + xp, 2) / (2 * pow(0.25, 2)) +
                            pow(x[1] - 0.3, 2) / (2 * pow(0.1, 2)))))
    return ufl_expression
sample_variances_difference = np.zeros(L)
# all samples are prolonged onto the finest level so they can be compared
finest_fs = FunctionSpace(mesh_hierarchy[-1], 'CG', 1)
for l in range(L):
    print 'level: ', l
    meshc = mesh_hierarchy[l]
    meshf = mesh_hierarchy[l + 1]
    # define fs (DG and CG spaces on the coarse and fine meshes)
    dg_fs_c = FunctionSpace(meshc, 'DG', 1)
    cg_fs_c = FunctionSpace(meshc, 'CG', 1)
    dg_fs_f = FunctionSpace(meshf, 'DG', 1)
    cg_fs_f = FunctionSpace(meshf, 'CG', 1)
    # running first and second moments of the fine-minus-coarse difference
    m = Function(finest_fs)
    sq = Function(finest_fs)
    for j in range(n):
        print 'sample: ', j
        # set-up system
        QG = two_level_quasi_geostrophic(dg_fs_c, cg_fs_c, dg_fs_f, cg_fs_f, variance)
        # fixed ic
        xp = 0
        QG.initial_condition(ic(meshc, xp), ic(meshf, xp))
        # time-step
        QG.timestepper(3.0)
        # prolong coarse and fine
        comp_c = Function(finest_fs)
        comp_f = Function(finest_fs)
        prolong(QG.psi_[0], comp_c)
        if l < L - 1:
            prolong(QG.psi_[1], comp_f)
        else:
            # the fine field of the last pair already lives on the finest mesh
            comp_f.assign(QG.psi_[1])
        m += assemble((comp_f - comp_c) * (1.0 / n))
        sq += assemble(((comp_f - comp_c) ** 2) * (1.0 / n))
    # (biased) sample variance: E[X^2] - E[X]^2
    ff = Function(finest_fs).assign((sq - (m ** 2)))
    sample_variances_difference[l] = assemble(ff * dx)
dxf = 1.0 / 2 ** (np.linspace(1, L, L))
plot.loglog(dxf, sample_variances_difference)
# dashed reference line with O(dx^4) decay for comparison
plot.loglog(dxf, 1e-9 * dxf ** (4), 'k--')
plot.xlabel('normalized dx of coarse level')
plot.ylabel('sample variance difference')
plot.show()
``` |
[
{
"content": "Here is a code file:\n```python\n#Copyright (c) 2011,12 Walter Bender\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your o... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#Copyright (c) 2011,12 Walter Bender\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, ... | ```python
#Copyright (c) 2011,12 Walter Bender
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, 51 Franklin Street, Suite 500 Boston, MA 02110-1335 USA
from StringIO import StringIO
import json
json.dumps  # NOTE(review): no-op expression statement (leftover); safe to delete
from json import load as jload
from json import dump as jdump
def json_load(text):
    """Load data from *text* using whatever resources are available.

    Parses *text* as JSON; if that fails, the text is assumed to be a
    plain whitespace-separated list of ASCII integers and a list of ints
    is returned instead.
    """
    stream = StringIO(text)
    try:
        return jload(stream)
    except ValueError:
        # not valid JSON -- fall back to the ascii integer-list format
        return [int(token) for token in text.split()]
def json_dump(data):
    """Serialize *data* to a JSON string using available JSON tools."""
    out = StringIO()
    jdump(data, out)
    return out.getvalue()
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\nimport argparse\nimport itertools\nimport json\nimport collections\nimport datetime\nimport sys\n\nimport mercantile\nimport shapely.geometry\n\n\nMIN_DATE = '0000-00-00'\nMAX_DATE = '9999-99-99'\nMIN_ZOOM = 0\nMAX_ZOOM = 19\n\n... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\nimport argparse\nimport itertools\nimport json\nimport collections\nimport datetime\nimport sys\n\nimport mercantile\nimport shapely.geometry\n\n\nMIN_DATE = '0000-00-00'\nMAX_DATE = '9999-99-99'\nMIN_ZOOM = 0\nM... | ```python
import argparse
import itertools
import json
import collections
import datetime
import sys
import mercantile
import shapely.geometry
# Sentinel bounds used when --date_from/--date_to or zoom limits are omitted.
MIN_DATE = '0000-00-00'
MAX_DATE = '9999-99-99'
MIN_ZOOM = 0
MAX_ZOOM = 19

# Module-level memoization caches, one per helper below; they grow
# unboundedly for the lifetime of the process.
cache_down = {}
cache_up = {}
cache_center = {}
cache_date = {}
cache_in_bound = {}
def get_down_tiles(x, y, z, target_zoom):
    """Return every descendant tile of (x, y, z) at *target_zoom* (memoized).

    *target_zoom* must be >= z.  Always returns a tuple of (x, y, z)
    triples; previously the first (uncached) call returned a list while
    cache hits returned a tuple, so the return type was inconsistent.
    """
    assert z <= target_zoom, 'target zoom less than zoom %s <= %s' % (z, target_zoom)
    k = (x, y, z, target_zoom)
    if k not in cache_down:
        if z == target_zoom:
            result = [(x, y, z)]
        else:
            result = []
            for t in mercantile.children(x, y, z):
                result += get_down_tiles(t.x, t.y, t.z, target_zoom)
        cache_down[k] = tuple(result)
    # Always hand back the cached tuple so the return type is consistent.
    return cache_down[k]
def get_up_tile(x, y, z, target_zoom):
    """Return the ancestor tile of (x, y, z) at *target_zoom* (memoized).

    *target_zoom* must be <= z.  The result is a single (x, y, z) triple.
    """
    assert z >= target_zoom, 'target zoom more than zoom %s >= %s' % (z, target_zoom)
    key = (x, y, z, target_zoom)
    if key in cache_up:
        return cache_up[key]
    if z == target_zoom:
        ancestor = (x, y, z)
    else:
        parent = mercantile.parent(x, y, z)
        ancestor = get_up_tile(parent.x, parent.y, parent.z, target_zoom)
    cache_up[key] = ancestor
    return ancestor
def get_date_precision(date, date_prec, date_prec_measure):
    """Bucket an ISO 'YYYY-MM-DD' date string to a coarser precision (memoized).

    date_prec is the bucket size (a float, parsed from the CLI) and
    date_prec_measure one of 'd' (day), 'm' (month) or 'y' (year).
    NOTE(review): the "+1 on remainder" bucketing makes bucket boundaries
    fall just after exact multiples of date_prec -- confirm this is the
    intended calendar bucketing.
    """
    if date not in cache_date:
        old_date = date
        if date_prec_measure == 'd':
            old_part = int(date[8:])
            # floor to the bucket, then bump by one when it was not exact
            new_part = old_part // date_prec * date_prec + (1 if old_part % date_prec else 0)
            date = '%s-%02d' % (date[:7], new_part)
        elif date_prec_measure == 'm':
            old_part = int(date[5:7])
            new_part = old_part // date_prec * date_prec + (1 if old_part % date_prec else 0)
            date = '%s-%02d-01' % (date[:4], new_part)
        elif date_prec_measure == 'y':
            old_part = int(date[:4])
            new_part = old_part // date_prec * date_prec + (1 if old_part % date_prec else 0)
            date = '%04d-01-01' % (new_part)
        else:
            raise TypeError('unknown date precision measure %s' % date_prec_measure)
        # the cache maps the ORIGINAL date string to its bucketed form
        cache_date[old_date] = date
        return date
    return cache_date[date]
def calculate_center(x, y, z):
    """Return the (lat, lon) center point of tile (x, y, z) (memoized).

    Bug fix: the latitude was previously computed as
    bounds.north + height / 2, which lies *outside* the tile; the tile's
    center latitude is bounds.south + height / 2.
    """
    k = (x, y, z)
    if k not in cache_center:
        bounds = mercantile.bounds(x, y, z)
        height = bounds.north - bounds.south
        width = bounds.east - bounds.west
        # midpoint: half the height above the southern edge, half the
        # width east of the western edge
        center = (bounds.south + height / 2, bounds.west + width / 2)
        cache_center[k] = center
        return center
    return cache_center[k]
def in_boundaries(k, lat, lon, boundary, west, south, east, north):
    """Return True when (lat, lon) lies inside *boundary* (memoized on *k*).

    The cheap bounding-box test runs first; the exact polygon containment
    check only happens when the point is inside the box.
    """
    if k in cache_in_bound:
        return cache_in_bound[k]
    inside = south < lat < north and west < lon < east
    if inside:
        inside = boundary.contains(shapely.geometry.Point(lon, lat))
    cache_in_bound[k] = inside
    return inside
# Output-column registry: (column name, value extractor, list of --no_* flag
# names that suppress the column).  Extractors receive the positional
# arguments of flush_fields plus its **kwargs.
FIELD_VALUES = (
    ('data', lambda k, date, count, *args, **kwargs: date, []),
    ('count', lambda k, date, count, *args, **kwargs: count, []),
    ('z', lambda k, date, count, z, x, y, *args, **kwargs: z, ['no_xyz']),
    ('x', lambda k, date, count, z, x, y, *args, **kwargs: x, ['no_xyz']),
    ('y', lambda k, date, count, z, x, y, *args, **kwargs: y, ['no_xyz']),
    ('lat', lambda k, date, count, z, x, y, lat, lon, *args, **kwargs: lat, ['no_latlon']),
    ('lon', lambda k, date, count, z, x, y, lat, lon, *args, **kwargs: lon, ['no_latlon']),
    ('per_day', lambda k, date, count, *args, **kwargs: count / kwargs['days'], ['no_per_day']),
    ('countries', lambda k, date, count, z, x, y, lat, lon, countries, *args, **kwargs: countries, ['no_countries']),
)
def flush_fields(stdout, date, count, z, x, y, lat, lon, countries, extra, headers=False, **kwargs):
    """Write one CSV row (or the header row when headers=True) to *stdout*.

    Columns come from FIELD_VALUES; a column is skipped when any of its
    associated --no_* flags is truthy in kwargs.  *extra* (the per-boundary
    label) is appended as a trailing column when it is not None.  The row
    is encoded to bytes because stdout is the binary stream.
    """
    k = '%s/%s/%s' % (z, x, y)
    values = []
    for field, applier, filters in FIELD_VALUES:
        if any(kwargs.get(filter) for filter in filters):
            continue
        if headers:
            values.append(field)
        else:
            values.append(applier(k, date, count, z, x, y, lat, lon, countries, extra, **kwargs))
    if extra is not None:
        values.append(extra)
    stdout.write(('%s\n' % ','.join(str(value) for value in values)).encode())
def flush(stdout, tiles, min_count, max_count, boundaries, **kwargs):
    """Emit all accumulated tile counts as CSV rows; return a fresh accumulator.

    Rows with counts outside [min_count, max_count] are dropped.  When
    *boundaries* is given, a row is written once per boundary polygon that
    contains the tile center.  Returns a new empty defaultdict(int) that
    the caller should rebind as its next accumulator.
    """
    for k, count in tiles.items():
        if min_count and count < min_count:
            continue
        if max_count and count > max_count:
            continue
        date, z, x, y, countries = k
        lat, lon = calculate_center(x, y, z)
        if boundaries is None:
            flush_fields(stdout, date, count, z, x, y, lat, lon, countries, None, **kwargs)
            continue
        for boundary, boundary_bounds, extra, hash in boundaries:
            # memoization key for the point-in-polygon test
            cache_key = '%s/%s/%s' % (lat, lon, hash)
            if not in_boundaries(cache_key, lat, lon, boundary, *boundary_bounds):
                continue
            flush_fields(stdout, date, count, z, x, y, lat, lon, countries, extra, **kwargs)
    return collections.defaultdict(int)
def split(stdin, stdout, date_precision=None, per_day=False,
          boundaries=tuple(), boundary_buffer=None,
          date_from=None, date_to=None,
          min_count=None, max_count=None,
          min_zoom=None, max_zoom=None,
          min_subz=None, max_subz=None,
          extras=tuple(), extra_header=None, **kwargs):
    """Aggregate per-tile CSV log lines from *stdin* and write CSV to *stdout*.

    Input lines are bytes of the form date,z,x,y,count,lat,lon,countries.
    Tiles outside [min_subz, max_subz] are remapped into that zoom range
    (descendants for too-coarse tiles, the single ancestor for too-fine
    ones).  Counts accumulate per (date, z, x, y, countries) key and are
    flushed whenever the (bucketed) date changes, and once more at EOF.

    Fixes over the previous revision:
      * the accumulator is rebound to flush()'s fresh return value at each
        date boundary -- it was previously discarded, so earlier dates were
        re-emitted on every flush and memory grew without bound;
      * the descend / ascend / in-range branches are now mutually exclusive
        (elif/else) -- the old code rebound x, y, z inside the first two
        branches and then double-counted through the third `if`.
    """
    if not kwargs.get('no_per_day'):
        # the per_day column needs an explicit, ordered date interval
        date_from_parsed = datetime.datetime.strptime(date_from, '%Y-%m-%d')
        date_to_parsed = datetime.datetime.strptime(date_to, '%Y-%m-%d')
        assert date_from_parsed
        assert date_to_parsed
        assert date_from_parsed < date_to_parsed
        kwargs['days'] = (date_to_parsed - date_from_parsed).days
    if not kwargs.get('no_header'):
        flush_fields(stdout, 'date', 'count', 'z', 'x', 'y', 'lat', 'lon', 'countries',
                     ','.join(extras) or None, headers=True, **kwargs)
    # Load boundary polygons (GeoJSON file paths or ready geometries),
    # pairing each with its optional extra label.
    boudaries_geom = []
    for boundary, extra in itertools.zip_longest(boundaries, extras):
        if isinstance(boundary, str):
            boundary = shapely.geometry.shape(json.load(open(boundary)))
        if boundary_buffer is not None:
            boundary = boundary.buffer(boundary_buffer)
        boudaries_geom.append((boundary, boundary.bounds, extra, id(boundary)))
    boudaries_geom = boudaries_geom or None
    if date_precision:
        date_prec = float(date_precision[:-1])
        date_prec_measure = date_precision[-1:]
    date_from = date_from or MIN_DATE
    date_to = date_to or MAX_DATE
    min_zoom = min_zoom or MIN_ZOOM
    max_zoom = max_zoom or MAX_ZOOM
    min_subz = min_subz or min_zoom
    max_subz = max_subz or max_zoom
    assert date_from <= date_to
    assert min_zoom <= max_zoom
    assert min_subz <= max_subz
    tiles = flush(stdout, {}, min_count, max_count, boudaries_geom, **kwargs)
    start = datetime.datetime.now()
    flush_date = None
    for line in stdin:
        date, z, x, y, count, lat, lon, countries = line.decode().strip().split(',')
        if not date_from <= date <= date_to:
            continue
        count = int(count)
        x = int(x)
        y = int(y)
        z = int(z)
        if not min_zoom <= z <= max_zoom:
            continue
        if date_precision is not None:
            date = get_date_precision(date, date_prec, date_prec_measure)
        if flush_date is None:
            start = datetime.datetime.now()
            flush_date = date
        if date != flush_date:
            sys.stderr.write('%s - %s\n' % (flush_date, datetime.datetime.now() - start))
            # rebind: flush() emits the rows and returns a fresh accumulator
            tiles = flush(stdout, tiles, min_count, max_count, boudaries_geom, **kwargs)
            flush_date = date
            start = datetime.datetime.now()
        if z < min_subz:
            # too coarse: credit every descendant tile at min_subz
            for dx, dy, dz in get_down_tiles(x, y, z, min_subz):
                tiles[(date, dz, dx, dy, countries)] += count
        elif z > max_subz:
            # too fine: roll the count up into the ancestor at max_subz
            ax, ay, az = get_up_tile(x, y, z, max_subz)
            tiles[(date, az, ax, ay, countries)] += count
        else:
            tiles[(date, z, x, y, countries)] += count
    sys.stderr.write('%s - %s\n' % (flush_date, datetime.datetime.now() - start))
    flush(stdout, tiles, min_count, max_count, boudaries_geom, **kwargs)
if __name__ == '__main__':
    # CLI flags mirror split()'s keyword parameters; anything not given
    # falls back to the in-function defaults.
    parser = argparse.ArgumentParser(description='Agregate OSM access logs.')
    parser.add_argument('--date_from', default=None)
    parser.add_argument('--date_to', default=None)
    parser.add_argument('--date_precision', default=None)
    parser.add_argument('--boundaries', action='append', default=[])
    parser.add_argument('--boundary_buffer', type=float, default=None)
    parser.add_argument('--min_zoom', type=int, default=None)
    parser.add_argument('--max_zoom', type=int, default=None)
    parser.add_argument('--min_subz', type=int, default=None)
    parser.add_argument('--max_subz', type=int, default=None)
    parser.add_argument('--min_count', type=int, default=None)
    parser.add_argument('--max_count', type=int, default=None)
    parser.add_argument('--no_header', action='store_true')
    parser.add_argument('--no_xyz', action='store_true')
    parser.add_argument('--no_latlon', action='store_true')
    parser.add_argument('--no_per_day', action='store_true')
    parser.add_argument('--no_countries', action='store_true')
    # use the binary stdio streams on Python 3: split() decodes lines itself
    stdin = sys.stdin if sys.version_info.major == 2 else sys.stdin.buffer
    stdout = sys.stdout if sys.version_info.major == 2 else sys.stdout.buffer
    split(stdin, stdout, **parser.parse_args().__dict__)
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n# Copyright (c) 2014, 2015, 2016, 2017 Timothy Savannah under LGPL version 2.1. See LICENSE for more information.\n#\n# fields.compressed - Some types and objects related to compressed fields. Use in place of IRField ( in FIELDS... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# Copyright (c) 2014, 2015, 2016, 2017 Timothy Savannah under LGPL version 2.1. See LICENSE for more information.\n#\n# fields.compressed - Some types and objects related to compressed fields. Use in place of IRF... | ```python
# Copyright (c) 2014, 2015, 2016, 2017 Timothy Savannah under LGPL version 2.1. See LICENSE for more information.
#
# fields.compressed - Some types and objects related to compressed fields. Use in place of IRField ( in FIELDS array to activate functionality )
#
# vim: set ts=8 shiftwidth=8 softtabstop=8 noexpandtab :
import zlib
import bz2
from . import IRField, irNull
from ..compat_str import tobytes, isEmptyString, getDefaultIREncoding, isStringy
# Public API of this module.
__all__ = ('COMPRESS_MODE_BZ2', 'COMPRESS_MODE_ZLIB', 'IRCompressedField')

# COMPRESS_MODE_ZLIB - Use to compress using zlib (gzip)
COMPRESS_MODE_ZLIB = 'zlib'

# All aliases for gzip compression
_COMPRESS_MODE_ALIASES_ZLIB = ('gzip', 'gz')

# COMPRESS_MODE_BZ2 - Use to compress using bz2 (bz2)
COMPRESS_MODE_BZ2 = 'bz2'

# All aliases for bz2 compression
_COMPRESS_MODE_ALIASES_BZ2 = ('bzip2', )

# COMPRESS_MODE_LZMA - Use to compress using lzma
COMPRESS_MODE_LZMA = 'lzma'

# All aliases for lzma compression
_COMPRESS_MODE_ALIASES_LZMA = ('xz', )

# Lazily-resolved lzma module; see IRCompressedField.getCompressMod.
# NOTE(review): a `global` statement at module level is a no-op.
global _lzmaMod
_lzmaMod = None
class IRCompressedField(IRField):
    '''
        IRCompressedField - A field that automatically compresses/decompresses going to/from Redis.

        Pass this into the FIELDS array of the model to get this functionality, like:

            FIELDS = [ ..., IRCompressedField('my_compressed_field', compressMode=COMPRESS_MODE_ZLIB]

        By default, after fetch the data will be encoded as "bytes". If you need it to be unicode/string, use an
        IRFieldChain with an IRUnicodeField and an IRCompressedField together.

        An IRCompressedField is indexable, and forces the index to be hashed.
    '''

    # Compressed values can be indexed, but only via a hashed index.
    CAN_INDEX = True
    hashIndex = True

    # NOTE: We don't support different compression levels, as doing so changes header and would prevent indexing.

    def __init__(self, name='', compressMode=COMPRESS_MODE_ZLIB, defaultValue=irNull):
        '''
            __init__ - Create this object

            @param name <str> - Field name

            @param compressMode <str>, default "zlib". Determines the compression module to use
              for this field. See COMPRESS_MODE_* variables in this module.

              Supported values as of 5.0.0 are:

                 "zlib" / "gz" / "gzip" - zlib compression

                 "bz2" / "bzip2" - bzip2 compression

                 "lzma" / "xz" - LZMA compression.
                   NOTE: This is provided in python3 by default, but in python2 you will need an external module.
                   IndexedRedis will automatically detect if "backports.lzma" or "lzmaffi" are installed, and use them
                   if the core "lzma" module is not available.

            @param defaultValue - The default value for this field

            An IRCompressedField is indexable, and forces the index to be hashed.
        '''
        self.valueType = None
        self.defaultValue = defaultValue

        # Each mode records the compressed stream's magic header bytes (used
        # to detect already-compressed values) plus any extra positional
        # arguments for the module's compress() call.
        if compressMode == COMPRESS_MODE_ZLIB or compressMode in _COMPRESS_MODE_ALIASES_ZLIB:
            self.compressMode = COMPRESS_MODE_ZLIB
            self.header = b'x\xda'
            self.extraCompressArgs = (9, )
        elif compressMode == COMPRESS_MODE_BZ2 or compressMode in _COMPRESS_MODE_ALIASES_BZ2:
            self.compressMode = COMPRESS_MODE_BZ2
            self.header = b'BZh9'
            self.extraCompressArgs = (9, )
        elif compressMode == COMPRESS_MODE_LZMA or compressMode in _COMPRESS_MODE_ALIASES_LZMA:
            self.compressMode = COMPRESS_MODE_LZMA
            self.header = b'\xfd7zXZ'
            self.extraCompressArgs = tuple()
            self.getCompressMod()  # Die early if LZMA compression is not available
        else:
            raise ValueError('Invalid compressMode, "%s", for field "%s". Should be one of the IndexedRedis.fields.compressed.COMPRESS_MODE_* constants.' %(str(compressMode), name))

    def getCompressMod(self):
        '''
            getCompressMod - Return the module used for compression on this field

            @return <module> - The module for compression
        '''
        if self.compressMode == COMPRESS_MODE_ZLIB:
            return zlib
        if self.compressMode == COMPRESS_MODE_BZ2:
            return bz2
        if self.compressMode == COMPRESS_MODE_LZMA:
            # Since lzma is not provided by python core in python2, search out some common alternatives.
            # Throw exception if we can find no lzma implementation.
            global _lzmaMod
            if _lzmaMod is not None:
                return _lzmaMod
            try:
                import lzma
                _lzmaMod = lzma
                return _lzmaMod
            except:
                # Python2 does not provide "lzma" module, search for common alternatives
                try:
                    from backports import lzma
                    _lzmaMod = lzma
                    return _lzmaMod
                except:
                    pass
                try:
                    import lzmaffi as lzma
                    _lzmaMod = lzma
                    return _lzmaMod
                except:
                    pass
                raise ImportError("Requested compress mode is lzma and could not find a module providing lzma support. Tried: 'lzma', 'backports.lzma', 'lzmaffi' and none of these were available. Please install one of these, or to use an unlisted implementation, set IndexedRedis.fields.compressed._lzmaMod to the module (must implement standard python compression interface)")

    def _toStorage(self, value):
        # Empty values are stored as the empty string, uncompressed.
        if isEmptyString(value):
            return ''

        try:
            # conversion attempted only to validate encodability; the result
            # itself is unused here
            valueBytes = tobytes(value)
        except Exception as e:
            raise ValueError('Failed to convert value to bytes. If this requires a different codec than the defaultIREncoding (currently %s), use an IRFieldChain with an IRBytesField or IRUnicodeField with the required encoding set (Depending on if you want the uncompressed value to be "bytes" or "unicode" type). Exception was: <%s> %s' %(getDefaultIREncoding(), e.__class__.__name__, str(e)) )

        # TODO: I don't think this next block is needed anymore..
        #   Check it out when IRCompressionTest is written
        if tobytes(value[:len(self.header)]) == self.header:
            # value already starts with this mode's magic header -- assume it
            # is compressed and store it untouched
            return value

        return self.getCompressMod().compress(tobytes(value), *self.extraCompressArgs)

    def _fromStorage(self, value):
        # Empty storage maps back to the empty string.
        if isEmptyString(value):
            return ''

        # TODO: Check this out too, this next conditional probably shouldn't be here, maybe it should be an error when false..
        if isStringy(value) and tobytes(value[:len(self.header)]) == self.header:
            return self.getCompressMod().decompress(value)

        return value

    def _fromInput(self, value):
        # Input values stay uncompressed; compression happens in _toStorage.
        return value

    def _getReprProperties(self):
        # Extra properties shown in repr() for this field type.
        return [ 'compressMode="%s"' %(self.compressMode, ) ]

    def copy(self):
        # Duplicate this field definition with identical configuration.
        return self.__class__(name=self.name, compressMode=self.compressMode, defaultValue=self.defaultValue)

    def __new__(self, name='', compressMode=COMPRESS_MODE_ZLIB, defaultValue=irNull):
        # Delegates to IRField.__new__, which only consumes the field name.
        return IRField.__new__(self, name)
# vim:set ts=8 shiftwidth=8 softtabstop=8 noexpandtab :
``` |
[
{
"content": "Here is the script:\n```python\n# -*- coding: UTF-8 -*-\n\n\nimport re,urllib,urlparse\n\nfrom resources.lib.modules import cleantitle\nfrom resources.lib.modules import client\nfrom resources.lib.modules import debrid\n\nclass source:\n def __init__(self):\n self.priority = 1\n s... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n# -*- coding: UTF-8 -*-\n\n\nimport re,urllib,urlparse\n\nfrom resources.lib.modules import cleantitle\nfrom resources.lib.modules import client\nfrom resources.lib.modules import debrid\n\nclass source:\n def __init__(self):\n self.priorit... | ```python
# -*- coding: UTF-8 -*-
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
    """Scraper module for best-moviez.ws (Python 2 / Kodi addon style).

    Searches the site's RSS feed for a movie or episode and yields premium
    hoster links. All results are flagged debridonly=True, so a debrid
    account is required to resolve them. The pervasive bare ``except``
    blocks are deliberate scraper style: any item that fails to parse is
    silently skipped so one bad post cannot kill the whole search.
    """
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['best-moviez.ws']
        self.base_link = 'http://www.best-moviez.ws'
        # %s placeholder receives the url-quoted search query.
        self.search_link = '/search/%s/feed/rss2/'

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Pack show identifiers into a urlencoded string; decoded later in sources().
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Extend the packed show url (from tvshow()) with episode-level fields.
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Search the RSS feed and collect candidate hoster links.

        Returns a list of dicts with keys: source, quality, language, url,
        info, direct, debridonly. Returns an empty list when url is None or
        when no debrid service is active.
        """
        try:
            sources = []
            if url == None: return sources
            # Site serves premium-host links only; useless without debrid.
            if debrid.status() == False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            # hdlr is the token used to validate matches: SxxExx for episodes, year for movies.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            posts = client.parseDOM(r, 'item')
            # Only premium hosters are relevant here, so swap in hostprDict.
            hostDict = hostprDict
            items = []
            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    c = client.parseDOM(post, 'content.+?')[0]
                    # First "NN GB/MB"-looking token in the post body, else '0'.
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
                    s = s[0] if s else '0'
                    # Pair each anchor's text with its href; carry the size along.
                    u = zip(client.parseDOM(c, 'a', ret='href'), client.parseDOM(c, 'a'))
                    u = [(i[1], i[0], s) for i in u]
                    items += u
                except:
                    pass
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    # Strip year/episode markers to compare the bare title.
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr: raise Exception()
                    # Everything after the year/episode marker describes the release format.
                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]
                    # Reject subtitle-only, dubbed and extras releases.
                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()
                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    # Screener/cam markers downgrade the quality label.
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
                    info = []
                    if '3d' in fmt: info.append('3D')
                    try:
                        # Normalize the size string to "X.XX GB" (MB divided by 1024).
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass
                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
                    info = ' | '.join(info)
                    url = item[1]
                    # Skip archives / disc images — they cannot be streamed.
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass
            return sources
        except:
            return sources

    def resolve(self, url):
        # Links are resolved by the debrid layer; nothing to rewrite here.
        return url
``` |
[
{
"content": "Here is the code block:\n```python\nimport inspect\nimport os\nimport sys\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import DEFAULT_DB_ALIAS, connections\nfrom django.db.migrations.loader import Migrati... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\nimport inspect\nimport os\nimport sys\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import DEFAULT_DB_ALIAS, connections\nfrom django.db.migrations.loade... | ```python
import inspect
import os
import sys
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader
from django.utils.timezone import now
from . import __version__ as VERSION
from .conf import get_default_language
from .utils import split_translated_fieldname
try:
from modeltranslation.translator import translator
DJANGO_MODELTRANSLATION_AVAILABLE = True
except ImportError:
DJANGO_MODELTRANSLATION_AVAILABLE = False
DEFAULT_LANGUAGE = get_default_language()
def _raise_if_not_django_modeltranslation():
    """Raise ImproperlyConfigured if django-modeltranslation is not importable.

    The modeltranslation -> modeltrans migration helpers read the legacy
    field registry from django-modeltranslation, so that package must still
    be installed while these migrations are being generated.
    """
    if not DJANGO_MODELTRANSLATION_AVAILABLE:
        # Bug fix: the original implicit string concatenation rendered as
        # "...when creatingthe modeltranslation..." (missing space).
        raise ImproperlyConfigured(
            "django-modeltranslation must be still installed when creating "
            "the modeltranslation -> modeltrans migrations."
        )
def get_translatable_models():
    """
    Get the translatable models according to django-modeltranslation

    !! only use to migrate from django-modeltranslation !!

    Raises ImproperlyConfigured when django-modeltranslation is not installed.
    """
    _raise_if_not_django_modeltranslation()
    return translator.get_registered_models()
def get_translated_fields(Model):
    """
    Enumerates the translated fields for a model according to django-modeltranslation.

    For example: title_nl, title_en, title_fr, body_nl, body_en, body_fr

    !! only use to migrate from django-modeltranslation !!

    Yields field names (strings); raises ImproperlyConfigured when
    django-modeltranslation is not installed.
    """
    _raise_if_not_django_modeltranslation()
    options = translator.get_options_for_model(Model)
    # options.fields maps each original field to its per-language variants.
    for original_field, fields in options.fields.items():
        for translated in fields:
            yield translated.name
def copy_translations(Model, fields):
    """
    Copy translations for all items in the database for a Model with
    translations managed by django-modeltranslation into a json field `i18n`
    managed by django-modeltrans.

    Values for the default language will be copied to the original field.

    Arguments:
        Model: A (historical) Model from the migraton's app registry
        fields(iterable): list of fields to copy into their new places.
    """
    # NOTE: iterates and saves one row at a time — O(rows) queries; fine for
    # a one-off migration, slow on very large tables.
    for m in Model.objects.all():
        m.i18n = {}
        for field in fields:
            value = getattr(m, field)
            # Skip unset translations so the i18n blob stays minimal.
            if value in (None, ""):
                continue
            original_field, lang = split_translated_fieldname(field)
            if lang == DEFAULT_LANGUAGE:
                # Default-language value lives on the original field, not in i18n.
                setattr(m, original_field, value)
            else:
                m.i18n[field] = value
        m.save()
def get_latest_migration(app_name, connection=None):
    """
    Get the name of the latest applied migration and raises if unapplied
    migrations exist for the app.

    Arguments:
        app_name(str): Name of the app.
        connection: database connection to get the latest migration for;
            defaults to the DEFAULT_DB_ALIAS connection.

    Returns the migration name (str) of the last applied migration, or None
    if no migration for the app has been applied.

    Simplified version of
    https://github.com/django/django/blob/1.9.2/django/core/management/commands/showmigrations.py#L38-L77
    """
    if connection is None:
        connection = connections[DEFAULT_DB_ALIAS]
    loader = MigrationLoader(connection, ignore_no_migrations=True)
    graph = loader.graph
    last = None
    shown = set()
    # Walk the forwards plan of every leaf; the last applied node seen wins.
    for node in graph.leaf_nodes(app_name):
        for plan_node in graph.forwards_plan(node):
            if plan_node not in shown and plan_node[0] == app_name:
                if plan_node in loader.applied_migrations:
                    last = plan_node[1]
                else:
                    # An unapplied migration before the leaf means the app is
                    # not fully migrated — refuse to guess.
                    raise Exception("You have unapplied migration(s) for app {}".format(app_name))
                shown.add(plan_node)
    return last
def get_next_migration_filename(app_name, connection=None, migration_type="data"):
    """
    Return name (including the absolute path) of the next migration to insert
    for this app.

    Arguments:
        app_name(str): Name of the app.
        connection: database connection used to find the latest applied
            migration (defaults to the default database).
        migration_type(str): tag embedded in the generated filename.

    Raises:
        ValueError: when the app has no applied migrations to number from.
    """
    # Bug fix: `connection` was previously accepted but never forwarded, so
    # the lookup always ran against the default database.
    latest_migration_name = get_latest_migration(app_name, connection=connection)
    if latest_migration_name is None:
        # Previously this crashed with an opaque TypeError on None[0:4].
        raise ValueError(
            "No applied migrations found for app '{}'".format(app_name)
        )
    # Migration names start with a zero-padded sequence number, e.g. "0042_...".
    next_migration_name = "{0:04d}_i18n_{1}_migration.py".format(
        int(latest_migration_name[0:4]) + 1, migration_type
    )
    app_base_path = os.path.dirname(apps.get_app_config(app_name).module.__file__)
    return os.path.join(app_base_path, "migrations", next_migration_name)
class I18nMigration(object):
    """Base class that renders a Django migration file from a template.

    Subclasses define `migration_type`, the helper functions to inline into
    the generated file, and `get_operations()`. Models are registered with
    `add_model()` and the final source is produced by `write()` /
    `write_migration_file()`.
    """
    # Callables (or source strings) whose source is inlined into the
    # generated migration file.
    helper_functions = ()
    template = """
# -*- coding: utf-8 -*-
# Generated by django-modeltrans {version} on {timestamp}
from __future__ import print_function, unicode_literals

from django.db import migrations

DEFAULT_LANGUAGE = "{DEFAULT_LANGUAGE}"

{helpers}


class Migration(migrations.Migration):
    dependencies = [
        ("{app}", "{last_migration}"),
    ]

    operations = [
        {operations}
    ]
"""

    def __init__(self, app):
        self.models = []
        self.app = app
        # The generated migration must depend on the app's latest applied
        # migration; fall back to a TODO marker when none can be determined.
        self.migration_filename = (
            get_latest_migration(self.app) or "# TODO: manually insert latest migration here"
        )

    def get_helper_functions(self):
        """Yield the source code (str) of every helper to inline."""
        def to_str(fn):
            # Helpers may be given as callables (source is extracted) or as
            # ready-made source strings.
            return inspect.getsource(fn) if callable(fn) else fn

        for fn in self.helper_functions:
            yield to_str(fn)
        for fn in self.get_extra_helper_functions():
            yield to_str(fn)

    def get_extra_helper_functions(self):
        """Hook for subclasses to contribute additional helpers."""
        return []

    def add_model(self, Model, fields):
        """Register a (Model, fields) pair to be handled by this migration."""
        self.models.append((Model, fields))

    def get_helper_src(self):
        """Return all helper sources joined into one code section."""
        return "\n\n".join(self.get_helper_functions())

    def write(self, out=None):
        """Render the migration source to `out` (default: stdout)."""
        if out is None:
            out = sys.stdout

        out.write(
            self.template.format(
                version=VERSION,
                DEFAULT_LANGUAGE=getattr(
                    settings, "MODELTRANSLATION_DEFAULT_LANGUAGE", get_default_language()
                ),
                timestamp=now().strftime("%Y-%m-%d %H:%M"),
                helpers=self.get_helper_src(),
                app=self.app,
                last_migration=self.migration_filename,
                operations=self.get_operations(),
            )
        )

    def write_migration_file(self):
        """
        Write the migration to file.

        NOTE: relies on `self.migration_type`, which is defined by subclasses
        only — calling this on I18nMigration directly raises AttributeError.
        """
        filename = get_next_migration_filename(self.app, migration_type=self.migration_type)

        with open(filename, "w") as f:
            self.write(f)

        return filename
class I18nDataMigration(I18nMigration):
    """Generates the data migration that copies django-modeltranslation
    columns into the django-modeltrans `i18n` field for each registered model.
    """
    migration_type = "data"

    # Inlined into the generated file so it has no runtime dependency on
    # django-modeltrans itself.
    helper_functions = (split_translated_fieldname, copy_translations)

    forwards_template = """
def forwards(apps, schema_editor):
    app = '{app}'
    todo = (
        {todo},
    )
    for model, fields in todo:
        Model = apps.get_model(app, model)

        copy_translations(Model, fields)
"""

    def get_extra_helper_functions(self):
        # The forwards() function itself is emitted as a helper, with the
        # registered (model, fields) pairs baked into its `todo` tuple.
        yield self.forwards_template.format(
            todo=",\n        ".join(
                [str((Model.__name__, fields)) for Model, fields in self.models]
            ),
            app=self.app,
        )

    def get_operations(self):
        return """
        # The copying of values is (sort of) reversable by a no-op:
        #  - values are copied into i18n (which is not used by anything but django-modeltrans)
        #  - the default language is copied to the orignal field, which was not used
        #    with django-modeltrans.
        migrations.RunPython(forwards, migrations.RunPython.noop),
        """
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# -*- encoding: utf-8 -*-\nimport pytest\n\nfrom decimal import Decimal\n\nfrom finance.tests.factories import VatSettingsFactory\nfrom invoice.models import Invoice, InvoiceLine\nfrom invoice.service import InvoicePrint\nfrom i... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# -*- encoding: utf-8 -*-\nimport pytest\n\nfrom decimal import Decimal\n\nfrom finance.tests.factories import VatSettingsFactory\nfrom invoice.models import Invoice, InvoiceLine\nfrom invoice.service import Invo... | ```python
# -*- encoding: utf-8 -*-
import pytest
from decimal import Decimal
from finance.tests.factories import VatSettingsFactory
from invoice.models import Invoice, InvoiceLine
from invoice.service import InvoicePrint
from invoice.tests.factories import (
InvoiceFactory,
InvoiceLineFactory,
InvoiceSettingsFactory,
TimeRecordFactory,
)
@pytest.mark.django_db
def test_create():
    """ Create a simple invoice """
    invoice = InvoiceFactory()
    invoice.full_clean()
    invoice.save()
    assert invoice.pk > 0
    assert invoice.number > 0


@pytest.mark.django_db
def test_create_with_lines():
    """ Create a simple invoice with lines """
    VatSettingsFactory()
    invoice = InvoiceFactory()
    line = InvoiceLineFactory(
        invoice=invoice,
        quantity=Decimal('1.3'),
        units='hours',
        price=Decimal('300.00'),
    )
    line = InvoiceLineFactory(
        invoice=invoice,
        quantity=Decimal('2.4'),
        units='hours',
        price=Decimal('200.23'),
    )
    assert invoice.pk > 0
    # net = 1.3 * 300.00 + 2.4 * 200.23; gross adds VAT on top.
    assert Decimal('870.55') == invoice.net
    assert Decimal('1044.66') == invoice.gross
    assert line.is_credit is False


@pytest.mark.django_db
def test_description():
    """An invoice describes itself simply as 'Invoice'."""
    invoice = InvoiceFactory()
    assert 'Invoice' == invoice.description


@pytest.mark.django_db
def test_get_first_line_number():
    """get the number for the first invoice line"""
    invoice = InvoiceFactory()
    assert 1 == invoice.get_next_line_number()


@pytest.mark.django_db
def test_get_next_line_number():
    """get the number for the next invoice line"""
    invoice = InvoiceFactory()
    InvoiceLineFactory(invoice=invoice, line_number=1)
    InvoiceLineFactory(invoice=invoice, line_number=2)
    assert 3 == invoice.get_next_line_number()


@pytest.mark.django_db
def test_get_next_line_number_fill_gap():
    """get the number for the next invoice line"""
    # A gap at 3 is filled before continuing past the highest number.
    invoice = InvoiceFactory()
    InvoiceLineFactory(invoice=invoice, line_number=1)
    InvoiceLineFactory(invoice=invoice, line_number=2)
    InvoiceLineFactory(invoice=invoice, line_number=4)
    assert 3 == invoice.get_next_line_number()


@pytest.mark.django_db
def test_get_next_line_number_two_invoices():
    """get the number for the next invoice line"""
    # Line numbering is scoped per invoice, not global.
    invoice_1 = InvoiceFactory()
    InvoiceLineFactory(invoice=invoice_1, line_number=1)
    InvoiceLineFactory(invoice=invoice_1, line_number=2)
    invoice_2 = InvoiceFactory()
    InvoiceLineFactory(invoice=invoice_2, line_number=1)
    assert 3 == invoice_1.get_next_line_number()
    assert 2 == invoice_2.get_next_line_number()


@pytest.mark.django_db
def test_has_lines():
    """does the invoice have any lines"""
    invoice = InvoiceFactory()
    InvoiceLineFactory(
        invoice=invoice,
        quantity=Decimal('1.3'),
        units='hours',
        price=Decimal('300.00'),
    )
    assert invoice.has_lines is True


@pytest.mark.django_db
def test_has_lines_not():
    """An invoice without lines reports has_lines False."""
    invoice = InvoiceFactory()
    assert invoice.has_lines is False


@pytest.mark.django_db
def test_next_number():
    """Next invoice number continues from the highest existing number."""
    InvoiceFactory(number=99)
    assert 100 == Invoice.objects.next_number()


@pytest.mark.django_db
def test_next_number_2():
    """Deleted invoices are excluded when computing the next number."""
    InvoiceFactory(number=99, deleted=True)
    InvoiceFactory(number=98, deleted_version=1)
    assert 1 == Invoice.objects.next_number()


@pytest.mark.django_db
def test_user_can_edit():
    """A line with no time records attached is editable."""
    line = InvoiceLineFactory()
    assert line.user_can_edit is True


@pytest.mark.django_db
def test_user_can_edit_has_time():
    """A line becomes read-only once a time record is attached."""
    line = InvoiceLineFactory()
    TimeRecordFactory(invoice_line=line)
    assert line.user_can_edit is False


@pytest.mark.django_db
def test_user_can_edit_invoice():
    """A line stays read-only after the invoice PDF has been generated."""
    InvoiceSettingsFactory()
    VatSettingsFactory()
    invoice = InvoiceFactory()
    line = InvoiceLineFactory(invoice=invoice)
    TimeRecordFactory(invoice_line=line)
    InvoicePrint().create_pdf(invoice, None)
    # refresh
    line = InvoiceLine.objects.get(pk=line.pk)
    assert line.user_can_edit is False
``` |
[
{
"content": "```python\n# coding: utf-8\n#\n# This file is part of Sequana software\n#\n# Copyright (c) 2016 - Sequana Development Team\n#\n# File author(s):\n# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>\n# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,\n# <d.desvillechab... | [
{
"content": "<|memory_start|>```python\n# coding: utf-8\n#\n# This file is part of Sequana software\n#\n# Copyright (c) 2016 - Sequana Development Team\n#\n# File author(s):\n# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>\n# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,\n# ... | ```python
# coding: utf-8
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Module to write coverage report"""
import os
import glob
import io
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.utils import config
from sequana.lazy import pandas as pd
from sequana.lazy import pylab
import colorlog
logger = colorlog.getLogger(__name__)
from sequana.utils.datatables_js import DataTable
class BWABAMtoFastQModule(SequanaBaseModule):
    """ Write HTML report of BWA mapping (phix)"""
    def __init__(self, input_directory, output_filename=None):
        """
        :param input_directory: the directory of the bwa_bam_to_fastq output
        :param output_filename: if not provided, the HTML is not created.
        """
        super().__init__()
        self.directory = input_directory + os.sep
        self.create_report_content()
        if output_filename:
            self.create_html(output_filename)

    def create_report_content(self):
        """ Generate the sections list to fill the HTML report.
        """
        self.sections = list()
        self.add_stats()

    def _get_html_stats(self):
        """Build the HTML fragment summarising mapped/unmapped read counts.

        Reads bwa_mem_stats.json from the input directory.
        """
        from sequana.tools import StatsBAM2Mapped
        from easydev import precision
        data = StatsBAM2Mapped(self.directory + "bwa_mem_stats.json").data
        html = "Reads with Phix: %s %%<br>" % precision(data['contamination'], 3)

        # add HTML table
        # R2 columns are only present for paired-end runs.
        if "R2_mapped" in data.keys():
            df = pd.DataFrame({
                'R1': [data['R1_mapped'], data['R1_unmapped']],
                'R2': [data['R2_mapped'], data['R2_unmapped']]})
        else:
            df = pd.DataFrame({
                'R1': [data['R1_mapped'], data['R1_unmapped']]})
        df.index = ['mapped', 'unmapped']

        datatable = DataTable(df, "bwa_bam")
        datatable.datatable.datatable_options = {
            'scrollX': '300px',
            'pageLength': 30,
            'scrollCollapse': 'true',
            'dom': 'irtpB',
            "paging": "false",
            'buttons': ['copy', 'csv']}
        # NOTE(review): js/html_tab are computed but the line embedding them
        # is commented out below, so the table is currently not rendered.
        js = datatable.create_javascript_function()
        html_tab = datatable.create_datatable(float_format='%.3g')
        #html += "{} {}".format(html_tab, js)
        html += "Unpaired: %s <br>" % data['unpaired']
        html += "duplicated: %s <br>" % data['duplicated']
        return html

    def _get_html_mapped_stats(self):
        # Placeholder: no mapped-stats section is generated yet.
        html = ""
        return html

    def add_stats(self):
        """Append the stats section (input + mapped stats) to the report."""
        html1 = self._get_html_stats()
        html2 = self._get_html_mapped_stats()
        self.sections.append({
            "name": "Stats inputs",
            "anchor": "stats",
            "content": html1+html2
        })
``` |
[
{
"content": "Here is the snippet:\n```python\n#!/usr/bin/env python\nimport numpy\nimport os\nimport tensorflow as tf\nfrom multiprocessing import Pool\nfrom tqdm import tqdm\nimport numpy as np\nimport h5py\n\nfrom generic.data_provider.nlp_utils import DummyTokenizer\nfrom generic.data_provider.iterator impo... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n#!/usr/bin/env python\nimport numpy\nimport os\nimport tensorflow as tf\nfrom multiprocessing import Pool\nfrom tqdm import tqdm\nimport numpy as np\nimport h5py\n\nfrom generic.data_provider.nlp_utils import DummyTokenizer\nfrom generic.data_provid... | ```python
#!/usr/bin/env python
import numpy
import os
import tensorflow as tf
from multiprocessing import Pool
from tqdm import tqdm
import numpy as np
import h5py
from generic.data_provider.nlp_utils import DummyTokenizer
from generic.data_provider.iterator import Iterator
def extract_features(
        img_input,
        ft_output,
        network_ckpt,
        dataset_cstor,
        dataset_args,
        batchifier_cstor,
        out_dir,
        set_type,
        batch_size,
        no_threads,
        gpu_ratio):
    """Run every (deduplicated) image of the given dataset splits through a
    pretrained network and dump the resulting features to one HDF5 file per
    split ("<set>_features.h5", or "features.h5" for the "all" split).

    :param img_input: TF placeholder fed with image batches
    :param ft_output: TF tensor whose evaluated value is stored as features
    :param network_ckpt: checkpoint path restored into the session
    :param dataset_cstor: dataset constructor; called with dataset_args
    :param dataset_args: kwargs for dataset_cstor ("which_set" is overwritten)
    :param batchifier_cstor: batchifier constructor
    :param out_dir: output directory for the HDF5 files
    :param set_type: iterable of split names to process
    :param batch_size: number of images per forward pass
    :param no_threads: CPU worker processes for the data pipeline
    :param gpu_ratio: fraction of GPU memory to reserve
    """
    # CPU/GPU option
    cpu_pool = Pool(no_threads, maxtasksperchild=1000)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_ratio)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, network_ckpt)
        for one_set in set_type:
            print("Load dataset -> set: {}".format(one_set))
            dataset_args["which_set"] = one_set
            dataset = dataset_cstor(**dataset_args)

            # hack dataset to only keep one game by image
            image_id_set = {}
            games = []
            for game in dataset.games:
                if game.image.id not in image_id_set:
                    games.append(game)
                    image_id_set[game.image.id] = 1
            dataset.games = games
            no_images = len(games)

            # TODO find a more generic approach
            if type(dataset.games[0].image.id) is int:
                image_id_type = np.int64
            else:
                image_id_type = h5py.special_dtype(vlen=type(dataset.games[0].image.id))

            source_name = os.path.basename(img_input.name[:-2])
            dummy_tokenizer = DummyTokenizer()
            batchifier = batchifier_cstor(tokenizer=dummy_tokenizer, sources=[source_name])
            iterator = Iterator(dataset,
                                batch_size=batch_size,
                                pool=cpu_pool,
                                batchifier=batchifier)

            ############################
            #  CREATE FEATURES
            ############################
            print("Start computing image features...")
            if one_set == "all":
                filepath = os.path.join(out_dir, "features.h5")
            else:
                filepath = os.path.join(out_dir, "{}_features.h5".format(one_set))
            with h5py.File(filepath, 'w') as f:
                ft_shape = [int(dim) for dim in ft_output.get_shape()[1:]]
                ft_dataset = f.create_dataset('features', shape=[no_images] + ft_shape, dtype=np.float32)
                idx2img = f.create_dataset('idx2img', shape=[no_images], dtype=image_id_type)
                pt_hd5 = 0
                # Fix: the original kept an unused counter `i` that was then
                # shadowed by the inner enumerate loop — both removed/renamed.
                for batch in tqdm(iterator):
                    feat = sess.run(ft_output, feed_dict={img_input: numpy.array(batch[source_name])})

                    # Fix: do not shadow the `batch_size` parameter; the final
                    # batch is usually smaller than the configured size.
                    n_in_batch = len(batch["raw"])

                    # Store dataset
                    ft_dataset[pt_hd5: pt_hd5 + n_in_batch] = feat

                    # Store idx to image.id
                    for offset, game in enumerate(batch["raw"]):
                        idx2img[pt_hd5 + offset] = game.image.id

                    # update hd5 pointer
                    pt_hd5 += n_in_batch
                print("Start dumping file: {}".format(filepath))
            print("Finished dumping file: {}".format(filepath))
    print("Done!")
``` |
[
{
"content": "Here is the code block:\n```python\nfrom __future__ import print_function\nimport httplib2\nimport os\n\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\nimport datetime\n\ntry:\n import argparse\n flag... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\nfrom __future__ import print_function\nimport httplib2\nimport os\n\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\nimport datetime\n\ntry:\n import ar... | ```python
from __future__ import print_function
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import datetime
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/calendar-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Calendar API Python Quickstart'
def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Credentials are cached in ~/.credentials/calendar-python-quickstart.json
    (the directory is created on first use).

    Returns:
        Credentials, the obtained credential.
    """
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
                                   'calendar-python-quickstart.json')

    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        # No (valid) cached credentials: run the interactive OAuth2 flow.
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else: # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def main():
    """Shows basic usage of the Google Calendar API.

    Creates a Google Calendar API service object and outputs a list of the next
    10 events on the user's calendar, then lists all calendars (paginated),
    shows one specific shared calendar, and finally prints all upcoming events
    of that shared calendar (paginated).
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('calendar', 'v3', http=http)

    now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
    print('Getting the upcoming 10 events')
    eventsResult = service.events().list(
        calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,
        orderBy='startTime').execute()
    events = eventsResult.get('items', [])

    if not events:
        print('No upcoming events found.')
    for event in events:
        # All-day events carry 'date' instead of 'dateTime'.
        start = event['start'].get('dateTime', event['start'].get('date'))
        print(start, event['summary'])

    # List every calendar the user can see, following pagination tokens.
    page_token = None
    while True:
        calendar_list = service.calendarList().list(pageToken=page_token).execute()
        for calendar_list_entry in calendar_list['items']:
            print('C: ' + 'id: ' + calendar_list_entry['id'] + ' summary: ' + calendar_list_entry['summary'])
        page_token = calendar_list.get('nextPageToken')
        if not page_token:
            break

    print('')
    print('')

    service.calendarList()
    # Hard-coded shared calendar id (NOTE: specific to the author's account).
    calendar_list_entry = service.calendarList().get(calendarId='mergtn05h7dbffq2b4n2941j9k@group.calendar.google.com').execute()
    print('F: ' + 'id: ' + calendar_list_entry['id'] + ' summary: ' + calendar_list_entry['summary'])

    print('')
    print('')

    # Print every upcoming event of the shared calendar, page by page.
    page_token = None
    while True:
        events = service.events().list(calendarId='mergtn05h7dbffq2b4n2941j9k@group.calendar.google.com', pageToken=page_token, singleEvents=True, orderBy='startTime', timeMin=now).execute()
        for event in events['items']:
            start = event['start'].get('dateTime', event['start'].get('date'))
            print (start + ": " + event['summary'])
        page_token = events.get('nextPageToken')
        if not page_token:
            break


if __name__ == '__main__':
    main()
``` |
[
{
"content": "```python\nclass bokehLine(object):\n\tdef __init__(self, line, symbol = None, viewNum = None, parent = None):\n\t\tself.line = line\n\t\tself.symbol = symbol\n\t\tself.viewNum = viewNum\n\t\tself.style = None\n\t\tself.val = {'name' : self.line.name, \n\t\t\t\t\t'color' : self.line.lin... | [
{
"content": "<|memory_start|>```python\nclass bokehLine(object):\n\tdef __init__(self, line, symbol = None, viewNum = None, parent = None):\n\t\tself.line = line\n\t\tself.symbol = symbol\n\t\tself.viewNum = viewNum\n\t\tself.style = None\n\t\tself.val = {'name' : self.line.name, \n\t\t\t\t\t'color' ... | ```python
class bokehLine(object):
    """Pair a Bokeh line glyph with extra display metadata (style, symbol,
    view number) and expose a single dict snapshot of its settings.

    `val` always holds the current name/color/width/style/symbol/visible/
    viewNum; the same dict object is mutated in place on every update.
    """

    def __init__(self, line, symbol = None, viewNum = None, parent = None):
        self.line = line
        self.symbol = symbol
        self.viewNum = viewNum
        self.style = None
        # Snapshot of the current settings; kept in sync by line_val().
        self.val = {
            'name': line.name,
            'color': line.line_color,
            'width': line.line_width,
            'style': None,
            'symbol': symbol,
            'visible': line.visible,
            'viewNum': viewNum,
        }

    def line_val(self, name = None, color = None, width = None,
                 style = None, symbol = None, visible = None, viewNum = None):
        """Apply any provided settings to the glyph/metadata, then refresh
        and return the `val` snapshot.

        name/width/visible/viewNum are applied whenever they are not None;
        color/style/symbol are applied only when truthy (so '' is ignored).
        """
        if name is not None:
            self.line.name = name
        if color:
            self.line.line_color = color
        if width is not None:
            self.line.line_width = width
        if style:
            self.style = style
        if symbol:
            self.symbol = symbol
        if visible is not None:
            self.line.visible = visible
        if viewNum is not None:
            self.viewNum = viewNum

        # One merged in-place update keeps external references to val valid.
        self.val.update({
            'name': self.line.name,
            'color': self.line.line_color,
            'width': self.line.line_width,
            'style': self.style,
            'symbol': self.symbol,
            'visible': self.line.visible,
            'viewNum': self.viewNum,
        })
        return self.val
``` |
[
{
"content": "Here is the code content:\n```python\nfrom functools import partial\nimport json\n\nfrom PyQt5.QtCore import QCoreApplication, Qt\nfrom PyQt5.QtGui import QCursor, QIcon, QKeySequence\nfrom PyQt5.QtWidgets import (\n QMainWindow,\n QMenu,\n)\n\nfrom plover import log\nfrom plover.oslayer imp... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\nfrom functools import partial\nimport json\n\nfrom PyQt5.QtCore import QCoreApplication, Qt\nfrom PyQt5.QtGui import QCursor, QIcon, QKeySequence\nfrom PyQt5.QtWidgets import (\n QMainWindow,\n QMenu,\n)\n\nfrom plover import log\nfrom pl... | ```python
from functools import partial
import json
from PyQt5.QtCore import QCoreApplication, Qt
from PyQt5.QtGui import QCursor, QIcon, QKeySequence
from PyQt5.QtWidgets import (
QMainWindow,
QMenu,
)
from plover import log
from plover.oslayer import wmctrl
from plover.registry import registry
from plover.resource import resource_filename
from plover.gui_qt.log_qt import NotificationHandler
from plover.gui_qt.main_window_ui import Ui_MainWindow
from plover.gui_qt.config_window import ConfigWindow
from plover.gui_qt.about_dialog import AboutDialog
from plover.gui_qt.trayicon import TrayIcon
from plover.gui_qt.utils import WindowState, find_menu_actions
class MainWindow(QMainWindow, Ui_MainWindow, WindowState):
ROLE = 'main'
    def __init__(self, engine, use_qt_notifications):
        """Build the Plover main window and wire it to the engine.

        Sets up (in order): the dictionaries pane and its edit menu, the
        tray icon and its popup menu, the tools bar/menu populated from
        'gui.qt.tool' plugins, the machine selector, window state, engine
        command signals — then loads the configuration and starts the engine.

        :param engine: the steno engine driving this UI.
        :param use_qt_notifications: route log records to tray notifications.
        """
        super(MainWindow, self).__init__()
        self.setupUi(self)
        # Only present on macOS builds of Qt.
        if hasattr(self, 'setUnifiedTitleAndToolBarOnMac'):
            self.setUnifiedTitleAndToolBarOnMac(True)
        self._engine = engine
        self._active_dialogs = {}
        self._dialog_class = {
            'about' : AboutDialog,
            'configuration' : ConfigWindow,
        }
        all_actions = find_menu_actions(self.menubar)
        # Dictionaries.
        self.dictionaries = self.scroll_area.widget()
        self.dictionaries.add_translation.connect(self._add_translation)
        self.dictionaries.setFocus()
        edit_menu = all_actions['menu_Edit'].menu()
        edit_menu.addAction(self.dictionaries.action_Undo)
        edit_menu.addSeparator()
        edit_menu.addMenu(self.dictionaries.menu_AddDictionaries)
        edit_menu.addAction(self.dictionaries.action_EditDictionaries)
        edit_menu.addAction(self.dictionaries.action_RemoveDictionaries)
        edit_menu.addSeparator()
        edit_menu.addAction(self.dictionaries.action_MoveDictionariesUp)
        edit_menu.addAction(self.dictionaries.action_MoveDictionariesDown)
        # Tray icon.
        self._trayicon = TrayIcon()
        self._trayicon.enable()
        self._trayicon.clicked.connect(self._engine.toggle_output)
        if use_qt_notifications:
            handler = NotificationHandler()
            handler.emitSignal.connect(self._trayicon.log)
            log.add_handler(handler)
        popup_menu = QMenu()
        # Empty string entries become separators in the tray popup menu.
        for action_name in (
            'action_ToggleOutput',
            'action_Reconnect',
            '',
            'menu_Tools',
            '',
            'action_Configure',
            '',
            'menu_Help',
            '',
            'action_Show',
            'action_Quit',
        ):
            if action_name:
                popup_menu.addAction(all_actions[action_name])
            else:
                popup_menu.addSeparator()
        self._trayicon.set_menu(popup_menu)
        engine.signal_connect('machine_state_changed', self._trayicon.update_machine_state)
        engine.signal_connect('quit', self.on_quit)
        self.action_Quit.triggered.connect(engine.quit)
        # Populate tools bar/menu.
        tools_menu = all_actions['menu_Tools'].menu()
        # Toolbar popup menu for selecting which tools are shown.
        self.toolbar_menu = QMenu()
        self.toolbar.setContextMenuPolicy(Qt.CustomContextMenu)
        self.toolbar.customContextMenuRequested.connect(
            lambda: self.toolbar_menu.popup(QCursor.pos())
        )
        for tool_plugin in registry.list_plugins('gui.qt.tool'):
            tool = tool_plugin.obj
            action_parameters = []
            if tool.ICON is not None:
                icon = tool.ICON
                # Internal QT resources start with a `:`.
                if not icon.startswith(':'):
                    icon = resource_filename(icon)
                action_parameters.append(QIcon(icon))
            action_parameters.append(tool.TITLE)
            toolbar_action = None
            # Each tool gets three actions: tools menu, toolbar, and a
            # checkable visibility toggle in the toolbar context menu.
            # NOTE: iteration order matters — the toolbar action must be
            # created before the (checkable) toolbar_menu action toggles it.
            for parent in (tools_menu, self.toolbar, self.toolbar_menu):
                action = parent.addAction(*action_parameters)
                action.setObjectName(tool_plugin.name)
                if tool.__doc__ is not None:
                    action.setToolTip(tool.__doc__)
                if tool.SHORTCUT is not None:
                    action.setShortcut(QKeySequence.fromString(tool.SHORTCUT))
                if parent == self.toolbar_menu:
                    action.setCheckable(True)
                    action.setChecked(True)
                    assert toolbar_action is not None
                    action.toggled.connect(toolbar_action.setVisible)
                else:
                    if parent == self.toolbar:
                        toolbar_action = action
                    action.triggered.connect(partial(self._activate_dialog,
                                                     tool_plugin.name,
                                                     args=()))
            self._dialog_class[tool_plugin.name] = tool
        engine.signal_connect('output_changed', self.on_output_changed)
        # Machine.
        self.machine_type.addItems(
            _(plugin.name)
            for plugin in registry.list_plugins('machine')
        )
        engine.signal_connect('config_changed', self.on_config_changed)
        engine.signal_connect('machine_state_changed',
                              lambda machine, state:
                              self.machine_state.setText(_(state.capitalize()))
                              )
        self.restore_state()
        # Commands.
        engine.signal_connect('add_translation', partial(self._add_translation, manage_windows=True))
        engine.signal_connect('focus', self._focus)
        engine.signal_connect('configure', partial(self._configure, manage_windows=True))
        engine.signal_connect('lookup', partial(self._activate_dialog, 'lookup',
                                                manage_windows=True))
        # Load the configuration (but do not start the engine yet).
        if not engine.load_config():
            self.on_configure()
        # Apply configuration settings.
        config = self._engine.config
        self.machine_type.setCurrentText(config['machine_type'])
        self._configured = False
        self.dictionaries.on_config_changed(config)
        self.set_visible(not config['start_minimized'])
        # Start the engine.
        engine.start()
def set_visible(self, visible):
if visible:
self.show()
else:
if self._trayicon.is_enabled():
self.hide()
else:
self.showMinimized()
def _activate_dialog(self, name, args=(), manage_windows=False):
if manage_windows:
previous_window = wmctrl.GetForegroundWindow()
dialog = self._active_dialogs.get(name)
if dialog is None:
dialog_class = self._dialog_class[name]
dialog = self._active_dialogs[name] = dialog_class(self._engine, *args)
dialog.setWindowIcon(self.windowIcon())
def on_finished():
del self._active_dialogs[name]
dialog.deleteLater()
if manage_windows:
wmctrl.SetForegroundWindow(previous_window)
dialog.finished.connect(on_finished)
dialog.show()
dialog.activateWindow()
dialog.raise_()
def _add_translation(self, dictionary=None, manage_windows=False):
if not dictionary:
dictionary = None
self._activate_dialog('add_translation', args=(dictionary,),
manage_windows=manage_windows)
def _focus(self):
self.set_visible(True)
self.activateWindow()
self.raise_()
def _configure(self, manage_windows=False):
self._activate_dialog('configuration', manage_windows=manage_windows)
def _lookup(self, manage_windows=False):
self._activate_dialog('lookup', manage_windows=manage_windows)
def _restore_state(self, settings):
if settings.contains('hidden_toolbar_tools'):
hidden_toolbar_tools = json.loads(settings.value('hidden_toolbar_tools'))
for action in self.toolbar_menu.actions():
if action.objectName() in hidden_toolbar_tools:
action.setChecked(False)
def _save_state(self, settings):
hidden_toolbar_tools = set()
for action in self.toolbar_menu.actions():
if not action.isChecked():
hidden_toolbar_tools.add(action.objectName())
settings.setValue('hidden_toolbar_tools', json.dumps(list(sorted(hidden_toolbar_tools))))
def on_config_changed(self, config_update):
if 'machine_type' in config_update:
self.machine_type.setCurrentText(config_update['machine_type'])
if not self._configured:
self._configured = True
if config_update.get('show_suggestions_display', False):
self._activate_dialog('suggestions')
if config_update.get('show_stroke_display', False):
self._activate_dialog('paper_tape')
def on_machine_changed(self, machine_type):
self._engine.config = { 'machine_type': machine_type }
def on_output_changed(self, enabled):
self._trayicon.update_output(enabled)
self.output_enable.setChecked(enabled)
self.output_disable.setChecked(not enabled)
self.action_ToggleOutput.setChecked(enabled)
def on_toggle_output(self, enabled):
self._engine.output = enabled
def on_enable_output(self):
self.on_toggle_output(True)
def on_disable_output(self):
self.on_toggle_output(False)
def on_configure(self):
self._configure()
def on_reconnect(self):
self._engine.reset_machine()
def on_manage_dictionaries(self):
self._activate_dialog('dictionary_manager')
def on_about(self):
self._activate_dialog('about')
def on_quit(self):
for dialog in list(self._active_dialogs.values()):
dialog.close()
self.save_state()
self._trayicon.disable()
self.hide()
QCoreApplication.quit()
def on_show(self):
self._focus()
def closeEvent(self, event):
self.hide()
if not self._trayicon.is_enabled():
self._engine.quit()
event.ignore()
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# Copyright 2012 Hewlett-Packard Development Company, L.P. All Rights Reserved.\n# Copyright 2012 Managed I.T.\n#\n# Author: Patrick Galbraith <patg@hp.com>\n# Author: Kiall Mac Innes <kiall@managedit.ie>\n#\n# License... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# Copyright 2012 Hewlett-Packard Development Company, L.P. All Rights Reserved.\n# Copyright 2012 Managed I.T.\n#\n# Author: Patrick Galbraith <patg@hp.com>\n# Author: Kiall Mac Innes <kiall@managedit.i... | ```python
# Copyright 2012 Hewlett-Packard Development Company, L.P. All Rights Reserved.
# Copyright 2012 Managed I.T.
#
# Author: Patrick Galbraith <patg@hp.com>
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import threading
from oslo.config import cfg
from oslo.db import options
from sqlalchemy.sql import select
from designate.openstack.common import excutils
from designate.openstack.common import log as logging
from designate.i18n import _LC
from designate import exceptions
from designate.backend import base
from designate.backend.impl_powerdns import tables
from designate.sqlalchemy import session
from designate.sqlalchemy.expressions import InsertFromSelect
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
TSIG_SUPPORTED_ALGORITHMS = ['hmac-md5']
CONF.register_group(cfg.OptGroup(
name='backend:powerdns', title="Configuration for Powerdns Backend"
))
CONF.register_opts([
cfg.StrOpt('domain-type', default='NATIVE', help='PowerDNS Domain Type'),
cfg.ListOpt('also-notify', default=[], help='List of additional IPs to '
'send NOTIFYs to'),
] + options.database_opts, group='backend:powerdns')
# Overide the default DB connection registered above, to avoid name conflicts
# between the Designate and PowerDNS databases.
CONF.set_default('connection', 'sqlite:///$state_path/powerdns.sqlite',
group='backend:powerdns')
def _map_col(keys, col):
return dict([(keys[i], col[i]) for i in range(len(keys))])
class PowerDNSBackend(base.Backend):
__plugin_name__ = 'powerdns'
def __init__(self, *args, **kwargs):
super(PowerDNSBackend, self).__init__(*args, **kwargs)
self.local_store = threading.local()
def start(self):
super(PowerDNSBackend, self).start()
@property
def session(self):
# NOTE: This uses a thread local store, allowing each greenthread to
# have it's own session stored correctly. Without this, each
# greenthread may end up using a single global session, which
# leads to bad things happening.
global LOCAL_STORE
if not hasattr(self.local_store, 'session'):
self.local_store.session = session.get_session(self.name)
return self.local_store.session
def _create(self, table, values):
query = table.insert()
resultproxy = self.session.execute(query, values)
# Refetch the row, for generated columns etc
query = select([table])\
.where(table.c.id == resultproxy.inserted_primary_key[0])
resultproxy = self.session.execute(query)
return _map_col(query.columns.keys(), resultproxy.fetchone())
def _update(self, table, values, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = table.update()\
.where(id_col == values[id_col.name])\
.values(**values)
resultproxy = self.session.execute(query)
if resultproxy.rowcount != 1:
raise exc_notfound()
# Refetch the row, for generated columns etc
query = select([table])\
.where(id_col == values[id_col.name])
resultproxy = self.session.execute(query)
return _map_col(query.columns.keys(), resultproxy.fetchone())
def _get(self, table, id_, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = select([table])\
.where(id_col == id_)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
if len(results) != 1:
raise exc_notfound()
# Map col keys to values in result
return _map_col(query.columns.keys(), results[0])
def _delete(self, table, id_, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = table.delete()\
.where(id_col == id_)
resultproxy = self.session.execute(query)
if resultproxy.rowcount != 1:
raise exc_notfound()
# TSIG Key Methods
def create_tsigkey(self, context, tsigkey):
"""Create a TSIG Key"""
if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:
raise exceptions.NotImplemented('Unsupported algorithm')
values = {
'designate_id': tsigkey['id'],
'name': tsigkey['name'],
'algorithm': tsigkey['algorithm'],
'secret': base64.b64encode(tsigkey['secret'])
}
self._create(tables.tsigkeys, values)
# NOTE(kiall): Prepare and execute query to install this TSIG Key on
# every domain. We use a manual query here since anything
# else would be impossibly slow.
query_select = select([
tables.domains.c.id,
"'TSIG-ALLOW-AXFR'",
"'%s'" % tsigkey['name']]
)
columns = [
tables.domain_metadata.c.domain_id,
tables.domain_metadata.c.kind,
tables.domain_metadata.c.content,
]
query = InsertFromSelect(tables.domain_metadata, query_select,
columns)
# NOTE(kiall): A TX is required for, at the least, SQLite.
self.session.begin()
self.session.execute(query)
self.session.commit()
def update_tsigkey(self, context, tsigkey):
"""Update a TSIG Key"""
values = self._get(
tables.tsigkeys,
tsigkey['id'],
exceptions.TsigKeyNotFound,
id_col=tables.tsigkeys.c.designate_id)
# Store a copy of the original name..
original_name = values['name']
values.update({
'name': tsigkey['name'],
'algorithm': tsigkey['algorithm'],
'secret': base64.b64encode(tsigkey['secret'])
})
self._update(tables.tsigkeys, values,
id_col=tables.tsigkeys.c.designate_id,
exc_notfound=exceptions.TsigKeyNotFound)
# If the name changed, Update the necessary DomainMetadata records
if original_name != tsigkey['name']:
query = tables.domain_metadata.update()\
.where(tables.domain_metadata.c.kind == 'TSIG_ALLOW_AXFR')\
.where(tables.domain_metadata.c.content == original_name)
query.values(content=tsigkey['name'])
self.session.execute(query)
def delete_tsigkey(self, context, tsigkey):
"""Delete a TSIG Key"""
try:
# Delete this TSIG Key itself
self._delete(
tables.tsigkeys, tsigkey['id'],
exceptions.TsigKeyNotFound,
id_col=tables.tsigkeys.c.designate_id)
except exceptions.TsigKeyNotFound:
# If the TSIG Key is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a TSIG key which is '
'not present in the backend. ID: %s') %
tsigkey['id'])
return
query = tables.domain_metadata.delete()\
.where(tables.domain_metadata.c.kind == 'TSIG-ALLOW-AXFR')\
.where(tables.domain_metadata.c.content == tsigkey['name'])
self.session.execute(query)
# Domain Methods
def create_domain(self, context, domain):
try:
self.session.begin()
servers = self.central_service.find_servers(self.admin_context)
domain_values = {
'designate_id': domain['id'],
'name': domain['name'].rstrip('.'),
'master': servers[0]['name'].rstrip('.'),
'type': CONF['backend:powerdns'].domain_type,
'account': context.tenant
}
domain_ref = self._create(tables.domains, domain_values)
# Install all TSIG Keys on this domain
query = select([tables.tsigkeys.c.name])
resultproxy = self.session.execute(query)
values = [i for i in resultproxy.fetchall()]
self._update_domainmetadata(domain_ref['id'], 'TSIG-ALLOW-AXFR',
values)
# Install all Also Notify's on this domain
self._update_domainmetadata(domain_ref['id'], 'ALSO-NOTIFY',
CONF['backend:powerdns'].also_notify)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def update_domain(self, context, domain):
domain_ref = self._get(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
try:
self.session.begin()
# Update the Records TTLs where necessary
query = tables.records.update()\
.where(tables.records.c.domain_id == domain_ref['id'])
query = query.where(tables.records.c.inherit_ttl == True) # noqa\
query = query.values(ttl=domain['ttl'])
self.session.execute(query)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def delete_domain(self, context, domain):
try:
domain_ref = self._get(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
except exceptions.DomainNotFound:
# If the Domain is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a domain which is '
'not present in the backend. ID: %s') %
domain['id'])
return
self._delete(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
# Ensure the records are deleted
query = tables.records.delete()\
.where(tables.records.c.domain_id == domain_ref['id'])
self.session.execute(query)
# Ensure domainmetadata is deleted
query = tables.domain_metadata.delete()\
.where(tables.domain_metadata.c.domain_id == domain_ref['id'])
self.session.execute(query)
# RecordSet Methods
def create_recordset(self, context, domain, recordset):
try:
self.session.begin(subtransactions=True)
# Create all the records..
for record in recordset.records:
self.create_record(context, domain, recordset, record)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def update_recordset(self, context, domain, recordset):
# TODO(kiall): This is a total kludge. Intended as the simplest
# possible fix for the issue. This needs to be
# re-implemented correctly.
try:
self.session.begin(subtransactions=True)
self.delete_recordset(context, domain, recordset)
self.create_recordset(context, domain, recordset)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def delete_recordset(self, context, domain, recordset):
# Ensure records are deleted
query = tables.records.delete()\
.where(tables.records.c.designate_recordset_id == recordset['id'])
self.session.execute(query)
# Record Methods
def create_record(self, context, domain, recordset, record):
domain_ref = self._get(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
content = self._sanitize_content(recordset['type'], record['data'])
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_values = {
'designate_id': record['id'],
'designate_recordset_id': record['recordset_id'],
'domain_id': domain_ref['id'],
'name': recordset['name'].rstrip('.'),
'type': recordset['type'],
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'auth': self._is_authoritative(domain, recordset, record)
}
self._create(tables.records, record_values)
def update_record(self, context, domain, recordset, record):
record_ref = self._get_record(record['id'])
content = self._sanitize_content(recordset['type'], record['data'])
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_ref.update({
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'auth': self._is_authoritative(domain, recordset, record)
})
self._update(tables.records, record_ref,
exc_notfound=exceptions.RecordNotFound)
def delete_record(self, context, domain, recordset, record):
try:
record_ref = self._get(tables.records, record['id'],
exceptions.RecordNotFound,
id_col=tables.records.c.designate_id)
except exceptions.RecordNotFound:
# If the Record is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a record which is '
'not present in the backend. ID: %s') %
record['id'])
else:
self._delete(tables.records, record_ref['id'],
exceptions.RecordNotFound)
# Internal Methods
def _update_domainmetadata(self, domain_id, kind, values=None,
delete=True):
"""Updates a domain's metadata with new values"""
# Fetch all current metadata of the specified kind
values = values or []
query = select([tables.domain_metadata.c.content])\
.where(tables.domain_metadata.c.domain_id == domain_id)\
.where(tables.domain_metadata.c.kind == kind)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
for metadata_id, content in results:
if content not in values:
if delete:
LOG.debug('Deleting stale domain metadata: %r' %
([domain_id, kind, content],))
# Delete no longer necessary values
# We should never get a notfound here, so UnknownFailure is
# a reasonable choice.
self._delete(tables.domain_metadata, metadata_id,
exceptions.UnknownFailure)
else:
# Remove pre-existing values from the list of values to insert
values.remove(content)
# Insert new values
for value in values:
LOG.debug('Inserting new domain metadata: %r' %
([domain_id, kind, value],))
self._create(
tables.domain_metadata,
{
"domain_id": domain_id,
"kind": kind,
"content": value
})
def _is_authoritative(self, domain, recordset, record):
# NOTE(kiall): See http://doc.powerdns.com/dnssec-modes.html
if recordset['type'] == 'NS' and recordset['name'] != domain['name']:
return False
else:
return True
def _sanitize_content(self, type, content):
if type in ('CNAME', 'MX', 'SRV', 'NS', 'PTR'):
return content.rstrip('.')
if type in ('TXT', 'SPF'):
return '"%s"' % content.replace('"', '\\"')
return content
def _get_record(self, record_id=None, domain=None, type_=None):
query = select([tables.records])
if record_id:
query = query.where(tables.records.c.designate_id == record_id)
if type_:
query = query.where(tables.records.c.type == type_)
if domain:
query = query.where(tables.records.c.domain_id == domain['id'])
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
if len(results) < 1:
raise exceptions.RecordNotFound('No record found')
elif len(results) > 1:
raise exceptions.RecordNotFound('Too many records found')
else:
return _map_col(query.columns.keys(), results[0])
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.