"""Paint, for drawing shapes.
Exercises
1. Add a color.
2. Complete circle.
3. Complete rectangle.
4. Complete triangle.
5. Add width parameter.
"""
from turtle import *
from freegames import vector
def line(start, end):
"Draw line from start to end."
up()
goto(start.x, start.y)
down()
goto(end.x, end.y)
def square(start, end):
"Draw square from start to end."
up()
goto(start.x, start.y)
down()
begin_fill()
for count in range(4):
forward(end.x - start.x)
left(90)
end_fill()
def circle(start, end):
"Draw circle from start to end."
pass # TODO
def rectangle(start, end):
"Draw rectangle from start to end."
pass # TODO
def triangle(start, end):
"Draw triangle from start to end."
pass # TODO
def tap(x, y):
"Store starting point or draw shape."
start = state['start']
if start is None:
state['start'] = vector(x, y)
else:
shape = state['shape']
end = vector(x, y)
shape(start, end)
state['start'] = None
def store(key, value):
"Store value in state at key."
state[key] = value
state = {'start': None, 'shape': line}
setup(420, 420, 370, 0)
onscreenclick(tap)
listen()
onkey(undo, 'u')
onkey(lambda: color('black'), 'K')
onkey(lambda: color('white'), 'W')
onkey(lambda: color('green'), 'G')
onkey(lambda: color('blue'), 'B')
onkey(lambda: color('red'), 'R')
onkey(lambda: store('shape', line), 'l')
onkey(lambda: store('shape', square), 's')
onkey(lambda: store('shape', circle), 'c')
onkey(lambda: store('shape', rectangle), 'r')
onkey(lambda: store('shape', triangle), 't')
done()
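# A possible completion of exercise 2, left as a comment sketch since the
# bodies above are deliberately open exercises (note the local circle()
# shadows turtle.circle, so the module is referenced explicitly):
#
#     import turtle
#
#     def circle(start, end):
#         "Draw circle from start to end."
#         up()
#         goto(start.x, start.y)
#         down()
#         begin_fill()
#         turtle.circle(abs(end.x - start.x))  # radius from drag distance
#         end_fill()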
|
import uuid
from datetime import datetime
class Sheet(object):
def __init__(self, db_client, s3_client, redis_client):
self._db_client = db_client
self._s3_client = s3_client
self._redis_client = redis_client
def get(self, user_id):
memoized = self._redis_client._get(user_id)
        if memoized is not None:
return memoized
value = self._db_client.get('Sheet', user_id)
self._redis_client._set(user_id, value)
return value
def set_data(self, data):
self._data = {
'id': data.get('id', None),
'title': data.get('title', None),
'description': data.get('description', None),
'image': data.get('image', None),
'composition_date': data.get('composition_date', None),
'artist': data.get('artist', None),
'duration': data.get('duration', None),
'date_added': data.get('date_added', None),
'date_modified': data.get('date_modified', None),
'signature': data.get('signature', None),
'href': data.get('href', None),
'created_by': data.get('created_by', None),
'tempo': data.get('tempo', None),
'user_id': data.get('user_id', None),
'upload_photo': data.get('upload_photo', None)
}
print("dynamodb/models/sheet.py#set_data - Set data")
print(self._data)
return self._data
def save(self):
filename = "pdf/%s.pdf"%str(uuid.uuid1())
self._data['id'] = str(uuid.uuid1())
self._data['date_added'] = str(datetime.now())
self._data['date_modified'] = self._data['date_added']
image_base64 = self._data['upload_photo']
del self._data['upload_photo']
        # Upload the file to S3
image_url = self._s3_client.save64(filename, image_base64)
self._data['image'] = image_url
        # Clear the user's cache entry so it gets refreshed
self._redis_client._delete(self._data['user_id'])
        # Save the record to the database
return self._db_client.insert('Sheet', self._data)
def update(self, user_id):
        # Clear the user's cache entry so it gets refreshed
self._redis_client._delete(user_id)
return self._db_client.update('Sheet', self._data)
def delete(self, ids, user_id):
        # Clear the user's cache entry so it gets refreshed
self._redis_client._delete(user_id)
return self._db_client.delete_items('Sheet', ids)
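# Usage sketch for the read path above (the client objects are hypothetical
# stand-ins; the real wrappers are injected by the application):
#
#     sheet = Sheet(db_client, s3_client, redis_client)
#     sheet.get('user-123')  # first call hits the database and warms the cache
#     sheet.get('user-123')  # second call is served from Redis
#
# Every mutation (save/update/delete) deletes the per-user cache entry, so the
# next get() repopulates it from the database (a cache-aside pattern).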
|
import os
import numpy
import matplotlib.pyplot as plt
from matplotlib.pyplot import MultipleLocator
# pylint: disable=redefined-outer-name
# pylint: disable=exec-used
def fetch_log_files(directory):
log_files = []
files = os.listdir(directory)
for f in files:
if os.path.splitext(f)[1] == '.txt':
log_files.append(f)
return log_files
def fetch_func_perf(func_list, log_files):
parser_key1 = 'time:'
    # dynamically declare per-function result arrays (res_<func_name>) via exec
res = []
for func_name in func_list:
exec('res_{}=[]'.format(func_name))
for func_name in func_list:
exec('tmp_{}=[]'.format(func_name))
for log_file in log_files:
with open(log_dir + log_file, 'r') as f:
for line in f:
line = line.split()
for func_name in func_list:
parser_key2 = 'run_test_'+func_name+','
if parser_key1 in line and parser_key2 in line:
cur_tmp_name = 'tmp_'+func_name
tmp_arr = locals()[cur_tmp_name]
tmp_arr.append(float(line[-2]))
for func_name in func_list:
target_res_arr_name = 'res_'+func_name
source_tmp_arr_name = 'tmp_'+func_name
arr = locals()[target_res_arr_name]
tmp_arr = locals()[source_tmp_arr_name]
if tmp_arr:
arr.append(numpy.mean(tmp_arr))
for func_name in func_list:
res_arr = 'res_' + func_name
res.append(locals()[res_arr])
return res
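# An exec-free sketch of the same collection logic (illustrative only; note
# that it resets the per-file buffer for every log, whereas the loop above
# keeps accumulating across files, so results can differ if that was intended):
def fetch_func_perf_dict(func_list, log_files, log_dir):
    res = {name: [] for name in func_list}
    for log_file in log_files:
        tmp = {name: [] for name in func_list}
        with open(os.path.join(log_dir, log_file), 'r') as f:
            for line in f:
                tokens = line.split()
                for name in func_list:
                    if 'time:' in tokens and 'run_test_' + name + ',' in tokens:
                        tmp[name].append(float(tokens[-2]))
        for name in func_list:
            if tmp[name]:
                res[name].append(numpy.mean(tmp[name]))
    return [res[name] for name in func_list]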
def perf_stability_alarm(func_name, res_func_arr):
warning_str = ' [Warning] : The performance of ' + str(func_name) + ' fluctuates greatly!'
# warning based on standard deviation
std_deviation = numpy.std(res_func_arr, ddof=1)
# print("Performance test standard deviation of %s: "%func_name,end='')
# print(std_deviation)
if std_deviation > std_threshold:
print(warning_str)
# print perf_test result
# print('perf_test result for ' + str(func_name) + ' :')
# print(res_func_arr)
return 1
return 0
def plot_perf_regression(func_name, res_func_arr):
perf_picture = plot_dir + str(func_name) + '_perf.png'
perf_fig_title = str(func_name) + ' Performance Fig'
# plot
index1 = list(range(1, len(res_func_arr) + 1))
index2 = list(range(1, len(res_func_arr) + 1))
res_mock = numpy.random.rand(len(index2)) * 10 + numpy.mean(res_func_arr)
plt.figure(figsize=(16, 4)) # picture size
# plt.plot(index,res_func_arr,color="b--",label="$input_1k$",linewidth=1)
plt.plot(index1, res_func_arr, label="$10k$", color="blue", linewidth=1)
plt.plot(index2, res_mock, label="$110k(Simulated-contrast-test)$", color="red", linestyle='--', linewidth=1)
plt.xlabel("Version ") # X label
plt.ylabel("Cost /ms") # Y label
plt.title(perf_fig_title)
x_major_locator = MultipleLocator(1)
y_major_locator = MultipleLocator(10)
ax = plt.gca()
ax.xaxis.set_major_locator(x_major_locator)
ax.yaxis.set_major_locator(y_major_locator)
plt.xlim(0, len(res_func_arr) + 2)
plt.ylim(min(res_func_arr) - 10, max(res_func_arr) + 10)
plt.legend()
# plt.show()
plt.savefig(perf_picture)
plt.close('all')
# print('plot %s performance regression picture success.'%func_name)
func_list_arr = ['st_geomfromgeojson',
'st_geomfromgeojson2',
'st_curvetoline',
'st_point',
'st_isvalid_1',
'st_isvalid_curve',
'st_intersection',
'st_intersection_curve',
'st_convexhull',
'st_convexhull_curve',
'st_buffer',
'st_buffer_curve',
'st_buffer_curve1',
'st_envelope',
'st_envelope_curve',
'st_centroid',
'st_centroid_curve',
'st_length',
'st_length_curve',
'st_area',
'st_area_curve',
'st_distance',
'st_distance_curve',
'st_issimple',
'st_issimple_curve',
'st_npoints',
'st_geometrytype',
'st_geometrytype_curve',
'st_intersects',
'st_intersects_curve',
'st_contains',
'st_contains_curve',
'st_within',
'st_within_curve',
'st_equals_1',
'st_equals_2',
'st_crosses',
'st_crosses_curve',
'st_overlaps',
'st_overlaps_curve',
'st_touches',
'st_touches_curve',
'st_makevalid',
'st_precisionreduce',
'st_polygonfromenvelope',
'st_simplifypreservetopology',
'st_simplifypreservetopology_curve',
'st_hausdorffdistance',
'st_hausdorffdistance_curve',
'st_pointfromtext',
'st_polygonfromtext',
'st_linestringfromtext',
'st_geomfromtext',
'st_geomfromwkt',
'st_astext',
'st_buffer1',
'st_buffer2',
'st_buffer3',
'st_buffer4',
'st_buffer5',
'st_buffer6',
'envelope_aggr_1',
'envelope_aggr_curve',
'envelope_aggr_2',
'union_aggr_2',
'union_aggr_curve',
'st_transform',
'st_transform1',
'none']
log_dir = 'perf/log/'
plot_dir = 'perf/picture/'
# Standard deviation threshold for the performance stability alarm
std_threshold = 3.0
# main invocation
if __name__ == "__main__":
# res_set is a list that contains historical performance data for all gis functions
res_set = fetch_func_perf(func_list_arr, fetch_log_files(log_dir))
assert len(func_list_arr) == len(res_set)
    # produce a specific result variable in main for every function in func_list_arr
for i, func_name in enumerate(func_list_arr):
exec('res_{}={}'.format(func_name, res_set[i]))
# for func_name in func_list_arr:
# res_arr = 'res_' + func_name
# print('------ %s performance history data ----' % func_name)
# print(locals()[res_arr])
alarm_num = 0
plot_num = 0
plot_failed_func = []
alarm_func = []
for func_name in func_list_arr:
res_arr = 'res_' + func_name
# plot
cur_res_arr = locals()[res_arr]
if cur_res_arr:
plot_perf_regression(func_name, cur_res_arr)
plot_num = plot_num + 1
# performance test stability
if perf_stability_alarm(func_name, locals()[res_arr]):
alarm_num = alarm_num + 1
alarm_func.append(func_name)
else:
plot_failed_func.append(func_name)
    print('Plotted %s performance regression figures.' % plot_num)
    print('Functions that could not be plotted: %s' % plot_failed_func)
    print('There are %s functions raising alarms!' % alarm_num)
    print('Alarming functions list: %s' % alarm_func)
    # specific function test examples
#plot_perf_regression('st_within',res_st_within)
#perf_stability_alarm('st_within',res_st_within)
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test-appropriate entry points into the gRPC Python Beta API."""
from grpc._adapter import _intermediary_low
from grpc.beta import implementations
def not_really_secure_channel(
host, port, channel_credentials, server_host_override):
"""Creates an insecure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
port: The port of the remote host to which to connect.
channel_credentials: The implementations.ChannelCredentials with which to
connect.
server_host_override: The target name used for SSL host name checking.
Returns:
An implementations.Channel to the remote host through which RPCs may be
conducted.
"""
hostport = '%s:%d' % (host, port)
intermediary_low_channel = _intermediary_low.Channel(
hostport, channel_credentials._low_credentials,
server_host_override=server_host_override)
return implementations.Channel(
intermediary_low_channel._internal, intermediary_low_channel)
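# Hypothetical usage (host, port, and certificate are placeholders, and the
# beta API's ssl_channel_credentials helper is assumed):
#
#     creds = implementations.ssl_channel_credentials(
#         open('ca.pem', 'rb').read(), None, None)
#     channel = not_really_secure_channel(
#         'localhost', 50051, creds, 'test.google.fr')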
|
from functools import reduce
from operator import add
import re
import six
from werkzeug.datastructures import ImmutableOrderedMultiDict
EMAIL_REGEX = r'^[^@\s]+@[^@.\s]+(\.[^@.\s]+)+$'
def get_validator(framework, content, answers):
"""
Retrieves a validator by slug contained in the framework dictionary.
"""
    if framework is None:
        raise ValueError("a framework dictionary must be provided")
    validator_cls = VALIDATORS.get(framework['slug'])
    return validator_cls(content, answers)
class DeclarationValidator(object):
email_validation_fields = []
number_string_fields = []
character_limit = None
optional_fields = set([])
def __init__(self, content, answers):
self.content = content
self.answers = answers
def get_error_messages_for_page(self, section):
all_errors = self.get_error_messages()
page_ids = section.get_question_ids()
page_errors = ImmutableOrderedMultiDict(filter(lambda err: err[0] in page_ids, all_errors))
return page_errors
def get_error_messages(self):
raw_errors_map = self.errors()
errors_map = list()
for question_id in self.all_fields():
if question_id in raw_errors_map:
question_number = self.content.get_question(question_id).get('number')
validation_message = self.get_error_message(question_id, raw_errors_map[question_id])
errors_map.append((question_id, {
'input_name': question_id,
'question': "Question {}".format(question_number)
if question_number else self.content.get_question(question_id).get('question'),
'message': validation_message,
}))
return errors_map
def get_error_message(self, question_id, message_key):
for validation in self.content.get_question(question_id).get('validations', []):
if validation['name'] == message_key:
return validation['message']
default_messages = {
'answer_required': 'You need to answer this question.',
'under_character_limit': 'Your answer must be no more than {} characters.'.format(self.character_limit),
'invalid_format': 'You must enter a valid email address.',
}
return default_messages.get(
message_key, 'There was a problem with the answer to this question')
def all_fields(self):
return reduce(add, (section.get_question_ids() for section in self.content))
def fields_with_values(self):
return set(key for key, value in self.answers.items()
if value is not None and (not isinstance(value, six.string_types) or len(value) > 0))
def errors(self):
errors_map = {}
errors_map.update(self.character_limit_errors())
errors_map.update(self.formatting_errors(self.answers))
errors_map.update(self.answer_required_errors())
return errors_map
def answer_required_errors(self):
req_fields = self.get_required_fields()
filled_fields = self.fields_with_values()
errors_map = {}
for field in req_fields - filled_fields:
errors_map[field] = 'answer_required'
return errors_map
def character_limit_errors(self):
errors_map = {}
for question_id in self.all_fields():
if self.content.get_question(question_id).get('type') in ['text', 'textbox_large']:
answer = self.answers.get(question_id) or ''
if self.character_limit is not None and len(answer) > self.character_limit:
errors_map[question_id] = "under_character_limit"
return errors_map
def formatting_errors(self, answers):
errors_map = {}
        if self.email_validation_fields:
for field in self.email_validation_fields:
if self.answers.get(field) is None or not re.match(EMAIL_REGEX, self.answers.get(field, '')):
errors_map[field] = 'invalid_format'
        if self.number_string_fields:
            for field, length in self.number_string_fields:
                if self.answers.get(field) is None or not re.match(
                    r'^\d{{{0}}}$'.format(length), self.answers.get(field, '')
                ):
errors_map[field] = 'invalid_format'
return errors_map
def get_required_fields(self):
try:
req_fields = self.required_fields
except AttributeError:
req_fields = set(self.all_fields())
# Remove optional fields
if self.optional_fields is not None:
req_fields -= set(self.optional_fields)
return req_fields
class G7Validator(DeclarationValidator):
"""
Validator for G-Cloud 7.
"""
optional_fields = set([
"SQ1-1p-i", "SQ1-1p-ii", "SQ1-1p-iii", "SQ1-1p-iv",
"SQ1-1q-i", "SQ1-1q-ii", "SQ1-1q-iii", "SQ1-1q-iv", "SQ1-1cii", "SQ1-1i-ii",
"SQ1-1j-i", "SQ1-1j-ii", "SQ4-1c", "SQ3-1k", "SQ1-1i-i"
])
email_validation_fields = set(['SQ1-1o', 'SQ1-2b'])
character_limit = 5000
def get_required_fields(self):
req_fields = super(G7Validator, self).get_required_fields()
# If you answered other to question 19 (trading status)
if self.answers.get('SQ1-1ci') == 'other (please specify)':
req_fields.add('SQ1-1cii')
# If you answered yes to question 27 (non-UK business registered in EU)
if self.answers.get('SQ1-1i-i', False):
req_fields.add('SQ1-1i-ii')
# If you answered 'licensed' or 'a member of a relevant organisation' in question 29
answer_29 = self.answers.get('SQ1-1j-i', [])
        if answer_29 and ('licensed' in answer_29 or
                          'a member of a relevant organisation' in answer_29):
req_fields.add('SQ1-1j-ii')
# If you answered yes to either question 53 or 54 (tax returns)
if self.answers.get('SQ4-1a', False) or self.answers.get('SQ4-1b', False):
req_fields.add('SQ4-1c')
# If you answered Yes to questions 39 - 51 (discretionary exclusion)
dependent_fields = [
'SQ2-2a', 'SQ3-1a', 'SQ3-1b', 'SQ3-1c', 'SQ3-1d', 'SQ3-1e', 'SQ3-1f', 'SQ3-1g',
'SQ3-1h-i', 'SQ3-1h-ii', 'SQ3-1i-i', 'SQ3-1i-ii', 'SQ3-1j'
]
if any(self.answers.get(field) for field in dependent_fields):
req_fields.add('SQ3-1k')
# If you answered No to question 26 (established in the UK)
if 'SQ5-2a' in self.answers and not self.answers['SQ5-2a']:
req_fields.add('SQ1-1i-i')
req_fields.add('SQ1-1j-i')
return req_fields
class DOSValidator(DeclarationValidator):
optional_fields = set([
"mitigatingFactors", "mitigatingFactors2", "tradingStatusOther",
# Registered in UK = no
"appropriateTradeRegisters", "appropriateTradeRegistersNumber",
"licenceOrMemberRequired", "licenceOrMemberRequiredDetails",
])
email_validation_fields = set(["contactEmailContractNotice", "primaryContactEmail"])
character_limit = 5000
def get_required_fields(self):
req_fields = super(DOSValidator, self).get_required_fields()
dependent_fields = {
# If you responded yes to any of questions 22 to 34
"mitigatingFactors": [
                'misleadingInformation', 'confidentialInformation', 'influencedContractingAuthority',
                'witheldSupportingDocuments', 'seriousMisrepresentation', 'significantOrPersistentDeficiencies',
                'distortedCompetition', 'conflictOfInterest', 'graveProfessionalMisconduct',
                'bankrupt', 'environmentalSocialLabourLaw', 'taxEvasion'
],
# If you responded yes to either 36 or 37
"mitigatingFactors2": [
"unspentTaxConvictions", "GAAR"
],
}
for target_field, fields in dependent_fields.items():
if any(self.answers.get(field) for field in fields):
req_fields.add(target_field)
# Describe your trading status
if self.answers.get('tradingStatus') == "other (please specify)":
req_fields.add('tradingStatusOther')
# If your company was not established in the UK
if self.answers.get('establishedInTheUK') is False:
req_fields.add('appropriateTradeRegisters')
# If yes to appropriate trade registers
if self.answers.get('appropriateTradeRegisters') is True:
req_fields.add('appropriateTradeRegistersNumber')
req_fields.add('licenceOrMemberRequired')
# If not 'none of the above' to licenceOrMemberRequired
if self.answers.get('licenceOrMemberRequired') in ['licensed', 'a member of a relevant organisation']:
req_fields.add('licenceOrMemberRequiredDetails')
return req_fields
class G8Validator(DOSValidator):
number_string_fields = [('dunsNumber', 9)]
VALIDATORS = {
"g-cloud-7": G7Validator,
"g-cloud-8": G8Validator,
"digital-outcomes-and-specialists": DOSValidator,
}
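# Minimal usage sketch (the content/section stubs below are hypothetical
# stand-ins; real content objects come from the frameworks content loader):
class _StubSection(object):
    def __init__(self, question_ids):
        self._question_ids = question_ids

    def get_question_ids(self):
        return list(self._question_ids)


class _StubContent(object):
    def __init__(self, questions):
        self._questions = questions

    def __iter__(self):
        return iter([_StubSection(list(self._questions))])

    def get_question(self, question_id):
        return self._questions[question_id]


if __name__ == '__main__':
    content = _StubContent({
        'primaryContactEmail': {'type': 'text', 'number': 1, 'validations': []},
    })
    validator = get_validator(
        {'slug': 'digital-outcomes-and-specialists'},
        content, {'primaryContactEmail': 'not-an-email'})
    # -> one 'invalid_format' error with the default email message
    print(validator.get_error_messages())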
|
from __future__ import absolute_import
#
# Partnerbox E2
#
# $Id$
#
# Coded by Dr.Best (c) 2009
# Support: www.dreambox-tools.info
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# for localized messages
from . import _
from Screens.Screen import Screen
from Components.config import config, ConfigSelection, ConfigText, ConfigSubList, ConfigDateTime, ConfigClock, ConfigYesNo, getConfigListEntry
from Components.ActionMap import NumberActionMap
from Components.ConfigList import ConfigListScreen
from Components.Label import Label
from Components.Pixmap import Pixmap
from RecordTimer import AFTEREVENT
from enigma import getDesktop
from time import localtime, mktime, time, strftime
from datetime import datetime
from Screens.TimerEntry import TimerEntry
from Screens.MessageBox import MessageBox
from Tools.BoundFunction import boundFunction
from six.moves.urllib.parse import quote
from six.moves.urllib.request import urlopen
from six import PY2
from xml.etree.cElementTree import fromstring
from .PartnerboxFunctions import PlaylistEntry, SetPartnerboxTimerlist, sendPartnerBoxWebCommand, getServiceRef
from . import PartnerboxFunctions as partnerboxfunctions
HD = False
try:
sz_w = getDesktop(0).size().width()
if sz_w >= 1280:
HD = True
except:
pass
class RemoteTimerEntry(Screen, ConfigListScreen):
if HD:
skin = """
<screen name="RemoteTimerEntry" position="center,center" size="760,430" title="Timer entry">
<widget name="cancel" pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget name="ok" pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget name="canceltext" position="0,0" zPosition="2" size="140,40" halign="center" valign="center" font="Regular;21" backgroundColor="#9f1313" transparent="1" />
<widget name="oktext" position="140,0" zPosition="2" size="140,40" halign="center" valign="center" font="Regular;21" backgroundColor="#1f771f" transparent="1" />
<widget name="config" position="10,45" size="740,385" scrollbarMode="showOnDemand" />
</screen>"""
else:
skin = """
<screen name="RemoteTimerEntry" position="center,center" size="560,430" title="Timer entry">
<widget name="cancel" pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget name="ok" pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget name="canceltext" position="0,0" zPosition="2" size="140,40" halign="center" valign="center" font="Regular;21" backgroundColor="#9f1313" transparent="1" />
<widget name="oktext" position="140,0" zPosition="2" size="140,40" halign="center" valign="center" font="Regular;21" backgroundColor="#1f771f" transparent="1" />
<widget name="config" position="10,45" size="540,385" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, timer, Locations):
self.session = session
Screen.__init__(self, session)
self.setTitle(_("Timer entry"))
self.timer = timer
self.Locations = Locations
self.entryDate = None
self.entryService = None
self["oktext"] = Label(_("OK"))
self["canceltext"] = Label(_("Cancel"))
self["ok"] = Pixmap()
self["cancel"] = Pixmap()
self.createConfig()
self["actions"] = NumberActionMap(["SetupActions", "GlobalActions", "PiPSetupActions"],
{
"save": self.keyGo,
"cancel": self.keyCancel,
"volumeUp": self.incrementStart,
"volumeDown": self.decrementStart,
"size+": self.incrementEnd,
"size-": self.decrementEnd
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list, session=session)
self.createSetup("config")
def createConfig(self):
if self.timer.type == 0:
justplay = self.timer.justplay
afterevent = {
0: "nothing",
2: "deepstandby",
1: "standby",
3: "auto"
}[self.timer.afterevent]
else:
if self.timer.type & PlaylistEntry.doShutdown:
afterevent = PlaylistEntry.doShutdown
elif self.timer.type & PlaylistEntry.doGoSleep:
afterevent = PlaylistEntry.doGoSleep
else:
afterevent = 3
if self.timer.type & PlaylistEntry.RecTimerEntry:
if self.timer.type & PlaylistEntry.recDVR:
justplay = PlaylistEntry.recDVR
elif self.timer.type & PlaylistEntry.recNgrab:
justplay = PlaylistEntry.recNgrab
elif self.timer.type & PlaylistEntry.SwitchTimerEntry:
justplay = PlaylistEntry.SwitchTimerEntry
weekday_table = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")
day = []
weekday = 0
for x in (0, 1, 2, 3, 4, 5, 6):
day.append(0)
begin = self.timer.timebegin
end = self.timer.timeend
weekday = (int(strftime("%w", localtime(begin))) - 1) % 7
day[weekday] = 1
name = self.timer.name
description = self.timer.description
if self.timer.type == 0:
self.timerentry_justplay = ConfigSelection(choices=[("1", _("zap")), ("0", _("record"))], default=str(justplay))
self.timerentry_afterevent = ConfigSelection(choices=[("nothing", _("do nothing")), ("standby", _("go to standby")), ("deepstandby", _("go to deep standby")), ("auto", _("auto"))], default=afterevent)
self.timerentry_name = ConfigText(default=name, visible_width=50, fixed_size=False)
else:
self.timerentry_justplay = ConfigSelection(choices=[(str(PlaylistEntry.SwitchTimerEntry), _("zap")), (str(PlaylistEntry.recNgrab), _("NGRAB")), (str(PlaylistEntry.recDVR), _("DVR"))], default=str(justplay))
self.timerentry_afterevent = ConfigSelection(choices=[("0", _("do nothing")), (str(PlaylistEntry.doGoSleep), _("go to standby")), (str(PlaylistEntry.doShutdown), _("go to deep standby"))], default=str(afterevent))
self.timerentry_description = ConfigText(default=description, visible_width=50, fixed_size=False)
self.timerentry_date = ConfigDateTime(default=begin, formatstring=_("%d.%B %Y"), increment=86400)
self.timerentry_starttime = ConfigClock(default=begin)
self.timerentry_endtime = ConfigClock(default=end)
if self.timer.type == 0:
default = self.timer.dirname
if default == "None":
if self.Locations:
default = self.Locations[0]
else:
default = "N/A"
if default not in self.Locations:
self.Locations.append(default)
self.timerentry_dirname = ConfigSelection(default=default, choices=self.Locations)
self.timerentry_weekday = ConfigSelection(default=weekday_table[weekday], choices=[("mon", _("Monday")), ("tue", _("Tuesday")), ("wed", _("Wednesday")), ("thu", _("Thursday")), ("fri", _("Friday")), ("sat", _("Saturday")), ("sun", _("Sunday"))])
self.timerentry_day = ConfigSubList()
for x in (0, 1, 2, 3, 4, 5, 6):
self.timerentry_day.append(ConfigYesNo(default=day[x]))
servicename = self.timer.servicename
self.timerentry_service = ConfigSelection([servicename])
def createSetup(self, widget):
self.list = []
if self.timer.type == 0:
self.list.append(getConfigListEntry(_("Name"), self.timerentry_name))
self.list.append(getConfigListEntry(_("Description"), self.timerentry_description))
self.timerJustplayEntry = getConfigListEntry(_("Timer Type"), self.timerentry_justplay)
self.list.append(self.timerJustplayEntry)
self.entryDate = getConfigListEntry(_("Date"), self.timerentry_date)
self.list.append(self.entryDate)
self.entryStartTime = getConfigListEntry(_("StartTime"), self.timerentry_starttime)
self.list.append(self.entryStartTime)
if self.timer.type == 0:
if int(self.timerentry_justplay.value) != 1:
self.entryEndTime = getConfigListEntry(_("EndTime"), self.timerentry_endtime)
self.list.append(self.entryEndTime)
else:
self.entryEndTime = None
else:
self.entryEndTime = getConfigListEntry(_("EndTime"), self.timerentry_endtime)
self.list.append(self.entryEndTime)
self.channelEntry = getConfigListEntry(_("Channel"), self.timerentry_service)
self.list.append(self.channelEntry)
if self.timer.type == 0:
self.dirname = getConfigListEntry(_("Location"), self.timerentry_dirname)
if int(self.timerentry_justplay.value) != 1:
self.list.append(self.dirname)
self.list.append(getConfigListEntry(_("After event"), self.timerentry_afterevent))
else:
self.list.append(getConfigListEntry(_("After event"), self.timerentry_afterevent))
self[widget].list = self.list
self[widget].l.setList(self.list)
def newConfig(self):
if self["config"].getCurrent() == self.timerJustplayEntry:
self.createSetup("config")
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.newConfig()
def getTimestamp(self, date, mytime):
d = localtime(date)
dt = datetime(d.tm_year, d.tm_mon, d.tm_mday, mytime[0], mytime[1])
return int(mktime(dt.timetuple()))
def getBeginEnd(self):
date = self.timerentry_date.value
endtime = self.timerentry_endtime.value
starttime = self.timerentry_starttime.value
begin = self.getTimestamp(date, starttime)
end = self.getTimestamp(date, endtime)
if end < begin:
end += 86400
return begin, end
def keyCancel(self):
self.close((False,))
def keyGo(self):
if self.timer.type == 0:
if self.timerentry_dirname.value == "N/A" or self.timerentry_dirname.value == "None":
self.session.open(MessageBox, _("Timer can not be added...no locations on partnerbox available."), MessageBox.TYPE_INFO)
return
else:
self.timer.name = self.timerentry_name.value
self.timer.dirname = self.timerentry_dirname.value
self.timer.afterevent = {
"nothing": 0,
"deepstandby": 2,
"standby": 1,
"auto": 3
}[self.timerentry_afterevent.value]
else:
self.timer.afterevent = int(self.timerentry_afterevent.value)
self.timer.description = self.timerentry_description.value
self.timer.justplay = int(self.timerentry_justplay.value)
self.timer.timebegin, self.timer.timeend = self.getBeginEnd()
self.close((True, self.timer))
def incrementStart(self):
self.timerentry_starttime.increment()
self["config"].invalidate(self.entryStartTime)
def decrementStart(self):
self.timerentry_starttime.decrement()
self["config"].invalidate(self.entryStartTime)
def incrementEnd(self):
if self.entryEndTime is not None:
self.timerentry_endtime.increment()
self["config"].invalidate(self.entryEndTime)
def decrementEnd(self):
if self.entryEndTime is not None:
self.timerentry_endtime.decrement()
self["config"].invalidate(self.entryEndTime)
# ##########################################
# TimerEntry
# ##########################################
baseTimerEntrySetup = None
baseTimerEntryGo = None
baseTimerEntrynewConfig = None
baseTimerkeyLeft = None
baseTimerkeyRight = None
baseTimerkeySelect = None
baseTimercreateConfig = None
baseTimer__init__ = None
def RemoteTimerInit():
global baseTimerEntrySetup, baseTimerEntryGo, baseTimerEntrynewConfig, baseTimerkeyLeft, baseTimerkeyRight, baseTimerkeySelect, baseTimercreateConfig, baseTimer__init__
if baseTimerEntrySetup is None:
baseTimerEntrySetup = TimerEntry.createSetup
if baseTimerEntryGo is None:
baseTimerEntryGo = TimerEntry.keyGo
if baseTimerEntrynewConfig is None:
baseTimerEntrynewConfig = TimerEntry.newConfig
if baseTimerkeyLeft is None:
baseTimerkeyLeft = TimerEntry.keyLeft
if baseTimerkeyRight is None:
baseTimerkeyRight = TimerEntry.keyRight
if baseTimerkeySelect is None:
baseTimerkeySelect = TimerEntry.keySelect
if baseTimercreateConfig is None:
baseTimercreateConfig = TimerEntry.createConfig
if baseTimer__init__ is None:
baseTimer__init__ = TimerEntry.__init__
TimerEntry.createConfig = RemoteTimerConfig
TimerEntry.keyLeft = RemoteTimerkeyLeft
TimerEntry.keyRight = RemoteTimerkeyRight
TimerEntry.keySelect = RemoteTimerkeySelect
TimerEntry.createSetup = createRemoteTimerSetup
TimerEntry.keyGo = RemoteTimerGo
TimerEntry.newConfig = RemoteTimernewConfig
TimerEntry.__init__ = RemoteTimer__init__
def RemoteTimer__init__(self, session, timer):
baseTimer__init__(self, session, timer)
if int(self.timerentry_remote.value) != 0:
RemoteTimernewConfig(self)
def RemoteTimerConfig(self):
self.Locations = []
self.entryguilist = []
self.entryguilist.append(("0", _("No"), None))
index = 1
for c in config.plugins.Partnerbox.Entries:
self.entryguilist.append((str(index), str(c.name.value), c))
index = index + 1
if config.plugins.Partnerbox.enabledefaultpartnerboxintimeredit.value and index > 1:
default = "1"
else:
default = "0"
self.timerentry_remote = ConfigSelection(default=default, choices=self.entryguilist)
baseTimercreateConfig(self)
#def getLocationsError(self, error):
# RemoteTimercreateConfig(self)
# RemoteTimerCreateSetup(self,"config")
def getLocations(self, url, check):
try:
f = urlopen(url)
sxml = f.read()
getLocationsCallback(self, sxml, check)
except:
pass
def getLocationsCallback(self, xmlstring, check=False):
try:
root = fromstring(xmlstring)
except:
return
for location in root.findall("e2location"):
add = True
loc = location.text.decode("utf-8").encode("utf-8", 'ignore') if PY2 else location.text
if check:
add = loc not in self.Locations
if add:
self.Locations.append(loc)
for location in root.findall("e2simplexmlitem"):
add = True
loc = location.text.decode("utf-8").encode("utf-8", 'ignore') if PY2 else location.text
if check:
add = loc not in self.Locations
if add:
self.Locations.append(loc)
def createRemoteTimerSetup(self, widget):
baseTimerEntrySetup(self, widget)
self.display = _("Remote Timer")
self.timerRemoteEntry = getConfigListEntry(self.display, self.timerentry_remote)
self.list.insert(0, self.timerRemoteEntry)
self[widget].list = self.list
def RemoteTimerkeyLeft(self):
if int(self.timerentry_remote.value) != 0:
ConfigListScreen.keyLeft(self)
RemoteTimernewConfig(self)
else:
baseTimerkeyLeft(self)
def RemoteTimerkeyRight(self):
if int(self.timerentry_remote.value) != 0:
ConfigListScreen.keyRight(self)
RemoteTimernewConfig(self)
else:
baseTimerkeyRight(self)
def RemoteTimerkeySelect(self):
if int(self.timerentry_remote.value) != 0:
RemoteTimerGo(self)
else:
baseTimerkeySelect(self)
def RemoteTimernewConfig(self):
if self["config"].getCurrent() == self.timerRemoteEntry:
if int(self.timerentry_remote.value) != 0:
if int(self.entryguilist[int(self.timerentry_remote.value)][2].enigma.value) == 1: # E1
self.timertype = PlaylistEntry.RecTimerEntry | PlaylistEntry.recDVR
else: # E2
self.timertype = 0
ip = "%d.%d.%d.%d" % tuple(self.entryguilist[int(self.timerentry_remote.value)][2].ip.value)
port = self.entryguilist[int(self.timerentry_remote.value)][2].port.value
http_ = "%s:%d" % (ip, port)
self.Locations = []
getLocations(self, "http://root:" + self.entryguilist[int(self.timerentry_remote.value)][2].password.value + "@" + http_ + "/web/getlocations", False)
if len(self.Locations) == 0:
getLocations(self, "http://root:" + self.entryguilist[int(self.timerentry_remote.value)][2].password.value + "@" + http_ + "/web/getcurrlocation", True)
RemoteTimercreateConfig(self)
RemoteTimerCreateSetup(self, "config")
else:
baseTimercreateConfig(self)
createRemoteTimerSetup(self, "config")
elif self["config"].getCurrent() == self.timerJustplayEntry:
if int(self.timerentry_remote.value) != 0:
RemoteTimerCreateSetup(self, "config")
else:
baseTimerEntrynewConfig(self)
else:
if int(self.timerentry_remote.value) == 0:
baseTimerEntrynewConfig(self)
if isVPSplugin():
if self["config"].getCurrent() == self.timerVps_enabled_Entry:
if self.timerentry_vpsplugin_enabled.value == "no":
self.timerentry_vpsplugin_dontcheck_pdc = False
self.createSetup("config")
self["config"].setCurrentIndex(self["config"].getCurrentIndex() + 1)
def RemoteTimercreateConfig(self):
if int(self.entryguilist[int(self.timerentry_remote.value)][2].enigma.value) == 0:
justplay = self.timer.justplay
afterevent = {
AFTEREVENT.NONE: "nothing",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.AUTO: "auto"
}[self.timer.afterEvent]
else:
if self.timertype & PlaylistEntry.doShutdown:
afterevent = PlaylistEntry.doShutdown
elif self.timertype & PlaylistEntry.doGoSleep:
afterevent = PlaylistEntry.doGoSleep
else:
afterevent = 0
if self.timertype & PlaylistEntry.RecTimerEntry:
if self.timertype & PlaylistEntry.recDVR:
justplay = PlaylistEntry.recDVR
elif self.timertype & PlaylistEntry.recNgrab:
justplay = PlaylistEntry.recNgrab
elif self.timertype & PlaylistEntry.SwitchTimerEntry:
justplay = PlaylistEntry.SwitchTimerEntry
weekday_table = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")
day = []
weekday = 0
for x in (0, 1, 2, 3, 4, 5, 6):
day.append(0)
begin = self.timer.begin
end = self.timer.end
weekday = (int(strftime("%w", localtime(begin))) - 1) % 7
day[weekday] = 1
if int(self.entryguilist[int(self.timerentry_remote.value)][2].enigma.value) == 0:
name = self.timer.name
description = self.timer.description
self.timerentry_justplay = ConfigSelection(choices=[("zap", _("zap")), ("record", _("record"))], default={0: "record", 1: "zap"}[justplay])
self.timerentry_afterevent = ConfigSelection(choices=[("nothing", _("do nothing")), ("standby", _("go to standby")), ("deepstandby", _("go to deep standby")), ("auto", _("auto"))], default=afterevent)
self.timerentry_name = ConfigText(default=name, visible_width=50, fixed_size=False)
else:
description = self.timer.name
self.timerentry_justplay = ConfigSelection(choices=[(str(PlaylistEntry.SwitchTimerEntry), _("zap")), (str(PlaylistEntry.recNgrab), _("NGRAB")), (str(PlaylistEntry.recDVR), _("DVR"))], default=str(justplay))
self.timerentry_afterevent = ConfigSelection(choices=[("0", _("do nothing")), (str(PlaylistEntry.doGoSleep), _("go to standby")), (str(PlaylistEntry.doShutdown), _("go to deep standby"))], default=str(afterevent))
self.timerentry_description = ConfigText(default=description, visible_width=50, fixed_size=False)
self.timerentry_date = ConfigDateTime(default=begin, formatstring=_("%d.%B %Y"), increment=86400)
self.timerentry_starttime = ConfigClock(default=begin)
self.timerentry_endtime = ConfigClock(default=end)
if int(self.entryguilist[int(self.timerentry_remote.value)][2].enigma.value) == 0:
if self.Locations:
default = self.Locations[0]
else:
default = "N/A"
if default not in self.Locations:
self.Locations.append(default)
self.timerentry_dirname = ConfigSelection(default=default, choices=self.Locations)
self.timerentry_weekday = ConfigSelection(default=weekday_table[weekday], choices=[("mon", _("Monday")), ("tue", _("Tuesday")), ("wed", _("Wednesday")), ("thu", _("Thursday")), ("fri", _("Friday")), ("sat", _("Saturday")), ("sun", _("Sunday"))])
self.timerentry_day = ConfigSubList()
for x in (0, 1, 2, 3, 4, 5, 6):
self.timerentry_day.append(ConfigYesNo(default=day[x]))
# FIXME some service-chooser needed here
servicename = "N/A"
try: # no current service available?
servicename = str(self.timer.service_ref.getServiceName())
except:
pass
self.timerentry_service_ref = self.timer.service_ref
self.timerentry_service = ConfigSelection([servicename])
self.timerentry_vps_in_timerevent = ConfigSelection(default="no", choices=[("no", _("No")), ("yes_safe", _("Yes (safe mode)")), ("yes", _("Yes"))])
def RemoteTimerCreateSetup(self, widget):
self.list = []
self.timerRemoteEntry = getConfigListEntry(self.display, self.timerentry_remote)
self.list.append(self.timerRemoteEntry)
if int(self.entryguilist[int(self.timerentry_remote.value)][2].enigma.value) == 0:
self.list.append(getConfigListEntry(_("Name"), self.timerentry_name))
self.list.append(getConfigListEntry(_("Description"), self.timerentry_description))
self.timerJustplayEntry = getConfigListEntry(_("Timer Type"), self.timerentry_justplay)
self.list.append(self.timerJustplayEntry)
self.entryDate = getConfigListEntry(_("Date"), self.timerentry_date)
self.list.append(self.entryDate)
self.entryStartTime = getConfigListEntry(_("StartTime"), self.timerentry_starttime)
self.list.append(self.entryStartTime)
if int(self.entryguilist[int(self.timerentry_remote.value)][2].enigma.value) == 0:
if self.timerentry_justplay.value != "zap":
self.entryEndTime = getConfigListEntry(_("EndTime"), self.timerentry_endtime)
self.list.append(self.entryEndTime)
else:
self.entryEndTime = None
else:
self.entryEndTime = getConfigListEntry(_("EndTime"), self.timerentry_endtime)
self.list.append(self.entryEndTime)
self.channelEntry = getConfigListEntry(_("Channel"), self.timerentry_service)
self.list.append(self.channelEntry)
if int(self.entryguilist[int(self.timerentry_remote.value)][2].enigma.value) == 0:
self.dirname = getConfigListEntry(_("Location"), self.timerentry_dirname)
if self.timerentry_justplay.value != "zap":
self.list.append(self.dirname)
self.list.append(getConfigListEntry(_("After event"), self.timerentry_afterevent))
else:
self.list.append(getConfigListEntry(_("After event"), self.timerentry_afterevent))
if config.plugins.Partnerbox.enablevpsintimerevent.value:
if isVPSplugin():
cfg = self.timerentry_vpsplugin_enabled
else:
cfg = self.timerentry_vps_in_timerevent
self.list.append(getConfigListEntry(_("Enable VPS"), cfg))
self[widget].list = self.list
self[widget].l.setList(self.list)
def RemoteTimerGo(self):
if int(self.timerentry_remote.value) == 0:
baseTimerEntryGo(self)
else:
service_ref = self.timerentry_service_ref
descr = quote(self.timerentry_description.value)
begin, end = self.getBeginEnd()
ip = "%d.%d.%d.%d" % tuple(self.entryguilist[int(self.timerentry_remote.value)][2].ip.value)
port = self.entryguilist[int(self.timerentry_remote.value)][2].port.value
http = "http://%s:%d" % (ip, port)
if int(self.entryguilist[int(self.timerentry_remote.value)][2].enigma.value) == 1:
# E1
afterevent = self.timerentry_afterevent.value
justplay = int(self.timerentry_justplay.value)
if justplay & PlaylistEntry.SwitchTimerEntry:
action = "zap"
elif justplay & PlaylistEntry.recNgrab:
action = "ngrab"
else:
action = ""
# FIXME some service-chooser needed here
servicename = "N/A"
try: # no current service available?
                servicename = str(service_ref.getServiceName())
except:
pass
channel = quote(servicename)
sCommand = "%s/addTimerEvent?ref=%s&start=%d&duration=%d&descr=%s&channel=%s&after_event=%s&action=%s" % (http, service_ref, begin, end - begin, descr, channel, afterevent, action)
sendPartnerBoxWebCommand(sCommand, None, 3, "root", str(self.entryguilist[int(self.timerentry_remote.value)][2].password.value)).addCallback(boundFunction(AddTimerE1Callback, self, self.session)).addErrback(boundFunction(AddTimerError, self, self.session))
else:
# E2
name = quote(self.timerentry_name.value)
self.timer.tags = self.timerentry_tags
if self.timerentry_justplay.value == "zap":
justplay = 1
dirname = ""
else:
justplay = 0
dirname = quote(self.timerentry_dirname.value)
if dirname == "N/A":
self.session.open(MessageBox, _("Timer can not be added...no locations on partnerbox available."), MessageBox.TYPE_INFO)
else:
afterevent = {
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"standby": AFTEREVENT.STANDBY,
"nothing": AFTEREVENT.NONE,
"auto": AFTEREVENT.AUTO
}.get(self.timerentry_afterevent.value, AFTEREVENT.AUTO)
try:
eit = self.timer.eit
except:
eit = 0
if eit is None:
eit = 0
if service_ref.getPath(): # partnerbox service ?
service_ref = getServiceRef(service_ref.ref.toString())
refstr = ':'.join(str(service_ref).split(':')[:11])
sCommand = "%s/web/timeradd?sRef=%s&begin=%d&end=%d&name=%s&description=%s&dirname=%s&eit=%d&justplay=%d&afterevent=%s&vps_pbox=%s" % (http, refstr, begin, end, name, descr, dirname, eit, justplay, afterevent, vpsValue(self))
sendPartnerBoxWebCommand(sCommand, None, 3, "root", str(self.entryguilist[int(self.timerentry_remote.value)][2].password.value)).addCallback(boundFunction(AddTimerE2Callback, self, self.session)).addErrback(boundFunction(AddTimerError, self, self.session))
def AddTimerE2Callback(self, session, answer):
text = ""
try:
root = fromstring(answer)
except:
        return
statetext = root.findtext("e2statetext")
state = root.findtext("e2state")
if statetext:
text = statetext.encode("utf-8", 'ignore') if PY2 else statetext
ok = state == "True"
session.open(MessageBox, _("Partnerbox Answer: \n%s") % _(text), MessageBox.TYPE_INFO, timeout=10)
if ok:
if (config.plugins.Partnerbox.enablepartnerboxepglist.value):
            # Reload the Partnerbox timer list --> display for the EPG list, but only if the same IP is also shown in the EPG list
if partnerboxfunctions.CurrentIP == self.entryguilist[int(self.timerentry_remote.value)][2].ip.value:
SetPartnerboxTimerlist(self.entryguilist[int(self.timerentry_remote.value)][2])
self.keyCancel()
def AddTimerE1Callback(self, session, answer):
ok = answer == "Timer event was created successfully."
if answer == "Timer event was created successfully.":
answer = _("Timer event was created successfully.")
session.open(MessageBox, _("Partnerbox Answer: \n%s") % (answer), MessageBox.TYPE_INFO, timeout=10)
if ok:
if (config.plugins.Partnerbox.enablepartnerboxepglist.value):
            # Reload the Partnerbox timer list --> display for the EPG list, but only if the same IP is also shown in the EPG list
if partnerboxfunctions.CurrentIP == self.entryguilist[int(self.timerentry_remote.value)][2].ip.value:
SetPartnerboxTimerlist(self.entryguilist[int(self.timerentry_remote.value)][2])
self.keyCancel()
def AddTimerError(self, session, error):
session.open(MessageBox, str(_(error.getErrorMessage())), MessageBox.TYPE_INFO)
def isVPSplugin():
    try:
        from Plugins.SystemPlugins.vps.Modifications import vps_already_registered
        if vps_already_registered:
            return True
    except:
        pass
    return False
def vpsValue(self):
if isVPSplugin():
return self.timerentry_vpsplugin_enabled.value
if config.plugins.Partnerbox.enablevpsintimerevent.value:
return self.timerentry_vps_in_timerevent.value
return ""
|
import itertools
import os
import pickle
import numpy as np
import torch
import torch.nn.functional as F
def get_onehot(vec_len, pos):
v = np.zeros(vec_len)
v[pos] = 1
return v
def save_to_file(vals, folder='', file=''):
if not os.path.exists(folder):
os.makedirs(folder)
save_str = os.path.join(folder, file+'.pkl')
with open(save_str, 'wb') as f1:
pickle.dump(vals, f1)
def torch_save_to_file(to_save, folder='', file=''):
if not os.path.exists(folder):
os.makedirs(folder)
torch.save(to_save, os.path.join(folder, file))
def get_batch_indices(v, num_properties, types_per_property, device):
v = torch.tensor(v, device=device, dtype=torch.long)
batch_size = v.shape[0]
v = v.view(batch_size, num_properties, types_per_property)
out = []
for prop in range(num_properties):
v1 = v[:, prop]
out.append(torch.argmax(v1, -1))
out = torch.stack(out)
return out
def make_dataset(num_properties=3, types_per_property=5, val_pct=0.1, test_pct=0.1):
onehot_list = [get_onehot(types_per_property, i) for i in range(types_per_property)]
train = list(itertools.product(onehot_list, repeat=num_properties))
for i in range(len(train)):
train[i] = np.concatenate(train[i])
test_ind = int((1 - test_pct) * len(train))
val_ind = int((1 - val_pct - test_pct) * len(train))
test = train[test_ind:]
val = train[val_ind: test_ind]
train = train[:val_ind]
return train, val, test
def sample_gumbel(shape, device, eps=1e-8):
values = torch.empty(shape, device=device, dtype=torch.float).uniform_(0, 1)
return -torch.log(-torch.log(values + eps) + eps)
def gumbel_softmax(logits, temperature, device):
y = logits + sample_gumbel(logits.shape, device)
return F.softmax(y / temperature, -1)
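# Quick usage sketch (CPU device and a temperature of 0.5 are arbitrary
# choices for illustration):
if __name__ == '__main__':
    device = torch.device('cpu')
    logits = torch.randn(4, 5)                  # 4 samples, 5 categories
    soft = gumbel_softmax(logits, 0.5, device)  # relaxed one-hot rows
    print(soft.sum(-1))                         # each row sums to ~1
    print(soft.argmax(-1))                      # discretized category samples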
|
subset.groupby("road_type").size() # subset["road_type"].value_counts()
|
# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Slice nodes.
Slices are important when working with lists. Tracking them can allow us to
achieve more compact code, or to predict results at compile time.
There will be a method "computeExpressionSlice" to aid predicting them.
"""
from nuitka.PythonVersions import python_version
from nuitka.specs import BuiltinParameterSpecs
from .ConstantRefNodes import ExpressionConstantNoneRef
from .ExpressionBases import (
ExpressionChildrenHavingBase,
ExpressionSpecBasedComputationMixin,
)
from .NodeBases import StatementChildrenHavingBase
from .NodeMakingHelpers import (
convertNoneConstantToNone,
makeStatementExpressionOnlyReplacementNode,
makeStatementOnlyNodesFromExpressions,
)
class StatementAssignmentSlice(StatementChildrenHavingBase):
kind = "STATEMENT_ASSIGNMENT_SLICE"
named_children = ("source", "expression", "lower", "upper")
getLower = StatementChildrenHavingBase.childGetter("lower")
getUpper = StatementChildrenHavingBase.childGetter("upper")
def __init__(self, expression, lower, upper, source, source_ref):
assert python_version < 300
StatementChildrenHavingBase.__init__(
self,
values={
"source": source,
"expression": expression,
"lower": lower,
"upper": upper,
},
source_ref=source_ref,
)
def computeStatement(self, trace_collection):
source = trace_collection.onExpression(self.subnode_source)
        # No assignment will occur if the assignment source raises, so strip
        # it away.
if source.willRaiseException(BaseException):
result = makeStatementExpressionOnlyReplacementNode(
expression=source, node=self
)
return (
result,
"new_raise",
"""\
Slice assignment raises exception in assigned value, removed assignment.""",
)
lookup_source = trace_collection.onExpression(self.subnode_expression)
if lookup_source.willRaiseException(BaseException):
result = makeStatementOnlyNodesFromExpressions(
expressions=(source, lookup_source)
)
return (
result,
"new_raise",
"""\
Slice assignment raises exception in sliced value, removed assignment.""",
)
lower = trace_collection.onExpression(self.getLower(), allow_none=True)
if lower is not None and lower.willRaiseException(BaseException):
result = makeStatementOnlyNodesFromExpressions(
expressions=(source, lookup_source, lower)
)
return (
result,
"new_raise",
"""\
Slice assignment raises exception in lower slice boundary value, removed \
assignment.""",
)
upper = trace_collection.onExpression(self.getUpper(), allow_none=True)
if upper is not None and upper.willRaiseException(BaseException):
result = makeStatementOnlyNodesFromExpressions(
expressions=(source, lookup_source, lower, upper)
)
return (
result,
"new_raise",
"""\
Slice assignment raises exception in upper slice boundary value, removed \
assignment.""",
)
return lookup_source.computeExpressionSetSlice(
set_node=self,
lower=lower,
upper=upper,
value_node=source,
trace_collection=trace_collection,
)
class StatementDelSlice(StatementChildrenHavingBase):
kind = "STATEMENT_DEL_SLICE"
named_children = ("expression", "lower", "upper")
getLower = StatementChildrenHavingBase.childGetter("lower")
getUpper = StatementChildrenHavingBase.childGetter("upper")
def __init__(self, expression, lower, upper, source_ref):
StatementChildrenHavingBase.__init__(
self,
values={"expression": expression, "lower": lower, "upper": upper},
source_ref=source_ref,
)
def computeStatement(self, trace_collection):
lookup_source = trace_collection.onExpression(self.subnode_expression)
if lookup_source.willRaiseException(BaseException):
result = makeStatementExpressionOnlyReplacementNode(
expression=lookup_source, node=self
)
return (
result,
"new_raise",
"""\
Slice del raises exception in sliced value, removed del""",
)
lower = trace_collection.onExpression(self.getLower(), allow_none=True)
if lower is not None and lower.willRaiseException(BaseException):
result = makeStatementOnlyNodesFromExpressions(
expressions=(lookup_source, lower)
)
return (
result,
"new_raise",
"""
Slice del raises exception in lower slice boundary value, removed del""",
)
        upper = trace_collection.onExpression(self.getUpper(), allow_none=True)
if upper is not None and upper.willRaiseException(BaseException):
result = makeStatementOnlyNodesFromExpressions(
expressions=(lookup_source, lower, upper)
)
return (
result,
"new_raise",
"""
Slice del raises exception in upper slice boundary value, removed del""",
)
return lookup_source.computeExpressionDelSlice(
set_node=self, lower=lower, upper=upper, trace_collection=trace_collection
)
class ExpressionSliceLookup(ExpressionChildrenHavingBase):
kind = "EXPRESSION_SLICE_LOOKUP"
named_children = ("expression", "lower", "upper")
getLower = ExpressionChildrenHavingBase.childGetter("lower")
getUpper = ExpressionChildrenHavingBase.childGetter("upper")
checkers = {"upper": convertNoneConstantToNone, "lower": convertNoneConstantToNone}
def __init__(self, expression, lower, upper, source_ref):
assert python_version < 300
ExpressionChildrenHavingBase.__init__(
self,
values={"expression": expression, "upper": upper, "lower": lower},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
lookup_source = self.subnode_expression
return lookup_source.computeExpressionSlice(
lookup_node=self,
lower=self.getLower(),
upper=self.getUpper(),
trace_collection=trace_collection,
)
def isKnownToBeIterable(self, count):
# TODO: Should ask SliceRegistry
return None
class ExpressionBuiltinSlice(
ExpressionSpecBasedComputationMixin, ExpressionChildrenHavingBase
):
kind = "EXPRESSION_BUILTIN_SLICE"
named_children = ("start", "stop", "step")
getStart = ExpressionChildrenHavingBase.childGetter("start")
getStop = ExpressionChildrenHavingBase.childGetter("stop")
getStep = ExpressionChildrenHavingBase.childGetter("step")
builtin_spec = BuiltinParameterSpecs.builtin_slice_spec
def __init__(self, start, stop, step, source_ref):
if start is None:
start = ExpressionConstantNoneRef(source_ref=source_ref)
if stop is None:
stop = ExpressionConstantNoneRef(source_ref=source_ref)
if step is None:
step = ExpressionConstantNoneRef(source_ref=source_ref)
ExpressionChildrenHavingBase.__init__(
self,
values={"start": start, "stop": stop, "step": step},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
start = self.getStart()
stop = self.getStop()
step = self.getStep()
args = (start, stop, step)
return self.computeBuiltinSpec(
trace_collection=trace_collection, given_values=args
)
def mayRaiseException(self, exception_type):
return (
self.getStart().mayRaiseException(exception_type)
or self.getStop().mayRaiseException(exception_type)
or self.getStep().mayRaiseException(exception_type)
)
|
'''
---------------------------
Licensing and Distribution
---------------------------
Program name: Pilgrim
Version : 2021.5
License : MIT/x11
Copyright (c) 2021, David Ferro Costas (david.ferro@usc.es) and
Antonio Fernandez Ramos (qf.ramos@usc.es)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
---------------------------
*----------------------------------*
| Module : common |
| Sub-module : Ugraph |
| Last Update: 2020/05/03 (Y/M/D) |
| Main Author: David Ferro-Costas |
*----------------------------------*
This module contains the Ugraph class
'''
#=============================================#
import numpy as np
from common.criteria import ZERO_LAPLA
#=============================================#
#>>>>>>>>>>>>>>>>>>*
# CLASS: Queue *
#>>>>>>>>>>>>>>>>>>*
class Queue:
"""
A simple implementation of a FIFO queue.
"""
def __init__(self):
self._items = []
def __len__(self):
return len(self._items)
def __iter__(self):
for item in self._items:
yield item
def __str__(self):
return str(self._items)
def enqueue(self, item):
self._items.append(item)
def dequeue(self):
return self._items.pop(0)
def clear(self):
self._items = []
#>>>>>>>>>>>>>>>>>>*
# CLASS: Stack *
#>>>>>>>>>>>>>>>>>>*
class Stack:
"""
A simple implementation of a LIFO stack
"""
def __init__(self):
self._items = []
def __len__(self):
return len(self._items)
def __iter__(self):
for item in self._items:
yield item
def __str__(self):
return str(self._items)
def push(self, item):
self._items = [item] + self._items
def pop(self):
return self._items.pop(0)
def clear(self):
self._items = []
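#>>>>>>>>>>>>>>>>>>>>>>>>>*
# Note: deque alternative *
#>>>>>>>>>>>>>>>>>>>>>>>>>*
# list.pop(0) (and the list prepending in Stack.push) is O(n). For large
# graphs a collections.deque is O(1) at both ends; a drop-in FIFO sketch
# (illustrative, not used elsewhere in this module):
from collections import deque


class FastQueue:
    """Deque-backed FIFO queue (an alternative to the Queue class above)."""

    def __init__(self):
        self._items = deque()

    def __len__(self):
        return len(self._items)

    def enqueue(self, item):
        self._items.append(item)

    def dequeue(self):
        return self._items.popleft()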
#>>>>>>>>>>>>>>>>>>*
# CLASS: UGRAPH *
#>>>>>>>>>>>>>>>>>>*
class UGRAPH:
"""
    A simple implementation of an undirected graph
"""
def __init__(self):
self._ugdict = {}
self._nnodes = 0
self._nedges = 0
self._cnumber= 0 # cycle number
self._lapla = None
def __str__(self):
return "(n,e)=(%i,%i)"%(self._nnodes,self._nedges)
#-----------------#
# Add/Remove node #
#-----------------#
def add_node(self,node):
if node not in self._ugdict.keys():
self._nnodes += 1
self._ugdict[node] = set([])
def remove_node(self,node1):
# Remove node
self._ugdict.pop(node1)
self._nnodes -= 1
# Remove edges with that node
for node2 in self._ugdict.keys():
self._ugdict[node2].discard(node1)
#-----------------#
#-----------------#
# Add/Remove edge #
#-----------------#
def add_edge(self,node1,node2):
self.add_node(node1)
self.add_node(node2)
if node2 not in self._ugdict[node1]:
self._ugdict[node1].add(node2)
self._ugdict[node2].add(node1)
self._nedges += 1
def remove_edge(self,node1,node2):
self._ugdict[node1].discard(node2)
self._ugdict[node2].discard(node1)
self._nedges -= 1
#-----------------#
def set_from_amatrix(self,amatrix):
'''
set graph from adjacency matrix
'''
nn = len(amatrix)
for node1 in range(nn):
self.add_node(node1)
for node2 in range(node1+1,nn):
if amatrix[node1,node2] in [True,1]:
self.add_edge(node1,node2)
def get_amatrix(self):
# create adj matrix
amatrix = np.zeros((self._nnodes,self._nnodes),dtype=int)
# complete it
for node1,neighbors in self._ugdict.items():
for node2 in neighbors:
amatrix[node1,node2] = 1
amatrix[node2,node1] = 1
return amatrix
#-------------------------#
# get different variables #
#-------------------------#
def get_nnodes(self):
'''
Returns number of nodes in the ugraph
'''
return self._nnodes
def get_nedges(self):
'''
Returns number of edges in the ugraph
'''
return self._nedges
def get_nodes(self):
'''
Returns the nodes in the ugraph
'''
return list(self._ugdict.keys())
def get_edges(self):
'''
Returns the edges in the ugraph
'''
edges = set([])
for node1 in self._ugdict.keys():
for node2 in self._ugdict[node1]:
edge = tuple(sorted((node1,node2)))
edges.add(edge)
return edges
#-------------------------#
def neighbors(self,node):
return self._ugdict[node].copy()
#-------------#
# BFS and DFS #
#-------------#
def bfsearch(self,start_idx):
        '''
        Breadth First Search (BFS) for the undirected graph
        Input:
        * start_idx : the node where the BFS starts
        Returns the list of visited nodes, in visiting order
        '''
# Initialize queue
queue = Queue()
visited = [start_idx]
queue.enqueue(start_idx)
# Start BFS
while len(queue) != 0:
# Take node out of queue
target_idx = queue.dequeue()
# Get neighbors
neighbors = self._ugdict[target_idx]
# Visit neighbors
for neighbor in neighbors:
if neighbor in visited: continue
visited.append(neighbor)
queue.enqueue(neighbor)
return visited
def dfsearch(self,start_idx):
        '''
        Depth First Search (DFS) for the undirected graph
        Input:
        * start_idx : the node where the DFS starts
        Returns the list of visited nodes, in visiting order
        '''
        # Initialize stack
stack = Stack()
visited = [start_idx]
stack.push(start_idx)
        # Start DFS
while len(stack) != 0:
            # Take node out of stack
target_idx = stack.pop()
# Get neighbors
neighbors = self._ugdict[target_idx]
# Visit neighbors
for neighbor in neighbors:
if neighbor in visited: continue
visited.append(neighbor)
stack.push(neighbor)
return visited
#-------------#
def get_fragments(self):
fragments = []
nodes = list(self._ugdict.keys())
visited_nodes = set([])
for node in nodes:
if node in visited_nodes: continue
# explore graph with BFS from node
fragment = self.bfsearch(node)
# update visited nodes
visited_nodes = visited_nodes.union(fragment)
# store fragment
fragments.append(sorted(fragment))
return fragments
def bfsearch1d(self,idx1,idx2):
        '''
        Goes through the graph using a BFS algorithm,
        but only in the idx1-->idx2 direction.
        '''
# Initialize queue
queue = Queue()
neighbors1 = self._ugdict[idx1]
old2 = None
# idx1 and idx2 are not bonded, there is a node in the middle (idx1--idxJ--idx2)
if idx2 not in neighbors1:
neighbors2 = self._ugdict[idx2]
idxJ = list(neighbors1.intersection(neighbors2))
if idxJ == []:
return None
else:
old2 = idx2
idx2 = idxJ[0]
visited = [idx2]
queue.enqueue(idx2)
# Start BFS
while len(queue) != 0:
# Take node out of queue
target_idx = queue.dequeue()
# Get neighbors
neighbors = list(self._ugdict[target_idx])
if target_idx == idx2:
neighbors.remove(idx1)
# Visit neighbors
for neighbor in neighbors:
if neighbor in visited: continue
visited.append(neighbor)
queue.enqueue(neighbor)
visited.remove(idx2)
if old2 is not None: visited.remove(old2)
return visited
def dfs_cycle(self,node2,node1,color,mark,par):
# node2 was completely visited
if color[node2] == 2: return
# node2 was visited once. Now, backtrack based on parents to find complete cycle
if color[node2] == 1:
self._cnumber += 1
cur = node1
mark[cur] = self._cnumber
        # backtrack the vertices that are
        # in the cycle that was found
while cur != node2:
cur = par[cur]
mark[cur] = self._cnumber
return
par[node2] = node1
# partially visited.
color[node2] = 1
# simple dfs on graph
for node3 in self.neighbors(node2):
            # skip the edge back to the parent
if node3 == par[node2]:
continue
self.dfs_cycle(node3, node2, color, mark, par)
# completely visited.
color[node2] = 2
def nodes_in_cycles(self):
'''
graph coloring method --> uses dfs_cycle
        Good when the graph is not very dense in edges; in such a case, use v2
For molecules, v1 is the best option
'''
if self._nnodes - self._nedges == 1: return []
color = [0] * self._nnodes
par = [0] * self._nnodes # parent of node
mark = [0] * self._nnodes
self.dfs_cycle(0,-1,color=color,mark=mark,par=par)
nodes_in_cycles = [node_i for node_i,mark_i in enumerate(mark) if mark_i != 0]
return nodes_in_cycles
def remove_external(self):
'''
        Remove external nodes iteratively.
        If there are no cycles, it returns an empty list.
'''
cmatrix = self.get_amatrix()
        # iteratively remove nodes with a single neighbor until convergence
        while True:
            # check which nodes have exactly one neighbor
            toremove = []
            for node in range(self._nnodes):
                if sum(cmatrix[node,:]) == 1: toremove.append(node)
# nothing to remove --> so no cycle
if len(toremove) == 0: break
# remove them
for node in toremove:
cmatrix[node,:] = 0
cmatrix[:,node] = 0
        # nodes that still have neighbors belong to cycles
        remaining_nodes = [node for node in range(self._nnodes) if sum(cmatrix[node,:]) != 0]
return remaining_nodes
    def longest_path_from_node(self,start_idx,visited=None):
        '''
        Naive algorithm to explore the graph, starting at start_idx,
        and return the longest path
        '''
        # avoid the shared-mutable-default-argument pitfall
        if visited is None: visited = []
        # Get neighbors, excluding previously visited ones
        neighbors = [node for node in self._ugdict[start_idx] if node not in visited]
if len(neighbors) == 0: return [start_idx]
# Get longest from non-visited neighbors
length = - float("inf")
for neighbor in neighbors:
visited_i = visited + [start_idx,neighbor]
path_i = self.longest_path_from_node(neighbor,visited=visited_i)
if len(path_i) > length:
length = len(path_i)
the_path = path_i
return [start_idx] + the_path
def longest_path(self):
# DFS to find one end point of longest path
lnode = self.longest_path_from_node(0)[-1]
# DFS to find the actual longest path
path = self.longest_path_from_node(lnode)
return path
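    # Note: longest_path appears to use the classic two-sweep trick: the node
    # farthest from an arbitrary start is an endpoint of a longest path. This
    # is exact for trees; on graphs with cycles it is only a heuristic.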
def get_layers(self,center):
'''
returns a list of layers for the node center
* 1st layer: neighbors of node center
* 2nd layer: neighbors of neighbors of center
(excluding repetitions of previous layers)
'''
layers = [set([center])]
current = [center]
visited = set([center])
nnodes = len(self._ugdict.keys())
while len(visited) != nnodes:
layer = []
for node in current:
neighbors = self._ugdict[node]
layer = layer + list(neighbors)
layer = set(layer).difference(visited)
visited = visited.union(layer)
layers.append(layer)
current = list(layer)
return layers
#----------------------------#
# Get matrix representations #
#----------------------------#
def evals_laplacian(self):
self._lapla = np.zeros((self._nnodes,self._nnodes))
for node in self._ugdict.keys():
neighbors = self._ugdict[node]
for neighbor in neighbors:
self._lapla[node,node] = self._lapla[node,node] + 1
self._lapla[node,neighbor] = -1
# Eigenvalues
vals, vecs = np.linalg.eigh(self._lapla)
return vals
def evals_connsymbs(self,atonums):
matrix = np.zeros((self._nnodes,self._nnodes))
for node in self._ugdict.keys():
neighbors = self._ugdict[node]
atonum = atonums[node]
matrix[node,node] = atonum
for neighbor in neighbors: matrix[node,neighbor] = -1
# Eigenvalues
vals, vecs = np.linalg.eigh(matrix)
return vals
def evals_matrix3(self,atonums,xcc):
matrix = np.zeros((self._nnodes,self._nnodes))
nodes = self.get_nodes()
for idx1,node1 in enumerate(nodes):
matrix[node1,node1] = atonums[node1]
x1 = np.array(xcc[3*node1:3*node1+3])
for idx2 in range(idx1+1,len(nodes)):
node2 = nodes[idx2]
x2 = np.array(xcc[3*node2:3*node2+3])
# calculate distance
d = np.linalg.norm(x2-x1)
matrix[node1,node2] = d
matrix[node2,node1] = d
# Eigenvalues
vals, vecs = np.linalg.eigh(matrix)
return vals
if __name__ == "__main__":
graph = UGRAPH()
graph.add_edge(1-1 , 2-1)
graph.add_edge(2-1 , 3-1)
graph.add_edge(3-1 , 4-1)
graph.add_edge(4-1 , 6-1)
graph.add_edge(4-1 , 7-1)
graph.add_edge(3-1 , 5-1)
graph.add_edge(7-1 , 8-1)
graph.add_edge(6-1 , 10-1)
graph.add_edge(9-1 , 10-1)
graph.add_edge(5-1 , 9-1)
graph.add_edge(11-1, 12-1)
graph.add_edge(11-1, 13-1)
graph.add_edge(5-1 , 11-1)
graph.add_edge(6-1 , 13-1)
print([at+1 for at in graph.nodes_in_cycles()])
|
class BootstrapInformation(object):
"""The BootstrapInformation class holds all information about the bootstrapping process.
    The nature of the attributes of this class is rather diverse.
Tasks may set their own attributes on this class for later retrieval by another task.
Information that becomes invalid (e.g. a path to a file that has been deleted) must be removed.
"""
def __init__(self, manifest=None, debug=False):
"""Instantiates a new bootstrap info object.
:param Manifest manifest: The manifest
:param bool debug: Whether debugging is turned on
"""
# Set the manifest attribute.
self.manifest = manifest
self.debug = debug
        # Create a run_id. This id may be used to uniquely identify the current bootstrapping process
import random
self.run_id = '{id:08x}'.format(id=random.randrange(16 ** 8))
# Define the path to our workspace
import os.path
self.workspace = os.path.join(manifest.bootstrapper['workspace'], self.run_id)
# Load all the volume information
from fs import load_volume
self.volume = load_volume(self.manifest.volume, manifest.system['bootloader'])
# The default apt mirror
self.apt_mirror = self.manifest.packages.get('mirror', 'http://http.debian.net/debian')
from bootstrapvz.common.tools import get_codename
self.release_codename = get_codename(self.manifest.system['release'])
# Create the manifest_vars dictionary
self.manifest_vars = self.__create_manifest_vars(self.manifest, {'apt_mirror': self.apt_mirror})
# Keep a list of apt sources,
# so that tasks may add to that list without having to fiddle with apt source list files.
from pkg.sourceslist import SourceLists
self.source_lists = SourceLists(self.manifest_vars)
# Keep a list of apt preferences
from pkg.preferenceslist import PreferenceLists
self.preference_lists = PreferenceLists(self.manifest_vars)
# Keep a list of packages that should be installed, tasks can add and remove things from this list
from pkg.packagelist import PackageList
self.packages = PackageList(self.manifest_vars, self.source_lists)
# These sets should rarely be used and specify which packages the debootstrap invocation
# should be called with.
self.include_packages = set()
self.exclude_packages = set()
# Dictionary to specify which commands are required on the host.
# The keys are commands, while the values are either package names or urls
# that hint at how a command may be made available.
self.host_dependencies = {}
# Lists of startup scripts that should be installed and disabled
self.initd = {'install': {}, 'disable': []}
# Add a dictionary that can be accessed via info._pluginname for the provider and every plugin
        # Information specific to the module can be added to that 'namespace'; this avoids clutter.
providername = manifest.modules['provider'].__name__.split('.')[-1]
setattr(self, '_' + providername, {})
for plugin in manifest.modules['plugins']:
pluginname = plugin.__name__.split('.')[-1]
setattr(self, '_' + pluginname, {})
def __create_manifest_vars(self, manifest, additional_vars={}):
"""Creates the manifest variables dictionary, based on the manifest contents
and additional data.
:param Manifest manifest: The Manifest
:param dict additional_vars: Additional values (they will take precedence and overwrite anything else)
:return: The manifest_vars dictionary
:rtype: dict
"""
class DictClass(dict):
"""Tiny extension of dict to allow setting and getting keys via attributes
"""
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
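        # Illustrative behaviour of DictClass (a sketch, not in the original):
        #   d = DictClass(); d['a'] = 1
        #   d.a        # -> 1 (keys readable as attributes)
        #   d.b = 2    # equivalent to d['b'] = 2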
def set_manifest_vars(obj, data):
"""Runs through the manifest and creates DictClasses for every key
:param dict obj: dictionary to set the values on
:param dict data: dictionary of values to set on the obj
"""
for key, value in data.iteritems():
if isinstance(value, dict):
obj[key] = DictClass()
set_manifest_vars(obj[key], value)
continue
# Lists are not supported
if not isinstance(value, list):
obj[key] = value
# manifest_vars is a dictionary of all the manifest values,
# with it users can cross-reference values in the manifest, so that they do not need to be written twice
manifest_vars = {}
set_manifest_vars(manifest_vars, manifest.data)
# Populate the manifest_vars with datetime information
# and map the datetime variables directly to the dictionary
from datetime import datetime
now = datetime.now()
time_vars = ['%a', '%A', '%b', '%B', '%c', '%d', '%f', '%H',
'%I', '%j', '%m', '%M', '%p', '%S', '%U', '%w',
'%W', '%x', '%X', '%y', '%Y', '%z', '%Z']
for key in time_vars:
manifest_vars[key] = now.strftime(key)
# Add any additional manifest variables
# They are added last so that they may override previous variables
set_manifest_vars(manifest_vars, additional_vars)
return manifest_vars
|
import collections.abc
import warnings
from abc import abstractmethod
from collections import defaultdict
from datetime import datetime
from enum import Enum, EnumMeta
from textwrap import dedent
from typing import Any, Callable, Dict, Mapping, Optional, Set, Tuple, Union
from urllib.parse import urlencode
import ciso8601
from ruamel.yaml.timestamp import TimeStamp as RuamelTimeStamp
from eodatasets3.utils import default_utc
class FileFormat(Enum):
GeoTIFF = 1
NetCDF = 2
Zarr = 3
JPEG2000 = 4
def nest_properties(d: Mapping[str, Any], separator=":") -> Dict[str, Any]:
"""
Split keys with embedded colons into sub dictionaries.
Intended for stac-like properties
>>> nest_properties({'landsat:path':1, 'landsat:row':2, 'clouds':3})
{'landsat': {'path': 1, 'row': 2}, 'clouds': 3}
"""
out = defaultdict(dict)
for key, val in d.items():
section, *remainder = key.split(separator, 1)
if remainder:
[sub_key] = remainder
out[section][sub_key] = val
else:
out[section] = val
for key, val in out.items():
if isinstance(val, dict):
out[key] = nest_properties(val, separator=separator)
return dict(out)
def datetime_type(value):
# Ruamel's TimeZone class can become invalid from the .replace(utc) call.
# (I think it no longer matches the internal ._yaml fields.)
# Convert to a regular datetime.
if isinstance(value, RuamelTimeStamp):
value = value.isoformat()
if isinstance(value, str):
value = ciso8601.parse_datetime(value)
# Store all dates with a timezone.
# yaml standard says all dates default to UTC.
# (and ruamel normalises timezones to UTC itself)
return default_utc(value)
def of_enum_type(
vals: Union[EnumMeta, Tuple[str, ...]] = None, lower=False, upper=False, strict=True
) -> Callable[[str], str]:
if isinstance(vals, EnumMeta):
vals = tuple(vals.__members__.keys())
def normalise(v: str):
if isinstance(v, Enum):
v = v.name
if upper:
v = v.upper()
if lower:
v = v.lower()
if v not in vals:
msg = f"Unexpected value {v!r}. Expected one of: {', '.join(vals)},"
if strict:
raise ValueError(msg)
else:
warnings.warn(msg)
return v
return normalise
def percent_type(value):
value = float(value)
if not (0.0 <= value <= 100.0):
raise ValueError("Expected percent between 0,100")
return value
def normalise_platforms(value: Union[str, list, set]):
"""
>>> normalise_platforms('LANDSAT_8')
'landsat-8'
>>> # Multiple can be comma-separated. They're normalised independently and sorted.
>>> normalise_platforms('LANDSAT_8,Landsat-5,landsat-7')
'landsat-5,landsat-7,landsat-8'
>>> # Can be given as a list.
>>> normalise_platforms(['sentinel-2b','SENTINEL-2a'])
'sentinel-2a,sentinel-2b'
>>> # Deduplicated too
>>> normalise_platforms('landsat-5,landsat-5,LANDSAT-5')
'landsat-5'
"""
if not isinstance(value, (list, set, tuple)):
value = value.split(",")
platforms = sorted({s.strip().lower().replace("_", "-") for s in value if s})
if not platforms:
return None
return ",".join(platforms)
def degrees_type(value):
value = float(value)
if not (-360.0 <= value <= 360.0):
raise ValueError("Expected degrees between -360,+360")
return value
def identifier_type(v: str):
v = v.replace("-", "_")
if not v.isidentifier() or not v.islower():
warnings.warn(
f"{v!r} is expected to be an identifier "
"(alphanumeric with underscores, typically lowercase)"
)
return v
def producer_check(value):
if "." not in value:
warnings.warn(
"Property 'odc:producer' is expected to be a domain name, "
"eg 'usgs.gov' or 'ga.gov.au'"
)
return value
def parsed_sentinel_tile_id(tile_id) -> Tuple[str, Dict]:
"""Extract useful extra fields from a sentinel tile id
>>> val, props = parsed_sentinel_tile_id("S2B_OPER_MSI_L1C_TL_EPAE_20201011T011446_A018789_T55HFA_N02.09")
>>> val
'S2B_OPER_MSI_L1C_TL_EPAE_20201011T011446_A018789_T55HFA_N02.09'
>>> props
{'sentinel:datatake_start_datetime': datetime.datetime(2020, 10, 11, 1, 14, 46, tzinfo=datetime.timezone.utc)}
"""
extras = {}
split_tile_id = tile_id.split("_")
try:
datatake_sensing_time = datetime_type(split_tile_id[-4])
extras["sentinel:datatake_start_datetime"] = datatake_sensing_time
except IndexError:
pass
# TODO: we could extract other useful fields?
return tile_id, extras
def parsed_sentinel_datastrip_id(tile_id) -> Tuple[str, Dict]:
"""Extract useful extra fields from a sentinel datastrip id
>>> val, props = parsed_sentinel_datastrip_id("S2B_OPER_MSI_L1C_DS_EPAE_20201011T011446_S20201011T000244_N02.09")
>>> val
'S2B_OPER_MSI_L1C_DS_EPAE_20201011T011446_S20201011T000244_N02.09'
>>> props
{'sentinel:datatake_start_datetime': datetime.datetime(2020, 10, 11, 1, 14, 46, tzinfo=datetime.timezone.utc)}
"""
extras = {}
split_tile_id = tile_id.split("_")
try:
datatake_sensing_time = datetime_type(split_tile_id[-3])
extras["sentinel:datatake_start_datetime"] = datatake_sensing_time
except IndexError:
pass
# TODO: we could extract other useful fields?
return tile_id, extras
# The primitive types allowed as stac values.
PrimitiveType = Union[str, int, float, datetime]
ExtraProperties = Dict
# A function to normalise a value.
# (eg. convert to int, or make string lowercase).
# They throw a ValueError if not valid.
NormaliseValueFn = Callable[
[Any],
# It returns the normalised value, but can optionally also return extra property values extracted from it.
Union[PrimitiveType, Tuple[PrimitiveType, ExtraProperties]],
]
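# For example, parsed_sentinel_tile_id above is a NormaliseValueFn: it returns
# the value unchanged plus extracted extra properties (see its doctest).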
# Extras typically on the ARD product.
_GQA_FMASK_PROPS = {
"fmask:clear": float,
"fmask:cloud": float,
"fmask:cloud_shadow": float,
"fmask:snow": float,
"fmask:water": float,
"gqa:abs_iterative_mean_x": float,
"gqa:abs_iterative_mean_xy": float,
"gqa:abs_iterative_mean_y": float,
"gqa:abs_x": float,
"gqa:abs_xy": float,
"gqa:abs_y": float,
"gqa:cep90": float,
"gqa:error_message": None,
"gqa:final_gcp_count": int,
"gqa:iterative_mean_x": float,
"gqa:iterative_mean_xy": float,
"gqa:iterative_mean_y": float,
"gqa:iterative_stddev_x": float,
"gqa:iterative_stddev_xy": float,
"gqa:iterative_stddev_y": float,
"gqa:mean_x": float,
"gqa:mean_xy": float,
"gqa:mean_y": float,
"gqa:ref_source": None,
"gqa:stddev_x": float,
"gqa:stddev_xy": float,
"gqa:stddev_y": float,
}
# Typically only from LPGS (ie. Level 1 products)
_LANDSAT_EXTENDED_PROPS = {
"landsat:algorithm_source_surface_reflectance": None,
"landsat:collection_category": None,
"landsat:collection_number": int,
"landsat:data_type": None,
"landsat:earth_sun_distance": None,
"landsat:ephemeris_type": None,
"landsat:geometric_rmse_model": None,
"landsat:geometric_rmse_model_x": None,
"landsat:geometric_rmse_model_y": None,
"landsat:geometric_rmse_verify": None,
"landsat:ground_control_points_model": None,
"landsat:ground_control_points_verify": None,
"landsat:ground_control_points_version": None,
"landsat:image_quality_oli": None,
"landsat:image_quality_tirs": None,
"landsat:processing_software_version": None,
"landsat:scan_gap_interpolation": float,
"landsat:station_id": None,
# Landsat USGS Properties
"landsat:rmse": None,
"landsat:rmse_x": None,
"landsat:rmse_y": None,
"landsat:wrs_type": None,
"landsat:correction": None,
"landsat:cloud_cover_land": None,
}
_SENTINEL_EXTENDED_PROPS = {
"sentinel:sentinel_tile_id": parsed_sentinel_tile_id,
"sentinel:datatake_start_datetime": datetime_type,
"sentinel:datastrip_id": parsed_sentinel_datastrip_id,
"sentinel:datatake_type": None,
"sentinel:processing_baseline": None,
"sentinel:processing_center": None,
"sentinel:product_name": None,
"sentinel:reception_station": None,
"sentinel:utm_zone": int,
"sentinel:latitude_band": None,
"sentinel:grid_square": None,
"sinergise_product_id": None,
}
_STAC_MISC_PROPS = {
"providers": None, # https://github.com/radiantearth/stac-spec/blob/master/item-spec/common-metadata.md#provider,
# Projection extension
"proj:epsg": int,
"proj:shape": None,
"proj:transform": None,
}
class Eo3Dict(collections.abc.MutableMapping):
"""
This acts like a dictionary, but will normalise known properties (consistent
case, types etc) and warn about common mistakes.
It wraps an inner dictionary. By default it will normalise the fields in
the input dictionary on creation, but you can disable this with `normalise_input=False`.
"""
# Every property we've seen or dealt with so far. Feel free to expand with abandon...
# This is to minimise minor typos, case differences, etc, which plagued previous systems.
# Keep sorted.
KNOWN_PROPERTIES: Mapping[str, Optional[NormaliseValueFn]] = {
"datetime": datetime_type,
"dea:dataset_maturity": of_enum_type(("final", "interim", "nrt"), lower=True),
"dea:product_maturity": of_enum_type(("stable", "provisional"), lower=True),
"dtr:end_datetime": datetime_type,
"dtr:start_datetime": datetime_type,
"eo:azimuth": float,
"eo:cloud_cover": percent_type,
"eo:epsg": None,
"eo:gsd": None,
"eo:instrument": None,
"eo:off_nadir": float,
"eo:platform": normalise_platforms,
"eo:constellation": None,
"eo:sun_azimuth": degrees_type,
"eo:sun_elevation": degrees_type,
"sat:orbit_state": None,
"sat:relative_orbit": int,
"sat:absolute_orbit": int,
"landsat:landsat_product_id": None,
"landsat:scene_id": None,
"landsat:landsat_scene_id": None,
"landsat:wrs_path": int,
"landsat:wrs_row": int,
"odc:dataset_version": None,
"odc:collection_number": int,
"odc:naming_conventions": None,
# Not strict as there may be more added in ODC...
"odc:file_format": of_enum_type(FileFormat, strict=False),
"odc:processing_datetime": datetime_type,
"odc:producer": producer_check,
"odc:product": None,
"odc:product_family": identifier_type,
"odc:region_code": None,
**_LANDSAT_EXTENDED_PROPS,
**_GQA_FMASK_PROPS,
**_SENTINEL_EXTENDED_PROPS,
**_STAC_MISC_PROPS,
}
# For backwards compatibility, in case users are extending at runtime.
KNOWN_STAC_PROPERTIES = KNOWN_PROPERTIES
def __init__(self, properties: Mapping = None, normalise_input=True) -> None:
if properties is None:
properties = {}
self._props = properties
# We normalise the properties they gave us.
for key in list(self._props):
# We always want to normalise dates as datetime objects rather than strings
# for consistency.
if normalise_input or ("datetime" in key):
self.normalise_and_set(key, self._props[key], expect_override=True)
self._finished_init_ = True
def __setattr__(self, name: str, value: Any) -> None:
"""
        Prevent users from accidentally setting new properties (it has happened multiple times).
"""
if hasattr(self, "_finished_init_") and not hasattr(self, name):
raise TypeError(
f"Cannot set new field '{name}' on a dict. "
f"(Perhaps you meant to set it as a dictionary field??)"
)
super().__setattr__(name, value)
def __getitem__(self, item):
return self._props[item]
def __iter__(self):
return iter(self._props)
def __len__(self):
return len(self._props)
def __delitem__(self, name: str) -> None:
del self._props[name]
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self._props!r})"
def __setitem__(self, key, value):
self.normalise_and_set(
key,
value,
# They can override properties but will receive a warning.
allow_override=True,
)
def normalise_and_set(self, key, value, allow_override=True, expect_override=False):
"""
Set a property with the usual normalisation.
This has some options that are not available on normal dictionary item
setting (``self[key] = val``)
The default behaviour of this class is very conservative in order to catch common errors
of users. You can loosen the settings here.
        :argument allow_override: Is it okay to overwrite an existing value? (if not, an error will be thrown)
:argument expect_override: We expect to overwrite a property, so don't produce a warning or error.
"""
if key not in self.KNOWN_PROPERTIES:
warnings.warn(
f"Unknown Stac property {key!r}. "
f"If this is valid property, please tell us on Github here so we can add it: "
f"\n\t{_github_suggest_new_property_url(key, value)}"
)
if value is not None:
normalise = self.KNOWN_PROPERTIES.get(key)
if normalise:
value = normalise(value)
# If the normaliser has extracted extra properties, we'll get two return values.
            if isinstance(value, tuple):
value, extra_properties = value
for k, v in extra_properties.items():
if k == key:
raise RuntimeError(
f"Infinite loop: writing key {k!r} from itself"
)
self.normalise_and_set(k, v, allow_override=allow_override)
if key in self._props and value != self[key] and (not expect_override):
message = (
f"Overriding property {key!r} " f"(from {self[key]!r} to {value!r})"
)
if allow_override:
warnings.warn(message, category=PropertyOverrideWarning)
else:
raise KeyError(message)
self._props[key] = value
def nested(self):
return nest_properties(self._props)
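# A minimal usage sketch of Eo3Dict (illustrative; behaviour follows the
# normalisers defined above):
#   d = Eo3Dict()
#   d["eo:platform"] = "LANDSAT_8"   # stored as "landsat-8" (normalise_platforms)
#   d["eo:cloud_cover"] = "12.5"     # coerced to float by percent_type
#   d["datetime"] = "2020-10-11"     # parsed to a UTC datetime (datetime_type)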
class StacPropertyView(Eo3Dict):
"""
Backwards compatibility class name. Deprecated.
Use the identical 'Eo3Dict' instead.
These were called "StacProperties" in Stac 0.6, but many of them have
changed in newer versions and we're sticking to the old names for consistency
and backwards-compatibility. So they're now EO3 Properties.
    (Use the eo3-to-stac tool to convert EO3 properties to real Stac properties.)
"""
def __init__(self, properties=None) -> None:
super().__init__(properties)
warnings.warn(
"The class name 'StacPropertyView' is deprecated as it's misleading. "
"Please change your import to the (identical) 'Eo3Dict'.",
category=DeprecationWarning,
)
class PropertyOverrideWarning(UserWarning):
"""A warning that a property was set twice with different values."""
...
class Eo3Interface:
"""
These are convenience properties for common metadata fields. They are available
on DatasetAssemblers and within other naming APIs.
(This is abstract. If you want one of these of your own, you probably want to create
an :class:`eodatasets3.DatasetDoc`)
"""
@property
@abstractmethod
def properties(self) -> Eo3Dict:
raise NotImplementedError
@property
def platform(self) -> Optional[str]:
"""
Unique name of the specific platform the instrument is attached to.
For satellites this would be the name of the satellite (e.g., ``landsat-8``, ``sentinel-2a``),
whereas for drones this would be a unique name for the drone.
In derivative products, multiple platforms can be specified with a comma: ``landsat-5,landsat-7``.
Shorthand for ``eo:platform`` property
"""
return self.properties.get("eo:platform")
@platform.setter
def platform(self, value: str):
self.properties["eo:platform"] = value
@property
def platforms(self) -> Set[str]:
"""
Get platform as a set (containing zero or more items).
In EO3, multiple platforms are specified by comma-separating them.
"""
if not self.platform:
return set()
return set(self.properties.get("eo:platform", "").split(","))
@platforms.setter
def platforms(self, value: Set[str]):
# The normaliser supports sets/lists
self.properties["eo:platform"] = value
@property
def instrument(self) -> str:
"""
Name of instrument or sensor used (e.g., MODIS, ASTER, OLI, Canon F-1).
Shorthand for ``eo:instrument`` property
"""
return self.properties.get("eo:instrument")
@instrument.setter
def instrument(self, value: str):
self.properties["eo:instrument"] = value
@property
def constellation(self) -> str:
"""
Constellation. Eg ``sentinel-2``.
"""
return self.properties.get("eo:constellation")
@constellation.setter
def constellation(self, value: str):
self.properties["eo:constellation"] = value
@property
def product_name(self) -> Optional[str]:
"""
The ODC product name
"""
return self.properties.get("odc:product")
@product_name.setter
def product_name(self, value: str):
self.properties["odc:product"] = value
@property
def producer(self) -> str:
"""
Organisation that produced the data.
eg. ``usgs.gov`` or ``ga.gov.au``
Shorthand for ``odc:producer`` property
"""
return self.properties.get("odc:producer")
@producer.setter
def producer(self, domain: str):
self.properties["odc:producer"] = domain
@property
def datetime_range(self) -> Tuple[datetime, datetime]:
"""
An optional date range for the dataset.
The ``datetime`` is still mandatory when this is set.
This field is a shorthand for reading/setting the datetime-range
stac 0.6 extension properties: ``dtr:start_datetime`` and ``dtr:end_datetime``
"""
return (
self.properties.get("dtr:start_datetime"),
self.properties.get("dtr:end_datetime"),
)
@datetime_range.setter
def datetime_range(self, val: Tuple[datetime, datetime]):
# TODO: string type conversion, better validation/errors
start, end = val
self.properties["dtr:start_datetime"] = start
self.properties["dtr:end_datetime"] = end
@property
def processed(self) -> datetime:
"""When the dataset was created (Defaults to UTC if not specified)
Shorthand for the ``odc:processing_datetime`` field
"""
return self.properties.get("odc:processing_datetime")
@processed.setter
def processed(self, value: Union[str, datetime]):
self.properties["odc:processing_datetime"] = value
def processed_now(self):
"""
Shorthand for when the dataset was processed right now on the current system.
"""
self.properties["odc:processing_datetime"] = datetime.utcnow()
@property
def dataset_version(self) -> str:
"""
The version of the dataset.
Typically digits separated by a dot. Eg. `1.0.0`
The first digit is usually the collection number for
this 'producer' organisation, such as USGS Collection 1 or
GA Collection 3.
"""
return self.properties.get("odc:dataset_version")
@property
def collection_number(self) -> int:
"""
The version of the collection.
Eg.::
metadata:
product_family: wofs
dataset_version: 1.6.0
collection_number: 3
"""
return self.properties.get("odc:collection_number")
@dataset_version.setter
def dataset_version(self, value):
self.properties["odc:dataset_version"] = value
@collection_number.setter
def collection_number(self, value):
self.properties["odc:collection_number"] = value
@property
def naming_conventions(self) -> str:
return self.properties.get("odc:naming_conventions")
@naming_conventions.setter
def naming_conventions(self, value):
self.properties["odc:naming_conventions"] = value
@property
def product_family(self) -> str:
"""
The identifier for this "family" of products, such as ``ard``, ``level1`` or ``fc``.
It's used for grouping similar products together.
        The products in a family are usually produced the same way but have small variations:
they come from different sensors, or are written in different projections, etc.
``ard`` family of products: ``ls7_ard``, ``ls5_ard`` ....
On older versions of Open Data Cube this was called ``product_type``.
Shorthand for ``odc:product_family`` property.
"""
return self.properties.get("odc:product_family")
@product_family.setter
def product_family(self, value):
self.properties["odc:product_family"] = value
@product_family.deleter
def product_family(self):
del self.properties["odc:product_family"]
@property
def region_code(self) -> Optional[str]:
"""
The "region" of acquisition. This is a platform-agnostic representation of things like
the Landsat Path+Row. Datasets with the same Region Code will *roughly* (but usually
not *exactly*) cover the same spatial footprint.
It's generally treated as an opaque string to group datasets and process as stacks.
For Landsat products it's the concatenated ``{path}{row}`` (both numbers formatted to three digits).
For Sentinel 2, it's the MGRS grid (TODO presumably?).
Shorthand for ``odc:region_code`` property.
"""
return self.properties.get("odc:region_code")
@region_code.setter
def region_code(self, value: str):
self.properties["odc:region_code"] = value
@property
def maturity(self) -> str:
"""
The dataset maturity. The same data may be processed multiple times -- becoming more
mature -- as new ancillary data becomes available.
Typical values (from least to most mature): ``nrt`` (near real time), ``interim``, ``final``
"""
return self.properties.get("dea:dataset_maturity")
@maturity.setter
def maturity(self, value):
self.properties["dea:dataset_maturity"] = value
@property
def product_maturity(self) -> str:
"""
Classification: is this a 'provisional' or 'stable' release of the product?
"""
return self.properties.get("dea:product_maturity")
@product_maturity.setter
def product_maturity(self, value):
self.properties["dea:product_maturity"] = value
# Note that giving a method the name 'datetime' will override the 'datetime' type
# for class-level declarations (ie, for any types on functions!)
# So we make an alias:
from datetime import datetime as datetime_
@property
def datetime(self) -> datetime_:
"""
        The searchable date and time of the assets. (Defaults to UTC if not specified)
"""
return self.properties.get("datetime")
@datetime.setter
def datetime(self, val: datetime_):
self.properties["datetime"] = val
def _github_suggest_new_property_url(key: str, value: object) -> str:
"""Get a URL to create a Github issue suggesting new properties to be added."""
issue_parameters = urlencode(
dict(
title=f"Include property {key!r}",
labels="known-properties",
body=dedent(
f"""\
Hello! The property {key!r} does not appear to be in the KNOWN_STAC_PROPERTIES list,
but I believe it to be valid.
An example value of this property is: {value!r}
Thank you!
"""
),
)
)
return f"https://github.com/GeoscienceAustralia/eo-datasets/issues/new?{issue_parameters}"
|
import setuptools
description = "A simple idiot's guide api wrapper in python"
long_description = open("README.md").read()
version="1.2.0"
packages = ["idioticapi"]
setuptools.setup(
name="idioticapi",
version=version,
description=description,
long_description=long_description,
url="https://github.com/freetnt5852/idioticapi",
author="Free TNT",
author_email="darksoulgamer5852@gmail.com",
license="MIT",
packages=packages,
include_package_data=True,
install_requires=["aiohttp>=2.0.0"]
)
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import sys
import traceback
from infra_libs import app
from infra_libs import event_mon
from infra_libs import ts_mon
from infra.tools.send_monitoring_event import common
success_metric = ts_mon.BooleanMetric('send_monitoring_event/success',
description='Set to True if the monitoring event was sent successfully')
class SendMonitoringEvent(app.BaseApplication):
DESCRIPTION = """Send an event to the monitoring pipeline.
Examples:
run.py infra.tools.send_monitoring_event --service-event-type=START \\
--service-event-revinfo <filename>
run.py infra.tools.send_monitoring_event \\
--service-event-stack-trace "<stack trace>"
run.py infra.tools.send_monitoring_event --build-event-type=SCHEDULER \\
        --build-event-build-name=foo \\
--build-event-hostname='bot.dns.name'
"""
def add_argparse_options(self, parser):
super(SendMonitoringEvent, self).add_argparse_options(parser)
common.add_argparse_options(parser)
parser.set_defaults(
ts_mon_flush='manual',
ts_mon_target_type='task',
ts_mon_task_service_name='send_monitoring_event',
ts_mon_task_job_name='manual',
)
def process_argparse_options(self, opts):
super(SendMonitoringEvent, self).process_argparse_options(opts)
common.process_argparse_options(opts)
def main(self, opts): # pragma: no cover
status = 0
try:
if opts.build_event_type:
success_metric.set(common.send_build_event(opts))
elif opts.service_event_type:
success_metric.set(common.send_service_event(opts))
elif opts.events_from_file:
success_metric.set(common.send_events_from_file(opts))
else:
print >> sys.stderr, ('At least one of the --*-event-type options or '
'--events-from-file should be provided. Nothing '
'was sent.')
status = 2
success_metric.set(False)
except Exception:
success_metric.set(False)
traceback.print_exc() # helps with debugging locally.
finally:
event_mon.close()
try:
ts_mon.flush()
except ts_mon.MonitoringNoConfiguredMonitorError:
logging.error("Unable to flush ts_mon because it's not configured.")
except Exception:
logging.exception("Flushing ts_mon metrics failed.")
return status
if __name__ == '__main__':
SendMonitoringEvent().run()
|
import os
import random
import time
import numpy as np
import torch
from transformers import AdamW, BertTokenizer, WarmupLinearSchedule
from common.constants import *
from common.evaluators.bert_evaluator import BertEvaluator
from common.trainers.bert_trainer import BertTrainer
from datasets.bert_processors.aapd_processor import AAPDProcessor
from datasets.bert_processors.agnews_processor import AGNewsProcessor
from datasets.bert_processors.imdb_processor import IMDBProcessor
from datasets.bert_processors.reuters_processor import ReutersProcessor
from datasets.bert_processors.sogou_processor import SogouProcessor
from datasets.bert_processors.sst_processor import SST2Processor
from datasets.bert_processors.yelp2014_processor import Yelp2014Processor
from models.hbert.args import get_args
from models.hbert.model import HierarchicalBert
def evaluate_split(model, processor, tokenizer, args, split='dev'):
evaluator = BertEvaluator(model, processor, tokenizer, args, split)
accuracy, precision, recall, f1, avg_loss = evaluator.get_scores(silent=True)[0]
print('\n' + LOG_HEADER)
print(LOG_TEMPLATE.format(split.upper(), accuracy, precision, recall, f1, avg_loss))
if __name__ == '__main__':
# Set default configuration in args.py
args = get_args()
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
n_gpu = torch.cuda.device_count()
print('Device:', str(device).upper())
print('Number of GPUs:', n_gpu)
print('FP16:', args.fp16)
# Set random seed for reproducibility
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
dataset_map = {
'SST-2': SST2Processor,
'Reuters': ReutersProcessor,
'IMDB': IMDBProcessor,
'AAPD': AAPDProcessor,
'AGNews': AGNewsProcessor,
'Yelp2014': Yelp2014Processor,
'Sogou': SogouProcessor
}
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if args.dataset not in dataset_map:
raise ValueError('Unrecognized dataset')
args.batch_size = args.batch_size // args.gradient_accumulation_steps
args.device = device
args.n_gpu = n_gpu
args.num_labels = dataset_map[args.dataset].NUM_CLASSES
args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL
args.pretrained_model_path = args.model if os.path.isfile(args.model) else PRETRAINED_MODEL_ARCHIVE_MAP[args.model]
if not args.trained_model:
save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME)
os.makedirs(save_path, exist_ok=True)
args.is_hierarchical = True
args.output_hidden_states = True
processor = dataset_map[args.dataset]()
pretrained_vocab_path = PRETRAINED_VOCAB_ARCHIVE_MAP[args.model]
tokenizer = BertTokenizer.from_pretrained(pretrained_vocab_path)
train_examples = None
num_train_optimization_steps = None
if not args.trained_model:
train_examples = processor.get_train_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.batch_size / args.gradient_accumulation_steps) * args.epochs
model = HierarchicalBert(args)
if args.fp16:
model.half()
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
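    # Three parameter groups (as the filters below encode): non-encoder params
    # get a scaled LR and no weight decay; encoder params get weight decay
    # 0.01, except bias/LayerNorm parameters, which are exempt.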
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if 'sentence_encoder' not in n],
'lr': args.lr * args.lr_mult, 'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer if 'sentence_encoder' in n and not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if 'sentence_encoder' in n and any(nd in n for nd in no_decay)],
'weight_decay': 0.0}]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install NVIDIA Apex for distributed and FP16 training")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.lr,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr, weight_decay=0.01, correct_bias=False)
scheduler = WarmupLinearSchedule(optimizer, t_total=num_train_optimization_steps,
warmup_steps=args.warmup_proportion * num_train_optimization_steps)
trainer = BertTrainer(model, optimizer, processor, scheduler, tokenizer, args)
if not args.trained_model:
trainer.train()
model = torch.load(trainer.snapshot_path)
else:
        model = HierarchicalBert(args)
        model_ = torch.load(args.trained_model, map_location=lambda storage, loc: storage)
state = {}
for key in model_.state_dict().keys():
new_key = key.replace("module.", "")
state[new_key] = model_.state_dict()[key]
model.load_state_dict(state)
model = model.to(device)
evaluate_split(model, processor, tokenizer, args, split='dev')
evaluate_split(model, processor, tokenizer, args, split='test')
|
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2020 Apple Inc. All Rights Reserved.
#
import socket
import torch
import torch.distributed as dist
from utils import logger
def is_master(opts) -> bool:
node_rank = getattr(opts, "ddp.rank", 0)
return (node_rank == 0)
def distributed_init(opts) -> int:
ddp_url = getattr(opts, "ddp.dist_url", None)
ddp_port = getattr(opts, "ddp.dist_port", 6006)
is_master_node = is_master(opts)
if ddp_url is None:
hostname = socket.gethostname()
ddp_url = 'tcp://{}:{}'.format(hostname, ddp_port)
setattr(opts, "ddp.dist_url", ddp_url)
node_rank = getattr(opts, "ddp.rank", 0)
world_size = getattr(opts, "ddp.world_size", 0)
if torch.distributed.is_initialized():
        logger.warning('DDP is already initialized and cannot be initialized twice!')
else:
logger.info('distributed init (rank {}): {}'.format(node_rank, ddp_url))
dist_backend = "gloo"
if dist.is_nccl_available():
dist_backend = 'nccl'
if is_master_node:
logger.log('Using NCCL as distributed backend with version={}'.format(torch.cuda.nccl.version()))
dist.init_process_group(
backend=dist_backend,
init_method=ddp_url,
world_size=world_size,
rank=node_rank
)
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available():
dist.all_reduce(torch.zeros(1).cuda())
node_rank = torch.distributed.get_rank()
setattr(opts, "ddp.rank", node_rank)
return node_rank
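# Typical call pattern (illustrative; `opts` is any namespace-like object
# readable via getattr, e.g. an argparse.Namespace with "ddp.*" entries):
#   node_rank = distributed_init(opts)
#   if is_master(opts):
#       ...  # rank-0-only work such as logging or checkpointing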
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from opencensus.trace import trace_options as trace_opt
class TestTraceOptions(unittest.TestCase):
def test_constructor_default(self):
trace_options = trace_opt.TraceOptions()
self.assertEqual(trace_options.trace_options_byte, trace_opt.DEFAULT)
self.assertTrue(trace_options.enabled)
def test_constructor_explicit(self):
trace_options_byte = '0'
trace_options = trace_opt.TraceOptions(trace_options_byte)
self.assertEqual(trace_options.trace_options_byte, trace_options_byte)
self.assertFalse(trace_options.enabled)
def test_check_trace_options_valid(self):
trace_options_byte = '10'
trace_options = trace_opt.TraceOptions(trace_options_byte)
self.assertEqual(trace_options.trace_options_byte, trace_options_byte)
def test_check_trace_options_invalid(self):
trace_options_byte = '256'
trace_options = trace_opt.TraceOptions(trace_options_byte)
self.assertEqual(trace_options.trace_options_byte, trace_opt.DEFAULT)
|
from dateutil import parser
from api.extensions import sql
from api.shows import models
from api.shows.services.wrapper import Wrapper
class ShowService(object):
def __init__(self, show_id):
self._show_id = show_id
def add_show(self):
x = Wrapper()
y = x.show(self._show_id)
        stat = y['status'] == 'Ended'
try:
image = y['image']['medium']
except TypeError:
image = None
s = models.Show(
id=y['id'],
name=y['name'],
premiered=y['premiered'],
status=stat,
summary=y['summary'],
image=image
)
sql.session.add(s)
sql.session.commit()
self._add_seasons()
self._add_episodes()
def update_show(self):
z = sql.session.query(
models.Show
).filter_by(
name=self._show_id
).first()
x = Wrapper(self._show_id)
y = x.query_show()
        stat = y['status'] == 'Ended'
z.summary = y['summary']
        z.image = (y.get('image') or {}).get('medium')  # tolerate a missing image, as in add_show
z.status = stat
sql.session.add(z)
sql.session.commit()
def _add_seasons(self):
s = models.Show.query.get(self._show_id)
seasons = Wrapper(
show_id=self._show_id
).seasons()
for season in seasons:
if season['name'] == "":
season_name = None
else:
season_name = season['name']
try:
image = season['image']['medium']
except TypeError:
image = None
x = models.Season(
id=season['id'],
show_id=s.id,
number=season['number'],
name=season_name,
episodeOrder=season['episodeOrder'],
premiereDate=season['premiereDate'],
endDate=season['endDate'],
image=image,
)
sql.session.add(x)
sql.session.commit()
def _add_episodes(self):
file = models.Show.query.get(self._show_id)
episodes = Wrapper(show_id=file.id).episodes()
for episode in episodes:
try:
image = episode['image']['medium']
except TypeError:
image = None
x = models.Episode(
id=episode['id'],
air_date=parser.parse(episode['airstamp'], ignoretz=True) if episode['airstamp'] != '' else None,
name=episode['name'],
number=episode['number'],
season=episode['season'],
summary=episode['summary'],
run_time=episode['runtime'],
image=image,
show_id=file.id,
)
sql.session.add(x)
sql.session.commit()
def update_episodes(self): # new episodes
file = sql.session.query(models.Show).get(self._show_id)
episodes = Wrapper(show_id=self._show_id).episodes()
for episode in episodes:
x = sql.session.query(models.Episode).get(episode['id'])
if x is None:
x = models.Episode(
id=episode['id'],
                    air_date=parser.parse(episode['airstamp'], ignoretz=True) if episode['airstamp'] != '' else None,
name=episode['name'],
number=episode['number'],
season=episode['season'],
summary=episode['summary'],
                    run_time=episode['runtime'],
                    image=(episode.get('image') or {}).get('medium'),  # tolerate a missing image
show_id=file.id,
)
else:
try:
x.image = episode['image']['medium']
except TypeError:
pass
sql.session.add(x)
sql.session.commit()
class SubscriptionService(object):
def __init__(self, user_id, show_id):
self._user_id = user_id
self._show_id = show_id
def subscribe(self):
sub = models.Subscription(
user_id=self._user_id,
show_id=self._show_id
)
sql.session.add(sub)
sql.session.commit()
self._subscribed_season()
self._subscribed_watched()
def _subscribed_season(self):
seasons = models.Season.query.filter_by(show_id=self._show_id).all()
for season in seasons:
x = models.SeasonWatched(
user_id=self._user_id,
show_id=self._show_id,
season_id=season.id
)
sql.session.add(x)
sql.session.commit()
def _subscribed_watched(self):
s = models.Show.query.get(self._show_id)
for episode in s.episodes:
season = models.Season.query.filter_by(
show_id=self._show_id,
number=episode.season
).first()
x = models.Watched(
user_id=self._user_id,
show_id=self._show_id,
season_id=season.id,
episode_id=episode.id
)
sql.session.add(x)
sql.session.commit()
def watched(self, episode_id, watch_state):
x = models.Watched.query.get(
(self._user_id, self._show_id, episode_id)
)
x.watched = watch_state
sql.session.add(x)
sql.session.commit()
def hidden(self, episode_id, hidden_state):
x = models.Watched.query.get(
(self._user_id, self._show_id, episode_id)
)
x.hidden = hidden_state
sql.session.commit()
def update_subscription(self):
seasons = models.Season.query.filter_by(show_id=self._show_id).all()
for season in seasons:
season_watched = models.SeasonWatched.query.get((self._user_id, self._show_id, season.id))
if season_watched is None:
x = models.SeasonWatched(
user_id=self._user_id,
show_id=self._show_id,
season_id=season.id
)
sql.session.add(x)
episodes = models.Episode.query.filter_by(
show_id=self._show_id,
season=season.number
).all()
for episode in episodes:
                z = models.Watched.query.get((self._user_id, self._show_id, episode.id))
if z is None:
new = models.Watched(
user_id=self._user_id,
show_id=self._show_id,
season_id=season.id,
episode_id=episode.id
)
sql.session.add(new)
sql.session.commit()
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/xurui/double_robot_project/src/scout_robot/tracer_ros/tracer_msgs/msg/TracerMotorState.msg;/home/xurui/double_robot_project/src/scout_robot/tracer_ros/tracer_msgs/msg/UartTracerMotorState.msg;/home/xurui/double_robot_project/src/scout_robot/tracer_ros/tracer_msgs/msg/TracerLightState.msg;/home/xurui/double_robot_project/src/scout_robot/tracer_ros/tracer_msgs/msg/TracerLightCmd.msg;/home/xurui/double_robot_project/src/scout_robot/tracer_ros/tracer_msgs/msg/TracerStatus.msg;/home/xurui/double_robot_project/src/scout_robot/tracer_ros/tracer_msgs/msg/UartTracerStatus.msg"
services_str = ""
pkg_name = "tracer_msgs"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "tracer_msgs;/home/xurui/double_robot_project/src/scout_robot/tracer_ros/tracer_msgs/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
import re
from sublime import Region
class HTMLEditor:
def __init__(self, view):
self.new, self.edited, self.deleted = self._calculate(
old=self._readlines(view),
new=self._readregions(view)
)
def _calculate(self, old, new):
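        # Best-effort reading of the diff logic below: view lines absent from
        # the file are either edits to an id'd section or new lines attributed
        # to the nearest preceding section; file lines absent from the view
        # are deletions.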
new_lines = []
edited = []
deleted = []
old_copy = [line.replace('\n', '') for line in old]
new_copy = [line.replace('\n', '') for line in new]
for i, line in enumerate(new_copy):
if line not in old_copy:
section = self._parse_id(line)
if section:
edited.append((self._to_html(line), section))
continue
                if i <= len(old_copy):
                    section = None
                    for index in range(i-1, -1, -1):
                        section = self._parse_id(new_copy[index])
                        if section:
                            break
                    new_lines.append((new[i], section))
edited_sections = [section for line,section in edited]
for i, copy in enumerate(old_copy):
if copy not in new_copy:
section = self._parse_id(old[i-len(deleted)])
if section and section not in edited_sections:
deleted.append((None, section))
#old.remove(old[i-len(deleted)])
return new_lines[::-1], edited, deleted
def _readlines(self, view):
try:
with open(view.file_name()) as file:
return [line for line in file.readlines() if line and line != '\n']
except Exception as ex:
print(ex)
return []
def _readregions(self, view):
lines = []
for region in view.lines(Region(0, view.size())):
text = view.substr(region)
if text and text != '\n':
lines.append(text)
return lines
def _get_sections(self, lines):
return set([self._parse_id(line) for line in lines])
def _parse_id(self, line):
if line:
result = re.search(r"id='\w{11}'", line)
if result:
id = result.group(0).split("'")[-2]
if len(id) == 11:
return id
def _to_html(self, line):
if line.startswith('<') and (line.endswith('>') or line.endswith('>\n')):
return line
return "<p>" + line + "</p>"
|
import asyncio
import pytest
import json
import itertools
from typing import Union, List, Tuple
from botocore.exceptions import ReadTimeoutError
from botocore.utils import BadIMDSRequestError
from aiobotocore import utils
from aiobotocore._helpers import asynccontextmanager
# From class TestContainerMetadataFetcher
def fake_aiohttp_session(responses: Union[List[Tuple[Union[str, object], int]],
Tuple[Union[str, object], int]]):
"""
Dodgy shim class
"""
    if isinstance(responses, tuple):
data = itertools.cycle([responses])
else:
data = iter(responses)
class FakeAioHttpSession(object):
@asynccontextmanager
async def acquire(self):
yield self
class FakeResponse(object):
def __init__(self, request, *args, **kwargs):
self.request = request
self.url = request.url
self._body, self.status_code = next(data)
self.content = self._content()
self.text = self._text()
if not isinstance(self._body, str):
raise self._body
async def _content(self):
return self._body.encode('utf-8')
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
pass
async def _text(self):
return self._body
async def json(self):
return json.loads(self._body)
def __init__(self, *args, **kwargs):
pass
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
pass
async def send(self, request):
return self.FakeResponse(request)
return FakeAioHttpSession()
@pytest.mark.moto
@pytest.mark.asyncio
async def test_idmsfetcher_disabled():
env = {'AWS_EC2_METADATA_DISABLED': 'true'}
fetcher = utils.AioIMDSFetcher(env=env)
with pytest.raises(fetcher._RETRIES_EXCEEDED_ERROR_CLS):
await fetcher._get_request('path', None)
@pytest.mark.moto
@pytest.mark.asyncio
async def test_idmsfetcher_get_token_success():
session = fake_aiohttp_session([
('blah', 200),
])
fetcher = utils.AioIMDSFetcher(num_attempts=2,
session=session,
user_agent='test')
response = await fetcher._fetch_metadata_token()
assert response == 'blah'
@pytest.mark.moto
@pytest.mark.asyncio
async def test_idmsfetcher_get_token_not_found():
session = fake_aiohttp_session([
('blah', 404),
])
fetcher = utils.AioIMDSFetcher(num_attempts=2,
session=session,
user_agent='test')
response = await fetcher._fetch_metadata_token()
assert response is None
@pytest.mark.moto
@pytest.mark.asyncio
async def test_idmsfetcher_get_token_bad_request():
session = fake_aiohttp_session([
('blah', 400),
])
fetcher = utils.AioIMDSFetcher(num_attempts=2,
session=session,
user_agent='test')
with pytest.raises(BadIMDSRequestError):
await fetcher._fetch_metadata_token()
@pytest.mark.moto
@pytest.mark.asyncio
async def test_idmsfetcher_get_token_timeout():
session = fake_aiohttp_session([
(ReadTimeoutError(endpoint_url='aaa'), 500),
])
fetcher = utils.AioIMDSFetcher(num_attempts=2,
session=session)
response = await fetcher._fetch_metadata_token()
assert response is None
@pytest.mark.moto
@pytest.mark.asyncio
async def test_idmsfetcher_get_token_retry():
session = fake_aiohttp_session([
('blah', 500),
('blah', 500),
('token', 200),
])
fetcher = utils.AioIMDSFetcher(num_attempts=3,
session=session)
response = await fetcher._fetch_metadata_token()
assert response == 'token'
@pytest.mark.moto
@pytest.mark.asyncio
async def test_idmsfetcher_retry():
session = fake_aiohttp_session([
('blah', 500),
('data', 200),
])
fetcher = utils.AioIMDSFetcher(num_attempts=2,
session=session,
user_agent='test')
response = await fetcher._get_request('path', None, 'some_token')
assert await response.text == 'data'
session = fake_aiohttp_session([
('blah', 500),
('data', 200),
])
fetcher = utils.AioIMDSFetcher(num_attempts=1, session=session)
with pytest.raises(fetcher._RETRIES_EXCEEDED_ERROR_CLS):
await fetcher._get_request('path', None)
@pytest.mark.moto
@pytest.mark.asyncio
async def test_imdsfetcher_timeout():
session = fake_aiohttp_session([
(asyncio.TimeoutError(), 500),
])
fetcher = utils.AioIMDSFetcher(num_attempts=1,
session=session)
with pytest.raises(fetcher._RETRIES_EXCEEDED_ERROR_CLS):
await fetcher._get_request('path', None)
|
#Embedded file name: url_redirect.py
import re
import urlparse
import urllib
import time
def assign(service, arg):
    if service != 'www':
        return
    parsed = urlparse.urlparse(arg)
    params = urlparse.parse_qsl(parsed.query)
    for key, value in params:
        arg = arg.replace(value, key)
    if urlparse.urlparse(arg).query.find('=') == -1 or len(params) > 6:
        return
    else:
        return (True, arg)
def check_param(action, query, k, v):
    pairs = []
    payload = decode('\x8e\xc3\x8f\x9f\xb28\xd4U\xd2\xad\x9cJ\x01=\x1e\xb0\x03\xda\x1d') + str(time.time())
    for key, value in query:
        value = payload if key == k else value
        pairs.append((key, value))
    encoded = urllib.urlencode(pairs)
    target = decode('\xdb\xdf\xc2\xc9\xf7') % (action, encoded)
    status, content, _, _, _ = curl.curl(target)
    if content.find(decode('\xbe\xcc\x91\x99\xeb`\x94R\x87\xf3\xc4D\x0e%\x0c\xe1G\x99\\\xd5F]3n\r\xda\x9bm')) != -1:
        return (True, decode('\xdb\xdf\xc2\xc9\xf7') % (action, encoded))
def audit(arg):
    url = arg
    parsed = urlparse.urlparse(url)
    base_url = urlparse.urlunsplit((parsed.scheme,
                                    parsed.netloc,
                                    parsed.path,
                                    decode(''),
                                    decode('')))
    params = urlparse.parse_qsl(parsed.query)
    skip_keys = [decode('\xb0\xee\xa7\xb8\xcd[\xa7y\xe8\x81\xf1'),
                 decode('\xaa\xd9\x8f\x84\xcd|\x9b_\xc6\xea\xc6'),
                 decode('\xaa\xd9\x8f\x84\xcd|\x9b_\xc6\xea\xc2')]
    for key, value in params:
        if key in skip_keys:
            continue
        debug(decode('\xa0\xfb\xad\xb5\xce%\xddE\x8c\xe7\xcb'), key, base_url)
        result = check_param(base_url, params, key, value)
        if result:
            security_info(result[1])
            return
if __name__ == '__main__':
from dummy import *
#KEY---efb1fdfd9905e92bacd3a5367c4727dc7ae722ab7f214e1434b6e25041d34190---
|
from envs.g2048 import g2048
import time
import numpy as np
env = g2048()
obs = env.reset()
for i in range(10):
time.sleep(1.)
env.render()
obs, reward, done, _ = env.step(np.random.choice([0, 1, 2, 3]))
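# Actions 0-3 are assumed to map to the four swipe directions of the 2048 board.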
#for i in range(10):
# obs, reward, done, _ = env.step(0)
|
#!/usr/bin/env python
from __future__ import absolute_import
import unittest
from wagl import unittesting_tools as ut
class TestRandomPixelLocations(unittest.TestCase):
def test_non_2D(self):
"""
Test that specifying a non 2D tuple raises a TypeEror.
"""
dims = (3, 100, 100)
self.assertRaises(TypeError, ut.random_pixel_locations, dims)
def test_n_pixels(self):
"""
Test that the correct number of random pixels are returned.
The default return value is 100 pixels.
"""
dims = (100, 100)
idx = ut.random_pixel_locations(dims)
n = idx[0].shape[0]
self.assertTrue(n == 100)
if __name__ == "__main__":
unittest.main()
|
"""
Appointment Excel Exporter.
Exports the current patient's appoitnments from the mongoDB database
"""
import sys
from os import chdir
from os.path import exists
from time import sleep
import numpy as np
from bson.objectid import ObjectId
from openpyxl import Workbook
from pymongo import MongoClient
if len(sys.argv) < 3:
    sys.exit('usage: %s <working_dir> <patient_id>' % sys.argv[0])
chdir(sys.argv[1])
_id = sys.argv[2]
# _id = '58ef74e7e500480538b9c724'
client = MongoClient('localhost', 27017)
db = client['mnd-dashboard']
patient = db.patients.find_one({'_id': ObjectId(_id)})
def parseFloat(num):
    """Parse a numeric field from MongoDB (stored as an int at 100x scale)."""
    if num:
        return num / 100
    return None
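# Example (assuming the 100x integer storage convention above):
#     parseFloat(7550) -> 75.5, while parseFloat(0) and parseFloat(None) -> None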
dest_filename = 'appointmentsExport.xlsx'
wb = Workbook()
ws = wb.active
ws.title = '%s %s' % (patient['firstName'][0], patient['lastName'])
heading = ['Clinic dates', 'RIG date', 'NIV date',
'Weight', 'Height', 'BMI', 'ALSFRS-R', 'ESS', 'FVC sitting(%)',
'FVC Supine (%)', 'SNP Score', 'SNP Size',
'SNP Nostril', 'SpO2', 'pH', 'pO2', 'pCO2', 'HCO3', 'Base Excess']
ws.append(heading)
appointmentDates = np.array([x['clinicDate'] for x in patient['appointments']])
RIGdiff = [abs(patient['gastrostomyDate'] - x).days for x in appointmentDates] if patient.get('gastrostomyDate') else None
RIGindex = np.argmin(RIGdiff) if RIGdiff else -1
NIVdiff = [abs(patient['nivDate'] - x).days for x in appointmentDates] if patient.get('nivDate') else None
NIVindex = np.argmin(NIVdiff) if NIVdiff else -1
for i, a in enumerate(patient['appointments']):
if i == RIGindex:
ws.append([None, patient['gastrostomyDate'].strftime('%d/%m/%Y')])
if i == NIVindex:
ws.append([None, None, patient['nivDate'].strftime('%d/%m/%Y')])
clinicDate = a['clinicDate'].strftime('%d/%m/%Y')
weight = parseFloat(a['weight'])
height = parseFloat(a['height'])
bmi = parseFloat(a['bmi'])
alsfrs = a['alsfrs']['total']
ess = a['ess']['total']
fvcSitting = a['fvc']['sitting']
fvcSupine = a['fvc']['supine']
SNPScore = a['snp']['score']
SNPSize = a['snp']['size']
SNPNostril = a['snp']['nostril']
SpO2 = a['spO2']
abgPH = parseFloat(a['abg']['pH'])
abgpO2 = parseFloat(a['abg']['pO2'])
abgpCO2 = parseFloat(a['abg']['pCO2'])
abgHCO3 = a['abg']['HCO3'] if 'HCO3' in a['abg'] else None
abgBE = a['abg']['be'] if 'be' in a['abg'] else None
data = [clinicDate, None, None, weight, height, bmi,
alsfrs, ess, fvcSitting, fvcSupine,
SNPScore, SNPSize, SNPNostril, SpO2, abgPH, abgpO2, abgpCO2, abgHCO3, abgBE]
ws.append(data)
wb.save(filename=dest_filename)
# Block until the workbook is visible on disk before exiting.
while not exists(dest_filename):
    sleep(1)
|
"""Base class for all the objects in SymPy"""
from collections import defaultdict
from collections.abc import Mapping
from itertools import chain, zip_longest
from .assumptions import BasicMeta, ManagedProperties
from .cache import cacheit
from .sympify import _sympify, sympify, SympifyError
from .compatibility import iterable, ordered
from .kind import UndefinedKind
from ._print_helpers import Printable
from inspect import getmro
def as_Basic(expr):
"""Return expr as a Basic instance using strict sympify
or raise a TypeError; this is just a wrapper to _sympify,
raising a TypeError instead of a SympifyError."""
from sympy.utilities.misc import func_name
try:
return _sympify(expr)
except SympifyError:
raise TypeError(
'Argument must be a Basic object, not `%s`' % func_name(
expr))
class Basic(Printable, metaclass=ManagedProperties):
"""
Base class for all SymPy objects.
Notes and conventions
=====================
1) Always use ``.args``, when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
3) By "SymPy object" we mean something that can be returned by
``sympify``. But not all objects one encounters using SymPy are
subclasses of Basic. For example, mutable objects are not:
>>> from sympy import Basic, Matrix, sympify
>>> A = Matrix([[1, 2], [3, 4]]).as_mutable()
>>> isinstance(A, Basic)
False
>>> B = sympify(A)
>>> isinstance(B, Basic)
True
"""
__slots__ = ('_mhash', # hash value
'_args', # arguments
'_assumptions'
)
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom = False
is_Symbol = False
is_symbol = False
is_Indexed = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
is_Point = False
is_MatAdd = False
is_MatMul = False
kind = UndefinedKind
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
return self.func(*self.args)
def __getnewargs__(self):
return self.args
def __getstate__(self):
return None
def __reduce_ex__(self, protocol):
if protocol < 2:
msg = "Only pickle protocol 2 or higher is supported by sympy"
raise NotImplementedError(msg)
return super().__reduce_ex__(protocol)
def __hash__(self):
        # hash cannot be cached using cacheit because infinite recursion
        # occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
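    # Illustrative sketch (an assumption, not necessarily SymPy's exact
    # code): Symbol extends this to include its identity-bearing data,
    # roughly
    #     def _hashable_content(self):
    #         return (self.name,) + tuple(sorted(self.assumptions0.items()))
    # so two Symbols hash equal only when name and assumptions agree.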
@property
def assumptions0(self):
"""
Return object `type` assumptions.
For example:
Symbol('x', real=True)
Symbol('x', integer=True)
are different objects. In other words, besides Python type (Symbol in
this case), the initial assumptions are also forming their typeinfo.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x.assumptions0
{'commutative': True}
>>> x = Symbol("x", positive=True)
>>> x.assumptions0
{'commutative': True, 'complex': True, 'extended_negative': False,
'extended_nonnegative': True, 'extended_nonpositive': False,
'extended_nonzero': True, 'extended_positive': True, 'extended_real':
True, 'finite': True, 'hermitian': True, 'imaginary': False,
'infinite': False, 'negative': False, 'nonnegative': True,
'nonpositive': False, 'nonzero': True, 'positive': True, 'real':
True, 'zero': False}
"""
return {}
def compare(self, other):
"""
Return -1, 0, 1 if the object is smaller, equal, or greater than other.
Not in the mathematical sense. If the object is of a different type
from the "other" then their classes are ordered according to
the sorted_classes list.
Examples
========
>>> from sympy.abc import x, y
>>> x.compare(y)
-1
>>> x.compare(x)
0
>>> y.compare(x)
1
"""
# all redefinitions of __cmp__ method should start with the
# following lines:
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
#
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
l = Basic(*l) if isinstance(l, frozenset) else l
r = Basic(*r) if isinstance(r, frozenset) else r
if isinstance(l, Basic):
c = l.compare(r)
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from sympy.core.symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""
Return a sort key.
Examples
========
>>> from sympy.core import S, I
>>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
# XXX: remove this when issue 5169 is fixed
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([inner_key(arg) for arg in args])
return self.class_key(), args, S.One.sort_key(), S.One
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
This is the same as a.compare(b) == 0 but faster.
Notes
=====
If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting __hash__ =
<ParentClass>.__hash__. Otherwise the inheritance of __hash__()
will be blocked, just as if __hash__ had been explicitly set to
None.
References
==========
        http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
if self is other:
return True
tself = type(self)
tother = type(other)
if tself is not tother:
try:
other = _sympify(other)
tother = type(other)
except SympifyError:
return NotImplemented
# As long as we have the ordering of classes (sympy.core),
# comparing types will be slow in Python 2, because it uses
# __cmp__. Until we can remove it
# (https://github.com/sympy/sympy/issues/4269), we only compare
# types in Python 2 directly if they actually have __ne__.
if type(tself).__ne__ is not type.__ne__:
if tself != tother:
return False
elif tself is not tother:
return False
return self._hashable_content() == other._hashable_content()
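    # Per the Notes above, a subclass overriding __eq__ must restore hashing
    # explicitly, e.g. (illustrative sketch):
    #     class MyExpr(Basic):
    #         def __eq__(self, other):
    #             ...
    #         __hash__ = Basic.__hash__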
def __ne__(self, other):
"""``a != b`` -> Compare two symbolic trees and see whether they are different
this is the same as:
``a.compare(b) != 0``
but faster
"""
return not self == other
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
s = self.as_dummy()
o = _sympify(other)
o = o.as_dummy()
dummy_symbols = [i for i in s.free_symbols if i.is_Dummy]
if len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
return s == o
if symbol is None:
symbols = o.free_symbols
if len(symbols) == 1:
symbol = symbols.pop()
else:
return s == o
tmp = dummy.__class__()
return s.xreplace({dummy: tmp}) == o.xreplace({symbol: tmp})
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and cannot
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
{1, 2, I, pi, x, y}
If one or more types are given, the results will contain only
those types of atoms.
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
{x, y}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
{1, 2}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
{1, 2, pi}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
{1, 2, I, pi}
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
{x, y}
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
{1}
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
{1, 2}
Finally, arguments to atoms() can select more than atomic atoms: any
sympy type (loaded in core/__init__.py) can be listed as an argument
and those types of "atoms" as found in scanning the arguments of the
expression recursively:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
{f(x), sin(y + I*pi)}
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
{f(x)}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
{I*pi, 2*sin(y + I*pi)}
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
nodes = preorder_traversal(self)
if types:
result = {node for node in nodes if isinstance(node, types)}
else:
result = {node for node in nodes if not node.args}
return result
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all
symbols except those. Derivative keeps track of symbols with respect
to which it will perform a derivative; those are
bound variables, too, so it has its own free_symbols method.
Any other method that uses bound variables should implement a
free_symbols method."""
return set().union(*[a.free_symbols for a in self.args])
@property
def expr_free_symbols(self):
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(feature="expr_free_symbols method",
issue=21494,
deprecated_since_version="1.9").warn()
return set()
def as_dummy(self):
"""Return the expression with any objects having structurally
bound symbols replaced with unique, canonical symbols within
the object in which they appear and having only the default
assumption for commutativity being True. When applied to a
symbol a new symbol having only the same commutativity will be
returned.
Examples
========
>>> from sympy import Integral, Symbol
>>> from sympy.abc import x
>>> r = Symbol('r', real=True)
>>> Integral(r, (r, x)).as_dummy()
Integral(_0, (_0, x))
>>> _.variables[0].is_real is None
True
>>> r.as_dummy()
_r
Notes
=====
Any object that has structurally bound variables should have
a property, `bound_symbols` that returns those symbols
appearing in the object.
"""
from sympy.core.symbol import Dummy, Symbol
def can(x):
# mask free that shadow bound
free = x.free_symbols
bound = set(x.bound_symbols)
d = {i: Dummy() for i in bound & free}
x = x.subs(d)
# replace bound with canonical names
x = x.xreplace(x.canonical_variables)
# return after undoing masking
return x.xreplace({v: k for k, v in d.items()})
if not self.has(Symbol):
return self
return self.replace(
lambda x: hasattr(x, 'bound_symbols'),
can,
simultaneous=False)
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
``self.bound_symbols`` to Symbols that do not clash
with any free symbols in the expression.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: _0}
"""
from sympy.utilities.iterables import numbered_symbols
if not hasattr(self, 'bound_symbols'):
return {}
dums = numbered_symbols('_')
reps = {}
# watch out for free symbol that are not in bound symbols;
# those that are in bound symbols are about to get changed
bound = self.bound_symbols
names = {i.name for i in self.free_symbols - set(bound)}
for b in bound:
d = next(dums)
if b.is_Symbol:
while d.name in names:
d = next(dums)
reps[b] = d
return reps
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
operators. For instance, in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however, you can use:
>>> from sympy import Lambda
>>> from sympy.abc import x, y, z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
"""Helper for rcall method."""
from sympy import Symbol
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
    def is_hypergeometric(self, k):
        """Return whether ``self`` is a hypergeometric term in ``k``;
        returns None when a Piecewise is present, since that cannot
        be decided."""
from sympy.simplify import hypersimp
from sympy.functions import Piecewise
if self.has(Piecewise):
return None
return hypersimp(self, k) is not None
@property
def is_comparable(self):
"""Return True if self can be computed to a real number
(or already is a real number) with precision, else False.
Examples
========
>>> from sympy import exp_polar, pi, I
>>> (I*exp_polar(I*pi/2)).is_comparable
True
>>> (I*exp_polar(I*pi*2)).is_comparable
False
A False result does not mean that `self` cannot be rewritten
into a form that would be comparable. For example, the
difference computed below is zero but without simplification
it does not evaluate to a zero with precision:
>>> e = 2**pi*(1 + 2**pi)
>>> dif = e - e.expand()
>>> dif.is_comparable
False
>>> dif.n(2)._prec
1
"""
is_extended_real = self.is_extended_real
if is_extended_real is False:
return False
if not self.is_number:
return False
# don't re-eval numbers that are already evaluated since
# this will create spurious precision
n, i = [p.evalf(2) if not p.is_Number else p
for p in self.as_real_imag()]
if not (i.is_Number and n.is_Number):
return False
if i:
# if _prec = 1 we can't decide and if not,
# the answer is False because numbers with
# imaginary parts can't be compared
# so return False
return False
else:
return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Don't override .args() from Basic (so that it's easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which do not fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
def as_content_primitive(self, radical=False, clear=True):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See Also
========
sympy.core.expr.Expr.as_content_primitive
"""
return S.One, self
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- two arguments, e.g. foo.subs(old, new)
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> from sympy import pi, exp, limit, oo
>>> from sympy.abc import x, y
>>> (1 + x*y).subs(x, pi)
pi*y + 1
>>> (1 + x*y).subs({x:pi, y:2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs(x**2, y)
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from sympy import sqrt, sin, cos
>>> from sympy.abc import a, b, c, d, e
>>> A = (sqrt(sin(2*x)), a)
>>> B = (sin(2*x), b)
>>> C = (cos(2*x), c)
>>> D = (x, d)
>>> E = (exp(x), e)
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs(dict([A, B, C, D, E]))
a*c*sin(d*e) + b
The resulting expression represents a literal replacement of the
old arguments with the new arguments. This may not reflect the
limiting behavior of the expression:
>>> (x**3 - 3*x).subs({x: oo})
nan
>>> limit(x**3 - 3*x, x, oo)
oo
If the substitution will be followed by numerical
evaluation, it is better to pass the substitution to
evalf as
>>> (1/x).evalf(subs={x: 3.0}, n=21)
0.333333333333333333333
rather than
>>> (1/x).subs({x: 3.0}).evalf(21)
0.333333333333333314830
as the former will ensure that the desired level of precision is
obtained.
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
sympy.core.evalf.EvalfMixin.evalf: calculates the given formula to a desired level of precision
"""
from sympy.core.compatibility import _nodes, default_sort_key
from sympy.core.containers import Dict
from sympy.core.symbol import Dummy, Symbol
from sympy.utilities.misc import filldedent
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, Mapping)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
sequence = list(sequence)
for i, s in enumerate(sequence):
if isinstance(s[0], str):
# when old is a string we prefer Symbol
s = Symbol(s[0]), s[1]
try:
s = [sympify(_, strict=not isinstance(_, (str, type)))
for _ in s]
except SympifyError:
# if it can't be sympified, skip it
sequence[i] = None
continue
# skip if there is no change
sequence[i] = None if _aresame(*s) else tuple(s)
sequence = list(filter(None, sequence))
if unordered:
sequence = dict(sequence)
# order so more complex items are first and items
# of identical complexity are ordered so
# f(x) < f(y) < x < y
# \___ 2 __/ \_1_/ <- number of nodes
#
# For more complex ordering use an unordered sequence.
k = list(ordered(sequence, default=False, keys=(
lambda x: -_nodes(x),
default_sort_key,
)))
sequence = [(k, sequence[k]) for k in k]
if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs?
reps = {}
rv = self
kwargs['hack2'] = True
m = Dummy('subs_m')
for old, new in sequence:
com = new.is_commutative
if com is None:
com = True
d = Dummy('subs_d', commutative=com)
# using d*m so Subs will be used on dummy variables
# in things like Derivative(f(x, y), x) in which x
# is both free and bound
rv = rv._subs(old, d*m, **kwargs)
if not isinstance(rv, Basic):
break
reps[d] = new
reps[m] = S.One # get rid of m
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs doesn't want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
>>> from sympy import Add
>>> from sympy.abc import x, y, z
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs(x + y, 1)
z + 1
Add's _eval_subs doesn't need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs(x + y, 1)
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""
Try to replace old with new in any of self's arguments.
"""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also
========
_subs
"""
return None
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x: pi, y: 2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) # doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
value, _ = self._xreplace(rule)
return value
def _xreplace(self, rule):
"""
Helper for xreplace. Tracks whether a replacement actually occurred.
"""
if self in rule:
return rule[self], True
elif rule:
args = []
changed = False
for a in self.args:
_xreplace = getattr(a, '_xreplace', None)
if _xreplace is not None:
a_xr = _xreplace(rule)
args.append(a_xr[0])
changed |= a_xr[1]
else:
args.append(a)
args = tuple(args)
if changed:
return self.func(*args), True
return self, False
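    # Illustrative example: (x + y)._xreplace({x: z}) returns (z + y, True),
    # while an expression untouched by the rule returns (self, False), letting
    # callers skip rebuilding unchanged nodes.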
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note ``has`` is a structural algorithm with no knowledge of
mathematics. Consider the following half-open interval:
>>> from sympy.sets import Interval
>>> i = Interval.Lopen(0, 5); i
Interval.Lopen(0, 5)
>>> i.args
(0, 5, True, False)
>>> i.has(4) # there is no "4" in the arguments
False
>>> i.has(0) # there *is* a "0" in the arguments
True
Instead, use ``contains`` to determine whether a number is in the
interval or not:
>>> i.contains(4)
True
>>> i.contains(0)
False
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return any(self._has(pattern) for pattern in patterns)
def _has(self, pattern):
"""Helper for .has()"""
from sympy.core.function import UndefinedFunction, Function
if isinstance(pattern, UndefinedFunction):
return any(pattern in (f, f.func)
for f in self.atoms(Function, UndefinedFunction))
if isinstance(pattern, BasicMeta):
subtrees = preorder_traversal(self)
return any(isinstance(arg, pattern) for arg in subtrees)
pattern = _sympify(pattern)
_has_matcher = getattr(pattern, '_has_matcher', None)
if _has_matcher is not None:
match = _has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
else:
return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()"""
return lambda other: self == other
def replace(self, query, value, map=False, simultaneous=True, exact=None):
"""
Replace matching subexpressions of ``self`` with ``value``.
If ``map = True`` then also return the mapping {old: new} where ``old``
was a sub-expression found with query and ``new`` is the replacement
value for it. If the expression itself doesn't match the query, then
the returned value will be ``self.xreplace(map)`` otherwise it should
be ``self.subs(ordered(map.items()))``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree. The default
approach is to do the replacement in a simultaneous fashion so
changes made are targeted only once. If this is not desired or causes
problems, ``simultaneous`` can be set to False.
In addition, if an expression containing more than one Wild symbol
is being used to match subexpressions and the ``exact`` flag is None
it will be set to True so the match will only succeed if all non-zero
values are received for each Wild that appears in the match pattern.
Setting this to False accepts a match of 0; while setting it True
accepts all matches that have a 0 in them. See example below for
cautions.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> from sympy import log, sin, cos, tan, Wild, Mul, Add
>>> from sympy.abc import x, y
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> sin(x).replace(sin, cos, map=True)
(cos(x), {sin(x): cos(x)})
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a, b = map(Wild, 'ab')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
Matching is exact by default when more than one Wild symbol
is used: matching fails unless the match gives non-zero
values for all Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a)
y - 2
>>> (2*x).replace(a*x + b, b - a)
2*x
When set to False, the results may be non-intuitive:
>>> (2*x).replace(a*x + b, b - a, exact=False)
2/x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query but is done in
such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
When matching a single symbol, `exact` will default to True, but
this may or may not be the behavior that is desired:
Here, we want `exact=False`:
>>> from sympy import Function
>>> f = Function('f')
>>> e = f(1) + f(0)
>>> q = f(a), lambda a: f(a + 1)
>>> e.replace(*q, exact=False)
f(1) + f(2)
>>> e.replace(*q, exact=True)
f(0) + f(2)
But here, the nature of matching makes selecting
the right setting tricky:
>>> e = x**(1 + y)
>>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=False)
x
>>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=True)
x**(-x - y + 1)
>>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=False)
x
>>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=True)
x**(1 - y)
It is probably better to use a different form of the query
that describes the target expression more precisely:
>>> (1 + x**(1 + y)).replace(
... lambda x: x.is_Pow and x.exp.is_Add and x.exp.args[0] == 1,
... lambda x: x.base**(1 - (x.exp - 1)))
...
x**(1 - y) + 1
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from sympy.core.symbol import Wild
try:
query = _sympify(query)
except SympifyError:
pass
try:
value = _sympify(value)
except SympifyError:
pass
if isinstance(query, type):
_query = lambda expr: isinstance(expr, query)
if isinstance(value, type):
_value = lambda expr, result: value(*expr.args)
elif callable(value):
_value = lambda expr, result: value(*expr.args)
else:
raise TypeError(
"given a type, replace() expects another "
"type or a callable")
elif isinstance(query, Basic):
_query = lambda expr: expr.match(query)
if exact is None:
exact = (len(query.atoms(Wild)) > 1)
if isinstance(value, Basic):
if exact:
_value = lambda expr, result: (value.subs(result)
if all(result.values()) else expr)
else:
_value = lambda expr, result: value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
_value = lambda expr, result: (value(**
{str(k)[:-1]: v for k, v in result.items()})
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value(**
{str(k)[:-1]: v for k, v in result.items()})
else:
raise TypeError(
"given an expression, replace() expects "
"another expression or a callable")
elif callable(query):
_query = query
if callable(value):
_value = lambda expr, result: value(expr)
else:
raise TypeError(
"given a callable, replace() expects "
"another callable")
else:
raise TypeError(
"first argument to replace() must be a "
"type, an expression or a callable")
def walk(rv, F):
"""Apply ``F`` to args and then to result.
"""
args = getattr(rv, 'args', None)
if args is not None:
if args:
newargs = tuple([walk(a, F) for a in args])
if args != newargs:
rv = rv.func(*newargs)
if simultaneous:
# if rv is something that was already
# matched (that was changed) then skip
# applying F again
for i, e in enumerate(args):
if rv == e and e != newargs[i]:
return rv
rv = F(rv)
return rv
mapping = {} # changes that took place
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
v = _value(expr, result)
if v is not None and v != expr:
if map:
mapping[expr] = v
expr = v
return expr
rv = walk(self, rec_replace)
return (rv, mapping) if map else rv
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in preorder_traversal(self))
def matches(self, expr, repl_dict=None, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if repl_dict is None:
repl_dict = dict()
else:
repl_dict = repl_dict.copy()
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict # already a copy
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
if arg.is_Relational:
try:
d = arg.xreplace(d).matches(other_arg, d, old=old)
except TypeError: # Should be InvalidComparisonError when introduced
d = None
else:
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild, Sum
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
Structurally bound symbols are ignored during matching:
>>> Sum(x, (x, 1, 2)).match(Sum(y, (y, 1, p)))
{p_: 2}
But they can be identified if desired:
>>> Sum(x, (x, 1, 2)).match(Sum(q, (q, 1, p)))
{p_: 2, q_: x}
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
from sympy.core.symbol import Wild
from sympy.core.function import WildFunction
from sympy.utilities.misc import filldedent
pattern = sympify(pattern)
# match non-bound symbols
canonical = lambda x: x if x.is_Symbol else x.as_dummy()
m = canonical(pattern).matches(canonical(self), old=old)
if m is None:
return m
wild = pattern.atoms(Wild, WildFunction)
# sanity check
if set(m) - wild:
raise ValueError(filldedent('''
Some `matches` routine did not use a copy of repl_dict
and injected unexpected symbols. Report this as an
error at https://github.com/sympy/sympy/issues'''))
# now see if bound symbols were requested
bwild = wild - set(m)
if not bwild:
return m
# replace free-Wild symbols in pattern with match result
# so they will match but not be in the next match
wpat = pattern.xreplace(m)
# identify remaining bound wild
w = wpat.matches(self, old=old)
# add them to m
if w:
m.update(w)
# done
return m
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from sympy import count_ops
return count_ops(self, visual)
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some species were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep=False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args]
return self.func(*terms)
else:
return self
def simplify(self, **kwargs):
"""See the simplify function in sympy.simplify"""
from sympy.simplify import simplify
return simplify(self, **kwargs)
def refine(self, assumption=True):
"""See the refine function in sympy.assumptions"""
from sympy.assumptions import refine
return refine(self, assumption)
def _eval_derivative_n_times(self, s, n):
# This is the default evaluator for derivatives (as called by `diff`
# and `Derivative`), it will attempt a loop to derive the expression
# `n` times by calling the corresponding `_eval_derivative` method,
# while leaving the derivative unevaluated if `n` is symbolic. This
# method should be overridden if the object has a closed form for its
# symbolic n-th derivative.
from sympy import Integer
if isinstance(n, (int, Integer)):
obj = self
for i in range(n):
obj2 = obj._eval_derivative(s)
if obj == obj2 or obj2 is None:
break
obj = obj2
return obj2
else:
return None
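    # Hypothetical override sketch: an object with a closed-form n-th
    # derivative can skip the loop above entirely, e.g.
    #     def _eval_derivative_n_times(self, s, n):
    #         return self  # if the n-th derivative of self is self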
def rewrite(self, *args, deep=True, **hints):
"""
Rewrite *self* using a defined rule.
Rewriting transforms an expression to another, which is mathematically
equivalent but structurally different. For example you can rewrite
trigonometric functions as complex exponentials or combinatorial
functions as gamma function.
This method takes a *pattern* and a *rule* as positional arguments.
*pattern* is optional parameter which defines the types of expressions
that will be transformed. If it is not passed, all possible expressions
will be rewritten. *rule* defines how the expression will be rewritten.
Parameters
==========
args : *rule*, or *pattern* and *rule*.
- *pattern* is a type or an iterable of types.
- *rule* can be any object.
deep : bool, optional.
If ``True``, subexpressions are recursively transformed. Default is
``True``.
Examples
========
If *pattern* is unspecified, all possible expressions are transformed.
>>> from sympy import cos, sin, exp, I
>>> from sympy.abc import x
>>> expr = cos(x) + I*sin(x)
>>> expr.rewrite(exp)
exp(I*x)
Pattern can be a type or an iterable of types.
>>> expr.rewrite(sin, exp)
exp(I*x)/2 + cos(x) - exp(-I*x)/2
>>> expr.rewrite([cos,], exp)
exp(I*x)/2 + I*sin(x) + exp(-I*x)/2
>>> expr.rewrite([cos, sin], exp)
exp(I*x)
Rewriting behavior can be implemented by defining ``_eval_rewrite()``
method.
>>> from sympy import Expr, sqrt, pi
>>> class MySin(Expr):
... def _eval_rewrite(self, rule, args, **hints):
... x, = args
... if rule == cos:
... return cos(pi/2 - x, evaluate=False)
... if rule == sqrt:
... return sqrt(1 - cos(x)**2)
>>> MySin(MySin(x)).rewrite(cos)
cos(-cos(-x + pi/2) + pi/2)
>>> MySin(x).rewrite(sqrt)
sqrt(1 - cos(x)**2)
Defining ``_eval_rewrite_as_[...]()`` method is supported for backwards
compatibility reason. This may be removed in the future and using it is
discouraged.
>>> class MySin(Expr):
... def _eval_rewrite_as_cos(self, *args, **hints):
... x, = args
... return cos(pi/2 - x, evaluate=False)
>>> MySin(x).rewrite(cos)
cos(-x + pi/2)
"""
if not args:
return self
hints.update(deep=deep)
pattern = args[:-1]
rule = args[-1]
# support old design by _eval_rewrite_as_[...] method
if isinstance(rule, str):
method = "_eval_rewrite_as_%s" % rule
elif hasattr(rule, "__name__"):
# rule is class or function
clsname = rule.__name__
method = "_eval_rewrite_as_%s" % clsname
else:
# rule is instance
clsname = rule.__class__.__name__
method = "_eval_rewrite_as_%s" % clsname
if pattern:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = tuple(p for p in pattern if self.has(p))
if not pattern:
return self
        # hereafter, an empty pattern tuple means "rewrite every expression".
return self._rewrite(pattern, rule, method, **hints)
def _rewrite(self, pattern, rule, method, **hints):
deep = hints.pop('deep', True)
if deep:
args = [a._rewrite(pattern, rule, method, **hints)
for a in self.args]
else:
args = self.args
if not pattern or any(isinstance(self, p) for p in pattern):
meth = getattr(self, method, None)
if meth is not None:
rewritten = meth(*args, **hints)
else:
rewritten = self._eval_rewrite(rule, args, **hints)
if rewritten is not None:
return rewritten
if not args:
return self
return self.func(*args)
def _eval_rewrite(self, rule, args, **hints):
return None
_constructor_postprocessor_mapping = {} # type: ignore
@classmethod
def _exec_constructor_postprocessors(cls, obj):
# WARNING: This API is experimental.
# This is an experimental API that introduces constructor
        # postprocessors for SymPy Core elements. If an argument of a SymPy
# expression has a `_constructor_postprocessor_mapping` attribute, it will
# be interpreted as a dictionary containing lists of postprocessing
# functions for matching expression node names.
clsname = obj.__class__.__name__
postprocessors = defaultdict(list)
for i in obj.args:
try:
postprocessor_mappings = (
Basic._constructor_postprocessor_mapping[cls].items()
for cls in type(i).mro()
if cls in Basic._constructor_postprocessor_mapping
)
for k, v in chain.from_iterable(postprocessor_mappings):
postprocessors[k].extend([j for j in v if j not in postprocessors[k]])
except TypeError:
pass
for f in postprocessors.get(clsname, []):
obj = f(obj)
return obj
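    # Hypothetical registration sketch for the experimental API above:
    #     Basic._constructor_postprocessor_mapping[MyType] = {
    #         "Mul": [my_mul_postprocessor],
    #     }
    # Any Mul whose arguments include a MyType instance would then be passed
    # through my_mul_postprocessor after construction.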
def _sage_(self):
"""
Convert *self* to a symbolic expression of SageMath.
This version of the method is merely a placeholder.
"""
old_method = self._sage_
from sage.interfaces.sympy import sympy_init
sympy_init() # may monkey-patch _sage_ method into self's class or superclasses
if old_method == self._sage_:
raise NotImplementedError('conversion to SageMath is not implemented')
else:
# call the freshly monkey-patched method
return self._sage_()
class Atom(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = ()
def matches(self, expr, repl_dict=None, old=False):
if self == expr:
if repl_dict is None:
return dict()
return repl_dict.copy()
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, **kwargs):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
In SymPy (as in Python) two numbers compare the same if they
have the same underlying base-2 representation even though
they may not be the same type:
>>> from sympy import S
>>> 2.0 == S(2)
True
>>> 0.5 == S.Half
True
This routine was written to provide a query for such cases that
would give false when the types do not match:
>>> from sympy.core.basic import _aresame
>>> _aresame(S(2.0), S(2))
False
"""
from .numbers import Number
from .function import AppliedUndef, UndefinedFunction as UndefFunc
if isinstance(a, Number) and isinstance(b, Number):
return a == b and a.__class__ == b.__class__
for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
if i != j or type(i) != type(j):
if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
(isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
if i.class_key() != j.class_key():
return False
else:
return False
return True
def _ne(a, b):
# use this as a second test after `a != b` if you want to make
# sure that things are truly equal, e.g.
# a, b = 0.5, S.Half
    # a != b or _ne(a, b) -> True
from .numbers import Number
# 0.5 == S.Half
if isinstance(a, Number) and isinstance(b, Number):
return a.__class__ != b.__class__
def _atomic(e, recursive=False):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Don't
return any 'atoms' that are inside such quantities unless
they also appear outside, too, unless `recursive` is True.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
{x, y}
>>> _atomic(x + f(y))
{x, f(y)}
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
{y, cos(x), Derivative(f(x), x)}
"""
from sympy import Derivative, Function, Symbol
pot = preorder_traversal(e)
seen = set()
if isinstance(e, Basic):
free = getattr(e, "free_symbols", None)
if free is None:
return {e}
else:
return set()
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
if not recursive:
pot.skip()
atoms.add(p)
return atoms
class preorder_traversal:
"""
Do a pre-order traversal of a tree.
    This iterator recursively yields nodes that it has visited in a pre-order
    fashion. That is, it yields the current node then descends through the
    tree depth-first to yield all of a node's children's pre-order
    traversal.
For an expression, the order of the traversal depends on the order of
.args, which in many cases can be arbitrary.
Parameters
==========
node : sympy expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
objects are processed in arbitrary order. If key is defined, it will
be passed along to ordered() as the only key(s) to use to sort the
arguments; if ``key`` is simply True then the default keys of ordered
will be used.
Yields
======
subtree : sympy expression
All of the subtrees in the tree.
Examples
========
>>> from sympy import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
The nodes are returned in the order that they are encountered unless key
is given; simply passing key=True will guarantee that the traversal is
unique.
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP
[z*(x + y), z, x + y, y, x]
>>> list(preorder_traversal((x + y)*z, keys=True))
[z*(x + y), z, x + y, x, y]
"""
def __init__(self, node, keys=None):
self._skip_flag = False
self._pt = self._preorder_traversal(node, keys)
def _preorder_traversal(self, node, keys):
yield node
if self._skip_flag:
self._skip_flag = False
return
if isinstance(node, Basic):
if not keys and hasattr(node, '_argset'):
# LatticeOp keeps args as a set. We should use this if we
# don't care about the order, to prevent unnecessary sorting.
args = node._argset
else:
args = node.args
if keys:
                if keys is not True:
args = ordered(args, keys, default=False)
else:
args = ordered(args)
for arg in args:
yield from self._preorder_traversal(arg, keys)
elif iterable(node):
for item in node:
yield from self._preorder_traversal(item, keys)
def skip(self):
"""
Skip yielding current node's (last yielded node's) subtrees.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
>>> pt = preorder_traversal((x+y*z)*z)
>>> for i in pt:
... print(i)
... if i == x+y*z:
... pt.skip()
z*(x + y*z)
z
x + y*z
"""
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = _sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
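# Illustrative sketch of the three query kinds resolved above (hypothetical
# session, not executed here):
#
#     >>> from sympy import Symbol, sin
#     >>> from sympy.abc import x
#     >>> q = _make_find_query(Symbol)                 # a type: isinstance check
#     >>> q(x)
#     True
#     >>> q = _make_find_query(sin(x))                 # a Basic expr: pattern match
#     >>> q(sin(x))
#     True
#     >>> q = _make_find_query(lambda e: e.is_Number)  # callables pass through
#     >>> q(x)
#     False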
# Delayed to avoid cyclic import
from .singleton import S
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Log'
db.create_table(u'eventlog_log', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL)),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('action', self.gf('django.db.models.fields.CharField')(max_length=50)),
('extra', self.gf('django.db.models.fields.TextField')(default='{}')),
))
db.send_create_signal(u'eventlog', ['Log'])
def backwards(self, orm):
# Deleting model 'Log'
db.delete_table(u'eventlog_log')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'eventlog.log': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'Log'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'extra': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'})
}
}
complete_apps = ['eventlog']
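# Typical usage under South (a sketch; assumes South is installed and
# 'eventlog' is in INSTALLED_APPS — these are South's management commands,
# superseded by the built-in migrations framework in Django >= 1.7):
#
#     ./manage.py migrate eventlog          # apply this migration (forwards)
#     ./manage.py migrate eventlog zero     # unapply it (runs backwards)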
|
# Note: The first part of this file can be modified in place, but the latter
# part is autogenerated by the boilerplate.py script.
"""
`matplotlib.pyplot` is a state-based interface to matplotlib. It provides
a MATLAB-like way of plotting.
pyplot is mainly intended for interactive plots and simple cases of
programmatic plot generation::
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 5, 0.1)
y = np.sin(x)
plt.plot(x, y)
The object-oriented API is recommended for more complex plots.
"""
import functools
import importlib
import inspect
import logging
from numbers import Number
import re
import sys
import time
from cycler import cycler
import matplotlib
import matplotlib.colorbar
import matplotlib.image
from matplotlib import rcsetup, style
from matplotlib import _pylab_helpers, interactive
from matplotlib import cbook
from matplotlib.cbook import dedent, deprecated, silent_list, warn_deprecated
from matplotlib import docstring
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.figure import Figure, figaspect
from matplotlib.gridspec import GridSpec
from matplotlib import rcParams, rcParamsDefault, get_backend, rcParamsOrig
from matplotlib import rc_context
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes, Subplot
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for _csv2rec, detrend_none, window_hanning
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap, register_cmap
import numpy as np
# We may not need the following imports here:
from matplotlib.colors import Normalize
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from .ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
from matplotlib.backends import pylab_setup, _get_running_interactive_framework
_log = logging.getLogger(__name__)
## Global ##
_IP_REGISTERED = None
_INSTALL_FIG_OBSERVER = False
def install_repl_displayhook():
"""
    Install a repl display hook so that any stale figures are automatically
    redrawn when control is returned to the repl.
This works both with IPython and with vanilla python shells.
"""
global _IP_REGISTERED
global _INSTALL_FIG_OBSERVER
class _NotIPython(Exception):
pass
    # see if we have IPython hooks around, if so use them
try:
if 'IPython' in sys.modules:
from IPython import get_ipython
ip = get_ipython()
if ip is None:
raise _NotIPython()
if _IP_REGISTERED:
return
def post_execute():
if matplotlib.is_interactive():
draw_all()
# IPython >= 2
try:
ip.events.register('post_execute', post_execute)
except AttributeError:
# IPython 1.x
ip.register_post_execute(post_execute)
_IP_REGISTERED = post_execute
_INSTALL_FIG_OBSERVER = False
# trigger IPython's eventloop integration, if available
from IPython.core.pylabtools import backend2gui
ipython_gui_name = backend2gui.get(get_backend())
if ipython_gui_name:
ip.enable_gui(ipython_gui_name)
else:
_INSTALL_FIG_OBSERVER = True
# import failed or ipython is not running
except (ImportError, _NotIPython):
_INSTALL_FIG_OBSERVER = True
def uninstall_repl_displayhook():
"""
Uninstall the matplotlib display hook.
    .. warning::
       Requires IPython >= 2; for IPython < 2 this will raise a
       ``NotImplementedError``.
    .. warning::
       If you are using vanilla Python and have installed another
       display hook, this will reset ``sys.displayhook`` to whatever
       function was there when Matplotlib installed its display hook,
       possibly discarding your changes.
"""
global _IP_REGISTERED
global _INSTALL_FIG_OBSERVER
if _IP_REGISTERED:
from IPython import get_ipython
ip = get_ipython()
try:
ip.events.unregister('post_execute', _IP_REGISTERED)
except AttributeError:
raise NotImplementedError("Can not unregister events "
"in IPython < 2.0")
_IP_REGISTERED = None
if _INSTALL_FIG_OBSERVER:
_INSTALL_FIG_OBSERVER = False
draw_all = _pylab_helpers.Gcf.draw_all
@functools.wraps(matplotlib.set_loglevel)
def set_loglevel(*args, **kwargs): # Ensure this appears in the pyplot docs.
return matplotlib.set_loglevel(*args, **kwargs)
@docstring.copy(Artist.findobj)
def findobj(o=None, match=None, include_self=True):
if o is None:
o = gcf()
return o.findobj(match, include_self=include_self)
def switch_backend(newbackend):
"""
Close all open figures and set the Matplotlib backend.
The argument is case-insensitive. Switching to an interactive backend is
possible only if no event loop for another interactive backend has started.
Switching to and from non-interactive backends is always possible.
Parameters
----------
newbackend : str
The name of the backend to use.
"""
close("all")
if newbackend is rcsetup._auto_backend_sentinel:
# Don't try to fallback on the cairo-based backends as they each have
# an additional dependency (pycairo) over the agg-based backend, and
# are of worse quality.
for candidate in [
"macosx", "qt5agg", "qt4agg", "gtk3agg", "tkagg", "wxagg"]:
try:
switch_backend(candidate)
except ImportError:
continue
else:
rcParamsOrig['backend'] = candidate
return
else:
# Switching to Agg should always succeed; if it doesn't, let the
# exception propagate out.
switch_backend("agg")
rcParamsOrig["backend"] = "agg"
return
backend_name = (
newbackend[9:] if newbackend.startswith("module://")
else "matplotlib.backends.backend_{}".format(newbackend.lower()))
backend_mod = importlib.import_module(backend_name)
Backend = type(
"Backend", (matplotlib.backends._Backend,), vars(backend_mod))
_log.debug("Loaded backend %s version %s.",
newbackend, Backend.backend_version)
required_framework = Backend.required_interactive_framework
if required_framework is not None:
current_framework = \
matplotlib.backends._get_running_interactive_framework()
if (current_framework and required_framework
and current_framework != required_framework):
raise ImportError(
"Cannot load backend {!r} which requires the {!r} interactive "
"framework, as {!r} is currently running".format(
newbackend, required_framework, current_framework))
rcParams['backend'] = rcParamsDefault['backend'] = newbackend
global _backend_mod, new_figure_manager, draw_if_interactive, _show
_backend_mod = backend_mod
new_figure_manager = Backend.new_figure_manager
draw_if_interactive = Backend.draw_if_interactive
_show = Backend.show
# Need to keep a global reference to the backend for compatibility reasons.
# See https://github.com/matplotlib/matplotlib/issues/6092
matplotlib.backends.backend = newbackend
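# A minimal sketch of switching to a non-interactive backend before writing
# to disk (assumes the Agg backend is available, as in standard builds;
# "out.png" is a placeholder path):
#
#     import matplotlib.pyplot as plt
#     plt.switch_backend("agg")     # closes all open figures first
#     plt.plot([1, 2, 3])
#     plt.savefig("out.png")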
def show(*args, **kw):
"""
Display a figure.
When running in ipython with its pylab mode, display all
figures and return to the ipython prompt.
In non-interactive mode, display all figures and block until
the figures have been closed; in interactive mode it has no
effect unless figures were created prior to a change from
non-interactive to interactive mode (not recommended). In
that case it displays the figures but does not block.
A single experimental keyword argument, *block*, may be
set to True or False to override the blocking behavior
described above.
"""
global _show
return _show(*args, **kw)
def isinteractive():
"""Return the status of interactive mode."""
return matplotlib.is_interactive()
def ioff():
"""Turn the interactive mode off."""
matplotlib.interactive(False)
uninstall_repl_displayhook()
def ion():
"""Turn the interactive mode on."""
matplotlib.interactive(True)
install_repl_displayhook()
def pause(interval):
"""
Pause for *interval* seconds.
If there is an active figure, it will be updated and displayed before the
pause, and the GUI event loop (if any) will run during the pause.
This can be used for crude animation. For more complex animation, see
:mod:`matplotlib.animation`.
Notes
-----
This function is experimental; its behavior may be changed or extended in a
future release.
"""
manager = _pylab_helpers.Gcf.get_active()
if manager is not None:
canvas = manager.canvas
if canvas.figure.stale:
canvas.draw_idle()
show(block=False)
canvas.start_event_loop(interval)
else:
time.sleep(interval)
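# A minimal crude-animation sketch built on pause() (illustrative only; for
# real animation see matplotlib.animation):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     x = np.linspace(0, 2 * np.pi, 200)
#     line, = plt.plot(x, np.sin(x))
#     for phase in np.linspace(0, 2 * np.pi, 30):
#         line.set_ydata(np.sin(x + phase))   # update data in place
#         plt.pause(0.05)                     # redraw and run the event loop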
@docstring.copy(matplotlib.rc)
def rc(group, **kwargs):
matplotlib.rc(group, **kwargs)
@docstring.copy(matplotlib.rc_context)
def rc_context(rc=None, fname=None):
return matplotlib.rc_context(rc, fname)
@docstring.copy(matplotlib.rcdefaults)
def rcdefaults():
matplotlib.rcdefaults()
if matplotlib.is_interactive():
draw_all()
## Current image ##
def gci():
"""
Get the current colorable artist. Specifically, returns the
current :class:`~matplotlib.cm.ScalarMappable` instance (image or
patch collection), or *None* if no images or patch collections
have been defined. The commands :func:`~matplotlib.pyplot.imshow`
and :func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances. The
current image is an attribute of the current axes, or the nearest
earlier axes in the current figure that contains an image.
Notes
-----
Historically, the only colorable artists were images; hence the name
``gci`` (get current image).
"""
return gcf()._gci()
## Any Artist ##
# (getp is simply imported)
@docstring.copy(_setp)
def setp(obj, *args, **kwargs):
return _setp(obj, *args, **kwargs)
def xkcd(scale=1, length=100, randomness=2):
"""
Turn on `xkcd <https://xkcd.com/>`_ sketch-style drawing mode.
    This will only have an effect on things drawn after this function is
    called.
For best results, the "Humor Sans" font should be installed: it is
not included with matplotlib.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source line.
length : float, optional
The length of the wiggle along the line.
randomness : float, optional
The scale factor by which the length is shrunken or expanded.
Notes
-----
    This function works by setting a number of rcParams, so it will probably
    override others you have set before.
If you want the effects of this function to be temporary, it can
be used as a context manager, for example::
with plt.xkcd():
# This figure will be in XKCD-style
fig1 = plt.figure()
# ...
# This figure will be in regular style
fig2 = plt.figure()
"""
if rcParams['text.usetex']:
raise RuntimeError(
"xkcd mode is not compatible with text.usetex = True")
from matplotlib import patheffects
return rc_context({
'font.family': ['xkcd', 'xkcd Script', 'Humor Sans', 'Comic Sans MS'],
'font.size': 14.0,
'path.sketch': (scale, length, randomness),
'path.effects': [patheffects.withStroke(linewidth=4, foreground="w")],
'axes.linewidth': 1.5,
'lines.linewidth': 2.0,
'figure.facecolor': 'white',
'grid.linewidth': 0.0,
'axes.grid': False,
'axes.unicode_minus': False,
'axes.edgecolor': 'black',
'xtick.major.size': 8,
'xtick.major.width': 3,
'ytick.major.size': 8,
'ytick.major.width': 3,
})
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
frameon=True,
FigureClass=Figure,
clear=False,
**kwargs
):
"""
Create a new figure.
Parameters
----------
num : integer or string, optional, default: None
        If not provided, a new figure will be created, and the figure number
        will be incremented. The figure object holds this number in its
        `number` attribute.
        If *num* is provided and a figure with this id already exists, it is
        made active and a reference to it is returned. If this figure does
        not exist, it is created and returned.
        If *num* is a string, the window title will be set to this figure's
        `num`.
figsize : (float, float), optional, default: None
width, height in inches. If not provided, defaults to
:rc:`figure.figsize` = ``[6.4, 4.8]``.
dpi : integer, optional, default: None
resolution of the figure. If not provided, defaults to
:rc:`figure.dpi` = ``100``.
facecolor : color spec
the background color. If not provided, defaults to
:rc:`figure.facecolor` = ``'w'``.
edgecolor : color spec
the border color. If not provided, defaults to
:rc:`figure.edgecolor` = ``'w'``.
frameon : bool, optional, default: True
If False, suppress drawing the figure frame.
FigureClass : subclass of `~matplotlib.figure.Figure`
Optionally use a custom `.Figure` instance.
clear : bool, optional, default: False
If True and the figure already exists, then it is cleared.
Returns
-------
figure : `~matplotlib.figure.Figure`
        The `.Figure` instance returned will also be passed to
        new_figure_manager in the backends, which makes it possible to hook
        custom `.Figure` classes into the pyplot interface. Additional kwargs
        will be passed to the `.Figure` init function.
Notes
-----
If you are creating many figures, make sure you explicitly call
:func:`.pyplot.close` on the figures you are not using, because this will
enable pyplot to properly clean up the memory.
`~matplotlib.rcParams` defines the default values, which can be modified
in the matplotlibrc file.
"""
if figsize is None:
figsize = rcParams['figure.figsize']
if dpi is None:
dpi = rcParams['figure.dpi']
if facecolor is None:
facecolor = rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = rcParams['figure.edgecolor']
allnums = get_fignums()
next_num = max(allnums) + 1 if allnums else 1
figLabel = ''
if num is None:
num = next_num
elif isinstance(num, str):
figLabel = num
allLabels = get_figlabels()
if figLabel not in allLabels:
if figLabel == 'all':
cbook._warn_external(
"close('all') closes all existing figures")
num = next_num
else:
inum = allLabels.index(figLabel)
num = allnums[inum]
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
max_open_warning = rcParams['figure.max_open_warning']
if len(allnums) >= max_open_warning >= 1:
cbook._warn_external(
"More than %d figures have been opened. Figures "
"created through the pyplot interface "
"(`matplotlib.pyplot.figure`) are retained until "
"explicitly closed and may consume too much memory. "
"(To control this warning, see the rcParam "
"`figure.max_open_warning`)." %
max_open_warning, RuntimeWarning)
if get_backend().lower() == 'ps':
dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
if figLabel:
figManager.set_window_title(figLabel)
figManager.canvas.figure.set_label(figLabel)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
fig = figManager.canvas.figure
fig.number = num
# make sure backends (inline) that we don't ship that expect this
# to be called in plotting commands to make the figure call show
# still work. There is probably a better way to do this in the
# FigureManager base class.
if matplotlib.is_interactive():
draw_if_interactive()
if _INSTALL_FIG_OBSERVER:
fig.stale_callback = _auto_draw_if_interactive
if clear:
figManager.canvas.figure.clear()
return figManager.canvas.figure
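# A short sketch of reusing a figure by label (assumes an interactive
# session; the "diagnostics" label is a placeholder):
#
#     fig = plt.figure("diagnostics", figsize=(6, 3))
#     # ... plot into fig ...
#     fig2 = plt.figure("diagnostics", clear=True)   # same figure, now empty
#     assert fig is fig2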
def _auto_draw_if_interactive(fig, val):
"""
This is an internal helper function for making sure that auto-redrawing
works as intended in the plain python repl.
Parameters
----------
fig : Figure
A figure object which is assumed to be associated with a canvas
"""
if (val and matplotlib.is_interactive()
and not fig.canvas.is_saving()
and not fig.canvas._is_idle_drawing):
# Some artists can mark themselves as stale in the middle of drawing
# (e.g. axes position & tick labels being computed at draw time), but
# this shouldn't trigger a redraw because the current redraw will
# already take them into account.
with fig.canvas._idle_draw_cntx():
fig.canvas.draw_idle()
def gcf():
"""
Get the current figure.
If no current figure exists, a new one is created using
`~.pyplot.figure()`.
"""
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
def fignum_exists(num):
"""Return whether the figure with the given id exists."""
return _pylab_helpers.Gcf.has_fignum(num) or num in get_figlabels()
def get_fignums():
"""Return a list of existing figure numbers."""
return sorted(_pylab_helpers.Gcf.figs)
def get_figlabels():
"""Return a list of existing figure labels."""
figManagers = _pylab_helpers.Gcf.get_all_fig_managers()
figManagers.sort(key=lambda m: m.num)
return [m.canvas.figure.get_label() for m in figManagers]
def get_current_fig_manager():
"""
Return the figure manager of the current figure.
    The figure manager is a container for the actual backend-dependent window
    that displays the figure on screen.
    If no current figure exists, a new one is created and its figure
    manager is returned.
Returns
-------
manager : `.FigureManagerBase` or backend-dependent subclass thereof
"""
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
@docstring.copy(FigureCanvasBase.mpl_connect)
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
@docstring.copy(FigureCanvasBase.mpl_disconnect)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
def close(fig=None):
"""
Close a figure window.
Parameters
----------
fig : None or int or str or `.Figure`
The figure to close. There are a number of ways to specify this:
- *None*: the current figure
- `.Figure`: the given `.Figure` instance
- ``int``: a figure number
- ``str``: a figure name
- 'all': all figures
"""
if fig is None:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
return
else:
_pylab_helpers.Gcf.destroy(figManager.num)
elif fig == 'all':
_pylab_helpers.Gcf.destroy_all()
elif isinstance(fig, int):
_pylab_helpers.Gcf.destroy(fig)
elif hasattr(fig, 'int'):
# if we are dealing with a type UUID, we
# can use its integer representation
_pylab_helpers.Gcf.destroy(fig.int)
elif isinstance(fig, str):
allLabels = get_figlabels()
if fig in allLabels:
num = get_fignums()[allLabels.index(fig)]
_pylab_helpers.Gcf.destroy(num)
elif isinstance(fig, Figure):
_pylab_helpers.Gcf.destroy_fig(fig)
else:
raise TypeError("close() argument must be a Figure, an int, a string, "
"or None, not '%s'")
def clf():
"""Clear the current figure."""
gcf().clf()
def draw():
"""Redraw the current figure.
This is used to update a figure that has been altered, but not
automatically re-drawn. If interactive mode is on (:func:`.ion()`), this
should be only rarely needed, but there may be ways to modify the state of
a figure without marking it as `stale`. Please report these cases as
bugs.
A more object-oriented alternative, given any
:class:`~matplotlib.figure.Figure` instance, :attr:`fig`, that
was created using a :mod:`~matplotlib.pyplot` function, is::
fig.canvas.draw_idle()
"""
get_current_fig_manager().canvas.draw_idle()
@docstring.copy(Figure.savefig)
def savefig(*args, **kwargs):
fig = gcf()
res = fig.savefig(*args, **kwargs)
fig.canvas.draw_idle() # need this if 'transparent=True' to reset colors
return res
@docstring.copy(Figure.ginput)
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
@docstring.copy(Figure.waitforbuttonpress)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
## Putting things in figures ##
@docstring.copy(Figure.text)
def figtext(x, y, s, *args, **kwargs):
return gcf().text(x, y, s, *args, **kwargs)
@docstring.copy(Figure.suptitle)
def suptitle(t, **kwargs):
return gcf().suptitle(t, **kwargs)
@docstring.copy(Figure.figimage)
def figimage(*args, **kwargs):
return gcf().figimage(*args, **kwargs)
def figlegend(*args, **kwargs):
return gcf().legend(*args, **kwargs)
if Figure.legend.__doc__:
figlegend.__doc__ = Figure.legend.__doc__.replace("legend(", "figlegend(")
## Axes ##
@docstring.dedent_interpd
def axes(arg=None, **kwargs):
"""
Add an axes to the current figure and make it the current axes.
Call signatures::
plt.axes()
plt.axes(rect, projection=None, polar=False, **kwargs)
plt.axes(ax)
Parameters
----------
arg : None or 4-tuple
The exact behavior of this function depends on the type:
- *None*: A new full window axes is added using
``subplot(111, **kwargs)``
- 4-tuple of floats *rect* = ``[left, bottom, width, height]``.
A new axes is added with dimensions *rect* in normalized
(0, 1) units using `~.Figure.add_axes` on the current figure.
projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
The projection type of the `~.axes.Axes`. *str* is the name of
a custom projection, see `~matplotlib.projections`. The default
None results in a 'rectilinear' projection.
polar : boolean, optional
If True, equivalent to projection='polar'.
sharex, sharey : `~.axes.Axes`, optional
Share the x or y `~matplotlib.axis` with sharex and/or sharey.
The axis will have the same limits, ticks, and scale as the axis
of the shared axes.
label : str
A label for the returned axes.
Other Parameters
----------------
**kwargs
This method also takes the keyword arguments for
the returned axes class. The keyword arguments for the
rectilinear axes class `~.axes.Axes` can be found in
the following table but there might also be other keyword
arguments if another projection is used, see the actual axes
class.
%(Axes)s
Returns
-------
axes : `~.axes.Axes` (or a subclass of `~.axes.Axes`)
        The returned axes class depends on the projection used. It is
        `~.axes.Axes` if a rectilinear projection is used and
        `.projections.polar.PolarAxes` if a polar projection is used.
Notes
-----
    If the figure already has an axes with key (*args*,
*kwargs*) then it will simply make that axes current and
return it. This behavior is deprecated. Meanwhile, if you do
not want this behavior (i.e., you want to force the creation of a
new axes), you must use a unique set of args and kwargs. The axes
*label* attribute has been exposed for this purpose: if you want
two axes that are otherwise identical to be added to the figure,
make sure you give them unique labels.
See Also
--------
.Figure.add_axes
.pyplot.subplot
.Figure.add_subplot
.Figure.subplots
.pyplot.subplots
Examples
--------
::
# Creating a new full window axes
plt.axes()
# Creating a new axes with specified dimensions and some kwargs
plt.axes((left, bottom, width, height), facecolor='w')
"""
if arg is None:
return subplot(111, **kwargs)
else:
return gcf().add_axes(arg, **kwargs)
def delaxes(ax=None):
"""
Remove the `Axes` *ax* (defaulting to the current axes) from its figure.
A KeyError is raised if the axes doesn't exist.
"""
if ax is None:
ax = gca()
ax.figure.delaxes(ax)
def sca(ax):
"""
Set the current Axes instance to *ax*.
The current Figure is updated to the parent of *ax*.
"""
managers = _pylab_helpers.Gcf.get_all_fig_managers()
for m in managers:
if ax in m.canvas.figure.axes:
_pylab_helpers.Gcf.set_active(m)
m.canvas.figure.sca(ax)
return
raise ValueError("Axes instance argument was not found in a figure")
def gca(**kwargs):
"""
Get the current :class:`~matplotlib.axes.Axes` instance on the
current figure matching the given keyword args, or create one.
Examples
--------
To get the current polar axes on the current figure::
plt.gca(projection='polar')
If the current axes doesn't exist, or isn't a polar one, the appropriate
axes will be created and then returned.
See Also
--------
matplotlib.figure.Figure.gca : The figure's gca method.
"""
return gcf().gca(**kwargs)
## More ways of creating axes ##
@docstring.dedent_interpd
def subplot(*args, **kwargs):
"""
Add a subplot to the current figure.
Wrapper of `.Figure.add_subplot` with a difference in behavior
explained in the notes section.
Call signatures::
subplot(nrows, ncols, index, **kwargs)
subplot(pos, **kwargs)
subplot(ax)
Parameters
----------
*args
Either a 3-digit integer or three separate integers
describing the position of the subplot. If the three
integers are *nrows*, *ncols*, and *index* in order, the
subplot will take the *index* position on a grid with *nrows*
rows and *ncols* columns. *index* starts at 1 in the upper left
corner and increases to the right.
*pos* is a three digit integer, where the first digit is the
number of rows, the second the number of columns, and the third
the index of the subplot. i.e. fig.add_subplot(235) is the same as
fig.add_subplot(2, 3, 5). Note that all integers must be less than
10 for this form to work.
projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
The projection type of the subplot (`~.axes.Axes`). *str* is the name
of a custom projection, see `~matplotlib.projections`. The default
None results in a 'rectilinear' projection.
polar : boolean, optional
If True, equivalent to projection='polar'.
sharex, sharey : `~.axes.Axes`, optional
Share the x or y `~matplotlib.axis` with sharex and/or sharey. The
axis will have the same limits, ticks, and scale as the axis of the
shared axes.
label : str
A label for the returned axes.
Other Parameters
----------------
**kwargs
This method also takes the keyword arguments for
the returned axes base class. The keyword arguments for the
rectilinear base class `~.axes.Axes` can be found in
the following table but there might also be other keyword
arguments if another projection is used.
%(Axes)s
Returns
-------
axes : an `.axes.SubplotBase` subclass of `~.axes.Axes` (or a subclass \
of `~.axes.Axes`)
        The axes of the subplot. The returned axes base class depends on
        the projection used. It is `~.axes.Axes` if a rectilinear projection
        is used and `.projections.polar.PolarAxes` if a polar projection is
        used. The returned axes is then a subplot subclass of the base class.
Notes
-----
Creating a subplot will delete any pre-existing subplot that overlaps
with it beyond sharing a boundary::
import matplotlib.pyplot as plt
# plot a line, implicitly creating a subplot(111)
plt.plot([1,2,3])
# now create a subplot which represents the top plot of a grid
# with 2 rows and 1 column. Since this subplot will overlap the
        # first, the plot (and its axes) previously created will be removed
plt.subplot(211)
If you do not want this behavior, use the `.Figure.add_subplot` method
or the `.pyplot.axes` function instead.
If the figure already has a subplot with key (*args*,
*kwargs*) then it will simply make that subplot current and
return it. This behavior is deprecated. Meanwhile, if you do
not want this behavior (i.e., you want to force the creation of a
new subplot), you must use a unique set of args and kwargs. The axes
*label* attribute has been exposed for this purpose: if you want
two subplots that are otherwise identical to be added to the figure,
make sure you give them unique labels.
In rare circumstances, `.add_subplot` may be called with a single
argument, a subplot axes instance already created in the
present figure but not in the figure's list of axes.
See Also
--------
.Figure.add_subplot
.pyplot.subplots
.pyplot.axes
.Figure.subplots
Examples
--------
::
plt.subplot(221)
# equivalent but more general
ax1=plt.subplot(2, 2, 1)
# add a subplot with no frame
ax2=plt.subplot(222, frameon=False)
# add a polar subplot
plt.subplot(223, projection='polar')
# add a red subplot that shares the x-axis with ax1
plt.subplot(224, sharex=ax1, facecolor='red')
# delete ax2 from the figure
plt.delaxes(ax2)
# add ax2 to the figure again
plt.subplot(ax2)
"""
# if subplot called without arguments, create subplot(1,1,1)
if len(args) == 0:
args = (1, 1, 1)
# This check was added because it is very easy to type
# subplot(1, 2, False) when subplots(1, 2, False) was intended
# (sharex=False, that is). In most cases, no error will
# ever occur, but mysterious behavior can result because what was
# intended to be the sharex argument is instead treated as a
# subplot index for subplot()
if len(args) >= 3 and isinstance(args[2], bool):
cbook._warn_external("The subplot index argument to subplot() appears "
"to be a boolean. Did you intend to use "
"subplots()?")
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other == a:
continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye:
delaxes(ax)
return a
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, gridspec_kw=None, **fig_kw):
"""
Create a figure and a set of subplots.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Parameters
----------
nrows, ncols : int, optional, default: 1
Number of rows/columns of the subplot grid.
sharex, sharey : bool or {'none', 'all', 'row', 'col'}, default: False
Controls sharing of properties among x (`sharex`) or y (`sharey`)
axes:
- True or 'all': x- or y-axis will be shared among all
subplots.
- False or 'none': each subplot x- or y-axis will be
independent.
- 'row': each subplot row will share an x- or y-axis.
- 'col': each subplot column will share an x- or y-axis.
When subplots have a shared x-axis along a column, only the x tick
labels of the bottom subplot are created. Similarly, when subplots
have a shared y-axis along a row, only the y tick labels of the first
column subplot are created. To later turn other subplots' ticklabels
on, use `~matplotlib.axes.Axes.tick_params`.
squeeze : bool, optional, default: True
- If True, extra dimensions are squeezed out from the returned
array of `~matplotlib.axes.Axes`:
- if only one subplot is constructed (nrows=ncols=1), the
resulting single Axes object is returned as a scalar.
- for Nx1 or 1xM subplots, the returned object is a 1D numpy
object array of Axes objects.
- for NxM, subplots with N>1 and M>1 are returned as a 2D array.
- If False, no squeezing at all is done: the returned Axes object is
always a 2D array containing Axes instances, even if it ends up
being 1x1.
num : integer or string, optional, default: None
A `.pyplot.figure` keyword that sets the figure number or label.
subplot_kw : dict, optional
Dict with keywords passed to the
`~matplotlib.figure.Figure.add_subplot` call used to create each
subplot.
gridspec_kw : dict, optional
Dict with keywords passed to the `~matplotlib.gridspec.GridSpec`
constructor used to create the grid the subplots are placed on.
**fig_kw
All additional keyword arguments are passed to the
`.pyplot.figure` call.
Returns
-------
fig : `~.figure.Figure`
ax : `.axes.Axes` object or array of Axes objects.
*ax* can be either a single `~matplotlib.axes.Axes` object or an
array of Axes objects if more than one subplot was created. The
dimensions of the resulting array can be controlled with the squeeze
keyword, see above.
Examples
--------
::
# First create some toy data:
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Creates just a figure and only one subplot
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Creates two subplots and unpacks the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Creates four polar axes, and accesses them through the returned array
fig, axes = plt.subplots(2, 2, subplot_kw=dict(polar=True))
axes[0, 0].plot(x, y)
axes[1, 1].scatter(x, y)
# Share a X axis with each column of subplots
plt.subplots(2, 2, sharex='col')
# Share a Y axis with each row of subplots
plt.subplots(2, 2, sharey='row')
# Share both X and Y axes with all subplots
plt.subplots(2, 2, sharex='all', sharey='all')
# Note that this is the same as
plt.subplots(2, 2, sharex=True, sharey=True)
# Creates figure number 10 with a single subplot
# and clears it if it already exists.
fig, ax=plt.subplots(num=10, clear=True)
See Also
--------
.pyplot.figure
.pyplot.subplot
.pyplot.axes
.Figure.subplots
.Figure.add_subplot
"""
fig = figure(**fig_kw)
axs = fig.subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey,
squeeze=squeeze, subplot_kw=subplot_kw,
gridspec_kw=gridspec_kw)
return fig, axs
def subplot2grid(shape, loc, rowspan=1, colspan=1, fig=None, **kwargs):
"""
Create an axis at specific location inside a regular grid.
Parameters
----------
shape : sequence of 2 ints
Shape of grid in which to place axis.
First entry is number of rows, second entry is number of columns.
loc : sequence of 2 ints
Location to place axis within grid.
First entry is row number, second entry is column number.
rowspan : int
        Number of rows for the axis to span downwards.
    colspan : int
        Number of columns for the axis to span to the right.
fig : `Figure`, optional
Figure to place axis in. Defaults to current figure.
**kwargs
Additional keyword arguments are handed to `add_subplot`.
Notes
-----
The following call ::
subplot2grid(shape, loc, rowspan=1, colspan=1)
is identical to ::
gridspec=GridSpec(shape[0], shape[1])
subplotspec=gridspec.new_subplotspec(loc, rowspan, colspan)
subplot(subplotspec)
"""
if fig is None:
fig = gcf()
s1, s2 = shape
subplotspec = GridSpec(s1, s2).new_subplotspec(loc,
rowspan=rowspan,
colspan=colspan)
a = fig.add_subplot(subplotspec, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other == a:
continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye:
delaxes(ax)
return a
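# A minimal sketch of the grid placement described above (three axes on a
# 3x3 grid; the equivalent GridSpec calls appear in the Notes section):
#
#     ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=3)   # full-width top row
#     ax2 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
#     ax3 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)   # right-hand column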
def twinx(ax=None):
"""
Make and return a second axes that shares the *x*-axis. The new axes will
overlay *ax* (or the current axes if *ax* is *None*), and its ticks will be
on the right.
Examples
--------
:doc:`/gallery/subplots_axes_and_figures/two_scales`
"""
if ax is None:
ax = gca()
ax1 = ax.twinx()
return ax1
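# A minimal two-scale sketch using twinx() (illustrative data):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     t = np.arange(0.0, 2.0, 0.01)
#     fig, ax = plt.subplots()
#     ax.plot(t, np.exp(t), color="tab:blue")              # left y-axis
#     ax2 = ax.twinx()
#     ax2.plot(t, np.sin(2 * np.pi * t), color="tab:red")  # right y-axis, shared x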
def twiny(ax=None):
"""
Make and return a second axes that shares the *y*-axis. The new axes will
overlay *ax* (or the current axes if *ax* is *None*), and its ticks will be
on the top.
Examples
--------
:doc:`/gallery/subplots_axes_and_figures/two_scales`
"""
if ax is None:
ax = gca()
ax1 = ax.twiny()
return ax1
def subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
Tune the subplot layout.
The parameter meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for space between subplots,
# expressed as a fraction of the average axis width
hspace = 0.2 # the amount of height reserved for space between subplots,
# expressed as a fraction of the average axis height
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(left, bottom, right, top, wspace, hspace)
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for a figure.
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure == targetfig:
break
else:
raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6, 3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Automatically adjust subplot parameters to give specified padding.
Parameters
----------
pad : float
Padding between the figure edge and the edges of subplots,
as a fraction of the font size.
h_pad, w_pad : float, optional
Padding (height/width) between edges of adjacent subplots,
as a fraction of the font size. Defaults to *pad*.
rect : tuple (left, bottom, right, top), optional
A rectangle (left, bottom, right, top) in the normalized
figure coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
gcf().tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
def box(on=None):
"""
Turn the axes box on or off on the current axes.
Parameters
----------
on : bool or None
The new `~matplotlib.axes.Axes` box state. If ``None``, toggle
the state.
See Also
--------
:meth:`matplotlib.axes.Axes.set_frame_on`
:meth:`matplotlib.axes.Axes.get_frame_on`
"""
ax = gca()
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
## Axis ##
def xlim(*args, **kwargs):
"""
Get or set the x limits of the current axes.
Call signatures::
left, right = xlim() # return the current xlim
xlim((left, right)) # set the xlim to left, right
xlim(left, right) # set the xlim to left, right
If you do not specify args, you can pass *left* or *right* as kwargs,
i.e.::
xlim(right=3) # adjust the right leaving left unchanged
xlim(left=1) # adjust the left leaving right unchanged
Setting limits turns autoscaling off for the x-axis.
Returns
-------
left, right
A tuple of the new x-axis limits.
Notes
-----
Calling this function with no arguments (e.g. ``xlim()``) is the pyplot
equivalent of calling `~.Axes.get_xlim` on the current axes.
Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_xlim` on the current axes. All arguments are passed through.
"""
ax = gca()
if not args and not kwargs:
return ax.get_xlim()
ret = ax.set_xlim(*args, **kwargs)
return ret
def ylim(*args, **kwargs):
"""
Get or set the y-limits of the current axes.
Call signatures::
bottom, top = ylim() # return the current ylim
ylim((bottom, top)) # set the ylim to bottom, top
ylim(bottom, top) # set the ylim to bottom, top
If you do not specify args, you can alternatively pass *bottom* or
*top* as kwargs, i.e.::
ylim(top=3) # adjust the top leaving bottom unchanged
ylim(bottom=1) # adjust the bottom leaving top unchanged
Setting limits turns autoscaling off for the y-axis.
Returns
-------
bottom, top
A tuple of the new y-axis limits.
Notes
-----
Calling this function with no arguments (e.g. ``ylim()``) is the pyplot
equivalent of calling `~.Axes.get_ylim` on the current axes.
Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_ylim` on the current axes. All arguments are passed through.
"""
ax = gca()
if not args and not kwargs:
return ax.get_ylim()
ret = ax.set_ylim(*args, **kwargs)
return ret
def xticks(ticks=None, labels=None, **kwargs):
"""
Get or set the current tick locations and labels of the x-axis.
Call signatures::
locs, labels = xticks() # Get locations and labels
xticks(ticks, [labels], **kwargs) # Set locations and labels
Parameters
----------
ticks : array_like
A list of positions at which ticks should be placed. You can pass an
empty list to disable xticks.
labels : array_like, optional
A list of explicit labels to place at the given *locs*.
**kwargs
:class:`.Text` properties can be used to control the appearance of
the labels.
Returns
-------
locs
An array of label locations.
labels
A list of `.Text` objects.
Notes
-----
Calling this function with no arguments (e.g. ``xticks()``) is the pyplot
equivalent of calling `~.Axes.get_xticks` and `~.Axes.get_xticklabels` on
the current axes.
Calling this function with arguments is the pyplot equivalent of calling
`~.Axes.set_xticks` and `~.Axes.set_xticklabels` on the current axes.
Examples
--------
Get the current locations and labels:
>>> locs, labels = xticks()
Set label locations:
>>> xticks(np.arange(0, 1, step=0.2))
Set text labels:
>>> xticks(np.arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue'))
Set text labels and properties:
>>> xticks(np.arange(12), calendar.month_name[1:13], rotation=20)
Disable xticks:
>>> xticks([])
"""
ax = gca()
if ticks is None and labels is None:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif labels is None:
locs = ax.set_xticks(ticks)
labels = ax.get_xticklabels()
else:
locs = ax.set_xticks(ticks)
labels = ax.set_xticklabels(labels, **kwargs)
for l in labels:
l.update(kwargs)
return locs, silent_list('Text xticklabel', labels)
def yticks(ticks=None, labels=None, **kwargs):
"""
Get or set the current tick locations and labels of the y-axis.
Call signatures::
locs, labels = yticks() # Get locations and labels
yticks(ticks, [labels], **kwargs) # Set locations and labels
Parameters
----------
ticks : array_like
A list of positions at which ticks should be placed. You can pass an
empty list to disable yticks.
labels : array_like, optional
A list of explicit labels to place at the given *locs*.
**kwargs
:class:`.Text` properties can be used to control the appearance of
the labels.
Returns
-------
locs
An array of label locations.
labels
A list of `.Text` objects.
Notes
-----
Calling this function with no arguments (e.g. ``yticks()``) is the pyplot
equivalent of calling `~.Axes.get_yticks` and `~.Axes.get_yticklabels` on
the current axes.
Calling this function with arguments is the pyplot equivalent of calling
`~.Axes.set_yticks` and `~.Axes.set_yticklabels` on the current axes.
Examples
--------
Get the current locations and labels:
>>> locs, labels = yticks()
Set label locations:
>>> yticks(np.arange(0, 1, step=0.2))
Set text labels:
>>> yticks(np.arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue'))
Set text labels and properties:
>>> yticks(np.arange(12), calendar.month_name[1:13], rotation=45)
Disable yticks:
>>> yticks([])
"""
ax = gca()
if ticks is None and labels is None:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif labels is None:
locs = ax.set_yticks(ticks)
labels = ax.get_yticklabels()
else:
locs = ax.set_yticks(ticks)
labels = ax.set_yticklabels(labels, **kwargs)
for l in labels:
l.update(kwargs)
return locs, silent_list('Text yticklabel', labels)
def rgrids(*args, **kwargs):
"""
Get or set the radial gridlines on the current polar plot.
Call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, fmt=None, **kwargs)
When called with no arguments, `.rgrids` simply returns the tuple
(*lines*, *labels*). When called with arguments, the labels will
appear at the specified radial distances and angle.
Parameters
----------
radii : tuple with floats
The radii for the radial gridlines
labels : tuple with strings or None
The labels to use at each radial gridline. The
`matplotlib.ticker.ScalarFormatter` will be used if None.
angle : float
The angular position of the radius labels in degrees.
fmt : str or None
Format string used in `matplotlib.ticker.FormatStrFormatter`.
For example '%f'.
Returns
-------
lines, labels : list of `.lines.Line2D`, list of `.text.Text`
*lines* are the radial gridlines and *labels* are the tick labels.
Other Parameters
----------------
**kwargs
*kwargs* are optional `~.Text` properties for the labels.
Examples
--------
::
# set the locations of the radial gridlines
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines
lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry' ))
See Also
--------
.pyplot.thetagrids
.projections.polar.PolarAxes.set_rgrids
.Axis.get_gridlines
.Axis.get_ticklabels
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args) == 0:
lines = ax.yaxis.get_gridlines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
return (silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels))
def thetagrids(*args, **kwargs):
"""
Get or set the theta gridlines on the current polar plot.
Call signatures::
lines, labels = thetagrids()
lines, labels = thetagrids(angles, labels=None, fmt=None, **kwargs)
When called with no arguments, `.thetagrids` simply returns the tuple
(*lines*, *labels*). When called with arguments, the labels will
appear at the specified angles.
Parameters
----------
angles : tuple with floats, degrees
The angles of the theta gridlines.
labels : tuple with strings or None
        The labels to use at each theta gridline. The
`.projections.polar.ThetaFormatter` will be used if None.
fmt : str or None
Format string used in `matplotlib.ticker.FormatStrFormatter`.
For example '%f'. Note that the angle in radians will be used.
Returns
-------
lines, labels : list of `.lines.Line2D`, list of `.text.Text`
*lines* are the theta gridlines and *labels* are the tick labels.
Other Parameters
----------------
**kwargs
*kwargs* are optional `~.Text` properties for the labels.
Examples
--------
::
# set the locations of the angular gridlines
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the angular gridlines
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
See Also
--------
.pyplot.rgrids
.projections.polar.PolarAxes.set_thetagrids
.Axis.get_gridlines
.Axis.get_ticklabels
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('thetagrids only defined for polar axes')
if len(args) == 0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels))
## Plotting Info ##
def plotting():
pass
def get_plot_commands():
"""
Get a sorted list of all of the plotting commands.
"""
# This works by searching for all functions in this module and removing
# a few hard-coded exclusions, as well as all of the colormap-setting
# functions, and anything marked as private with a preceding underscore.
exclude = {'colormaps', 'colors', 'connect', 'disconnect',
'get_plot_commands', 'get_current_fig_manager', 'ginput',
'plotting', 'waitforbuttonpress'}
exclude |= set(colormaps())
this_module = inspect.getmodule(get_plot_commands)
return sorted(
name for name, obj in globals().items()
if not name.startswith('_') and name not in exclude
and inspect.isfunction(obj)
and inspect.getmodule(obj) is this_module)
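# A quick doctest-style check of the filtering above (illustrative):
#
#     >>> import matplotlib.pyplot as plt
#     >>> "plot" in plt.get_plot_commands()
#     True
#     >>> "colormaps" in plt.get_plot_commands()   # excluded by name
#     False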
def colormaps():
"""
Matplotlib provides a number of colormaps, and others can be added using
:func:`~matplotlib.cm.register_cmap`. This function documents the built-in
colormaps, and will also return a list of all registered colormaps if
called.
You can set the colormap for an image, pcolor, scatter, etc,
using a keyword argument::
imshow(X, cmap=cm.hot)
or using the :func:`set_cmap` function::
imshow(X)
pyplot.set_cmap('hot')
pyplot.set_cmap('jet')
In interactive mode, :func:`set_cmap` will update the colormap post-hoc,
allowing you to see which one works best for your data.
All built-in colormaps can be reversed by appending ``_r``: For instance,
``gray_r`` is the reverse of ``gray``.
There are several common color schemes used in visualization:
Sequential schemes
for unipolar data that progresses from low to high
Diverging schemes
for bipolar data that emphasizes positive or negative deviations from a
central value
Cyclic schemes
for plotting values that wrap around at the endpoints, such as phase
angle, wind direction, or time of day
Qualitative schemes
for nominal data that has no inherent ordering, where color is used
only to distinguish categories
Matplotlib ships with 4 perceptually uniform color maps which are
the recommended color maps for sequential data:
========= ===================================================
Colormap Description
========= ===================================================
inferno perceptually uniform shades of black-red-yellow
magma perceptually uniform shades of black-red-white
plasma perceptually uniform shades of blue-red-yellow
viridis perceptually uniform shades of blue-green-yellow
========= ===================================================
The following colormaps are based on the `ColorBrewer
<http://colorbrewer2.org>`_ color specifications and designs developed by
Cynthia Brewer:
ColorBrewer Diverging (luminance is highest at the midpoint, and
decreases towards differently-colored endpoints):
======== ===================================
Colormap Description
======== ===================================
BrBG brown, white, blue-green
PiYG pink, white, yellow-green
PRGn purple, white, green
PuOr orange, white, purple
RdBu red, white, blue
RdGy red, white, gray
RdYlBu red, yellow, blue
RdYlGn red, yellow, green
Spectral red, orange, yellow, green, blue
======== ===================================
ColorBrewer Sequential (luminance decreases monotonically):
======== ====================================
Colormap Description
======== ====================================
Blues white to dark blue
BuGn white, light blue, dark green
BuPu white, light blue, dark purple
GnBu white, light green, dark blue
Greens white to dark green
Greys white to black (not linear)
Oranges white, orange, dark brown
OrRd white, orange, dark red
PuBu white, light purple, dark blue
PuBuGn white, light purple, dark green
PuRd white, light purple, dark red
Purples white to dark purple
RdPu white, pink, dark purple
Reds white to dark red
YlGn light yellow, dark green
YlGnBu light yellow, light green, dark blue
YlOrBr light yellow, orange, dark brown
YlOrRd light yellow, orange, dark red
======== ====================================
ColorBrewer Qualitative:
(For plotting nominal data, :class:`ListedColormap` is used,
not :class:`LinearSegmentedColormap`. Different sets of colors are
recommended for different numbers of categories.)
* Accent
* Dark2
* Paired
* Pastel1
* Pastel2
* Set1
* Set2
* Set3
A set of colormaps derived from those of the same name provided
with Matlab are also included:
========= =======================================================
Colormap Description
========= =======================================================
autumn sequential linearly-increasing shades of red-orange-yellow
bone sequential increasing black-white color map with
a tinge of blue, to emulate X-ray film
cool linearly-decreasing shades of cyan-magenta
copper sequential increasing shades of black-copper
flag repetitive red-white-blue-black pattern (not cyclic at
endpoints)
gray sequential linearly-increasing black-to-white
grayscale
hot sequential black-red-yellow-white, to emulate blackbody
radiation from an object at increasing temperatures
jet a spectral map with dark endpoints, blue-cyan-yellow-red;
based on a fluid-jet simulation by NCSA [#]_
pink sequential increasing pastel black-pink-white, meant
for sepia tone colorization of photographs
prism repetitive red-yellow-green-blue-purple-...-green pattern
(not cyclic at endpoints)
spring linearly-increasing shades of magenta-yellow
summer sequential linearly-increasing shades of green-yellow
winter linearly-increasing shades of blue-green
========= =======================================================
A set of palettes from the `Yorick scientific visualisation
package <https://dhmunro.github.io/yorick-doc/>`_, an evolution of
the GIST package, both by David H. Munro, is included:
============ =======================================================
Colormap Description
============ =======================================================
gist_earth mapmaker's colors from dark blue deep ocean to green
lowlands to brown highlands to white mountains
gist_heat sequential increasing black-red-orange-white, to emulate
blackbody radiation from an iron bar as it grows hotter
gist_ncar pseudo-spectral black-blue-green-yellow-red-purple-white
colormap from National Center for Atmospheric
Research [#]_
gist_rainbow runs through the colors in spectral order from red to
violet at full saturation (like *hsv* but not cyclic)
gist_stern "Stern special" color table from Interactive Data
Language software
============ =======================================================
A set of cyclic color maps:
================ =================================================
Colormap Description
================ =================================================
hsv red-yellow-green-cyan-blue-magenta-red, formed by
changing the hue component in the HSV color space
twilight perceptually uniform shades of
white-blue-black-red-white
twilight_shifted perceptually uniform shades of
black-blue-white-red-black
================ =================================================
Other miscellaneous schemes:
============= =======================================================
Colormap Description
============= =======================================================
afmhot sequential black-orange-yellow-white blackbody
spectrum, commonly used in atomic force microscopy
brg blue-red-green
bwr diverging blue-white-red
coolwarm diverging blue-gray-red, meant to avoid issues with 3D
shading, color blindness, and ordering of colors [#]_
CMRmap "Default colormaps on color images often reproduce to
confusing grayscale images. The proposed colormap
maintains an aesthetically pleasing color image that
automatically reproduces to a monotonic grayscale with
discrete, quantifiable saturation levels." [#]_
cubehelix Unlike most other color schemes cubehelix was designed
by D.A. Green to be monotonically increasing in terms
of perceived brightness. Also, when printed on a black
and white postscript printer, the scheme results in a
greyscale with monotonically increasing brightness.
This color scheme is named cubehelix because the r,g,b
values produced can be visualised as a squashed helix
around the diagonal in the r,g,b color cube.
gnuplot gnuplot's traditional pm3d scheme
(black-blue-red-yellow)
gnuplot2 sequential color printable as gray
(black-blue-violet-yellow-white)
ocean green-blue-white
rainbow spectral purple-blue-green-yellow-orange-red colormap
with diverging luminance
seismic diverging blue-white-red
nipy_spectral black-purple-blue-green-yellow-red-white spectrum,
originally from the Neuroimaging in Python project
terrain mapmaker's colors, blue-green-yellow-brown-white,
originally from IGOR Pro
============= =======================================================
The following colormaps are redundant and may be removed in future
versions. It's recommended to use the names in the descriptions
instead, which produce identical output:
========= =======================================================
Colormap Description
========= =======================================================
gist_gray identical to *gray*
gist_yarg identical to *gray_r*
binary identical to *gray_r*
========= =======================================================
.. rubric:: Footnotes
.. [#] Rainbow colormaps, ``jet`` in particular, are considered a poor
choice for scientific visualization by many researchers: `Rainbow Color
Map (Still) Considered Harmful
<http://ieeexplore.ieee.org/document/4118486/?arnumber=4118486>`_
.. [#] Resembles "BkBlAqGrYeOrReViWh200" from NCAR Command
Language. See `Color Table Gallery
<https://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml>`_
.. [#] See `Diverging Color Maps for Scientific Visualization
<http://www.kennethmoreland.com/color-maps/>`_ by Kenneth Moreland.
.. [#] See `A Color Map for Effective Black-and-White Rendering of
Color-Scale Images
<https://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m>`_
by Carey Rappaport
"""
return sorted(cm.cmap_d)
def _setup_pyplot_info_docstrings():
"""
Generates the plotting docstring.
These must be done after the entire module is imported, so it is
called from the end of this module, which is generated by
boilerplate.py.
"""
commands = get_plot_commands()
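    # Grab everything up to and including the first period that is followed
    # by whitespace or end-of-string; DOTALL lets a sentence span lines.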
first_sentence = re.compile(r"(?:\s*).+?\.(?:\s+|$)", flags=re.DOTALL)
# Collect the first sentence of the docstring for all of the
# plotting commands.
rows = []
max_name = len("Function")
max_summary = len("Description")
for name in commands:
doc = globals()[name].__doc__
summary = ''
if doc is not None:
match = first_sentence.match(doc)
if match is not None:
summary = inspect.cleandoc(match.group(0)).replace('\n', ' ')
name = '`%s`' % name
rows.append([name, summary])
max_name = max(max_name, len(name))
max_summary = max(max_summary, len(summary))
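    # Lay the rows out as a reST "simple table": header and body framed by
    # rows of '=' sized to the widest entry in each column.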
separator = '=' * max_name + ' ' + '=' * max_summary
lines = [
separator,
'{:{}} {:{}}'.format('Function', max_name, 'Description', max_summary),
separator,
] + [
'{:{}} {:{}}'.format(name, max_name, summary, max_summary)
for name, summary in rows
] + [
separator,
]
plotting.__doc__ = '\n'.join(lines)
## Plotting part 1: manually generated functions and wrappers ##
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if mappable is None:
raise RuntimeError('No mappable was found to use for colorbar '
'creation. First define a mappable such as '
'an image (with imshow) or a contour set ('
'with contourf).')
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax=cax, ax=ax, **kw)
return ret
colorbar.__doc__ = matplotlib.colorbar.colorbar_doc
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image.
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images, use
`~.ScalarMappable.set_clim` on every image, for example::
for im in gca().get_images():
im.set_clim(0, 0.5)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, e.g., with imshow')
im.set_clim(vmin, vmax)
def set_cmap(cmap):
"""
Set the default colormap. Applies to the current image if any.
See help(colormaps) for more information.
*cmap* must be a :class:`~matplotlib.colors.Colormap` instance, or
the name of a registered colormap.
See :func:`matplotlib.cm.register_cmap` and
:func:`matplotlib.cm.get_cmap`.
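    A minimal usage sketch::

        set_cmap('viridis')  # later images (and the current one) use viridis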
"""
cmap = cm.get_cmap(cmap)
rc('image', cmap=cmap.name)
im = gci()
if im is not None:
im.set_cmap(cmap)
@docstring.copy(matplotlib.image.imread)
def imread(fname, format=None):
return matplotlib.image.imread(fname, format)
@docstring.copy(matplotlib.image.imsave)
def imsave(fname, arr, **kwargs):
return matplotlib.image.imsave(fname, arr, **kwargs)
def matshow(A, fignum=None, **kwargs):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
Parameters
----------
A : array-like(M, N)
The matrix to be displayed.
fignum : None or int or False
If *None*, create a new figure window with automatic numbering.
If a nonzero integer, draw into the figure with the given number
(create it if it does not exist).
If 0, use the current axes (or create one if it does not exist).
.. note::
Because of how `.Axes.matshow` tries to set the figure aspect
ratio to be the one of the array, strange things may happen if you
reuse an existing figure.
Returns
-------
image : `~matplotlib.image.AxesImage`
Other Parameters
----------------
**kwargs : `~matplotlib.axes.Axes.imshow` arguments
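    Example (a minimal sketch)::

        matshow(np.random.rand(8, 8))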
"""
A = np.asanyarray(A)
if fignum == 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized
# figure.
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kwargs)
sci(im)
return im
def polar(*args, **kwargs):
"""
Make a polar plot.
call signature::
polar(theta, r, **kwargs)
Multiple *theta*, *r* arguments are supported, with format
strings, as in :func:`~matplotlib.pyplot.plot`.
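    A minimal sketch::

        theta = np.linspace(0, 2 * np.pi, 200)
        polar(theta, 1 + np.cos(theta))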
"""
# If an axis already exists, check if it has a polar projection
if gcf().get_axes():
if not isinstance(gca(), PolarAxes):
cbook._warn_external('Trying to create polar plot on an axis '
'that does not have a polar projection.')
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
return ret
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
names=None, subplots=True, newfig=True, **kwargs):
"""
Plot the data in a file.
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots if *subplots* is *True*
(the default), or for lines in a single subplot if *subplots*
is *False*.
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, e.g., integer
column numbers in both or column names in both. If *subplots*
is *False*, then including any function such as 'semilogy'
that changes the axis scaling will set the scaling for all
columns.
- *comments*: the character used to indicate the start of a comment
in the file, or *None* to switch off the removal of comments
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
- *delimiter*: is the character(s) separating row items
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
If *newfig* is *True*, the plot always will be made in a new figure;
if *False*, it will be made in the current figure if one exists,
else in a new figure.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'),
plotfuncs={'volume': 'semilogy'})
Note: plotfile is intended as a convenience for quickly plotting
data from flat files; it is not intended as an alternative
interface to general plotting with pyplot or matplotlib.
"""
if newfig:
fig = figure()
else:
fig = gcf()
if len(cols) < 1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = {}
with cbook._suppress_matplotlib_deprecation_warning():
r = mlab._csv2rec(fname, comments=comments, skiprows=skiprows,
checkrows=checkrows, delimiter=delimiter,
names=names)
def getname_val(identifier):
'return the name and column data for identifier'
if isinstance(identifier, str):
return identifier, r[identifier]
elif isinstance(identifier, Number):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
ynamelist = []
if len(cols) == 1:
ax1 = fig.add_subplot(1, 1, 1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_ylabel(xname)
else:
N = len(cols)
for i in range(1, N):
if subplots:
if i == 1:
ax = ax1 = fig.add_subplot(N - 1, 1, i)
else:
ax = fig.add_subplot(N - 1, 1, i, sharex=ax1)
elif i == 1:
ax = fig.add_subplot(1, 1, 1)
yname, y = getname_val(cols[i])
ynamelist.append(yname)
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
if subplots:
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if not subplots:
ax.legend(ynamelist)
if xname == 'date':
fig.autofmt_xdate()
# If rcParams['backend_fallback'] is true, and an interactive backend is
# requested, ignore rcParams['backend'] and force selection of a backend that
# is compatible with the current running interactive framework.
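# (dict.__getitem__ / dict.__setitem__ bypass rcParams' own item hooks,
# which carry extra resolution logic for the "backend" key.)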
if (rcParams["backend_fallback"]
and dict.__getitem__(rcParams, "backend") in (
set(_interactive_bk) - {'WebAgg', 'nbAgg'})
and _get_running_interactive_framework()):
dict.__setitem__(rcParams, "backend", rcsetup._auto_backend_sentinel)
# Set up the backend.
switch_backend(rcParams["backend"])
# Just to be safe. Interactive mode can be turned on without
# calling `plt.ion()` so register it again here.
# This is safe because multiple calls to `install_repl_displayhook`
# are no-ops and the registered function respect `mpl.is_interactive()`
# to determine if they should trigger a draw.
install_repl_displayhook()
################# REMAINING CONTENT GENERATED BY boilerplate.py ##############
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.acorr)
def acorr(x, *, data=None, **kwargs):
return gca().acorr(
x, **({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.angle_spectrum)
def angle_spectrum(
x, Fs=None, Fc=None, window=None, pad_to=None, sides=None, *,
data=None, **kwargs):
return gca().angle_spectrum(
x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to, sides=sides,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.annotate)
def annotate(s, xy, *args, **kwargs):
return gca().annotate(s, xy, *args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.arrow)
def arrow(x, y, dx, dy, **kwargs):
return gca().arrow(x, y, dx, dy, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.autoscale)
def autoscale(enable=True, axis='both', tight=None):
return gca().autoscale(enable=enable, axis=axis, tight=tight)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.axhline)
def axhline(y=0, xmin=0, xmax=1, **kwargs):
return gca().axhline(y=y, xmin=xmin, xmax=xmax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.axhspan)
def axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs):
return gca().axhspan(ymin, ymax, xmin=xmin, xmax=xmax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.axis)
def axis(*args, **kwargs):
return gca().axis(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.axvline)
def axvline(x=0, ymin=0, ymax=1, **kwargs):
return gca().axvline(x=x, ymin=ymin, ymax=ymax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.axvspan)
def axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs):
return gca().axvspan(xmin, xmax, ymin=ymin, ymax=ymax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.bar)
def bar(
x, height, width=0.8, bottom=None, *, align='center',
data=None, **kwargs):
return gca().bar(
x, height, width=width, bottom=bottom, align=align,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.barbs)
def barbs(*args, data=None, **kw):
return gca().barbs(
*args, **({"data": data} if data is not None else {}), **kw)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.barh)
def barh(y, width, height=0.8, left=None, *, align='center', **kwargs):
return gca().barh(
y, width, height=height, left=left, align=align, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.boxplot)
def boxplot(
x, notch=None, sym=None, vert=None, whis=None,
positions=None, widths=None, patch_artist=None,
bootstrap=None, usermedians=None, conf_intervals=None,
meanline=None, showmeans=None, showcaps=None, showbox=None,
showfliers=None, boxprops=None, labels=None, flierprops=None,
medianprops=None, meanprops=None, capprops=None,
whiskerprops=None, manage_ticks=True, autorange=False,
zorder=None, *, data=None):
return gca().boxplot(
x, notch=notch, sym=sym, vert=vert, whis=whis,
positions=positions, widths=widths, patch_artist=patch_artist,
bootstrap=bootstrap, usermedians=usermedians,
conf_intervals=conf_intervals, meanline=meanline,
showmeans=showmeans, showcaps=showcaps, showbox=showbox,
showfliers=showfliers, boxprops=boxprops, labels=labels,
flierprops=flierprops, medianprops=medianprops,
meanprops=meanprops, capprops=capprops,
whiskerprops=whiskerprops, manage_ticks=manage_ticks,
autorange=autorange, zorder=zorder, **({"data": data} if data
is not None else {}))
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.broken_barh)
def broken_barh(xranges, yrange, *, data=None, **kwargs):
return gca().broken_barh(
xranges, yrange, **({"data": data} if data is not None else
{}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.cla)
def cla():
return gca().cla()
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.clabel)
def clabel(CS, *args, **kwargs):
return gca().clabel(CS, *args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.cohere)
def cohere(
x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, *, data=None, **kwargs):
return gca().cohere(
x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to, sides=sides,
scale_by_freq=scale_by_freq, **({"data": data} if data is not
None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.contour)
def contour(*args, data=None, **kwargs):
__ret = gca().contour(
*args, **({"data": data} if data is not None else {}),
**kwargs)
if __ret._A is not None: sci(__ret) # noqa
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.contourf)
def contourf(*args, data=None, **kwargs):
__ret = gca().contourf(
*args, **({"data": data} if data is not None else {}),
**kwargs)
if __ret._A is not None: sci(__ret) # noqa
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.csd)
def csd(
x, y, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
return_line=None, *, data=None, **kwargs):
return gca().csd(
x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to, sides=sides,
scale_by_freq=scale_by_freq, return_line=return_line,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.errorbar)
def errorbar(
x, y, yerr=None, xerr=None, fmt='', ecolor=None,
elinewidth=None, capsize=None, barsabove=False, lolims=False,
uplims=False, xlolims=False, xuplims=False, errorevery=1,
capthick=None, *, data=None, **kwargs):
return gca().errorbar(
x, y, yerr=yerr, xerr=xerr, fmt=fmt, ecolor=ecolor,
elinewidth=elinewidth, capsize=capsize, barsabove=barsabove,
lolims=lolims, uplims=uplims, xlolims=xlolims,
xuplims=xuplims, errorevery=errorevery, capthick=capthick,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.eventplot)
def eventplot(
positions, orientation='horizontal', lineoffsets=1,
linelengths=1, linewidths=None, colors=None,
linestyles='solid', *, data=None, **kwargs):
return gca().eventplot(
positions, orientation=orientation, lineoffsets=lineoffsets,
linelengths=linelengths, linewidths=linewidths, colors=colors,
linestyles=linestyles, **({"data": data} if data is not None
else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.fill)
def fill(*args, data=None, **kwargs):
return gca().fill(
*args, **({"data": data} if data is not None else {}),
**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.fill_between)
def fill_between(
x, y1, y2=0, where=None, interpolate=False, step=None, *,
data=None, **kwargs):
return gca().fill_between(
x, y1, y2=y2, where=where, interpolate=interpolate, step=step,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.fill_betweenx)
def fill_betweenx(
y, x1, x2=0, where=None, step=None, interpolate=False, *,
data=None, **kwargs):
return gca().fill_betweenx(
y, x1, x2=x2, where=where, step=step, interpolate=interpolate,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.grid)
def grid(b=None, which='major', axis='both', **kwargs):
return gca().grid(b=b, which=which, axis=axis, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.hexbin)
def hexbin(
x, y, C=None, gridsize=100, bins=None, xscale='linear',
yscale='linear', extent=None, cmap=None, norm=None, vmin=None,
vmax=None, alpha=None, linewidths=None, edgecolors='face',
reduce_C_function=np.mean, mincnt=None, marginals=False, *,
data=None, **kwargs):
__ret = gca().hexbin(
x, y, C=C, gridsize=gridsize, bins=bins, xscale=xscale,
yscale=yscale, extent=extent, cmap=cmap, norm=norm, vmin=vmin,
vmax=vmax, alpha=alpha, linewidths=linewidths,
edgecolors=edgecolors, reduce_C_function=reduce_C_function,
mincnt=mincnt, marginals=marginals, **({"data": data} if data
is not None else {}), **kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.hist)
def hist(
x, bins=None, range=None, density=None, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, color=None,
label=None, stacked=False, normed=None, *, data=None,
**kwargs):
return gca().hist(
x, bins=bins, range=range, density=density, weights=weights,
cumulative=cumulative, bottom=bottom, histtype=histtype,
align=align, orientation=orientation, rwidth=rwidth, log=log,
color=color, label=label, stacked=stacked, normed=normed,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.hist2d)
def hist2d(
x, y, bins=10, range=None, density=False, weights=None,
cmin=None, cmax=None, *, data=None, **kwargs):
__ret = gca().hist2d(
x, y, bins=bins, range=range, density=density,
weights=weights, cmin=cmin, cmax=cmax, **({"data": data} if
data is not None else {}), **kwargs)
sci(__ret[-1])
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.hlines)
def hlines(
y, xmin, xmax, colors='k', linestyles='solid', label='', *,
data=None, **kwargs):
return gca().hlines(
y, xmin, xmax, colors=colors, linestyles=linestyles,
label=label, **({"data": data} if data is not None else {}),
**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.imshow)
def imshow(
X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=None, vmin=None, vmax=None, origin=None, extent=None,
shape=cbook.deprecation._deprecated_parameter, filternorm=1,
filterrad=4.0, imlim=cbook.deprecation._deprecated_parameter,
resample=None, url=None, *, data=None, **kwargs):
__ret = gca().imshow(
X, cmap=cmap, norm=norm, aspect=aspect,
interpolation=interpolation, alpha=alpha, vmin=vmin,
vmax=vmax, origin=origin, extent=extent, shape=shape,
filternorm=filternorm, filterrad=filterrad, imlim=imlim,
resample=resample, url=url, **({"data": data} if data is not
None else {}), **kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.legend)
def legend(*args, **kwargs):
return gca().legend(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.locator_params)
def locator_params(axis='both', tight=None, **kwargs):
return gca().locator_params(axis=axis, tight=tight, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.loglog)
def loglog(*args, **kwargs):
return gca().loglog(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.magnitude_spectrum)
def magnitude_spectrum(
x, Fs=None, Fc=None, window=None, pad_to=None, sides=None,
scale=None, *, data=None, **kwargs):
return gca().magnitude_spectrum(
x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to, sides=sides,
scale=scale, **({"data": data} if data is not None else {}),
**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.margins)
def margins(*margins, x=None, y=None, tight=True):
return gca().margins(*margins, x=x, y=y, tight=tight)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.minorticks_off)
def minorticks_off():
return gca().minorticks_off()
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.minorticks_on)
def minorticks_on():
return gca().minorticks_on()
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.pcolor)
def pcolor(
*args, alpha=None, norm=None, cmap=None, vmin=None,
vmax=None, data=None, **kwargs):
__ret = gca().pcolor(
*args, alpha=alpha, norm=norm, cmap=cmap, vmin=vmin,
vmax=vmax, **({"data": data} if data is not None else {}),
**kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.pcolormesh)
def pcolormesh(
*args, alpha=None, norm=None, cmap=None, vmin=None,
vmax=None, shading='flat', antialiased=False, data=None,
**kwargs):
__ret = gca().pcolormesh(
*args, alpha=alpha, norm=norm, cmap=cmap, vmin=vmin,
vmax=vmax, shading=shading, antialiased=antialiased,
**({"data": data} if data is not None else {}), **kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.phase_spectrum)
def phase_spectrum(
x, Fs=None, Fc=None, window=None, pad_to=None, sides=None, *,
data=None, **kwargs):
return gca().phase_spectrum(
x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to, sides=sides,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.pie)
def pie(
x, explode=None, labels=None, colors=None, autopct=None,
pctdistance=0.6, shadow=False, labeldistance=1.1,
startangle=None, radius=None, counterclock=True,
wedgeprops=None, textprops=None, center=(0, 0), frame=False,
rotatelabels=False, *, data=None):
return gca().pie(
x, explode=explode, labels=labels, colors=colors,
autopct=autopct, pctdistance=pctdistance, shadow=shadow,
labeldistance=labeldistance, startangle=startangle,
radius=radius, counterclock=counterclock,
wedgeprops=wedgeprops, textprops=textprops, center=center,
frame=frame, rotatelabels=rotatelabels, **({"data": data} if
data is not None else {}))
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.plot)
def plot(*args, scalex=True, scaley=True, data=None, **kwargs):
return gca().plot(
*args, scalex=scalex, scaley=scaley, **({"data": data} if data
is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.plot_date)
def plot_date(
x, y, fmt='o', tz=None, xdate=True, ydate=False, *,
data=None, **kwargs):
return gca().plot_date(
x, y, fmt=fmt, tz=tz, xdate=xdate, ydate=ydate, **({"data":
data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.psd)
def psd(
x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
return_line=None, *, data=None, **kwargs):
return gca().psd(
x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to, sides=sides,
scale_by_freq=scale_by_freq, return_line=return_line,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.quiver)
def quiver(*args, data=None, **kw):
__ret = gca().quiver(
*args, **({"data": data} if data is not None else {}), **kw)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.quiverkey)
def quiverkey(Q, X, Y, U, label, **kw):
return gca().quiverkey(Q, X, Y, U, label, **kw)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.scatter)
def scatter(
x, y, s=None, c=None, marker=None, cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None, verts=None,
edgecolors=None, *, plotnonfinite=False, data=None, **kwargs):
__ret = gca().scatter(
x, y, s=s, c=c, marker=marker, cmap=cmap, norm=norm,
vmin=vmin, vmax=vmax, alpha=alpha, linewidths=linewidths,
verts=verts, edgecolors=edgecolors,
plotnonfinite=plotnonfinite, **({"data": data} if data is not
None else {}), **kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.semilogx)
def semilogx(*args, **kwargs):
return gca().semilogx(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.semilogy)
def semilogy(*args, **kwargs):
return gca().semilogy(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.specgram)
def specgram(
x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, cmap=None, xextent=None, pad_to=None,
sides=None, scale_by_freq=None, mode=None, scale=None,
vmin=None, vmax=None, *, data=None, **kwargs):
__ret = gca().specgram(
x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,
noverlap=noverlap, cmap=cmap, xextent=xextent, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq, mode=mode,
scale=scale, vmin=vmin, vmax=vmax, **({"data": data} if data
is not None else {}), **kwargs)
sci(__ret[-1])
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.spy)
def spy(
Z, precision=0, marker=None, markersize=None, aspect='equal',
origin='upper', **kwargs):
__ret = gca().spy(
Z, precision=precision, marker=marker, markersize=markersize,
aspect=aspect, origin=origin, **kwargs)
if isinstance(__ret, cm.ScalarMappable): sci(__ret) # noqa
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.stackplot)
def stackplot(
x, *args, labels=(), colors=None, baseline='zero', data=None,
**kwargs):
return gca().stackplot(
x, *args, labels=labels, colors=colors, baseline=baseline,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.stem)
def stem(
*args, linefmt=None, markerfmt=None, basefmt=None, bottom=0,
label=None, use_line_collection=False, data=None):
return gca().stem(
*args, linefmt=linefmt, markerfmt=markerfmt, basefmt=basefmt,
bottom=bottom, label=label,
use_line_collection=use_line_collection, **({"data": data} if
data is not None else {}))
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.step)
def step(x, y, *args, where='pre', data=None, **kwargs):
return gca().step(
x, y, *args, where=where, **({"data": data} if data is not
None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.streamplot)
def streamplot(
x, y, u, v, density=1, linewidth=None, color=None, cmap=None,
norm=None, arrowsize=1, arrowstyle='-|>', minlength=0.1,
transform=None, zorder=None, start_points=None, maxlength=4.0,
integration_direction='both', *, data=None):
__ret = gca().streamplot(
x, y, u, v, density=density, linewidth=linewidth, color=color,
cmap=cmap, norm=norm, arrowsize=arrowsize,
arrowstyle=arrowstyle, minlength=minlength,
transform=transform, zorder=zorder, start_points=start_points,
maxlength=maxlength,
integration_direction=integration_direction, **({"data": data}
if data is not None else {}))
sci(__ret.lines)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.table)
def table(
cellText=None, cellColours=None, cellLoc='right',
colWidths=None, rowLabels=None, rowColours=None,
rowLoc='left', colLabels=None, colColours=None,
colLoc='center', loc='bottom', bbox=None, edges='closed',
**kwargs):
return gca().table(
cellText=cellText, cellColours=cellColours, cellLoc=cellLoc,
colWidths=colWidths, rowLabels=rowLabels,
rowColours=rowColours, rowLoc=rowLoc, colLabels=colLabels,
colColours=colColours, colLoc=colLoc, loc=loc, bbox=bbox,
edges=edges, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.text)
def text(
x, y, s, fontdict=None,
withdash=cbook.deprecation._deprecated_parameter, **kwargs):
return gca().text(x, y, s, fontdict=fontdict, withdash=withdash, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.tick_params)
def tick_params(axis='both', **kwargs):
return gca().tick_params(axis=axis, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.ticklabel_format)
def ticklabel_format(
*, axis='both', style='', scilimits=None, useOffset=None,
useLocale=None, useMathText=None):
return gca().ticklabel_format(
axis=axis, style=style, scilimits=scilimits,
useOffset=useOffset, useLocale=useLocale,
useMathText=useMathText)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.tricontour)
def tricontour(*args, **kwargs):
__ret = gca().tricontour(*args, **kwargs)
if __ret._A is not None: sci(__ret) # noqa
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.tricontourf)
def tricontourf(*args, **kwargs):
__ret = gca().tricontourf(*args, **kwargs)
if __ret._A is not None: sci(__ret) # noqa
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.tripcolor)
def tripcolor(
*args, alpha=1.0, norm=None, cmap=None, vmin=None, vmax=None,
shading='flat', facecolors=None, **kwargs):
__ret = gca().tripcolor(
*args, alpha=alpha, norm=norm, cmap=cmap, vmin=vmin,
vmax=vmax, shading=shading, facecolors=facecolors, **kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.triplot)
def triplot(*args, **kwargs):
return gca().triplot(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.violinplot)
def violinplot(
dataset, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False,
points=100, bw_method=None, *, data=None):
return gca().violinplot(
dataset, positions=positions, vert=vert, widths=widths,
showmeans=showmeans, showextrema=showextrema,
showmedians=showmedians, points=points, bw_method=bw_method,
**({"data": data} if data is not None else {}))
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.vlines)
def vlines(
x, ymin, ymax, colors='k', linestyles='solid', label='', *,
data=None, **kwargs):
return gca().vlines(
x, ymin, ymax, colors=colors, linestyles=linestyles,
label=label, **({"data": data} if data is not None else {}),
**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.xcorr)
def xcorr(
x, y, normed=True, detrend=mlab.detrend_none, usevlines=True,
maxlags=10, *, data=None, **kwargs):
return gca().xcorr(
x, y, normed=normed, detrend=detrend, usevlines=usevlines,
maxlags=maxlags, **({"data": data} if data is not None else
{}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes._sci)
def sci(im):
return gca()._sci(im)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.set_title)
def title(label, fontdict=None, loc='center', pad=None, **kwargs):
return gca().set_title(
label, fontdict=fontdict, loc=loc, pad=pad, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.set_xlabel)
def xlabel(xlabel, fontdict=None, labelpad=None, **kwargs):
return gca().set_xlabel(
xlabel, fontdict=fontdict, labelpad=labelpad, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.set_ylabel)
def ylabel(ylabel, fontdict=None, labelpad=None, **kwargs):
return gca().set_ylabel(
ylabel, fontdict=fontdict, labelpad=labelpad, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.set_xscale)
def xscale(value, **kwargs):
return gca().set_xscale(value, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@docstring.copy(Axes.set_yscale)
def yscale(value, **kwargs):
return gca().set_yscale(value, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def autumn():
"""
Set the colormap to "autumn".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("autumn")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def bone():
"""
Set the colormap to "bone".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("bone")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def cool():
"""
Set the colormap to "cool".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("cool")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def copper():
"""
Set the colormap to "copper".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("copper")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def flag():
"""
Set the colormap to "flag".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("flag")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def gray():
"""
Set the colormap to "gray".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("gray")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def hot():
"""
Set the colormap to "hot".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("hot")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def hsv():
"""
Set the colormap to "hsv".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("hsv")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def jet():
"""
Set the colormap to "jet".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("jet")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def pink():
"""
Set the colormap to "pink".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("pink")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def prism():
"""
Set the colormap to "prism".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("prism")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def spring():
"""
Set the colormap to "spring".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("spring")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def summer():
"""
Set the colormap to "summer".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("summer")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def winter():
"""
Set the colormap to "winter".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("winter")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def magma():
"""
Set the colormap to "magma".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("magma")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def inferno():
"""
Set the colormap to "inferno".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("inferno")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def plasma():
"""
Set the colormap to "plasma".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("plasma")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def viridis():
"""
Set the colormap to "viridis".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("viridis")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def nipy_spectral():
"""
Set the colormap to "nipy_spectral".
This changes the default colormap as well as the colormap of the current
image if there is one. See ``help(colormaps)`` for more information.
"""
set_cmap("nipy_spectral")
_setup_pyplot_info_docstrings()
|
from unittest import TestCase
from flask_acl import ACLManager
from flask_login import LoginManager
from flask import Flask
class FlaskTestCase(TestCase):
def setUp(self):
self.flask = Flask('tests')
self.flask.config['SECRET_KEY'] = 'deadbeef'
self.authn = LoginManager(self.flask)
self.authz = ACLManager(self.flask)
self.client = self.flask.test_client()
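        # Throwaway endpoint for the tests: the ACL string is meant to grant
        # any principal all permissions on this route, while the view itself
        # still answers with a 401 prompt to log in.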
@self.flask.route('/login')
@self.authz.route_acl('ALLOW ANY ALL')
def login():
return 'please login', 401
|
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import print_function
from __future__ import absolute_import
import unittest
import time
import mock
from yardstick.benchmark.runners.iteration import IterationRunner
class RunnerTestCase(unittest.TestCase):
@mock.patch("yardstick.benchmark.runners.iteration.multiprocessing")
def test_get_output(self, mock_process):
runner = IterationRunner({})
runner.output_queue.put({'case': 'opnfv_yardstick_tc002'})
runner.output_queue.put({'criteria': 'PASS'})
idle_result = {
'case': 'opnfv_yardstick_tc002',
'criteria': 'PASS'
}
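        # multiprocessing queues are fed from a background thread, so poll
        # briefly until the items queued above become visible to the reader.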
        for _ in range(1000):
time.sleep(0.01)
if not runner.output_queue.empty():
break
actual_result = runner.get_output()
self.assertEqual(idle_result, actual_result)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
from django.contrib import admin
from django import forms
from taggit_labels.widgets import LabelWidget
from taggit.forms import TagField
from addressbook.models import *
class SocialInline(admin.TabularInline):
model = SocialNetwork
extra = 0
class WebsiteInline(admin.TabularInline):
model = Website
extra = 0
class PhoneInline(admin.TabularInline):
model = PhoneNumber
extra = 0
class EmailInline(admin.TabularInline):
model = Email
extra = 0
class AddressInline(admin.StackedInline):
model = Address
extra = 0
#class ContactForm(forms.ModelForm):
# tags = TagField(required=False, widget=LabelWidget)
class ContactAdmin(admin.ModelAdmin):
#form = ContactForm
inlines = [
AddressInline,
EmailInline,
PhoneInline,
SocialInline,
WebsiteInline,
]
admin.site.register(Contact, ContactAdmin)
admin.site.register(ContactGroup, admin.ModelAdmin)
admin.site.register(PhoneNumber, admin.ModelAdmin)
admin.site.register(Website, admin.ModelAdmin)
admin.site.register(SocialNetwork, admin.ModelAdmin)
admin.site.register(Email, admin.ModelAdmin)
admin.site.register(Address, admin.ModelAdmin)
|
# coding: utf-8
# # Keras tutorial - the Happy House
#
# Welcome to the first assignment of week 2. In this assignment, you will:
# 1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
# 2. See how you can build a deep learning algorithm in a couple of hours.
#
# Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than a low-level numerical library such as NumPy, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models.
#
# In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
# In[2]:
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
get_ipython().magic('matplotlib inline')
# **Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
# ## 1 - The Happy House
#
# For your next vacation, you decided to spend a week at a shared house with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has committed to be happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness.
#
# <img src="images/happy-house.jpg" style="width:350px;height:270px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **the Happy House**</center></caption>
#
#
# As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy.
#
# You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labeled.
#
# <img src="images/house-members.png" style="width:550px;height:250px;">
#
# Run the following code to normalize the dataset and learn about its shapes.
# In[3]:
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Reshape
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# **Details of the "Happy" dataset**:
# - Images are of shape (64,64,3)
# - Training: 600 pictures
# - Test: 150 pictures
#
# It is now time to solve the "Happy" Challenge.
# ## 2 - Building a model in Keras
#
# Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
#
# Here is an example of a model in Keras:
#
# ```python
# def model(input_shape):
# # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
# X_input = Input(input_shape)
#
# # Zero-Padding: pads the border of X_input with zeroes
# X = ZeroPadding2D((3, 3))(X_input)
#
# # CONV -> BN -> RELU Block applied to X
# X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
# X = BatchNormalization(axis = 3, name = 'bn0')(X)
# X = Activation('relu')(X)
#
# # MAXPOOL
# X = MaxPooling2D((2, 2), name='max_pool')(X)
#
# # FLATTEN X (means convert it to a vector) + FULLYCONNECTED
# X = Flatten()(X)
# X = Dense(1, activation='sigmoid', name='fc')(X)
#
# # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
# model = Model(inputs = X_input, outputs = X, name='HappyModel')
#
# return model
# ```
#
# Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the computation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above).
#
# **Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
#
# **Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.
# In[54]:
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
"""
Implementation of the HappyModel.
Arguments:
input_shape -- shape of the images of the dataset
Returns:
model -- a Model() instance in Keras
"""
### START CODE HERE ###
    # Feel free to use the suggested outline in the text above to get started, and run through the whole
    # exercise (including the later portions of this notebook) once. Then come back and try out other
    # network architectures as well.
# Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
X_input = Input(input_shape)
# CONV1
# Zero-Padding: pads the border of X_input with zeroes
X = ZeroPadding2D((3, 3))(X_input)
# CONV -> BN -> RELU Block applied to X
X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv_1')(X)
X = BatchNormalization(axis = 3, name = 'bn_1')(X)
X = Activation('relu')(X)
# MAXPOOL
X = MaxPooling2D((2, 2), name='max_pool_1')(X)
# FLATTEN X (means convert it to a vector) + FULLYCONNECTED
X = Flatten()(X)
X = Dense(1, activation='sigmoid', name='fc3')(X)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs = X_input, outputs = X, name='HappyModel')
### END CODE HERE ###
return model
# You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
# 1. Create the model by calling the function above
# 2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
# 3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
# 4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
#
# If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).
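#
# A minimal sketch of the four steps chained together (the hyperparameter
# values below are placeholders, not the graded solution):
#
# ```python
# model = HappyModel((64, 64, 3))
# model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# model.fit(x=X_train, y=Y_train, epochs=5, batch_size=32)
# preds = model.evaluate(x=X_test, y=Y_test)
# ```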
#
# **Exercise**: Implement step 1, i.e. create the model.
# In[55]:
### START CODE HERE ### (1 line)
happyModel = HappyModel((64, 64, 3))
### END CODE HERE ###
# **Exercise**: Implement step 2, i.e. compile the model to configure the learning process. Choose the 3 arguments of `compile()` wisely. Hint: the Happy Challenge is a binary classification problem.
# In[56]:
### START CODE HERE ### (1 line)
happyModel.compile(optimizer = "Adam", loss = "binary_crossentropy", metrics = ["accuracy"])
### END CODE HERE ###
# **Exercise**: Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
# In[61]:
### START CODE HERE ### (1 line)
happyModel.fit(x = X_train, y = Y_train, epochs = 10, batch_size = 16)
### END CODE HERE ###
# Note that if you run `fit()` again, the `model` will continue to train with the parameters it has already learnt instead of reinitializing them.
#
# **Exercise**: Implement step 4, i.e. test/evaluate the model.
# In[62]:
### START CODE HERE ### (1 line)
preds = happyModel.evaluate(x = X_test, y = Y_test)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
# If your `HappyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets.
#
# To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare.
#
# If you have not yet achieved a very good accuracy (let's say more than 80%), here are some things you can play around with to try to achieve it (a combined sketch follows the list):
#
# - Try using blocks of CONV->BATCHNORM->RELU such as:
# ```python
# X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
# X = BatchNormalization(axis = 3, name = 'bn0')(X)
# X = Activation('relu')(X)
# ```
# until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.
# - You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
# - Change your optimizer. We find Adam works well.
# - If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)
# - Run on more epochs, until you see the train accuracy plateauing.
#
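# Putting a few of these tips together, here is a minimal sketch of a deeper variant (illustrative only, not the graded solution; the layer names and filter counts are arbitrary choices):
#
# ```python
# X_input = Input((64, 64, 3))
# X = X_input
# for i, filters in enumerate([16, 32, 32]):
#     X = Conv2D(filters, (3, 3), padding='same', name='conv{}'.format(i))(X)
#     X = BatchNormalization(axis=3, name='bn{}'.format(i))(X)
#     X = Activation('relu')(X)
#     X = MaxPooling2D((2, 2), name='max_pool{}'.format(i))(X)  # halves height and width
# X = Flatten()(X)
# X = Dense(1, activation='sigmoid', name='fc')(X)
# model = Model(inputs=X_input, outputs=X, name='DeeperHappyModel')
# ```
#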
# Even if you have achieved a good accuracy, please feel free to keep playing with your model to try to get even better results.
#
# **Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here.
#
# ## 3 - Conclusion
#
# Congratulations, you have solved the Happy House challenge!
#
# Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here.
# <font color='blue'>
# **What we would like you to remember from this assignment:**
# - Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras?
# - Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test.
# ## 4 - Test with your own image (Optional)
#
# Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that:
# 1. Click on "File" in the upper bar of this notebook, then click "Open" to go to your Coursera Hub.
# 2. Add your image to this Jupyter Notebook's directory, in the "images" folder
# 3. Write your image's name in the following code
# 4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)!
#
# The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
# In[ ]:
### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print(happyModel.predict(x))
# ## 5 - Other useful functions in Keras (Optional)
#
# Two other basic features of Keras that you'll find useful are:
# - `model.summary()`: prints the details of your layers in a table with the sizes of their inputs/outputs
# - `plot_model()`: plots your graph in a nice layout and saves it as ".png" via its `to_file` argument; you can render it with `SVG()` if you'd like to share it on social media ;). The file is saved in "File" then "Open..." in the upper bar of the notebook.
#
# Run the following code.
# In[63]:
happyModel.summary()
# In[64]:
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
# In[ ]:
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
from synthtool.languages import python
common = gcp.CommonTemplates()
default_version = "v2"
for library in s.get_staging_dirs(default_version):
# Work around gapic generator bug
s.replace(
library / f"google/cloud/dialogflow_{library.name}/services/**/client.py",
"warnings.DeprecationWarning",
"DeprecationWarning"
)
s.move(library, excludes=["docs/index.rst", "setup.py", "README.rst"])
s.remove_staging_dirs()
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(
samples=False, # set to True only if there are samples
microgenerator=True,
cov_level=98,
)
s.move(templated_files, excludes=[".coveragerc"]) # microgenerator has a good .coveragerc file
python.py_samples(skip_readmes=True)
# Don't treat warnings as errors
# Docstrings have unexpected indentation and block quote formatting issues
s.replace(
"noxfile.py",
'''["']-W["'], # warnings as errors''',
"",
)
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
# ----------------------------------------------------------------------------
# Main Branch migration
# ----------------------------------------------------------------------------
s.replace(
"*.rst",
"master",
"main"
)
s.replace(
"CONTRIBUTING.rst",
"kubernetes/community/blob/main",
"kubernetes/community/blob/master"
)
s.replace(
"docs/conf.py",
"master",
"main"
)
s.replace(
"docs/conf.py",
"main_doc",
"root_doc"
)
s.replace(
".kokoro/*",
"master",
"main"
)
s.replace(
"README.rst",
"google-cloud-python/blob/main/README.rst",
"google-cloud-python/blob/master/README.rst"
)
|
# TODO include https://github.com/blue-yonder/tsfresh
import matplotlib.pyplot as plt
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import mean_absolute_error, mean_squared_log_error
from scipy.optimize import minimize
from tqdm import tqdm
import statsmodels.tsa.api as smt
import statsmodels.api as sm
import numpy as np
import pandas as pd
def mean_absolute_percentage_error(y_true, y_pred):
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
class SARIMAX:
def __init__(self, series, d, D, s):
"""
SARIMA model
Args:
series (pd.Series): The pandas series to work on
d (int): integration order in ARIMA model
D (int): seasonal integration order
s (int): length of season
"""
self.s = s
self.D = D
self.d = d
self.series = series
def optimize(self, parameters_list, freq='H'):
"""
        Search over the given (p, q, P, Q) combinations and return the model with the lowest AIC
Args:
parameters_list (list): list with (p, q, P, Q) tuples
freq (str): Frequency
Returns:
model: The model with the best parameters
"""
results = []
best_aic = float("inf")
        for param in tqdm(parameters_list):
            # we need try-except because on some combinations the model fails to converge
            try:
                model = sm.tsa.statespace.SARIMAX(self.series, order=(param[0], self.d, param[1]),
                                                  seasonal_order=(param[2], self.D, param[3], self.s),
                                                  freq=freq).fit(disp=-1)
            except Exception:
                continue
aic = model.aic
# saving best model, AIC and parameters
if aic < best_aic:
best_aic = aic
results.append([param, model.aic])
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
# sorting in ascending order, the lower AIC is - the better
result_table = result_table.sort_values(by='aic', ascending=True).reset_index(drop=True)
p, q, P, Q = result_table.parameters[0]
# Fit the best model
best_model = sm.tsa.statespace.SARIMAX(self.series, order=(p, self.d, q),
seasonal_order=(P, self.D, Q, self.s)).fit(disp=-1)
return best_model
def plot(self, model, n_steps):
"""
Plots model vs predicted values
series - dataset with timeseries
model - fitted SARIMA model
n_steps - number of steps to predict in the future
Args:
model (statsmodels.tsa.statespace.sarimax.SARIMAXResultsWrapper): The model to plot
n_steps (int): t steps
Returns:
None
"""
# adding model values
data = pd.DataFrame(self.series)
data.columns = ['actual']
data['arima_model'] = model.fittedvalues
# making a shift on s+d steps, because these values were unobserved by the model
# due to the differentiating
        data.iloc[:self.s + self.d, data.columns.get_loc('arima_model')] = np.nan
# forecasting on n_steps forward
forecast = model.predict(start=data.shape[0], end=data.shape[0] + n_steps)
forecast = data.arima_model.append(forecast)
# calculate error, again having shifted on s+d steps from the beginning
error = mean_absolute_percentage_error(data['actual'][self.s + self.d:], data['arima_model'][self.s + self.d:])
plt.figure(figsize=(15, 7))
plt.title("Mean Absolute Percentage Error: {0:.2f}%".format(error))
plt.plot(forecast, color='r', label="model")
plt.axvspan(data.index[-1], forecast.index[-1], alpha=0.5, color='lightgrey')
plt.plot(data.actual, label="actual")
plt.legend()
plt.grid(True)
plt.show()
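# A minimal usage sketch for SARIMAX.optimize (illustrative only; `series`
# stands for any pd.Series with a DatetimeIndex at hourly frequency):
#
#     from itertools import product
#     ps = qs = Ps = Qs = range(0, 3)
#     parameters_list = list(product(ps, qs, Ps, Qs))  # (p, q, P, Q) tuples
#     sarima = SARIMAX(series, d=1, D=1, s=24)
#     best_model = sarima.optimize(parameters_list, freq='H')
#     sarima.plot(best_model, n_steps=48)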
class MovingAverage:
def __init__(self, series):
"""
Class with multiple function for plotting/predicting time series
Args:
series (pd.Series): A pandas Series with date as index and predictor as values
"""
self.series = series
def get_simple_moving_average(self, window, show_plot=False, *args, **kwargs):
"""
Returns a simple moving average (SMA)
Moving average is based on the assumption: "Tomorrow will be the same as today".
        So to predict the value at the next period m, for example, take the moving average at m - 1.
Most common moving averages are 15, 20, 30, 50, 100 and 200 days.
Args:
window (int): The number of entries by which to apply the smoothing factor
show_plot (bool): If set to True will show moving average plot.
The arguments passed to (*args, **kwargs) will be transferred to
_plotMovingAverage().
*args (list): Arguments to pass to _plotMovingAverage()
**kwargs (dict): Arguments to pass to _plotMovingAverage()
Returns:
pd.Series: A pandas Series with moving average
"""
rolling_mean = self.series.rolling(window=window).mean()
if show_plot:
self._plot_simple_moving_average(self.series, rolling_mean, window, *args, **kwargs)
return rolling_mean
    def _plot_simple_moving_average(self, series, rolling_mean, window, plot_intervals=False,
                                    conf_interval=0.95, plot_anomalies=False,
                                    title_prefix=""):
"""
Plot moving average (SMA) over a given pandas Series
Args:
series (pd.Series): A pandas Series with date index
rolling_mean (pd.Series): A pandas series
window (int): Rolling window size
plot_intervals (bool): Show confidence intervals
conf_interval (float): The confidence interval:
0.95 = 95% interval with z = 1.96
0.99 = 99% interval with z = 2.576
0.995 = 99.5% interval with z = 2.807
0.999 = 99.9% interval with z = 3.291
plot_anomalies (bool): show anomalies
title_prefix (str): A prefix for the plot title
Returns:
"""
z = {0.95: 1.96, 0.99: 2.576, 0.995: 2.807, 0.999: 3.291}
plt.figure(figsize=(15, 5))
plt.title(title_prefix + ": Moving average\n window size = {}".format(window))
plt.plot(rolling_mean, "g", label="Rolling mean trend")
# Plot confidence intervals for smoothed values
if plot_intervals:
mae = mean_absolute_error(series[window:], rolling_mean[window:])
deviation = np.std(series[window:] - rolling_mean[window:])
print("MAE: {}, Deviation: {}".format(mae, deviation))
# z = 1.96 is the 95% confidence interval
lower_bond = rolling_mean - (mae + z[conf_interval] * deviation)
upper_bond = rolling_mean + (mae + z[conf_interval] * deviation)
plt.plot(upper_bond, "r--", label="Upper Bond / Lower Bond")
plt.plot(lower_bond, "r--")
# Having the intervals, find abnormal values
if plot_anomalies:
                anomalies = pd.Series(index=series.index, dtype=float)
anomalies[series < lower_bond] = series[series < lower_bond]
anomalies[series > upper_bond] = series[series > upper_bond]
plt.plot(anomalies, "ro", markersize=10)
plt.plot(series[window:], label="Actual values")
plt.legend(loc="upper left")
plt.grid(True)
plt.show()
def get_exponential_moving_average(self, window):
"""
Exponential moving average (EMA) with a smoothing factor alpha.
Args:
window (int): Rolling window size
Returns:
pd.Series: The smoothed values
"""
alpha = 2 / (1 + window)
result = [self.series[0]] # first value is same as series
for n in range(1, len(self.series)):
result.append(alpha * self.series[n] + (1 - alpha) * result[n - 1])
return result
def plot_exponential_moving_average(self, windows):
"""
Plots exponential moving average (EMA) with different window size
Args:
windows (list): List of rolling window size
Returns:
None
"""
with plt.style.context('seaborn-white'):
plt.figure(figsize=(15, 7))
for w in windows:
plt.plot(self.get_exponential_moving_average(w), label="Window size {}".format(w))
plt.plot(self.series.values, "c", label="Actual")
plt.legend(loc="best")
plt.axis('tight')
plt.title("Exponential Smoothing")
plt.grid(True)
plt.show()
def get_double_exponential_moving_average(self, alpha, beta):
"""
Double exponential moving average (DEMA) with a smoothing level alpha and a trend beta.
Alpha is responsible for the series smoothing around the trend, Beta for the smoothing of the trend itself
The larger the values, the more weight the most recent observations will have and the
less smoothed the model series will be.
Args:
alpha (float): Smoothing parameter for level, float which value is between [0.0, 1.0]
beta (float): Smoothing parameter for trend, float which value is between [0.0, 1.0]
Returns:
pd.Series: Smoothed values
"""
# first value is same as series
result = [self.series[0]]
for n in range(1, len(self.series) + 1):
if n == 1:
level, trend = self.series[0], self.series[1] - self.series[0]
if n >= len(self.series): # forecasting
value = result[-1]
else:
value = self.series[n]
last_level, level = level, alpha * value + (1 - alpha) * (level + trend)
trend = beta * (level - last_level) + (1 - beta) * trend
result.append(level + trend)
return result
def plot_double_exponential_moving_average(self, alphas, betas):
"""
Plots double exponential moving average with different alphas and betas
Args:
alphas (list): Smoothing parameter for level, list of floats which value are between [0.0, 1.0]
betas (list): Smoothing parameter for trend, list of floats which value are between [0.0, 1.0]
Returns:
None
"""
with plt.style.context('seaborn-white'):
plt.figure(figsize=(20, 8))
for alpha in alphas:
for beta in betas:
plt.plot(self.get_double_exponential_moving_average(alpha, beta),
label="Alpha {}, beta {}".format(alpha, beta))
plt.plot(self.series.values, label="Actual")
plt.legend(loc="best")
plt.axis('tight')
plt.title("Double Exponential Smoothing")
plt.grid(True)
plt.show()
class HoltWinters:
def __init__(self, series, slen, alpha, beta, gamma, n_preds, scaling_factor=1.96):
"""
        Holt-Winters model with anomaly detection using the Brutlag method.
This model is best used when the data has seasonality.
Args:
series (pd.Series): initial time series
slen (int): length of a season
alpha (float): Holt-Winters model coefficient
beta (float): Holt-Winters model coefficient
gamma (float): Holt-Winters model coefficient
n_preds (int): predictions horizon
scaling_factor (float): z value: sets the width of the confidence interval by Brutlag
95% interval with scaling_factor = 1.96
99% interval with scaling_factor = 2.576
99.5% interval with scaling_factor = 2.807
99.9% interval with scaling_factor = 3.291
"""
self.series = series
self.slen = slen
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.n_preds = n_preds
self.scaling_factor = scaling_factor
self.result = []
self.upper_bond = []
self.lower_bond = []
    def _initial_trend(self):
        total = 0.0
        for i in range(self.slen):
            total += float(self.series[i + self.slen] - self.series[i]) / self.slen
        return total / self.slen
def _initial_seasonal_components(self):
seasonals = {}
season_averages = []
n_seasons = int(len(self.series) / self.slen)
# let's calculate season averages
for j in range(n_seasons):
season_averages.append(sum(self.series[self.slen * j:self.slen * j + self.slen]) / float(self.slen))
# let's calculate initial values
for i in range(self.slen):
sum_of_vals_over_avg = 0.0
for j in range(n_seasons):
sum_of_vals_over_avg += self.series[self.slen * j + i] - season_averages[j]
seasonals[i] = sum_of_vals_over_avg / n_seasons
return seasonals
def _plot_holt_winters(self, plot_intervals=False, plot_anomalies=False):
"""
Plot holt winters
Args:
plot_intervals (bool): Show confidence intervals
plot_anomalies (bool): show anomalies
Returns:
None
"""
plt.figure(figsize=(20, 10))
plt.plot(self.result, label="Model")
plt.plot(self.series.values, label="Actual")
error = mean_absolute_percentage_error(self.series.values, self.result[:len(self.series)])
plt.title("Mean Absolute Percentage Error: {0:.2f}%".format(error))
if plot_anomalies:
anomalies = np.array([np.NaN] * len(self.series))
anomalies[self.series.values < self.lower_bond[:len(self.series)]] = \
self.series.values[self.series.values < self.lower_bond[:len(self.series)]]
anomalies[self.series.values > self.upper_bond[:len(self.series)]] = \
self.series.values[self.series.values > self.upper_bond[:len(self.series)]]
plt.plot(anomalies, "o", markersize=10, label="Anomalies")
if plot_intervals:
plt.plot(self.upper_bond, "r--", alpha=0.5, label="Up/Low confidence")
plt.plot(self.lower_bond, "r--", alpha=0.5)
plt.fill_between(x=range(0, len(self.result)), y1=self.upper_bond,
y2=self.lower_bond, alpha=0.2, color="grey")
plt.vlines(len(self.series), ymin=min(self.lower_bond), ymax=max(self.upper_bond), linestyles='dashed')
plt.axvspan(len(self.series) - 20, len(self.result), alpha=0.3, color='lightgrey')
plt.grid(True)
plt.axis('tight')
plt.legend(loc="best", fontsize=13)
plt.show()
def triple_exponential_smoothing(self, plot_results=False, *args, **kwargs):
"""
Returns the triple exponential smoothing results
Args:
plot_results (bool): If True the results will be plotted
*args (list): Arguments to pass to _plot_holt_winters()
**kwargs (dict): Arguments to pass to _plot_holt_winters()
Returns:
list: Triple exponential smoothing results
"""
smooth_l = []
season_l = []
trend_l = []
predicted_deviation = []
self.result = []
self.upper_bond = []
self.lower_bond = []
seasonals = self._initial_seasonal_components()
for i in range(len(self.series) + self.n_preds):
if i == 0: # components initialization
smooth = self.series[0]
trend = self._initial_trend()
self.result.append(self.series[0])
smooth_l.append(smooth)
trend_l.append(trend)
season_l.append(seasonals[i % self.slen])
predicted_deviation.append(0)
self.upper_bond.append(self.result[0] + self.scaling_factor * predicted_deviation[0])
self.lower_bond.append(self.result[0] - self.scaling_factor * predicted_deviation[0])
continue
if i >= len(self.series): # predicting
m = i - len(self.series) + 1
self.result.append((smooth + m * trend) + seasonals[i % self.slen])
# when predicting we increase uncertainty on each step
predicted_deviation.append(predicted_deviation[-1] * 1.01)
else:
val = self.series[i]
last_smooth, smooth = smooth, self.alpha * (val - seasonals[i % self.slen]) + (1 - self.alpha) * (
smooth + trend)
trend = self.beta * (smooth - last_smooth) + (1 - self.beta) * trend
seasonals[i % self.slen] = self.gamma * (val - smooth) + (1 - self.gamma) * seasonals[i % self.slen]
self.result.append(smooth + trend + seasonals[i % self.slen])
# Deviation is calculated according to Brutlag algorithm.
predicted_deviation.append(self.gamma * np.abs(self.series[i] - self.result[i]) +
(1 - self.gamma) * predicted_deviation[-1])
self.upper_bond.append(self.result[-1] + self.scaling_factor * predicted_deviation[-1])
self.lower_bond.append(self.result[-1] - self.scaling_factor * predicted_deviation[-1])
smooth_l.append(smooth)
trend_l.append(trend)
season_l.append(seasonals[i % self.slen])
if plot_results:
self._plot_holt_winters(*args, **kwargs)
return self.result
def get_best_parameters(self, inplace=True, loss_function=mean_squared_log_error, n_folds=3):
"""
Optimize for getting the best alpha, beta and gamma parameters
on cross validation time series split.
Args:
inplace (bool): If True the internal Alpha, Beta, Gamma of this class will be replaced
by the optimal ones
n_folds (int): Number of folds for cross validation
loss_function (function): Sklearn metric loss function
Returns:
tuple: Alpha, Beta, Gamma
"""
# initializing model parameters alpha, beta and gamma
x = np.array([0, 0, 0])
# Minimizing the loss function
opt = minimize(time_series_cv_score, x0=x, args=(self.series, loss_function, self.slen, n_folds),
method="TNC", bounds=((0, 1), (0, 1), (0, 1)))
# Take optimal values...
alpha, beta, gamma = opt.x
print("Alpha: {}, Beta: {}, Gamma: {}".format(alpha, beta, gamma))
if inplace:
self.alpha = alpha
self.beta = beta
self.gamma = gamma
return alpha, beta, gamma
def time_series_cv_score(params, series, loss_function, slen, n_folds=3):
"""
Returns error on Cross validation for time series
Args:
params (list): Vector of parameters for optimization
series (pd.Series): dataset with timeseries
loss_function (function): Sklearn metric loss function
slen (int): length of a season
n_folds (int): Number of folds for cross validation
Returns:
float: Error
"""
# errors array
errors = []
values = series.values
alpha, beta, gamma = params
# set the number of folds for cross-validation
tscv = TimeSeriesSplit(n_splits=n_folds)
# iterating over folds, train model on each, forecast and calculate error
for train, test in tscv.split(values):
model = HoltWinters(series=values[train], slen=slen,
alpha=alpha, beta=beta, gamma=gamma, n_preds=len(test))
model.triple_exponential_smoothing()
predictions = model.result[-len(test):]
actual = values[test]
        error = loss_function(actual, predictions)  # sklearn metrics expect (y_true, y_pred)
errors.append(error)
return np.mean(np.array(errors))
def test_stationary(y, show_plots=True, lags=None, figsize=(12, 7), style='bmh'):
"""
Plot time series, its ACF (Autocorrelation function) and PACF (Partial autocorrelation function),
calculate Augmented Dickey–Fuller test.
Used to check if a time series is stationary or not.
    - p-value > 0.05: Fail to reject the null hypothesis (H0); the data has a unit root and is non-stationary.
    - p-value <= 0.05: Reject the null hypothesis (H0); the data does not have a unit root and is stationary.
Args:
y (pd.Series, list): Time series pandas series
show_plots (bool): True to show the TS/ACF/PACF plots
lags (int): How many lags to include in ACF, PACF plot calculation
figsize (tuple): Size of plot
style (str): Style of plot
Returns:
tuple: (p-value, is_stationary)
"""
if not isinstance(y, pd.Series):
y = pd.Series(y)
p_value = sm.tsa.stattools.adfuller(y)[1]
is_stationary = False
if show_plots:
with plt.style.context(style):
plt.figure(figsize=figsize)
layout = (2, 2)
ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
acf_ax = plt.subplot2grid(layout, (1, 0))
pacf_ax = plt.subplot2grid(layout, (1, 1))
y.plot(ax=ts_ax)
ts_ax.set_title('Time Series Analysis Plots\n Dickey-Fuller: p={0:.5f}'.format(p_value))
smt.graphics.plot_acf(y, lags=lags, ax=acf_ax)
smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax)
plt.tight_layout()
plt.show()
if p_value <= 0.05:
is_stationary = True
return p_value, is_stationary
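# The block below is a minimal, self-contained usage sketch (not part of the
# original module API): it builds a synthetic series and exercises the
# MovingAverage, HoltWinters and test_stationary helpers defined above.
if __name__ == '__main__':
    # Two weeks of synthetic "hourly" data with a daily (24-step) season.
    t = np.arange(24 * 14)
    values = (10 + 3 * np.sin(2 * np.pi * (t % 24) / 24)
              + np.random.RandomState(0).normal(0, 0.5, len(t)))
    series = pd.Series(values)
    # Simple moving average over one season, with 95% confidence intervals.
    ma = MovingAverage(series)
    ma.get_simple_moving_average(window=24, show_plot=True,
                                 plot_intervals=True, conf_interval=0.95,
                                 title_prefix="Synthetic")
    # Holt-Winters triple exponential smoothing, forecasting two days ahead.
    hw = HoltWinters(series, slen=24, alpha=0.1, beta=0.05, gamma=0.2,
                     n_preds=48)
    hw.triple_exponential_smoothing(plot_results=True, plot_intervals=True)
    # Augmented Dickey-Fuller stationarity check.
    p_value, is_stationary = test_stationary(series, show_plots=False)
    print("ADF p-value: {:.4f}, stationary: {}".format(p_value, is_stationary))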
|
# -*- coding: utf-8 -*-
r"""
Dirichlet characters
A :class:`DirichletCharacter` is the extension of a homomorphism
.. MATH::
(\ZZ/N\ZZ)^* \to R^*,
for some ring `R`, to the map `\ZZ/N\ZZ \to R` obtained by sending
those `x\in\ZZ/N\ZZ` with `\gcd(N,x)>1` to `0`.
EXAMPLES::
sage: G = DirichletGroup(35)
sage: x = G.gens()
sage: e = x[0]*x[1]^2; e
Dirichlet character modulo 35 of conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2 - 1
sage: e.order()
12
This illustrates a canonical coercion::
sage: e = DirichletGroup(5, QQ).0
sage: f = DirichletGroup(5,CyclotomicField(4)).0
sage: e*f
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4
AUTHORS:
- William Stein (2005-09-02): Fixed bug in comparison of Dirichlet
characters. It was checking that their values were the same, but
not checking that they had the same level!
- William Stein (2006-01-07): added more examples
- William Stein (2006-05-21): added examples of everything; fix a
*lot* of tiny bugs and design problem that became clear when
creating examples.
- Craig Citro (2008-02-16): speed up __call__ method for
Dirichlet characters, miscellaneous fixes
- Julian Rueth (2014-03-06): use UniqueFactory to cache DirichletGroups
"""
# ****************************************************************************
# Copyright (C) 2004-2006 William Stein <wstein@gmail.com>
# Copyright (C) 2014 Julian Rueth <julian.rueth@fsfe.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function
import sage.categories.all as cat
from sage.misc.all import prod
import sage.misc.prandom as random
import sage.modules.free_module as free_module
import sage.modules.free_module_element as free_module_element
import sage.rings.all as rings
import sage.rings.number_field.number_field as number_field
from sage.libs.pari import pari
from sage.categories.map import Map
from sage.rings.rational_field import is_RationalField
from sage.rings.complex_field import is_ComplexField
from sage.rings.qqbar import is_AlgebraicField
from sage.rings.ring import is_Ring
from sage.misc.functional import round
from sage.misc.cachefunc import cached_method
from sage.misc.fast_methods import WithEqualityById
from sage.structure.element import MultiplicativeGroupElement
from sage.structure.gens_py import multiplicative_iterator
from sage.structure.parent import Parent
from sage.structure.sequence import Sequence
from sage.structure.factory import UniqueFactory
from sage.structure.richcmp import richcmp
from sage.arith.all import (binomial, bernoulli, kronecker, factor, gcd,
lcm, fundamental_discriminant, euler_phi, factorial, valuation)
def trivial_character(N, base_ring=rings.RationalField()):
r"""
Return the trivial character of the given modulus, with values in the given
base ring.
EXAMPLES::
sage: t = trivial_character(7)
sage: [t(x) for x in [0..20]]
[0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1]
sage: t(1).parent()
Rational Field
sage: trivial_character(7, Integers(3))(1).parent()
Ring of integers modulo 3
"""
return DirichletGroup(N, base_ring)(1)
TrivialCharacter = trivial_character
def kronecker_character(d):
"""
Return the quadratic Dirichlet character (d/.) of minimal
conductor.
EXAMPLES::
sage: kronecker_character(97*389*997^2)
Dirichlet character modulo 37733 of conductor 37733 mapping 1557 |--> -1, 37346 |--> -1
::
sage: a = kronecker_character(1)
sage: b = DirichletGroup(2401,QQ)(a) # NOTE -- over QQ!
sage: b.modulus()
2401
AUTHORS:
- Jon Hanke (2006-08-06)
"""
d = rings.Integer(d)
if d == 0:
raise ValueError("d must be nonzero")
D = fundamental_discriminant(d)
G = DirichletGroup(abs(D), rings.RationalField())
return G([kronecker(D,u) for u in G.unit_gens()])
def kronecker_character_upside_down(d):
"""
    Return the quadratic Dirichlet character (./d) of conductor d, for
    d > 0.
EXAMPLES::
sage: kronecker_character_upside_down(97*389*997^2)
Dirichlet character modulo 37506941597 of conductor 37733 mapping 13533432536 |--> -1, 22369178537 |--> -1, 14266017175 |--> 1
AUTHORS:
- Jon Hanke (2006-08-06)
"""
d = rings.Integer(d)
if d <= 0:
raise ValueError("d must be positive")
G = DirichletGroup(d, rings.RationalField())
return G([kronecker(u.lift(),d) for u in G.unit_gens()])
def is_DirichletCharacter(x):
r"""
Return True if x is of type DirichletCharacter.
EXAMPLES::
sage: from sage.modular.dirichlet import is_DirichletCharacter
sage: is_DirichletCharacter(trivial_character(3))
True
sage: is_DirichletCharacter([1])
False
"""
return isinstance(x, DirichletCharacter)
class DirichletCharacter(MultiplicativeGroupElement):
"""
A Dirichlet character.
"""
def __init__(self, parent, x, check=True):
r"""
Create a Dirichlet character with specified values on
generators of `(\ZZ/n\ZZ)^*`.
INPUT:
- ``parent`` -- :class:`DirichletGroup`, a group of Dirichlet
characters
- ``x`` -- one of the following:
- tuple or list of ring elements: the values of the
Dirichlet character on the standard generators of
`(\ZZ/N\ZZ)^*` as returned by
:meth:`sage.rings.finite_rings.integer_mod_ring.IntegerModRing_generic.unit_gens`.
- vector over `\ZZ/e\ZZ`, where `e` is the order of the
standard root of unity for ``parent``.
In both cases, the orders of the elements must divide the
orders of the respective generators of `(\ZZ/N\ZZ)^*`.
OUTPUT:
The Dirichlet character defined by `x` (type
:class:`DirichletCharacter`).
EXAMPLES::
sage: G.<e> = DirichletGroup(13)
sage: G
Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4
sage: e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: loads(e.dumps()) == e
True
::
sage: G, x = DirichletGroup(35).objgens()
sage: e = x[0]*x[1]; e
Dirichlet character modulo 35 of conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2
sage: e.order()
12
sage: loads(e.dumps()) == e
True
TESTS::
sage: G = DirichletGroup(10)
sage: TestSuite(G[1]).run()
It is checked that the orders of the elements in `x` are
admissible (see :trac:`17283`)::
sage: k.<i> = CyclotomicField(4)
sage: G = DirichletGroup(192)
sage: G([i, -1, -1])
Traceback (most recent call last):
...
ValueError: values (= (zeta16^4, -1, -1)) must have multiplicative orders dividing (2, 16, 2), respectively
sage: from sage.modular.dirichlet import DirichletCharacter
sage: M = FreeModule(Zmod(16), 3)
sage: DirichletCharacter(G, M([4, 8, 8]))
Traceback (most recent call last):
...
ValueError: values (= (4, 8, 8) modulo 16) must have additive orders dividing (2, 16, 2), respectively
"""
MultiplicativeGroupElement.__init__(self, parent)
if check:
orders = parent.integers_mod().unit_group().gens_orders()
if len(x) != len(orders):
raise ValueError("wrong number of values (= {}) on generators (want {})".format(x, len(orders)))
if free_module_element.is_FreeModuleElement(x):
x = parent._module(x)
if any(u * v for u, v in zip(x, orders)):
raise ValueError("values (= {} modulo {}) must have additive orders dividing {}, respectively"
.format(x, parent.zeta_order(), orders))
self.element.set_cache(x)
else:
R = parent.base_ring()
x = tuple(map(R, x))
if R.is_exact() and any(u**v != 1 for u, v in zip(x, orders)):
raise ValueError("values (= {}) must have multiplicative orders dividing {}, respectively"
.format(x, orders))
self.values_on_gens.set_cache(x)
else:
if free_module_element.is_FreeModuleElement(x):
self.element.set_cache(x)
else:
self.values_on_gens.set_cache(x)
@cached_method
def __eval_at_minus_one(self):
r"""
Efficiently evaluate the character at -1 using knowledge of its
order. This is potentially much more efficient than computing the
value of -1 directly using dlog and a large power of the image root
of unity.
We use the following. Proposition: Suppose eps is a character mod
`p^n`, where `p` is a prime. Then
`\varepsilon(-1) = -1` if and only if `p = 2` and
the factor of eps at 4 is nontrivial or `p > 2` and 2 does
not divide `\phi(p^n)/\mbox{\rm ord}(\varepsilon)`.
EXAMPLES::
sage: chi = DirichletGroup(20).0; chi._DirichletCharacter__eval_at_minus_one()
-1
"""
D = self.decomposition()
val = self.base_ring()(1)
for e in D:
if e.modulus() % 2 == 0:
if e.modulus() % 4 == 0:
val *= e.values_on_gens()[0] # first gen is -1 for 2-power modulus
elif (euler_phi(e.parent().modulus()) / e.order()) % 2:
val *= -1
return val
def __call__(self, m):
"""
Return the value of this character at the integer `m`.
.. warning::
A table of values of the character is made the first time
you call this (unless `m` equals -1)
EXAMPLES::
sage: G = DirichletGroup(60)
sage: e = prod(G.gens(), G(1))
sage: e
Dirichlet character modulo 60 of conductor 60 mapping 31 |--> -1, 41 |--> -1, 37 |--> zeta4
sage: e(-1)
-1
sage: e(2)
0
sage: e(7)
-zeta4
sage: Integers(60).unit_gens()
(31, 41, 37)
sage: e(31)
-1
sage: e(41)
-1
sage: e(37)
zeta4
sage: e(31*37)
-zeta4
sage: parent(e(31*37))
Cyclotomic Field of order 4 and degree 2
"""
N = self.modulus()
m = m % N
if self.values.is_in_cache() or m != N - 1:
return self.values()[m]
else:
return self.__eval_at_minus_one()
def change_ring(self, R):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a conversion map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
EXAMPLES::
sage: e = DirichletGroup(7, QQ).0
sage: f = e.change_ring(QuadraticField(3, 'a'))
sage: f.parent()
Group of Dirichlet characters modulo 7 with values in Number Field in a with defining polynomial x^2 - 3 with a = 1.732050807568878?
::
sage: e = DirichletGroup(13).0
sage: e.change_ring(QQ)
Traceback (most recent call last):
...
TypeError: Unable to coerce zeta12 to a rational
We test the case where `R` is a map (:trac:`18072`)::
sage: K.<i> = QuadraticField(-1)
sage: chi = DirichletGroup(5, K)[1]
sage: chi(2)
i
sage: f = K.complex_embeddings()[0]
sage: psi = chi.change_ring(f)
sage: psi(2)
-1.83697019872103e-16 - 1.00000000000000*I
"""
if self.base_ring() is R:
return self
G = self.parent().change_ring(R)
return G.element_class(G, [R(x) for x in self.values_on_gens()])
def _richcmp_(self, other, op):
"""
Compare ``self`` to ``other``.
.. NOTE::
Since there is no coercion between Dirichlet groups
of different moduli, characters of different moduli
compare as unequal, even if they define identical
functions on ``ZZ``.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: f = e.restrict(8)
sage: e == e
True
sage: f == f
True
sage: e == f
False
sage: k = DirichletGroup(7)([-1])
sage: k == e
False
"""
return richcmp(self.values_on_gens(), other.values_on_gens(), op)
def __hash__(self):
"""
Return the hash of ``self``.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: hash(e)
-1497246046 # 32-bit
-3713082714463545694 # 64-bit
"""
return hash(self.values_on_gens())
def __invert__(self):
"""
Return the multiplicative inverse of self.
EXAMPLES::
sage: e = DirichletGroup(13).0
sage: f = ~e
sage: f*e
Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = -self.element()
else:
x = tuple(~z for z in self.values_on_gens())
return G.element_class(G, x, check=False)
def _mul_(self, other):
"""
Return the product of self and other.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: b
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
sage: a*b # indirect doctest
Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> zeta4
Multiplying elements whose parents have different zeta orders works::
sage: a = DirichletGroup(3, QQ, zeta=1, zeta_order=1)(1)
sage: b = DirichletGroup(3, QQ, zeta=-1, zeta_order=2)([-1])
sage: a * b # indirect doctest
Dirichlet character modulo 3 of conductor 3 mapping 2 |--> -1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = self.element() + other.element()
else:
x = tuple(y * z for y, z in zip(self.values_on_gens(), other.values_on_gens()))
return G.element_class(G, x, check=False)
def __copy__(self):
"""
Return a (shallow) copy of this Dirichlet character.
EXAMPLES::
sage: G.<a> = DirichletGroup(11)
sage: b = copy(a)
sage: a is b
False
sage: a.element() is b.element()
False
sage: a.values_on_gens() is b.values_on_gens()
True
"""
# This method exists solely because of a bug in the cPickle module --
# see modsym/manin_symbols.py.
G = self.parent()
return G.element_class(G, self.values_on_gens(), check=False)
def __pow__(self, n):
"""
Return self raised to the power of n
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a^2
Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1
sage: b^2
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = n * self.element()
else:
x = tuple(z**n for z in self.values_on_gens())
return G.element_class(G, x, check=False)
def _repr_short_(self):
r"""
A short string representation of self, often used in string representations of modular forms
EXAMPLES::
sage: chi = DirichletGroup(24).0
sage: chi._repr_short_()
'[-1, 1, 1]'
"""
return str(list(self.values_on_gens()))
def _repr_(self):
"""
String representation of self.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: repr(a) # indirect doctest
'Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1'
TESTS:
Dirichlet characters modulo 1 and 2 are printed correctly (see
:trac:`17338`)::
sage: DirichletGroup(1)[0]
Dirichlet character modulo 1 of conductor 1
sage: DirichletGroup(2)[0]
Dirichlet character modulo 2 of conductor 1
"""
s = 'Dirichlet character modulo %s of conductor %s' % (self.modulus(), self.conductor())
r = len(self.values_on_gens())
if r != 0:
s += ' mapping '
for i in range(r):
if i != 0:
s += ', '
s += str(self.parent().unit_gens()[i]) + ' |--> ' + str(self.values_on_gens()[i])
return s
def _latex_(self):
r"""
LaTeX representation of self.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(16)
sage: latex(b) # indirect doctest
\hbox{Dirichlet character modulo } 16 \hbox{ of conductor } 16 \hbox{ mapping } 15 \mapsto 1,\ 5 \mapsto \zeta_{4}
TESTS:
Dirichlet characters modulo 1 and 2 are printed correctly (see
:trac:`17338`)::
sage: latex(DirichletGroup(1)[0])
\hbox{Dirichlet character modulo } 1 \hbox{ of conductor } 1
sage: latex(DirichletGroup(2)[0])
\hbox{Dirichlet character modulo } 2 \hbox{ of conductor } 1
"""
s = r'\hbox{Dirichlet character modulo } %s \hbox{ of conductor } %s' % (self.modulus(), self.conductor())
r = len(self.values_on_gens())
if r != 0:
s += r' \hbox{ mapping } '
for i in range(r):
if i != 0:
s += r',\ '
s += self.parent().unit_gens()[i]._latex_() + r' \mapsto ' + self.values_on_gens()[i]._latex_()
return s
def base_ring(self):
"""
Returns the base ring of this Dirichlet character.
EXAMPLES::
sage: G = DirichletGroup(11)
sage: G.gen(0).base_ring()
Cyclotomic Field of order 10 and degree 4
sage: G = DirichletGroup(11, RationalField())
sage: G.gen(0).base_ring()
Rational Field
"""
return self.parent().base_ring()
def bar(self):
"""
Return the complex conjugate of this Dirichlet character.
EXAMPLES::
sage: e = DirichletGroup(5).0
sage: e
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4
sage: e.bar()
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4
"""
return ~self
def bernoulli(self, k, algorithm='recurrence', cache=True, **opts):
r"""
Returns the generalized Bernoulli number `B_{k,eps}`.
INPUT:
- ``k`` -- a non-negative integer
- ``algorithm`` -- either ``'recurrence'`` (default) or
``'definition'``
- ``cache`` -- if True, cache answers
- ``**opts`` -- optional arguments; not used directly, but
passed to the :func:`bernoulli` function if this is called
OUTPUT:
Let `\varepsilon` be a (not necessarily primitive) character
of modulus `N`. This function returns the generalized
Bernoulli number `B_{k,\varepsilon}`, as defined by the
following identity of power series (see for example
[DI1995]_, Section 2.2):
.. MATH::
\sum_{a=1}^N \frac{\varepsilon(a) t e^{at}}{e^{Nt}-1}
        = \sum_{k=0}^{\infty} \frac{B_{k,\varepsilon}}{k!} t^k.
ALGORITHM:
The ``'recurrence'`` algorithm computes generalized Bernoulli
numbers via classical Bernoulli numbers using the formula in
[Coh2007]_, Proposition 9.4.5; this is usually optimal. The
``definition`` algorithm uses the definition directly.
.. WARNING::
In the case of the trivial Dirichlet character modulo 1,
this function returns `B_{1,\varepsilon} = 1/2`, in
accordance with the above definition, but in contrast to
the value `B_1 = -1/2` for the classical Bernoulli number.
Some authors use an alternative definition giving
`B_{1,\varepsilon} = -1/2`; see the discussion in
[Coh2007]_, Section 9.4.1.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.bernoulli(5)
7430/13*zeta12^3 - 34750/13*zeta12^2 - 11380/13*zeta12 + 9110/13
sage: eps = DirichletGroup(9).0
sage: eps.bernoulli(3)
10*zeta6 + 4
sage: eps.bernoulli(3, algorithm="definition")
10*zeta6 + 4
TESTS:
Check that :trac:`17586` is fixed::
sage: DirichletGroup(1)[0].bernoulli(1)
1/2
"""
if cache:
try:
self.__bernoulli
except AttributeError:
self.__bernoulli = {}
if k in self.__bernoulli:
return self.__bernoulli[k]
N = self.modulus()
K = self.base_ring()
if N == 1:
# By definition, the first Bernoulli number of the trivial
# character is 1/2, in contrast to the value B_1 = -1/2.
ber = K.one()/2 if k == 1 else K(bernoulli(k))
elif self(-1) != K((-1)**k):
ber = K.zero()
elif algorithm == "recurrence":
# The following code is pretty fast, at least compared to
# the other algorithm below. That said, I'm sure it could
# be sped up by a factor of 10 or more in many cases,
# especially since we end up computing all the Bernoulli
# numbers up to k, which should be done with power series
# instead of calls to the Bernoulli function. Likewise
# computing all binomial coefficients can be done much
# more efficiently.
v = self.values()
S = lambda n: sum(v[r] * r**n for r in range(1, N))
ber = K(sum(binomial(k,j) * bernoulli(j, **opts) *
N**(j-1) * S(k-j) for j in range(k+1)))
elif algorithm == "definition":
# This is better since it computes the same thing, but requires
# no arith in a poly ring over a number field.
prec = k+2
R = rings.PowerSeriesRing(rings.QQ, 't')
t = R.gen()
# g(t) = t/(e^{Nt}-1)
g = t/((N*t).exp(prec) - 1)
# h(n) = g(t)*e^{nt}
h = [0] + [g * ((n*t).exp(prec)) for n in range(1,N+1)]
ber = sum([self(a)*h[a][k] for a in range(1,N+1)]) * factorial(k)
else:
raise ValueError("algorithm = '%s' unknown"%algorithm)
if cache:
self.__bernoulli[k] = ber
return ber
def lfunction(self, prec=53, algorithm='pari'):
"""
Return the L-function of ``self``.
The result is a wrapper around a PARI L-function or around
the ``lcalc`` program.
INPUT:
- ``prec`` -- precision (default 53)
- ``algorithm`` -- 'pari' (default) or 'lcalc'
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: L = a.lfunction(); L
PARI L-function associated to Dirichlet character modulo 20
of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: L(4)
0.988944551741105
With the algorithm "lcalc"::
sage: a = a.primitive_character()
sage: L = a.lfunction(algorithm='lcalc'); L
L-function with complex Dirichlet coefficients
sage: L.value(4) # abs tol 1e-14
0.988944551741105 - 5.16608739123418e-18*I
"""
if algorithm is None:
algorithm = 'pari'
if algorithm == 'pari':
from sage.lfunctions.pari import lfun_character, LFunction
Z = LFunction(lfun_character(self), prec=prec)
Z.rename('PARI L-function associated to %s' % self)
return Z
elif algorithm == 'lcalc':
from sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_character
return Lfunction_from_character(self)
raise ValueError('algorithm must be "pari" or "lcalc"')
@cached_method
def conductor(self):
"""
Computes and returns the conductor of this character.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.conductor()
4
sage: b.conductor()
5
sage: (a*b).conductor()
20
TESTS::
sage: G.<a, b> = DirichletGroup(20)
sage: type(G(1).conductor())
<type 'sage.rings.integer.Integer'>
"""
if self.modulus() == 1 or self.is_trivial():
return rings.Integer(1)
F = factor(self.modulus())
if len(F) > 1:
return prod([d.conductor() for d in self.decomposition()])
p = F[0][0]
# When p is odd, and x =/= 1, the conductor is the smallest p**r such that
# Order(x) divides EulerPhi(p**r) = p**(r-1)*(p-1).
# For a given r, whether or not the above divisibility holds
# depends only on the factor of p**(r-1) on the right hand side.
# Since p-1 is coprime to p, this smallest r such that the
# divisibility holds equals Valuation(Order(x),p)+1.
cond = p**(valuation(self.order(),p) + 1)
if p == 2 and F[0][1] > 2 and self.values_on_gens()[1].multiplicative_order() != 1:
cond *= 2
return rings.Integer(cond)
@cached_method
def decomposition(self):
r"""
Return the decomposition of self as a product of Dirichlet
characters of prime power modulus, where the prime powers exactly
divide the modulus of this character.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: c = a*b
sage: d = c.decomposition(); d
[Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4]
sage: d[0].parent()
Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2
sage: d[1].parent()
Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2
We can't multiply directly, since coercion of one element into the
other parent fails in both cases::
sage: d[0]*d[1] == c
Traceback (most recent call last):
...
TypeError: unsupported operand parent(s) for *: 'Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2' and 'Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2'
We can multiply if we're explicit about where we want the
multiplication to take place.
::
sage: G(d[0])*G(d[1]) == c
True
Conductors that are divisible by various powers of 2 present
some problems as the multiplicative group modulo `2^k` is
trivial for `k = 1` and non-cyclic for `k \ge 3`::
sage: (DirichletGroup(18).0).decomposition()
[Dirichlet character modulo 2 of conductor 1, Dirichlet character modulo 9 of conductor 9 mapping 2 |--> zeta6]
sage: (DirichletGroup(36).0).decomposition()
[Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1]
sage: (DirichletGroup(72).0).decomposition()
[Dirichlet character modulo 8 of conductor 4 mapping 7 |--> -1, 5 |--> 1, Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1]
"""
D = self.parent().decomposition()
vals = [[z] for z in self.values_on_gens()]
if self.modulus() % 8 == 0: # 2 factors at 2.
vals[0].append(vals[1][0])
del vals[1]
elif self.modulus() % 4 == 2: # 0 factors at 2.
vals = [1] + vals
return [D[i](vals[i]) for i in range(len(D))]
def extend(self, M):
"""
Returns the extension of this character to a Dirichlet character
modulo the multiple M of the modulus.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: H.<c> = DirichletGroup(4)
sage: c.extend(20)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: a
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: c.extend(20) == a
True
"""
if M % self.modulus() != 0:
raise ArithmeticError("M(=%s) must be a multiple of the modulus(=%s)"%(M,self.modulus()))
H = DirichletGroup(M, self.base_ring())
return H(self)
def _pari_conversion(self):
r"""
Prepare data for the conversion of the character to Pari.
OUTPUT:
pair (G, v) where G is `(\ZZ / N \ZZ)^*` where `N` is the modulus
EXAMPLES::
sage: chi4 = DirichletGroup(4).gen()
sage: chi4._pari_conversion()
([[4, [0]], [2, [2], [3]], [[2]~, Vecsmall([2])],
[[4], [[1, matrix(0,2)]], Mat(1), [3], [2], [0]], Mat(1)], [1])
sage: chi = DirichletGroup(24)([1,-1,-1]); chi
Dirichlet character modulo 24 of conductor 24
mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
sage: chi._pari_conversion()
([[24, [0]], [8, [2, 2, 2], [7, 13, 17]],
[[2, 2, 3]~, Vecsmall([3, 3, 1])],
[[8, 8, 3], [[1, matrix(0,2)], [1, matrix(0,2)], [2, Mat([2, 1])]],
[1, 0, 0; 0, 1, 0; 0, 0, 1], [7, 13, 17], [2, 2, 2], [0, 0, 0]],
[1, 0, 0; 0, 1, 0; 0, 0, 1]], [0, 1, 1])
"""
G = pari.znstar(self.modulus(), 1)
pari_orders = G[1][1]
pari_gens = G[1][2]
# one should use the following, but this does not work
# pari_orders = G.cyc()
# pari_gens = G.gen()
values_on_gens = (self(x) for x in pari_gens)
# now compute the input for pari (list of exponents)
P = self.parent()
if is_ComplexField(P.base_ring()):
zeta = P.zeta()
zeta_argument = zeta.argument()
v = [int(x.argument() / zeta_argument) for x in values_on_gens]
else:
dlog = P._zeta_dlog
v = [dlog[x] for x in values_on_gens]
m = P.zeta_order()
v = [(vi * oi) // m for vi, oi in zip(v, pari_orders)]
return (G, v)
def conrey_number(self):
r"""
Return the Conrey number for this character.
This is a positive integer coprime to q that identifies a
Dirichlet character of modulus q.
See https://www.lmfdb.org/knowledge/show/character.dirichlet.conrey
EXAMPLES::
sage: chi4 = DirichletGroup(4).gen()
sage: chi4.conrey_number()
3
sage: chi = DirichletGroup(24)([1,-1,-1]); chi
Dirichlet character modulo 24 of conductor 24
mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
sage: chi.conrey_number()
5
sage: chi = DirichletGroup(60)([1,-1,I])
sage: chi.conrey_number()
17
sage: chi = DirichletGroup(420)([1,-1,-I,1])
sage: chi.conrey_number()
113
TESTS::
sage: eps1 = DirichletGroup(5)([-1])
sage: eps2 = DirichletGroup(5,QQ)([-1])
sage: eps1.conrey_number() == eps2.conrey_number()
True
"""
G, v = self._pari_conversion()
return pari.znconreyexp(G, v).sage()
def lmfdb_page(self):
r"""
Open the LMFDB web page of the character in a browser.
See https://www.lmfdb.org
EXAMPLES::
sage: E = DirichletGroup(4).gen()
sage: E.lmfdb_page() # optional -- webbrowser
"""
import webbrowser
lmfdb_url = 'https://www.lmfdb.org/Character/Dirichlet/{}/{}'
url = lmfdb_url.format(self.modulus(), self.conrey_number())
webbrowser.open(url)
def galois_orbit(self, sort=True):
r"""
Return the orbit of this character under the action of the absolute
Galois group of the prime subfield of the base ring.
EXAMPLES::
sage: G = DirichletGroup(30); e = G.1
sage: e.galois_orbit()
[Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> -zeta4,
Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> zeta4]
Another example::
sage: G = DirichletGroup(13)
sage: G.galois_orbits()
[
[Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1],
...,
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1]
]
sage: e = G.0
sage: e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: e.galois_orbit()
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^3 + zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^3 - zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12]
sage: e = G.0^2; e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2
sage: e.galois_orbit()
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2, Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^2 + 1]
A non-example::
sage: chi = DirichletGroup(7, Integers(9), zeta = Integers(9)(2)).0
sage: chi.galois_orbit()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
"""
if not self.base_ring().is_integral_domain():
raise TypeError("Galois orbits only defined if base ring is an integral domain")
k = self.order()
if k <= 2:
return [self]
P = self.parent()
z = self.element()
o = int(z.additive_order())
Auts = set([m % o for m in P._automorphisms()])
v = [P.element_class(P, m * z, check=False) for m in Auts]
if sort:
v.sort()
return v
def gauss_sum(self, a=1):
r"""
Return a Gauss sum associated to this Dirichlet character.
The Gauss sum associated to `\chi` is
.. MATH::
g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m^{th}` root of unity.
FACTS: If the modulus is a prime `p` and the character is
nontrivial, then the Gauss sum has absolute value `\sqrt{p}`.
CACHING: Computed Gauss sums are *not* cached with this character.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G([-1])
sage: e.gauss_sum(1)
2*zeta6 - 1
sage: e.gauss_sum(2)
-2*zeta6 + 1
sage: norm(e.gauss_sum())
3
::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.gauss_sum()
-zeta156^46 + zeta156^45 + zeta156^42 + zeta156^41 + 2*zeta156^40 + zeta156^37 - zeta156^36 - zeta156^34 - zeta156^33 - zeta156^31 + 2*zeta156^30 + zeta156^28 - zeta156^24 - zeta156^22 + zeta156^21 + zeta156^20 - zeta156^19 + zeta156^18 - zeta156^16 - zeta156^15 - 2*zeta156^14 - zeta156^10 + zeta156^8 + zeta156^7 + zeta156^6 + zeta156^5 - zeta156^4 - zeta156^2 - 1
sage: factor(norm(e.gauss_sum()))
13^24
TESTS:
The field of algebraic numbers is supported (:trac:`19056`)::
sage: G = DirichletGroup(7, QQbar)
sage: G[1].gauss_sum()
-2.440133358345538? + 1.022618791871794?*I
Check that :trac:`19060` is fixed::
sage: K.<z> = CyclotomicField(8)
sage: G = DirichletGroup(13, K)
sage: chi = G([z^2])
sage: chi.gauss_sum()
zeta52^22 + zeta52^21 + zeta52^19 - zeta52^16 + zeta52^15 + zeta52^14 + zeta52^12 - zeta52^11 - zeta52^10 - zeta52^7 - zeta52^5 + zeta52^4
Check that :trac:`25127` is fixed::
sage: G = DirichletGroup(1)
sage: chi = G.one()
sage: chi.gauss_sum()
1
.. SEEALSO::
- :func:`sage.arith.misc.gauss_sum` for general finite fields
- :func:`sage.rings.padics.misc.gauss_sum` for a `p`-adic version
"""
G = self.parent()
K = G.base_ring()
chi = self
m = G.modulus()
if is_ComplexField(K):
return self.gauss_sum_numerical(a=a)
elif is_AlgebraicField(K):
L = K
zeta = L.zeta(m)
elif number_field.is_CyclotomicField(K) or is_RationalField(K):
chi = chi.minimize_base_ring()
n = lcm(m, G.zeta_order())
L = rings.CyclotomicField(n)
zeta = L.gen(0) ** (n // m)
else:
raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field")
zeta = zeta ** a
g = L(chi(0))
z = L.one()
for c in chi.values()[1:]:
z *= zeta
g += L(c)*z
return g
def gauss_sum_numerical(self, prec=53, a=1):
r"""
Return a Gauss sum associated to this Dirichlet character as an
approximate complex number with prec bits of precision.
INPUT:
- ``prec`` -- integer (default: 53), *bits* of precision
- ``a`` -- integer, as for :meth:`gauss_sum`.
The Gauss sum associated to `\chi` is
.. MATH::
g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m^{th}` root of unity.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G.0
sage: abs(e.gauss_sum_numerical())
1.7320508075...
sage: sqrt(3.0)
1.73205080756888
sage: e.gauss_sum_numerical(a=2)
-...e-15 - 1.7320508075...*I
sage: e.gauss_sum_numerical(a=2, prec=100)
4.7331654313260708324703713917e-30 - 1.7320508075688772935274463415*I
sage: G = DirichletGroup(13)
sage: H = DirichletGroup(13, CC)
sage: e = G.0
sage: f = H.0
sage: e.gauss_sum_numerical()
-3.07497205... + 1.8826966926...*I
sage: f.gauss_sum_numerical()
-3.07497205... + 1.8826966926...*I
sage: abs(e.gauss_sum_numerical())
3.60555127546...
sage: abs(f.gauss_sum_numerical())
3.60555127546...
sage: sqrt(13.0)
3.60555127546399
TESTS:
The field of algebraic numbers is supported (:trac:`19056`)::
sage: G = DirichletGroup(7, QQbar)
sage: G[1].gauss_sum_numerical()
-2.44013335834554 + 1.02261879187179*I
"""
G = self.parent()
K = G.base_ring()
if is_ComplexField(K):
phi = lambda t : t
CC = K
elif is_AlgebraicField(K):
from sage.rings.complex_field import ComplexField
CC = ComplexField(prec)
phi = CC.coerce_map_from(K)
elif number_field.is_CyclotomicField(K) or is_RationalField(K):
phi = K.complex_embedding(prec)
CC = phi.codomain()
else:
raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field")
zeta = CC.zeta(G.modulus()) ** a
g = phi(self(0))
z = CC.one()
for c in self.values()[1:]:
z *= zeta
g += phi(c)*z
return g
def jacobi_sum(self, char, check=True):
r"""
Return the Jacobi sum associated to these Dirichlet characters
(i.e., J(self,char)).
This is defined as
.. MATH::
J(\chi, \psi) = \sum_{a \in \ZZ / N\ZZ} \chi(a) \psi(1-a)
where `\chi` and `\psi` are both characters modulo `N`.
EXAMPLES::
sage: D = DirichletGroup(13)
sage: e = D.0
sage: f = D[-2]
sage: e.jacobi_sum(f)
3*zeta12^2 + 2*zeta12 - 3
sage: f.jacobi_sum(e)
3*zeta12^2 + 2*zeta12 - 3
sage: p = 7
sage: DP = DirichletGroup(p)
sage: f = DP.0
sage: e.jacobi_sum(f)
Traceback (most recent call last):
...
NotImplementedError: Characters must be from the same Dirichlet Group.
sage: all_jacobi_sums = [(DP[i].values_on_gens(),DP[j].values_on_gens(),DP[i].jacobi_sum(DP[j]))
....: for i in range(p-1) for j in range(i, p-1)]
sage: for s in all_jacobi_sums:
....: print(s)
((1,), (1,), 5)
((1,), (zeta6,), -1)
((1,), (zeta6 - 1,), -1)
((1,), (-1,), -1)
((1,), (-zeta6,), -1)
((1,), (-zeta6 + 1,), -1)
((zeta6,), (zeta6,), -zeta6 + 3)
((zeta6,), (zeta6 - 1,), 2*zeta6 + 1)
((zeta6,), (-1,), -2*zeta6 - 1)
((zeta6,), (-zeta6,), zeta6 - 3)
((zeta6,), (-zeta6 + 1,), 1)
((zeta6 - 1,), (zeta6 - 1,), -3*zeta6 + 2)
((zeta6 - 1,), (-1,), 2*zeta6 + 1)
((zeta6 - 1,), (-zeta6,), -1)
((zeta6 - 1,), (-zeta6 + 1,), -zeta6 - 2)
((-1,), (-1,), 1)
((-1,), (-zeta6,), -2*zeta6 + 3)
((-1,), (-zeta6 + 1,), 2*zeta6 - 3)
((-zeta6,), (-zeta6,), 3*zeta6 - 1)
((-zeta6,), (-zeta6 + 1,), -2*zeta6 + 3)
((-zeta6 + 1,), (-zeta6 + 1,), zeta6 + 2)
Let's check that trivial sums are being calculated correctly::
sage: N = 13
sage: D = DirichletGroup(N)
sage: g = D(1)
sage: g.jacobi_sum(g)
11
sage: sum([g(x)*g(1-x) for x in IntegerModRing(N)])
11
And sums where exactly one character is nontrivial (see :trac:`6393`)::
sage: G = DirichletGroup(5); X=G.list(); Y=X[0]; Z=X[1]
sage: Y.jacobi_sum(Z)
-1
sage: Z.jacobi_sum(Y)
-1
Now let's take a look at a non-prime modulus::
sage: N = 9
sage: D = DirichletGroup(N)
sage: g = D(1)
sage: g.jacobi_sum(g)
3
We consider a sum with values in a finite field::
sage: g = DirichletGroup(17, GF(9,'a')).0
sage: g.jacobi_sum(g**2)
2*a
TESTS:
This shows that :trac:`6393` has been fixed::
sage: G = DirichletGroup(5); X = G.list(); Y = X[0]; Z = X[1]
sage: # Y is trivial and Z is quartic
sage: sum([Y(x)*Z(1-x) for x in IntegerModRing(5)])
-1
sage: # The value -1 above is the correct value of the Jacobi sum J(Y, Z).
sage: Y.jacobi_sum(Z); Z.jacobi_sum(Y)
-1
-1
"""
if check:
if self.parent() != char.parent():
raise NotImplementedError("Characters must be from the same Dirichlet Group.")
return sum([self(x) * char(1-x) for x in rings.IntegerModRing(self.modulus())])
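# Illustrative sketch (plain Python, not part of Sage; names are hypothetical):
# for nontrivial characters chi, psi modulo a prime p with chi*psi nontrivial,
# the classical identity J(chi, psi) = g(chi) g(psi) / g(chi psi) relates the
# Jacobi sum to Gauss sums; it can be observed numerically with characters
# built from a primitive root.
def _demo_jacobi_vs_gauss(p=7, root=3):
    import cmath
    w = cmath.exp(2j * cmath.pi / (p - 1))   # character values lie in mu_{p-1}
    zeta = cmath.exp(2j * cmath.pi / p)
    dlog = {pow(root, k, p): k for k in range(p - 1)}  # discrete logs base root
    def char(j):
        return lambda x: 0 if x % p == 0 else w**(j * dlog[x % p])
    def gauss(chi):
        return sum(chi(r) * zeta**r for r in range(p))
    def jacobi(chi, psi):
        return sum(chi(a) * psi(1 - a) for a in range(p))
    chi, psi, chipsi = char(1), char(2), char(3)
    assert abs(jacobi(chi, psi) - gauss(chi) * gauss(psi) / gauss(chipsi)) < 1e-9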
def kloosterman_sum(self, a=1, b=0):
r"""
Return the "twisted" Kloosterman sum associated to this Dirichlet character.
This includes Gauss sums, classical Kloosterman sums, Salié sums, etc.
The Kloosterman sum associated to `\chi` and the integers a,b is
.. MATH::
K(a,b,\chi) = \sum_{r \in (\ZZ/m\ZZ)^\times} \chi(r)\,\zeta^{ar+br^{-1}},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m^{th}` root of unity. This reduces to the Gauss sum if `b=0`.
This method performs an exact calculation and returns an element of a
suitable cyclotomic field; see also :meth:`.kloosterman_sum_numerical`,
which gives an inexact answer (but is generally much quicker).
CACHING: Computed Kloosterman sums are *not* cached with this
character.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G([-1])
sage: e.kloosterman_sum(3,5)
-2*zeta6 + 1
sage: G = DirichletGroup(20)
sage: e = G([1 for u in G.unit_gens()])
sage: e.kloosterman_sum(7,17)
-2*zeta20^6 + 2*zeta20^4 + 4
TESTS::
sage: G = DirichletGroup(20, UniversalCyclotomicField())
sage: e = G([1 for u in G.unit_gens()])
sage: e.kloosterman_sum(7,17)
-2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4
sage: G = DirichletGroup(12, QQbar)
sage: e = G.gens()[0]
sage: e.kloosterman_sum(5,11)
Traceback (most recent call last):
...
NotImplementedError: Kloosterman sums not implemented over this ring
"""
G = self.parent()
zo = G.zeta_order()
m = G.modulus()
g = 0
L = rings.CyclotomicField(m.lcm(zo))
zeta = L.gen(0)
try:
self(1) * zeta**(a+b)
except TypeError:
raise NotImplementedError('Kloosterman sums not implemented '
'over this ring')
n = zeta.multiplicative_order()
zeta = zeta**(n // m)
for c in m.coprime_integers(m):
e = rings.Mod(c, m)
g += self(c) * zeta**int(a*e + b*e**(-1))
return g
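# Illustrative sketch (plain Python, not part of Sage; names are hypothetical):
# with the trivial character and b != 0 this is the classical Kloosterman sum,
# which is real and satisfies the Weil bound |K(a, b; p)| <= 2*sqrt(p).
# Requires Python >= 3.8 for the modular inverse via pow(r, -1, p).
def _demo_kloosterman_weil(p=13, a=1, b=1):
    import cmath, math
    zeta = cmath.exp(2j * cmath.pi / p)
    K = sum(zeta**((a * r + b * pow(r, -1, p)) % p) for r in range(1, p))
    assert abs(K.imag) < 1e-9                      # K(a, b; p) is real
    assert abs(K.real) <= 2 * math.sqrt(p) + 1e-9  # Weil bound
    return K.real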
def kloosterman_sum_numerical(self, prec=53, a=1, b=0):
r"""
Return the Kloosterman sum associated to this Dirichlet character as
an approximate complex number with ``prec`` bits of precision.
See also :meth:`.kloosterman_sum`, which calculates the sum
exactly (which is generally slower).
INPUT:
- ``prec`` -- integer (default: 53), *bits* of precision
- ``a`` -- integer, as for :meth:`.kloosterman_sum`
- ``b`` -- integer, as for :meth:`.kloosterman_sum`.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G.0
The real component of the numerical value of ``e`` is near zero::
sage: v = e.kloosterman_sum_numerical()
sage: v.real() < 1.0e-15
True
sage: v.imag()
1.73205080756888
sage: G = DirichletGroup(20)
sage: e = G.1
sage: e.kloosterman_sum_numerical(53,3,11)
3.80422606518061 - 3.80422606518061*I
"""
G = self.parent()
K = G.base_ring()
if not (number_field.is_CyclotomicField(K) or is_RationalField(K)):
raise NotImplementedError("Kloosterman sums only currently implemented when the base ring is a cyclotomic field or QQ.")
phi = K.complex_embedding(prec)
CC = phi.codomain()
g = 0
m = G.modulus()
zeta = CC.zeta(m)
for c in m.coprime_integers(m):
e = rings.Mod(c, m)
z = zeta ** int(a*e + b*(e**(-1)))
g += phi(self(c))*z
return g
@cached_method
def is_even(self):
r"""
Return ``True`` if and only if `\varepsilon(-1) = 1`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.is_even()
False
sage: e(-1)
-1
sage: [e.is_even() for e in G]
[True, False, True, False, True, False, True, False, True, False, True, False]
sage: G = DirichletGroup(13, CC)
sage: e = G.0
sage: e.is_even()
False
sage: e(-1)
-1.000000...
sage: [e.is_even() for e in G]
[True, False, True, False, True, False, True, False, True, False, True, False]
sage: G = DirichletGroup(100000, CC)
sage: G.1.is_even()
True
Note that ``is_even`` need not be the negation of
``is_odd``, e.g., in characteristic 2::
sage: G.<e> = DirichletGroup(13, GF(4,'a'))
sage: e.is_even()
True
sage: e.is_odd()
True
"""
R = self.base_ring()
# self(-1) is either +1 or -1
if not R.is_exact():
return abs(self(-1) - R(1)) < 0.5
return self(-1) == R(1)
@cached_method
def is_odd(self):
r"""
Return ``True`` if and only if
`\varepsilon(-1) = -1`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.is_odd()
True
sage: [e.is_odd() for e in G]
[False, True, False, True, False, True, False, True, False, True, False, True]
sage: G = DirichletGroup(13, CC)
sage: e = G.0
sage: e.is_odd()
True
sage: [e.is_odd() for e in G]
[False, True, False, True, False, True, False, True, False, True, False, True]
sage: G = DirichletGroup(100000, CC)
sage: G.0.is_odd()
True
Note that ``is_even`` need not be the negation of
``is_odd``, e.g., in characteristic 2::
sage: G.<e> = DirichletGroup(13, GF(4,'a'))
sage: e.is_even()
True
sage: e.is_odd()
True
"""
R = self.base_ring()
# self(-1) is either +1 or -1
if not R.is_exact():
return abs(self(-1) - R(-1)) < 0.5
return self(-1) == R(-1)
@cached_method
def is_primitive(self):
"""
Return ``True`` if and only if this character is
primitive, i.e., its conductor equals its modulus.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.is_primitive()
False
sage: b.is_primitive()
False
sage: (a*b).is_primitive()
True
sage: G.<a,b> = DirichletGroup(20, CC)
sage: a.is_primitive()
False
sage: b.is_primitive()
False
sage: (a*b).is_primitive()
True
"""
return (self.conductor() == self.modulus())
@cached_method
def is_trivial(self):
r"""
Return ``True`` if this is the trivial character,
i.e., has order 1.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.is_trivial()
False
sage: (a^2).is_trivial()
True
"""
if self.element.is_in_cache():
return not self.element()
one = self.base_ring().one()
return all(x == one for x in self.values_on_gens())
def kernel(self):
r"""
Return the kernel of this character.
OUTPUT: Currently the kernel is returned as a list. This may
change.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.kernel()
[1, 9, 13, 17]
sage: b.kernel()
[1, 11]
"""
one = self.base_ring().one()
return [x for x in range(self.modulus()) if self(x) == one]
def maximize_base_ring(self):
r"""
Let
.. MATH::
\varepsilon : (\ZZ/N\ZZ)^* \to \QQ(\zeta_n)
be a Dirichlet character. This function returns an equal Dirichlet
character
.. MATH::
\chi : (\ZZ/N\ZZ)^* \to \QQ(\zeta_m)
where `m` is the least common multiple of `n` and
the exponent of `(\ZZ/N\ZZ)^*`.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20,QQ)
sage: b.maximize_base_ring()
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1
sage: b.maximize_base_ring().base_ring()
Cyclotomic Field of order 4 and degree 2
sage: DirichletGroup(20).base_ring()
Cyclotomic Field of order 4 and degree 2
"""
g = rings.IntegerModRing(self.modulus()).unit_group_exponent()
if g == 1:
g = 2
z = self.base_ring().zeta()
n = z.multiplicative_order()
m = lcm(g,n)
if n == m:
return self
K = rings.CyclotomicField(m)
return self.change_ring(K)
def minimize_base_ring(self):
r"""
Return a Dirichlet character that equals this one, but over as
small a subfield (or subring) of the base ring as possible.
.. note::
This function is currently only implemented when the base
ring is a number field. It's the identity function in
characteristic p.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = DirichletGroup(13).0
sage: e.base_ring()
Cyclotomic Field of order 12 and degree 4
sage: e.minimize_base_ring().base_ring()
Cyclotomic Field of order 12 and degree 4
sage: (e^2).minimize_base_ring().base_ring()
Cyclotomic Field of order 6 and degree 2
sage: (e^3).minimize_base_ring().base_ring()
Cyclotomic Field of order 4 and degree 2
sage: (e^12).minimize_base_ring().base_ring()
Rational Field
TESTS:
Check that :trac:`18479` is fixed::
sage: f = Newforms(Gamma1(25), names='a')[1]
sage: eps = f.character()
sage: eps.minimize_base_ring() == eps
True
A related bug (see :trac:`18086`)::
sage: K.<a,b>=NumberField([x^2 + 1, x^2 - 3])
sage: chi = DirichletGroup(7, K).0
sage: chi.minimize_base_ring()
Dirichlet character modulo 7 of conductor 7 mapping 3 |--> -1/2*b*a + 1/2
"""
R = self.base_ring()
if R.is_prime_field():
return self
p = R.characteristic()
if p:
K = rings.IntegerModRing(p)
elif self.order() <= 2:
K = rings.QQ
elif (isinstance(R, number_field.NumberField_generic)
and euler_phi(self.order()) < R.absolute_degree()):
K = rings.CyclotomicField(self.order())
else:
return self
try:
return self.change_ring(K)
except (TypeError, ValueError, ArithmeticError):
return self
def modulus(self):
"""
The modulus of this character.
EXAMPLES::
sage: e = DirichletGroup(100, QQ).0
sage: e.modulus()
100
sage: e.conductor()
4
"""
return self.parent().modulus()
def level(self):
"""
Synonym for modulus.
EXAMPLES::
sage: e = DirichletGroup(100, QQ).0
sage: e.level()
100
"""
return self.modulus()
@cached_method
def multiplicative_order(self):
"""
The order of this character.
EXAMPLES::
sage: e = DirichletGroup(100).1
sage: e.order() # same as multiplicative_order, since group is multiplicative
20
sage: e.multiplicative_order()
20
sage: e = DirichletGroup(100).0
sage: e.multiplicative_order()
2
"""
if self.parent().zeta.is_in_cache():
return self.element().additive_order()
return lcm([z.multiplicative_order() for z in self.values_on_gens()])
def primitive_character(self):
"""
Return the primitive character associated to ``self``.
EXAMPLES::
sage: e = DirichletGroup(100).0; e
Dirichlet character modulo 100 of conductor 4 mapping 51 |--> -1, 77 |--> 1
sage: e.conductor()
4
sage: f = e.primitive_character(); f
Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
sage: f.modulus()
4
"""
return self.restrict(self.conductor())
def restrict(self, M):
"""
Return the restriction of this character to a Dirichlet character
modulo the divisor M of the modulus, which must also be a multiple
of the conductor of this character.
EXAMPLES::
sage: e = DirichletGroup(100).0
sage: e.modulus()
100
sage: e.conductor()
4
sage: e.restrict(20)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: e.restrict(4)
Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
sage: e.restrict(50)
Traceback (most recent call last):
...
ValueError: conductor(=4) must divide M(=50)
"""
M = int(M)
if self.modulus() % M != 0:
raise ValueError("M(=%s) must divide the modulus(=%s)" % (M, self.modulus()))
if M % self.conductor() != 0:
raise ValueError("conductor(=%s) must divide M(=%s)" % (self.conductor(), M))
H = DirichletGroup(M, self.base_ring())
return H(self)
@cached_method
def values(self):
"""
Return a list of the values of this character on each integer
between 0 and the modulus.
EXAMPLES::
sage: e = DirichletGroup(20)(1)
sage: e.values()
[0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
sage: e = DirichletGroup(20).gen(0)
sage: e.values()
[0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1]
sage: e = DirichletGroup(20).gen(1)
sage: e.values()
[0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1, 0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1]
sage: e = DirichletGroup(21).gen(0) ; e.values()
[0, 1, -1, 0, 1, -1, 0, 0, -1, 0, 1, -1, 0, 1, 0, 0, 1, -1, 0, 1, -1]
sage: e = DirichletGroup(21, base_ring=GF(37)).gen(0) ; e.values()
[0, 1, 36, 0, 1, 36, 0, 0, 36, 0, 1, 36, 0, 1, 0, 0, 1, 36, 0, 1, 36]
sage: e = DirichletGroup(21, base_ring=GF(3)).gen(0) ; e.values()
[0, 1, 2, 0, 1, 2, 0, 0, 2, 0, 1, 2, 0, 1, 0, 0, 1, 2, 0, 1, 2]
::
sage: chi = DirichletGroup(100151, CyclotomicField(10)).0
sage: ls = chi.values() ; ls[0:10]
[0,
1,
-zeta10^3,
-zeta10,
-zeta10,
1,
zeta10^3 - zeta10^2 + zeta10 - 1,
zeta10,
zeta10^3 - zeta10^2 + zeta10 - 1,
zeta10^2]
TESTS:
Test that :trac:`11783` and :trac:`14368` are fixed::
sage: chi = DirichletGroup(1).list()[0]
sage: chi.values()
[1]
sage: chi(1)
1
"""
G = self.parent()
R = G.base_ring()
mod = self.parent().modulus()
if mod == 1:
return [R.one()]
elif mod == 2:
return [R.zero(), R.one()]
result_list = [R.zero()] * mod
gens = G.unit_gens()
orders = G.integers_mod().unit_group().gens_orders()
R_values = G._zeta_powers
val_on_gen = self.element()
exponents = [0] * len(orders)
n = G.integers_mod().one()
value = val_on_gen.base_ring().zero()
while True:
# record character value on n
result_list[n] = R_values[value]
# iterate:
# increase the exponent vector by 1,
# increase n accordingly, and increase value
i = 0
while True:
try:
exponents[i] += 1
except IndexError: # Done!
return result_list
value += val_on_gen[i]
n *= gens[i]
if exponents[i] < orders[i]:
break
exponents[i] = 0
i += 1
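# Illustrative sketch (plain Python, not part of Sage; names are hypothetical):
# the loop above enumerates every exponent vector (e_1, ..., e_k) with
# 0 <= e_i < orders[i] as a mixed-radix counter, updating n and the character
# value incrementally instead of recomputing them. The bare counter:
def _demo_mixed_radix(orders):
    exponents = [0] * len(orders)
    while True:
        yield tuple(exponents)
        i = 0
        while i < len(orders):
            exponents[i] += 1
            if exponents[i] < orders[i]:
                break          # no carry: emit the next vector
            exponents[i] = 0   # carry into the next digit
            i += 1
        else:
            return             # carried past the last digit: done
# list(_demo_mixed_radix([2, 2])) == [(0, 0), (1, 0), (0, 1), (1, 1)]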
@cached_method(do_pickle=True)
def values_on_gens(self):
r"""
Return a tuple of the values of ``self`` on the standard
generators of `(\ZZ/N\ZZ)^*`, where `N` is the modulus.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: e.values_on_gens ()
(-1, 1)
.. NOTE::
The constructor of :class:`DirichletCharacter` sets the
cache of :meth:`element` or of :meth:`values_on_gens`. The cache of
one of these methods needs to be set for the other method to work properly,
so these caches have to be stored when pickling an instance of
:class:`DirichletCharacter`.
"""
pows = self.parent()._zeta_powers
return tuple([pows[i] for i in self.element()])
@cached_method(do_pickle=True)
def element(self):
r"""
Return the underlying `\ZZ/n\ZZ`-module
vector of exponents.
.. warning::
Please do not change the entries of the returned vector;
this vector is mutable *only* because immutable vectors are
not implemented yet.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.element()
(2, 0)
sage: b.element()
(0, 1)
.. NOTE::
The constructor of :class:`DirichletCharacter` sets the
cache of :meth:`element` or of :meth:`values_on_gens`. The cache of
one of these methods needs to be set for the other method to work properly,
so these caches have to be stored when pickling an instance of
:class:`DirichletCharacter`.
"""
P = self.parent()
M = P._module
if is_ComplexField(P.base_ring()):
zeta = P.zeta()
zeta_argument = zeta.argument()
v = M([int(round(x.argument() / zeta_argument))
for x in self.values_on_gens()])
else:
dlog = P._zeta_dlog
v = M([dlog[x] for x in self.values_on_gens()])
v.set_immutable()
return v
def __setstate__(self, state):
r"""
Restore a pickled element from ``state``.
TESTS::
sage: e = DirichletGroup(16)([-1, 1])
sage: loads(dumps(e)) == e
True
"""
# values_on_gens() used an explicit cache __values_on_gens in the past
# we need to set the cache of values_on_gens() from that if we encounter it in a pickle
values_on_gens_key = '_DirichletCharacter__values_on_gens'
values_on_gens = None
state_dict = state[1]
if values_on_gens_key in state_dict:
values_on_gens = state_dict[values_on_gens_key]
del state_dict[values_on_gens_key]
# element() used an explicit cache __element in the past
# we need to set the cache of element() from that if we encounter it in a pickle
element_key = '_DirichletCharacter__element'
element = None
if element_key in state_dict:
element = state_dict[element_key]
del state_dict[element_key]
super(DirichletCharacter, self).__setstate__(state)
if values_on_gens is not None:
self.values_on_gens.set_cache(values_on_gens)
if element is not None:
self.element.set_cache(element)
class DirichletGroupFactory(UniqueFactory):
r"""
Construct a group of Dirichlet characters modulo `N`.
INPUT:
- ``N`` -- positive integer
- ``base_ring`` -- commutative ring; the value ring for the
characters in this group (default: the cyclotomic field
`\QQ(\zeta_n)`, where `n` is the exponent of `(\ZZ/N\ZZ)^*`)
- ``zeta`` -- (optional) root of unity in ``base_ring``
- ``zeta_order`` -- (optional) positive integer; this must be the
order of ``zeta`` if both are specified
- ``names`` -- ignored (needed so ``G.<...> = DirichletGroup(...)``
notation works)
- ``integral`` -- boolean (default: ``False``); whether to replace
the default cyclotomic field by its rings of integers as the
base ring. This is ignored if ``base_ring`` is not ``None``.
OUTPUT:
The group of Dirichlet characters modulo `N` with values in a
subgroup `V` of the multiplicative group `R^*` of ``base_ring``.
This is the group of homomorphisms `(\ZZ/N\ZZ)^* \to V` with
pointwise multiplication. The group `V` is determined as follows:
- If both ``zeta`` and ``zeta_order`` are omitted, then `V` is
taken to be `R^*`, or equivalently its `n`-torsion subgroup,
where `n` is the exponent of `(\ZZ/N\ZZ)^*`. Many operations,
such as finding a set of generators for the group, are only
implemented if `V` is cyclic and a generator for `V` can be
found.
- If ``zeta`` is specified, then `V` is taken to be the cyclic
subgroup of `R^*` generated by ``zeta``. If ``zeta_order`` is
also given, it must be the multiplicative order of ``zeta``;
this is useful if the base ring is not exact or if the order of
``zeta`` is very large.
- If ``zeta`` is not specified but ``zeta_order`` is, then `V` is
taken to be the group of roots of unity of order dividing
``zeta_order`` in `R`. In this case, `R` must be a domain (so
`V` is cyclic), and `V` must have order ``zeta_order``.
Furthermore, a generator ``zeta`` of `V` is computed, and an
error is raised if such ``zeta`` cannot be found.
EXAMPLES:
The default base ring is a cyclotomic field of order the exponent
of `(\ZZ/N\ZZ)^*`::
sage: DirichletGroup(20)
Group of Dirichlet characters modulo 20 with values in Cyclotomic Field of order 4 and degree 2
We create the group of Dirichlet characters mod 20 with values in
the rational numbers::
sage: G = DirichletGroup(20, QQ); G
Group of Dirichlet characters modulo 20 with values in Rational Field
sage: G.order()
4
sage: G.base_ring()
Rational Field
The elements of ``G`` print as lists giving the values of the character
on the generators of `(\ZZ/N\ZZ)^*`::
sage: list(G)
[Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1, Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1, Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -1]
Next we construct the group of Dirichlet characters mod 20, but with
values in `\QQ(\zeta_n)`::
sage: G = DirichletGroup(20)
sage: G.1
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
We next compute several invariants of ``G``::
sage: G.gens()
(Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
sage: G.unit_gens()
(11, 17)
sage: G.zeta()
zeta4
sage: G.zeta_order()
4
In this example we create a Dirichlet group with values in a
number field::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^4 + 1)
sage: DirichletGroup(5, K)
Group of Dirichlet characters modulo 5 with values in Number Field in a with defining polynomial x^4 + 1
An example where we give ``zeta``, but not its order::
sage: G = DirichletGroup(5, K, a); G
Group of Dirichlet characters modulo 5 with values in the group of order 8 generated by a in Number Field in a with defining polynomial x^4 + 1
sage: G.list()
[Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> a^2, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -a^2]
We can also restrict the order of the characters, either with or
without specifying a root of unity::
sage: DirichletGroup(5, K, zeta=-1, zeta_order=2)
Group of Dirichlet characters modulo 5 with values in the group of order 2 generated by -1 in Number Field in a with defining polynomial x^4 + 1
sage: DirichletGroup(5, K, zeta_order=2)
Group of Dirichlet characters modulo 5 with values in the group of order 2 generated by -1 in Number Field in a with defining polynomial x^4 + 1
::
sage: G.<e> = DirichletGroup(13)
sage: loads(G.dumps()) == G
True
::
sage: G = DirichletGroup(19, GF(5))
sage: loads(G.dumps()) == G
True
We compute a Dirichlet group over a large prime field::
sage: p = next_prime(10^40)
sage: g = DirichletGroup(19, GF(p)); g
Group of Dirichlet characters modulo 19 with values in Finite Field of size 10000000000000000000000000000000000000121
Note that the root of unity has small order, i.e., it is not the
largest order root of unity in the field::
sage: g.zeta_order()
2
::
sage: r4 = CyclotomicField(4).ring_of_integers()
sage: G = DirichletGroup(60, r4)
sage: G.gens()
(Dirichlet character modulo 60 of conductor 4 mapping 31 |--> -1, 41 |--> 1, 37 |--> 1, Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1, Dirichlet character modulo 60 of conductor 5 mapping 31 |--> 1, 41 |--> 1, 37 |--> zeta4)
sage: val = G.gens()[2].values_on_gens()[2] ; val
zeta4
sage: parent(val)
Gaussian Integers in Cyclotomic Field of order 4 and degree 2
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val)
17
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) * GF(29)(3)
22
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3
22
sage: parent(r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3)
Residue field of Fractional ideal (-2*zeta4 + 5)
::
sage: DirichletGroup(60, integral=True)
Group of Dirichlet characters modulo 60 with values in Gaussian Integers in Cyclotomic Field of order 4 and degree 2
sage: parent(DirichletGroup(60, integral=True).gens()[2].values_on_gens()[2])
Gaussian Integers in Cyclotomic Field of order 4 and degree 2
If the order of ``zeta`` cannot be determined automatically, we
can specify it using ``zeta_order``::
sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6))
Traceback (most recent call last):
...
NotImplementedError: order of element not known
sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6), zeta_order=6)
Group of Dirichlet characters modulo 7 with values in the group of order 6 generated by 0.500000000000000 + 0.866025403784439*I in Complex Field with 53 bits of precision
If the base ring is not a domain (in which case the group of roots
of unity is not necessarily cyclic), some operations still work,
such as creation of elements::
sage: G = DirichletGroup(5, Zmod(15)); G
Group of Dirichlet characters modulo 5 with values in Ring of integers modulo 15
sage: chi = G([13]); chi
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 13
sage: chi^2
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 4
sage: chi.multiplicative_order()
4
Other operations only work if ``zeta`` is specified::
sage: G.gens()
Traceback (most recent call last):
...
NotImplementedError: factorization of polynomials over rings with composite characteristic is not implemented
sage: G = DirichletGroup(5, Zmod(15), zeta=2); G
Group of Dirichlet characters modulo 5 with values in the group of order 4 generated by 2 in Ring of integers modulo 15
sage: G.gens()
(Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 2,)
TESTS:
Dirichlet groups are cached, creating two groups with the same parameters
yields the same object::
sage: DirichletGroup(60) is DirichletGroup(60)
True
"""
def create_key(self, N, base_ring=None, zeta=None, zeta_order=None,
names=None, integral=False):
"""
Create a key that uniquely determines a Dirichlet group.
TESTS::
sage: DirichletGroup.create_key(60)
(Cyclotomic Field of order 4 and degree 2, 60, None, None)
An example to illustrate that ``base_ring`` is a part of the key::
sage: k = DirichletGroup.create_key(2, base_ring=QQ); k
(Rational Field, 2, None, None)
sage: l = DirichletGroup.create_key(2, base_ring=CC); l
(Complex Field with 53 bits of precision, 2, None, None)
sage: k == l
False
sage: G = DirichletGroup.create_object(None, k); G
Group of Dirichlet characters modulo 2 with values in Rational Field
sage: H = DirichletGroup.create_object(None, l); H
Group of Dirichlet characters modulo 2 with values in Complex Field with 53 bits of precision
sage: G == H
False
If ``base_ring`` were not a part of the key, the keys would compare
equal and the caching would be broken::
sage: k = k[1:]; k
(2, None, None)
sage: l = l[1:]; l
(2, None, None)
sage: k == l
True
sage: DirichletGroup(2, base_ring=QQ) is DirichletGroup(2, base_ring=CC)
False
If the base ring is not an integral domain, an error will be
raised if only ``zeta_order`` is specified::
sage: DirichletGroup(17, Integers(15))
Group of Dirichlet characters modulo 17 with values in Ring of integers modulo 15
sage: DirichletGroup(17, Integers(15), zeta_order=4)
Traceback (most recent call last):
...
ValueError: base ring (= Ring of integers modulo 15) must be an integral domain if only zeta_order is specified
sage: G = DirichletGroup(17, Integers(15), zeta=7); G
Group of Dirichlet characters modulo 17 with values in the group of order 4 generated by 7 in Ring of integers modulo 15
sage: G.order()
4
sage: DirichletGroup(-33)
Traceback (most recent call last):
...
ValueError: modulus should be positive
"""
modulus = rings.Integer(N)
if modulus <= 0:
raise ValueError('modulus should be positive')
if base_ring is None:
if not (zeta is None and zeta_order is None):
raise ValueError("zeta and zeta_order must be None if base_ring not specified")
e = rings.IntegerModRing(modulus).unit_group_exponent()
base_ring = rings.CyclotomicField(e)
if integral:
base_ring = base_ring.ring_of_integers()
if not is_Ring(base_ring):
raise TypeError("base_ring (= %s) must be a ring" % base_ring)
# If either zeta or zeta_order is given, compute the other.
if zeta is not None:
zeta = base_ring(zeta)
if zeta_order is None:
zeta_order = zeta.multiplicative_order()
elif zeta_order is not None:
if not base_ring.is_integral_domain():
raise ValueError("base ring (= %s) must be an integral domain if only zeta_order is specified"
% base_ring)
zeta_order = rings.Integer(zeta_order)
zeta = base_ring.zeta(zeta_order)
return (base_ring, modulus, zeta, zeta_order)
def create_object(self, version, key, **extra_args):
"""
Create the object from the key (extra arguments are ignored). This is
only called if the object was not found in the cache.
TESTS::
sage: K = CyclotomicField(4)
sage: DirichletGroup.create_object(None, (K, 60, K.gen(), 4))
Group of Dirichlet characters modulo 60 with values in the group of order 4 generated by zeta4 in Cyclotomic Field of order 4 and degree 2
"""
base_ring, modulus, zeta, zeta_order = key
return DirichletGroup_class(base_ring, modulus, zeta, zeta_order)
DirichletGroup = DirichletGroupFactory("DirichletGroup")
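# Illustrative sketch (plain Python, not part of Sage; names are hypothetical):
# a UniqueFactory caches objects by the key returned from create_key, so calls
# with equal keys return the identical object. The pattern in miniature:
class _DemoFactory:
    def __init__(self, create_object):
        self._create_object = create_object
        self._cache = {}
    def __call__(self, *key):
        if key not in self._cache:
            self._cache[key] = self._create_object(*key)
        return self._cache[key]
# f = _DemoFactory(lambda *k: list(k)); f(1, 2) is f(1, 2)  # -> True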
def is_DirichletGroup(x):
"""
Return ``True`` if ``x`` is a Dirichlet group.
EXAMPLES::
sage: from sage.modular.dirichlet import is_DirichletGroup
sage: is_DirichletGroup(DirichletGroup(11))
True
sage: is_DirichletGroup(11)
False
sage: is_DirichletGroup(DirichletGroup(11).0)
False
"""
return isinstance(x, DirichletGroup_class)
class DirichletGroup_class(WithEqualityById, Parent):
"""
Group of Dirichlet characters modulo `N` with values in a ring `R`.
"""
Element = DirichletCharacter
def __init__(self, base_ring, modulus, zeta, zeta_order):
"""
Create a Dirichlet group.
Not to be called directly (use the factory function ``DirichletGroup``).
The ``DirichletGroup`` factory ensures that either both
``zeta`` and ``zeta_order`` are specified, or that both are
``None``. In the former case, it also ensures that ``zeta``
is an element of ``base_ring`` and that ``zeta_order`` is an
element of ``ZZ``.
TESTS::
sage: G = DirichletGroup(7, base_ring=Integers(9), zeta=2) # indirect doctest
sage: TestSuite(G).run()
sage: G.base() # check that Parent.__init__ has been called
Ring of integers modulo 9
sage: DirichletGroup(13) == DirichletGroup(13)
True
sage: DirichletGroup(13) == DirichletGroup(13, QQ)
False
"""
from sage.categories.groups import Groups
category = Groups().Commutative()
if base_ring.is_integral_domain() or base_ring.is_finite():
# The group of n-th roots of unity in the base ring is
# finite, and hence this Dirichlet group is finite too.
# In particular, it is finitely generated; the added
# FinitelyGenerated() here means that the group has a
# distinguished set of generators.
category = category.Finite().FinitelyGenerated()
Parent.__init__(self, base_ring, category=category)
self._zeta = zeta
self._zeta_order = zeta_order
self._modulus = modulus
self._integers = rings.IntegerModRing(modulus)
def __setstate__(self, state):
"""
Used for unpickling old instances.
TESTS::
sage: G = DirichletGroup(9)
sage: loads(dumps(G)) is G
True
"""
self._set_element_constructor()
if '_zeta_order' in state:
state['_zeta_order'] = rings.Integer(state['_zeta_order'])
super(DirichletGroup_class, self).__setstate__(state)
@property
def _module(self):
"""
Return the free module used to represent Dirichlet characters.
TESTS::
sage: DirichletGroup(12)._module
Vector space of dimension 2 over Ring of integers modulo 2
"""
return free_module.FreeModule(rings.IntegerModRing(self.zeta_order()),
len(self.unit_gens()))
@property
def _zeta_powers(self):
"""
Return a list of powers of the distinguished root of unity.
TESTS::
sage: DirichletGroup(5)._zeta_powers
[1, zeta4, -1, -zeta4]
"""
R = self.base_ring()
a = R.one()
w = [a]
zeta = self.zeta()
zeta_order = self.zeta_order()
if is_ComplexField(R):
for i in range(1, zeta_order):
a = a * zeta
a._set_multiplicative_order(zeta_order/gcd(zeta_order, i))
w.append(a)
else:
for i in range(1, zeta_order):
a = a * zeta
w.append(a)
return w
@property
def _zeta_dlog(self):
"""
Return a dictionary that can be used to compute discrete
logarithms in the value group of this Dirichlet group.
TESTS::
sage: DirichletGroup(5)._zeta_dlog
{-1: 2, -zeta4: 3, zeta4: 1, 1: 0}
"""
return {z: i for i, z in enumerate(self._zeta_powers)}
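# Illustrative sketch (plain Python, not part of Sage; names are hypothetical):
# _zeta_dlog simply inverts _zeta_powers, so turning a character value back
# into an exponent is a single dictionary lookup. In miniature, with 1j as a
# fourth root of unity:
def _demo_zeta_dlog():
    powers = [1, 1j, -1, -1j]                # stand-in for _zeta_powers
    dlog = {z: i for i, z in enumerate(powers)}
    return [dlog[v] for v in (-1, 1j)]       # -> [2, 1]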
def change_ring(self, R, zeta=None, zeta_order=None):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a conversion map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
- ``zeta`` -- (optional) root of unity in ``R``
- ``zeta_order`` -- (optional) order of ``zeta``
EXAMPLES::
sage: G = DirichletGroup(7,QQ); G
Group of Dirichlet characters modulo 7 with values in Rational Field
sage: G.change_ring(CyclotomicField(6))
Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2
TESTS:
We test the case where `R` is a map (:trac:`18072`)::
sage: K.<i> = QuadraticField(-1)
sage: f = K.complex_embeddings()[0]
sage: D = DirichletGroup(5, K)
sage: D.change_ring(f)
Group of Dirichlet characters modulo 5 with values in Complex Field with 53 bits of precision
"""
if zeta is None and self._zeta is not None:
# A root of unity was explicitly given; we use it over the
# new base ring as well.
zeta = self._zeta
if zeta_order is None:
# We reuse _zeta_order if we know that it stays the
# same; otherwise it will be recomputed as the order
# of R(zeta) by the DirichletGroup factory.
p = R.characteristic()
if p == 0 or p.gcd(self._zeta_order) == 1:
zeta_order = self._zeta_order
else:
# No root of unity specified; use the same zeta_order
# (which may still be None).
zeta_order = self._zeta_order
# Map zeta to the new parent
if zeta is not None:
zeta = R(zeta)
if isinstance(R, Map):
R = R.codomain()
return DirichletGroup(self.modulus(), R,
zeta=zeta,
zeta_order=zeta_order)
def base_extend(self, R):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a *coercion* map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
EXAMPLES::
sage: G = DirichletGroup(7,QQ); G
Group of Dirichlet characters modulo 7 with values in Rational Field
sage: H = G.base_extend(CyclotomicField(6)); H
Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2
Note that the root of unity can change::
sage: H.zeta()
zeta6
This method (in contrast to :meth:`change_ring`) requires a
coercion map to exist::
sage: G.base_extend(ZZ)
Traceback (most recent call last):
...
TypeError: no coercion map from Rational Field to Integer Ring is defined
Base-extended Dirichlet groups do not silently get roots of
unity with smaller order than expected (:trac:`6018`)::
sage: G = DirichletGroup(10, QQ).base_extend(CyclotomicField(4))
sage: H = DirichletGroup(10, CyclotomicField(4))
sage: G is H
True
sage: G3 = DirichletGroup(31, CyclotomicField(3))
sage: G5 = DirichletGroup(31, CyclotomicField(5))
sage: K30 = CyclotomicField(30)
sage: G3.gen(0).base_extend(K30) * G5.gen(0).base_extend(K30)
Dirichlet character modulo 31 of conductor 31 mapping 3 |--> -zeta30^7 + zeta30^5 + zeta30^4 + zeta30^3 - zeta30 - 1
When a root of unity is specified, base extension still works
if the new base ring is not an integral domain::
sage: f = DirichletGroup(17, ZZ, zeta=-1).0
sage: g = f.base_extend(Integers(15))
sage: g(3)
14
sage: g.parent().zeta()
14
"""
if not (isinstance(R, Map)
or R.has_coerce_map_from(self.base_ring())):
raise TypeError("no coercion map from %s to %s is defined"
% (self.base_ring(), R))
return self.change_ring(R)
def _element_constructor_(self, x):
"""
Construct a Dirichlet character from `x`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: K = G.base_ring()
sage: G(1)
Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
sage: G([-1])
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1
sage: G([K.0])
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: G(0)
Traceback (most recent call last):
...
TypeError: cannot convert 0 to an element of Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4
sage: G = DirichletGroup(6)
sage: G(DirichletGroup(3).0)
Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
sage: G(DirichletGroup(15).0)
Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
sage: G(DirichletGroup(15).1)
Traceback (most recent call last):
...
TypeError: conductor must divide modulus
sage: H = DirichletGroup(16, QQ); H(DirichletGroup(16).1)
Traceback (most recent call last):
...
TypeError: Unable to coerce zeta4 to a rational
"""
R = self.base_ring()
try:
if x == R.one():
x = [R.one()] * len(self.unit_gens())
except (TypeError, ValueError, ArithmeticError):
pass
if isinstance(x, list): # list of values on each unit generator
return self.element_class(self, x)
elif not isinstance(x, DirichletCharacter):
raise TypeError("cannot convert %s to an element of %s" % (x, self))
elif not x.conductor().divides(self.modulus()):
raise TypeError("conductor must divide modulus")
a = []
for u in self.unit_gens():
v = u.lift()
# have to do this, since e.g., unit gens mod 11 are not units mod 22.
while x.modulus().gcd(v) != 1:
v += self.modulus()
a.append(R(x(v)))
return self.element_class(self, a)
def _coerce_map_from_(self, X):
"""
Decide whether there is a coercion map from `X`.
There is conversion between Dirichlet groups of different
moduli, but no coercion. This implies that Dirichlet
characters of different moduli do not compare as equal.
TESTS::
sage: trivial_character(6) == trivial_character(3) # indirect doctest
False
sage: trivial_character(3) == trivial_character(9)
False
sage: trivial_character(3) == DirichletGroup(3, QQ).0^2
True
"""
return (isinstance(X, DirichletGroup_class) and
self.modulus() == X.modulus() and
self.base_ring().has_coerce_map_from(X.base_ring()) and
(self._zeta is None or
(X._zeta is not None and
self.base_ring()(X._zeta) in self._zeta_powers)))
def __len__(self):
"""
Return the number of elements of this Dirichlet group. This is the
same as ``self.order()``.
EXAMPLES::
sage: len(DirichletGroup(20))
8
sage: len(DirichletGroup(20, QQ))
4
sage: len(DirichletGroup(20, GF(5)))
8
sage: len(DirichletGroup(20, GF(2)))
1
sage: len(DirichletGroup(20, GF(3)))
4
"""
return self.order()
def _repr_(self):
"""
Return a print representation of this group, which can be renamed.
EXAMPLES::
sage: G = DirichletGroup(11)
sage: repr(G) # indirect doctest
'Group of Dirichlet characters modulo 11 with values in Cyclotomic Field of order 10 and degree 4'
sage: G.rename('Dir(11)')
sage: G
Dir(11)
"""
s = "Group of Dirichlet characters modulo %s with values in " % self.modulus()
if self._zeta is not None:
s += "the group of order %s generated by %s in " % (self._zeta_order, self._zeta)
s += str(self.base_ring())
return s
@cached_method
def decomposition(self):
r"""
Return the Dirichlet groups of prime power modulus corresponding
to the primes dividing the modulus.
(Note that if the modulus is 2 mod 4, there will be a "factor" of
`(\ZZ/2\ZZ)^*`, which is the trivial group.)
EXAMPLES::
sage: DirichletGroup(20).decomposition()
[
Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2,
Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2
]
sage: DirichletGroup(20,GF(5)).decomposition()
[
Group of Dirichlet characters modulo 4 with values in Finite Field of size 5,
Group of Dirichlet characters modulo 5 with values in Finite Field of size 5
]
"""
R = self.base_ring()
return Sequence([DirichletGroup(p**r, R)
for p, r in factor(self.modulus())],
cr=True,
universe=cat.Objects())
def exponent(self):
"""
Return the exponent of this group.
EXAMPLES::
sage: DirichletGroup(20).exponent()
4
sage: DirichletGroup(20,GF(3)).exponent()
2
sage: DirichletGroup(20,GF(2)).exponent()
1
sage: DirichletGroup(37).exponent()
36
"""
return self.zeta_order()
@cached_method
def _automorphisms(self):
"""
Compute the automorphisms of ``self``. These are always given by raising to
a power, so the return value is a list of integers.
At present this is only implemented if the base ring has characteristic 0 or a prime.
EXAMPLES::
sage: DirichletGroup(17)._automorphisms()
[1, 3, 5, 7, 9, 11, 13, 15]
sage: DirichletGroup(17, GF(11^4, 'a'))._automorphisms()
[1, 11, 121, 1331]
sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5))._automorphisms()
Traceback (most recent call last):
...
NotImplementedError: Automorphisms for finite non-field base rings not implemented
sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms()
Traceback (most recent call last):
...
NotImplementedError: Automorphisms for finite non-field base rings not implemented
"""
n = self.zeta_order()
R = self.base_ring()
p = R.characteristic()
if p == 0:
Auts = [e for e in range(1, n) if gcd(e, n) == 1]
else:
if not rings.ZZ(p).is_prime():
raise NotImplementedError("Automorphisms for finite non-field base rings not implemented")
# The automorphisms in characteristic p are
# k-th powering for
# k = 1, p, p^2, ..., p^(r-1),
# where p^r = 1 (mod n), so r is the mult order of p modulo n.
r = rings.IntegerModRing(n)(p).multiplicative_order()
Auts = [p**m for m in range(r)]
return Auts
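# Illustrative sketch (plain Python, not part of Sage; names are hypothetical):
# in characteristic p, the action on n-th roots of unity is generated by the
# Frobenius x -> x^p, giving powering exponents 1, p, ..., p^(r-1) where r is
# the multiplicative order of p mod n (this assumes gcd(p, n) == 1).
def _demo_frobenius_powers(p, n):
    import math
    assert math.gcd(p, n) == 1
    if n == 1:
        return [1]
    r, t = 1, p % n
    while t != 1:
        t = (t * p) % n
        r += 1
    return [p**m for m in range(r)]
# _demo_frobenius_powers(11, 16) -> [1, 11, 121, 1331], matching the doctest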
def galois_orbits(self, v=None, reps_only=False, sort=True, check=True):
"""
Return a list of the Galois orbits of Dirichlet characters in ``self``,
or in ``v`` if ``v`` is not ``None``.
INPUT:
- ``v`` -- (optional) list of elements of ``self``
- ``reps_only`` -- boolean (default: ``False``); if ``True``,
only return representatives for the orbits
- ``sort`` -- boolean (default: ``True``); whether to sort the list of
orbits and the orbits themselves (slightly faster if ``False``)
- ``check`` -- boolean (default: ``True``); whether or not to
explicitly coerce each element of ``v`` into ``self``
The Galois group is the absolute Galois group of the prime subfield
of ``Frac(R)``. If ``R`` is not a domain, an error will be raised.
EXAMPLES::
sage: DirichletGroup(20).galois_orbits()
[
[Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -1],
...,
[Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1]
]
sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5)).galois_orbits()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2)).galois_orbits()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
"""
if v is None:
v = self.list()
else:
if check:
v = [self(x) for x in v]
G = []
seen_so_far = set()
for x in v:
z = x.element()
e = tuple(z) # change when there are immutable vectors (and below)
if e in seen_so_far:
continue
orbit = x.galois_orbit(sort=sort)
if reps_only:
G.append(x)
else:
G.append(orbit)
for z in orbit:
seen_so_far.add(tuple(z.element()))
G = Sequence(G, cr=True)
if sort:
G.sort()
return G
def gen(self, n=0):
"""
Return the ``n``-th generator of ``self``.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.gen(0)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: G.gen(1)
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
sage: G.gen(2)
Traceback (most recent call last):
...
IndexError: n(=2) must be between 0 and 1
::
sage: G.gen(-1)
Traceback (most recent call last):
...
IndexError: n(=-1) must be between 0 and 1
"""
n = int(n)
g = self.gens()
if n < 0 or n >= len(g):
raise IndexError("n(=%s) must be between 0 and %s" % (n, len(g) - 1))
return g[n]
@cached_method
def gens(self):
"""
Return the generators of ``self``.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.gens()
(Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
"""
g = []
ord = self.zeta_order()
M = self._module
zero = M(0)
orders = self.integers_mod().unit_group().gens_orders()
for i in range(len(self.unit_gens())):
z = zero.__copy__()
z[i] = ord//gcd(ord, orders[i])
g.append(self.element_class(self, z, check=False))
return tuple(g)
def integers_mod(self):
r"""
Return the ring of integers `\ZZ/N\ZZ`, where `N` is the
modulus of ``self``.
sage: G = DirichletGroup(20)
sage: G.integers_mod()
Ring of integers modulo 20
"""
return self._integers
__iter__ = multiplicative_iterator
def list(self):
"""
Return a list of the Dirichlet characters in this group.
EXAMPLES::
sage: DirichletGroup(5).list()
[Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4]
"""
return self._list_from_iterator()
def modulus(self):
"""
Return the modulus of ``self``.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.modulus()
20
"""
return self._modulus
def ngens(self):
"""
Return the number of generators of ``self``.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.ngens()
2
"""
return len(self.gens())
@cached_method
def order(self):
"""
Return the number of elements of ``self``. This is the same as
``len(self)``.
EXAMPLES::
sage: DirichletGroup(20).order()
8
sage: DirichletGroup(37).order()
36
"""
ord = rings.Integer(1)
for g in self.gens():
ord *= int(g.order())
return ord
def random_element(self):
"""
Return a random element of self.
The element is computed by multiplying together a random power of
each generator, where each power is between 0 and the order of that
generator minus 1, inclusive.
EXAMPLES::
sage: DirichletGroup(37).random_element()
Dirichlet character modulo 37 of conductor 37 mapping 2 |--> zeta36^4
sage: DirichletGroup(20).random_element()
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: DirichletGroup(60).random_element()
Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1
"""
e = self(1)
for i in range(self.ngens()):
g = self.gen(i)
n = random.randrange(g.order())
e *= g**n
return e
def unit_gens(self):
r"""
Return the minimal generators of the unit group
`(\ZZ/N\ZZ)^*`, where `N` is the
modulus of ``self``.
EXAMPLES::
sage: DirichletGroup(37).unit_gens()
(2,)
sage: DirichletGroup(20).unit_gens()
(11, 17)
sage: DirichletGroup(60).unit_gens()
(31, 41, 37)
sage: DirichletGroup(20,QQ).unit_gens()
(11, 17)
"""
return self._integers.unit_gens()
@cached_method
def zeta(self):
"""
Return the chosen root of unity in the base ring.
EXAMPLES::
sage: DirichletGroup(37).zeta()
zeta36
sage: DirichletGroup(20).zeta()
zeta4
sage: DirichletGroup(60).zeta()
zeta4
sage: DirichletGroup(60,QQ).zeta()
-1
sage: DirichletGroup(60, GF(25,'a')).zeta()
2
"""
zeta = self._zeta
if zeta is None:
R = self.base_ring()
e = self._integers.unit_group_exponent()
for d in reversed(e.divisors()):
try:
zeta = R.zeta(d)
break
except ValueError:
pass
self.zeta_order.set_cache(d)
return zeta
@cached_method
def zeta_order(self):
"""
Return the order of the chosen root of unity in the base ring.
EXAMPLES::
sage: DirichletGroup(20).zeta_order()
4
sage: DirichletGroup(60).zeta_order()
4
sage: DirichletGroup(60, GF(25,'a')).zeta_order()
4
sage: DirichletGroup(19).zeta_order()
18
"""
order = self._zeta_order
if order is None:
order = self.zeta().multiplicative_order()
return order
|
import copy
from typing import Type
import pytest
from neuraxle.base import TrialStatus, synchroneous_flow_method
from neuraxle.metaopt.data.vanilla import (DEFAULT_CLIENT, DEFAULT_PROJECT,
RETRAIN_TRIAL_SPLIT_ID,
BaseDataclass, ClientDataclass,
MetricResultsDataclass,
ProjectDataclass, RootDataclass,
RoundDataclass, ScopedLocation,
TrialDataclass, TrialSplitDataclass,
as_named_odict, dataclass_2_id_attr,
from_json, to_json)
SOME_METRIC_NAME = 'MAE'
HYPERPARAMS = {'learning_rate': 0.01}
SOME_METRIC_RESULTS_DATACLASS = MetricResultsDataclass(
metric_name=SOME_METRIC_NAME,
validation_values=[3, 2, 1],
train_values=[2, 1, 0],
higher_score_is_better=False,
)
SOME_TRIAL_DATACLASS = TrialDataclass(
trial_number=0,
hyperparams=HYPERPARAMS,
).start()
SOME_TRIAL_SPLIT_DATACLASS = TrialSplitDataclass(
split_number=0,
metric_results=as_named_odict(SOME_METRIC_RESULTS_DATACLASS),
hyperparams=HYPERPARAMS,
).start()
SOME_TRIAL_SPLIT_DATACLASS.end(TrialStatus.SUCCESS)
SOME_TRIAL_DATACLASS.store(SOME_TRIAL_SPLIT_DATACLASS)
SOME_TRIAL_DATACLASS.end(TrialStatus.SUCCESS)
SOME_ROUND_DATACLASS = RoundDataclass(
round_number=0,
trials=[SOME_TRIAL_DATACLASS],
main_metric_name=SOME_METRIC_NAME,
)
SOME_CLIENT_DATACLASS = ClientDataclass(
client_name=DEFAULT_CLIENT,
rounds=[SOME_ROUND_DATACLASS],
)
SOME_PROJECT_DATACLASS = ProjectDataclass(
project_name=DEFAULT_PROJECT,
clients=as_named_odict(SOME_CLIENT_DATACLASS),
)
SOME_ROOT_DATACLASS = RootDataclass(
projects=as_named_odict(SOME_PROJECT_DATACLASS),
)
SOME_FULL_SCOPED_LOCATION: ScopedLocation = ScopedLocation(
DEFAULT_PROJECT, DEFAULT_CLIENT, 0, 0, 0, SOME_METRIC_NAME
)
ALL_DATACLASSES = [
SOME_ROOT_DATACLASS,
SOME_PROJECT_DATACLASS,
SOME_CLIENT_DATACLASS,
SOME_ROUND_DATACLASS,
SOME_TRIAL_DATACLASS,
SOME_TRIAL_SPLIT_DATACLASS,
SOME_METRIC_RESULTS_DATACLASS,
]
@pytest.mark.parametrize("scope_slice_len", list(range(len(ALL_DATACLASSES))))
def test_dataclass_getters(scope_slice_len: int):
expected_dataclass: BaseDataclass = ALL_DATACLASSES[scope_slice_len]
dataclass_type: Type[BaseDataclass] = expected_dataclass.__class__
sliced_scope = SOME_FULL_SCOPED_LOCATION[:dataclass_type]
assert len(sliced_scope) == scope_slice_len
sliced_scope = SOME_FULL_SCOPED_LOCATION[:scope_slice_len]
assert len(sliced_scope) == scope_slice_len
dc = SOME_ROOT_DATACLASS[sliced_scope]
assert isinstance(dc, dataclass_type)
assert dc.get_id() == expected_dataclass.get_id()
assert dc.get_id() == sliced_scope[scope_slice_len - 1]
assert dc.get_id() == SOME_FULL_SCOPED_LOCATION[dataclass_type]
assert dc == expected_dataclass
@pytest.mark.parametrize("dataclass_type, scope", [
(ProjectDataclass, ScopedLocation(DEFAULT_PROJECT)),
(ClientDataclass, ScopedLocation(DEFAULT_PROJECT, DEFAULT_CLIENT)),
])
def test_base_empty_default_dataclass_getters(
dataclass_type: Type[BaseDataclass],
scope: ScopedLocation,
):
root: RootDataclass = RootDataclass()
dc = root[scope]
assert isinstance(dc, dataclass_type)
assert dc.get_id() == scope.as_list()[-1]
@pytest.mark.parametrize("cp", [copy.copy, copy.deepcopy])
def test_scoped_location_can_copy_and_change(cp):
sl = ScopedLocation(DEFAULT_PROJECT, DEFAULT_CLIENT, 0, 0, 0, SOME_METRIC_NAME)
sl_copy = cp(sl)
sl_copy.pop()
sl_copy.pop()
assert sl_copy != sl
assert len(sl_copy) == len(sl) - 2
@pytest.mark.parametrize("dataclass_type", list(dataclass_2_id_attr.keys()))
def test_dataclass_id_attr_get_set(dataclass_type):
_id = 9000
dc = dataclass_type().set_id(_id)
assert dc.get_id() == _id
def test_dataclass_from_dict_to_dict():
root: RootDataclass = SOME_ROOT_DATACLASS
root_as_dict = root.to_dict()
root_restored = RootDataclass.from_dict(root_as_dict)
assert SOME_METRIC_RESULTS_DATACLASS == root_restored[SOME_FULL_SCOPED_LOCATION]
assert root == root_restored
def test_dataclass_from_json_to_json():
root: RootDataclass = SOME_ROOT_DATACLASS
root_as_dict = root.to_dict()
root_as_json = to_json(root_as_dict)
root_restored_dc = from_json(root_as_json)
assert SOME_METRIC_RESULTS_DATACLASS == root_restored_dc[SOME_FULL_SCOPED_LOCATION]
assert root == root_restored_dc
def test_trial_dataclass_can_store_and_contains_retrain_split():
tc: TrialDataclass = TrialDataclass(trial_number=5)
tc.store(TrialSplitDataclass(split_number=0))
tc.store(TrialSplitDataclass(split_number=1))
tc.store(TrialSplitDataclass(split_number=2))
sl: ScopedLocation = ScopedLocation.default(
round_number=0, trial_number=5, split_number=RETRAIN_TRIAL_SPLIT_ID)
tsc: TrialSplitDataclass = TrialSplitDataclass(split_number=RETRAIN_TRIAL_SPLIT_ID)
tc.store(tsc)
assert len(tc) == 3
assert sl in tc
assert tsc.get_id() == RETRAIN_TRIAL_SPLIT_ID
assert tc.retrained_split == tsc
assert tc[sl] == tc.retrained_split
|
"""
WSGI config for plasma_link_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'plasma_link_django.settings')
application = get_wsgi_application()
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FilepoolDefaultPolicyDefaultPolicyAction(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'action_param': 'str',
'action_type': 'str'
}
attribute_map = {
'action_param': 'action_param',
'action_type': 'action_type'
}
def __init__(self, action_param=None, action_type=None): # noqa: E501
"""FilepoolDefaultPolicyDefaultPolicyAction - a model defined in Swagger""" # noqa: E501
self._action_param = None
self._action_type = None
self.discriminator = None
if action_param is not None:
self.action_param = action_param
self.action_type = action_type
@property
def action_param(self):
"""Gets the action_param of this FilepoolDefaultPolicyDefaultPolicyAction. # noqa: E501
Varies according to action_type # noqa: E501
:return: The action_param of this FilepoolDefaultPolicyDefaultPolicyAction. # noqa: E501
:rtype: str
"""
return self._action_param
@action_param.setter
def action_param(self, action_param):
"""Sets the action_param of this FilepoolDefaultPolicyDefaultPolicyAction.
Varies according to action_type # noqa: E501
:param action_param: The action_param of this FilepoolDefaultPolicyDefaultPolicyAction. # noqa: E501
:type: str
"""
self._action_param = action_param
@property
def action_type(self):
"""Gets the action_type of this FilepoolDefaultPolicyDefaultPolicyAction. # noqa: E501
:return: The action_type of this FilepoolDefaultPolicyDefaultPolicyAction. # noqa: E501
:rtype: str
"""
return self._action_type
@action_type.setter
def action_type(self, action_type):
"""Sets the action_type of this FilepoolDefaultPolicyDefaultPolicyAction.
:param action_type: The action_type of this FilepoolDefaultPolicyDefaultPolicyAction. # noqa: E501
:type: str
"""
if action_type is None:
raise ValueError("Invalid value for `action_type`, must not be `None`") # noqa: E501
allowed_values = ["set_requested_protection", "set_data_access_pattern", "enable_coalescer", "apply_data_storage_policy", "apply_snapshot_storage_policy", "set_cloudpool_policy", "enable_packing"] # noqa: E501
if action_type not in allowed_values:
raise ValueError(
"Invalid value for `action_type` ({0}), must be one of {1}" # noqa: E501
.format(action_type, allowed_values)
)
self._action_type = action_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
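# Illustrative sketch (plain Python; hypothetical helper name): to_dict()
# above serializes nested models recursively by duck-typing on a `to_dict`
# attribute. The same idea as a stand-alone function:
def _demo_serialize(value):
    def go(v):
        if isinstance(v, list):
            return [go(x) for x in v]
        if hasattr(v, "to_dict"):
            return v.to_dict()
        if isinstance(v, dict):
            return {k: go(x) for k, x in v.items()}
        return v
    return go(value)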
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FilepoolDefaultPolicyDefaultPolicyAction):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Tests for half-precision syntax quirks. """
import dace
import math
import numpy as np
import pytest
from dace.transformation.dataflow import MapFusion, Vectorization
from dace.transformation.optimizer import Optimizer
N = dace.symbol('N')
def _config():
# Prerequisite for test: CUDA compute capability >= 6.0
dace.Config.set('compiler', 'cuda', 'cuda_arch', value='60')
def _test_half(veclen):
""" Tests a set of elementwise operations on a vector half type. """
_config()
@dace.program
def halftest(A: dace.float16[N], B: dace.float16[N]):
return A * B + A
A = np.random.rand(24).astype(np.float16)
B = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_strict_transformations()
sdfg.apply_gpu_transformations()
# Apply vectorization on each map and count applied
applied = 0
for xform in Optimizer(sdfg).get_pattern_matches(
patterns=Vectorization,
options=dict(vector_len=veclen, postamble=False)):
xform.apply(sdfg)
applied += 1
assert applied == 2
out = sdfg(A=A, B=B, N=24)
assert np.allclose(out, A * B + A)
@pytest.mark.gpu
def test_half4():
""" Tests a set of elementwise operations on half with vector length 4. """
_test_half(4)
@pytest.mark.gpu
def test_half8():
""" Tests a set of elementwise operations on half with vector length 8. """
_test_half(8)
@pytest.mark.gpu
def test_exp_vec():
""" Tests an exp operator on a vector half type. """
_config()
@dace.program
def halftest(A: dace.float16[N]):
out = np.ndarray([N], dace.float16)
for i in dace.map[0:N]:
with dace.tasklet:
a << A[i]
o >> out[i]
o = math.exp(a)
return out
A = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1
out = sdfg(A=A, N=24)
assert np.allclose(out, np.exp(A))
@pytest.mark.gpu
def test_relu_vec():
""" Tests a ReLU operator on a vector half type. """
_config()
@dace.program
def halftest(A: dace.float16[N]):
out = np.ndarray([N], dace.float16)
for i in dace.map[0:N]:
with dace.tasklet:
a << A[i]
o >> out[i]
o = max(a, dace.float16(0))
return out
A = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1
out = sdfg(A=A, N=24)
assert np.allclose(out, np.maximum(A, 0))
@pytest.mark.gpu
def test_dropout_vec():
""" Tests a dropout operator on a vector half type. """
_config()
@dace.program
def halftest(A: dace.float16[N], mask: dace.float16[N]):
out = np.ndarray([N], dace.float16)
for i in dace.map[0:N]:
with dace.tasklet:
a << A[i]
d << mask[i]
o >> out[i]
o = a * d
return out
A = np.random.rand(24).astype(np.float16)
mask = np.random.randint(0, 2, size=[24]).astype(np.float16)
sdfg: dace.SDFG = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1
out = sdfg(A=A, mask=mask, N=24)
assert np.allclose(out, A * mask)
@pytest.mark.gpu
def test_gelu_vec():
""" Tests a GELU operator on a vector half type. """
_config()
s2pi = math.sqrt(2.0 / math.pi)
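# Reference formula exercised below: the tanh approximation of GELU,
# gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))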
@dace.program
def halftest(A: dace.float16[N]):
out = np.ndarray([N], dace.float16)
for i in dace.map[0:N]:
with dace.tasklet:
a << A[i]
o >> out[i]
o = dace.float16(0.5) * a * (dace.float16(1) + math.tanh(
dace.float16(s2pi) * (a + dace.float16(0.044715) * (a**3))))
return out
A = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=4)) == 1
out = sdfg(A=A, N=24)
expected = 0.5 * A * (
1 + np.tanh(math.sqrt(2.0 / math.pi) * (A + 0.044715 * (A**3))))
assert np.allclose(out, expected, rtol=1e-2, atol=1e-4)
if __name__ == '__main__':
test_half4()
test_half8()
test_exp_vec()
test_relu_vec()
test_dropout_vec()
test_gelu_vec()
|
class RSG: pass
|
import sys
import re
import py
from pypy.translator.translator import TranslationContext
from pypy.rpython.error import TyperError
from pypy.rpython.lltypesystem.lltype import *
from pypy.rpython.ootypesystem import ootype
from pypy.rpython.rlist import *
from pypy.rpython.lltypesystem.rlist import ListRepr, FixedSizeListRepr, ll_newlist, ll_fixed_newlist
from pypy.rpython.lltypesystem import rlist as ll_rlist
from pypy.rpython.llinterp import LLException
from pypy.rpython.ootypesystem import rlist as oo_rlist
from pypy.rpython.rint import signed_repr
from pypy.objspace.flow.model import Constant, Variable
from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin
from pypy.rlib.debug import ll_assert
# undo the specialization parameter
for n1 in 'get set del'.split():
for n2 in '','_nonneg':
name = 'll_%sitem%s' % (n1, n2)
globals()['_'+name] = globals()[name]
exec """if 1:
def %s(*args):
return _%s(dum_checkidx, *args)
""" % (name, name)
del n1, n2, name
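# After the loop above, e.g. ll_getitem(l, i) is shorthand for the rtyper
# helper _ll_getitem(dum_checkidx, l, i); the same holds for the set/del and
# *_nonneg variants, so the tests below need not pass dum_checkidx explicitly.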
class BaseTestListImpl:
def check_list(self, l1, expected):
assert ll_len(l1) == len(expected)
for i, x in zip(range(len(expected)), expected):
assert ll_getitem_nonneg(l1, i) == x
def test_rlist_basic(self):
l = self.sample_list()
assert ll_getitem(l, -4) == 42
assert ll_getitem_nonneg(l, 1) == 43
assert ll_getitem(l, 2) == 44
assert ll_getitem(l, 3) == 45
assert ll_len(l) == 4
self.check_list(l, [42, 43, 44, 45])
def test_rlist_set(self):
l = self.sample_list()
ll_setitem(l, -1, 99)
self.check_list(l, [42, 43, 44, 99])
ll_setitem_nonneg(l, 1, 77)
self.check_list(l, [42, 77, 44, 99])
def test_rlist_slice(self):
l = self.sample_list()
LIST = typeOf(l).TO
self.check_list(ll_listslice_startonly(LIST, l, 0), [42, 43, 44, 45])
self.check_list(ll_listslice_startonly(LIST, l, 1), [43, 44, 45])
self.check_list(ll_listslice_startonly(LIST, l, 2), [44, 45])
self.check_list(ll_listslice_startonly(LIST, l, 3), [45])
self.check_list(ll_listslice_startonly(LIST, l, 4), [])
for start in range(5):
for stop in range(start, 8):
self.check_list(ll_listslice_startstop(LIST, l, start, stop),
[42, 43, 44, 45][start:stop])
def test_rlist_setslice(self):
n = 100
for start in range(5):
for stop in range(start, 5):
l1 = self.sample_list()
l2 = self.sample_list()
expected = [42, 43, 44, 45]
for i in range(start, stop):
expected[i] = n
ll_setitem(l2, i, n)
n += 1
l2 = ll_listslice_startstop(typeOf(l2).TO, l2, start, stop)
ll_listsetslice(l1, start, stop, l2)
self.check_list(l1, expected)
# helper used by some tests below
def list_is_clear(lis, idx):
items = lis._obj.items._obj.items
for i in range(idx, len(items)):
if items[i]._obj is not None:
return False
return True
class TestListImpl(BaseTestListImpl):
def sample_list(self): # [42, 43, 44, 45]
rlist = ListRepr(None, signed_repr)
rlist.setup()
l = ll_newlist(rlist.lowleveltype.TO, 3)
ll_setitem(l, 0, 42)
ll_setitem(l, -2, 43)
ll_setitem_nonneg(l, 2, 44)
ll_append(l, 45)
return l
def test_rlist_del(self):
l = self.sample_list()
ll_delitem_nonneg(l, 0)
self.check_list(l, [43, 44, 45])
ll_delitem(l, -2)
self.check_list(l, [43, 45])
ll_delitem(l, 1)
self.check_list(l, [43])
ll_delitem(l, 0)
self.check_list(l, [])
def test_rlist_extend_concat(self):
l = self.sample_list()
ll_extend(l, l)
self.check_list(l, [42, 43, 44, 45] * 2)
l1 = ll_concat(typeOf(l).TO, l, l)
assert typeOf(l1) == typeOf(l)
assert l1 != l
self.check_list(l1, [42, 43, 44, 45] * 4)
def test_rlist_delslice(self):
l = self.sample_list()
ll_listdelslice_startonly(l, 3)
self.check_list(l, [42, 43, 44])
ll_listdelslice_startonly(l, 0)
self.check_list(l, [])
for start in range(5):
for stop in range(start, 8):
l = self.sample_list()
ll_listdelslice_startstop(l, start, stop)
expected = [42, 43, 44, 45]
del expected[start:stop]
self.check_list(l, expected)
class TestFixedSizeListImpl(BaseTestListImpl):
def sample_list(self): # [42, 43, 44, 45]
rlist = FixedSizeListRepr(None, signed_repr)
rlist.setup()
l = ll_fixed_newlist(rlist.lowleveltype.TO, 4)
ll_setitem(l, 0, 42)
ll_setitem(l, -3, 43)
ll_setitem_nonneg(l, 2, 44)
ll_setitem(l, 3, 45)
return l
def test_rlist_extend_concat(self):
l = self.sample_list()
lvar = TestListImpl.sample_list(TestListImpl())
ll_extend(lvar, l)
self.check_list(lvar, [42, 43, 44, 45] * 2)
l1 = ll_concat(typeOf(l).TO, lvar, l)
assert typeOf(l1) == typeOf(l)
assert l1 != l
self.check_list(l1, [42, 43, 44, 45] * 3)
l1 = ll_concat(typeOf(l).TO, l, lvar)
assert typeOf(l1) == typeOf(l)
assert l1 != l
self.check_list(l1, [42, 43, 44, 45] * 3)
lvar1 = ll_concat(typeOf(lvar).TO, lvar, l)
assert typeOf(lvar1) == typeOf(lvar)
assert lvar1 != lvar
self.check_list(l1, [42, 43, 44, 45] * 3)
lvar1 = ll_concat(typeOf(lvar).TO, l, lvar)
assert typeOf(lvar1) == typeOf(lvar)
assert lvar1 != lvar
self.check_list(lvar1, [42, 43, 44, 45] * 3)
# ____________________________________________________________
# these classes are used in the tests below
class Foo:
pass
class Bar(Foo):
pass
class Freezing:
def _freeze_(self):
return True
class BaseTestRlist(BaseRtypingTest):
def test_simple(self):
def dummyfn():
l = [10, 20, 30]
return l[2]
res = self.interpret(dummyfn, [])
assert res == 30
def test_append(self):
def dummyfn():
l = []
l.append(50)
l.append(60)
l.append(70)
l.append(80)
l.append(90)
return len(l), l[0], l[-1]
res = self.interpret(dummyfn, [])
assert res.item0 == 5
assert res.item1 == 50
assert res.item2 == 90
def test_len(self):
def dummyfn():
l = [5, 10]
return len(l)
res = self.interpret(dummyfn, [])
assert res == 2
def dummyfn():
l = [5]
l.append(6)
return len(l)
res = self.interpret(dummyfn, [])
assert res == 2
def test_iterate(self):
def dummyfn():
total = 0
for x in [1, 3, 5, 7, 9]:
total += x
return total
res = self.interpret(dummyfn, [])
assert res == 25
def dummyfn():
total = 0
l = [1, 3, 5, 7]
l.append(9)
for x in l:
total += x
return total
res = self.interpret(dummyfn, [])
assert res == 25
def test_iterate_next(self):
def dummyfn():
total = 0
it = iter([1, 3, 5, 7, 9])
while 1:
try:
x = it.next()
except StopIteration:
break
total += x
return total
res = self.interpret(dummyfn, [])
assert res == 25
def dummyfn():
total = 0
l = [1, 3, 5, 7]
l.append(9)
it = iter(l)
while 1:
try:
x = it.next()
except StopIteration:
break
total += x
return total
res = self.interpret(dummyfn, [])
assert res == 25
def test_recursive(self):
def dummyfn(N):
l = []
while N > 0:
l = [l]
N -= 1
return len(l)
res = self.interpret(dummyfn, [5])
assert res == 1
def dummyfn(N):
l = []
while N > 0:
l.append(l)
N -= 1
return len(l)
res = self.interpret(dummyfn, [5])
assert res == 5
def test_add(self):
def dummyfn():
l = [5]
l += [6,7]
return l + [8]
res = self.interpret(dummyfn, [])
assert self.ll_to_list(res) == [5, 6, 7, 8]
def dummyfn():
l = [5]
l += [6,7]
l2 = l + [8]
l2.append(9)
return l2
res = self.interpret(dummyfn, [])
assert self.ll_to_list(res) == [5, 6, 7, 8, 9]
def test_slice(self):
def dummyfn():
l = [5, 6, 7, 8, 9]
return l[:2], l[1:4], l[3:]
res = self.interpret(dummyfn, [])
assert self.ll_to_list(res.item0) == [5, 6]
assert self.ll_to_list(res.item1) == [6, 7, 8]
assert self.ll_to_list(res.item2) == [8, 9]
def dummyfn():
l = [5, 6, 7, 8]
l.append(9)
return l[:2], l[1:4], l[3:]
res = self.interpret(dummyfn, [])
assert self.ll_to_list(res.item0) == [5, 6]
assert self.ll_to_list(res.item1) == [6, 7, 8]
assert self.ll_to_list(res.item2) == [8, 9]
def test_getslice_not_constant_folded(self):
l = list('abcdef')
def dummyfn():
result = []
for i in range(3):
l2 = l[2:]
result.append(l2.pop())
return result
res = self.interpret(dummyfn, [])
assert self.ll_to_list(res) == ['f', 'f', 'f']
def test_set_del_item(self):
def dummyfn():
l = [5, 6, 7]
l[1] = 55
l[-1] = 66
return l
res = self.interpret(dummyfn, [])
assert self.ll_to_list(res) == [5, 55, 66]
def dummyfn():
l = []
l.append(5)
l.append(6)
l.append(7)
l[1] = 55
l[-1] = 66
return l
res = self.interpret(dummyfn, [])
assert self.ll_to_list(res) == [5, 55, 66]
def dummyfn():
l = [5, 6, 7]
l[1] = 55
l[-1] = 66
del l[0]
del l[-1]
del l[:]
return len(l)
res = self.interpret(dummyfn, [])
assert res == 0
def test_setslice(self):
def dummyfn():
l = [10, 9, 8, 7]
l[:2] = [6, 5]
return l[0], l[1], l[2], l[3]
res = self.interpret(dummyfn, ())
assert res.item0 == 6
assert res.item1 == 5
assert res.item2 == 8
assert res.item3 == 7
def dummyfn():
l = [10, 9, 8]
l.append(7)
l[:2] = [6, 5]
return l[0], l[1], l[2], l[3]
res = self.interpret(dummyfn, ())
assert res.item0 == 6
assert res.item1 == 5
assert res.item2 == 8
assert res.item3 == 7
def test_delslice(self):
def dummyfn():
l = [10, 9, 8, 7]
del l[:2]
return len(l), l[0], l[1]
res = self.interpret(dummyfn, ())
assert res.item0 == 2
assert res.item1 == 8
assert res.item2 == 7
def dummyfn():
l = [10, 9, 8, 7]
del l[2:]
return len(l), l[0], l[1]
res = self.interpret(dummyfn, ())
assert res.item0 == 2
assert res.item1 == 10
assert res.item2 == 9
def test_bltn_list(self):
# test for ll_copy()
for resize1 in [False, True]:
for resize2 in [False, True]:
def dummyfn():
l1 = [42]
if resize1: l1.append(43)
l2 = list(l1)
if resize2: l2.append(44)
l2[0] = 0
return l1[0]
res = self.interpret(dummyfn, ())
assert res == 42
def test_is_true(self):
def is_true(lst):
if lst:
return True
else:
return False
def dummyfn1():
return is_true(None)
def dummyfn2():
return is_true([])
def dummyfn3():
return is_true([0])
assert self.interpret(dummyfn1, ()) == False
assert self.interpret(dummyfn2, ()) == False
assert self.interpret(dummyfn3, ()) == True
def test_list_index_simple(self):
def dummyfn(i):
l = [5,6,7,8]
return l.index(i)
res = self.interpret(dummyfn, (6,))
assert res == 1
self.interpret_raises(ValueError, dummyfn, [42])
def test_insert_pop(self):
def dummyfn():
l = [6, 7, 8]
l.insert(0, 5)
l.insert(1, 42)
l.pop(2)
l.pop(0)
l.pop(-1)
l.pop()
return l[-1]
res = self.interpret(dummyfn, ())#, view=True)
assert res == 42
def test_insert_bug(self):
def dummyfn(n):
l = [1]
l = l[:]
l.pop(0)
if n < 0:
l.insert(0, 42)
else:
l.insert(n, 42)
return l
res = self.interpret(dummyfn, [0])
assert res.ll_length() == 1
assert res.ll_getitem_fast(0) == 42
res = self.interpret(dummyfn, [-1])
assert res.ll_length() == 1
assert res.ll_getitem_fast(0) == 42
def test_inst_pop(self):
class A:
pass
l = [A(), A()]
def f(idx):
try:
return l.pop(idx)
except IndexError:
return None
res = self.interpret(f, [1])
assert self.class_name(res) == 'A'
#''.join(res.super.typeptr.name) == 'A\00'
def test_reverse(self):
def dummyfn():
l = [5, 3, 2]
l.reverse()
return l[0]*100 + l[1]*10 + l[2]
res = self.interpret(dummyfn, ())
assert res == 235
def dummyfn():
l = [5]
l.append(3)
l.append(2)
l.reverse()
return l[0]*100 + l[1]*10 + l[2]
res = self.interpret(dummyfn, ())
assert res == 235
def test_prebuilt_list(self):
klist = [6, 7, 8, 9]
def dummyfn(n):
return klist[n]
res = self.interpret(dummyfn, [0])
assert res == 6
res = self.interpret(dummyfn, [3])
assert res == 9
res = self.interpret(dummyfn, [-2])
assert res == 8
klist = ['a', 'd', 'z']
def mkdummyfn():
def dummyfn(n):
klist.append('k')
return klist[n]
return dummyfn
res = self.interpret(mkdummyfn(), [0])
assert res == 'a'
res = self.interpret(mkdummyfn(), [3])
assert res == 'k'
res = self.interpret(mkdummyfn(), [-2])
assert res == 'z'
def test_bound_list_method(self):
klist = [1, 2, 3]
# for testing constant methods without actually mutating the constant
def dummyfn(n):
klist.extend([])
self.interpret(dummyfn, [7])
def test_list_is(self):
def dummyfn():
l1 = []
return l1 is l1
res = self.interpret(dummyfn, [])
assert res is True
def dummyfn():
l2 = [1, 2]
return l2 is l2
res = self.interpret(dummyfn, [])
assert res is True
def dummyfn():
l1 = [2]
l2 = [1, 2]
return l1 is l2
res = self.interpret(dummyfn, [])
assert res is False
def dummyfn():
l1 = [1, 2]
l2 = [1]
l2.append(2)
return l1 is l2
res = self.interpret(dummyfn, [])
assert res is False
def dummyfn():
l1 = None
l2 = [1, 2]
return l1 is l2
res = self.interpret(dummyfn, [])
assert res is False
def dummyfn():
l1 = None
l2 = [1]
l2.append(2)
return l1 is l2
res = self.interpret(dummyfn, [])
assert res is False
def test_list_compare(self):
def fn(i, j, neg=False):
s1 = [[1, 2, 3], [4, 5, 1], None]
s2 = [[1, 2, 3], [4, 5, 1], [1], [1, 2], [4, 5, 1, 6],
[7, 1, 1, 8, 9, 10], None]
if neg: return s1[i] != s2[i]
return s1[i] == s2[j]
for i in range(3):
for j in range(7):
for case in False, True:
res = self.interpret(fn, [i,j,case])
assert res is fn(i, j, case)
def fn(i, j, neg=False):
s1 = [[1, 2, 3], [4, 5, 1], None]
l = []
l.extend([1,2,3])
s2 = [l, [4, 5, 1], [1], [1, 2], [4, 5, 1, 6],
[7, 1, 1, 8, 9, 10], None]
if neg: return s1[i] != s2[i]
return s1[i] == s2[j]
for i in range(3):
for j in range(7):
for case in False, True:
res = self.interpret(fn, [i,j,case])
assert res is fn(i, j, case)
def test_list_comparestr(self):
def fn(i, j, neg=False):
s1 = [["hell"], ["hello", "world"]]
s1[0][0] += "o" # ensure no interning
s2 = [["hello"], ["world"]]
if neg: return s1[i] != s2[i]
return s1[i] == s2[j]
for i in range(2):
for j in range(2):
for case in False, True:
res = self.interpret(fn, [i,j,case])
assert res is fn(i, j, case)
def test_list_compareinst(self):
def fn(i, j, neg=False):
foo1 = Foo()
foo2 = Foo()
bar1 = Bar()
s1 = [[foo1], [foo2], [bar1]]
s2 = s1[:]
if neg: return s1[i] != s2[i]
return s1[i] == s2[j]
for i in range(3):
for j in range(3):
for case in False, True:
res = self.interpret(fn, [i, j, case])
assert res is fn(i, j, case)
def fn(i, j, neg=False):
foo1 = Foo()
foo2 = Foo()
bar1 = Bar()
s1 = [[foo1], [foo2], [bar1]]
s2 = s1[:]
s2[0].extend([])
if neg: return s1[i] != s2[i]
return s1[i] == s2[j]
for i in range(3):
for j in range(3):
for case in False, True:
res = self.interpret(fn, [i, j, case])
assert res is fn(i, j, case)
def test_list_contains(self):
def fn(i, neg=False):
foo1 = Foo()
foo2 = Foo()
bar1 = Bar()
bar2 = Bar()
lis = [foo1, foo2, bar1]
args = lis + [bar2]
if neg : return args[i] not in lis
return args[i] in lis
for i in range(4):
for case in False, True:
res = self.interpret(fn, [i, case])
assert res is fn(i, case)
def fn(i, neg=False):
foo1 = Foo()
foo2 = Foo()
bar1 = Bar()
bar2 = Bar()
lis = [foo1, foo2, bar1]
lis.append(lis.pop())
args = lis + [bar2]
if neg : return args[i] not in lis
return args[i] in lis
for i in range(4):
for case in False, True:
res = self.interpret(fn, [i, case])
assert res is fn(i, case)
def test_not_a_char_list_after_all(self):
def fn():
l = ['h', 'e', 'l', 'l', 'o']
return 'world' in l
res = self.interpret(fn, [])
assert res is False
def test_list_index(self):
def fn(i):
foo1 = Foo()
foo2 = Foo()
bar1 = Bar()
bar2 = Bar()
lis = [foo1, foo2, bar1]
args = lis + [bar2]
return lis.index(args[i])
for i in range(4):
for varsize in False, True:
try:
res2 = fn(i)
res1 = self.interpret(fn, [i])
assert res1 == res2
except Exception, e:
self.interpret_raises(e.__class__, fn, [i])
def fn(i):
foo1 = Foo()
foo2 = Foo()
bar1 = Bar()
bar2 = Bar()
lis = [foo1, foo2, bar1]
args = lis + [bar2]
lis.append(lis.pop())
return lis.index(args[i])
for i in range(4):
for varsize in False, True:
try:
res2 = fn(i)
res1 = self.interpret(fn, [i])
assert res1 == res2
except Exception, e:
self.interpret_raises(e.__class__, fn, [i])
def test_list_str(self):
def fn():
return str([1,2,3])
res = self.interpret(fn, [])
assert self.ll_to_string(res) == fn()
def fn():
return str([[1,2,3]])
res = self.interpret(fn, [])
assert self.ll_to_string(res) == fn()
def fn():
l = [1,2]
l.append(3)
return str(l)
res = self.interpret(fn, [])
assert self.ll_to_string(res) == fn()
def fn():
l = [1,2]
l.append(3)
return str([l])
res = self.interpret(fn, [])
assert self.ll_to_string(res) == fn()
def fn():
return str([])
res = self.interpret(fn, [])
assert self.ll_to_string(res) == fn()
def fn():
return str([1.25])
res = self.interpret(fn, [])
assert eval(self.ll_to_string(res)) == [1.25]
def test_list_or_None(self):
empty_list = []
nonempty_list = [1, 2]
def fn(i):
test = [None, empty_list, nonempty_list][i]
if test:
return 1
else:
return 0
res = self.interpret(fn, [0])
assert res == 0
res = self.interpret(fn, [1])
assert res == 0
res = self.interpret(fn, [2])
assert res == 1
nonempty_list = [1, 2]
def fn(i):
empty_list = [1]
empty_list.pop()
nonempty_list = []
nonempty_list.extend([1,2])
test = [None, empty_list, nonempty_list][i]
if test:
return 1
else:
return 0
res = self.interpret(fn, [0])
assert res == 0
res = self.interpret(fn, [1])
assert res == 0
res = self.interpret(fn, [2])
assert res == 1
def test_inst_list(self):
def fn():
l = [None]
l[0] = Foo()
l.append(Bar())
l2 = [l[1], l[0], l[0]]
l.extend(l2)
for x in l2:
l.append(x)
x = l.pop()
x = l.pop()
x = l.pop()
x = l2.pop()
return str(x)+";"+str(l)
res = self.ll_to_string(self.interpret(fn, []))
res = res.replace('pypy.rpython.test.test_rlist.', '')
res = re.sub(' at 0x[a-z0-9]+', '', res)
assert res == '<Foo object>;[<Foo object>, <Bar object>, <Bar object>, <Foo object>, <Foo object>]'
def fn():
l = [None] * 2
l[0] = Foo()
l[1] = Bar()
l2 = [l[1], l[0], l[0]]
l = l + [None] * 3
i = 2
for x in l2:
l[i] = x
i += 1
return str(l)
res = self.ll_to_string(self.interpret(fn, []))
res = res.replace('pypy.rpython.test.test_rlist.', '')
res = re.sub(' at 0x[a-z0-9]+', '', res)
assert res == '[<Foo object>, <Bar object>, <Bar object>, <Foo object>, <Foo object>]'
def test_list_slice_minusone(self):
def fn(i):
lst = [i, i+1, i+2]
lst2 = lst[:-1]
return lst[-1] * lst2[-1]
res = self.interpret(fn, [5])
assert res == 42
def fn(i):
lst = [i, i+1, i+2, 7]
lst.pop()
lst2 = lst[:-1]
return lst[-1] * lst2[-1]
res = self.interpret(fn, [5])
assert res == 42
def test_list_multiply(self):
def fn(i):
lst = [i] * i
ret = len(lst)
if ret:
ret *= lst[-1]
return ret
for arg in (1, 9, 0, -1, -27):
res = self.interpret(fn, [arg])
assert res == fn(arg)
def fn(i):
lst = [i, i + 1] * i
ret = len(lst)
if ret:
ret *= lst[-1]
return ret
for arg in (1, 9, 0, -1, -27):
res = self.interpret(fn, [arg])
assert res == fn(arg)
def test_list_inplace_multiply(self):
def fn(i):
lst = [i]
lst *= i
ret = len(lst)
if ret:
ret *= lst[-1]
return ret
for arg in (1, 9, 0, -1, -27):
res = self.interpret(fn, [arg])
assert res == fn(arg)
def fn(i):
lst = [i, i + 1]
lst *= i
ret = len(lst)
if ret:
ret *= lst[-1]
return ret
for arg in (1, 9, 0, -1, -27):
res = self.interpret(fn, [arg])
assert res == fn(arg)
def test_indexerror(self):
def fn(i):
l = [5, 8, 3]
try:
l[i] = 99
except IndexError:
pass
try:
del l[i]
except IndexError:
pass
try:
return l[2]
except IndexError:
return -1
res = self.interpret(fn, [6])
assert res == 3
res = self.interpret(fn, [-2])
assert res == -1
def fn(i):
l = [5, 8]
l.append(3)
try:
l[i] = 99
except IndexError:
pass
try:
del l[i]
except IndexError:
pass
try:
return l[2]
except IndexError:
return -1
res = self.interpret(fn, [6])
assert res == 3
res = self.interpret(fn, [-2])
assert res == -1
def test_list_basic_ops(self):
def list_basic_ops(i=int, j=int):
l = [1,2,3]
l.insert(0, 42)
del l[1]
l.append(i)
listlen = len(l)
l.extend(l)
del l[listlen:]
l += [5,6]
l[1] = i
return l[j]
for i in range(6):
for j in range(6):
res = self.interpret(list_basic_ops, [i, j])
assert res == list_basic_ops(i, j)
def test_valueerror(self):
def fn(i):
l = [4, 7, 3]
try:
j = l.index(i)
except ValueError:
j = 100
return j
res = self.interpret(fn, [4])
assert res == 0
res = self.interpret(fn, [7])
assert res == 1
res = self.interpret(fn, [3])
assert res == 2
res = self.interpret(fn, [6])
assert res == 100
def fn(i):
l = [5, 8]
l.append(3)
try:
l[i] = 99
except IndexError:
pass
try:
del l[i]
except IndexError:
pass
try:
return l[2]
except IndexError:
return -1
res = self.interpret(fn, [6])
assert res == 3
res = self.interpret(fn, [-2])
assert res == -1
def test_voidlist_prebuilt(self):
frlist = [Freezing()] * 17
def mylength(l):
return len(l)
def f():
return mylength(frlist)
res = self.interpret(f, [])
assert res == 17
def test_voidlist_fixed(self):
fr = Freezing()
def f():
return len([fr, fr])
res = self.interpret(f, [])
assert res == 2
def test_voidlist_nonfixed(self):
class Freezing:
def _freeze_(self):
return True
fr = Freezing()
def f():
lst = [fr, fr]
lst.append(fr)
del lst[1]
assert lst[0] is fr
return len(lst)
res = self.interpret(f, [])
assert res == 2
def test_access_in_try(self):
def f(sq):
try:
return sq[2]
except ZeroDivisionError:
return 42
return -1
def g(n):
l = [1] * n
return f(l)
res = self.interpret(g, [3])
assert res == 1
def test_access_in_try_set(self):
def f(sq):
try:
sq[2] = 77
except ZeroDivisionError:
return 42
return -1
def g(n):
l = [1] * n
f(l)
return l[2]
res = self.interpret(g, [3])
assert res == 77
def test_list_equality(self):
def dummyfn(n):
lst = [12] * n
assert lst == [12, 12, 12]
lst2 = [[12, 34], [5], [], [12, 12, 12], [5]]
assert lst in lst2
self.interpret(dummyfn, [3])
def test_list_remove(self):
def dummyfn(n, p):
l = range(n)
l.remove(p)
return len(l)
res = self.interpret(dummyfn, [1, 0])
assert res == 0
def test_getitem_exc_1(self):
def f(x):
l = [1]
return l[x]
res = self.interpret(f, [0])
assert res == 1
if self.type_system == 'lltype':
# on lltype we always get an AssertionError
py.test.raises(AssertionError, self.interpret, f, [1])
else:
# on ootype we happen to get through the ll_asserts and to
# hit the IndexError from ootype.py
self.interpret_raises(IndexError, f, [1])
def f(x):
l = [1]
try:
return l[x]
except IndexError:
return -1
except Exception:
return 0
res = self.interpret(f, [0])
assert res == 1
res = self.interpret(f, [1])
assert res == -1
def f(x):
l = [1]
try:
return l[x]
except Exception:
return 0
res = self.interpret(f, [0])
assert res == 1
res = self.interpret(f, [1])
assert res == 0
def f(x):
l = [1]
try:
return l[x]
except ValueError:
return 0
res = self.interpret(f, [0])
assert res == 1
def test_getitem_exc_2(self):
def f(x):
l = [1]
return l[x]
res = self.interpret(f, [0])
assert res == 1
if self.type_system == 'lltype':
# on lltype we always get an AssertionError
py.test.raises(AssertionError, self.interpret, f, [1])
else:
# on ootype we happen to get through the ll_asserts and to
# hit the IndexError from ootype.py
self.interpret_raises(IndexError, f, [1])
def f(x):
l = [1]
try:
return l[x]
except IndexError:
return -1
except Exception:
return 0
res = self.interpret(f, [0])
assert res == 1
res = self.interpret(f, [1])
assert res == -1
def f(x):
l = [1]
try:
return l[x]
except Exception:
return 0
res = self.interpret(f, [0])
assert res == 1
res = self.interpret(f, [1])
assert res == 0
def f(x):
l = [1]
try:
return l[x]
except ValueError:
return 0
res = self.interpret(f, [0])
assert res == 1
if self.type_system == 'lltype':
# on lltype we always get an AssertionError
py.test.raises(AssertionError, self.interpret, f, [1])
else:
# on ootype we happen to get through the ll_asserts and to
# hit the IndexError from ootype.py
self.interpret_raises(IndexError, f, [1])
def test_charlist_extension_1(self):
def f(n):
s = 'hello%d' % n
l = ['a', 'b']
l += s
return ''.join(l)
res = self.interpret(f, [58])
assert self.ll_to_string(res) == 'abhello58'
def test_unicharlist_extension_1(self):
def f(n):
s = 'hello%d' % n
l = [u'a', u'b']
l += s
return ''.join([chr(ord(c)) for c in l])
res = self.interpret(f, [58])
assert self.ll_to_string(res) == 'abhello58'
def test_extend_a_non_char_list_1(self):
def f(n):
s = 'hello%d' % n
l = ['foo', 'bar']
l += s # NOT SUPPORTED for now if l is not a list of chars
return ''.join(l)
py.test.raises(TyperError, self.interpret, f, [58])
def test_charlist_extension_2(self):
def f(n, i):
s = 'hello%d' % n
assert 0 <= i <= len(s)
l = ['a', 'b']
l += s[i:]
return ''.join(l)
res = self.interpret(f, [9381701, 3])
assert self.ll_to_string(res) == 'ablo9381701'
def test_unicharlist_extension_2(self):
def f(n, i):
s = 'hello%d' % n
assert 0 <= i <= len(s)
l = [u'a', u'b']
l += s[i:]
return ''.join([chr(ord(c)) for c in l])
res = self.interpret(f, [9381701, 3])
assert self.ll_to_string(res) == 'ablo9381701'
def test_extend_a_non_char_list_2(self):
def f(n, i):
s = 'hello%d' % n
assert 0 <= i <= len(s)
l = ['foo', 'bar']
l += s[i:] # NOT SUPPORTED for now if l is not a list of chars
return ''.join(l)
py.test.raises(TyperError, self.interpret, f, [9381701, 3])
def test_charlist_extension_3(self):
def f(n, i, j):
s = 'hello%d' % n
assert 0 <= i <= j <= len(s)
l = ['a', 'b']
l += s[i:j]
return ''.join(l)
res = self.interpret(f, [9381701, 3, 7])
assert self.ll_to_string(res) == 'ablo93'
def test_unicharlist_extension_3(self):
def f(n, i, j):
s = 'hello%d' % n
assert 0 <= i <= j <= len(s)
l = [u'a', u'b']
l += s[i:j]
return ''.join([chr(ord(c)) for c in l])
res = self.interpret(f, [9381701, 3, 7])
assert self.ll_to_string(res) == 'ablo93'
def test_charlist_extension_4(self):
def f(n):
s = 'hello%d' % n
l = ['a', 'b']
l += s[:-1]
return ''.join(l)
res = self.interpret(f, [9381701])
assert self.ll_to_string(res) == 'abhello938170'
def test_unicharlist_extension_4(self):
def f(n):
s = 'hello%d' % n
l = [u'a', u'b']
l += s[:-1]
return ''.join([chr(ord(c)) for c in l])
res = self.interpret(f, [9381701])
assert self.ll_to_string(res) == 'abhello938170'
def test_charlist_extension_5(self):
def f(count):
l = ['a', 'b']
l += '.' * count # char * count
return ''.join(l)
res = self.interpret(f, [7])
assert self.ll_to_string(res) == 'ab.......'
res = self.interpret(f, [0])
assert self.ll_to_string(res) == 'ab'
def test_unicharlist_extension_5(self):
def f(count):
l = [u'a', u'b']
l += '.' * count # NON-UNICODE-char * count
return ''.join([chr(ord(c)) for c in l])
res = self.interpret(f, [7])
assert self.ll_to_string(res) == 'ab.......'
res = self.interpret(f, [0])
assert self.ll_to_string(res) == 'ab'
def test_charlist_extension_6(self):
def f(count):
l = ['a', 'b']
l += count * '.' # count * char
return ''.join(l)
res = self.interpret(f, [7])
assert self.ll_to_string(res) == 'ab.......'
res = self.interpret(f, [0])
assert self.ll_to_string(res) == 'ab'
def test_extend_a_non_char_list_6(self):
def f(count):
l = ['foo', 'bar']
# NOT SUPPORTED for now if l is not a list of chars
l += count * '.'
return ''.join(l)
py.test.raises(TyperError, self.interpret, f, [5])
def test_r_short_list(self):
from pypy.rpython.lltypesystem.rffi import r_short
from pypy.rlib import rarithmetic
def f(i):
l = [r_short(0)] * 10
l[i+1] = r_short(3)
return rarithmetic.widen(l[i])
res = self.interpret(f, [3])
assert res == 0
def test_make_new_list(self):
class A:
def _freeze_(self):
return True
a1 = A()
a2 = A()
def f(i):
lst = [a1, a1]
lst2 = list(lst)
lst2.append(a2)
return lst2[i] is a2
res = self.interpret(f, [1])
assert res == False
res = self.interpret(f, [2])
assert res == True
def test_immutable_list_out_of_instance(self):
from pypy.translator.simplify import get_funcobj
for immutable_fields in (["a", "b"], ["a", "b", "y[*]"]):
class A(object):
_immutable_fields_ = immutable_fields
class B(A):
pass
def f(i):
b = B()
lst = [i]
lst[0] += 1
b.y = lst
ll_assert(b.y is lst, "copying when reading out the attr?")
return b.y[0]
res = self.interpret(f, [10])
assert res == 11
t, rtyper, graph = self.gengraph(f, [int])
block = graph.startblock
op = block.operations[-1]
assert op.opname == 'direct_call'
func = get_funcobj(op.args[0].value)._callable
assert ('foldable' in func.func_name) == \
("y[*]" in immutable_fields)
def test_hints(self):
from pypy.rlib.objectmodel import newlist_hint
strings = ['abc', 'def']
def f(i):
z = strings[i]
x = newlist_hint(sizehint=13)
x += z
return ''.join(x)
res = self.interpret(f, [0])
assert self.ll_to_string(res) == 'abc'
class TestLLtype(BaseTestRlist, LLRtypeMixin):
type_system = 'lltype'
rlist = ll_rlist
def test_memoryerror(self):
def fn(i):
lst = [0] * i
lst[i-1] = 5
return lst[0]
res = self.interpret(fn, [1])
assert res == 5
res = self.interpret(fn, [2])
assert res == 0
self.interpret_raises(MemoryError, fn, [sys.maxint])
def test_type_erase_fixed_size(self):
class A(object):
pass
class B(object):
pass
def f():
return [A()], [B()]
t = TranslationContext()
s = t.buildannotator().build_types(f, [])
rtyper = t.buildrtyper(type_system=self.type_system)
rtyper.specialize()
s_A_list = s.items[0]
s_B_list = s.items[1]
r_A_list = rtyper.getrepr(s_A_list)
assert isinstance(r_A_list, self.rlist.FixedSizeListRepr)
r_B_list = rtyper.getrepr(s_B_list)
assert isinstance(r_B_list, self.rlist.FixedSizeListRepr)
assert r_A_list.lowleveltype == r_B_list.lowleveltype
def test_type_erase_var_size(self):
class A(object):
pass
class B(object):
pass
def f():
la = [A()]
lb = [B()]
la.append(None)
lb.append(None)
return la, lb
t = TranslationContext()
s = t.buildannotator().build_types(f, [])
rtyper = t.buildrtyper(type_system=self.type_system)
rtyper.specialize()
s_A_list = s.items[0]
s_B_list = s.items[1]
r_A_list = rtyper.getrepr(s_A_list)
assert isinstance(r_A_list, self.rlist.ListRepr)
r_B_list = rtyper.getrepr(s_B_list)
assert isinstance(r_B_list, self.rlist.ListRepr)
assert r_A_list.lowleveltype == r_B_list.lowleveltype
def test_no_unneeded_refs(self):
def fndel(p, q):
lis = ["5", "3", "99"]
assert q >= 0
assert p >= 0
del lis[p:q]
return lis
def fnpop(n):
lis = ["5", "3", "99"]
while n:
lis.pop()
n -=1
return lis
for i in range(2, 3+1):
lis = self.interpret(fndel, [0, i])
assert list_is_clear(lis, 3-i)
for i in range(3):
lis = self.interpret(fnpop, [i])
assert list_is_clear(lis, 3-i)
def test_oopspec(self):
lst1 = [123, 456] # non-mutated list
def f(i):
lst2 = [i]
lst2.append(42) # mutated list
return lst1[i] + lst2[i]
from pypy.annotation import model as annmodel
_, _, graph = self.gengraph(f, [annmodel.SomeInteger(nonneg=True)])
block = graph.startblock
lst1_getitem_op = block.operations[-3] # XXX graph fishing
lst2_getitem_op = block.operations[-2]
func1 = lst1_getitem_op.args[0].value._obj._callable
func2 = lst2_getitem_op.args[0].value._obj._callable
assert func1.oopspec == 'list.getitem_foldable(l, index)'
assert not hasattr(func2, 'oopspec')
def test_iterate_over_immutable_list(self):
from pypy.rpython import rlist
class MyException(Exception):
pass
lst = list('abcdef')
def dummyfn():
total = 0
for c in lst:
total += ord(c)
return total
#
prev = rlist.ll_getitem_foldable_nonneg
try:
def seen_ok(l, index):
if index == 5:
raise KeyError # expected case
return prev(l, index)
rlist.ll_getitem_foldable_nonneg = seen_ok
e = raises(LLException, self.interpret, dummyfn, [])
assert 'KeyError' in str(e.value)
finally:
rlist.ll_getitem_foldable_nonneg = prev
def test_iterate_over_immutable_list_quasiimmut_attr(self):
from pypy.rpython import rlist
class MyException(Exception):
pass
class Foo:
_immutable_fields_ = ['lst?[*]']
lst = list('abcdef')
foo = Foo()
def dummyfn():
total = 0
for c in foo.lst:
total += ord(c)
return total
#
prev = rlist.ll_getitem_foldable_nonneg
try:
def seen_ok(l, index):
if index == 5:
raise KeyError # expected case
return prev(l, index)
rlist.ll_getitem_foldable_nonneg = seen_ok
e = raises(LLException, self.interpret, dummyfn, [])
assert 'KeyError' in str(e.value)
finally:
rlist.ll_getitem_foldable_nonneg = prev
def test_iterate_over_mutable_list(self):
from pypy.rpython import rlist
class MyException(Exception):
pass
lst = list('abcdef')
def dummyfn():
total = 0
for c in lst:
total += ord(c)
lst[0] = 'x'
return total
#
prev = rlist.ll_getitem_foldable_nonneg
try:
def seen_ok(l, index):
if index == 5:
raise KeyError # expected case
return prev(l, index)
rlist.ll_getitem_foldable_nonneg = seen_ok
res = self.interpret(dummyfn, [])
assert res == sum(map(ord, 'abcdef'))
finally:
rlist.ll_getitem_foldable_nonneg = prev
class TestOOtype(BaseTestRlist, OORtypeMixin):
rlist = oo_rlist
type_system = 'ootype'
|
# -*- coding: UTF-8 -*-
import os
import sys
import requests
from pprint import pprint
import time
import logging
import numpy as np
from binance.spot import Spot as Client
from decimal import Decimal
import json
from binance.error import ClientError
from key import *
from utils import *
from module import bots
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler('log.txt')
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def _get_server_time():
return int(time.time())
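# Note: despite its name, _get_server_time returns the local Unix time in
# seconds; it does not query Binance's server-time endpoint.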
class BotManager(object):
def __init__(self):
logging.info('Greetings, Master Gouxiujin~ the quant bot here, ready to start this round of tuning (debugging)!!')
self.bot_list = []
self.client = Client(key, secret, base_url=url)
def add_grid_bot(self, symbol, price_mode, price_diff, max_order, fund_each):
insbot = bots.grid_bot(self.client, symbol, price_mode, price_diff, max_order, fund_each)
self.bot_list.append(insbot)
def add_balance_bot(self, asset, symbol, multiple, diff):
insbot = bots.balance_bot(self.client, asset, symbol, multiple, diff)
self.bot_list.append(insbot)
def run_init(self):
self.bot_num = len(self.bot_list)
logging.info("交易分姬数:{}".format(self.bot_num))
def do_manager_loop(self):
with open('BotManager.json', 'w') as f:
save = {'status': "running", 'time': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), }
json_str = json.dumps(save)
f.write(json_str)
for bot in self.bot_list:
bot.do_a_loop()
if __name__ == '__main__':
bot_manager = BotManager()
# Trading pair; price-diff mode: arithmetic/geometric; price-diff amount; max order count; funds per grid
bot_manager.add_grid_bot(symbol='BNBBUSD', price_mode='geometric', price_diff=0.015, max_order=20, fund_each=20)
bot_manager.add_grid_bot(symbol='AVAXBUSD', price_mode='geometric', price_diff=0.04, max_order=20, fund_each=20)
# bot_manager.add_grid_bot(symbol='GTCBUSD', price_mode='geometric', price_diff=0.055, max_order=20, fund_each=20)
bot_manager.run_init()
last_err_time = 0.0
re_err_cnt = 0
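# Error policy of the loop below: exceptions less than 10 s apart increment
# re_err_cnt; on the third such error in a row the manager writes an "error"
# status to BotManager.json and exits, otherwise it sleeps 5 s and retries.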
while True:
try:
bot_manager.do_manager_loop()
time.sleep(20)
except Exception as error:
this_err_time = time.time()
if this_err_time - last_err_time <= 10.0:
re_err_cnt += 1
else:
re_err_cnt = 1
if re_err_cnt == 3:
logging.info("警告!发生严重错误!!请检查交易姬后台")
with open('BotManager.json', 'w') as f:
save = {'status': "error", 'time': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
'error': str(getattr(error, 'error_message', error))}  # not every exception has error_message
json_str = json.dumps(save)
f.write(json_str)
exit()
logging.info(f"catch error:{error}")
last_err_time = this_err_time
time.sleep(5)
|
import glob
import optparse
import os.path
import subprocess
import sys
def main():
""" Repeatedly run GUI functional tests. """
parser = optparse.OptionParser()
parser.add_option('-t', '--trials', type='int', default=100,
help='# trials to attempt')
parser.add_option('-n', '--nonose', action='store_true',
help='if present, run outside of nose')
options, files = parser.parse_args()
args = ['-v']
if options.nonose:
args.append('--nonose')
for stop in ('STOP', 'STOP.txt'):
if os.path.exists(stop):
os.remove(stop)
logfile = open('stressit.log', 'w')
for trial in range(options.trials):
# Stop cleanly when a STOP marker file has been created.
if any(os.path.exists(stop) for stop in ('STOP', 'STOP.txt')):
break
if sys.platform == 'win32':
msg = 'Trial %s:' % (trial+1)
else:
avgs = os.getloadavg()
msg = 'Trial %s: %.2f, %.2f, %.2f' \
% (trial+1, avgs[0], avgs[1], avgs[2])
print msg
logfile.write('\n'+msg+'\n')
if not files:
files = sorted(glob.glob('test_*.py'))
for test_script in files:
msg = ' Running %s' % test_script
print msg
logfile.write(msg+'\n')
logfile.flush()
cmd = ['python', test_script]
cmd.extend(args)
status = subprocess.call(cmd, stdout=logfile, stderr=subprocess.STDOUT)
if status:
msg = ' exit status %s' % status
print msg
logfile.write(msg+'\n')
sys.exit(status)
logfile.close()
if __name__ == '__main__':
main()
|
"""
Inits the summary bot. It starts a Reddit instance using PRAW, gets the latest
posts, and filters out those that have already been processed.
"""
import praw
import requests
import tldextract
import cloud
import config
import scraper
import summary
# We don't reply to posts which have a very small or very high reduction.
MINIMUM_REDUCTION_THRESHOLD = 20
MAXIMUM_REDUCTION_THRESHOLD = 68
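# For example, assuming `reduction` is the percentage of text removed, a
# 1,000-word article summarized to 600 words has a reduction of 40 and gets a
# reply; reductions below 20 or above 68 are skipped as likely bad extractions.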
# File locations
POSTS_LOG = "./processed_posts.txt"
WHITELIST_FILE = "./assets/whitelist.txt"
ERROR_LOG = "./error.log"
# Templates.
with open("./templates/es.txt", "r", encoding="utf-8") as template_file:
TEMPLATE = template_file.read()
HEADERS = {"User-Agent": "Summarizer v2.0"}
def load_whitelist():
"""Reads the processed posts log file and creates it if it doesn't exist.
Returns
-------
list
A list of domains that are confirmed to have an 'article' tag.
"""
with open(WHITELIST_FILE, "r", encoding="utf-8") as log_file:
return log_file.read().splitlines()
def load_log():
"""Reads the processed posts log file and creates it if it doesn't exist.
Returns
-------
list
A list of Reddit posts ids.
"""
try:
with open(POSTS_LOG, "r", encoding="utf-8") as log_file:
return log_file.read().splitlines()
except FileNotFoundError:
with open(POSTS_LOG, "a", encoding="utf-8") as log_file:
return []
def update_log(post_id):
"""Updates the processed posts log with the given post id.
Parameters
----------
post_id : str
A Reddit post id.
"""
with open(POSTS_LOG, "a", encoding="utf-8") as log_file:
log_file.write("{}\n".format(post_id))
def log_error(error_message):
"""Updates the error log.
Parameters
----------
error_message : str
A string containing the faulty url and the exception message.
"""
with open(ERROR_LOG, "a", encoding="utf-8") as log_file:
log_file.write("{}\n".format(error_message))
def init():
"""Inits the bot."""
reddit = praw.Reddit(client_id=config.APP_ID, client_secret=config.APP_SECRET,
user_agent=config.USER_AGENT, username=config.REDDIT_USERNAME,
password=config.REDDIT_PASSWORD)
processed_posts = load_log()
whitelist = load_whitelist()
for subreddit in config.SUBREDDITS:
for submission in reddit.subreddit(subreddit).new(limit=50):
if submission.id not in processed_posts:
clean_url = submission.url.replace("amp.", "")
ext = tldextract.extract(clean_url)
domain = "{}.{}".format(ext.domain, ext.suffix)
if domain in whitelist:
try:
with requests.get(clean_url, headers=HEADERS, timeout=10) as response:
# Most of the times the encoding is utf-8 but in edge cases
# we set it to ISO-8859-1 when it is present in the HTML header.
if "iso-8859-1" in response.text.lower():
response.encoding = "iso-8859-1"
elif response.encoding == "ISO-8859-1":
response.encoding = "utf-8"
html_source = response.text
article_title, article_date, article_body = scraper.scrape_html(
html_source)
summary_dict = summary.get_summary(article_body)
except Exception as e:
log_error("{},{}".format(clean_url, e))
update_log(submission.id)
print("Failed:", submission.id)
continue
# To reduce low quality submissions, we only process those that made a meaningful summary.
if summary_dict["reduction"] >= MINIMUM_REDUCTION_THRESHOLD and summary_dict["reduction"] <= MAXIMUM_REDUCTION_THRESHOLD:
# Create a wordcloud, upload it to Imgur and get back the url.
image_url = cloud.generate_word_cloud(
summary_dict["article_words"])
# We start creating the comment body.
post_body = "\n\n".join(
["> " + item for item in summary_dict["top_sentences"]])
top_words = ""
for index, word in enumerate(summary_dict["top_words"]):
top_words += "{}^#{} ".format(word, index+1)
post_message = TEMPLATE.format(
article_title, clean_url, summary_dict["reduction"], article_date, post_body, image_url, top_words)
submission.reply(post_message)
update_log(submission.id)
print("Replied to:", submission.id)
else:
update_log(submission.id)
print("Skipped:", submission.id)
if __name__ == "__main__":
init()
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Tests for the skhep.dataset.defs module.
"""
# -----------------------------------------------------------------------------
# Import statements
# -----------------------------------------------------------------------------
import pytest
from skhep.dataset.defs import *
# -----------------------------------------------------------------------------
# Actual tests
# -----------------------------------------------------------------------------
def test_base_classes():
with pytest.raises(TypeError):
Dataset()
def test_mixins():
FromPersistent()
ToPersistent()
ConvertibleCopy()
ff = FromFiles()
with pytest.raises(NotImplementedError):
ff.from_file('non_existent_file')
tf = ToFiles()
with pytest.raises(NotImplementedError):
tf.to_file('non_existent_file')
np = NewNumpy()
with pytest.raises(NotImplementedError):
np.to_array()
nr = NewROOT()
with pytest.raises(NotImplementedError):
nr.to_tree()
|
#Distributed under the MIT license.
#Copyright (c) 2015 Dave McCoy (dave.mccoy@cospandesign.com)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
""" DMA
Facilitates communication with the DMA controller
"""
__author__ = 'dave.mccoy@cospandesign.com (Dave McCoy)'
import sys
import os
import time
from array import array as Array
from nysa.host.driver import driver
#Artemis USB2 Identifier
ARTEMIS_USB2_ID = 0x03
#Register Constants
CONTROL = 0x00
STATUS = 0x01
SATA_CLK_COUNT = 0x02
SATA_FST_CLK_COUNT = 0x03
PCIE_RESET = 2
SATA_RESET = 3
GTP_RX_PRE_AMP_LOW = 4
GTP_RX_PRE_AMP_HIGH = 5
GTP_TX_DIFF_SWING_LOW = 8
GTP_TX_DIFF_SWING_HIGH = 11
PCIE_RX_POLARITY = 12
SATA_PLL_DETECT_K = 0
PCIE_PLL_DETECT_K = 1
SATA_RESET_DONE = 2
PCIE_RESET_DONE = 3
SATA_DCM_PLL_LOCKED = 4
PCIE_DCM_PLL_LOCKED = 5
SATA_RX_IDLE = 6
PCIE_RX_IDLE = 7
SATA_TX_IDLE = 8
PCIE_TX_IDLE = 9
SATA_LOSS_OF_SYNC = 10
PCIE_LOSS_OF_SYNC = 11
SATA_BYTE_IS_ALIGNED = 12
PCIE_BYTE_IS_ALIGNED = 13
class ArtemisUSB2DriverError(Exception):
pass
class ArtemisUSB2Driver(driver.Driver):
"""
Artemis Driver
"""
@staticmethod
def get_abi_class():
return 0
@staticmethod
def get_abi_major():
return driver.get_device_id_from_name("platform")
@staticmethod
def get_abi_minor():
return ARTEMIS_USB2_ID
def __init__(self, nysa, urn, debug = False):
super (ArtemisUSB2Driver, self).__init__(nysa, urn, debug)
def __del__(self):
pass
def enable_pcie_reset(self, enable):
"""
Reset the PCIE GTP State Machine
Args:
enable (bool): True to hold the PCIE GTP state machine in reset,
False to release the reset
Returns:
Nothing
Raises:
Nothing
"""
self.enable_register_bit(CONTROL, PCIE_RESET, enable)
def enable_sata_reset(self, enable):
"""
Reset the SATA GTP State Machine
Args:
enable (bool): True to hold the SATA GTP state machine in reset,
False to release the reset
Returns:
Nothing
Raises:
Nothing
"""
self.enable_register_bit(CONTROL, SATA_RESET, enable)
def set_gtp_rx_preamp(self, value):
"""
Set the value of the Receiver Preamplifier (0 - 7)
Args:
value (Integer): 0 - 7 (Higher has more gain)
Returns:
Nothing
Raises:
Nothing
"""
reg = self.read_register(CONTROL)
bitmask = (((1 << (GTP_RX_PRE_AMP_HIGH + 1))) - (1 << GTP_RX_PRE_AMP_LOW))
reg &= ~(bitmask)
reg |= value << GTP_RX_PRE_AMP_LOW
self.write_register(CONTROL, reg)
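# Worked example with the register constants above (LOW=4, HIGH=5):
# bitmask = (1 << 6) - (1 << 4) = 0b110000, i.e. bits 5:4 of CONTROL,
# so writing value=3 sets both preamp bits without touching other fields.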
def set_gtp_tx_diff_swing(self, value):
"""
Sets the value of the transmitter differential swing
Args:
value (Integer): 0 - 3 (Higher has larger swing)
Returns:
Nothing
Raises:
Nothing
"""
reg = self.read_register(CONTROL)
bitmask = (((1 << (GTP_TX_DIFF_SWING_HIGH + 1))) - (1 << GTP_TX_DIFF_SWING_LOW))
reg &= ~(bitmask)
reg |= value << GTP_TX_DIFF_SWING_LOW
self.write_register(CONTROL, reg)
def set_pcie_rx_polarity(self, positive):
"""
Sets the polarity of the PCIE Receiver phy signals
Note: A reset of the GTP stack is probably required after this
Args:
positive: If set true this will set the polarity positive
Returns:
Nothing
Raises:
Nothing
"""
self.enable_register_bit(CONTROL, PCIE_RX_POLARITY, not positive)
def is_pcie_reset(self):
"""
Return true if the GTP State machine for the PCIE is in reset state
Args:
Nothing
Returns (Boolean):
True: PCIE state machine is in a reset state
False: PCIE state machine is not in a reset state
Raises:
Nothing
"""
return self.is_register_bit_set(CONTROL, PCIE_RESET)
def is_sata_reset(self):
"""
Return true if the GTP State machine for the SATA is in reset state
Args:
Nothing
Returns (Boolean):
True: SATA state machine is in a reset state
False: SATA state machine is not in a reset state
Raises:
Nothing
"""
return self.is_register_bit_set(CONTROL, SATA_RESET)
def get_gtp_rx_preamp(self):
"""
Gets the current pre amplifier settings for the receiver
Args:
Nothing
Returns (Integer):
3-bit value between 0 to 7
Raises:
Nothing
"""
value = self.read_register(CONTROL)
bitmask = (((1 << (GTP_RX_PRE_AMP_HIGH + 1))) - (1 << GTP_RX_PRE_AMP_LOW))
value = value & bitmask
value = value >> GTP_RX_PRE_AMP_LOW
return value
def get_gtp_tx_diff_swing(self):
"""
Gets the current transmitter differential swing settings
Args:
Nothing
Returns (Integer):
2-bit value between 0 to 3
Raises:
Nothing
"""
value = self.read_register(CONTROL)
bitmask = ((1 << (GTP_TX_DIFF_SWING_HIGH + 1)) - (1 << GTP_TX_DIFF_SWING_LOW))
value = value & bitmask
value = value >> GTP_TX_DIFF_SWING_LOW
return value
def is_pcie_rx_polarity_positive(self):
"""
Returns True if the polarity of the PCIE Receiver is positive
Args:
Nothing
Returns (Boolean):
True: Polarity is positive
False: Polarity is negative
Raises:
Nothing
"""
return not self.is_register_bit_set(CONTROL, PCIE_RX_POLARITY)
def is_sata_pll_locked(self):
"""
Returns True if the GTP SATA PLL is locked
Args:
Nothing
Returns (Boolean):
True: PLL is Locked
False: PLL is not Locked
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, SATA_PLL_DETECT_K)
def is_pcie_pll_locked(self):
"""
Returns True if the GTP PCIE PLL is locked
Args:
Nothing
Returns (Boolean):
True: PLL is Locked
False: PLL is not Locked
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, PCIE_PLL_DETECT_K)
def is_sata_reset_done(self):
"""
Returns True if the SATA GTP state machine has finished its reset
sequence
Args:
Nothing
Returns (Boolean):
True: SATA GTP is ready
False: SATA GTP is not ready
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, SATA_RESET_DONE)
def is_pcie_reset_done(self):
"""
Returns True if the PCIE GTP state machine has finished its reset
sequence
Args:
Nothing
Returns (Boolean):
True: PCIE GTP is ready
False: PCIE GTP is not ready
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, PCIE_RESET_DONE)
def is_sata_dcm_pll_locked(self):
"""
Returns True if the DCM that synthesizes the two user clocks for SATA
(300MHz and 75MHz) is locked
Args:
Nothing
Returns (Boolean):
True: DCM locked
False: DCM is not locked
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, SATA_DCM_PLL_LOCKED)
def is_pcie_dcm_pll_locked(self):
"""
Returns True if the DCM that synthesizes the two user clocks for PCIE
(250MHz and 62.5MHz) is locked
Args:
Nothing
Returns (Boolean):
True: DCM locked
False: DCM is not locked
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, PCIE_DCM_PLL_LOCKED)
def is_sata_rx_idle(self):
"""
Returns True if SATA receiver is idle
Args:
Nothing
Returns (Boolean):
True: SATA Receiver is receiving an IDLE signal (no activity)
False: SATA Receiver is currently active
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, SATA_RX_IDLE)
def is_pcie_rx_idle(self):
"""
Returns True if PCIE receiver is idle
Args:
Nothing
Returns (Boolean):
True: PCIE Receiver is receiving an IDLE signal (no activity)
False: PCIE Receiver is currently active
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, PCIE_RX_IDLE)
def is_sata_tx_idle(self):
"""
Returns True if SATA transmitter is idle
Args:
Nothing
Returns (Boolean):
True: SATA Transmitter is sending an IDLE signal (no activity)
False: SATA Transmitter is currently active
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, SATA_TX_IDLE)
def is_pcie_tx_idle(self):
"""
Returns True if PCIE transmitter is idle
Args:
Nothing
Returns (Boolean):
True: PCIE Transmitter is sending an IDLE signal (no activity)
False: PCIE Transmitter is currently active
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, PCIE_TX_IDLE)
def is_sata_lost_sync(self):
"""
Returns True if SATA Receiver has lost synchronization with the hard
drive
Args:
Nothing
Returns (Boolean):
True: SATA Receiver has lost sync with the hard drive
False: SATA Receiver has not lost sync with the hard drive
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, SATA_LOSS_OF_SYNC)
def is_pcie_lost_sync(self):
"""
Returns True if PCIE Receiver has lost synchronization with the host
Args:
Nothing
Returns (Boolean):
True: PCIE Receiver has lost sync with the host
False: PCIE Receiver has not lost sync with the host
Raises:
Nothing
"""
return self.is_register_bit_set(STATUS, PCIE_LOSS_OF_SYNC)
def get_ref_clock_count(self):
return self.read_register(SATA_CLK_COUNT)
def get_ref_fst_clock_count(self):
return self.read_register(SATA_FST_CLK_COUNT)
def is_sata_byte_aligned(self):
return self.is_register_bit_set(STATUS, SATA_BYTE_IS_ALIGNED)
def is_pcie_byte_aligned(self):
return self.is_register_bit_set(STATUS, PCIE_BYTE_IS_ALIGNED)
|
"""
:mod:`websockets.server` defines the WebSocket server APIs.
"""
import asyncio
import collections.abc
import email.utils
import functools
import http
import logging
import socket
import sys
import warnings
from types import TracebackType
from typing import (
Any,
Awaitable,
Callable,
Generator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
from .exceptions import (
AbortHandshake,
InvalidHandshake,
InvalidHeader,
InvalidMessage,
InvalidOrigin,
InvalidUpgrade,
NegotiationError,
)
from .extensions.base import Extension, ServerExtensionFactory
from .extensions.permessage_deflate import ServerPerMessageDeflateFactory
from .handshake import build_response, check_request
from .headers import build_extension, parse_extension, parse_subprotocol
from .http import USER_AGENT, Headers, HeadersLike, MultipleValuesError, read_request
from .protocol import WebSocketCommonProtocol
from .typing import ExtensionHeader, Origin, Subprotocol
__all__ = ["serve", "unix_serve", "WebSocketServerProtocol", "WebSocketServer"]
logger = logging.getLogger(__name__)
HeadersLikeOrCallable = Union[HeadersLike, Callable[[str, Headers], HeadersLike]]
HTTPResponse = Tuple[http.HTTPStatus, HeadersLike, bytes]
class WebSocketServerProtocol(WebSocketCommonProtocol):
"""
:class:`~asyncio.Protocol` subclass implementing a WebSocket server.
This class inherits most of its methods from
:class:`~websockets.protocol.WebSocketCommonProtocol`.
For the sake of simplicity, it doesn't rely on a full HTTP implementation.
Its support for HTTP responses is very limited.
"""
is_client = False
side = "server"
def __init__(
self,
ws_handler: Callable[["WebSocketServerProtocol", str], Awaitable[Any]],
ws_server: "WebSocketServer",
*,
origins: Optional[Sequence[Optional[Origin]]] = None,
extensions: Optional[Sequence[ServerExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLikeOrCallable] = None,
process_request: Optional[
Callable[[str, Headers], Awaitable[Optional[HTTPResponse]]]
] = None,
select_subprotocol: Optional[
Callable[[Sequence[Subprotocol], Sequence[Subprotocol]], Subprotocol]
] = None,
**kwargs: Any,
) -> None:
# For backwards compatibility with 6.0 or earlier.
if origins is not None and "" in origins:
warnings.warn("use None instead of '' in origins", DeprecationWarning)
origins = [None if origin == "" else origin for origin in origins]
self.ws_handler = ws_handler
self.ws_server = ws_server
self.origins = origins
self.available_extensions = extensions
self.available_subprotocols = subprotocols
self.extra_headers = extra_headers
self._process_request = process_request
self._select_subprotocol = select_subprotocol
super().__init__(**kwargs)
def connection_made(self, transport: asyncio.BaseTransport) -> None:
"""
Register connection and initialize a task to handle it.
"""
super().connection_made(transport)
# Register the connection with the server before creating the handler
# task. Registering at the beginning of the handler coroutine would
# create a race condition between the creation of the task, which
# schedules its execution, and the moment the handler starts running.
self.ws_server.register(self)
self.handler_task = self.loop.create_task(self.handler())
async def handler(self) -> None:
"""
Handle the lifecycle of a WebSocket connection.
Since this method doesn't have a caller able to handle exceptions, it
attempts to log relevant ones and guarantees that the TCP connection is
closed before exiting.
"""
try:
try:
path = await self.handshake(
origins=self.origins,
available_extensions=self.available_extensions,
available_subprotocols=self.available_subprotocols,
extra_headers=self.extra_headers,
)
except ConnectionError:
logger.debug("Connection error in opening handshake", exc_info=True)
raise
except Exception as exc:
if isinstance(exc, AbortHandshake):
status, headers, body = exc.status, exc.headers, exc.body
elif isinstance(exc, InvalidOrigin):
logger.debug("Invalid origin", exc_info=True)
status, headers, body = (
http.HTTPStatus.FORBIDDEN,
Headers(),
f"Failed to open a WebSocket connection: {exc}.\n".encode(),
)
elif isinstance(exc, InvalidUpgrade):
logger.debug("Invalid upgrade", exc_info=True)
status, headers, body = (
http.HTTPStatus.UPGRADE_REQUIRED,
Headers([("Upgrade", "websocket")]),
(
f"Failed to open a WebSocket connection: {exc}.\n"
f"\n"
f"You cannot access a WebSocket server directly "
f"with a browser. You need a WebSocket client.\n"
).encode(),
)
elif isinstance(exc, InvalidHandshake):
logger.debug("Invalid handshake", exc_info=True)
status, headers, body = (
http.HTTPStatus.BAD_REQUEST,
Headers(),
f"Failed to open a WebSocket connection: {exc}.\n".encode(),
)
else:
logger.warning("Error in opening handshake", exc_info=True)
status, headers, body = (
http.HTTPStatus.INTERNAL_SERVER_ERROR,
Headers(),
(
b"Failed to open a WebSocket connection.\n"
b"See server log for more information.\n"
),
)
headers.setdefault("Date", email.utils.formatdate(usegmt=True))
headers.setdefault("Server", USER_AGENT)
headers.setdefault("Content-Length", str(len(body)))
headers.setdefault("Content-Type", "text/plain")
headers.setdefault("Connection", "close")
self.write_http_response(status, headers, body)
self.fail_connection()
await self.wait_closed()
return
try:
await self.ws_handler(self, path)
except Exception:
logger.error("Error in connection handler", exc_info=True)
if not self.closed:
self.fail_connection(1011)
raise
try:
await self.close()
except ConnectionError:
logger.debug("Connection error in closing handshake", exc_info=True)
raise
except Exception:
logger.warning("Error in closing handshake", exc_info=True)
raise
except Exception:
# Last-ditch attempt to avoid leaking connections on errors.
try:
self.transport.close()
except Exception: # pragma: no cover
pass
finally:
# Unregister the connection with the server when the handler task
# terminates. Registration is tied to the lifecycle of the handler
# task because the server waits for tasks attached to registered
# connections before terminating.
self.ws_server.unregister(self)
async def read_http_request(self) -> Tuple[str, Headers]:
"""
Read request line and headers from the HTTP request.
If the request contains a body, it may be read from ``self.reader``
after this coroutine returns.
:raises ~websockets.exceptions.InvalidMessage: if the HTTP message is
malformed or isn't an HTTP/1.1 GET request
"""
try:
path, headers = await read_request(self.reader)
except Exception as exc:
raise InvalidMessage("did not receive a valid HTTP request") from exc
logger.debug("%s < GET %s HTTP/1.1", self.side, path)
logger.debug("%s < %r", self.side, headers)
self.path = path
self.request_headers = headers
return path, headers
def write_http_response(
self, status: http.HTTPStatus, headers: Headers, body: Optional[bytes] = None
) -> None:
"""
Write status line and headers to the HTTP response.
This coroutine is also able to write a response body.
"""
self.response_headers = headers
logger.debug("%s > HTTP/1.1 %d %s", self.side, status.value, status.phrase)
logger.debug("%s > %r", self.side, headers)
# Since the status line and headers only contain ASCII characters,
# we can keep this simple.
response = f"HTTP/1.1 {status.value} {status.phrase}\r\n"
response += str(headers)
self.transport.write(response.encode())
if body is not None:
logger.debug("%s > body (%d bytes)", self.side, len(body))
self.transport.write(body)
async def process_request(
self, path: str, request_headers: Headers
) -> Optional[HTTPResponse]:
"""
Intercept the HTTP request and return an HTTP response if appropriate.
If ``process_request`` returns ``None``, the WebSocket handshake
continues. If it returns a 3-tuple containing a status code, response
headers and a response body, that HTTP response is sent and the
connection is closed. In that case:
* The HTTP status must be a :class:`~http.HTTPStatus`.
* HTTP headers must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name,
value)`` pairs.
* The HTTP response body must be :class:`bytes`. It may be empty.
This coroutine may be overridden in a :class:`WebSocketServerProtocol`
subclass, for example:
* to return an HTTP 200 OK response on a given path; then a load
balancer can use this path for a health check;
* to authenticate the request and return an HTTP 401 Unauthorized or an
HTTP 403 Forbidden when authentication fails.
Instead of subclassing, it is possible to override this method by
passing a ``process_request`` argument to the :func:`serve` function
or the :class:`WebSocketServerProtocol` constructor. This is
equivalent, except ``process_request`` won't have access to the
protocol instance, so it can't store information for later use.
``process_request`` is expected to complete quickly. If it may run for
a long time, then it should await :meth:`wait_closed` and exit if
:meth:`wait_closed` completes, or else it could prevent the server
from shutting down.
:param path: request path, including optional query string
:param request_headers: request headers
"""
if self._process_request is not None:
response = self._process_request(path, request_headers)
if isinstance(response, Awaitable):
return await response
else:
# For backwards compatibility with 7.0.
warnings.warn(
"declare process_request as a coroutine", DeprecationWarning
)
return response # type: ignore
return None
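# A hedged illustration of the override described above (hypothetical
# handler name and path): a coroutine that short-circuits the handshake
# with an early HTTP response for a health-check path, passed as
# serve(..., process_request=health_check).
#
#     async def health_check(path, request_headers):
#         if path == "/healthz":
#             return http.HTTPStatus.OK, [], b"OK\n"
#         return None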
@staticmethod
def process_origin(
headers: Headers, origins: Optional[Sequence[Optional[Origin]]] = None
) -> Optional[Origin]:
"""
Handle the Origin HTTP request header.
:param headers: request headers
:param origins: optional list of acceptable origins
:raises ~websockets.exceptions.InvalidOrigin: if the origin isn't
acceptable
"""
# "The user agent MUST NOT include more than one Origin header field"
# per https://tools.ietf.org/html/rfc6454#section-7.3.
try:
origin = cast(Origin, headers.get("Origin"))
except MultipleValuesError:
raise InvalidHeader("Origin", "more than one Origin header found")
if origins is not None:
if origin not in origins:
raise InvalidOrigin(origin)
return origin
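# Hedged example: with origins=[Origin("https://example.com"), None], a
# request whose Origin header is "https://example.com", or which has no
# Origin header at all, passes this check; any other value raises
# InvalidOrigin.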
@staticmethod
def process_extensions(
headers: Headers,
available_extensions: Optional[Sequence[ServerExtensionFactory]],
) -> Tuple[Optional[str], List[Extension]]:
"""
Handle the Sec-WebSocket-Extensions HTTP request header.
Accept or reject each extension proposed in the client request.
Negotiate parameters for accepted extensions.
Return the Sec-WebSocket-Extensions HTTP response header and the list
of accepted extensions.
:rfc:`6455` leaves the rules up to the specification of each
extension.
To provide this level of flexibility, for each extension proposed by
the client, we check for a match with each extension available in the
server configuration. If no match is found, the extension is ignored.
If several variants of the same extension are proposed by the client,
it may be accepted several times, which won't make sense in general.
Extensions must implement their own requirements. For this purpose,
the list of previously accepted extensions is provided.
This process doesn't allow the server to reorder extensions. It can
only select a subset of the extensions proposed by the client.
Other requirements, for example related to mandatory extensions or the
order of extensions, may be implemented by overriding this method.
:param headers: request headers
:param available_extensions: optional list of supported extensions
:raises ~websockets.exceptions.InvalidHandshake: to abort the
handshake with an HTTP 400 error code
"""
response_header_value: Optional[str] = None
extension_headers: List[ExtensionHeader] = []
accepted_extensions: List[Extension] = []
header_values = headers.get_all("Sec-WebSocket-Extensions")
if header_values and available_extensions:
parsed_header_values: List[ExtensionHeader] = sum(
[parse_extension(header_value) for header_value in header_values], []
)
for name, request_params in parsed_header_values:
for ext_factory in available_extensions:
# Skip non-matching extensions based on their name.
if ext_factory.name != name:
continue
# Skip non-matching extensions based on their params.
try:
response_params, extension = ext_factory.process_request_params(
request_params, accepted_extensions
)
except NegotiationError:
continue
# Add matching extension to the final list.
extension_headers.append((name, response_params))
accepted_extensions.append(extension)
# Break out of the loop once we have a match.
break
# If we didn't break from the loop, no extension in our list
# matched what the client sent. The extension is declined.
# Serialize extension header.
if extension_headers:
response_header_value = build_extension(extension_headers)
return response_header_value, accepted_extensions
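# Illustrative sketch (hedged; the exact response parameters depend on the
# extension factory): a client offering "permessage-deflate" matched
# against [ServerPerMessageDeflateFactory()] produces a response header
# naming the accepted extension and one negotiated Extension instance in
# the returned list.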
# Not @staticmethod because it calls self.select_subprotocol()
def process_subprotocol(
self, headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]]
) -> Optional[Subprotocol]:
"""
Handle the Sec-WebSocket-Protocol HTTP request header.
Return Sec-WebSocket-Protocol HTTP response header, which is the same
as the selected subprotocol.
:param headers: request headers
:param available_subprotocols: optional list of supported subprotocols
:raises ~websockets.exceptions.InvalidHandshake: to abort the
handshake with an HTTP 400 error code
"""
subprotocol: Optional[Subprotocol] = None
header_values = headers.get_all("Sec-WebSocket-Protocol")
if header_values and available_subprotocols:
parsed_header_values: List[Subprotocol] = sum(
[parse_subprotocol(header_value) for header_value in header_values], []
)
subprotocol = self.select_subprotocol(
parsed_header_values, available_subprotocols
)
return subprotocol
def select_subprotocol(
self,
client_subprotocols: Sequence[Subprotocol],
server_subprotocols: Sequence[Subprotocol],
) -> Optional[Subprotocol]:
"""
Pick a subprotocol among those offered by the client.
If several subprotocols are supported by both the client and the
server, the default implementation selects the preferred subprotocol
by giving equal weight to the priorities of the client and the server.
If no subprotocol is supported by both the client and the server, it
proceeds without a subprotocol.
This is unlikely to be the most useful implementation in practice, as
many servers providing a subprotocol will require that the client uses
that subprotocol. Such rules can be implemented in a subclass.
Instead of subclassing, it is possible to override this method by
passing a ``select_subprotocol`` argument to the :func:`serve`
function or the :class:`WebSocketServerProtocol` constructor.
:param client_subprotocols: list of subprotocols offered by the client
:param server_subprotocols: list of subprotocols available on the server
"""
if self._select_subprotocol is not None:
return self._select_subprotocol(client_subprotocols, server_subprotocols)
subprotocols = set(client_subprotocols) & set(server_subprotocols)
if not subprotocols:
return None
priority = lambda p: (
client_subprotocols.index(p) + server_subprotocols.index(p)
)
return sorted(subprotocols, key=priority)[0]
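# Worked example of the default priority: with client_subprotocols
# ['chat', 'echo'] and server_subprotocols ['chat', 'echo'], 'chat'
# scores 0 + 0 = 0 while 'echo' scores 1 + 1 = 2, so 'chat' is selected.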
async def handshake(
self,
origins: Optional[Sequence[Optional[Origin]]] = None,
available_extensions: Optional[Sequence[ServerExtensionFactory]] = None,
available_subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLikeOrCallable] = None,
) -> str:
"""
Perform the server side of the opening handshake.
Return the path of the URI of the request.
:param origins: list of acceptable values of the Origin HTTP header;
include ``None`` if the lack of an origin is acceptable
:param available_extensions: list of supported extensions in the order
in which they should be used
:param available_subprotocols: list of supported subprotocols in order
of decreasing preference
:param extra_headers: sets additional HTTP response headers when the
handshake succeeds; it can be a :class:`~websockets.http.Headers`
instance, a :class:`~collections.abc.Mapping`, an iterable of
``(name, value)`` pairs, or a callable taking the request path and
headers in arguments and returning one of the above.
:raises ~websockets.exceptions.InvalidHandshake: if the handshake
fails
"""
path, request_headers = await self.read_http_request()
# Hook for customizing request handling, for example checking
# authentication or treating some paths as plain HTTP endpoints.
early_response_awaitable = self.process_request(path, request_headers)
if isinstance(early_response_awaitable, Awaitable):
early_response = await early_response_awaitable
else:
# For backwards compatibility with 7.0.
warnings.warn("declare process_request as a coroutine", DeprecationWarning)
early_response = early_response_awaitable # type: ignore
# Change the response to a 503 error if the server is shutting down.
if not self.ws_server.is_serving():
early_response = (
http.HTTPStatus.SERVICE_UNAVAILABLE,
[],
b"Server is shutting down.\n",
)
if early_response is not None:
raise AbortHandshake(*early_response)
key = check_request(request_headers)
self.origin = self.process_origin(request_headers, origins)
extensions_header, self.extensions = self.process_extensions(
request_headers, available_extensions
)
protocol_header = self.subprotocol = self.process_subprotocol(
request_headers, available_subprotocols
)
response_headers = Headers()
build_response(response_headers, key)
if extensions_header is not None:
response_headers["Sec-WebSocket-Extensions"] = extensions_header
if protocol_header is not None:
response_headers["Sec-WebSocket-Protocol"] = protocol_header
if callable(extra_headers):
extra_headers = extra_headers(path, self.request_headers)
if extra_headers is not None:
if isinstance(extra_headers, Headers):
extra_headers = extra_headers.raw_items()
elif isinstance(extra_headers, collections.abc.Mapping):
extra_headers = extra_headers.items()
for name, value in extra_headers:
response_headers[name] = value
response_headers.setdefault("Date", email.utils.formatdate(usegmt=True))
response_headers.setdefault("Server", USER_AGENT)
self.write_http_response(http.HTTPStatus.SWITCHING_PROTOCOLS, response_headers)
self.connection_open()
return path
class WebSocketServer:
"""
WebSocket server returned by :func:`~websockets.server.serve`.
This class provides the same interface as
:class:`~asyncio.AbstractServer`, namely the
:meth:`~asyncio.AbstractServer.close` and
:meth:`~asyncio.AbstractServer.wait_closed` methods.
It keeps track of WebSocket connections in order to close them properly
when shutting down.
Instances of this class store a reference to the :class:`~asyncio.Server`
object returned by :meth:`~asyncio.loop.create_server` rather than inherit
from :class:`~asyncio.Server` in part because
:meth:`~asyncio.loop.create_server` doesn't support passing a custom
:class:`~asyncio.Server` class.
"""
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
# Store a reference to loop to avoid relying on self.server._loop.
self.loop = loop
# Keep track of active connections.
self.websockets: Set[WebSocketServerProtocol] = set()
# Task responsible for closing the server and terminating connections.
self.close_task: Optional[asyncio.Task[None]] = None
# Completed when the server is closed and connections are terminated.
self.closed_waiter: asyncio.Future[None] = loop.create_future()
def wrap(self, server: asyncio.AbstractServer) -> None:
"""
Attach to a given :class:`~asyncio.Server`.
Since :meth:`~asyncio.loop.create_server` doesn't support injecting a
custom ``Server`` class, the easiest solution that doesn't rely on
private :mod:`asyncio` APIs is to:
- instantiate a :class:`WebSocketServer`
- give the protocol factory a reference to that instance
- call :meth:`~asyncio.loop.create_server` with the factory
- attach the resulting :class:`~asyncio.Server` with this method
"""
self.server = server
def register(self, protocol: WebSocketServerProtocol) -> None:
"""
Register a connection with this server.
"""
self.websockets.add(protocol)
def unregister(self, protocol: WebSocketServerProtocol) -> None:
"""
Unregister a connection with this server.
"""
self.websockets.remove(protocol)
def is_serving(self) -> bool:
"""
Tell whether the server is accepting new connections or shutting down.
"""
try:
# Python ≥ 3.7
return self.server.is_serving()
except AttributeError: # pragma: no cover
# Python < 3.7
return self.server.sockets is not None
def close(self) -> None:
"""
Close the server.
This method:
* closes the underlying :class:`~asyncio.Server`;
* rejects new WebSocket connections with an HTTP 503 (service
unavailable) error; this happens when the server accepted the TCP
connection but didn't complete the WebSocket opening handshake prior
to closing;
* closes open WebSocket connections with close code 1001 (going away).
:meth:`close` is idempotent.
"""
if self.close_task is None:
self.close_task = self.loop.create_task(self._close())
async def _close(self) -> None:
"""
Implementation of :meth:`close`.
This calls :meth:`~asyncio.Server.close` on the underlying
:class:`~asyncio.Server` object to stop accepting new connections and
then closes open connections with close code 1001.
"""
# Stop accepting new connections.
self.server.close()
# Wait until self.server.close() completes.
await self.server.wait_closed()
# Wait until all accepted connections reach connection_made() and call
# register(). See https://bugs.python.org/issue34852 for details.
await asyncio.sleep(
0, loop=self.loop if sys.version_info[:2] < (3, 8) else None
)
# Close OPEN connections with status code 1001. Since the server was
# closed, handshake() closes OPENING connections with an HTTP 503 error.
# Wait until all connections are closed.
# asyncio.wait doesn't accept an empty first argument
if self.websockets:
await asyncio.wait(
[websocket.close(1001) for websocket in self.websockets],
loop=self.loop if sys.version_info[:2] < (3, 8) else None,
)
# Wait until all connection handlers are complete.
# asyncio.wait doesn't accept an empty first argument.
if self.websockets:
await asyncio.wait(
[websocket.handler_task for websocket in self.websockets],
loop=self.loop if sys.version_info[:2] < (3, 8) else None,
)
# Tell wait_closed() to return.
self.closed_waiter.set_result(None)
async def wait_closed(self) -> None:
"""
Wait until the server is closed.
When :meth:`wait_closed` returns, all TCP connections are closed and
all connection handlers have returned.
"""
await asyncio.shield(self.closed_waiter)
@property
def sockets(self) -> Optional[List[socket.socket]]:
"""
List of :class:`~socket.socket` objects the server is listening to.
``None`` if the server is closed.
"""
return self.server.sockets
class Serve:
"""
Create, start, and return a WebSocket server on ``host`` and ``port``.
Whenever a client connects, the server accepts the connection, creates a
:class:`WebSocketServerProtocol`, performs the opening handshake, and
delegates to the connection handler defined by ``ws_handler``. Once the
handler completes, either normally or with an exception, the server
performs the closing handshake and closes the connection.
Awaiting :func:`serve` yields a :class:`WebSocketServer`. This instance
provides :meth:`~websockets.server.WebSocketServer.close` and
:meth:`~websockets.server.WebSocketServer.wait_closed` methods for
terminating the server and cleaning up its resources.
When a server is closed with :meth:`~WebSocketServer.close`, it closes all
connections with close code 1001 (going away). Connection handlers, which
are running the ``ws_handler`` coroutine, will receive a
:exc:`~websockets.exceptions.ConnectionClosedOK` exception on their
current or next interaction with the WebSocket connection.
:func:`serve` can also be used as an asynchronous context manager. In
this case, the server is shut down when exiting the context.
:func:`serve` is a wrapper around the event loop's
:meth:`~asyncio.loop.create_server` method. It creates and starts a
:class:`~asyncio.Server` with :meth:`~asyncio.loop.create_server`. Then it
wraps the :class:`~asyncio.Server` in a :class:`WebSocketServer` and
returns the :class:`WebSocketServer`.
The ``ws_handler`` argument is the WebSocket handler. It must be a
coroutine accepting two arguments: a :class:`WebSocketServerProtocol` and
the request URI.
The ``host`` and ``port`` arguments, as well as unrecognized keyword
arguments, are passed along to :meth:`~asyncio.loop.create_server`.
For example, you can set the ``ssl`` keyword argument to a
:class:`~ssl.SSLContext` to enable TLS.
The ``create_protocol`` parameter allows customizing the
:class:`~asyncio.Protocol` that manages the connection. It should be a
callable or class accepting the same arguments as
:class:`WebSocketServerProtocol` and returning an instance of
:class:`WebSocketServerProtocol` or a subclass. It defaults to
:class:`WebSocketServerProtocol`.
The behavior of ``ping_interval``, ``ping_timeout``, ``close_timeout``,
``max_size``, ``max_queue``, ``read_limit``, and ``write_limit`` is
described in :class:`~websockets.protocol.WebSocketCommonProtocol`.
:func:`serve` also accepts the following optional arguments:
* ``compression`` is a shortcut to configure compression extensions;
by default it enables the "permessage-deflate" extension; set it to
``None`` to disable compression
* ``origins`` defines acceptable Origin HTTP headers; include ``None`` if
the lack of an origin is acceptable
* ``extensions`` is a list of supported extensions in order of
decreasing preference
* ``subprotocols`` is a list of supported subprotocols in order of
decreasing preference
* ``extra_headers`` sets additional HTTP response headers when the
handshake succeeds; it can be a :class:`~websockets.http.Headers`
instance, a :class:`~collections.abc.Mapping`, an iterable of ``(name,
value)`` pairs, or a callable taking the request path and headers in
arguments and returning one of the above
* ``process_request`` allows intercepting the HTTP request; it must be a
coroutine taking the request path and headers in argument; see
:meth:`~WebSocketServerProtocol.process_request` for details
* ``select_subprotocol`` allows customizing the logic for selecting a
subprotocol; it must be a callable taking the subprotocols offered by
the client and available on the server in argument; see
:meth:`~WebSocketServerProtocol.select_subprotocol` for details
Since there's no useful way to propagate exceptions triggered in handlers,
they're sent to the ``'websockets.server'`` logger instead. Debugging is
much easier if you configure logging to print them::
import logging
logger = logging.getLogger('websockets.server')
logger.setLevel(logging.ERROR)
logger.addHandler(logging.StreamHandler())
"""
def __init__(
self,
ws_handler: Callable[[WebSocketServerProtocol, str], Awaitable[Any]],
host: Optional[Union[str, Sequence[str]]] = None,
port: Optional[int] = None,
*,
path: Optional[str] = None,
create_protocol: Optional[Type[WebSocketServerProtocol]] = None,
ping_interval: Optional[float] = 20,
ping_timeout: Optional[float] = 20,
close_timeout: Optional[float] = None,
max_size: Optional[int] = 2 ** 20,
max_queue: Optional[int] = 2 ** 5,
read_limit: int = 2 ** 16,
write_limit: int = 2 ** 16,
loop: Optional[asyncio.AbstractEventLoop] = None,
legacy_recv: bool = False,
klass: Optional[Type[WebSocketServerProtocol]] = None,
timeout: Optional[float] = None,
compression: Optional[str] = "deflate",
origins: Optional[Sequence[Optional[Origin]]] = None,
extensions: Optional[Sequence[ServerExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLikeOrCallable] = None,
process_request: Optional[
Callable[[str, Headers], Awaitable[Optional[HTTPResponse]]]
] = None,
select_subprotocol: Optional[
Callable[[Sequence[Subprotocol], Sequence[Subprotocol]], Subprotocol]
] = None,
**kwargs: Any,
) -> None:
# Backwards compatibility: close_timeout used to be called timeout.
if timeout is None:
timeout = 10
else:
warnings.warn("rename timeout to close_timeout", DeprecationWarning)
# If both are specified, timeout is ignored.
if close_timeout is None:
close_timeout = timeout
# Backwards compatibility: create_protocol used to be called klass.
if klass is None:
klass = WebSocketServerProtocol
else:
warnings.warn("rename klass to create_protocol", DeprecationWarning)
# If both are specified, klass is ignored.
if create_protocol is None:
create_protocol = klass
if loop is None:
loop = asyncio.get_event_loop()
ws_server = WebSocketServer(loop)
secure = kwargs.get("ssl") is not None
if compression == "deflate":
if extensions is None:
extensions = []
if not any(
ext_factory.name == ServerPerMessageDeflateFactory.name
for ext_factory in extensions
):
extensions = list(extensions) + [ServerPerMessageDeflateFactory()]
elif compression is not None:
raise ValueError(f"unsupported compression: {compression}")
factory = functools.partial(
create_protocol,
ws_handler,
ws_server,
host=host,
port=port,
secure=secure,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
close_timeout=close_timeout,
max_size=max_size,
max_queue=max_queue,
read_limit=read_limit,
write_limit=write_limit,
loop=loop,
legacy_recv=legacy_recv,
origins=origins,
extensions=extensions,
subprotocols=subprotocols,
extra_headers=extra_headers,
process_request=process_request,
select_subprotocol=select_subprotocol,
)
if path is None:
create_server = functools.partial(
loop.create_server, factory, host, port, **kwargs
)
else:
# unix_serve(path) must not specify host and port parameters.
assert host is None and port is None
create_server = functools.partial(
loop.create_unix_server, factory, path, **kwargs
)
# This is a coroutine function.
self._create_server = create_server
self.ws_server = ws_server
# async with serve(...)
async def __aenter__(self) -> WebSocketServer:
return await self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.ws_server.close()
await self.ws_server.wait_closed()
# await serve(...)
def __await__(self) -> Generator[Any, None, WebSocketServer]:
# Create a suitable iterator by calling __await__ on a coroutine.
return self.__await_impl__().__await__()
async def __await_impl__(self) -> WebSocketServer:
server = await self._create_server()
self.ws_server.wrap(server)
return self.ws_server
# yield from serve(...)
__iter__ = __await__
serve = Serve
def unix_serve(
ws_handler: Callable[[WebSocketServerProtocol, str], Awaitable[Any]],
path: str,
**kwargs: Any,
) -> Serve:
"""
Similar to :func:`serve`, but for listening on Unix sockets.
This function calls the event loop's
:meth:`~asyncio.loop.create_unix_server` method.
It is only available on Unix.
It's useful for deploying a server behind a reverse proxy such as nginx.
:param path: file system path to the Unix socket
"""
return serve(ws_handler, path=path, **kwargs)
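# A minimal usage sketch (hedged: assumes this module is importable as
# ``websockets``; the echo handler and port are hypothetical).
#
#     import asyncio
#     import websockets
#
#     async def echo(websocket, path):
#         message = await websocket.recv()
#         await websocket.send(message)
#
#     start_server = websockets.serve(echo, "localhost", 8765)
#     asyncio.get_event_loop().run_until_complete(start_server)
#     asyncio.get_event_loop().run_forever()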
|
"""
Licensed under the Unlicense License;
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://unlicense.org
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import threading
import tensorflow as tf
import numpy as np
class Network:
def __init__(self, synaptic_weights_0=np.array([]), synaptic_weights_1=np.array([]),
images=np.array([]), labels=np.array([]),
train_iterations=20, train_output_labels=4, debug=True):
self.synaptic_weights_0 = synaptic_weights_0
self.synaptic_weights_1 = synaptic_weights_1
self.images = images
self.labels = labels
self.train_iterations = train_iterations
self.debug = debug
self.train_output_labels = train_output_labels
self.model = None
def save_weights(self, file):
if self.debug:
print('Saving model...')
self.model.save(os.path.splitext(file)[0] + '.h5')
if self.debug:
print('Done. File', file, 'saved.')
def load_weights(self, file):
if self.debug:
print('Loading model...')
self.model = tf.keras.models.load_model(os.path.splitext(file)[0] + '.h5')
if self.debug:
print('Done. Model from file', file, 'loaded.')
def start_training(self):
self.model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(600,)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(self.train_output_labels)
])
self.model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# TRAINING
thread = threading.Thread(target=self.training)
thread.start()
def predict(self):
predicted_classes = []
predictions = self.model.predict(self.images)
for prediction in predictions:
predicted_classes.append(np.argmax(prediction))
return predicted_classes
def training(self):
# noinspection PyBroadException
try:
self.labels = np.reshape(self.labels, len(self.labels[0]))
self.model.fit(self.images, self.labels, epochs=self.train_iterations)
except:
print(sys.exc_info())
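# A hypothetical usage sketch (fabricated shapes; the class expects
# flattened 600-element image vectors and a (1, n) label array):
#
#     import numpy as np
#     net = Network(images=np.random.rand(8, 600),
#                   labels=np.array([[0, 1, 2, 3, 0, 1, 2, 3]]),
#                   train_iterations=5, train_output_labels=4)
#     net.start_training()            # fit() runs on a background thread
#     classes = net.predict()         # call once training has finished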
|
import unittest
from collections import namedtuple
from tests.harness import instrumentGooey
from gooey import GooeyParser
from gooey.tests import *
Case = namedtuple('Case', 'inputs initialExpected expectedAfterClearing')
class TestTextField(unittest.TestCase):
def makeParser(self, **kwargs):
parser = GooeyParser(description='description')
parser.add_argument('--widget', widget="TextField", **kwargs)
return parser
def testPlaceholder(self):
cases = [
[{}, ''],
[{'placeholder': 'Hello'}, 'Hello']
]
for options, expected in cases:
parser = self.makeParser(gooey_options=options)
with instrumentGooey(parser) as (app, gooeyApp):
# Because of how poorly designed the Gooey widgets are,
# we have to reach down 3 levels in order to find the
# actual WX object we need to test.
widget = gooeyApp.configs[0].reifiedWidgets[0].widget
self.assertEqual(widget.widget.GetHint(), expected)
def testDefaultAndInitialValue(self):
cases = [
# initial_value takes precedence when both are present
Case(
{'default': 'default_val', 'gooey_options': {'initial_value': 'some val'}},
'some val',
None),
# when no default is present
# Case({'gooey_options': {'initial_value': 'some val'}},
# 'some val',
# ''),
# [{'default': 'default', 'gooey_options': {}},
# 'default'],
# [{'default': 'default'},
# 'default'],
]
for case in cases:
parser = self.makeParser(**case.inputs)
with instrumentGooey(parser) as (app, gooeyApp):
widget = gooeyApp.configs[0].reifiedWidgets[0]
self.assertEqual(widget.getValue()['rawValue'], case.initialExpected)
widget.setValue('')
print(widget.getValue())
self.assertEqual(widget.getValue()['cmd'], case.expectedAfterClearing)
if __name__ == '__main__':
unittest.main()
|
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
for aa in path_adds:
    if aa not in sys.path:
        sys.path.append(aa)
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler() class
kwargs = {}
kwargs["objtype"] = u'sensor'
kwargs["name"] = u'Computer Name'
print "...CALLING: handler.get with args: {}".format(kwargs)
response = handler.get(**kwargs)
print "...OUTPUT: Type of response: ", type(response)
print "...OUTPUT: print of response:"
print response
# call the export_obj() method to convert response to JSON and store it in out
export_kwargs = {}
export_kwargs['obj'] = response
export_kwargs['export_format'] = 'json'
print "...CALLING: handler.export_obj() with args {}".format(export_kwargs)
out = handler.export_obj(**export_kwargs)
# trim the output if it is more than 15 lines long
if len(out.splitlines()) > 15:
out = out.splitlines()[0:15]
out.append('..trimmed for brevity..')
out = '\n'.join(out)
print "...OUTPUT: print the objects returned in JSON format:"
print out
|
from selenium import webdriver
from test_plus.test import TestCase
from selenium.webdriver.common.keys import Keys
from .data_for_test import visitor_urls, login_user_urls, admin_urls
from time import sleep
class NewVisitorTest(TestCase):
def setUp(self):
self.browser = webdriver.Chrome()
self.browser.maximize_window()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def test_can_visit_home_page(self):
self.browser.get('http://localhost:8000/')
assert 'Balaji' in self.browser.title
links = []
urls = []
for link in self.browser.find_elements_by_css_selector(".nav-link"):
links.append(link)
urls.append((link.get_attribute("href")))
assert link.text in visitor_urls
assert len(links) == len(visitor_urls)
for u in urls:
self.browser.get(u)
self.browser.back()
def test_can_visit_signup_page(self):
self.browser.get('http://localhost:8000/accounts/signup')
assert 'Signup' in self.browser.title
links = []
urls = []
for link in self.browser.find_elements_by_css_selector(".nav-link"):
links.append(link)
urls.append((link.get_attribute("href")))
assert link.text in visitor_urls
assert len(links) == len(visitor_urls)
for u in urls:
self.browser.get(u)
self.browser.back()
def test_can_visit_signin_page(self):
self.browser.get('http://localhost:8000/accounts/login')
assert 'Sign In' in self.browser.title
links = []
urls = []
for link in self.browser.find_elements_by_css_selector(".nav-link"):
links.append(link)
urls.append((link.get_attribute("href")))
assert link.text in visitor_urls
assert len(links) == len(visitor_urls)
for u in urls:
self.browser.get(u)
self.browser.back()
def test_can_signin_user(self):
self.browser.get('http://localhost:8000/accounts/login')
sleep(3)
username = self.browser.find_element_by_id("id_login")
password = self.browser.find_element_by_id("id_password")
username.send_keys("venkat")
password.send_keys("vesneven")
login_attempt = self.browser.find_element_by_xpath("//*[@type='submit']")
login_attempt.submit()
sleep(3)
assert 'http://localhost:8000/users/venkat/' == self.browser.current_url
links = []
urls = []
for link in self.browser.find_elements_by_css_selector(".nav-link"):
links.append(link)
urls.append((link.get_attribute("href")))
assert link.text in login_user_urls
assert len(links) == len(login_user_urls)
for u in urls:
self.browser.get(u)
self.browser.back()
self.browser.get('http://localhost:8000/accounts/logout')
signout = self.browser.find_element_by_xpath("//*[@type='submit']")
signout.submit()
sleep(1)
assert 'Balaji' in self.browser.title
def test_can_signin_admin(self):
self.browser.get('http://localhost:8000/accounts/login')
sleep(3)
username = self.browser.find_element_by_id("id_login")
password = self.browser.find_element_by_id("id_password")
username.send_keys("kvreddy")
password.send_keys("vesneven")
login_attempt = self.browser.find_element_by_xpath("//*[@type='submit']")
login_attempt.submit()
sleep(3)
assert 'http://localhost:8000/users/kvreddy/' == self.browser.current_url
links = []
urls = []
for link in self.browser.find_elements_by_css_selector(".nav-link"):
links.append(link)
urls.append((link.get_attribute("href")))
assert link.text in admin_urls
assert len(links) == len(admin_urls)
for u in urls:
self.browser.get(u)
self.browser.back()
self.browser.get('http://localhost:8000/accounts/logout')
signout = self.browser.find_element_by_xpath("//*[@type='submit']")
signout.submit()
sleep(1)
assert 'Balaji' in self.browser.title
|
'''
Created on 17 Mar 2018
@author: julianporter
'''
from numbers import Number
def dot(v1,v2):
return sum([x*y for (x,y) in zip(v1,v2)])
def allLists(args):
return all([isinstance(x,list) for x in args])
def isList(x):
return isinstance(x,list)
def allNumbers(args):
return all([isinstance(x,Number) and not isinstance(x,bool) for x in args])
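# Quick sanity examples (added for illustration):
#
#     >>> dot([1, 2, 3], [4, 5, 6])
#     32
#     >>> allLists([[1], [2]])
#     True
#     >>> allNumbers([1, 2.5, True])
#     False   # bools are deliberately excluded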
|
import sys, os
import matplotlib.pyplot as plt
import numpy as np
import utils
import subprocess
metro = [True, False]
points= 20
temps = np.linspace(0.2,3.0,num=points)
nspins = 50
j = 1.0
hs = [0.0, 0.02]
nblk = 100
nsteps = 10000
run = 0
dbiter = 0
nrestarts = 5
total = len(metro)*len(temps)*len(hs)*nrestarts
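# With the values above: 2 (metro) * 20 (temps) * 2 (hs) * 5 (restarts) = 400 runs.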
utils.clean()
for i1, m in enumerate(metro):
for i2, t in enumerate(temps):
for i3, h in enumerate(hs):
dbname = "database"+str(dbiter)
print(f"dbname: database{dbiter}")
for restart in range(nrestarts):
print(f"---> Iter {iter+1}/{total}: restart={restart}\th={h}\ttemp={t}\tmetro={m}")
utils.create_temp_ini(t,nspins,j,h,m,nsteps,nblk)
p = subprocess.Popen(f"make && ./main tempfile.ini {restart} {dbname}", shell=True, stdout = subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
#print(out.decode("utf-8"))
#print(err.decode("utf-8"))
run += 1
dbiter += 1
|
'''
@Description: common, shared components
@Author: Zigar
@Date: 2021/03/05 15:37:00
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.attention import SEModule, CBAM
from config.config import cfg
#---------------------------------------------------------------#
# Mish activation function
#---------------------------------------------------------------#
class Mish(nn.Module):
def __init__(self):
super(Mish, self).__init__()
def forward(self, x):
return x * torch.tanh(F.softplus(x))
#---------------------------------------------------------------#
# Normalization and activation functions
#---------------------------------------------------------------#
norm_name = {"bn": nn.BatchNorm2d}
activate_name = {
"relu" : nn.ReLU,
"leaky_relu": nn.LeakyReLU,
"linear" : nn.Identity(),
"mish" : Mish(),
}
#---------------------------------------------------------------#
# Convolutional
# Convolution block = convolution + normalization + activation
# Conv2d + BatchNormalization + Mish / ReLU / LeakyReLU
#---------------------------------------------------------------#
class Convolutional(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride = 1,
norm = "bn",
activate="mish"):
super(Convolutional, self).__init__()
self.norm = norm
self.activate = activate
self.__conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride,
kernel_size // 2,
bias=False
)
if norm:
assert norm in norm_name.keys()
if norm == "bn":
self.__norm = norm_name[norm](out_channels)
if activate:
assert activate in activate_name.keys()
if activate == "leaky_relu":
self.__activate = activate_name[activate](
negative_slope=0.1, inplace=False
)
if activate == "relu":
self.__activate = activate_name[activate](inplace=False)
if activate == "mish":
self.__activate = activate_name[activate]
def forward(self, x):
x = self.__conv(x)
if self.norm:
x = self.__norm(x)
if self.activate:
x = self.__activate(x)
return x
#---------------------------------------------------------------#
# Resblock
# Building block of the CSPDarknet backbone:
# small residual blocks stacked internally,
# optionally wrapped with an attention module
#---------------------------------------------------------------#
class Resblock(nn.Module):
def __init__(
self,
channels,
hidden_channels = None,
# residual_activation="linear",
):
super(Resblock, self).__init__()
if hidden_channels is None:
hidden_channels = channels
self.block = nn.Sequential(
Convolutional(channels, hidden_channels, 1),
Convolutional(hidden_channels, channels, 3)
)
# self.activation = activate_name[residual_activation]
self.attention = cfg.MODEL.ATTENTION["TYPE"]
if self.attention == "SEnet":
self.attention_module = SEModule(channels)
elif self.attention == "CBAM":
self.attention_module = CBAM(channels)
else:
self.attention = None
def forward(self, x):
residual = x
out = self.block(x)
if self.attention is not None:
out = self.attention_module(out)
out += residual
return out
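# A minimal shape check, hedged (assumes cfg.MODEL.ATTENTION["TYPE"] is
# configured; the input size is hypothetical):
#
#     x = torch.randn(1, 64, 32, 32)
#     block = Resblock(64)
#     assert block(x).shape == x.shape   # residual blocks preserve shape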
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: val.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from cmdb_sdk.model.inspection import condition_pb2 as cmdb__sdk_dot_model_dot_inspection_dot_condition__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='val.proto',
package='inspection',
syntax='proto3',
serialized_options=_b('ZDgo.easyops.local/contracts/protorepo-models/easyops/model/inspection'),
serialized_pb=_b('\n\tval.proto\x12\ninspection\x1a)cmdb_sdk/model/inspection/condition.proto\"\x98\x01\n\rInspectionVal\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04memo\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0c\n\x04unit\x18\x05 \x01(\t\x12\x0e\n\x06weight\x18\x06 \x01(\x05\x12\x33\n\nconditions\x18\x07 \x03(\x0b\x32\x1f.inspection.InspectionConditionBFZDgo.easyops.local/contracts/protorepo-models/easyops/model/inspectionb\x06proto3')
,
dependencies=[cmdb__sdk_dot_model_dot_inspection_dot_condition__pb2.DESCRIPTOR,])
_INSPECTIONVAL = _descriptor.Descriptor(
name='InspectionVal',
full_name='inspection.InspectionVal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='inspection.InspectionVal.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='inspection.InspectionVal.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='inspection.InspectionVal.memo', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='inspection.InspectionVal.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unit', full_name='inspection.InspectionVal.unit', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight', full_name='inspection.InspectionVal.weight', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conditions', full_name='inspection.InspectionVal.conditions', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=69,
serialized_end=221,
)
_INSPECTIONVAL.fields_by_name['conditions'].message_type = cmdb__sdk_dot_model_dot_inspection_dot_condition__pb2._INSPECTIONCONDITION
DESCRIPTOR.message_types_by_name['InspectionVal'] = _INSPECTIONVAL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InspectionVal = _reflection.GeneratedProtocolMessageType('InspectionVal', (_message.Message,), {
'DESCRIPTOR' : _INSPECTIONVAL,
'__module__' : 'val_pb2'
# @@protoc_insertion_point(class_scope:inspection.InspectionVal)
})
_sym_db.RegisterMessage(InspectionVal)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
from datetime import datetime
from pprint import pprint, pformat
import requests
import scapy.all as scapy
from scapy2dict import to_dict
def get_filter(ip, protocol, port):
capture_filter = ""
if ip:
capture_filter += "host " + ip
if protocol:
if ip:
capture_filter += " and " + protocol
else:
capture_filter += protocol
if port:
capture_filter += " port " + port
return capture_filter
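# Examples of the BPF strings produced (illustrative):
#     get_filter("10.0.0.1", "tcp", "443")  -> "host 10.0.0.1 and tcp port 443"
#     get_filter(None, "udp", None)         -> "udp"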
def get_packets_from_capture(capture):
packets = list()
for packet in capture:
packet_dict = to_dict(packet, strict=True)
if "Raw" in packet_dict:
del packet_dict["Raw"]
packet_dict["hexdump"] = scapy.hexdump(packet, dump=True)
packet_dict_no_bytes = bytes_to_string(packet_dict)
packets.append(packet_dict_no_bytes)
pprint(packet_dict_no_bytes)
return packets
def send_capture(source, destination, timestamp, packets):
capture_payload = {
"source": source,
"timestamp": timestamp,
"packets": packets,
}
rsp = requests.post(
"http://" + destination + "/capture/store", json=capture_payload
)
if rsp.status_code != 200:
print(
f"{str(datetime.now())[:-3]}: Error calling /capture/store response: {rsp.status_code}, {rsp.content}"
)
return rsp.status_code
def bytes_to_string(data):
if isinstance(data, dict):
for k, v in data.items():
data[k] = bytes_to_string(v)
return data
elif isinstance(data, list):
for index, v in enumerate(data):
data[index] = bytes_to_string(v)
return data
elif isinstance(data, tuple):
data_as_list = list(data)
for index, v in enumerate(data_as_list):
data_as_list[index] = bytes_to_string(v)
return tuple(data_as_list)
elif isinstance(data, bytes):
return data.decode("latin-1")
else:
return data
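# Illustrative example: bytes are decoded as latin-1 recursively, so
#     bytes_to_string({"a": b"\xe9", "b": [b"x", (b"y",)]})
# returns {"a": "é", "b": ["x", ("y",)]}.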
def send_portscan(source, destination, target, token, timestamp, scan_output):
portscan_payload = {
"source": source,
"target": target,
"token": token,
"timestamp": timestamp,
"scan_output": pformat(scan_output),
}
rsp = requests.post(
"http://" + destination + "/worker/portscan", json=portscan_payload
)
if rsp.status_code != 204:
print(
f"{str(datetime.now())[:-3]}: Error calling /worker/portscan response: {rsp.status_code}, {rsp.content}"
)
return rsp.status_code
def send_traceroute(source, destination, target, token, timestamp, traceroute_graph_bytes):
traceroute_payload = {
"source": source,
"target": target,
"token": token,
"timestamp": timestamp,
"traceroute_img": traceroute_graph_bytes,
}
rsp = requests.post(
"http://" + destination + "/worker/traceroute", json=portscan_payload
)
if rsp.status_code != 204:
print(
f"{str(datetime.now())[:-3]}: Error calling /worker/traceroute response: {rsp.status_code}, {rsp.content}"
)
return rsp.status_code
|
import os
from seleniumbase import BaseCase
class ScreenshotTests(BaseCase):
def test_save_screenshot(self):
self.open("https://seleniumbase.io/demo_page")
# "./downloaded_files" is a special SeleniumBase folder for downloads
self.save_screenshot("demo_page.png", folder="./downloaded_files")
self.assert_downloaded_file("demo_page.png")
print('\n"%s/%s" was saved!' % ("downloaded_files", "demo_page.png"))
def test_save_screenshot_to_logs(self):
self.open("https://seleniumbase.io/demo_page")
self.save_screenshot_to_logs()
# "self.log_path" is the absolute path to the "./latest_logs" folder.
# Each test that generates log files will create a subfolder in there
test_logpath = os.path.join(self.log_path, self.test_id)
expected_screenshot = os.path.join(test_logpath, "_1_screenshot.png")
self.assert_true(os.path.exists(expected_screenshot))
print('\n"%s" was saved!' % (expected_screenshot))
self.open("https://seleniumbase.io/tinymce/")
self.save_screenshot_to_logs()
expected_screenshot = os.path.join(test_logpath, "_2_screenshot.png")
self.assert_true(os.path.exists(expected_screenshot))
print('"%s" was saved!' % (expected_screenshot))
self.open("https://seleniumbase.io/error_page/")
self.save_screenshot_to_logs("error_page")
expected_screenshot = os.path.join(test_logpath, "_3_error_page.png")
self.assert_true(os.path.exists(expected_screenshot))
print('"%s" was saved!' % (expected_screenshot))
self.open("https://seleniumbase.io/devices/")
self.save_screenshot_to_logs("devices")
expected_screenshot = os.path.join(test_logpath, "_4_devices.png")
self.assert_true(os.path.exists(expected_screenshot))
print('"%s" was saved!' % (expected_screenshot))
|
"""
Multi-dimensional Scaling (MDS)
"""
# author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
import numpy as np
from joblib import Parallel, delayed, effective_n_jobs
import warnings
from ..base import BaseEstimator
from ..metrics import euclidean_distances
from ..utils import check_random_state, check_array, check_symmetric
from ..isotonic import IsotonicRegression
from ..utils.validation import _deprecate_positional_args
def _smacof_single(dissimilarities, metric=True, n_components=2, init=None,
max_iter=300, verbose=0, eps=1e-3, random_state=None):
"""Computes multidimensional scaling using SMACOF algorithm
Parameters
----------
dissimilarities : ndarray, shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : boolean, optional, default: True
Compute metric or nonmetric SMACOF algorithm.
n_components : int, optional, default: 2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : ndarray, shape (n_samples, n_components), optional, default: None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, optional, default: 0
Level of verbosity.
eps : float, optional, default: 1e-3
Relative tolerance with respect to stress at which to declare
convergence.
random_state : int, RandomState instance, default=None
Determines the random number generator used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray, shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points).
n_iter : int
The number of iterations corresponding to the best stress.
"""
dissimilarities = check_symmetric(dissimilarities, raise_exception=True)
n_samples = dissimilarities.shape[0]
random_state = check_random_state(random_state)
sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
sim_flat_w = sim_flat[sim_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.rand(n_samples * n_components)
X = X.reshape((n_samples, n_components))
else:
# overrides the n_components parameter
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError("init matrix should be of shape (%d, %d)" %
(n_samples, n_components))
X = init
old_stress = None
ir = IsotonicRegression()
for it in range(max_iter):
# Compute distance and monotonic regression
dis = euclidean_distances(X)
if metric:
disparities = dissimilarities
else:
dis_flat = dis.ravel()
# dissimilarities with 0 are considered as missing values
dis_flat_w = dis_flat[sim_flat != 0]
# Compute the disparities using a monotonic regression
disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
disparities = dis_flat.copy()
disparities[sim_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
(disparities ** 2).sum())
# Compute stress
stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2
# Update X using the Guttman transform
dis[dis == 0] = 1e-5
ratio = disparities / dis
B = - ratio
B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
X = 1. / n_samples * np.dot(B, X)
dis = np.sqrt((X ** 2).sum(axis=1)).sum()
if verbose >= 2:
print('it: %d, stress %s' % (it, stress))
if old_stress is not None:
if (old_stress - stress / dis) < eps:
if verbose:
print('breaking at iteration %d with stress %s' % (it,
stress))
break
old_stress = stress / dis
return X, stress, it + 1
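# In the loop above, the stress is sigma(X) = (1/2) * sum_ij (d_ij(X) - dhat_ij)^2
# (the full symmetric matrix halved, i.e. the sum over i < j), and the update
# is the Guttman transform X <- (1/n) * B(X) X, with B built from the
# disparity/distance ratios as in Borg & Groenen (1997).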
@_deprecate_positional_args
def smacof(dissimilarities, *, metric=True, n_components=2, init=None,
n_init=8, n_jobs=None, max_iter=300, verbose=0, eps=1e-3,
random_state=None, return_n_iter=False):
"""Computes multidimensional scaling using the SMACOF algorithm.
The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a
multidimensional scaling algorithm which minimizes an objective function
(the *stress*) using a majorization technique. Stress majorization, also
known as the Guttman Transform, guarantees a monotone convergence of
stress, and is more powerful than traditional techniques such as gradient
descent.
The SMACOF algorithm for metric MDS can be summarized by the following steps:
1. Set an initial start configuration, randomly or not.
2. Compute the stress
3. Compute the Guttman Transform
4. Iterate 2 and 3 until convergence.
The nonmetric algorithm adds a monotonic regression step before computing
the stress.
Parameters
----------
dissimilarities : ndarray, shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : boolean, optional, default: True
Compute metric or nonmetric SMACOF algorithm.
n_components : int, optional, default: 2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : ndarray, shape (n_samples, n_components), optional, default: None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
n_init : int, optional, default: 8
Number of times the SMACOF algorithm will be run with different
initializations. The final results will be the best output of the runs,
determined by the run with the smallest final stress. If ``init`` is
provided, this option is overridden and a single run is performed.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. If multiple
initializations are used (``n_init``), each run of the algorithm is
computed in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, optional, default: 0
Level of verbosity.
eps : float, optional, default: 1e-3
Relative tolerance with respect to stress at which to declare
convergence.
random_state : int, RandomState instance, default=None
Determines the random number generator used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
return_n_iter : bool, optional, default: False
Whether or not to return the number of iterations.
Returns
-------
X : ndarray, shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points).
n_iter : int
The number of iterations corresponding to the best stress. Returned
only if ``return_n_iter`` is set to ``True``.
Notes
-----
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
dissimilarities = check_array(dissimilarities)
random_state = check_random_state(random_state)
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
if not n_init == 1:
warnings.warn(
'Explicit initial positions passed: '
'performing only one init of the MDS instead of %d'
% n_init)
n_init = 1
best_pos, best_stress = None, None
if effective_n_jobs(n_jobs) == 1:
for it in range(n_init):
pos, stress, n_iter_ = _smacof_single(
dissimilarities, metric=metric,
n_components=n_components, init=init,
max_iter=max_iter, verbose=verbose,
eps=eps, random_state=random_state)
if best_stress is None or stress < best_stress:
best_stress = stress
best_pos = pos.copy()
best_iter = n_iter_
else:
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
delayed(_smacof_single)(
dissimilarities, metric=metric, n_components=n_components,
init=init, max_iter=max_iter, verbose=verbose, eps=eps,
random_state=seed)
for seed in seeds)
positions, stress, n_iters = zip(*results)
best = np.argmin(stress)
best_stress = stress[best]
best_pos = positions[best]
best_iter = n_iters[best]
if return_n_iter:
return best_pos, best_stress, best_iter
else:
return best_pos, best_stress
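# A minimal sketch (not part of the library) of the quantity SMACOF minimizes:
# raw stress = 0.5 * sum((d_ij - dhat_ij) ** 2), where d_ij are pairwise
# distances in the embedding X and dhat_ij are the input dissimilarities
# (disparities in the nonmetric case). Assumes the euclidean_distances
# helper already used elsewhere in this module.
def _raw_stress_sketch(dissimilarities, X):
    distances = euclidean_distances(X)
    return ((distances.ravel() - dissimilarities.ravel()) ** 2).sum() / 2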
class MDS(BaseEstimator):
"""Multidimensional scaling
Read more in the :ref:`User Guide <multidimensional_scaling>`.
Parameters
----------
n_components : int, optional, default: 2
Number of dimensions in which to immerse the dissimilarities.
metric : boolean, optional, default: True
If ``True``, perform metric MDS; otherwise, perform nonmetric MDS.
n_init : int, optional, default: 4
Number of times the SMACOF algorithm will be run with different
initializations. The final results will be the best output of the runs,
determined by the run with the smallest final stress.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, optional, default: 0
Level of verbosity.
eps : float, optional, default: 1e-3
Relative tolerance with respect to stress at which to declare
convergence.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. If multiple
initializations are used (``n_init``), each run of the algorithm is
computed in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
    random_state : int, RandomState instance, default=None
        Determines the random number generator used to initialize the
        embedding. Pass an int for reproducible results across multiple
        function calls. See :term:`Glossary <random_state>`.
dissimilarity : 'euclidean' | 'precomputed', optional, default: 'euclidean'
Dissimilarity measure to use:
- 'euclidean':
Pairwise Euclidean distances between points in the dataset.
- 'precomputed':
Pre-computed dissimilarities are passed directly to ``fit`` and
``fit_transform``.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the position of the dataset in the embedding space.
    stress_ : float
        The final value of the stress (sum of squared differences between
        the disparities and the distances for all constrained points).
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import MDS
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = MDS(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
References
----------
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
@_deprecate_positional_args
def __init__(self, n_components=2, *, metric=True, n_init=4,
max_iter=300, verbose=0, eps=1e-3, n_jobs=None,
random_state=None, dissimilarity="euclidean"):
self.n_components = n_components
self.dissimilarity = dissimilarity
self.metric = metric
self.n_init = n_init
self.max_iter = max_iter
self.eps = eps
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
@property
def _pairwise(self):
        return self.dissimilarity == "precomputed"
def fit(self, X, y=None, init=None):
"""
Computes the position of the points in the embedding space
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity=='precomputed'``, the input should
be the dissimilarity matrix.
y : Ignored
        init : ndarray, shape (n_samples, n_components), optional, default: None
Starting configuration of the embedding to initialize the SMACOF
algorithm. By default, the algorithm is initialized with a randomly
chosen array.
"""
self.fit_transform(X, init=init)
return self
def fit_transform(self, X, y=None, init=None):
"""
Fit the data from X, and returns the embedded coordinates
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity=='precomputed'``, the input should
be the dissimilarity matrix.
y : Ignored
        init : ndarray, shape (n_samples, n_components), optional, default: None
Starting configuration of the embedding to initialize the SMACOF
algorithm. By default, the algorithm is initialized with a randomly
chosen array.
"""
X = self._validate_data(X)
if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
warnings.warn("The MDS API has changed. ``fit`` now constructs an"
" dissimilarity matrix from data. To use a custom "
"dissimilarity matrix, set "
"``dissimilarity='precomputed'``.")
if self.dissimilarity == "precomputed":
self.dissimilarity_matrix_ = X
elif self.dissimilarity == "euclidean":
self.dissimilarity_matrix_ = euclidean_distances(X)
else:
raise ValueError("Proximity must be 'precomputed' or 'euclidean'."
" Got %s instead" % str(self.dissimilarity))
self.embedding_, self.stress_, self.n_iter_ = smacof(
self.dissimilarity_matrix_, metric=self.metric,
n_components=self.n_components, init=init, n_init=self.n_init,
n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
eps=self.eps, random_state=self.random_state,
return_n_iter=True)
return self.embedding_
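# Illustrative usage sketch with a precomputed dissimilarity matrix (the
# docstring example above covers the default 'euclidean' mode):
#
#   >>> D = euclidean_distances(X[:100])
#   >>> mds = MDS(n_components=2, dissimilarity='precomputed')
#   >>> X_transformed = mds.fit_transform(D)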
|
import os
import sys
import subprocess
import importlib.util as il
spec = il.spec_from_file_location("config", snakemake.params.config)
config = il.module_from_spec(spec)
sys.modules[spec.name] = config
spec.loader.exec_module(config)
sys.path.append(snakemake.config['args']['mcc_path'])
import scripts.mccutils as mccutils
import scripts.output as output
def main():
mccutils.log("temp2","running TEMP2 post processing")
insert_bed = snakemake.input.insert_bed
absence_summary = snakemake.input.absence_summary
te_gff = snakemake.input.te_gff
reference_fasta = snakemake.input.reference_fasta
log = snakemake.params.log
sample_name = snakemake.params.sample_name
chromosomes = snakemake.params.chromosomes.split(",")
out_dir = snakemake.params.out_dir
status_log = snakemake.params.status_log
prev_steps_succeeded = mccutils.check_status_file(status_log)
if prev_steps_succeeded:
insertions = read_insertions(insert_bed, sample_name, chromosomes, config)
absence_bed = make_absence_bed(absence_summary, sample_name, out_dir)
non_absent_ref_insertions = get_non_absent_ref_tes(te_gff, absence_bed, sample_name, chromosomes, out_dir, log)
insertions += non_absent_ref_insertions
if len(insertions) > 0:
insertions = output.make_redundant_bed(insertions, sample_name, out_dir, method="temp2")
insertions = output.make_nonredundant_bed(insertions, sample_name, out_dir, method="temp2")
output.write_vcf(insertions, reference_fasta, sample_name, "temp2", out_dir)
else:
mccutils.run_command(["touch", out_dir+"/"+sample_name+"_temp2_redundant.bed"])
mccutils.run_command(["touch", out_dir+"/"+sample_name+"_temp2_nonredundant.bed"])
else:
mccutils.run_command(["touch", out_dir+"/"+sample_name+"_temp2_redundant.bed"])
mccutils.run_command(["touch", out_dir+"/"+sample_name+"_temp2_nonredundant.bed"])
mccutils.log("temp2","TEMP2 postprocessing complete")
def read_insertions(insert_bed, sample_name, chromosomes, config):
insertions = []
with open(insert_bed, "r") as inf:
for x,line in enumerate(inf):
if x > 0:
insert = output.Insertion(output.Temp2())
split_line = line.split("\t")
if len(split_line) == 15:
insert.chromosome = split_line[0]
insert.start = int(split_line[1])+1
insert.end = int(split_line[2])
insert.family = split_line[3].split(":")[0]
insert.type = "non-reference"
insert.support_info.support["frequency"].value = float(split_line[4])
insert.strand = split_line[5]
insert.support_info.support["class"].value = split_line[6]
insert.support_info.support["supportreads"].value = float(split_line[7])
insert.support_info.support["referencereads"].value = float(split_line[8])
insert.support_info.support["fiveprimesupport"].value = float(split_line[9])
insert.support_info.support["threeprimesupport"].value = float(split_line[10])
insert.support_info.support["reliability"].value = float(split_line[12].replace("%","")) # rare enties have a % sign for some reason
insert.support_info.support["fiveprimejunctionsupport"].value = float(split_line[13])
insert.support_info.support["threeprimejunctionsupport"].value = float(split_line[14])
insert.name = insert.family+"|non-reference|"+str(insert.support_info.support['frequency'].value)+"|"+sample_name+"|temp2|"
if insert.support_info.support["fiveprimejunctionsupport"].value > 0 and insert.support_info.support["threeprimejunctionsupport"].value > 0:
insert.name += "sr|"
else:
insert.name += "rp|"
if (
insert.chromosome in chromosomes and
insert.support_info.support["frequency"].value >= config.PARAMS["frequency_threshold"] and
insert.support_info.support["class"].value in config.PARAMS["acceptable_insertion_support_classes"]
):
insertions.append(insert)
return insertions
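# For reference: the columns of the TEMP2 insertion BED that read_insertions()
# consumes, as inferred from the indexing above (not an official format spec):
# 0 chromosome, 1 start (0-based), 2 end, 3 family:..., 4 frequency, 5 strand,
# 6 class, 7 support reads, 8 reference reads, 9 5' support, 10 3' support,
# 12 reliability (may carry a '%'), 13 5' junction support, 14 3' junction support.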
def make_absence_bed(summary_file, sample, out):
out_bed = out+"/"+sample+".absent.bed"
lines = []
with open(summary_file, "r") as inf:
for x,line in enumerate(inf):
if x > 0:
split_line = line.split("\t")
new_line = "\t".join([split_line[0], split_line[1], split_line[2]])
new_line += "\n"
lines.append(new_line)
if len(lines) < 1:
lines.append("empty\t0\t1\n")
with open(out_bed,"w") as bed:
for line in lines:
bed.write(line)
return out_bed
def get_non_absent_ref_tes(te_gff, absence_bed, sample, chromosomes, out, log):
insertions = []
tmp_gff = out+"/tmp.ref_nonabs.gff"
command = ["bedtools", "subtract", "-A", "-a", te_gff, "-b", absence_bed]
mccutils.run_command_stdout(command, tmp_gff, log=log)
with open(tmp_gff,"r") as gff:
for line in gff:
if "#" not in line:
line = line.replace(";","\t")
split_line = line.split("\t")
insert = output.Insertion(output.Temp())
insert.chromosome = split_line[0]
insert.start = int(split_line[3])
insert.end = int(split_line[4])
insert.name = split_line[9].split("=")[1]+"|reference|NA|"+sample+"|temp2|nonab|"
insert.strand = split_line[6]
insert.type = "reference"
if insert.chromosome in chromosomes:
insertions.append(insert)
mccutils.remove(tmp_gff)
return insertions
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
## Author: Philip Larsson
#
# Check if there is any precipitation for specified hours forward (8 is default).
# Use this link to get correct latitude and longitude: http://opendata.smhi.se/apidocs/metfcst/demo_point.html
# Change using --latitude and --longitude to get correct geographic area. Default is Lund Sweden.
# See more using -h or --help.
#
# Using API from SMHI Open Data.
# Read more here: http://opendata.smhi.se/apidocs/
#
##
import requests, json, time, sys
from pprint import pprint
from datetime import datetime
import argparse
# Global variables (default values)
hours_forward = 8
latitude = 55.71
longitude = 13.19
print_category = True
only_temp = False
include_temp = False
time_now_as_string = "%d-%02d-%02dT%02d:00:00Z" % (datetime.now().year, datetime.now().month, datetime.now().day, datetime.now().hour)
categorys = {
0: "no precipitation",
1: "snow",
2: "snow and rain",
3: "rain",
4: "drizzle",
5: "freezing rain",
6: "freezing drizzle"
}
def parse_command_line_options():
global hours_forward, latitude, longitude, print_category, only_temp, include_temp
parser = argparse.ArgumentParser(description='Check if there is any precipitation in specified latitude longitude.')
parser.add_argument("--hours", help="specify upper limit on hours to check for precipitation", required=False)
parser.add_argument("--latitude", help="latitude for where to get precipitation data")
parser.add_argument("--longitude", help="longitude for where to get precipitation data")
parser.add_argument("--list_categorys", help="list all categorys that are returned when using the --only_value flag.", action="store_true")
parser.add_argument("--only_value", help="only prints the precipitation category and not it's meaning.", action="store_true")
parser.add_argument("--only_temp", help="only prints the current temperature.", action="store_true")
parser.add_argument("--include_temp", help="prints the temperature after precipitation.", action="store_true")
args = parser.parse_args()
if args.hours:
hours_forward = int(args.hours)
if args.latitude:
latitude = args.latitude
if args.longitude:
longitude = args.longitude
if args.list_categorys:
print("Printing categorys.\n value: meaning")
pprint(categorys)
sys.exit()
if args.only_value:
print_category = False
if args.only_temp:
only_temp = True
if args.include_temp:
include_temp = True
def get_data():
base_api_link = "http://opendata-download-metfcst.smhi.se/api/category/pmp2g/version/2/"
r = base_api_link + "geotype/point/lon/" + str(longitude) + "/lat/" + str(latitude) + "/"
r += "data.json"
req = requests.get(r)
data = json.loads(req.text)
return data
def get_start_index(data):
    # Find the index of the forecast entry matching the current time.
    for x in range(len(data["timeSeries"])):
        if data["timeSeries"][x]["validTime"] == time_now_as_string:
            return x
    # No matching timestamp found.
    return -1
def get_type_of_downfall(start_index, data):
    # Check if there is any precipitation in the next 'hours_forward' hours.
    for x in range(start_index, start_index + hours_forward):
        for parameter in data["timeSeries"][x]["parameters"]:
            if parameter["name"] == "pcat":
                # 'pcat' is the precipitation category (see 'categorys' above).
                pcat_data = parameter["values"][0]
                # Any value above 0 means rain- or snow-fall.
                if pcat_data > 0:
                    # print(data["timeSeries"][x]["validTime"])
                    return pcat_data
    return 0
def get_current_temperature(start_index, data):
    # 't' is the air temperature parameter in the SMHI response.
    for parameter in data["timeSeries"][start_index]["parameters"]:
        if parameter["name"] == "t":
            return parameter["values"][0]
    return None
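# A minimal sketch of the SMHI response shape that get_start_index(),
# get_type_of_downfall() and get_current_temperature() assume. Field names
# are taken from the parsing code above; the values are illustrative only,
# not real API output.
def _example_response():
    return {
        "timeSeries": [
            {
                "validTime": "2021-01-01T12:00:00Z",
                "parameters": [
                    {"name": "pcat", "values": [3]},  # 3 == rain (see 'categorys')
                    {"name": "t", "values": [4.2]},   # temperature in Celsius
                ],
            },
        ],
    }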
# ====================== End of Functions =====================
parse_command_line_options()
data = get_data()
start_index = get_start_index(data)
pcat = get_type_of_downfall(start_index, data)
temp = get_current_temperature(start_index, data)
return_string = ""
if only_temp:
print(temp)
sys.exit()
elif print_category:
return_string += categorys[pcat]
else:
return_string += str(pcat)
if include_temp:
return_string += " " + str(temp)
print(return_string)
|
from abc import abstractmethod
import json
import sys
import datetime
import traceback
if sys.version_info[0] < 3:
from urllib import quote_plus
else:
from urllib.parse import quote_plus
import gevent
class Transport:
def __init__(self, session, connection):
self._session = session
self._connection = connection
@abstractmethod
def _get_name(self):
pass
def negotiate(self):
url = self.__get_base_url(self._connection,
'negotiate',
connectionData=self._connection.data)
negotiate = self._session.get(url)
negotiate.raise_for_status()
return negotiate.json()
@abstractmethod
def start(self):
pass
@abstractmethod
def send(self, data):
pass
@abstractmethod
def close(self):
pass
def accept(self, negotiate_data):
return True
def _handle_notification(self, message):
if len(message) > 0:
try:
data = json.loads(message)
self._connection.received.fire(**data)
except Exception as e:
traceback.print_exc()
print(str(datetime.datetime.now())+': Json decode error: ' + str(e))
gevent.sleep()
def _get_url(self, action, **kwargs):
args = kwargs.copy()
args['transport'] = self._get_name()
args['connectionToken'] = self._connection.token
args['connectionData'] = self._connection.data
return self.__get_base_url(self._connection, action, **args)
@staticmethod
def __get_base_url(connection, action, **kwargs):
args = kwargs.copy()
args.update(connection.qs)
args['clientProtocol'] = connection.protocol_version
query = '&'.join(['{key}={value}'.format(key=key, value=quote_plus(args[key])) for key in args])
return '{url}/{action}?{query}'.format(url=connection.url,
action=action,
query=query)
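# Illustrative example (hypothetical values): for a connection with
# url='http://host/signalr', qs={}, protocol_version='1.5' and data='[]',
# negotiate() would request roughly
#   http://host/signalr/negotiate?connectionData=%5B%5D&clientProtocol=1.5
# with every value escaped via quote_plus and the key order following dict
# iteration order.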
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 0);
|
from quart import g, render_template
from lnbits.decorators import check_user_exists, validate_uuids
from . import splitpayments_ext
@splitpayments_ext.route("/")
@validate_uuids(["usr"], required=True)
@check_user_exists()
async def index():
return await render_template("splitpayments/index.html", user=g.user)
|
from random import randint
print('=-' * 15)
print("LET'S PLAY EVEN OR ODD")
print('=-' * 15)
cont = 0
while True:
    pc = randint(1, 10)
    valor = int(input('Say a number: '))
    escolha = str(input('Even or Odd [E/O]? ')).upper()
    soma = pc + valor
    print('--' * 15)
    if soma % 2 == 0:
        print(f'You played {valor} and the computer played {pc}. Total of {soma}: EVEN')
        resultado = 'E'
    else:
        print(f'You played {valor} and the computer played {pc}. Total of {soma}: ODD')
        resultado = 'O'
    print('--' * 15)
    if escolha == resultado:
        print('You WON')
        print("Let's play again...")
        print('=-' * 15)
        cont += 1
    else:
        print('You LOST')
        print(f'GAME OVER! You won {cont} time(s).')
        break
|
'''
Given the string in the variable blah, program the necessary steps to print
out the day of the week for the given date.
'''
blah = '21/03/2012'
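# One possible solution (a sketch; assumes the date is in day/month/year order):
from datetime import datetime
print(datetime.strptime(blah, '%d/%m/%Y').strftime('%A'))  # -> Wednesday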
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r"^~create-incoming-(?P<case_pk>\d+)$",
views.IncomingParcelPostCreateView.as_view(),
name="incoming-create",
),
url(
r"^incoming-(?P<pk>[\w-]+)$",
views.IncomingParcelPostDetailView.as_view(),
name="incoming-details",
),
url(
r"^incoming-(?P<pk>[\w-]+)/~update$",
views.IncomingParcelPostUpdateView.as_view(),
name="incoming-update",
),
url(
r"^incoming-(?P<pk>[\w-]+)/~delete$",
views.IncomingParcelPostDeleteView.as_view(),
name="incoming-delete",
),
url(
r"^incoming-(?P<pk>[\w-]+)/~download$",
views.IncomingAttachmentParcelPostXSendFileView.as_view(),
name="incoming-download",
),
url(
r"^~create-outgoing-(?P<case_pk>\d+)$",
views.OutgoingParcelPostCreateView.as_view(),
name="outgoing-create",
),
url(
r"^outgoing-(?P<pk>[\w-]+)$",
views.OutgoingParcelPostDetailView.as_view(),
name="outgoing-details",
),
url(
r"^outgoing-(?P<pk>[\w-]+)/~update$",
views.OutgoingParcelPostUpdateView.as_view(),
name="outgoing-update",
),
url(
r"^outgoing-(?P<pk>[\w-]+)/~delete$",
views.OutgoingParcelPostDeleteView.as_view(),
name="outgoing-delete",
),
url(
r"^outgoing-(?P<pk>[\w-]+)/~download$",
views.OutgoingAttachmentParcelPostXSendFileView.as_view(),
name="outgoing-download",
),
]
app_name = "feder.parcels"
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for coders that must be consistent across all Beam SDKs.
"""
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import os.path
import sys
import unittest
from builtins import map
import yaml
from apache_beam.coders import coder_impl
from apache_beam.coders import coders
from apache_beam.transforms import window
from apache_beam.transforms.window import IntervalWindow
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import Timestamp
STANDARD_CODERS_YAML = os.path.join(
os.path.dirname(__file__), '..', 'testing', 'data', 'standard_coders.yaml')
def _load_test_cases(test_yaml):
"""Load test data from yaml file and return an iterable of test cases.
See ``standard_coders.yaml`` for more details.
"""
if not os.path.exists(test_yaml):
raise ValueError('Could not find the test spec: %s' % test_yaml)
for ix, spec in enumerate(yaml.load_all(open(test_yaml))):
spec['index'] = ix
name = spec.get('name', spec['coder']['urn'].split(':')[-2])
yield [name, spec]
class StandardCodersTest(unittest.TestCase):
_urn_to_coder_class = {
'beam:coder:bytes:v1': coders.BytesCoder,
'beam:coder:varint:v1': coders.VarIntCoder,
'beam:coder:kv:v1': lambda k, v: coders.TupleCoder((k, v)),
'beam:coder:interval_window:v1': coders.IntervalWindowCoder,
'beam:coder:iterable:v1': lambda t: coders.IterableCoder(t),
'beam:coder:global_window:v1': coders.GlobalWindowCoder,
'beam:coder:windowed_value:v1':
lambda v, w: coders.WindowedValueCoder(v, w)
}
_urn_to_json_value_parser = {
'beam:coder:bytes:v1': lambda x: x.encode('utf-8'),
'beam:coder:varint:v1': lambda x: x,
'beam:coder:kv:v1':
lambda x, key_parser, value_parser: (key_parser(x['key']),
value_parser(x['value'])),
'beam:coder:interval_window:v1':
lambda x: IntervalWindow(
start=Timestamp(micros=(x['end'] - x['span']) * 1000),
end=Timestamp(micros=x['end'] * 1000)),
'beam:coder:iterable:v1': lambda x, parser: list(map(parser, x)),
'beam:coder:global_window:v1': lambda x: window.GlobalWindow(),
'beam:coder:windowed_value:v1':
lambda x, value_parser, window_parser: windowed_value.create(
value_parser(x['value']), x['timestamp'] * 1000,
tuple([window_parser(w) for w in x['windows']]))
}
def test_standard_coders(self):
for name, spec in _load_test_cases(STANDARD_CODERS_YAML):
logging.info('Executing %s test.', name)
self._run_standard_coder(name, spec)
def _run_standard_coder(self, name, spec):
coder = self.parse_coder(spec['coder'])
parse_value = self.json_value_parser(spec['coder'])
nested_list = [spec['nested']] if 'nested' in spec else [True, False]
for nested in nested_list:
for expected_encoded, json_value in spec['examples'].items():
value = parse_value(json_value)
expected_encoded = expected_encoded.encode('latin1')
if not spec['coder'].get('non_deterministic', False):
actual_encoded = encode_nested(coder, value, nested)
if self.fix and actual_encoded != expected_encoded:
self.to_fix[spec['index'], expected_encoded] = actual_encoded
else:
self.assertEqual(expected_encoded, actual_encoded)
self.assertEqual(decode_nested(coder, expected_encoded, nested),
value)
else:
# Only verify decoding for a non-deterministic coder
self.assertEqual(decode_nested(coder, expected_encoded, nested),
value)
def parse_coder(self, spec):
return self._urn_to_coder_class[spec['urn']](
*[self.parse_coder(c) for c in spec.get('components', ())])
def json_value_parser(self, coder_spec):
component_parsers = [
self.json_value_parser(c) for c in coder_spec.get('components', ())]
return lambda x: self._urn_to_json_value_parser[coder_spec['urn']](
x, *component_parsers)
# Used when --fix is passed.
fix = False
to_fix = {}
@classmethod
def tearDownClass(cls):
if cls.fix and cls.to_fix:
print("FIXING", len(cls.to_fix), "TESTS")
doc_sep = '\n---\n'
docs = open(STANDARD_CODERS_YAML).read().split(doc_sep)
def quote(s):
return json.dumps(s.decode('latin1')).replace(r'\u0000', r'\0')
for (doc_ix, expected_encoded), actual_encoded in cls.to_fix.items():
print(quote(expected_encoded), "->", quote(actual_encoded))
docs[doc_ix] = docs[doc_ix].replace(
quote(expected_encoded) + ':', quote(actual_encoded) + ':')
open(STANDARD_CODERS_YAML, 'w').write(doc_sep.join(docs))
def encode_nested(coder, value, nested=True):
out = coder_impl.create_OutputStream()
coder.get_impl().encode_to_stream(value, out, nested)
return out.get()
def decode_nested(coder, encoded, nested=True):
return coder.get_impl().decode_from_stream(
coder_impl.create_InputStream(encoded), nested)
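# A minimal round-trip sketch (not part of the test suite) using the helpers
# above with a coder that is exercised elsewhere in this file:
def _roundtrip_sketch():
    coder = coders.VarIntCoder()
    encoded = encode_nested(coder, 42, nested=False)
    assert decode_nested(coder, encoded, nested=False) == 42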
if __name__ == '__main__':
if '--fix' in sys.argv:
StandardCodersTest.fix = True
sys.argv.remove('--fix')
unittest.main()
|
import torch
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
import numpy as np
import functools
import librosa
import glob
import os
from tqdm import tqdm
import multiprocessing as mp
import json
from utils.augment import time_shift, resample, spec_augment
from audiomentations import AddBackgroundNoise
def get_train_val_test_split(root: str, val_file: str, test_file: str):
"""Creates train, val, and test split according to provided val and test files.
Args:
root (str): Path to base directory of the dataset.
val_file (str): Path to file containing list of validation data files.
test_file (str): Path to file containing list of test data files.
Returns:
train_list (list): List of paths to training data items.
val_list (list): List of paths to validation data items.
test_list (list): List of paths to test data items.
label_map (dict): Mapping of indices to label classes.
"""
####################
# Labels
####################
label_list = [label for label in sorted(os.listdir(root)) if os.path.isdir(os.path.join(root, label)) and label[0] != "_"]
label_map = {idx: label for idx, label in enumerate(label_list)}
###################
# Split
###################
all_files_set = set()
for label in label_list:
all_files_set.update(set(glob.glob(os.path.join(root, label, "*.wav"))))
with open(val_file, "r") as f:
val_files_set = set(map(lambda a: os.path.join(root, a), f.read().rstrip("\n").split("\n")))
with open(test_file, "r") as f:
test_files_set = set(map(lambda a: os.path.join(root, a), f.read().rstrip("\n").split("\n")))
assert len(val_files_set.intersection(test_files_set)) == 0, "Sanity check: No files should be common between val and test."
all_files_set -= val_files_set
all_files_set -= test_files_set
train_list, val_list, test_list = list(all_files_set), list(val_files_set), list(test_files_set)
print(f"Number of training samples: {len(train_list)}")
print(f"Number of validation samples: {len(val_list)}")
print(f"Number of test samples: {len(test_list)}")
return train_list, val_list, test_list, label_map
class GoogleSpeechDataset(Dataset):
"""Dataset wrapper for Google Speech Commands V2."""
def __init__(self, data_list: list, audio_settings: dict, label_map: dict = None, aug_settings: dict = None, cache: int = 0):
super().__init__()
self.audio_settings = audio_settings
self.aug_settings = aug_settings
self.cache = cache
if cache:
print("Caching dataset into memory.")
self.data_list = init_cache(data_list, audio_settings["sr"], cache, audio_settings)
else:
self.data_list = data_list
# labels: if no label map is provided, will not load labels. (Use for inference)
if label_map is not None:
self.label_list = []
label_2_idx = {v: int(k) for k, v in label_map.items()}
for path in data_list:
self.label_list.append(label_2_idx[path.split("/")[-2]])
else:
self.label_list = None
if aug_settings is not None:
if "bg_noise" in self.aug_settings:
self.bg_adder = AddBackgroundNoise(sounds_path=aug_settings["bg_noise"]["bg_folder"])
def __len__(self):
return len(self.data_list)
def __getitem__(self, idx):
if self.cache:
x = self.data_list[idx]
else:
            x = librosa.load(self.data_list[idx], sr=self.audio_settings["sr"])[0]
x = self.transform(x)
if self.label_list is not None:
label = self.label_list[idx]
return x, label
else:
return x
def transform(self, x):
"""Applies necessary preprocessing to audio.
Args:
x (np.ndarray) - Input waveform; array of shape (n_samples, ).
Returns:
x (torch.FloatTensor) - MFCC matrix of shape (n_mfcc, T).
"""
sr = self.audio_settings["sr"]
###################
# Waveform
###################
if self.cache < 2:
if self.aug_settings is not None:
if "bg_noise" in self.aug_settings:
x = self.bg_adder(samples=x, sample_rate=sr)
if "time_shift" in self.aug_settings:
x = time_shift(x, sr, **self.aug_settings["time_shift"])
if "resample" in self.aug_settings:
x, _ = resample(x, sr, **self.aug_settings["resample"])
x = librosa.util.fix_length(x, sr)
###################
# Spectrogram
###################
x = librosa.feature.melspectrogram(y=x, **self.audio_settings)
x = librosa.power_to_db(x, ref=np.max)
x = x - np.amin(x)
x = np.clip(x,a_min=0,a_max=None)
if np.amax(x) > 0:
x = x/np.amax(x)
else:
x = x*0
#x = librosa.feature.mfcc(S=librosa.power_to_db(x), n_mfcc=self.audio_settings["n_mels"])
if self.aug_settings is not None:
if "spec_aug" in self.aug_settings:
x = spec_augment(x, **self.aug_settings["spec_aug"])
x = torch.from_numpy(x).float().unsqueeze(0)
return x
def cache_item_loader(path: str, sr: int, cache_level: int, audio_settings: dict) -> np.ndarray:
    x = librosa.load(path, sr=sr)[0]
if cache_level == 2:
x = librosa.util.fix_length(x, sr)
x = librosa.feature.melspectrogram(y=x, **audio_settings)
x = librosa.power_to_db(x, ref=np.max)
x = x - np.amin(x)
x = np.clip(x,a_min=0,a_max=None)
if np.amax(x) > 0:
x = x/np.amax(x)
else:
x = x*0
#x = librosa.feature.mfcc(S=librosa.power_to_db(x), n_mfcc=audio_settings["n_mels"])
return x
def init_cache(data_list: list, sr: int, cache_level: int, audio_settings: dict, n_cache_workers: int = 4) -> list:
"""Loads entire dataset into memory for later use.
Args:
data_list (list): List of data items.
sr (int): Sampling rate.
        cache_level (int): Cache level, one of (1, 2), caching wavs and spectrograms respectively.
        audio_settings (dict): Settings for the melspectrogram computation (used when cache_level == 2).
        n_cache_workers (int, optional): Number of workers. Defaults to 4.
Returns:
cache (list): List of data items.
"""
cache = []
loader_fn = functools.partial(cache_item_loader, sr=sr, cache_level=cache_level, audio_settings=audio_settings)
pool = mp.Pool(n_cache_workers)
for audio in tqdm(pool.imap(func=loader_fn, iterable=data_list), total=len(data_list)):
cache.append(audio)
pool.close()
pool.join()
return cache
def get_loader(data_list, config, train=True):
"""Creates dataloaders for training, validation and testing.
Args:
config (dict): Dict containing various settings for the training run.
train (bool): Training or evaluation mode.
Returns:
dataloader (DataLoader): DataLoader wrapper for training/validation/test data.
"""
with open(config["label_map"], "r") as f:
label_map = json.load(f)
dataset = GoogleSpeechDataset(
data_list=data_list,
label_map=label_map,
audio_settings=config["hparams"]["audio"],
aug_settings=config["hparams"]["augment"] if train else None,
cache=config["exp"]["cache"]
)
dataloader = DataLoader(
dataset,
batch_size=config["hparams"]["batch_size"],
num_workers=config["exp"]["n_workers"],
pin_memory=config["exp"]["pin_memory"],
shuffle=True if train else False
)
return dataloader
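# A minimal sketch of the config dict shape get_loader() assumes, inferred
# from the keys accessed above (values are illustrative, not a real run config):
#
# config = {
#     "label_map": "label_map.json",
#     "exp": {"cache": 0, "n_workers": 4, "pin_memory": True},
#     "hparams": {
#         "batch_size": 32,
#         "audio": {"sr": 16000, "n_mels": 40},
#         "augment": {},
#     },
# }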
|
import pytest
import sys
import StringIO
from pypy.module.cpyext.state import State
from pypy.module.cpyext.pyobject import make_ref
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from rpython.rtyper.lltypesystem import rffi
class TestExceptions(BaseApiTest):
def test_GivenExceptionMatches(self, space, api):
old_style_exception = space.appexec([], """():
class OldStyle:
pass
return OldStyle
""")
exc_matches = api.PyErr_GivenExceptionMatches
string_exception = space.wrap('exception')
instance = space.call_function(space.w_ValueError)
old_style_instance = space.call_function(old_style_exception)
assert exc_matches(string_exception, string_exception)
assert exc_matches(old_style_exception, old_style_exception)
assert not exc_matches(old_style_exception, space.w_Exception)
assert exc_matches(instance, space.w_ValueError)
assert exc_matches(old_style_instance, old_style_exception)
assert exc_matches(space.w_ValueError, space.w_ValueError)
assert exc_matches(space.w_IndexError, space.w_LookupError)
assert not exc_matches(space.w_ValueError, space.w_LookupError)
exceptions = space.newtuple([space.w_LookupError, space.w_ValueError])
assert exc_matches(space.w_ValueError, exceptions)
def test_ExceptionMatches(self, space, api):
api.PyErr_SetObject(space.w_ValueError, space.wrap("message"))
assert api.PyErr_ExceptionMatches(space.w_Exception)
assert api.PyErr_ExceptionMatches(space.w_ValueError)
assert not api.PyErr_ExceptionMatches(space.w_TypeError)
api.PyErr_Clear()
def test_Occurred(self, space, api):
assert not api.PyErr_Occurred()
string = rffi.str2charp("spam and eggs")
api.PyErr_SetString(space.w_ValueError, string)
rffi.free_charp(string)
assert api.PyErr_Occurred() is space.w_ValueError
api.PyErr_Clear()
def test_SetObject(self, space, api):
api.PyErr_SetObject(space.w_ValueError, space.wrap("a value"))
assert api.PyErr_Occurred() is space.w_ValueError
state = space.fromcache(State)
operror = state.get_exception()
assert space.eq_w(operror.get_w_value(space),
space.wrap("a value"))
api.PyErr_Clear()
def test_SetNone(self, space, api):
api.PyErr_SetNone(space.w_KeyError)
state = space.fromcache(State)
operror = state.get_exception()
assert space.eq_w(operror.w_type, space.w_KeyError)
assert space.eq_w(operror.get_w_value(space), space.w_None)
api.PyErr_Clear()
api.PyErr_NoMemory()
operror = state.get_exception()
assert space.eq_w(operror.w_type, space.w_MemoryError)
api.PyErr_Clear()
def test_Warning(self, space, api, capfd):
message = rffi.str2charp("this is a warning")
api.PyErr_WarnEx(None, message, 1)
out, err = capfd.readouterr()
assert ": UserWarning: this is a warning" in err
rffi.free_charp(message)
def test_print_err(self, space, api, capfd):
api.PyErr_SetObject(space.w_Exception, space.wrap("cpyext is cool"))
api.PyErr_Print()
out, err = capfd.readouterr()
assert "cpyext is cool" in err
assert not api.PyErr_Occurred()
def test_WriteUnraisable(self, space, api, capfd):
api.PyErr_SetObject(space.w_ValueError, space.wrap("message"))
w_where = space.wrap("location")
api.PyErr_WriteUnraisable(w_where)
out, err = capfd.readouterr()
assert "Exception ValueError: 'message' in 'location' ignored" == err.strip()
def test_ExceptionInstance_Class(self, space, api):
instance = space.call_function(space.w_ValueError)
assert api.PyExceptionInstance_Class(instance) is space.w_ValueError
@pytest.mark.skipif(True, reason='not implemented yet')
def test_interrupt_occurred(self, space, api):
assert not api.PyOS_InterruptOccurred()
import signal, os
        received = []
        def default_int_handler(*args):
            received.append('ok')
        signal.signal(signal.SIGINT, default_int_handler)
        os.kill(os.getpid(), signal.SIGINT)
        assert received == ['ok']
assert api.PyOS_InterruptOccurred()
def test_restore_traceback(self, space, api):
string = rffi.str2charp("spam and eggs")
api.PyErr_SetString(space.w_ValueError, string)
state = space.fromcache(State)
operror = state.clear_exception()
# Fake a traceback.
operror.set_traceback(space.w_True) # this doesn't really need to be a real traceback for this test.
w_type = operror.w_type
w_value = operror.get_w_value(space)
w_tb = operror.get_w_traceback(space)
assert not space.eq_w(w_tb, space.w_None)
api.PyErr_Restore(make_ref(space, w_type), make_ref(space, w_value), make_ref(space, w_tb))
operror = state.clear_exception()
w_tb_restored = operror.get_w_traceback(space)
assert space.eq_w(w_tb_restored, w_tb)
rffi.free_charp(string)
class AppTestFetch(AppTestCpythonExtensionBase):
def test_occurred(self):
module = self.import_extension('foo', [
("check_error", "METH_NOARGS",
'''
PyErr_SetString(PyExc_TypeError, "message");
PyErr_Occurred();
PyErr_Clear();
Py_RETURN_TRUE;
'''
),
])
assert module.check_error()
def test_fetch_and_restore(self):
module = self.import_extension('foo', [
("check_error", "METH_NOARGS",
'''
PyObject *type, *val, *tb;
PyErr_SetString(PyExc_TypeError, "message");
PyErr_Fetch(&type, &val, &tb);
if (PyErr_Occurred())
return NULL;
if (type != PyExc_TypeError)
Py_RETURN_FALSE;
PyErr_Restore(type, val, tb);
if (!PyErr_Occurred())
Py_RETURN_FALSE;
PyErr_Clear();
Py_RETURN_TRUE;
'''
),
])
assert module.check_error()
def test_normalize(self):
module = self.import_extension('foo', [
("check_error", "METH_NOARGS",
'''
PyObject *type, *val, *tb;
PyErr_SetString(PyExc_TypeError, "message");
PyErr_Fetch(&type, &val, &tb);
if (type != PyExc_TypeError)
Py_RETURN_FALSE;
if (!PyString_Check(val))
Py_RETURN_FALSE;
/* Normalize */
PyErr_NormalizeException(&type, &val, &tb);
if (type != PyExc_TypeError)
Py_RETURN_FALSE;
if ((PyObject*)Py_TYPE(val) != PyExc_TypeError)
Py_RETURN_FALSE;
/* Normalize again */
PyErr_NormalizeException(&type, &val, &tb);
if (type != PyExc_TypeError)
Py_RETURN_FALSE;
if ((PyObject*)Py_TYPE(val) != PyExc_TypeError)
Py_RETURN_FALSE;
PyErr_Restore(type, val, tb);
PyErr_Clear();
Py_RETURN_TRUE;
'''
),
])
assert module.check_error()
def test_normalize_no_exception(self):
module = self.import_extension('foo', [
("check_error", "METH_NOARGS",
'''
PyObject *type, *val, *tb;
PyErr_Fetch(&type, &val, &tb);
if (type != NULL)
Py_RETURN_FALSE;
if (val != NULL)
Py_RETURN_FALSE;
PyErr_NormalizeException(&type, &val, &tb);
Py_RETURN_TRUE;
'''
),
])
assert module.check_error()
def test_SetFromErrno(self):
import sys
if sys.platform != 'win32':
skip("callbacks through ll2ctypes modify errno")
import errno, os
module = self.import_extension('foo', [
("set_from_errno", "METH_NOARGS",
'''
errno = EBADF;
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
'''),
],
prologue="#include <errno.h>")
try:
module.set_from_errno()
except OSError as e:
assert e.errno == errno.EBADF
assert e.strerror == os.strerror(errno.EBADF)
assert e.filename is None
def test_SetFromErrnoWithFilename(self):
import errno, os
module = self.import_extension('foo', [
("set_from_errno", "METH_NOARGS",
'''
errno = EBADF;
PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/path/to/file");
return NULL;
'''),
],
prologue="#include <errno.h>")
exc_info = raises(OSError, module.set_from_errno)
assert exc_info.value.filename == "/path/to/file"
assert exc_info.value.errno == errno.EBADF
assert exc_info.value.strerror == os.strerror(errno.EBADF)
def test_SetFromErrnoWithFilename_NULL(self):
import errno, os
module = self.import_extension('foo', [
("set_from_errno", "METH_NOARGS",
'''
errno = EBADF;
PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL);
return NULL;
'''),
],
prologue="#include <errno.h>")
exc_info = raises(OSError, module.set_from_errno)
assert exc_info.value.filename is None
assert exc_info.value.errno == errno.EBADF
assert exc_info.value.strerror == os.strerror(errno.EBADF)
def test_SetFromErrnoWithFilenameObject__PyString(self):
import errno, os
module = self.import_extension('foo', [
("set_from_errno", "METH_NOARGS",
'''
PyObject *filenameObject = PyString_FromString("/path/to/file");
errno = EBADF;
PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject);
Py_DECREF(filenameObject);
return NULL;
'''),
],
prologue="#include <errno.h>")
exc_info = raises(OSError, module.set_from_errno)
assert exc_info.value.filename == "/path/to/file"
assert exc_info.value.errno == errno.EBADF
assert exc_info.value.strerror == os.strerror(errno.EBADF)
def test_SetFromErrnoWithFilenameObject__PyInt(self):
import errno, os
module = self.import_extension('foo', [
("set_from_errno", "METH_NOARGS",
'''
PyObject *intObject = PyInt_FromLong(3);
errno = EBADF;
PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject);
Py_DECREF(intObject);
return NULL;
'''),
],
prologue="#include <errno.h>")
exc_info = raises(OSError, module.set_from_errno)
assert exc_info.value.filename == 3
assert exc_info.value.errno == errno.EBADF
assert exc_info.value.strerror == os.strerror(errno.EBADF)
def test_SetFromErrnoWithFilenameObject__PyList(self):
import errno, os
module = self.import_extension('foo', [
("set_from_errno", "METH_NOARGS",
'''
PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three");
errno = EBADF;
PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, lst);
Py_DECREF(lst);
return NULL;
'''),
],
prologue="#include <errno.h>")
exc_info = raises(OSError, module.set_from_errno)
assert exc_info.value.filename == [1, 2, "three"]
assert exc_info.value.errno == errno.EBADF
assert exc_info.value.strerror == os.strerror(errno.EBADF)
def test_SetFromErrnoWithFilenameObject__PyTuple(self):
import errno, os
module = self.import_extension('foo', [
("set_from_errno", "METH_NOARGS",
'''
PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three");
errno = EBADF;
PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, tuple);
Py_DECREF(tuple);
return NULL;
'''),
],
prologue="#include <errno.h>")
exc_info = raises(OSError, module.set_from_errno)
assert exc_info.value.filename == (1, 2, "three")
assert exc_info.value.errno == errno.EBADF
assert exc_info.value.strerror == os.strerror(errno.EBADF)
def test_SetFromErrnoWithFilenameObject__Py_None(self):
import errno, os
module = self.import_extension('foo', [
("set_from_errno", "METH_NOARGS",
'''
PyObject *none = Py_BuildValue("");
errno = EBADF;
PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, none);
Py_DECREF(none);
return NULL;
'''),
],
prologue="#include <errno.h>")
exc_info = raises(OSError, module.set_from_errno)
assert exc_info.value.filename is None
assert exc_info.value.errno == errno.EBADF
assert exc_info.value.strerror == os.strerror(errno.EBADF)
def test_PyErr_Display(self):
from sys import version_info
if self.runappdirect and (version_info.major < 3 or version_info.minor < 3):
skip('PyErr_{GS}etExcInfo introduced in python 3.3')
module = self.import_extension('foo', [
("display_error", "METH_VARARGS",
r'''
PyObject *type, *val, *tb;
PyErr_GetExcInfo(&type, &val, &tb);
PyErr_Display(type, val, tb);
Py_XDECREF(type);
Py_XDECREF(val);
Py_XDECREF(tb);
Py_RETURN_NONE;
'''),
])
import sys, StringIO
sys.stderr = StringIO.StringIO()
try:
1 / 0
except ZeroDivisionError:
module.display_error()
finally:
output = sys.stderr.getvalue()
sys.stderr = sys.__stderr__
assert "in test_PyErr_Display\n" in output
assert "ZeroDivisionError" in output
@pytest.mark.skipif(True, reason=
"XXX seems to pass, but doesn't: 'py.test -s' shows errors in PyObject_Free")
def test_GetSetExcInfo(self):
import sys
if self.runappdirect and (sys.version_info.major < 3 or
sys.version_info.minor < 3):
skip('PyErr_{GS}etExcInfo introduced in python 3.3')
module = self.import_extension('foo', [
("getset_exc_info", "METH_VARARGS",
r'''
PyObject *type, *val, *tb;
PyObject *new_type, *new_val, *new_tb;
PyObject *result;
if (!PyArg_ParseTuple(args, "OOO", &new_type, &new_val, &new_tb))
return NULL;
PyErr_GetExcInfo(&type, &val, &tb);
Py_INCREF(new_type);
Py_INCREF(new_val);
Py_INCREF(new_tb);
PyErr_SetExcInfo(new_type, new_val, new_tb);
result = Py_BuildValue("OOO",
type ? type : Py_None,
val ? val : Py_None,
tb ? tb : Py_None);
Py_XDECREF(type);
Py_XDECREF(val);
Py_XDECREF(tb);
return result;
'''
),
])
try:
raise ValueError(5)
except ValueError as old_exc:
new_exc = TypeError("TEST")
orig_sys_exc_info = sys.exc_info()
orig_exc_info = module.getset_exc_info(new_exc.__class__,
new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = module.getset_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
assert orig_exc_info[0] is old_exc.__class__
assert orig_exc_info[1] is old_exc
assert orig_exc_info == orig_sys_exc_info
assert orig_exc_info == reset_sys_exc_info
assert new_exc_info == (new_exc.__class__, new_exc, None)
assert new_exc_info == new_sys_exc_info
def test_PyErr_BadInternalCall(self):
# NB. it only seemed to fail when run with '-s'... but I think
# that it always printed stuff to stderr
module = self.import_extension('foo', [
("oops", "METH_NOARGS",
r'''
PyErr_BadInternalCall();
return NULL;
'''),
])
raises(SystemError, module.oops)
def test_error_thread_race(self):
# Check race condition: thread 0 returns from cpyext with error set,
# after thread 1 has set an error but before it returns.
module = self.import_extension('foo', [
("emit_error", "METH_VARARGS",
'''
PyThreadState *save = NULL;
PyGILState_STATE gilsave;
/* NB. synchronization due to GIL */
static volatile int flag = 0;
int id;
if (!PyArg_ParseTuple(args, "i", &id))
return NULL;
/* Proceed in thread 1 first */
save = PyEval_SaveThread();
while (id == 0 && flag == 0);
gilsave = PyGILState_Ensure();
PyErr_Format(PyExc_ValueError, "%d", id);
/* Proceed in thread 0 first */
if (id == 1) flag = 1;
PyGILState_Release(gilsave);
while (id == 1 && flag == 1);
PyEval_RestoreThread(save);
if (id == 0) flag = 0;
return NULL;
'''
),
])
import threading
failures = []
def worker(arg):
try:
module.emit_error(arg)
failures.append(True)
except Exception as exc:
if str(exc) != str(arg):
failures.append(exc)
threads = [threading.Thread(target=worker, args=(j,))
for j in (0, 1)]
for t in threads:
t.start()
for t in threads:
t.join()
assert not failures
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import json
import logging
from django.db.models import Q, Avg, Count, Sum, Value, BooleanField, Case, When
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.db.models import JSONField
from django.core.validators import MinLengthValidator, MaxLengthValidator
from django.db import transaction, models
from annoying.fields import AutoOneToOneField
from tasks.models import Task, Prediction, Annotation, Q_task_finished_annotations, bulk_update_stats_project_tasks
from core.utils.common import create_hash, sample_query, get_attr_or_item, load_func
from core.utils.exceptions import LabelStudioValidationErrorSentryIgnored
from core.label_config import (
parse_config, validate_label_config, extract_data_types, get_all_object_tag_names, config_line_stipped,
get_sample_task, get_all_labels, get_all_control_tag_tuples, get_annotation_tuple
)
logger = logging.getLogger(__name__)
class ProjectManager(models.Manager):
def for_user(self, user):
return self.filter(organization=user.active_organization)
def with_counts(self):
return self.annotate(
task_number=Count('tasks', distinct=True),
finished_task_number=Count(
'tasks', distinct=True,
filter=Q(tasks__is_labeled=True)
),
total_predictions_number=Count('tasks__predictions', distinct=True),
total_annotations_number=Count(
'tasks__annotations__id', distinct=True,
filter=Q(tasks__annotations__was_cancelled=False)
),
num_tasks_with_annotations=Count(
'tasks__id', distinct=True,
filter=Q(tasks__annotations__isnull=False) &
Q(tasks__annotations__ground_truth=False) &
Q(tasks__annotations__was_cancelled=False) &
Q(tasks__annotations__result__isnull=False)
),
useful_annotation_number=Count(
'tasks__annotations__id', distinct=True,
filter=Q(tasks__annotations__was_cancelled=False) &
Q(tasks__annotations__ground_truth=False) &
Q(tasks__annotations__result__isnull=False)
),
ground_truth_number=Count(
'tasks__annotations__id', distinct=True,
filter=Q(tasks__annotations__ground_truth=True)
),
skipped_annotations_number=Count(
'tasks__annotations__id', distinct=True,
filter=Q(tasks__annotations__was_cancelled=True)
),
)
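    # Illustrative usage (hypothetical call site): the annotations above expose
    # per-project counters without issuing extra queries, e.g.
    #   for project in Project.objects.with_counts():
    #       print(project.task_number, project.useful_annotation_number)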
ProjectMixin = load_func(settings.PROJECT_MIXIN)
class Project(ProjectMixin, models.Model):
"""
"""
objects = ProjectManager()
__original_label_config = None
title = models.CharField(_('title'), null=True, blank=True, default='', max_length=settings.PROJECT_TITLE_MAX_LEN,
help_text=f'Project name. Must be between {settings.PROJECT_TITLE_MIN_LEN} and {settings.PROJECT_TITLE_MAX_LEN} characters long.',
validators=[MinLengthValidator(settings.PROJECT_TITLE_MIN_LEN), MaxLengthValidator(settings.PROJECT_TITLE_MAX_LEN)])
description = models.TextField(_('description'), blank=True, null=True, default='', help_text='Project description')
organization = models.ForeignKey('organizations.Organization', on_delete=models.CASCADE, related_name='projects', null=True)
label_config = models.TextField(_('label config'), blank=True, null=True, default='<View></View>',
help_text='Label config in XML format. See more about it in documentation')
expert_instruction = models.TextField(_('expert instruction'), blank=True, null=True, default='', help_text='Labeling instructions in HTML format')
show_instruction = models.BooleanField(_('show instruction'), default=False, help_text='Show instructions to the annotator before they start')
show_skip_button = models.BooleanField(_('show skip button'), default=True, help_text='Show a skip button in interface and allow annotators to skip the task')
enable_empty_annotation = models.BooleanField(_('enable empty annotation'), default=True, help_text='Allow annotators to submit empty annotations')
show_annotation_history = models.BooleanField(_('show annotation history'), default=False, help_text='Show annotation history to annotator')
show_collab_predictions = models.BooleanField(_('show predictions to annotator'), default=True, help_text='If set, the annotator can view model predictions')
evaluate_predictions_automatically = models.BooleanField(_('evaluate predictions automatically'), default=False, help_text='Retrieve and display predictions when loading a task')
token = models.CharField(_('token'), max_length=256, default=create_hash, null=True, blank=True)
result_count = models.IntegerField(_('result count'), default=0, help_text='Total results inside of annotations counter')
color = models.CharField(_('color'), max_length=16, default='#FFFFFF', null=True, blank=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='created_projects',
on_delete=models.SET_NULL,
null=True,
verbose_name=_('created by')
)
maximum_annotations = models.IntegerField(_('maximum annotation number'), default=1,
help_text='Maximum number of annotations for one task. '
'If the number of annotations per task is equal or greater '
'to this value, the task is completed (is_labeled=True)')
min_annotations_to_start_training = models.IntegerField(
_('min_annotations_to_start_training'),
default=10,
help_text='Minimum number of completed tasks after which model training is started'
)
control_weights = JSONField(_('control weights'), null=True, default=dict, help_text='Weights for control tags')
model_version = models.TextField(_('model version'), blank=True, null=True, default='',
help_text='Machine learning model version')
data_types = JSONField(_('data_types'), default=dict, null=True)
is_draft = models.BooleanField(
_('is draft'), default=False, help_text='Whether or not the project is in the middle of being created')
is_published = models.BooleanField(_('published'), default=False, help_text='Whether or not the project is published to annotators')
created_at = models.DateTimeField(_('created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('updated at'), auto_now=True)
SEQUENCE = 'Sequential sampling'
UNIFORM = 'Uniform sampling'
UNCERTAINTY = 'Uncertainty sampling'
SAMPLING_CHOICES = (
(SEQUENCE, 'Tasks are ordered by Data manager ordering'),
(UNIFORM, 'Tasks are chosen randomly'),
(UNCERTAINTY, 'Tasks are chosen according to model uncertainty scores (active learning mode)')
)
sampling = models.CharField(max_length=100, choices=SAMPLING_CHOICES, null=True, default=SEQUENCE)
show_ground_truth_first = models.BooleanField(_('show ground truth first'), default=False)
show_overlap_first = models.BooleanField(_('show overlap first'), default=False)
overlap_cohort_percentage = models.IntegerField(_('overlap_cohort_percentage'), default=100)
task_data_login = models.CharField(
_('task_data_login'), max_length=256, blank=True, null=True, help_text='Task data credentials: login')
task_data_password = models.CharField(
_('task_data_password'), max_length=256, blank=True, null=True, help_text='Task data credentials: password')
def __init__(self, *args, **kwargs):
super(Project, self).__init__(*args, **kwargs)
self.__original_label_config = self.label_config
self.__maximum_annotations = self.maximum_annotations
self.__overlap_cohort_percentage = self.overlap_cohort_percentage
        # TODO: remove this block once the bugfix for incorrect data types in List is merged
        # logging.warning('! Please, remove code below after patching of all projects (extract_data_types)')
if self.label_config is not None:
if self.data_types != extract_data_types(self.label_config):
self.data_types = extract_data_types(self.label_config)
@property
def num_tasks(self):
return self.tasks.count()
def get_current_predictions(self):
return Prediction.objects.filter(Q(task__project=self.id) & Q(model_version=self.model_version))
@property
def num_predictions(self):
return self.get_current_predictions().count()
@property
def num_annotations(self):
return Annotation.objects.filter(task__project=self).count()
@property
def has_predictions(self):
return self.get_current_predictions().exists()
@property
def has_any_predictions(self):
return Prediction.objects.filter(Q(task__project=self.id)).exists()
@property
def business(self):
return self.created_by.business
@property
def is_private(self):
return None
@property
def secure_mode(self):
return False
@property
def one_object_in_label_config(self):
return len(self.data_types) <= 1
@property
def only_undefined_field(self):
return self.one_object_in_label_config and self.summary.common_data_columns \
and self.summary.common_data_columns[0] == settings.DATA_UNDEFINED_NAME
@property
def get_labeled_count(self):
return self.tasks.filter(is_labeled=True).count()
@property
def get_collected_count(self):
return self.tasks.count()
@property
def get_total_possible_count(self):
"""
Tasks has overlap - how many tc should be accepted
possible count = sum [ t.overlap for t in tasks]
:return: N int total amount of Annotations that should be submitted
"""
if self.tasks.count() == 0:
return 0
return self.tasks.aggregate(Sum('overlap'))['overlap__sum']
@property
def get_available_for_labeling(self):
return self.get_collected_count - self.get_labeled_count
@property
def need_annotators(self):
return self.maximum_annotations - self.num_annotators
@classmethod
def find_by_invite_url(cls, url):
token = url.strip('/').split('/')[-1]
if len(token):
return Project.objects.get(token=token)
else:
raise KeyError(f'Can\'t find Project by invite URL: {url}')
def reset_token(self):
self.token = create_hash()
self.save()
def add_collaborator(self, user):
created = False
with transaction.atomic():
try:
ProjectMember.objects.get(user=user, project=self)
except ProjectMember.DoesNotExist:
ProjectMember.objects.create(user=user, project=self)
created = True
else:
logger.debug(f'Project membership {self} for user {user} already exists')
return created
def has_collaborator(self, user):
return ProjectMember.objects.filter(user=user, project=self).exists()
def has_collaborator_enabled(self, user):
membership = ProjectMember.objects.filter(user=user, project=self)
return membership.exists() and membership.first().enabled
def update_tasks_states(self, maximum_annotations_changed, overlap_cohort_percentage_changed,
tasks_number_changed):
# if only maximum annotations parameter is tweaked
if maximum_annotations_changed and not overlap_cohort_percentage_changed:
tasks_with_overlap = self.tasks.filter(overlap__gt=1)
if tasks_with_overlap.exists():
                # if there is a subset of overlapped tasks, affect only them
tasks_with_overlap.update(overlap=self.maximum_annotations)
else:
# otherwise affect all tasks
self.tasks.update(overlap=self.maximum_annotations)
# if cohort slider is tweaked
elif overlap_cohort_percentage_changed and self.maximum_annotations > 1:
self._rearrange_overlap_cohort()
# if adding/deleting tasks and cohort settings are applied
elif tasks_number_changed and self.overlap_cohort_percentage < 100 and self.maximum_annotations > 1:
self._rearrange_overlap_cohort()
if maximum_annotations_changed or overlap_cohort_percentage_changed:
bulk_update_stats_project_tasks(self.tasks.filter(
Q(annotations__isnull=False) &
Q(annotations__ground_truth=False)))
def _rearrange_overlap_cohort(self):
tasks_with_overlap = self.tasks.filter(overlap__gt=1)
tasks_with_overlap_count = tasks_with_overlap.count()
total_tasks = self.tasks.count()
new_tasks_with_overlap_count = int(self.overlap_cohort_percentage / 100 * total_tasks + 0.5)
if tasks_with_overlap_count > new_tasks_with_overlap_count:
# TODO: warn if we try to reduce current cohort that is already labeled with overlap
reduce_by = tasks_with_overlap_count - new_tasks_with_overlap_count
reduce_tasks = sample_query(tasks_with_overlap, reduce_by)
reduce_tasks.update(overlap=1)
reduced_tasks_ids = reduce_tasks.values_list('id', flat=True)
tasks_with_overlap.exclude(id__in=reduced_tasks_ids).update(overlap=self.maximum_annotations)
elif tasks_with_overlap_count < new_tasks_with_overlap_count:
increase_by = new_tasks_with_overlap_count - tasks_with_overlap_count
tasks_without_overlap = self.tasks.filter(overlap=1)
increase_tasks = sample_query(tasks_without_overlap, increase_by)
increase_tasks.update(overlap=self.maximum_annotations)
tasks_with_overlap.update(overlap=self.maximum_annotations)
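    # Worked example with illustrative numbers: for 10 tasks and
    # overlap_cohort_percentage == 25, new_tasks_with_overlap_count ==
    # int(25 / 100 * 10 + 0.5) == 3, so 3 tasks keep
    # overlap == maximum_annotations and the other 7 end up with overlap == 1.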
def remove_tasks_by_file_uploads(self, file_upload_ids):
self.tasks.filter(file_upload_id__in=file_upload_ids).delete()
def advance_onboarding(self):
""" Move project to next onboarding step
"""
po_qs = self.steps_left.order_by('step__order')
count = po_qs.count()
if count:
po = po_qs.first()
po.finished = True
po.save()
return count != 1
def created_at_prettify(self):
return self.created_at.strftime("%d %b %Y %H:%M:%S")
def onboarding_step_finished(self, step):
""" Mark specific step as finished
"""
pos = ProjectOnboardingSteps.objects.get(code=step)
po = ProjectOnboarding.objects.get(project=self, step=pos)
po.finished = True
po.save()
return po
def data_types_json(self):
return json.dumps(self.data_types)
def available_data_keys(self):
return sorted(list(self.data_types.keys()))
@classmethod
def validate_label_config(cls, config_string):
validate_label_config(config_string)
def validate_config(self, config_string):
self.validate_label_config(config_string)
if not hasattr(self, 'summary'):
return
if self.num_tasks == 0:
logger.debug(f'Project {self} has no tasks: nothing to validate here. Ensure project summary is empty')
self.summary.reset()
return
# validate data columns consistency
fields_from_config = get_all_object_tag_names(config_string)
if not fields_from_config:
logger.debug(f'Data fields not found in labeling config')
return
fields_from_data = set(self.summary.common_data_columns)
fields_from_data.discard(settings.DATA_UNDEFINED_NAME)
if fields_from_data and not fields_from_config.issubset(fields_from_data):
different_fields = list(fields_from_config.difference(fields_from_data))
raise LabelStudioValidationErrorSentryIgnored(f'These fields are not present in the data: {",".join(different_fields)}')
if self.num_annotations == 0:
logger.debug(f'Project {self} has no annotations: nothing to validate here. '
f'Ensure annotations-related project summary is empty')
self.summary.reset(tasks_data_based=False)
return
# validate annotations consistency
annotations_from_config = set(get_all_control_tag_tuples(config_string))
if not annotations_from_config:
logger.debug(f'Annotation schema is not found in config')
return
annotations_from_data = set(self.summary.created_annotations)
if annotations_from_data and not annotations_from_data.issubset(annotations_from_config):
different_annotations = list(annotations_from_data.difference(annotations_from_config))
diff_str = []
for ann_tuple in different_annotations:
from_name, to_name, t = ann_tuple.split('|')
diff_str.append(
f'{self.summary.created_annotations[ann_tuple]} '
f'with from_name={from_name}, to_name={to_name}, type={t}')
diff_str = '\n'.join(diff_str)
raise LabelStudioValidationErrorSentryIgnored(
f'Created annotations are incompatible with provided labeling schema, we found:\n{diff_str}')
# validate labels consistency
labels_from_config = get_all_labels(config_string)
created_labels = self.summary.created_labels
for control_tag_from_data, labels_from_data in created_labels.items():
# Check if labels created in annotations, and their control tag has been removed
if labels_from_data and control_tag_from_data not in labels_from_config:
raise LabelStudioValidationErrorSentryIgnored(
f'There are {sum(labels_from_data.values(), 0)} annotation(s) created with tag '
f'"{control_tag_from_data}", you can\'t remove it')
labels_from_config_by_tag = set(labels_from_config[control_tag_from_data])
if not set(labels_from_data).issubset(set(labels_from_config_by_tag)):
different_labels = list(set(labels_from_data).difference(labels_from_config_by_tag))
diff_str = '\n'.join(f'{l} ({labels_from_data[l]} annotations)' for l in different_labels)
raise LabelStudioValidationErrorSentryIgnored(f'These labels still exist in annotations:\n{diff_str}')
def _label_config_has_changed(self):
return self.label_config != self.__original_label_config
def delete_predictions(self):
predictions = Prediction.objects.filter(task__project=self)
count = predictions.count()
predictions.delete()
return {'deleted_predictions': count}
def get_updated_weights(self):
outputs = parse_config(self.label_config)
control_weights = {}
exclude_control_types = ('Filter',)
for control_name in outputs:
control_type = outputs[control_name]['type']
if control_type in exclude_control_types:
continue
control_weights[control_name] = {
'overall': 1.0,
'type': control_type,
'labels': {label: 1.0 for label in outputs[control_name].get('labels', [])}
}
return control_weights
def save(self, *args, recalc=True, **kwargs):
exists = bool(self.pk)
if self.label_config and (self._label_config_has_changed() or not exists or not self.control_weights):
self.control_weights = self.get_updated_weights()
super(Project, self).save(*args, **kwargs)
project_with_config_just_created = not exists and self.pk and self.label_config
if self._label_config_has_changed() or project_with_config_just_created:
self.data_types = extract_data_types(self.label_config)
if self._label_config_has_changed():
self.__original_label_config = self.label_config
if not exists:
steps = ProjectOnboardingSteps.objects.all()
objs = [ProjectOnboarding(project=self, step=step) for step in steps]
ProjectOnboarding.objects.bulk_create(objs)
# recalc argument: recalculate project task stats
if recalc:
self.update_tasks_states(
maximum_annotations_changed=self.__maximum_annotations != self.maximum_annotations,
overlap_cohort_percentage_changed=self.__overlap_cohort_percentage != self.overlap_cohort_percentage,
tasks_number_changed=False
)
self.__maximum_annotations = self.maximum_annotations
self.__overlap_cohort_percentage = self.overlap_cohort_percentage
if hasattr(self, 'summary'):
# Ensure project.summary is consistent with current tasks / annotations
if self.num_tasks == 0:
self.summary.reset()
elif self.num_annotations == 0:
self.summary.reset(tasks_data_based=False)
def get_member_ids(self):
if hasattr(self, 'team_link'):
# project has defined team scope
# TODO: avoid checking team but rather add all project members when creating a project
return self.team_link.team.members.values_list('user', flat=True)
else:
from users.models import User
# TODO: may want to return all users from organization
return User.objects.none()
def has_team_user(self, user):
return hasattr(self, 'team_link') and self.team_link.team.has_user(user)
def annotators(self):
""" Annotators connected to this project including team members
"""
from users.models import User
member_ids = self.get_member_ids()
team_members = User.objects.filter(id__in=member_ids).order_by('email')
# add members from invited projects
project_member_ids = self.members.values_list('user__id', flat=True)
project_members = User.objects.filter(id__in=project_member_ids)
annotators = team_members | project_members
# set annotator.team_member=True if annotator is not an invited user
annotators = annotators.annotate(
team_member=Case(
When(id__in=project_member_ids, then=Value(False)),
default=Value(True),
output_field=BooleanField(),
)
)
return annotators
def annotators_with_annotations(self, min_count=500):
""" Annotators with annotation number > min_number
:param min_count: minimal annotation number to leave an annotators
:return: filtered annotators
"""
annotators = self.annotators()
q = Q(annotations__task__project=self) & Q_task_finished_annotations & Q(annotations__ground_truth=False)
annotators = annotators.annotate(annotation_count=Count('annotations', filter=q, distinct=True))
return annotators.filter(annotation_count__gte=min_count)
def labeled_tasks(self):
return self.tasks.filter(is_labeled=True)
def has_annotations(self):
from tasks.models import Annotation # prevent cycling imports
return Annotation.objects.filter(Q(task__project=self) & Q(ground_truth=False)).count() > 0
# [TODO] this should be a template tag or something like this
@property
def label_config_line(self):
c = self.label_config
return config_line_stipped(c)
def get_sample_task(self, label_config=None):
config = label_config or self.label_config
task, _, _ = get_sample_task(config)
return task
def eta(self):
"""
Show eta for project to be finished
eta = avg task annotations finish time * remain annotations
task has overlap = amount of task annotations to consider as finished (is_labeled)
remain annotations = sum ( task annotations to be done to fulfill each unfinished task overlap)
:return: time in seconds
"""
# finished tasks * overlap
finished_tasks = Task.objects.filter(project=self.id, is_labeled=True)
# one could make more than need to overlap
min_n_finished_annotations = sum([ft.overlap for ft in finished_tasks])
annotations_unfinished_tasks = Annotation.objects.filter(
task__project=self.id, task__is_labeled=False, ground_truth=False, result__isnull=False).count()
# get minimum remain annotations
total_annotations_needed = self.get_total_possible_count
annotations_remain = total_annotations_needed - min_n_finished_annotations - annotations_unfinished_tasks
# get average time of all finished TC
finished_annotations = Annotation.objects.filter(
Q(task__project=self.id) & Q(ground_truth=False), result__isnull=False).values('lead_time')
avg_lead_time = finished_annotations.aggregate(avg_lead_time=Avg('lead_time'))['avg_lead_time']
if avg_lead_time is None:
return None
return avg_lead_time * annotations_remain
def finished(self):
return not self.tasks.filter(is_labeled=False).exists()
def annotations_lead_time(self):
annotations = Annotation.objects.filter(Q(task__project=self.id) & Q(ground_truth=False))
return annotations.aggregate(avg_lead_time=Avg('lead_time'))['avg_lead_time']
@staticmethod
def django_settings():
return settings
@staticmethod
def max_tasks_file_size():
return settings.TASKS_MAX_FILE_SIZE
def get_control_tags_from_config(self):
return parse_config(self.label_config)
def get_parsed_config(self):
return parse_config(self.label_config)
def __str__(self):
return f'{self.title} (id={self.id})' or _("Business number %d") % self.pk
class Meta:
db_table = 'project'
class ProjectOnboardingSteps(models.Model):
"""
"""
DATA_UPLOAD = "DU"
CONF_SETTINGS = "CF"
PUBLISH = "PB"
INVITE_EXPERTS = "IE"
STEPS_CHOICES = (
(DATA_UPLOAD, "Import your data"),
(CONF_SETTINGS, "Configure settings"),
(PUBLISH, "Publish project"),
(INVITE_EXPERTS, "Invite collaborators")
)
code = models.CharField(max_length=2, choices=STEPS_CHOICES, null=True)
title = models.CharField(_('title'), max_length=1000, null=False)
description = models.TextField(_('description'), null=False)
order = models.IntegerField(default=0)
created_at = models.DateTimeField(_('created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('updated at'), auto_now=True)
class Meta:
ordering = ['order']
class ProjectOnboarding(models.Model):
"""
"""
step = models.ForeignKey(ProjectOnboardingSteps, on_delete=models.CASCADE, related_name="po_through")
project = models.ForeignKey(Project, on_delete=models.CASCADE)
finished = models.BooleanField(default=False)
created_at = models.DateTimeField(_('created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('updated at'), auto_now=True)
def save(self, *args, **kwargs):
super(ProjectOnboarding, self).save(*args, **kwargs)
if ProjectOnboarding.objects.filter(project=self.project, finished=True).count() == 4:
self.project.skip_onboarding = True
self.project.save(recalc=False)
class ProjectMember(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='project_memberships', help_text='User ID') # noqa
project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='members', help_text='Project ID')
enabled = models.BooleanField(default=True, help_text='Project member is enabled')
created_at = models.DateTimeField(_('created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('updated at'), auto_now=True)
class ProjectSummary(models.Model):
project = AutoOneToOneField(Project, primary_key=True, on_delete=models.CASCADE, related_name='summary')
created_at = models.DateTimeField(_('created at'), auto_now_add=True, help_text='Creation time')
# { col1: task_count_with_col1, col2: task_count_with_col2 }
all_data_columns = JSONField(
_('all data columns'), null=True, default=dict, help_text='All data columns found in imported tasks')
# [col1, col2]
common_data_columns = JSONField(
_('common data columns'), null=True, default=list, help_text='Common data columns found across imported tasks')
# { (from_name, to_name, type): annotation_count }
created_annotations = JSONField(
_('created annotations'), null=True, default=dict, help_text='Unique annotation types identified by tuple (from_name, to_name, type)') # noqa
# { from_name: {label1: task_count_with_label1, label2: task_count_with_label2} }
created_labels = JSONField(
_('created labels'), null=True, default=dict, help_text='Unique labels')
def has_permission(self, user):
return self.project.has_permission(user)
def reset(self, tasks_data_based=True):
if tasks_data_based:
self.all_data_columns = {}
self.common_data_columns = []
self.created_annotations = {}
self.created_labels = {}
self.save()
def update_data_columns(self, tasks):
common_data_columns = set()
all_data_columns = dict(self.all_data_columns)
for task in tasks:
try:
task_data = get_attr_or_item(task, 'data')
except KeyError:
task_data = task
task_data_keys = task_data.keys()
for column in task_data_keys:
all_data_columns[column] = all_data_columns.get(column, 0) + 1
if not common_data_columns:
common_data_columns = set(task_data_keys)
else:
common_data_columns &= set(task_data_keys)
self.all_data_columns = all_data_columns
if not self.common_data_columns:
self.common_data_columns = list(sorted(common_data_columns))
else:
self.common_data_columns = list(sorted(set(self.common_data_columns) & common_data_columns))
logger.debug(f'summary.all_data_columns = {self.all_data_columns}')
logger.debug(f'summary.common_data_columns = {self.common_data_columns}')
self.save()
def remove_data_columns(self, tasks):
all_data_columns = dict(self.all_data_columns)
keys_to_remove = []
for task in tasks:
task_data = get_attr_or_item(task, 'data')
for key in task_data.keys():
if key in all_data_columns:
all_data_columns[key] -= 1
if all_data_columns[key] == 0:
keys_to_remove.append(key)
all_data_columns.pop(key)
self.all_data_columns = all_data_columns
if keys_to_remove:
common_data_columns = list(self.common_data_columns)
for key in keys_to_remove:
if key in common_data_columns:
common_data_columns.remove(key)
self.common_data_columns = common_data_columns
logger.debug(f'summary.all_data_columns = {self.all_data_columns}')
logger.debug(f'summary.common_data_columns = {self.common_data_columns}')
self.save()
def _get_annotation_key(self, result):
result_type = result.get('type', None)
if result_type in ('relation', 'pairwise', None):
return None
if 'from_name' not in result or 'to_name' not in result:
logger.error(
'Unexpected annotation.result format: "from_name" or "to_name" not found in %r', result,
extra={'sentry_skip': True}
)
return None
result_from_name = result['from_name']
key = get_annotation_tuple(result_from_name, result['to_name'], result_type or '')
return key
def _get_labels(self, result):
result_type = result.get('type')
result_value = result['value'].get(result_type)
if not result_value or not isinstance(result_value, list) or result_type == 'text':
# Non-list values are not labels. TextArea list values (texts) are not labels either.
return []
# Labels are stored in list
labels = []
for label in result_value:
labels.append(str(label))
return labels
def update_created_annotations_and_labels(self, annotations):
created_annotations = dict(self.created_annotations)
labels = dict(self.created_labels)
for annotation in annotations:
results = get_attr_or_item(annotation, 'result') or []
if not isinstance(results, list):
continue
for result in results:
# aggregate annotation types
key = self._get_annotation_key(result)
if not key:
continue
created_annotations[key] = created_annotations.get(key, 0) + 1
from_name = result['from_name']
# aggregate labels
if from_name not in labels:  # check the local copy so counts from this batch are not reset
labels[from_name] = dict()
for label in self._get_labels(result):
labels[from_name][label] = labels[from_name].get(label, 0) + 1
logger.debug(f'summary.created_annotations = {created_annotations}')
logger.debug(f'summary.created_labels = {labels}')
self.created_annotations = created_annotations
self.created_labels = labels
self.save()
def remove_created_annotations_and_labels(self, annotations):
created_annotations = dict(self.created_annotations)
labels = dict(self.created_labels)
for annotation in annotations:
results = get_attr_or_item(annotation, 'result') or []
if not isinstance(results, list):
continue
for result in results:
# reduce annotation counters
key = self._get_annotation_key(result)
if key in created_annotations:
created_annotations[key] -= 1
if created_annotations[key] == 0:
created_annotations.pop(key)
# reduce labels counters
from_name = result.get('from_name', None)
if from_name not in labels:
continue
for label in self._get_labels(result):
label = str(label)
if label in labels[from_name]:
labels[from_name][label] -= 1
if labels[from_name][label] == 0:
labels[from_name].pop(label)
if not labels[from_name]:
labels.pop(from_name)
logger.debug(f'summary.created_annotations = {created_annotations}')
logger.debug(f'summary.created_labels = {labels}')
self.created_annotations = created_annotations
self.created_labels = labels
self.save()
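# A minimal, framework-free sketch (hedged: plain integers instead of Django
# querysets) of the cohort arithmetic used by _rearrange_overlap_cohort above:
# the target cohort size is overlap_cohort_percentage percent of the total
# task count, rounded half-up via int(x + 0.5).
def _cohort_target_size(total_tasks, overlap_cohort_percentage):
    # int(x + 0.5) rounds half-up for the non-negative values used here
    return int(overlap_cohort_percentage / 100 * total_tasks + 0.5)
# _cohort_target_size(10, 25) == 3  (2.5 rounds up)
# _cohort_target_size(7, 50) == 4   (3.5 rounds up)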
|
from TikTokApi import TikTokApi
api = TikTokApi()
count = 1
for video in api.hashtag(name="funny").videos(count=count):
print(video)
user = api.user(username='rotififi')
print(user.as_dict)
for video in user.videos(count=count):
print(video)
|
#a=eval(input(">>"))
#b=eval(input(">>"))
a=10000
for i in range(5):
a=a*(1+0.05)
print(a)
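# The loop above compounds 5% interest annually. As a cross-check, the
# closed-form formula principal * (1 + rate) ** years gives the same final
# amount as the last value printed by the loop:
principal, rate, years = 10000, 0.05, 5
print(principal * (1 + rate) ** years)  # 12762.815625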
|
from django.apps import AppConfig
class ViewcountConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'viewcount'
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitsend Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for skipping signature validation on old blocks.
Test logic for skipping signature validation on blocks which we've assumed
valid (https://github.com/bitcoin/bitcoin/pull/9484)
We build a chain that includes an invalid signature for one of the
transactions:
0: genesis block
1: block 1 with coinbase transaction output.
2-101: bury that block with 100 blocks so the coinbase transaction
output can be spent
102: a block containing a transaction spending the coinbase
transaction output. The transaction has an invalid signature.
103-2202: bury the bad block with just over two weeks' worth of blocks
(2100 blocks)
Start three nodes:
- node0 has no -assumevalid parameter. Try to sync to block 2202. It will
reject block 102 and only sync as far as block 101
- node1 has -assumevalid set to the hash of block 102. Try to sync to
block 2202. node1 will sync all the way to block 2202.
- node2 has -assumevalid set to the hash of block 102. Try to sync to
block 200. node2 will reject block 102 since it's assumed valid, but it
isn't buried by at least two weeks' work.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.key import CECKey
from test_framework.messages import (
CBlockHeader,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
msg_block,
msg_headers
)
from test_framework.mininode import P2PInterface
from test_framework.script import (CScript, OP_TRUE)
from test_framework.test_framework import BitsendTestFramework
from test_framework.util import assert_equal
class BaseNode(P2PInterface):
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
class AssumeValidTest(BitsendTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.add_nodes(3)
# Start node0. We don't start the other nodes yet since
# we need to pre-mine a block with an invalid transaction
# signature so we can pass in the block hash as assumevalid.
self.start_node(0)
def send_blocks_until_disconnected(self, p2p_conn):
"""Keep sending blocks to the node until we're disconnected."""
for i in range(len(self.blocks)):
if not p2p_conn.is_connected:
break
try:
p2p_conn.send_message(msg_block(self.blocks[i]))
except IOError as e:
assert not p2p_conn.is_connected
break
def assert_blockchain_height(self, node, height):
"""Wait until the blockchain is no longer advancing and verify it's reached the expected height."""
last_height = node.getblock(node.getbestblockhash())['height']
timeout = 10
while True:
time.sleep(0.25)
current_height = node.getblock(node.getbestblockhash())['height']
if current_height != last_height:
last_height = current_height
if timeout < 0:
assert False, "blockchain too short after timeout: %d" % current_height
timeout -= 0.25
continue
elif current_height > height:
assert False, "blockchain too long: %d" % current_height
elif current_height == height:
break
def run_test(self):
p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
# Build the blockchain
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
self.blocks = []
# Get a pubkey for the coinbase TXO
coinbase_key = CECKey()
coinbase_key.set_secretbytes(b"horsebattery")
coinbase_pubkey = coinbase_key.get_pubkey()
# Create the first block with a coinbase output to our key
height = 1
block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
self.blocks.append(block)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
# Bury the block 100 deep so the coinbase output is spendable
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Create a transaction spending the coinbase output with an invalid (null) signature
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
tx.calc_sha256()
block102 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block102.vtx.extend([tx])
block102.hashMerkleRoot = block102.calc_merkle_root()
block102.rehash()
block102.solve()
self.blocks.append(block102)
self.tip = block102.sha256
self.block_time += 1
height += 1
# Bury the assumed valid block 2100 deep
for i in range(2100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.nVersion = 4
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
self.nodes[0].disconnect_p2ps()
# Start node1 and node2 with assumevalid so they accept a block with a bad signature.
self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])
p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
p2p2 = self.nodes[2].add_p2p_connection(BaseNode())
# send header lists to all three nodes
p2p0.send_header_for_blocks(self.blocks[0:2000])
p2p0.send_header_for_blocks(self.blocks[2000:])
p2p1.send_header_for_blocks(self.blocks[0:2000])
p2p1.send_header_for_blocks(self.blocks[2000:])
p2p2.send_header_for_blocks(self.blocks[0:200])
# Send blocks to node0. Block 102 will be rejected.
self.send_blocks_until_disconnected(p2p0)
self.assert_blockchain_height(self.nodes[0], 101)
# Send all blocks to node1. All blocks will be accepted.
for i in range(2202):
p2p1.send_message(msg_block(self.blocks[i]))
# Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
p2p1.sync_with_ping(120)
assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
# Send blocks to node2. Block 102 will be rejected.
self.send_blocks_until_disconnected(p2p2)
self.assert_blockchain_height(self.nodes[2], 101)
if __name__ == '__main__':
AssumeValidTest().main()
|
def json_to_attrs(obj, json_res, types=(str, int, type(None), )):
for key, val in json_res.items():
if type(val) in types:
setattr(obj, key, val if type(val) is not str else val.strip())
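# Hedged usage sketch: _Record is a hypothetical target class. Values of the
# accepted types are copied onto the object as attributes; strings are
# stripped; other types (e.g. list) are skipped.
if __name__ == '__main__':
    class _Record:
        pass

    rec = _Record()
    json_to_attrs(rec, {'name': ' alice ', 'age': 30, 'tags': ['x']})
    assert rec.name == 'alice' and rec.age == 30 and not hasattr(rec, 'tags')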
|
#encoding=utf-8
chinese_non_stops = u'"#$%&\'()*+,-/:;<=>@[\\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏'
chinese_stops = u"!?。。"
chinese_punctuations = chinese_non_stops + chinese_stops
# Check whether a unicode character is a Chinese character
def is_chinese(uchar):
if u'\u4e00' <= uchar <= u'\u9fff':
return True
else:
return False
# Check whether a unicode character is a digit
def is_number(uchar):
if u'\u0030' <= uchar <= u'\u0039':
return True
else:
return False
def is_chinese_punctuation(uchar):
if uchar in chinese_punctuations:
return True
return False
# Check whether a unicode character is an English letter
def is_alphabet(uchar):
if (u'\u0041' <= uchar <= u'\u005a') or (u'\u0061' <= uchar <= u'\u007a'):
return True
else:
return False
# Check whether a character is neither a Chinese character, a digit, nor an English letter
def is_other(uchar):
if not (is_chinese(uchar) or is_number(uchar) or is_alphabet(uchar)):
return True
else:
return False
if __name__ == "__main__":
print(is_chinese(";"))
|
# --------------
class_1 = ['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio']
class_2 = ['Hilary Mason','Carla Gentry','Corinna Cortes']
new_class = class_1 + class_2
print(new_class)
new_class.append('Peter Warden')
print(new_class)
new_class.remove('Carla Gentry')
print(new_class)
# --------------
# Code starts here
courses = {'Math':65, 'English':70, 'History':80, 'French':70, 'Science': 60}
print(courses.keys())
print(courses.values())
total = sum(courses.values())
print(total)
percentage = total/500 * 100
print(percentage)
# Code ends here
# --------------
# Code starts here
mathematics = {'Geoffrey Hinton':78, 'Andrew Ng':95, 'Sebastian Raschka':65, 'Yoshua Bengio':50, 'Hilary Mason':70, 'Corinna Cortes':66, 'Peter Warden':75}
# print(mathematics)
topper = max(mathematics, key = mathematics.get)
print(topper)
# Code ends here
# --------------
# Given string
topper = 'andrew ng'
first_name = (topper.split()[0])
last_name = (topper.split()[1])
full_name = last_name +' '+ first_name
certificate_name = full_name.upper()
# Code starts here
print(certificate_name)
# Code ends here
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import argparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "bitcoin_func_test_"
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class BitcoinTestMetaClass(type):
"""Metaclass for BitcoinTestFramework.
Ensures that any attempt to register a subclass of `BitcoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'BitcoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("BitcoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("BitcoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = False
self.bind_to_localhost_only = True
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
parser.add_argument("--perf", dest="perf", default=False, action="store_true",
help="profile running nodes with perf for the duration of the test")
self.add_options(parser)
self.options = parser.parse_args()
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
self.options.bitcoind = os.getenv("BITCOIND", default=config["environment"]["BUILDDIR"] + '/src/particld' + config["environment"]["EXEEXT"])
self.options.bitcoincli = os.getenv("BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/particl-cli' + config["environment"]["EXEEXT"])
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
success = TestStatus.FAILED
try:
if self.options.usecli:
if not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: bitcoinds were not stopped and may still be running")
should_clean_up = (
not self.options.nocleanup and
not self.options.noshutdown and
success != TestStatus.FAILED and
not self.options.perf
)
if should_clean_up:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
elif self.options.perf:
self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
cleanup_tree_on_exit = False
else:
self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.nodes[0].generate(1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
def import_deterministic_coinbase_privkeys(self):
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
rpchost=rpchost,
timewait=self.rpc_timeout,
bitcoind=binary[i],
bitcoin_cli=self.options.bitcoincli,
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
start_perf=self.options.perf,
))
def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
self.nodes[i].wait_until_stopped()
def stop_nodes(self, wait=0):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_blocks(self, nodes=None, **kwargs):
sync_blocks(nodes or self.nodes, **kwargs)
def sync_mempools(self, nodes=None, **kwargs):
sync_mempools(nodes or self.nodes, **kwargs)
def sync_all(self, nodes=None, **kwargs):
self.sync_blocks(nodes, **kwargs)
self.sync_mempools(nodes, **kwargs)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 199-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [self.options.bitcoind, "-datadir=" + datadir, '-disablewallet']
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.cachedir, i),
extra_conf=["bind=127.0.0.1"],
extra_args=[],
rpchost=None,
timewait=self.rpc_timeout,
bitcoind=self.options.bitcoind,
bitcoin_cli=self.options.bitcoincli,
coverage_dir=None,
cwd=self.options.tmpdir,
))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 199-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# The 4th node gets only 24 immature blocks so that the very last
# block in the cache does not age too much (have an old tip age).
# This is needed so that we are out of IBD when the test starts,
# see the tip age check in IsInitialBlockDownload().
for i in range(8):
self.nodes[0].generatetoaddress(25 if i != 7 else 24, self.nodes[i % 4].get_deterministic_priv_key().address)
self.sync_blocks()
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
os.rmdir(cache_path(i, 'wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path(i)):
if entry not in ['chainstate', 'blocks']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("bitcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_cli(self):
"""Skip the running test if bitcoin-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("bitcoin-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_CLI")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_WALLET")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_ZMQ")
def is_usbdevice_compiled(self):
"""Checks whether the usbdevice module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_USBDEVICE")
|
from model_seq_cnn import *
from tqdm import trange
import torch
import numpy as np
import unittest
class TestSeqCNN(unittest.TestCase):
def test_shape_2d(self):
for i in trange(20, desc='test_shape_2d'):
in_C = np.random.randint(2, 4)
in_H = np.random.randint(30, 100)
in_W = np.random.randint(30, 100)
model = SequentialCNN(
in_C, in_H, in_W,
[
(np.random.randint(5, 10), np.random.randint(1, 3) * 2 + 1)
for _ in range(np.random.randint(1, 3))
]
)
x = torch.zeros((
np.random.randint(5, 10), # batchsize
np.random.randint(5, 10), # clip_length
in_C, in_H, in_W
)) # (B, N, C, H, W)
y = model(x) # (B, N, CC, HH, WW)
assert y.shape[-3:] == model.output_shape()
assert y.shape[:2] == x.shape[:2]
def test_shape_3d(self):
for i in trange(20, desc='test_shape_3d'):
in_C = np.random.randint(2, 4)
in_H = np.random.randint(30, 100)
in_W = np.random.randint(30, 100)
B = np.random.randint(5, 10)
L = np.random.randint(5, 10)
model = SequentialCNN3D(
in_C, in_H, in_W,
[
(
np.random.randint(5, 10), # outC
np.random.randint(1, 3) * 2 + 1, # time_kernel
np.random.randint(1, 3) * 2 + 1, # space_kernel
True
)
for _ in range(np.random.randint(1, 3))
]
)
x = torch.zeros((B, L, in_C, in_H, in_W)) # (B, L, C, H, W)
y = model(x) # (B, L, CC, HH, WW)
assert y.shape[-3:] == model.output_shape() # CC HH WW
assert y.shape[:2] == x.shape[:2] # B L
def test_shape_3d_front_time_pad(self):
for i in trange(20, desc='test_shape_3d_front_time_pad'):
in_C = np.random.randint(2, 4)
in_H = np.random.randint(30, 100)
in_W = np.random.randint(30, 100)
B = np.random.randint(5, 10)
L = np.random.randint(5, 10)
model = SequentialCNN3DFrontTimePad(
in_C, in_H, in_W,
[
(
np.random.randint(5, 10), # outC
np.random.randint(1, 3) * 2 + 1, # time_kernel
np.random.randint(1, 3) * 2 + 1, # space_kernel
True
)
for _ in range(np.random.randint(1, 3))
]
)
x = torch.zeros((B, L, in_C, in_H, in_W)) # (B, L, C, H, W)
y = model(x) # (B, L, CC, HH, WW)
assert y.shape[-3:] == model.output_shape() # CC HH WW
assert y.shape[:2] == x.shape[:2] # B L
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
###
from castor.config import CassandraKeyspace
from cassandra import InvalidRequest
import re
class Operation:
def add(stack):
x = stack.pop()
y = stack.pop()
return x+y
def minus(stack):
x = stack.pop()
y = stack.pop()
return y-x
def div(stack):
x = stack.pop()
y = stack.pop()
return y/x
def mult(stack):
x = stack.pop()
y = stack.pop()
return x*y
def getmax(stack):
x = stack.pop()
y = stack.pop()
return max(x, y)
def variable_defined(stack):
substitution = stack.pop()
evaluated_variable = stack.pop()
if evaluated_variable:
return substitution
return float(0)
def variable_undefined(stack):
substitution = stack.pop()
evaluated_variable = stack.pop()
if evaluated_variable is None:
return substitution
return evaluated_variable
def is_undef(stack):
return stack.pop() is None
def if_condition(stack):
is_false = stack.pop()
is_true = stack.pop()
condition = stack.pop()
if condition:
return is_true
else:
return is_false
@staticmethod
def compare(stack, op):
y = stack.pop()
x = stack.pop()
return op(x,y)
def is_gt(stack): return Operation.compare(stack, lambda x,y: x>y)
def is_ge(stack): return Operation.compare(stack, lambda x,y: x>=y)
def is_lt(stack): return Operation.compare(stack, lambda x,y: x<y)
def is_le(stack): return Operation.compare(stack, lambda x,y: x<=y)
operations = { '+': add,'-': minus, '/': div, '*': mult, 'MAX': getmax, 'V_DEF': variable_defined,
'V_UNDEF': variable_undefined, 'UN': is_undef, 'IF': if_condition,
'GT': is_gt, 'LT': is_lt, 'GE': is_ge, 'LE': is_le
}
class BadRPNException(Exception):
pass
class NoDataSetException(Exception):
pass
class StackError(Exception):
pass
class HostNeededError(Exception):
pass
def __init__(self, rpn_expr):
self.const = re.compile(r'^-?\d+(\.\d+)?$')
rpn_array = rpn_expr.split(',')
#datasets need to be interpolated before calling method evaluate
self.datasets_interpolated = False
self.datasets = {}
self.rpn = []
self.variables = []
for elem in rpn_array:
is_const = self.const.match(elem)
if is_const:
self.rpn.append(float(elem))
elif elem in Operation.operations:
self.rpn.append(elem)
else:
#it is a variable
#a variable name can end with UNDEF:constant or DEF:constant (a default value)
#the character ':' is allowed in variable names, so we split on ':' and rejoin
#the elements that precede UNDEF or DEF
variable_elements = elem.split(':')
clause_index = None
if 'UNDEF' in variable_elements:
clause_index = variable_elements.index('UNDEF')
if 'DEF' in variable_elements:
clause_index = variable_elements.index('DEF')
if clause_index is not None:
if clause_index == 0 or clause_index == len(variable_elements) - 1:  # UNDEF/DEF must be followed by a constant
raise BadRPNException("Variable %s For expression %s"%(elem, rpn_expr))
variable_name = ':'.join(variable_elements[:clause_index])
self.variables.append(variable_name)
self.rpn.append(variable_name)
self.rpn.append(float(variable_elements[clause_index+1]))
self.rpn.append('V_%s'%variable_elements[clause_index])
else:
variable_name = elem
self.variables.append(variable_name)
self.rpn.append(variable_name)
def get_variables(self):
return self.variables
def set_dataset(self,name, dataset):
self.datasets_interpolated = False
self.datasets[name] = dataset
def __interpolate_datasets__(self):
"""
until the smallest step of all datasets exceed required_step + 30% we multiply required_step by two
(only one value on two value is taken) and method interpolate is called on dataset
"""
self.datasets_interpolated = True
first_dataset = self.datasets[next(iter(self.datasets))]
required_step = first_dataset.get_step()
start = first_dataset.get_first_ts()
end = first_dataset.get_last_ts()
if required_step is None:
return (start, None, end)
min_real_step = min(first_dataset.get_raw_mean_step(), 1800)
#sometimes there can be a big hole in the data, making get_raw_mean_step enormous,
#so a graph with a step of 1800 is preferable in that case
for dataset in self.datasets.values():
if dataset.get_raw_mean_step() < min_real_step:
min_real_step = dataset.get_raw_mean_step()
while min_real_step > (required_step * 1.3):
"""
if the most precise real step for all datasets is less precise than required_step
we need to augment required_step. We will multiply it by two. (==> Only only one value on two will be taken)
to avoid to create unused points
"""
required_step = required_step*2
for dataset in self.datasets.values():
dataset.interpolate(required_step)
return(start,required_step,end)
def get_available_ts(self):
"""
Returns the available timestamps.
If the required step is 30 but all datasets only have a point every 60s, e.g. 1385430:5 1385490:None 1385520:10 1385590:None,
this function will return an iterator with a step of 60.
"""
(start, consolidated_step, end) = self.__interpolate_datasets__()
if start is None:
return
if consolidated_step is None:
first_dataset = self.datasets[next(iter(self.datasets))]
for k in first_dataset.get_data().keys():
yield k
else:
for i in range(start,end+1,consolidated_step):
yield i
def evaluate(self, timestamp, consolidation_func='AVG'):
if not self.datasets_interpolated:
self.__interpolate_datasets__()
stack = []
for element in self.rpn:
if element in Operation.operations:
try:
stack.append(Operation.operations[element](stack))
except ZeroDivisionError:
stack.append(None)
except IndexError:
raise Operation.StackError("Empty stack evaluating %s at %s"%(self.rpn, element))
except TypeError:
stack.append(None)
elif self.const.match(str(element)):
stack.append(element)
else:
#its a variable
if element in self.datasets:
extracted_value = None
try:
extracted_value = self.datasets[element][timestamp][consolidation_func]
except KeyError:
pass
stack.append(extracted_value)
else:
raise Operation.NoDataSetException("Dataset for %s doesn't exist"%element)
#print stack
if len(stack) == 1:
return stack[0]
else:
raise Operation.StackError("Bad number of elements in stack (%s) at the end of RPN expression %s, stack is %s"%(len(stack), self.rpn, stack))
|
def f():
global xx
xx = 1
f()
print(xx)  # prints 1: the global statement in f() binds the module-level name xx
|
import numpy as np
from multiprocessing import cpu_count
from napari_plugin_engine import napari_hook_implementation
from pathlib import Path
from napari_czifile2.io import CZISceneFile
@napari_hook_implementation
def napari_get_reader(path):
if isinstance(path, list):
if any(Path(p).suffix.lower() != '.czi' for p in path):
return None
else:
if Path(path).suffix.lower() != '.czi':
return None
return reader_function
def reader_function(paths):
layer_data = []
if not isinstance(paths, list):
paths = [paths]
for path in paths:
num_scenes = CZISceneFile.get_num_scenes(path)
for scene_index in range(num_scenes):
with CZISceneFile(path, scene_index) as f:
data = f.as_tzcyx0_array(max_workers=cpu_count())
# https://github.com/napari/napari/issues/2348
# https://github.com/BodenmillerGroup/napari-czifile2/issues/4
if not f.is_rgb:
data = data[:, :, :, :, :, 0]
metadata = {
'rgb': f.is_rgb,
'channel_axis': 2,
'translate': (f.pos_t_seconds, f.pos_z_um, f.pos_y_um, f.pos_x_um),
'scale': (f.scale_t_seconds, f.scale_z_um, f.scale_y_um, f.scale_x_um),
}
if f.channel_names is not None:
if num_scenes == 1:
metadata['name'] = f.channel_names
elif num_scenes > 1:
metadata['name'] = [f'S{scene_index:02d} {channel_name}' for channel_name in f.channel_names]
layer_data.append((data, metadata, 'image'))
return layer_data
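# Hedged usage sketch (the .czi path below is hypothetical): the hook returns
# a callable that napari invokes with the path(s); calling it directly yields
# (data, metadata, layer_type) tuples.
# reader = napari_get_reader('example.czi')
# if reader is not None:
#     for data, metadata, layer_type in reader('example.czi'):
#         print(layer_type, data.shape, metadata.get('name'))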
|
import argparse
import sys
import httplib2
from apiclient.discovery import build
from oauth2client import client
from oauth2client import file
from oauth2client import tools
def get_service(api_name, api_version, scope, client_secrets_path):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser]
)
flags = parser.parse_args([])
flow = client.flow_from_clientsecrets(
client_secrets_path,
scope=scope,
message=tools.message_if_missing(client_secrets_path),
)
storage = file.Storage(api_name + ".dat")
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(flow, storage, flags)
http = credentials.authorize(http=httplib2.Http())
service = build(api_name, api_version, http=http)
return service
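# Hedged usage sketch (API name, scope and secrets path are illustrative
# placeholders, not values mandated by this helper):
# service = get_service(
#     api_name='analytics', api_version='v3',
#     scope=['https://www.googleapis.com/auth/analytics.readonly'],
#     client_secrets_path='client_secrets.json')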
|
from dataclasses import dataclass
from typing import Tuple, Type
from lahja import BaseEvent, BaseRequestResponseEvent
from libp2p.peer.id import ID
@dataclass
class Libp2pPeersResponse(BaseEvent):
"""
libp2p_peers: Handshaked Peer IDs.
"""
result: Tuple[ID, ...]
class Libp2pPeersRequest(BaseRequestResponseEvent[Libp2pPeersResponse]):
@staticmethod
def expected_response_type() -> Type[Libp2pPeersResponse]:
return Libp2pPeersResponse
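# Hedged sketch of how the request/response pair is typically used with a
# lahja endpoint (the `endpoint` object is assumed to exist and be connected):
# response = await endpoint.request(Libp2pPeersRequest())
# peer_ids: Tuple[ID, ...] = response.result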
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pprint
import pytest
import shlex
from subprocess import call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.environ import HIVE_MAJOR_VERSION
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon,
SkipIfGCS, SkipIfLocal)
from tests.common.test_dimensions import create_uncompressed_text_dimension
from tests.util.test_file_parser import QueryTestSectionReader
# The purpose of view compatibility testing is to check whether views created in Hive
# can be queried in Impala and vice versa. A test typically consists of
# the following actions specified in different test sections.
# 1. create a view with a certain definition using Hive and Impala
# 2. explain a "select *" query on the view created by Hive using Hive and Impala
# 3. explain a "select *" query on the view created by Impala using Hive and Impala
# For each of the steps above its corresponding test section specifies our expectations
# on whether Impala and Hive will succeed or fail.
#
# Impala and Hive's SQL dialects are not fully compatible. We intentionally rely
# on the view creation mechanism instead of just testing various SQL statements in
# Impala and Hive, because view creation transforms the original view definition into
# a so-called "extended view definition". As this process of transformation could
# potentially change in Impala and/or Hive simply testing various SQL statements
# in Impala and Hive would be insufficient.
# Missing Coverage: Views created by Hive and Impala being visible and queryable by
# each other on non-HDFS storage.
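# A hypothetical test section, sketched from the section names and the
# ENGINE=SUCCESS/FAILURE format parsed below (the view itself is illustrative):
# ---- CREATE_VIEW
# create view simple_view as select 1
# ---- CREATE_VIEW_RESULTS
# IMPALA=SUCCESS
# HIVE=SUCCESS
# ---- QUERY_HIVE_VIEW_RESULTS
# IMPALA=SUCCESS
# HIVE=SUCCESS
# ---- QUERY_IMPALA_VIEW_RESULTS
# IMPALA=SUCCESS
# HIVE=SUCCESS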
@SkipIfS3.hive
@SkipIfGCS.hive
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfIsilon.hive
@SkipIfLocal.hive
class TestViewCompatibility(ImpalaTestSuite):
VALID_SECTION_NAMES = ["CREATE_VIEW", "CREATE_VIEW_RESULTS",\
"QUERY_HIVE_VIEW_RESULTS", "QUERY_IMPALA_VIEW_RESULTS"]
@classmethod
  def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestViewCompatibility, cls).add_test_dimensions()
if cls.exploration_strategy() != 'exhaustive':
pytest.skip("Should only run in exhaustive due to long execution time.")
# don't use any exec options, running exactly once is fine
cls.ImpalaTestMatrix.clear_dimension('exec_option')
# There is no reason to run these tests using all dimensions.
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def test_view_compatibility(self, vector, unique_database):
self._run_view_compat_test_case('QueryTest/views-compatibility', vector,
unique_database)
if HIVE_MAJOR_VERSION == 2:
self._run_view_compat_test_case('QueryTest/views-compatibility-hive2-only', vector,
unique_database)
if HIVE_MAJOR_VERSION >= 3:
self._run_view_compat_test_case('QueryTest/views-compatibility-hive3-only', vector,
unique_database)
def _run_view_compat_test_case(self, test_file_name, vector, test_db_name):
"""
Runs a view-compatibility test file, containing the following sections:
---- CREATE_VIEW
contains a view creation statement to be executed in Impala and Hive
---- CREATE_VIEW_RESULTS
whether we expect the view creation in Impala/Hive to succeed/fail
---- QUERY_HIVE_VIEW_RESULTS
whether we expect to be able to query the view created by Hive in Hive/Impala
---- QUERY_IMPALA_VIEW_RESULTS
whether we expect to be able to query the view created by Impala in Hive/Impala
"""
sections = self.load_query_test_file(self.get_workload(), test_file_name,\
self.VALID_SECTION_NAMES)
for test_section in sections:
# validate the test
test_case = ViewCompatTestCase(test_section, test_file_name, test_db_name)
# create views in Hive and Impala checking against the expected results
self._exec_in_hive(test_case.get_create_view_sql('HIVE'),\
test_case.get_create_view_sql('HIVE'),\
test_case.get_create_exp_res())
# The table may or may not have been created in Hive. And so, "invalidate metadata"
# may throw an exception.
try:
self.client.execute("invalidate metadata {0}".format(test_case.hive_view_name))
except ImpalaBeeswaxException as e:
assert "TableNotFoundException" in str(e)
self._exec_in_impala(test_case.get_create_view_sql('IMPALA'),\
test_case.get_create_view_sql('IMPALA'),\
test_case.get_create_exp_res())
# explain a simple query on the view created by Hive in Hive and Impala
if test_case.has_query_hive_section():
        exp_res = test_case.get_query_exp_res('HIVE')
if 'HIVE' in exp_res:
self._exec_in_hive(test_case.get_query_view_sql('HIVE'),\
test_case.get_create_view_sql('HIVE'), exp_res)
if 'IMPALA' in exp_res:
self._exec_in_impala(test_case.get_query_view_sql('HIVE'),\
test_case.get_create_view_sql('HIVE'), exp_res)
# explain a simple query on the view created by Impala in Hive and Impala
if test_case.has_query_impala_section():
        exp_res = test_case.get_query_exp_res('IMPALA')
if 'HIVE' in exp_res:
self._exec_in_hive(test_case.get_query_view_sql('IMPALA'),\
test_case.get_create_view_sql('IMPALA'), exp_res)
if 'IMPALA' in exp_res:
self._exec_in_impala(test_case.get_query_view_sql('IMPALA'),\
test_case.get_create_view_sql('IMPALA'), exp_res)
# drop the views without checking success or failure
self._exec_in_hive(test_case.get_drop_view_sql('HIVE'),\
test_case.get_create_view_sql('HIVE'), None)
try:
self.client.execute("invalidate metadata {0}".format(test_case.hive_view_name))
except ImpalaBeeswaxException as e:
assert "TableNotFoundException" in str(e)
self._exec_in_impala(test_case.get_drop_view_sql('IMPALA'),\
test_case.get_create_view_sql('IMPALA'), None)
def _exec_in_hive(self, sql_str, create_view_sql, exp_res):
try:
self.run_stmt_in_hive(sql_str)
success = True
except: # consider any exception a failure
success = False
self._cmp_expected(sql_str, create_view_sql, exp_res, "HIVE", success)
def _exec_in_impala(self, sql_str, create_view_sql, exp_res):
success = True
try:
impala_ret = self.execute_query(sql_str)
success = impala_ret.success
except: # consider any exception a failure
success = False
self._cmp_expected(sql_str, create_view_sql, exp_res, "IMPALA", success)
def _cmp_expected(self, sql_str, create_view_sql, exp_res, engine, success):
if exp_res is None:
return
if exp_res[engine] and not success:
assert 0, '%s failed to execute\n%s\nwhile testing a view created as\n%s'\
% (engine, sql_str, create_view_sql)
if not exp_res[engine] and success:
      assert 0, '%s unexpectedly succeeded in executing\n%s\nwhile testing '\
        'a view created as\n%s' % (engine, sql_str, create_view_sql)
# Represents one view-compatibility test case. Performs validation of the test sections
# and provides SQL to execute for each section.
class ViewCompatTestCase(object):
RESULT_KEYS = ["IMPALA", "HIVE"]
def __init__(self, test_section, test_file_name, test_db_name):
if 'CREATE_VIEW' not in test_section:
assert 0, 'Error in test file %s. Test cases require a '\
'CREATE_VIEW section.\n%s' %\
(test_file_name, pprint.pformat(test_section))
self.create_exp_res = None
# get map of expected results from test sections
if 'CREATE_VIEW_RESULTS' in test_section:
self.create_exp_res =\
self._get_expected_results(test_section['CREATE_VIEW_RESULTS'])
else:
assert 0, 'Error in test file %s. Test cases require a '\
'CREATE_VIEW_RESULTS section.\n%s' %\
(test_file_name, pprint.pformat(test_section))
self.query_hive_exp_res = None
if 'QUERY_HIVE_VIEW_RESULTS' in test_section:
self.query_hive_exp_res =\
self._get_expected_results(test_section['QUERY_HIVE_VIEW_RESULTS'])
self.query_impala_exp_res = None
if 'QUERY_IMPALA_VIEW_RESULTS' in test_section:
self.query_impala_exp_res =\
self._get_expected_results(test_section['QUERY_IMPALA_VIEW_RESULTS'])
if self.query_hive_exp_res is None and self.query_impala_exp_res is None:
assert 0, 'Error in test file %s. Test cases require a QUERY_HIVE_VIEW_RESULTS '\
'or QUERY_IMPALA_VIEW_RESULTS section.\n%s' %\
(test_file_name, pprint.pformat(test_section))
# clean test section, remove comments etc.
self.create_view_sql = QueryTestSectionReader.build_query(test_section['CREATE_VIEW'])
view_name = self._get_view_name(self.create_view_sql)
if view_name.find(".") != -1:
assert 0, 'Error in test file %s. Found unexpected view name %s that is '\
'qualified with a database' % (test_file_name, view_name)
# add db prefix and suffixes to indicate which engine created the view
self.hive_view_name = test_db_name + '.' + view_name + '_hive'
self.impala_view_name = test_db_name + '.' + view_name + '_impala'
self.hive_create_view_sql =\
self.create_view_sql.replace(view_name, self.hive_view_name, 1)
self.impala_create_view_sql =\
self.create_view_sql.replace(view_name, self.impala_view_name, 1)
# SQL to explain a simple query on the view created by Hive in Hive and Impala
if self.query_hive_exp_res is not None:
self.query_hive_view_sql = 'explain select * from %s' % (self.hive_view_name)
# SQL to explain a simple query on the view created by Impala in Hive and Impala
if self.query_impala_exp_res is not None:
self.query_impala_view_sql = 'explain select * from %s' % (self.impala_view_name)
self.drop_hive_view_sql = "drop view %s" % (self.hive_view_name)
self.drop_impala_view_sql = "drop view %s" % (self.impala_view_name)
def _get_view_name(self, create_view_sql):
lexer = shlex.shlex(create_view_sql)
tokens = list(lexer)
# sanity check the create view statement
if len(tokens) < 3:
assert 0, 'Error in test. Invalid CREATE VIEW statement: %s' % (create_view_sql)
if tokens[0].lower() != "create" or tokens[1].lower() != "view":
assert 0, 'Error in test. Invalid CREATE VIEW statement: %s' % (create_view_sql)
if tokens[2].lower() == "if":
# expect an "if not exists" clause
return tokens[5]
else:
# expect a create view view_name ...
return tokens[2]
def _get_expected_results(self, section_text):
lines = section_text.splitlines()
exp_res = dict()
for line in lines:
components = line.partition("=")
component_value = components[2].upper()
if component_value == 'SUCCESS':
exp_res[components[0]] = True
elif component_value == 'FAILURE':
exp_res[components[0]] = False
else:
raise Exception("Unexpected result declared: " + line)
# check that the results section contains at least one entry
    if not any(key in exp_res for key in self.RESULT_KEYS):
assert 0, 'No valid entry in expected-results section. '\
'Expected an IMPALA or HIVE entry.'
return exp_res
def get_create_view_sql(self, engine):
    engine = engine.upper()
if engine == "HIVE":
return self.hive_create_view_sql
elif engine == "IMPALA":
return self.impala_create_view_sql
else:
assert 0, "Unknown execution engine %s" % (engine)
def get_create_exp_res(self):
return self.create_exp_res
def get_drop_view_sql(self, engine):
    engine = engine.upper()
if engine == "HIVE":
return self.drop_hive_view_sql
elif engine == "IMPALA":
return self.drop_impala_view_sql
else:
assert 0, "Unknown execution engine %s" % (engine)
def get_query_exp_res(self, engine):
    engine = engine.upper()
if engine == "HIVE":
return self.query_hive_exp_res
elif engine == "IMPALA":
return self.query_impala_exp_res
else:
assert 0, "Unknown execution engine %s" % (engine)
def get_query_view_sql(self, engine):
    engine = engine.upper()
if engine == "HIVE":
return self.query_hive_view_sql
elif engine == "IMPALA":
return self.query_impala_view_sql
else:
assert 0, "Unknown execution engine %s" % (engine)
def has_query_hive_section(self):
return hasattr(self, 'query_hive_view_sql')
def has_query_impala_section(self):
return hasattr(self, 'query_impala_view_sql')
|
from django import forms
from shortener.models import Shortener
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Row, Column, HTML
from crispy_forms.bootstrap import PrependedText
class ShortenerForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_show_labels = False
self.helper.form_show_errors = True
self.helper.form_method = 'POST'
self.helper.layout = Layout(
Row(
Column('original', css_class='form-group col-lg-6'),
css_class='form-row justify-content-center align-items-center mt-5'
),
Row(
Column(HTML('<i class="bi bi-arrow-down-circle-fill" style="font-size: 30px"></i>'), css_class='form-group col-lg-1 d-flex justify-content-center'),
css_class='form-row justify-content-center align-items-center mx-3'
),
Row(
Column(PrependedText('shortened', "http://localhost:8000/"), css_class='form-group col-lg-3'),
Column(HTML('<a class="btn btn-secondary" href="/suggest/name/" role="button">Suggest Name</a>'), css_class='form-group col-lg-2'),
css_class='form-row justify-content-center mt-3'
),
Row(
Column(Submit('submit', ' Save '), css_class='form-group col-lg-1 d-flex justify-content-center'),
css_class='form-row justify-content-center align-items-center mt-3'
)
)
shortened = forms.SlugField(
max_length=50,
widget=forms.TextInput(attrs={
"class": "form-control",
"placeholder": "idk",
})
)
original = forms.URLField(
max_length=400,
widget=forms.TextInput(attrs={
"class": "form-control",
"placeholder": "http://www.some.thing/very/long",
})
)
class Meta:
model = Shortener
fields = [
'shortened',
'original'
]
def clean_shortened(self, *args, **kwargs):
shortened = self.cleaned_data.get('shortened')
        if Shortener.objects.filter(shortened=shortened).exists():
raise forms.ValidationError("This name is already taken.")
return shortened
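# A usage sketch inside a hypothetical view (the field values are
# illustrative):
# form = ShortenerForm(data={'original': 'http://www.some.thing/very/long',
#                            'shortened': 'short-name'})
# if form.is_valid():
#     form.save()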
|
"""
IP Routes
object abstractions for representing IP routes in VPP
"""
from vpp_object import VppObject
from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
from vpp_ip import DpoProto, VppIpPrefix, INVALID_INDEX, VppIpAddressUnion, \
VppIpMPrefix
from ipaddress import ip_address, IPv4Network, IPv6Network
# from vnet/vnet/mpls/mpls_types.h
MPLS_IETF_MAX_LABEL = 0xfffff
MPLS_LABEL_INVALID = MPLS_IETF_MAX_LABEL + 1
try:
text_type = unicode
except NameError:
text_type = str
class MRouteItfFlags:
MFIB_ITF_FLAG_NONE = 0
MFIB_ITF_FLAG_NEGATE_SIGNAL = 1
MFIB_ITF_FLAG_ACCEPT = 2
MFIB_ITF_FLAG_FORWARD = 4
MFIB_ITF_FLAG_SIGNAL_PRESENT = 8
MFIB_ITF_FLAG_INTERNAL_COPY = 16
class MRouteEntryFlags:
MFIB_ENTRY_FLAG_NONE = 0
MFIB_ENTRY_FLAG_SIGNAL = 1
MFIB_ENTRY_FLAG_DROP = 2
MFIB_ENTRY_FLAG_CONNECTED = 4
MFIB_ENTRY_FLAG_INHERIT_ACCEPT = 8
class FibPathProto:
FIB_PATH_NH_PROTO_IP4 = 0
FIB_PATH_NH_PROTO_IP6 = 1
FIB_PATH_NH_PROTO_MPLS = 2
FIB_PATH_NH_PROTO_ETHERNET = 3
FIB_PATH_NH_PROTO_BIER = 4
FIB_PATH_NH_PROTO_NSH = 5
class FibPathType:
FIB_PATH_TYPE_NORMAL = 0
FIB_PATH_TYPE_LOCAL = 1
FIB_PATH_TYPE_DROP = 2
FIB_PATH_TYPE_UDP_ENCAP = 3
FIB_PATH_TYPE_BIER_IMP = 4
FIB_PATH_TYPE_ICMP_UNREACH = 5
FIB_PATH_TYPE_ICMP_PROHIBIT = 6
FIB_PATH_TYPE_SOURCE_LOOKUP = 7
FIB_PATH_TYPE_DVR = 8
FIB_PATH_TYPE_INTERFACE_RX = 9
FIB_PATH_TYPE_CLASSIFY = 10
class FibPathFlags:
FIB_PATH_FLAG_NONE = 0
FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED = 1
FIB_PATH_FLAG_RESOLVE_VIA_HOST = 2
class MplsLspMode:
PIPE = 0
UNIFORM = 1
def ip_to_dpo_proto(addr):
if addr.version == 6:
return DpoProto.DPO_PROTO_IP6
else:
return DpoProto.DPO_PROTO_IP4
def address_proto(ip_addr):
    if ip_addr.ip_addr.version == 4:
return FibPathProto.FIB_PATH_NH_PROTO_IP4
else:
return FibPathProto.FIB_PATH_NH_PROTO_IP6
def find_route(test, addr, len, table_id=0):
ip_addr = ip_address(text_type(addr))
    if ip_addr.version == 4:
routes = test.vapi.ip_route_dump(table_id, False)
prefix = IPv4Network("%s/%d" % (text_type(addr), len), strict=False)
else:
routes = test.vapi.ip_route_dump(table_id, True)
prefix = IPv6Network("%s/%d" % (text_type(addr), len), strict=False)
for e in routes:
if table_id == e.route.table_id \
and prefix == e.route.prefix:
return True
return False
def find_mroute(test, grp_addr, src_addr, grp_addr_len,
table_id=0):
ip_mprefix = VppIpMPrefix(text_type(src_addr),
text_type(grp_addr),
grp_addr_len)
    if ip_mprefix.version == 4:
routes = test.vapi.ip_mroute_dump(table_id, False)
else:
routes = test.vapi.ip_mroute_dump(table_id, True)
for e in routes:
if table_id == e.route.table_id and ip_mprefix == e.route.prefix:
return True
return False
def find_mpls_route(test, table_id, label, eos_bit, paths=None):
dump = test.vapi.mpls_route_dump(table_id)
for e in dump:
if label == e.mr_route.mr_label \
and eos_bit == e.mr_route.mr_eos \
and table_id == e.mr_route.mr_table_id:
if not paths:
return True
else:
if (len(paths) != len(e.mr_route.mr_paths)):
return False
for i in range(len(paths)):
if (paths[i] != e.mr_route.mr_paths[i]):
return False
return True
return False
def fib_interface_ip_prefix(test, address, length, sw_if_index):
ip_addr = ip_address(text_type(address))
    if ip_addr.version == 4:
addrs = test.vapi.ip_address_dump(sw_if_index)
prefix = IPv4Network("%s/%d" % (text_type(address), length),
strict=False)
else:
addrs = test.vapi.ip_address_dump(sw_if_index, is_ipv6=1)
prefix = IPv6Network("%s/%d" % (text_type(address), length),
strict=False)
# TODO: refactor this to VppIpPrefix.__eq__
for a in addrs:
if a.sw_if_index == sw_if_index and \
a.prefix == prefix:
return True
return False
class VppIpTable(VppObject):
def __init__(self,
test,
table_id,
is_ip6=0):
self._test = test
self.table_id = table_id
self.is_ip6 = is_ip6
def add_vpp_config(self):
self._test.vapi.ip_table_add_del(is_ipv6=self.is_ip6, is_add=1,
table_id=self.table_id)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.ip_table_add_del(is_ipv6=self.is_ip6, is_add=0,
table_id=self.table_id)
def query_vpp_config(self):
if self.table_id == 0:
# the default table always exists
return False
# find the default route
return find_route(self._test,
"::" if self.is_ip6 else "0.0.0.0",
0,
self.table_id)
def object_id(self):
return ("table-%s-%d" %
("v6" if self.is_ip6 == 1 else "v4",
self.table_id))
class VppIpInterfaceAddress(VppObject):
def __init__(self, test, intf, addr, len):
self._test = test
self.intf = intf
self.prefix = VppIpPrefix(addr, len)
def add_vpp_config(self):
self._test.vapi.sw_interface_add_del_address(
sw_if_index=self.intf.sw_if_index, address=self.prefix.bytes,
address_length=self.prefix.length, is_ipv6=self.prefix.is_ip6,
is_add=1)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.sw_interface_add_del_address(
sw_if_index=self.intf.sw_if_index, address=self.prefix.bytes,
address_length=self.prefix.length, is_ipv6=self.prefix.is_ip6,
is_add=0)
def query_vpp_config(self):
return fib_interface_ip_prefix(self._test,
self.prefix.address,
self.prefix.length,
self.intf.sw_if_index)
def object_id(self):
return "interface-ip-%s-%s" % (self.intf, self.prefix)
class VppIpInterfaceBind(VppObject):
def __init__(self, test, intf, table):
self._test = test
self.intf = intf
self.table = table
def add_vpp_config(self):
if self.table.is_ip6:
self.intf.set_table_ip6(self.table.table_id)
else:
self.intf.set_table_ip4(self.table.table_id)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
if 0 == self.table.table_id:
return
if self.table.is_ip6:
self.intf.set_table_ip6(0)
else:
self.intf.set_table_ip4(0)
def query_vpp_config(self):
if 0 == self.table.table_id:
return False
return self._test.vapi.sw_interface_get_table(
self.intf.sw_if_index,
self.table.is_ip6).vrf_id == self.table.table_id
def object_id(self):
return "interface-bind-%s-%s" % (self.intf, self.table)
class VppMplsLabel(object):
def __init__(self, value, mode=MplsLspMode.PIPE, ttl=64, exp=0):
self.value = value
self.mode = mode
self.ttl = ttl
self.exp = exp
def encode(self):
        is_uniform = 0 if self.mode == MplsLspMode.PIPE else 1
return {'label': self.value,
'ttl': self.ttl,
'exp': self.exp,
'is_uniform': is_uniform}
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.value == other.value and
self.ttl == other.ttl and
self.exp == other.exp and
self.mode == other.mode)
elif hasattr(other, 'label'):
return (self.value == other.label and
self.ttl == other.ttl and
self.exp == other.exp and
(self.mode == MplsLspMode.UNIFORM) == other.is_uniform)
else:
return False
def __ne__(self, other):
return not (self == other)
class VppFibPathNextHop(object):
def __init__(self, addr,
via_label=MPLS_LABEL_INVALID,
next_hop_id=INVALID_INDEX):
self.addr = VppIpAddressUnion(addr)
self.via_label = via_label
self.obj_id = next_hop_id
def encode(self):
        if self.via_label != MPLS_LABEL_INVALID:
return {'via_label': self.via_label}
        if self.obj_id != INVALID_INDEX:
return {'obj_id': self.obj_id}
else:
return {'address': self.addr.encode()}
def proto(self):
        if self.via_label == MPLS_LABEL_INVALID:
return address_proto(self.addr)
else:
return FibPathProto.FIB_PATH_NH_PROTO_MPLS
def __eq__(self, other):
if not isinstance(other, self.__class__):
# try the other instance's __eq__.
return NotImplemented
return (self.addr == other.addr and
self.via_label == other.via_label and
self.obj_id == other.obj_id)
class VppRoutePath(object):
def __init__(
self,
nh_addr,
nh_sw_if_index,
nh_table_id=0,
labels=[],
nh_via_label=MPLS_LABEL_INVALID,
rpf_id=0,
next_hop_id=INVALID_INDEX,
proto=None,
flags=FibPathFlags.FIB_PATH_FLAG_NONE,
type=FibPathType.FIB_PATH_TYPE_NORMAL):
self.nh_itf = nh_sw_if_index
self.nh_table_id = nh_table_id
self.nh_labels = labels
self.weight = 1
self.rpf_id = rpf_id
self.flags = flags
self.type = type
self.nh = VppFibPathNextHop(nh_addr, nh_via_label, next_hop_id)
if proto is None:
self.proto = self.nh.proto()
else:
self.proto = proto
self.next_hop_id = next_hop_id
def encode_labels(self):
lstack = []
        for label in self.nh_labels:
            if isinstance(label, VppMplsLabel):
                lstack.append(label.encode())
            else:
                lstack.append({'label': label,
                               'ttl': 255})
while (len(lstack) < 16):
lstack.append({})
return lstack
def encode(self):
return {'weight': 1,
'preference': 0,
'table_id': self.nh_table_id,
'nh': self.nh.encode(),
'next_hop_id': self.next_hop_id,
'sw_if_index': self.nh_itf,
'rpf_id': self.rpf_id,
'proto': self.proto,
'type': self.type,
'flags': self.flags,
'n_labels': len(self.nh_labels),
'label_stack': self.encode_labels()}
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.nh == other.nh
elif hasattr(other, 'sw_if_index'):
# vl_api_fib_path_t
if (len(self.nh_labels) != other.n_labels):
return False
for i in range(len(self.nh_labels)):
if (self.nh_labels[i] != other.label_stack[i]):
return False
return self.nh_itf == other.sw_if_index
else:
return False
def __ne__(self, other):
return not (self == other)
class VppMRoutePath(VppRoutePath):
def __init__(self, nh_sw_if_index, flags,
nh=None,
proto=FibPathProto.FIB_PATH_NH_PROTO_IP4,
type=FibPathType.FIB_PATH_TYPE_NORMAL,
bier_imp=INVALID_INDEX):
if not nh:
nh = "::" if proto is FibPathProto.FIB_PATH_NH_PROTO_IP6 \
else "0.0.0.0"
super(VppMRoutePath, self).__init__(nh,
nh_sw_if_index,
proto=proto,
type=type,
next_hop_id=bier_imp)
self.nh_i_flags = flags
self.bier_imp = bier_imp
def encode(self):
return {'path': super(VppMRoutePath, self).encode(),
'itf_flags': self.nh_i_flags}
class VppIpRoute(VppObject):
"""
IP Route
"""
def __init__(self, test, dest_addr,
dest_addr_len, paths, table_id=0, register=True):
self._test = test
self.paths = paths
self.table_id = table_id
self.prefix = VppIpPrefix(dest_addr, dest_addr_len)
self.register = register
self.stats_index = None
self.encoded_paths = []
for path in self.paths:
self.encoded_paths.append(path.encode())
def __eq__(self, other):
if self.table_id == other.table_id and \
self.prefix == other.prefix:
return True
return False
def modify(self, paths):
self.paths = paths
self.encoded_paths = []
for path in self.paths:
self.encoded_paths.append(path.encode())
self._test.vapi.ip_route_add_del(route={'table_id': self.table_id,
'prefix': self.prefix.encode(),
'n_paths': len(
self.encoded_paths),
'paths': self.encoded_paths,
},
is_add=1,
is_multipath=0)
def add_vpp_config(self):
r = self._test.vapi.ip_route_add_del(
route={'table_id': self.table_id,
'prefix': self.prefix.encode(),
'n_paths': len(self.encoded_paths),
'paths': self.encoded_paths,
},
is_add=1,
is_multipath=0)
self.stats_index = r.stats_index
if self.register:
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.ip_route_add_del(route={'table_id': self.table_id,
'prefix': self.prefix.encode(),
'n_paths': len(
self.encoded_paths),
'paths': self.encoded_paths,
},
is_add=0,
is_multipath=0)
def query_vpp_config(self):
return find_route(self._test,
self.prefix.address,
self.prefix.len,
self.table_id)
def object_id(self):
return ("%s:table-%d-%s/%d" % (
'ip6-route' if self.prefix.addr.version == 6 else 'ip-route',
self.table_id,
self.prefix.address,
self.prefix.len))
def get_stats_to(self):
c = self._test.statistics.get_counter("/net/route/to")
return c[0][self.stats_index]
def get_stats_via(self):
c = self._test.statistics.get_counter("/net/route/via")
return c[0][self.stats_index]
class VppIpMRoute(VppObject):
"""
IP Multicast Route
"""
def __init__(self, test, src_addr, grp_addr,
grp_addr_len, e_flags, paths, table_id=0,
rpf_id=0):
self._test = test
self.paths = paths
self.table_id = table_id
self.e_flags = e_flags
self.rpf_id = rpf_id
self.prefix = VppIpMPrefix(src_addr, grp_addr, grp_addr_len)
self.encoded_paths = []
for path in self.paths:
self.encoded_paths.append(path.encode())
def add_vpp_config(self):
r = self._test.vapi.ip_mroute_add_del(self.table_id,
self.prefix.encode(),
self.e_flags,
self.rpf_id,
self.encoded_paths,
is_add=1)
self.stats_index = r.stats_index
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.ip_mroute_add_del(self.table_id,
self.prefix.encode(),
self.e_flags,
self.rpf_id,
self.encoded_paths,
is_add=0)
def update_entry_flags(self, flags):
self.e_flags = flags
self._test.vapi.ip_mroute_add_del(self.table_id,
self.prefix.encode(),
self.e_flags,
self.rpf_id,
[],
is_add=1)
def update_rpf_id(self, rpf_id):
self.rpf_id = rpf_id
self._test.vapi.ip_mroute_add_del(self.table_id,
self.prefix.encode(),
self.e_flags,
self.rpf_id,
[],
is_add=1)
def update_path_flags(self, itf, flags):
for p in range(len(self.paths)):
if self.paths[p].nh_itf == itf:
self.paths[p].nh_i_flags = flags
self.encoded_paths[p] = self.paths[p].encode()
break
self._test.vapi.ip_mroute_add_del(self.table_id,
self.prefix.encode(),
self.e_flags,
self.rpf_id,
[self.encoded_paths[p]],
is_add=1,
is_multipath=0)
def query_vpp_config(self):
return find_mroute(self._test,
self.prefix.gaddr,
self.prefix.saddr,
self.prefix.length,
self.table_id)
def object_id(self):
return ("%d:(%s,%s/%d)" % (self.table_id,
self.prefix.saddr,
self.prefix.gaddr,
self.prefix.length))
def get_stats(self):
c = self._test.statistics.get_counter("/net/mroute")
return c[0][self.stats_index]
class VppMFibSignal(object):
def __init__(self, test, route, interface, packet):
self.route = route
self.interface = interface
self.packet = packet
self.test = test
def compare(self, signal):
self.test.assertEqual(self.interface, signal.sw_if_index)
self.test.assertEqual(self.route.table_id, signal.table_id)
self.test.assertEqual(self.route.prefix, signal.prefix)
class VppMplsIpBind(VppObject):
"""
MPLS to IP Binding
"""
def __init__(self, test, local_label, dest_addr, dest_addr_len,
table_id=0, ip_table_id=0, is_ip6=0):
self._test = test
self.dest_addr_len = dest_addr_len
self.dest_addr = dest_addr
self.ip_addr = ip_address(text_type(dest_addr))
self.local_label = local_label
self.table_id = table_id
self.ip_table_id = ip_table_id
self.prefix = VppIpPrefix(dest_addr, dest_addr_len)
def add_vpp_config(self):
self._test.vapi.mpls_ip_bind_unbind(self.local_label,
self.prefix.encode(),
table_id=self.table_id,
ip_table_id=self.ip_table_id)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.mpls_ip_bind_unbind(self.local_label,
self.prefix.encode(),
table_id=self.table_id,
ip_table_id=self.ip_table_id,
is_bind=0)
def query_vpp_config(self):
dump = self._test.vapi.mpls_route_dump(self.table_id)
for e in dump:
if self.local_label == e.mr_route.mr_label \
and self.table_id == e.mr_route.mr_table_id:
return True
return False
def object_id(self):
return ("%d:%s binds %d:%s/%d"
% (self.table_id,
self.local_label,
self.ip_table_id,
self.dest_addr,
self.dest_addr_len))
class VppMplsTable(VppObject):
def __init__(self,
test,
table_id):
self._test = test
self.table_id = table_id
def add_vpp_config(self):
self._test.vapi.mpls_table_add_del(
self.table_id,
is_add=1)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.mpls_table_add_del(
self.table_id,
is_add=0)
def query_vpp_config(self):
dump = self._test.vapi.mpls_table_dump()
for d in dump:
if d.mt_table.mt_table_id == self.table_id:
return True
return False
def object_id(self):
return ("table-mpls-%d" % (self.table_id))
class VppMplsRoute(VppObject):
"""
MPLS Route/LSP
"""
def __init__(self, test, local_label, eos_bit, paths, table_id=0,
is_multicast=0,
eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP4):
self._test = test
self.paths = paths
self.local_label = local_label
self.eos_bit = eos_bit
self.eos_proto = eos_proto
self.table_id = table_id
self.is_multicast = is_multicast
def add_vpp_config(self):
paths = []
for path in self.paths:
paths.append(path.encode())
r = self._test.vapi.mpls_route_add_del(self.table_id,
self.local_label,
self.eos_bit,
self.eos_proto,
self.is_multicast,
paths, 1, 0)
self.stats_index = r.stats_index
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
paths = []
for path in self.paths:
paths.append(path.encode())
self._test.vapi.mpls_route_add_del(self.table_id,
self.local_label,
self.eos_bit,
self.eos_proto,
self.is_multicast,
paths, 0, 0)
def query_vpp_config(self):
return find_mpls_route(self._test, self.table_id,
self.local_label, self.eos_bit)
def object_id(self):
return ("mpls-route-%d:%s/%d"
% (self.table_id,
self.local_label,
20 + self.eos_bit))
def get_stats_to(self):
c = self._test.statistics.get_counter("/net/route/to")
return c[0][self.stats_index]
def get_stats_via(self):
c = self._test.statistics.get_counter("/net/route/via")
return c[0][self.stats_index]
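# A construction sketch (assumes a VPP test case `test` with a configured
# interface `pg0`; both names are hypothetical here):
# route = VppIpRoute(test, "10.0.0.0", 24,
#                    [VppRoutePath("10.0.0.1", test.pg0.sw_if_index)])
# route.add_vpp_config()
# assert route.query_vpp_config()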
|
"""
mfhyd module. Contains the ModflowHyd class. Note that the user can access
the ModflowHyd class as `flopy.modflow.ModflowHyd`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/hyd.htm>`_.
"""
import sys
import numpy as np
from ..pakbase import Package
from ..utils.recarray_utils import create_empty_recarray
class ModflowHyd(Package):
"""
MODFLOW HYDMOD (HYD) Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
nhyd : int
the maximum number of observation points. (default is 1).
ihydun : int
A flag that is used to determine if hydmod data should be saved.
If ihydun is non-zero hydmod data will be saved. (default is 1).
hydnoh : float
is a user-specified value that is output if a value cannot be computed
at a hydrograph location. For example, the cell in which the hydrograph
is located may be a no-flow cell. (default is -999.)
obsdata : list of lists, numpy array, or numpy recarray (nhyd, 7)
Each row of obsdata includes data defining pckg (3 character string),
arr (2 character string), intyp (1 character string) klay (int),
xl (float), yl (float), hydlbl (14 character string) for each
observation.
pckg : str
is a 3-character flag to indicate which package is to be addressed
by hydmod for the hydrograph of each observation point.
arr : str
is a text code indicating which model data value is to be accessed
for the hydrograph of each observation point.
intyp : str
is a 1-character value to indicate how the data from the specified
        feature are to be accessed; the two options are 'I' for
        interpolated value or 'C' for cell value (intyp must be 'C' for
        STR and SFR Package hydrographs).
klay : int
is the layer sequence number (zero-based) of the array to be
addressed by HYDMOD.
xl : float
is the coordinate of the hydrograph point in model units of length
measured parallel to model rows, with the origin at the lower left
corner of the model grid.
yl : float
is the coordinate of the hydrograph point in model units of length
measured parallel to model columns, with the origin at the lower
left corner of the model grid.
hydlbl : str
is used to form a label for the hydrograph.
The simplest form is a list of lists. For example, if nhyd=3 this
gives the form of::
obsdata =
[
[pckg, arr, intyp, klay, xl, yl, hydlbl],
[pckg, arr, intyp, klay, xl, yl, hydlbl],
[pckg, arr, intyp, klay, xl, yl, hydlbl]
]
    extension : list of str
Filename extension (default is ['hyd', 'hyd.bin'])
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the hydmod output name will be created using
the model name and .hyd.bin extension (for example,
modflowtest.hyd.bin). If a single string is passed the package will be
set to the string and hydmod output name will be created using the
model name and .hyd.bin extension. To define the names for all package
files (input and output) the length of the list of strings should be 2.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> hyd = flopy.modflow.ModflowHyd(m)
"""
def __init__(self, model, nhyd=1, ihydun=None, hydnoh=-999.,
obsdata=[['BAS', 'HD', 'I', 0, 0., 0., 'HOBS1']],
extension=['hyd', 'hyd.bin'], unitnumber=None,
filenames=None):
"""
Package constructor.
"""
        # set default unit number if one is not specified
if unitnumber is None:
unitnumber = ModflowHyd.defaultunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# set ihydun to a default unit number if it isn't specified
if ihydun is None:
ihydun = 536
# update external file information with hydmod output
fname = filenames[1]
model.add_output_file(ihydun, fname=fname, extension='hyd.bin',
package=ModflowHyd.ftype())
# Fill namefile items
name = [ModflowHyd.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
self.url = 'hyd.htm'
self.nhyd = nhyd
self.ihydun = ihydun
self.hydnoh = hydnoh
dtype = ModflowHyd.get_default_dtype()
obs = ModflowHyd.get_empty(nhyd)
if isinstance(obsdata, list):
if len(obsdata) != nhyd:
e = 'ModflowHyd: nhyd ({}) does not equal '.format(nhyd) + \
'length of obsdata ({}).'.format(len(obsdata))
raise RuntimeError(e)
for idx in range(nhyd):
obs['pckg'][idx] = obsdata[idx][0]
obs['arr'][idx] = obsdata[idx][1]
obs['intyp'][idx] = obsdata[idx][2]
obs['klay'][idx] = int(obsdata[idx][3])
obs['xl'][idx] = float(obsdata[idx][4])
obs['yl'][idx] = float(obsdata[idx][5])
obs['hydlbl'][idx] = obsdata[idx][6]
obsdata = obs
elif isinstance(obsdata, np.ndarray):
            if obsdata.dtype == object:
if obsdata.shape[1] != len(dtype):
raise IndexError('Incorrect number of fields for obsdata')
obsdata = obsdata.transpose()
obs['pckg'] = obsdata[0]
obs['arr'] = obsdata[1]
obs['intyp'] = obsdata[2]
obs['klay'] = obsdata[3]
obs['xl'] = obsdata[4]
obs['yl'] = obsdata[5]
obs['hydlbl'] = obsdata[6]
else:
            inds = ['pckg', 'arr', 'intyp', 'klay', 'xl', 'yl', 'hydlbl']
            for idx in inds:
                obs[idx] = obsdata[idx]
obsdata = obs
obsdata = obsdata.view(dtype=dtype)
self.obsdata = obsdata
# add package
self.parent.add_package(self)
def write_file(self):
"""
Write the package file.
Returns
-------
None
"""
# Open file for writing
f = open(self.fn_path, 'w')
# write dataset 1
f.write('{} {} {} {}\n'.format(self.nhyd, self.ihydun, self.hydnoh,
self.heading))
# write dataset 2
for idx in range(self.nhyd):
if sys.version_info[0] == 3:
f.write('{} '.format(self.obsdata['pckg'][idx].decode()))
f.write('{} '.format(self.obsdata['arr'][idx].decode()))
f.write('{} '.format(self.obsdata['intyp'][idx].decode()))
f.write('{} '.format(self.obsdata['klay'][idx] + 1))
f.write('{} '.format(self.obsdata['xl'][idx]))
f.write('{} '.format(self.obsdata['yl'][idx]))
f.write('{} '.format(self.obsdata['hydlbl'][idx].decode()))
else:
f.write('{} '.format(self.obsdata['pckg'][idx]))
f.write('{} '.format(self.obsdata['arr'][idx]))
f.write('{} '.format(self.obsdata['intyp'][idx]))
f.write('{} '.format(self.obsdata['klay'][idx] + 1))
f.write('{} '.format(self.obsdata['xl'][idx]))
f.write('{} '.format(self.obsdata['yl'][idx]))
f.write('{} '.format(self.obsdata['hydlbl'][idx]))
f.write('\n')
# close hydmod file
f.close()
@staticmethod
def get_empty(ncells=0):
# get an empty recarray that corresponds to dtype
dtype = ModflowHyd.get_default_dtype()
return create_empty_recarray(ncells, dtype)
@staticmethod
def get_default_dtype():
# PCKG ARR INTYP KLAY XL YL HYDLBL
dtype = np.dtype([("pckg", '|S3'), ("arr", '|S2'),
("intyp", '|S1'), ("klay", np.int),
("xl", np.float32), ("yl", np.float32),
("hydlbl", '|S14')])
return dtype
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
hyd : ModflowHyd object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> hyd = flopy.modflow.ModflowHyd.load('test.hyd', m)
"""
if model.verbose:
sys.stdout.write('loading hydmod package file...\n')
openfile = not hasattr(f, 'read')
if openfile:
filename = f
f = open(filename, 'r')
# --read dataset 1
# NHYD IHYDUN HYDNOH
if model.verbose:
sys.stdout.write(' loading hydmod dataset 1\n')
line = f.readline()
t = line.strip().split()
nhyd = int(t[0])
ihydun = int(t[1])
model.add_pop_key_list(ihydun)
hydnoh = float(t[2])
obs = ModflowHyd.get_empty(nhyd)
for idx in range(nhyd):
line = f.readline()
t = line.strip().split()
obs['pckg'][idx] = t[0].strip()
obs['arr'][idx] = t[1].strip()
obs['intyp'][idx] = t[2].strip()
obs['klay'][idx] = int(t[3]) - 1
obs['xl'][idx] = float(t[4])
obs['yl'][idx] = float(t[5])
obs['hydlbl'][idx] = t[6].strip()
if openfile:
f.close()
# set package unit number
unitnumber = None
filenames = [None, None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=ModflowHyd.ftype())
if ihydun > 0:
iu, filenames[1] = \
model.get_ext_dict_attr(ext_unit_dict, unit=ihydun)
model.add_pop_key_list(ihydun)
# create hyd instance
hyd = ModflowHyd(model, nhyd=nhyd, ihydun=ihydun, hydnoh=hydnoh,
obsdata=obs, unitnumber=unitnumber,
filenames=filenames)
# return hyd instance
return hyd
@staticmethod
def ftype():
return 'HYD'
@staticmethod
def defaultunit():
return 36
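# A write-out sketch building on the docstring example (the observation
# values are illustrative):
# import flopy
# m = flopy.modflow.Modflow()
# hyd = flopy.modflow.ModflowHyd(
#     m, nhyd=1, obsdata=[['BAS', 'HD', 'I', 0, 0., 0., 'HOBS1']])
# hyd.write_file()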
|
from .encoder import Encoder, EncHyperparameter
from .unary_encoder import UnaryEncoder, UEncHyperparameter
from .mean import MeanImputation, MeanHyperparameter
from .iterative_regression import IterativeRegressionImputation, IterativeRegressionHyperparameter
from .greedy import GreedyImputation, GreedyHyperparameter
from .mice import MICE, MiceHyperparameter
from .knn import KNNImputation, KnnHyperparameter
from .IQRScaler import IQRScaler, IQRHyperparams
from .labler import Labler, LablerHyperparams
from .cleaning_featurizer import CleaningFeaturizer, CleaningFeaturizerHyperparameter
from .denormalize import Denormalize, DenormalizeHyperparams
from .data_profile import Profiler, Hyperparams as ProfilerHyperparams
from .column_fold import FoldColumns, FoldHyperparameter
from .voter import Voter, VoterHyperparameter
from .datamart_query_from_dataframe import QueryFromDataFrameHyperparams, QueryFromDataframe
from .datamart_augment import DatamartAugmentationHyperparams, DatamartAugmentation
from .datamart_join import DatamartJoinHyperparams, DatamartJoin
from .to_numeric import ToNumeric
from .splitter import Splitter, SplitterHyperparameter
# __all__ = ['Encoder', 'GreedyImputation', 'IterativeRegressionImputation',
# 'MICE', 'KNNImputation', 'MeanImputation', 'KnnHyperparameter',
# 'UEncHyperparameter','EncHyperparameter']
__all__ = ['Encoder', 'EncHyperparameter',
           'UnaryEncoder', 'UEncHyperparameter',
           'KNNImputation', 'KnnHyperparameter',
           'MeanImputation', 'MeanHyperparameter',
           'MICE', 'MiceHyperparameter',
           'IterativeRegressionImputation', 'IterativeRegressionHyperparameter',
           'GreedyImputation', 'GreedyHyperparameter',
           'IQRScaler', 'IQRHyperparams',
           'Labler', 'LablerHyperparams',
           'CleaningFeaturizer', 'CleaningFeaturizerHyperparameter',
           'Denormalize', 'DenormalizeHyperparams',
           'Profiler', 'ProfilerHyperparams',
           'FoldColumns', 'FoldHyperparameter',
           'Voter', 'VoterHyperparameter',
           'Splitter', 'SplitterHyperparameter',
           'QueryFromDataframe', 'DatamartAugmentation',
           'DatamartJoin',
           'ToNumeric'
           ]
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__) # type: ignore
|
#!/usr/bin/python3
""" Review module for the HBNB project """
from models.base_model import BaseModel, Base
from sqlalchemy import Column, String, ForeignKey
class Review(BaseModel, Base):
""" Review classto store review information """
__tablename__ = "reviews"
place_id = Column(String(60), ForeignKey('places.id'), nullable=False)
user_id = Column(String(60), ForeignKey('users.id'), nullable=False)
text = Column(String(1024), nullable=False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `util.config`."""
import pytest
from mock import patch
from finances_at_home.finances_at_home import *
class TestLoadConfig:
"""Tests for `load_config`."""
def test_file_exists(self):
"""Check handling of file."""
pass
def test_no_file(self):
"""Check error handling of no file."""
pass
def test_valid_config(self):
"""Valid YAML passes."""
pass
def test_invalid_config(self):
"""Invalid YAML fails."""
pass
|
"""AnyNumberOf and OneOf."""
from typing import List, Optional, Tuple
from sqlfluff.core.parser.helpers import trim_non_code_segments
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.match_wrapper import match_wrapper
from sqlfluff.core.parser.match_logging import parse_match_logging
from sqlfluff.core.parser.context import ParseContext
from sqlfluff.core.parser.segments import BaseSegment, allow_ephemeral
from sqlfluff.core.parser.grammar.base import (
BaseGrammar,
MatchableType,
cached_method_for_parse_context,
)
from sqlfluff.core.parser.grammar.sequence import Sequence, Bracketed
class AnyNumberOf(BaseGrammar):
"""A more configurable version of OneOf."""
def __init__(self, *args, **kwargs):
self.max_times = kwargs.pop("max_times", None)
self.min_times = kwargs.pop("min_times", 0)
self.max_times_per_element = kwargs.pop("max_times_per_element", None)
# Any patterns to _prevent_ a match.
self.exclude = kwargs.pop("exclude", None)
super().__init__(*args, **kwargs)
@cached_method_for_parse_context
def simple(self, parse_context: ParseContext) -> Optional[List[str]]:
"""Does this matcher support a uppercase hash matching route?
AnyNumberOf does provide this, as long as *all* the elements *also* do.
"""
simple_buff = [
opt.simple(parse_context=parse_context) for opt in self._elements
]
if any(elem is None for elem in simple_buff):
return None
# Flatten the list
return [inner for outer in simple_buff for inner in outer]
def is_optional(self) -> bool:
"""Return whether this element is optional.
This is mostly set in the init method, but also in this
case, if min_times is zero then this is also optional.
"""
return self.optional or self.min_times == 0
@staticmethod
def _first_non_whitespace(segments) -> Optional[str]:
"""Return the upper first non-whitespace segment in the iterable."""
for segment in segments:
if segment.raw_segments_upper:
return segment.raw_segments_upper
return None
def _prune_options(
self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext
) -> Tuple[List[MatchableType], List[str]]:
"""Use the simple matchers to prune which options to match on."""
available_options = []
simple_opts = []
prune_buff = []
non_simple = 0
pruned_simple = 0
matched_simple = 0
# Find the first code element to match against.
first_elem = self._first_non_whitespace(segments)
for opt in self._elements:
simple = opt.simple(parse_context=parse_context)
if simple is None:
# This element is not simple, we have to do a
# full match with it...
available_options.append(opt)
non_simple += 1
continue
# Otherwise we have a simple option, so let's use
# it for pruning.
for simple_opt in simple:
# Check it's not a whitespace option
if not simple_opt.strip(): # pragma: no cover
raise NotImplementedError(
"_prune_options not supported for whitespace matching."
)
# We want to know if the first meaningful element of the str_buff
# matches the option.
# match the FIRST non-whitespace element of the list.
if first_elem != simple_opt:
# No match, carry on.
continue
# If we get here, it's matched the FIRST element of the string buffer.
available_options.append(opt)
simple_opts.append(simple_opt)
matched_simple += 1
break
else:
# Ditch this option, the simple match has failed
prune_buff.append(opt)
pruned_simple += 1
continue
parse_match_logging(
self.__class__.__name__,
"match",
"PRN",
parse_context=parse_context,
v_level=3,
ns=non_simple,
ps=pruned_simple,
ms=matched_simple,
pruned=prune_buff,
opts=available_options or "ALL",
)
return available_options, simple_opts
def _match_once(
self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext
) -> Tuple[MatchResult, Optional["MatchableType"]]:
"""Match the forward segments against the available elements once.
This serves as the main body of OneOf, but also a building block
for AnyNumberOf.
"""
        # For efficiency, we'll be pruning options if we can
        # based on their simpleness. This provides a shortcut
        # so we can return earlier if possible.
# `segments` may already be nested so we need to break out
# the raw segments within it.
available_options, _ = self._prune_options(
segments, parse_context=parse_context
)
# If we've pruned all the options, return unmatched (with some logging).
if not available_options:
return MatchResult.from_unmatched(segments)
with parse_context.deeper_match() as ctx:
match, matched_option = self._longest_trimmed_match(
segments,
available_options,
parse_context=ctx,
trim_noncode=False,
)
return match, matched_option
@match_wrapper()
@allow_ephemeral
def match(
self, segments: Tuple[BaseSegment, ...], parse_context: ParseContext
) -> MatchResult:
"""Match against any of the elements a relevant number of times.
If it matches multiple, it returns the longest, and if any are the same
length it returns the first (unless we explicitly just match first).
"""
# First if we have an *exclude* option, we should check that
# which would prevent the rest of this grammar from matching.
if self.exclude:
with parse_context.deeper_match() as ctx:
if self.exclude.match(segments, parse_context=ctx):
return MatchResult.from_unmatched(segments)
# Match on each of the options
matched_segments: MatchResult = MatchResult.from_empty()
unmatched_segments: Tuple[BaseSegment, ...] = segments
n_matches = 0
# Keep track of the number of times each option has been matched.
available_options, _ = self._prune_options(
segments, parse_context=parse_context
)
available_option_counter = {str(o): 0 for o in available_options}
while True:
if self.max_times and n_matches >= self.max_times:
# We've matched as many times as we can
return MatchResult(
matched_segments.matched_segments, unmatched_segments
)
# Is there anything left to match?
if len(unmatched_segments) == 0:
# No...
if n_matches >= self.min_times:
return MatchResult(
matched_segments.matched_segments, unmatched_segments
)
else: # pragma: no cover TODO?
# We didn't meet the hurdle
return MatchResult.from_unmatched(unmatched_segments)
# If we've already matched once...
if n_matches > 0 and self.allow_gaps:
# Consume any non-code if there is any
pre_seg, mid_seg, post_seg = trim_non_code_segments(unmatched_segments)
unmatched_segments = mid_seg + post_seg
else:
pre_seg = () # empty tuple
match, matched_option = self._match_once(
unmatched_segments, parse_context=parse_context
)
# Increment counter for matched option.
if matched_option and (str(matched_option) in available_option_counter):
available_option_counter[str(matched_option)] += 1
# Check if we have matched an option too many times.
if (
self.max_times_per_element
and available_option_counter[str(matched_option)]
> self.max_times_per_element
):
return MatchResult(
matched_segments.matched_segments, unmatched_segments
)
if match:
matched_segments += pre_seg + match.matched_segments
unmatched_segments = match.unmatched_segments
n_matches += 1
else:
# If we get here, then we've not managed to match. And the next
# unmatched segments are meaningful, i.e. they're not what we're
# looking for.
if n_matches >= self.min_times:
return MatchResult(
matched_segments.matched_segments, pre_seg + unmatched_segments
)
else:
# We didn't meet the hurdle
return MatchResult.from_unmatched(unmatched_segments)
class OneOf(AnyNumberOf):
"""Match any of the elements given once.
If it matches multiple, it returns the longest, and if any are the same
length it returns the first (unless we explicitly just match first).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, max_times=1, min_times=1, **kwargs)
class OptionallyBracketed(OneOf):
"""Hybrid of Bracketed and Sequence: allows brackets but they aren't required.
NOTE: This class is greedy on brackets so if they *can* be claimed, then
they will be.
"""
def __init__(self, *args, **kwargs):
super().__init__(
Bracketed(*args),
# In the case that there is only one argument, no sequence is required.
args[0] if len(args) == 1 else Sequence(*args),
**kwargs,
)
class AnySetOf(AnyNumberOf):
"""Match any number of the elements but each element can only be matched once."""
def __init__(self, *args, **kwargs):
super().__init__(*args, max_times_per_element=1, **kwargs)
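# A grammar-construction sketch (Ref is how dialect elements are usually
# referenced in SQLFluff; the segment names here are illustrative):
# from sqlfluff.core.parser import Ref
# identifier = OneOf(Ref("NakedIdentifierSegment"),
#                    Ref("QuotedIdentifierSegment"))
# any_order = AnySetOf(Ref("CommentGrammar"), Ref("WhitespaceSegment"))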
|
from django.shortcuts import render
from django_redis import get_redis_connection
from rest_framework import status
from rest_framework.generics import RetrieveAPIView, UpdateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from goods.models import SKU
from users.models import User, Address
from rest_framework.response import Response
# Create your views here.
# Check whether a username already exists
class RegisterUsernameAPIView(APIView):
def get(self,request,username):
count = User.objects.filter(username=username).count()
return Response({'count':count})
# Check whether a mobile number already exists
class RegisterMobileAPIView(APIView):
def get(self,request,mobile):
count = User.objects.filter(mobile=mobile).count()
return Response({'count':count})
# Registration view
"""
1. Clarify the requirement
2. Determine the request method and route
3. Decide which kind of view to use
4. Develop step by step
"""
"""
1. Requirement: when the user clicks register, the parameters
   username password password2 mobile smscode is_allow are sent to the backend
2. Request method: POST
   Route: /users/
3. View to use: APIView
4. Concrete steps:
   # receive parameters
   # validate parameters
   # save to the database
   # return the response
"""
from .serializers import RegisterUsersSerializer, UserDetailSerializer
class RegisterUsersAPIView(APIView):
def post(self,request):
        # receive parameters
        data = request.data
        # validate parameters
        serializer = RegisterUsersSerializer(data=data)
        serializer.is_valid(raise_exception=True)
        # save to the database
        serializer.save()
        # return the response
return Response(serializer.data)
"""
个人中心
需求分析:必须是已经登陆的用户才可以进入用户中心
前端需要传递用户的信息
"""
# User detail view
class UserDetailView(RetrieveAPIView):
    # permission: only authenticated users may access this view
permission_classes = [IsAuthenticated]
serializer_class = UserDetailSerializer
    # No id is passed in the URL, but the generic RetrieveAPIView needs a pk
    # to fetch a single user, so get_object is overridden.
    # The class-based view keeps a reference to the request object, and
    # request.user is the user that passed authentication for this request.
def get_object(self):
return self.request.user
# Email view: save the email address to the database
"""
Requirement analysis: the user must already be authenticated (user centre).
The user clicks save and the email address is stored in the database.
Request method: PUT    Route: users/emails/
View: the generic UpdateAPIView
Steps:
    receive parameters
    validate parameters
    save to the database
    return the response
"""
from .serializers import UserEmailSerialzer
class UserEmailView(UpdateAPIView):
"""
    Save the email address
"""
    # only authenticated users may access this view
permission_classes = [IsAuthenticated]
serializer_class = UserEmailSerialzer
def get_object(self):
""" 重写获取单个用户的方法 """
return self.request.user
"""
激活邮箱
请求方式 GET users/emails/verification/?token=xxx
接收token
验证token,获取user_id
查询用户信息
改变邮箱激活状态
返回响应
"""
class VerificationEmailView(APIView):
def get(self,request):
data = request.query_params
token = data.get('token')
if token is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
from users.utils import check_token
user_id = check_token(token)
if user_id is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
user = User.objects.get(pk=user_id)
user.email_active = True
user.save()
return Response({'msg':'ok'})
from rest_framework.generics import CreateAPIView
from .serializers import AddressSerializer
class AddressView(CreateAPIView):
"""
需求分析: 新增收货地址
明确要干什么 接收用户输入的地址信息,并保存到数据库
请求方式 POST
请求路由 /users/addresses/
使用的视图:CreateAPIView
按步骤进行开发:
接收参数
校验参数
数据入库
返回响应
"""
    # permission: authenticated users only
permission_classes = [IsAuthenticated]
serializer_class = AddressSerializer
    # creating new data, so no queryset needs to be set
class AddressListView(APIView):
"""
收货地址列表视图 查询该用户的所有收货地址
请求方式 GET
请求路由 users/addresses/list/
类视图 APIView
按照步骤进行开发
"""
    # permission: authenticated users only
permission_classes = [IsAuthenticated]
def get(self,request):
user = request.user
addrs = Address.objects.filter(user_id=user.id).all()
ser = AddressSerializer(instance=addrs,many=True)
data = {
'user_id':user.id,
'default_address_id':user.default_address_id,
'limit':20,
'addresses':ser.data,
}
return Response(data)
class AddressManageAPIView(APIView):
    # permission: authenticated users only
permission_classes = [IsAuthenticated]
""" 删除地址视图 """
def delete(self,request,addr_id):
addr = Address.objects.get(id=addr_id)
addr.delete()
        return Response({'msg': 'address deleted'})
""" 修改地址 """
def put(self, request, addr_id):
data = request.data
addr = Address.objects.get(id=addr_id)
ser = AddressSerializer(instance=addr,data=data)
ser.is_valid(raise_exception=True)
ser.save()
return Response(ser.data)
class AddressSetDefaultAPIView(APIView):
    # permission: authenticated users only
permission_classes = [IsAuthenticated]
def put(self,request,addr_id):
user = request.user
user.default_address_id = addr_id
user.save()
        return Response({'msg': 'default address set'})
class AddressSetTitleAPIView(APIView):
""" 保存标题信息 """
    # permission: authenticated users only
permission_classes = [IsAuthenticated]
def put(self,request,addr_id):
title = request.data['title']
addr = Address.objects.get(id=addr_id)
addr.title = title
addr.save()
        return Response({'msg': 'title saved'})
"""
用户浏览历史记录:保存浏览记录到redis
后端接口设计:
请求方式:POST
请求路由: /users/browerhistories/
"""
from rest_framework import mixins
from rest_framework.generics import GenericAPIView
from .serializers import AddUserBrowsingHistorySerializer
class UserBrowsingHistoryView(mixins.CreateModelMixin,GenericAPIView):
serializer_class = AddUserBrowsingHistorySerializer
permission_classes = [IsAuthenticated]
def post(self,request):
""" 保存 """
return self.create(request)
def get(self,request):
""" 获取浏览记录 """
user_id = request.user.id
        # connect to redis
redis_conn = get_redis_connection('history')
        # fetch the data
history_sku_ids = redis_conn.lrange('history_%s'%user_id,0,5)
skus = []
for sku_id in history_sku_ids:
sku = SKU.objects.get(pk=sku_id)
skus.append(sku)
        # serialize
from goods.serializers import SKUSerializer
serializer = SKUSerializer(skus,many=True)
return Response(serializer.data)
from rest_framework_jwt.views import ObtainJSONWebToken
class LoginMergeView(ObtainJSONWebToken):
"""
    Merge the cart after login
"""
def post(self, request,*args,**kwargs):
response = super().post(request,*args,**kwargs)
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
user = serializer.object.get('user') or request.user
# user = serializer.validated_data.get("user")
from carts.utils import merge_cookie_to_redis
response = merge_cookie_to_redis(request,user,response)
return response
"""
修改密码
接收参数:user_id old_password new_password
请求方式 POST
请求路由 users/change_password
视图 一级视图 APIView
"""
class ChangePasswordView(APIView):
""" 修改用户密码 """
# 认证用户 才能访问此接口
permission_classes = [IsAuthenticated]
def post(self,request):
user = request.user
data = request.data
old_password = data.get('old_password')
new_password = data.get('new_password')
if user.check_password(old_password):
user.password=new_password
user.set_password(new_password)
user.save()
return Response({'msg':'密码修改成功'},status=status.HTTP_200_OK)
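# --- Hedged urls.py wiring for the views above (the first two routes come
# from the docstrings; the rest are illustrative assumptions, kept commented
# out because they belong in a separate urls.py module).
# from django.urls import path, re_path
# urlpatterns = [
#     path('users/addresses/', AddressView.as_view()),
#     path('users/addresses/list/', AddressListView.as_view()),
#     re_path(r'^users/addresses/(?P<addr_id>\d+)/$', AddressManageAPIView.as_view()),
#     re_path(r'^users/addresses/(?P<addr_id>\d+)/default/$', AddressSetDefaultAPIView.as_view()),
#     re_path(r'^users/addresses/(?P<addr_id>\d+)/title/$', AddressSetTitleAPIView.as_view()),
#     path('users/browerhistories/', UserBrowsingHistoryView.as_view()),
#     path('users/change_password/', ChangePasswordView.as_view()),
# ]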
|
#!/home/pi/server
# coding=utf-8
# Ultrasonic ranging module wiring: VCC to the Pi's 5V, GND to the Pi's GND,
# trig to board pin 38, echo to board pin 40.
# GPIO numbering mode is BOARD!!
import RPi.GPIO as GPIO
import time
def t_stop():
GPIO.output(11,False)
GPIO.output(12,False)
GPIO.output(15,False)
GPIO.output(16,False)
def t_up():
GPIO.output(11,True)
GPIO.output(12,False)
GPIO.output(15,True)
GPIO.output(16,False)
def t_down():
GPIO.output(11,False)
GPIO.output(12,True)
GPIO.output(15,False)
GPIO.output(16,True)
def t_left():
GPIO.output(11,False)
GPIO.output(12,True)
GPIO.output(15,True)
GPIO.output(16,False)
def t_right():
GPIO.output(11,True)
GPIO.output(12,False)
GPIO.output(15,False)
GPIO.output(16,True)
def bee():
GPIO.output(12,True)
time.sleep(0.5)
GPIO.output(12,False)
GPIO.output(15,True)
time.sleep(5)
GPIO.output(12,True)
GPIO.output(15,False)
def checkdist():
# Send the trigger pulse
GPIO.output(38,GPIO.HIGH)
# Hold it high for at least 10us (15us used here)
time.sleep(0.000015)
GPIO.output(38,GPIO.LOW)
while not GPIO.input(40):
pass
# Echo went high: start timing
t1 = time.time()
while GPIO.input(40):
pass
# Echo dropped: stop timing
t2 = time.time()
# Return the distance in centimetres (speed of sound ~340 m/s, round trip)
return (t2-t1)*34000/2
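# --- Hedged helper (not in the original): smooth out spurious echoes by
# taking the median of several pings before the drive loop acts on a reading.
def checkdist_median(samples=5):
    vals = []
    for _ in range(samples):
        vals.append(checkdist())
        time.sleep(0.06)  # let the previous echo die down between pings
    return sorted(vals)[samples // 2]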
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(38,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(40,GPIO.IN)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(12,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
GPIO.setup(16,GPIO.OUT)
##time.sleep(2)
try:
while True:
dis = int(checkdist())
print(dis)
if dis <= 30:
print("distance less than 0.30m, back up")
t_stop()
time.sleep(0.1)
t_down()
time.sleep(0.5)
t_left()
elif dis > 30:
print("forward")
time.sleep(0.1)
t_up()
except KeyboardInterrupt:
GPIO.cleanup()
|
from datetime import datetime, timedelta
from timeit import default_timer
from adblockparser import AdblockRules
from adblockparser import AdblockRule
from carl import storage
all_options = {opt: True for opt in AdblockRule.BINARY_OPTIONS}
def load_rules():
fname = "easylist.txt"
with open(fname) as f:
raw_rules = f.readlines()
rules = AdblockRules(raw_rules, use_re2=True)
return rules
def get_req_urls():
q = "SELECT req_id, url FROM requests where ad IS null"
rows = storage.execute(q).fetchall()
return [(r[0], r[1]) for r in rows]
def update_db(items, status):
q = "UPDATE requests SET ad = {} WHERE req_id == ?".format(int(status))
items = [(i,) for i in items]
storage.execute_many(q, items)
def sec_to_time(sec):
# Render a seconds count as "days:hours:minutes:seconds" by adding the
# timedelta to a dummy datetime and reading the fields back.
sec = timedelta(seconds=sec)
d = datetime(1, 1, 1) + sec
return "%d:%d:%d:%d" % (d.day-1, d.hour, d.minute, d.second)
def mark_ads():
rules = load_rules()
reqs = get_req_urls()
ads = []
not_ads = []
num_req = len(reqs)
print("got: {} requests".format(num_req))
start = default_timer()
for i, req in enumerate(reqs):
req_id, url = req
if rules.should_block(url, all_options):
ads.append(req_id)
else:
not_ads.append(req_id)
if i > 0 and i % 1000 == 0:
update_db(ads, True)
update_db(not_ads, False)
now = default_timer()
time = now-start
est = sec_to_time(time*((num_req-i)/1000))
print("@ {} : ads: {} : not: {} in {:.2f} : est left: {}".format(
i, len(ads), len(not_ads), time, est))
start = now
ads = []
not_ads = []
print("@ {} : ads: {} : not: {}".format(i, len(ads), len(not_ads)))
update_db(ads, True)
update_db(not_ads, False)
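# Hedged entry point (the source shows no __main__ guard; added so the module
# can be imported without side effects).
if __name__ == '__main__':
    mark_ads()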
|
from sys import getrefcount
a = [1, 2, 3]
print(getrefcount(a))
b = [a, a, a]
print(getrefcount(a))
print(getrefcount(b))
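# What to expect (exact numbers can vary by interpreter): getrefcount counts
# its own argument as one extra reference, so the first print is typically 2
# (the name `a` plus the call argument). After b = [a, a, a], three more
# references to the list exist, so the second print is typically 5, while
# getrefcount(b) prints 2 again.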
|
from django.db import connections
from django.conf import settings
from baserow.core.exceptions import UserNotInGroupError
from baserow.core.utils import extract_allowed, set_allowed_attrs
from baserow.contrib.database.fields.models import TextField
from baserow.contrib.database.views.handler import ViewHandler
from baserow.contrib.database.views.view_types import GridViewType
from baserow.contrib.database.fields.handler import FieldHandler
from baserow.contrib.database.fields.field_types import (
LongTextFieldType, BooleanFieldType
)
from .models import Table
from .exceptions import (
TableDoesNotExist, InvalidInitialTableData, InitialTableDataLimitExceeded
)
class TableHandler:
def get_table(self, user, table_id, base_queryset=None):
"""
Selects a table with a given id from the database.
:param user: The user on whose behalf the table is requested.
:type user: User
:param table_id: The identifier of the table that must be returned.
:type table_id: int
:param base_queryset: The base queryset from where to select the table
object from. This can for example be used to do a `select_related`.
:type base_queryset: Queryset
:raises TableDoesNotExist: When the table with the provided id does not exist.
:raises UserNotInGroupError: When the user does not belong to the related group.
:return: The requested table of the provided id.
:rtype: Table
"""
if not base_queryset:
base_queryset = Table.objects
try:
table = base_queryset.select_related('database__group').get(id=table_id)
except Table.DoesNotExist:
raise TableDoesNotExist(f'The table with id {table_id} does not exist.')
group = table.database.group
if not group.has_user(user):
raise UserNotInGroupError(user, group)
return table
def create_table(self, user, database, fill_example=False, data=None,
first_row_header=True, **kwargs):
"""
Creates a new table and a primary text field.
:param user: The user on whose behalf the table is created.
:type user: User
:param database: The database that the table instance belongs to.
:type database: Database
:param fill_example: Indicates whether an initial view, some fields and
some rows should be added. Works only if no data is provided.
:type fill_example: bool
:param data: A list containing all the rows that need to be inserted is
expected. All the values of the row are going to be converted to a string
and will be inserted in the database.
:type data: None or list[list[str]]
:param first_row_header: Indicates if the first row are the fields. The names
of these rows are going to be used as fields.
:type first_row_header: bool
:param kwargs: The fields that need to be set upon creation.
:type kwargs: object
:raises UserNotInGroupError: When the user does not belong to the related group.
:return: The created table instance.
:rtype: Table
"""
if not database.group.has_user(user):
raise UserNotInGroupError(user, database.group)
if data is not None:
fields, data = self.normalize_initial_table_data(data, first_row_header)
table_values = extract_allowed(kwargs, ['name'])
last_order = Table.get_last_order(database)
table = Table.objects.create(database=database, order=last_order,
**table_values)
if data is not None:
# If the initial data has been provided we will create those fields before
# creating the model so that the whole table schema is created right
# away.
for index, name in enumerate(fields):
fields[index] = TextField.objects.create(
table=table,
order=index,
primary=index == 0,
name=name
)
else:
# If no initial data is provided we want to create a primary text field for
# the table.
TextField.objects.create(table=table, order=0, primary=True, name='Name')
# Create the table schema in the user table database.
connection = connections[settings.USER_TABLE_DATABASE]
with connection.schema_editor() as schema_editor:
model = table.get_model()
schema_editor.create_model(model)
if data is not None:
self.fill_initial_table_data(user, table, fields, data, model)
elif fill_example:
self.fill_example_table_data(user, table)
return table
def normalize_initial_table_data(self, data, first_row_header):
"""
Normalizes the provided initial table data. The amount of columns will be made
equal for each row. The header and the rows will also be separated.
:param data: A list containing all the provided rows.
:type data: list
:param first_row_header: Indicates if the first row is the header. For each
of these header columns a field is going to be created.
:type first_row_header: bool
:return: A list containing the field names and a list containing all the rows.
:rtype: list, list
:raises InvalidInitialTableData: When the data doesn't contain a column or row.
"""
if len(data) == 0:
raise InvalidInitialTableData('At least one row should be provided.')
limit = settings.INITIAL_TABLE_DATA_LIMIT
if limit and len(data) > limit:
raise InitialTableDataLimitExceeded(
f'It is not possible to import more than '
f'{settings.INITIAL_TABLE_DATA_LIMIT} rows when creating a table.'
)
largest_column_count = len(max(data, key=len))
if largest_column_count == 0:
raise InvalidInitialTableData('At least one column should be provided.')
fields = data.pop(0) if first_row_header else []
for i in range(len(fields), largest_column_count):
fields.append(f'Field {i + 1}')
for row in data:
for i in range(len(row), largest_column_count):
row.append('')
return fields, data
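# Illustrative behaviour of normalize_initial_table_data (example not in the
# source): ragged rows are padded and missing header names are generated.
#
#   >>> TableHandler().normalize_initial_table_data(
#   ...     [['Name'], ['Tesla', 'X'], ['Amazon']], first_row_header=True)
#   (['Name', 'Field 2'], [['Tesla', 'X'], ['Amazon', '']])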
def fill_initial_table_data(self, user, table, fields, data, model):
"""
Fills the provided table with the normalized data that needs to be created upon
creation of the table.
:param user: The user on whose behalf the table is created.
:type user: User
:param table: The newly created table where the initial data has to be inserted
into.
:type table: Table
:param fields: A list containing the field names.
:type fields: list
:param data: A list containing the rows that need to be inserted.
:type data: list
:param model: The generated table model of the table that needs to be filled
with initial data.
:type model: TableModel
"""
ViewHandler().create_view(user, table, GridViewType.type, name='Grid')
bulk_data = [
model(**{
f'field_{fields[index].id}': str(value)
for index, value in enumerate(row)
})
for row in data
]
model.objects.bulk_create(bulk_data)
def fill_example_table_data(self, user, table):
"""
Fills the table with some initial example data. A new table is expected that
already has a primary field named 'name'.
:param user: The user on whose behalf the table is filled.
:type user: User
:param table: The table that needs the initial data.
:type table: Table
"""
view_handler = ViewHandler()
field_handler = FieldHandler()
view = view_handler.create_view(user, table, GridViewType.type, name='Grid')
notes = field_handler.create_field(user, table, LongTextFieldType.type,
name='Notes')
active = field_handler.create_field(user, table, BooleanFieldType.type,
name='Active')
field_options = {
notes.id: {'width': 400},
active.id: {'width': 100}
}
fields = [notes, active]
view_handler.update_grid_view_field_options(view, field_options, fields=fields)
model = table.get_model(attribute_names=True)
model.objects.create(name='Tesla', active=True)
model.objects.create(name='Amazon', active=False)
def update_table(self, user, table, **kwargs):
"""
Updates an existing table instance.
:param user: The user on whose behalf the table is updated.
:type user: User
:param table: The table instance that needs to be updated.
:type table: Table
:param kwargs: The fields that need to be updated.
:type kwargs: object
:raises ValueError: When the provided table is not an instance of Table.
:raises UserNotInGroupError: When the user does not belong to the related group.
:return: The updated table instance.
:rtype: Table
"""
if not isinstance(table, Table):
raise ValueError('The table is not an instance of Table')
if not table.database.group.has_user(user):
raise UserNotInGroupError(user, table.database.group)
table = set_allowed_attrs(kwargs, ['name'], table)
table.save()
return table
def delete_table(self, user, table):
"""
Deletes an existing table instance.
:param user: The user on whose behalf the table is deleted.
:type user: User
:param table: The table instance that needs to be deleted.
:type table: Table
:raises ValueError: When the provided table is not an instance of Table.
:raises UserNotInGroupError: When the user does not belong to the related group.
"""
if not isinstance(table, Table):
raise ValueError('The table is not an instance of Table')
if not table.database.group.has_user(user):
raise UserNotInGroupError(user, table.database.group)
# Delete the table schema from the database.
connection = connections[settings.USER_TABLE_DATABASE]
with connection.schema_editor() as schema_editor:
model = table.get_model()
schema_editor.delete_model(model)
table.delete()
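# Hedged usage sketch of TableHandler (object names are illustrative):
#
#   handler = TableHandler()
#   table = handler.create_table(user, database, name='Customers',
#                                data=[['Name'], ['Tesla']],
#                                first_row_header=True)
#   table = handler.update_table(user, table, name='Clients')
#   handler.delete_table(user, table)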
|
from z3 import *
from maneuverRecomadEngine.exactsolvers.SMT_Solver_Z3_IntIntOr_ILP import Z3_Solver_Int_Parent_ILP
from maneuverRecomadEngine.exactsolvers.ManuverSolver_SB_ILP import ManuverSolver_SB_ILP
class Z3_SolverInt_SB_Enc_AllCombinationsOffers_ILP(Z3_Solver_Int_Parent_ILP, ManuverSolver_SB_ILP):
def _defineVariablesAndConstraints(self):
"""
Creates the variables used in the solver and the constraints on them as well as others (offers encoding, usage vector, etc.)
:return: None
"""
super()._defineVariablesAndConstraints()
self.ProcProv = []
self.MemProv = []
self.StorageProv = []
self.PriceProv = []
if self.default_offers_encoding:
for i in range(len(self.offers_list)):
self.ProcProv.append(self.offers_list[i][1])
self.MemProv.append(self.offers_list[i][2])
self.StorageProv.append(self.offers_list[i][3])
self.PriceProv.append(self.offers_list[i][4])
print("ProcProv ", self.ProcProv)
print("MemProv ", self.MemProv)
print("StorageProv ", self.StorageProv)
print("PriceProv ", self.PriceProv)
# values from availableConfigurations
# if self.default_offers_encoding:
# self.ProcProv = [Int('ProcProv%i' % j) for j in range(1, self.nrVM + 1)]
# self.MemProv = [Int('MemProv%i' % j) for j in range(1, self.nrVM + 1)]
# self.StorageProv = [Int('StorageProv%i' % j) for j in range(1, self.nrVM + 1)]
# self.PriceProv = [Int('PriceProv%i' % j) for j in range(1, self.nrVM + 1)]
self.a = [Int('C%i_VM%i_T%i' % (i + 1, j + 1, k + 1))
for k in range(self.nrOffers) for j in range(self.nrVM) for i in range(self.nrComp) ]
self.v = [Int('VM%i_T%i' % (j + 1, k + 1)) for k in range(self.nrOffers) for j in range(self.nrVM) ]
print("a=", self.a)
print("v=", self.v)
# elements of the association matrix should be just 0 or 1
for i in range(len(self.a)):
self.solver.add(Or([self.a[i] == 0, self.a[i] == 1]))
# elements of the occupancy vector should be just 0 or 1
for i in range(len(self.v)):
self.solver.add(Or([self.v[i] == 0, self.v[i] == 1]))
#self.vmType = [Int('VM%iType' % j) for j in range(1, self.nrVM + 1)]
# for i in range(self.nrComp):
# for j in range(self.nrVM):
# self.solver.add(Implies(self.a[i * self.nrVM + j] == 1, Not(self.vmType[j] == 0)))
def convert_price(self, price):
return price / 1000.
def _hardware_and_offers_restrictionns(self, scale_factor):
#price restrictions
# for j in range(self.nrVM):
# self.solver.add(self.PriceProv[j] >= 0)
# self.solver.add(
# Implies(sum([self.a[i + j] for i in range(0, len(self.a), self.nrVM)]) == 0, self.PriceProv[j] == 0))
# #map vm to type
# priceIndex = len(self.offers_list[0]) - 1
# for vm_id in range(self.nrVM):
# index = 0
# for offer in self.offers_list:
# index += 1
# price = offer[priceIndex] if int(scale_factor) == 1 else offer[priceIndex] / scale_factor
# self.solver.add(
# Implies(And(sum([self.a[i + vm_id] for i in range(0, len(self.a), self.nrVM)]) >= 1,
# self.vmType[vm_id] == index),
# And(self.PriceProv[vm_id] == price,
# self.ProcProv[vm_id] == offer[1],
# self.MemProv[vm_id] == (
# offer[2] if int(scale_factor) == 1 else offer[2] / scale_factor),
# self.StorageProv[vm_id] == (
# offer[3] if int(scale_factor) == 1 else offer[3] / scale_factor)
# )
# ))
# lst = [self.vmType[vm_id] == offerID for offerID in range(1, len(self.offers_list)+1)]
# self.solver.add(Or(lst))
#map hardware
# tmp = []
# for k in range(self.nrVM):
# tmp.append(
# sum([self.a[i * self.nrVM + k] * (self.problem.componentsList[i].HC) for i in range(self.nrComp)]) <=
# self.ProcProv[k])
# tmp.append(sum([self.a[i * self.nrVM + k] * ((
# self.problem.componentsList[i].HM if int(scale_factor) == 1 else
# self.problem.componentsList[i].HM / scale_factor)) for i in range(self.nrComp)]) <=
# self.MemProv[k])
# tmp.append(sum([self.a[i * self.nrVM + k] * ((
# self.problem.componentsList[i].HS if int(scale_factor) == 1 else
# self.problem.componentsList[i].HS / scale_factor)) for i in range(self.nrComp)]) <=
# self.StorageProv[k])
# self.solver.add(tmp)
# A machine can have only one type
tmp = []
for k in range(self.nrVM):
print("A machine can have only one type:", sum([self.v[i * self.nrVM + k] for i in range(self.nrOffers)]) <= 1)
tmp.append(sum([self.v[i * self.nrVM + k] for i in range(self.nrOffers)]) <= 1)
self.solver.add(tmp)
# capacity constraints: the components placed on VM k of offer type o must
# fit within that offer's CPU capacity. NOTE: the index arithmetic below
# assumes the offer-major layout used when self.a was built above
# (offer k, then VM j, then component i).
tmp = []
for o in range(self.nrOffers):
for k in range(self.nrVM):
tmp.append(
sum([self.a[o * self.nrVM * self.nrComp + k * self.nrComp + i]
* self.problem.componentsList[i].HC
for i in range(self.nrComp)])
<= self.ProcProv[o])
self.solver.add(tmp)
# Memory and storage would be constrained analogously via self.MemProv and
# self.StorageProv (divided by scale_factor when scaling is enabled).
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from .base import ClientV1RestServlet, client_path_patterns
import hmac
import hashlib
import base64
class VoipRestServlet(ClientV1RestServlet):
PATTERNS = client_path_patterns("/voip/turnServer$")
@defer.inlineCallbacks
def on_GET(self, request):
requester = yield self.auth.get_user_by_req(request)
turnUris = self.hs.config.turn_uris
turnSecret = self.hs.config.turn_shared_secret
turnUsername = self.hs.config.turn_username
turnPassword = self.hs.config.turn_password
userLifetime = self.hs.config.turn_user_lifetime
if turnUris and turnSecret and userLifetime:
expiry = (self.hs.get_clock().time_msec() + userLifetime) / 1000
username = "%d:%s" % (expiry, requester.user.to_string())
mac = hmac.new(turnSecret, msg=username, digestmod=hashlib.sha1)
# We need to use standard padded base64 encoding here, not the
# unpadded encode_base64 helper, because we must add the standard
# padding to get the same result as the TURN server.
password = base64.b64encode(mac.digest())
elif turnUris and turnUsername and turnPassword and userLifetime:
username = turnUsername
password = turnPassword
else:
defer.returnValue((200, {}))
defer.returnValue((200, {
'username': username,
'password': password,
'ttl': userLifetime / 1000,
'uris': turnUris,
}))
def on_OPTIONS(self, request):
return (200, {})
def register_servlets(hs, http_server):
VoipRestServlet(hs).register(http_server)
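# Standalone illustration (assumed inputs, not part of the servlet) of the
# shared-secret scheme used above: username is "<expiry>:<user>" and the
# password is base64(HMAC-SHA1(secret, username)), per the TURN REST API
# convention.
import base64
import hashlib
import hmac
import time

def turn_credentials(user_id, shared_secret, lifetime_ms=86400000):
    # Credentials expire lifetime_ms after issue; expiry is in seconds.
    expiry = (int(time.time() * 1000) + lifetime_ms) // 1000
    username = "%d:%s" % (expiry, user_id)
    mac = hmac.new(shared_secret.encode(), username.encode(), hashlib.sha1)
    return username, base64.b64encode(mac.digest()).decode()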
|
from collections import Counter
num_letters = []
cont_2 = 0
cont_3 = 0
#with open("day2//input.txt") as f:
with open("input.txt") as f:
for line in f:
c = Counter(line)
num_letters = c.values()
if 2 in num_letters:
cont_2 += 1
if 3 in num_letters:
cont_3 += 1
print(cont_2*cont_3)
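# Worked example (the classic puzzle sample): for the id "bababc", Counter
# yields {'b': 3, 'a': 2, 'c': 1}, so it bumps both the twice-count and the
# thrice-count; the checksum is cont_2 * cont_3 over all ids.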
|