hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7cf6a5111b2b89adbb9a05cd1e297b6cc0c6423c | 11,658 | py | Python | standup.py | drgerstl/standupTracker | d9b2d5b2da702ffd724aba8659e0044b0275c79f | [
"MIT",
"Unlicense"
] | null | null | null | standup.py | drgerstl/standupTracker | d9b2d5b2da702ffd724aba8659e0044b0275c79f | [
"MIT",
"Unlicense"
] | null | null | null | standup.py | drgerstl/standupTracker | d9b2d5b2da702ffd724aba8659e0044b0275c79f | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/env python
"""
A simple GUI program used to display and track who is attending stand up
meetings as well as who has presented already.
"""
from tkinter import *
import tkinter as tk
import tkinter.font as tkFont
from tkinter.messagebox import showinfo
from typing import Tuple
import constants as const
__author__ = "Dan Gerstl"
__copyright__ = "Copyright 2021, Dan Gerstl, All rights reserved"
__credits__ = ["Dan Gerstl",]
__version__ = "1.2"
__maintainer__ = "Dan Gerstl"
__email__ = "drgerstl@gmail.com"
# Global Variables (module-level GUI state shared by the functions below)
top = tk.Tk()            # root window; all widgets are children of this
employeeList = []        # populated with Employee instances by makeWidgets()
row = 0                  # current grid row while laying out widgets
col = 0                  # current grid column while laying out widgets
hostAssigned = False     # True while exactly one attendee carries the HOST tag
attendingCount = 0       # number of employees currently marked attending
lblCount = StringVar()   # text backing the attendance-counter label
class Main(Frame):
    """Root frame of the application.

    Keeps a reference to its parent widget so the enclosing window's
    title can be changed later.
    """

    def __init__(self, parent=None):
        """Build the frame inside *parent* (defaults to the implicit root)."""
        super().__init__(parent)
        self.parent = parent
class Employee:
    """GUI row for one employee: a clickable name label plus 'attending'
    and 'presented' checkbuttons wired to the module-level counters."""

    def __init__(self, name, view):
        """
        Class that contains the employee name and associated buttons and label.

        name: display name shown on the label.
        view: parent tkinter container the widgets are created in.
        """
        # Component variables
        self.attendingBtnVal = IntVar()
        self.presentedBtnVal = IntVar()
        self.lblText = StringVar()
        # Class Variables
        self.name = name
        # Label starts DISABLED; markAttending() enables it when the
        # employee is marked as attending.
        self.label = Label(view, textvariable=self.lblText, relief=FLAT,
                           font=const.FONT, state=DISABLED)
        # Left-click on the name toggles the HOST marker (toggleLabel).
        self.label.bind('<Button-1>', self.toggleLabel)
        self.checkBtnAttending = tk.Checkbutton(
            view, text=const.ATTENDING, command=self.toggleAttending,
            variable=self.attendingBtnVal, onvalue=ON, offvalue=OFF,
            height=const.BTN_HEIGHT, width=const.BTN_WIDTH)
        # The 'presented' box stays disabled until the employee attends.
        self.checkBtnPresented = tk.Checkbutton(
            view, text=const.TO_PRESENT, command=self.togglePresented,
            state=DISABLED, variable=self.presentedBtnVal, onvalue=ON,
            offvalue= OFF, height= const.BTN_HEIGHT, width= const.BTN_WIDTH)
        # Set employee name as label text
        self.lblText.set(self.name)

    def toggleLabel(self, event):
        """
        Toggles whether or not the HOST label is displayed on an Employee's
        label.  Only one host may exist at a time (tracked by the module
        global hostAssigned), and only an enabled (attending) label may
        claim it.
        """
        # Global Variables
        global hostAssigned
        if (self.label['state'] == const.ENABLED and not hostAssigned):
            self.lblText.set(self.name + const.HOST)
            hostAssigned = True
        elif (self.label['state'] == const.ENABLED and hostAssigned):
            # Clicking the current host again releases the marker.
            if const.HOST in self.lblText.get():
                self.lblText.set(self.name)
                hostAssigned = False

    def toggleAttending(self):
        """
        Function is fired on checking the attendance checkbutton and updates GUI
        based on value.  Delegates to markAttending()/clearEmployee(), which
        also maintain the attendance counter and host flag.
        """
        # Global Variables
        global hostAssigned
        # Box goes from unchecked -> checked
        if (self.attendingBtnVal.get() == ON):
            markAttending(self)
        else:  # Box goes from checked -> unchecked
            clearEmployee(self)

    def togglePresented(self):
        """
        Function is fired on checking the presented checkbutton and updates GUI
        based on value.  Flips the button text between TO_PRESENT/PRESENTED
        and recolors the name label to match.
        """
        if (self.checkBtnPresented['text'] == const.TO_PRESENT):
            self.checkBtnPresented['text'] = const.PRESENTED
            self.label.config(fg= const.PRESENTED_COLOR)
        else:
            self.checkBtnPresented['text'] = const.TO_PRESENT
            self.label.config(fg= const.ATTENDING_COLOR)
def showEmployee(Employee, start, col):
    """ Display an Employee's label and checkbuttons on the GUI grid.

    Employee: the Employee instance whose widgets are placed.
    start: grid row for the name label; the two checkbuttons occupy the
        two rows directly below it.
    col: grid column shared by all three widgets.
    """
    # Set starting row and add label
    row = start
    Employee.label.grid(row=row, column=col, padx=const.LBL_PAD_X,
                        pady=const.LBL_PAD_Y)
    # Underline the name label's font.
    f = tkFont.Font(Employee.label, Employee.label.cget('font'))
    f.configure(underline=True)
    # (Removed a no-op empty f.configure() call and a dead trailing
    # 'row = start' assignment present in the original.)
    Employee.label.configure(font=f)
    # Move down rows and add checkbuttons
    row += 1
    Employee.checkBtnAttending.grid(row=row, column=col, padx=const.BOX_PAD_X)
    row += 1
    Employee.checkBtnPresented.grid(row=row, column=col, padx=const.BOX_PAD_X)
def clearEmployee(Employee):
    """ Clears the GUI components associated with an employee.

    Unchecks both checkbuttons, disables the presented box and the name
    label, decrements the module-level attendance counter, and releases
    the HOST marker if this employee held it.
    """
    # Global Variables
    global hostAssigned
    global attendingCount
    global lblCount
    # Decrement attendance counter and update label
    attendingCount -= 1
    lblCount.set(const.ATTENDING + ': ' + str(attendingCount))
    # Reset attending checkbutton
    Employee.attendingBtnVal.set(OFF)
    Employee.checkBtnAttending['text'] = const.ATTENDING
    # Reset presented checkbutton
    Employee.presentedBtnVal.set(OFF)
    Employee.checkBtnPresented['text'] = const.TO_PRESENT
    Employee.checkBtnPresented['state'] = DISABLED
    # Disable employee label
    Employee.label['state'] = DISABLED
    Employee.label['cursor'] = 'arrow'
    # If this employee was the host, free the slot so another attendee
    # can claim it.
    if const.HOST in Employee.lblText.get():
        Employee.lblText.set(Employee.name)
        hostAssigned = False
def clearAll(employeeList):
    """ Clears the GUI components associated with all employees.

    Note: the original placed this docstring after the global statements,
    where it is a bare string expression rather than a docstring; it has
    been moved to the top of the function.
    """
    # Global Variables
    global attendingCount
    global lblCount
    for employee in employeeList:
        clearEmployee(employee)
    # clearEmployee decrements per employee; pin the counter to exactly
    # zero afterwards and refresh the label once.
    attendingCount = 0
    lblCount.set(const.ATTENDING + ': ' + str(attendingCount))
def markAttending(Employee):
    """ Marks an Employee as attending.

    Increments the module-level attendance counter, checks the attending
    box, enables the presented box and the name label, and styles the
    label as clickable (so it can claim the HOST marker).
    """
    # Global Variables
    global attendingCount
    global lblCount
    # Increment attendance counter and update label
    attendingCount += 1
    lblCount.set(const.ATTENDING + ': ' + str(attendingCount))
    # Ensure checkbox value is set to checked
    Employee.attendingBtnVal.set(ON)
    # Enable presented checkbox
    Employee.checkBtnPresented['state'] = const.ENABLED
    # Enable label and set color for attending
    Employee.label['state'] = const.ENABLED
    Employee.label.config(fg= const.ATTENDING_COLOR)
    # Change cursor to indicate its clickable
    Employee.label['cursor'] = 'hand2'
def markAllAttending(employeeList):
    """ Marks all Employees as attending """
    global attendingCount
    global lblCount
    # Mark each employee individually so every widget updates.
    for person in employeeList:
        markAttending(person)
    # The final count is simply the roster size; overwrite whatever the
    # per-employee increments produced and refresh the label once.
    attendingCount = len(employeeList)
    lblCount.set('{}: {}'.format(const.ATTENDING, attendingCount))
def exit():
    """ Destroys the main window to exit the program """
    # NOTE(review): this shadows the builtin exit() within this module;
    # kept as-is because the Exit button binds to this name (command=exit).
    top.destroy()
""" These methods are for using a button instead of a checkbox """
#region
# class Employee:
# def __init__(self, name, view):
# #-- Component variables --#
# attendingBtnVal = IntVar()
# presentedBtnVal = IntVar()
# lblText = StringVar()
# #-- Class Variables--#
# self.name = name
# self.label = Label(view, textvariable=lblText, relief=FLAT, font=FONT)
# self.checkBtnAttending = tk.Button(view, text= ATTENDING, command= self.toggleAttending, \
# height= BTN_HEIGHT, width= BTN_WIDTH)
# self.checkBtnPresented = tk.Button(view, text= PRESENTED, command= self.togglePresented, \
# height= BTN_HEIGHT, width= BTN_WIDTH, state='disabled')
# #-- Set label name --#
# lblText.set(self.name)
# def toggleAttending(self):
# # label.config(text='TED')
# # tk.messagebox.showinfo(title=checkboxVar.get(), message=checkboxVar.get())
# # if (checkboxVar == 'ON'):
# # tk.messagebox.showinfo(title=checkboxVar.get(), message=checkboxVar.get())
# if (self.checkBtnAttending['text'] == ATTENDING):
# self.checkBtnAttending['text'] = 'Not Attending'
# self.checkBtnPresented['state'] = 'normal'
# else:
# self.checkBtnAttending['text'] = ATTENDING
# self.checkBtnPresented['state'] = 'disabled'
# def togglePresented(self):
# if (self.checkBtnPresented['text'] == PRESENTED):
# self.checkBtnPresented['text'] = 'Done'
# else:
# self.checkBtnPresented['text'] = PRESENTED
# #-- Used to display the Employee class components --#
# def showEmployee(Employee, start, col):
# row = start
# Employee.label.grid(row=row, column=col, padx=LBL_PAD_X, pady=LBL_PAD_Y)
# f = tkFont.Font(Employee.label, Employee.label.cget('font'))
# f.configure(underline = True)
# f.configure()
# Employee.label.configure(font=f)
# row += 1
# Employee.checkBtnAttending.grid(row=row, column=col, padx=BTN_PAD_X)
# row += 1
# Employee.checkBtnPresented.grid(row=row, column=col, padx=BTN_PAD_X)
# row = start
#endregion
def makeWidgets(view, row, col):
    """ Draws the GUI: sets the window title, builds an Employee per name
    in const.EMPLOYEES (alphabetical order), and lays their widgets out
    on a 4-column grid.

    Returns the grid row following the last employee block.
    """
    # Set title for main window
    view.winfo_toplevel().title(const.TITLE)
    # Bug fix: the original aliased const.EMPLOYEES and called .sort(),
    # mutating the shared constant in place; sorted() copies instead.
    sortedEmployees = sorted(const.EMPLOYEES)
    for name in sortedEmployees:
        employeeList.append(Employee(name, top))
    # Add Employees to top view
    for employee in employeeList:
        showEmployee(employee, row, col)
        # Adjust location for grid layout: 4 employees per row band,
        # each band being 4 grid rows tall.
        col += 1
        if (col == 4):
            col = 0
            row += 4
    return row
def addUtilityButtons(row, col):
    """ Adds utility buttons to the top of the window """
    # Global Variables
    global attendingCount
    global lblCount
    # (text, callback) specs for the three utility buttons, left to right.
    button_specs = (
        ("All Attending", lambda: markAllAttending(employeeList)),
        ("Clear", lambda: clearAll(employeeList)),
        ("Exit", exit),
    )
    for text, callback in button_specs:
        button = tk.Button(
            top, height=const.BTN_HEIGHT, width=const.BTN_WIDTH,
            text=text, command=callback)
        button.grid(row=row, column=col, padx=const.BTN_PAD_X,
                    pady=const.UTILITY_BTN_PAD_Y)
        col += 1
    # Attendance counter label sits to the right of the buttons.
    lblCount.set(const.ATTENDING + ': ' + str(attendingCount))
    lblCounter = tk.Label(top, textvariable=lblCount, relief=FLAT, justify=LEFT,
                          anchor=W, font=const.FONT, width=const.BTN_WIDTH)
    lblCounter.grid(row=row, column=col, padx=const.BTN_PAD_X,
                    pady=const.UTILITY_BTN_PAD_Y)
# --- Script entry: build the UI and hand control to the tkinter loop ---
main = Main(top)
# Add widgets to main view
row = makeWidgets(main, row, col)
# Move the starting position to add utility buttons
row += 3
# Add utility buttons to bottom
addUtilityButtons(row, col)
# Set window size and disable resizing
# top.geometry(const.WINDOW_SIZE)
top.resizable(False, False)
# Set no host
hostAssigned = False
# Start the tkinter event loop (blocks until the window is destroyed)
top.mainloop()
9d5a6286742228a0cb2f27f8763fb1a0fd791fac | 4,035 | py | Python | fault/tester/symbolic_tester.py | makaimann/fault | 8c805415f398e64971d18fbd3014bc0b59fb38b8 | [
"BSD-3-Clause"
] | 31 | 2018-07-16T15:03:14.000Z | 2022-03-10T08:36:09.000Z | fault/tester/symbolic_tester.py | makaimann/fault | 8c805415f398e64971d18fbd3014bc0b59fb38b8 | [
"BSD-3-Clause"
] | 216 | 2018-07-18T20:00:34.000Z | 2021-10-05T17:40:47.000Z | fault/tester/symbolic_tester.py | makaimann/fault | 8c805415f398e64971d18fbd3014bc0b59fb38b8 | [
"BSD-3-Clause"
] | 10 | 2019-02-17T00:56:58.000Z | 2021-11-05T13:31:37.000Z | import fault
from .staged_tester import Tester
from fault.wrapper import Wrapper, PortWrapper, InstanceWrapper
try:
from fault.pono_target import PonoTarget
except ImportError:
# Optional dependency
pass
import fault.actions as actions
from fault.random import ConstrainedRandomGenerator
class SymbolicWrapper(Wrapper):
    """Wrapper whose attribute protocol turns circuit-port access into
    symbolic operations: setting a port attribute pokes it through the
    parent Tester, and getting one returns a SymbolicPortWrapper."""

    def __init__(self, circuit, parent):
        super().__init__(circuit, parent)

    def __setattr__(self, attr, value):
        # Hack to stage this after __init__ has been run, should redefine this
        # method in a metaclass? Could also use a try/except pattern, so the
        # exceptions only occur during object instantiation
        if hasattr(self, "circuit") and hasattr(self, "instance_map"):
            if attr in self.circuit.interface.ports.keys():
                if isinstance(self.parent, fault.Tester):
                    self.parent.poke(self.circuit.interface.ports[attr], value)
                else:
                    # NOTE(review): aborting the process here is drastic;
                    # kept for compatibility, but consider raising instead.
                    exit(1)
            else:
                object.__setattr__(self, attr, value)
        else:
            object.__setattr__(self, attr, value)

    def __getattr__(self, attr):
        try:
            if attr in self.circuit.interface.ports.keys():
                return SymbolicPortWrapper(self.circuit.interface.ports[attr],
                                           self)
            elif attr in self.instance_map:
                return SymbolicInstanceWrapper(self.instance_map[attr], self)
            # Bug fix: the original discarded the result of the fallback
            # lookup (no `return`), silently yielding None for every
            # unknown attribute instead of the attribute value/AttributeError.
            return object.__getattribute__(self, attr)
        except Exception:
            return object.__getattribute__(self, attr)
class SymbolicCircuitWrapper(SymbolicWrapper):
    """Top-level circuit wrapper; behavior is inherited from SymbolicWrapper."""
    pass
class SymbolicPortWrapper(PortWrapper):
    """Port wrapper exposing symbolic assume/guarantee operations.

    Both methods delegate to the tester reachable through the port's
    select path, passing the path so the tester knows which port the
    constraint applies to.
    """

    def assume(self, pred):
        # Register an input-port assumption with the owning tester.
        select_path = self.select_path
        select_path.tester.assume(select_path, pred)

    def guarantee(self, pred):
        # Register an output-port guarantee with the owning tester.
        select_path = self.select_path
        select_path.tester.guarantee(select_path, pred)
class SymbolicInstanceWrapper(InstanceWrapper):
    """Instance wrapper; behavior is inherited from InstanceWrapper."""
    pass
class SymbolicTester(Tester):
    """Tester that supports symbolic constraints (assume/guarantee) on
    circuit ports, driven by randomized test vectors."""

    def __init__(self, circuit, clock=None, num_tests=100,
                 random_strategy="rejection"):
        """
        circuit: circuit under test (passed through to Tester).
        clock: optional clock port (passed through to Tester).
        num_tests: number of random vectors to generate/run.
        random_strategy: "rejection" (default) or "smt" — with "smt",
            satisfying values are pre-generated by a constraint solver.
        """
        super().__init__(circuit, clock)
        self.num_tests = num_tests
        self.random_strategy = random_strategy

    def assume(self, port, constraint):
        """
        Place a constraint on an input port by providing a symbolic expression
        as a Python lambda or function

            symbolic_tester_inst.assume(top.I, lambda x : x >= 0)
        """
        action = actions.Assume(port, constraint)
        action.has_randvals = False
        if self.random_strategy == "smt":
            # Pre-generate satisfying assignments with the constrained
            # random generator instead of relying on rejection sampling.
            port = port[-1]
            v = {str(port.name): len(port)}
            gen = ConstrainedRandomGenerator()
            action.randvals = iter(gen(v, constraint, self.num_tests))
            action.has_randvals = True
        self.actions.append(action)

    def guarantee(self, port, constraint):
        """
        Assert a property about an output port by providing a symbolic
        expression as a Python lambda or function

            symbolic_tester_inst.assume(top.O, lambda x : x >= 0)
        """
        self.actions.append(actions.Guarantee(port, constraint))

    @property
    def circuit(self):
        # Wrap the circuit so attribute access yields symbolic wrappers.
        return SymbolicCircuitWrapper(self._circuit, self)

    def run(self, target="verilator"):
        """Run the recorded actions on the named backend ("verilator" or
        "pono"); raises NotImplementedError for anything else."""
        if target == "verilator":
            self.targets[target].run(self.actions, self.verilator_includes,
                                     self.num_tests, self._circuit)
        elif target == "pono":
            self.targets[target].run(self.actions)
        else:
            raise NotImplementedError()

    def make_target(self, target: str, **kwargs):
        # "pono" gets a dedicated target; everything else is delegated
        # to the base Tester.
        if target == "pono":
            return PonoTarget(self._circuit, **kwargs)
        else:
            return super().make_target(target, **kwargs)
| 34.784483 | 79 | 0.626518 |
09a1c7b8c6315c9b750b57864a41a55a9f4cb1ac | 2,255 | py | Python | models/property.py | esacosta/u-mooc | 8d9a1427c988121e12dd6e2f7c8835f0e561c507 | [
"Apache-2.0"
] | 1 | 2015-10-06T14:01:44.000Z | 2015-10-06T14:01:44.000Z | models/property.py | esacosta/u-mooc | 8d9a1427c988121e12dd6e2f7c8835f0e561c507 | [
"Apache-2.0"
] | null | null | null | models/property.py | esacosta/u-mooc | 8d9a1427c988121e12dd6e2f7c8835f0e561c507 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Properties and its collections."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
import collections
class Property(object):
    """A single named schema property with its display metadata."""

    def __init__(
            self, name, label, property_type, select_data=None, description=None,
            optional=False, extra_schema_dict_values=None):
        """Capture every constructor argument on a private attribute."""
        values = (
            ('name', name),
            ('label', label),
            ('property_type', property_type),
            ('select_data', select_data),
            ('description', description),
            ('optional', optional),
            ('extra_schema_dict_values', extra_schema_dict_values),
        )
        for key, value in values:
            setattr(self, '_' + key, value)

    @property
    def name(self):
        """Read-only access to the property's name."""
        return self._name
class Registry(object):
    """Registry is a collection of Property's, plus named sub-registries,
    describing one 'object' node of a JSON-schema-like structure."""

    def __init__(self, title, description=None):
        """
        title: identifier stored as the schema 'id'.
        description: optional text added to the schema dict when non-empty.
        """
        self._title = title
        self._registry = {'id': title, 'type': 'object'}
        self._description = description
        if description:
            self._registry['description'] = description
        self._properties = []
        # Ordered so sub-registries serialize in insertion order.
        self._sub_registories = collections.OrderedDict()

    @property
    def title(self):
        return self._title

    def add_property(self, schema_field):
        """Add a Property to this Registry."""
        self._properties.append(schema_field)

    def add_sub_registry(
            self, name, title=None, description=None, registry=None):
        """Add a sub registry under *name*; builds one from title/description
        when *registry* is not supplied.  Returns the sub registry."""
        if not registry:
            registry = Registry(title, description)
        self._sub_registories[name] = registry
        return registry

    def has_subregistries(self):
        """Return True when at least one sub registry has been added."""
        # Idiom fix: bool(...) instead of `True if ... else False`.
        return bool(self._sub_registories)
| 31.760563 | 77 | 0.682483 |
5081d2094d2b7952cf64120f6d82d6b5b55f5c84 | 1,991 | py | Python | peer/migrations/0004_lg_query.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | peer/migrations/0004_lg_query.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | peer/migrations/0004_lg_query.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# lg query
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
    """Create the looking-glass query tables (peer_lgquerytype and
    peer_lgquerycommand) for the peer application."""

    def migrate(self):
        # Model 'LGQueryType'
        self.db.create_table(
            "peer_lgquerytype",
            (
                ("id", models.AutoField(verbose_name="ID", primary_key=True, auto_created=True)),
                ("name", models.CharField("Name", max_length=32, unique=True)),
            ),
        )
        # Mock Models: stand-ins referencing existing tables by name so
        # foreign keys can be declared without importing real model classes.
        PeeringPointType = self.db.mock_model(
            model_name="PeeringPointType", db_table="peer_peeringpointtype"
        )
        LGQueryType = self.db.mock_model(model_name="LGQueryType", db_table="peer_lgquerytype")
        # Model 'LGQueryCommand'
        self.db.create_table(
            "peer_lgquerycommand",
            (
                ("id", models.AutoField(verbose_name="ID", primary_key=True, auto_created=True)),
                (
                    "peering_point_type",
                    models.ForeignKey(
                        PeeringPointType,
                        verbose_name="Peering Point Type",
                        on_delete=models.CASCADE,
                    ),
                ),
                (
                    "query_type",
                    models.ForeignKey(
                        LGQueryType, verbose_name="LG Query Type", on_delete=models.CASCADE
                    ),
                ),
                ("command", models.CharField("Command", max_length=128)),
            ),
        )
        # Enforce one command per (peering point type, query type) pair.
        self.db.create_index(
            "peer_lgquerycommand", ["peering_point_type_id", "query_type_id"], unique=True
        )
| 34.929825 | 97 | 0.473129 |
3bef7faa910a0ea457ffa47174d8ca754e96fcd5 | 1,363 | py | Python | src/dms-preview/azext_dms/vendored_sdks/datamigration/models/migration_table_metadata_py3.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | 1 | 2019-05-10T19:58:09.000Z | 2019-05-10T19:58:09.000Z | src/dms-preview/azext_dms/vendored_sdks/datamigration/models/migration_table_metadata_py3.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | src/dms-preview/azext_dms/vendored_sdks/datamigration/models/migration_table_metadata_py3.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MigrationTableMetadata(Model):
    """Metadata for tables selected in migration project.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar source_table_name: Source table name
    :vartype source_table_name: str
    :ivar target_table_name: Target table name
    :vartype target_table_name: str
    """

    # Both fields are marked readonly: client-supplied values are ignored
    # during serialization (see class docstring).
    _validation = {
        'source_table_name': {'readonly': True},
        'target_table_name': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and types.
    _attribute_map = {
        'source_table_name': {'key': 'sourceTableName', 'type': 'str'},
        'target_table_name': {'key': 'targetTableName', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super(MigrationTableMetadata, self).__init__(**kwargs)
        # Server-populated; always initialized to None on the client side.
        self.source_table_name = None
        self.target_table_name = None
| 33.243902 | 76 | 0.615554 |
e5d108e1d240e1920b34afadca4c8e0f3570c9b2 | 2,233 | py | Python | lowess/version.py | a5o/lowess | 416a83a87b0ada1fe95d973eaf64fc32a6266c40 | [
"MIT"
] | 30 | 2015-04-27T18:07:56.000Z | 2022-03-30T06:04:32.000Z | lowess/version.py | a5o/lowess | 416a83a87b0ada1fe95d973eaf64fc32a6266c40 | [
"MIT"
] | 3 | 2020-03-14T04:21:47.000Z | 2020-12-06T17:48:31.000Z | lowess/version.py | a5o/lowess | 416a83a87b0ada1fe95d973eaf64fc32a6266c40 | [
"MIT"
] | 13 | 2016-10-04T22:15:07.000Z | 2021-09-22T18:39:29.000Z | # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
# Version components combined below into __version__ ("X.Y[.Z][.extra]").
_version_major = 0
_version_minor = 1
_version_micro = ''  # use '' for first of series, number for 1 and above
_version_extra = 'dev'
#_version_extra = ''  # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
    _ver.append(_version_micro)
if _version_extra:
    _ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
# PyPI trove classifiers for setup.py.
CLASSIFIERS = ["Development Status :: 3 - Alpha",
               "Environment :: Console",
               "Intended Audience :: Science/Research",
               "License :: OSI Approved :: MIT License",
               "Operating System :: OS Independent",
               "Programming Language :: Python",
               "Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "lowess: locally linear regression"
# Long description will go up on the pypi page
long_description = """
Lowess
======
This is a python implementation of the LOWESS algorithm for locally linear
regression described in Cleveland (1979) and in chapter 6 of Friedman, Hastie
and Tibshirani (2008).
Friedman, Hastie and Tibshirani (2008). The Elements of Statistical
Learning.
Cleveland (1979). Robust Locally Weighted Regression and Smoothing
Scatterplots. J American Statistical Association, 74: 829-836.)
License
=======
``lowess`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2015--, Ariel Rokem
All rights reserved.
"""
# Package metadata consumed by setup.py.
NAME = "lowess"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "arokem@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/arokem/lowess"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Ariel Rokem"
AUTHOR_EMAIL = "arokem@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGES = ['lowess',
            'lowess.tests']
REQUIRES = ["numpy"]
| 30.589041 | 77 | 0.717868 |
c587735915469139dbf0944a2be35cef88ee4597 | 319 | py | Python | tests/test_examples.py | bureau14/qdb-api-python | 2a010df3252d39bc4d529f545547c5cefb9fe86e | [
"BSD-3-Clause"
] | 9 | 2015-09-02T20:13:13.000Z | 2020-07-16T14:17:36.000Z | tests/test_examples.py | bureau14/qdb-api-python | 2a010df3252d39bc4d529f545547c5cefb9fe86e | [
"BSD-3-Clause"
] | 5 | 2018-02-20T10:47:02.000Z | 2020-05-20T10:05:49.000Z | tests/test_examples.py | bureau14/qdb-api-python | 2a010df3252d39bc4d529f545547c5cefb9fe86e | [
"BSD-3-Clause"
] | 1 | 2018-04-01T11:12:56.000Z | 2018-04-01T11:12:56.000Z | # pylint: disable=C0103,C0111,C0302,W0212
import datetime
import pytest
import quasardb
import sys
import os
# Make the examples/ directory importable so the tutorial modules resolve.
sys.path.append(os.path.join(os.path.split(__file__)[0], '..', 'examples/'))
def test_tutorial_python():
    # Smoke test: importing the module executes the tutorial end to end.
    import tutorial.python
def test_tutorial_pandas_tutorial():
    # Smoke test: importing the module executes the tutorial end to end.
    import tutorial.pandas_tutorial
| 18.764706 | 76 | 0.768025 |
92c68ff0b736daab69fcfee955d674ca044a2c9d | 102,570 | py | Python | Python36_x86_Template/Lib/email/_header_value_parser.py | iveskim/cloudbase-init-installer | bc7630a7fb7dd527618dac3938147e2d9439c285 | [
"Apache-2.0"
] | null | null | null | Python36_x86_Template/Lib/email/_header_value_parser.py | iveskim/cloudbase-init-installer | bc7630a7fb7dd527618dac3938147e2d9439c285 | [
"Apache-2.0"
] | null | null | null | Python36_x86_Template/Lib/email/_header_value_parser.py | iveskim/cloudbase-init-installer | bc7630a7fb7dd527618dac3938147e2d9439c285 | [
"Apache-2.0"
] | 4 | 2019-12-11T18:50:22.000Z | 2020-08-10T19:25:11.000Z | """Header value parser implementing various email-related RFC parsing rules.
The parsing methods defined in this module implement various email related
parsing rules. Principal among them is RFC 5322, which is the followon
to RFC 2822 and primarily a clarification of the former. It also implements
RFC 2047 encoded word decoding.
RFC 5322 goes to considerable trouble to maintain backward compatibility with
RFC 822 in the parse phase, while cleaning up the structure on the generation
phase. This parser supports correct RFC 5322 generation by tagging white space
as folding white space only when folding is allowed in the non-obsolete rule
sets. Actually, the parser is even more generous when accepting input than RFC
5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
Where possible deviations from the standard are annotated on the 'defects'
attribute of tokens that deviate.
The general structure of the parser follows RFC 5322, and uses its terminology
where there is a direct correspondence. Where the implementation requires a
somewhat different structure than that used by the formal grammar, new terms
that mimic the closest existing terms are used. Thus, it really helps to have
a copy of RFC 5322 handy when studying this code.
Input to the parser is a string that has already been unfolded according to
RFC 5322 rules. According to the RFC this unfolding is the very first step, and
this parser leaves the unfolding step to a higher level message parser, which
will have already detected the line breaks that need unfolding while
determining the beginning and end of each header.
The output of the parser is a TokenList object, which is a list subclass. A
TokenList is a recursive data structure. The terminal nodes of the structure
are Terminal objects, which are subclasses of str. These do not correspond
directly to terminal objects in the formal grammar, but are instead more
practical higher level combinations of true terminals.
All TokenList and Terminal objects have a 'value' attribute, which produces the
semantically meaningful value of that part of the parse subtree. The value of
all whitespace tokens (no matter how many sub-tokens they may contain) is a
single space, as per the RFC rules. This includes 'CFWS', which is herein
included in the general class of whitespace tokens. There is one exception to
the rule that whitespace tokens are collapsed into single spaces in values: in
the value of a 'bare-quoted-string' (a quoted-string with no leading or
trailing whitespace), any whitespace that appeared between the quotation marks
is preserved in the returned value. Note that in all Terminal strings quoted
pairs are turned into their unquoted values.
All TokenList and Terminal objects also have a string value, which attempts to
be a "canonical" representation of the RFC-compliant form of the substring that
produced the parsed subtree, including minimal use of quoted pair quoting.
Whitespace runs are not collapsed.
Comment tokens also have a 'content' attribute providing the string found
between the parens (including any nested comments) with whitespace preserved.
All TokenList and Terminal objects have a 'defects' attribute which is a
possibly empty list all of the defects found while creating the token. Defects
may appear on any token in the tree, and a composite list of all defects in the
subtree is available through the 'all_defects' attribute of any node. (For
Terminal notes x.defects == x.all_defects.)
Each object in a parse tree is called a 'token', and each has a 'token_type'
attribute that gives the name from the RFC 5322 grammar that it represents.
Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
It is returned in place of lists of (ctext/quoted-pair) and
(qtext/quoted-pair).
XXX: provide complete list of token types.
"""
import re
import urllib # For urllib.parse.unquote
from string import hexdigits
from collections import OrderedDict
from operator import itemgetter
from email import _encoded_words as _ew
from email import errors
from email import utils
#
# Useful constants and functions
#
# Character sets used to decide where the various token kinds end.
WSP = set(' \t')
# Characters that may start CFWS (white space or an opening comment paren).
CFWS_LEADER = WSP | set('(')
# 'specials' (cf. RFC 5322): characters that delimit structured tokens.
SPECIALS = set(r'()<>@,:;.\"[]')
ATOM_ENDS = SPECIALS | WSP
DOT_ATOM_ENDS = ATOM_ENDS - set('.')
# '.', '"', and '(' do not end phrases in order to support obs-phrase
PHRASE_ENDS = SPECIALS - set('."(')
# MIME (RFC 2045-style) token delimiters and parameter-attribute variants.
TSPECIALS = (SPECIALS | set('/?=')) - set('.')
TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
def quote_string(value):
    """Return *value* as a quoted-string: backslashes and double quotes
    are backslash-escaped and the result is wrapped in double quotes."""
    escaped = str(value).replace('\\', '\\\\').replace('"', '\\"')
    return '"{}"'.format(escaped)
#
# TokenList and its subclasses
#
class TokenList(list):
    """Base class for all parse-tree nodes: a list of child tokens plus
    a token_type tag and a list of parsing defects."""

    # Overridden by subclasses with the RFC grammar name they represent.
    token_type = None
    syntactic_break = True
    ew_combine_allowed = True

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Defects recorded directly on this node (see also all_defects).
        self.defects = []

    def __str__(self):
        # Canonical source form: concatenation of the children's forms.
        return ''.join(str(x) for x in self)

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__,
                             super().__repr__())

    @property
    def value(self):
        # Semantic value: children with a falsy value are dropped.
        return ''.join(x.value for x in self if x.value)

    @property
    def all_defects(self):
        # This node's defects plus those of the entire subtree.
        return sum((x.all_defects for x in self), self.defects)

    def startswith_fws(self):
        # Delegates to the first child; assumes the list is non-empty.
        return self[0].startswith_fws()

    @property
    def as_ew_allowed(self):
        """True if all top level tokens of this part may be RFC2047 encoded."""
        return all(part.as_ew_allowed for part in self)

    @property
    def comments(self):
        # Flattened list of all comment contents found in children.
        comments = []
        for token in self:
            comments.extend(token.comments)
        return comments

    def fold(self, *, policy):
        # Folding is implemented by the module-level refolding routine.
        return _refold_parse_tree(self, policy=policy)

    def pprint(self, indent=''):
        """Print a debugging representation of the parse tree."""
        print(self.ppstr(indent=indent))

    def ppstr(self, indent=''):
        """Return the debugging representation as a single string."""
        return '\n'.join(self._pp(indent=indent))

    def _pp(self, indent=''):
        # Recursive generator producing one line per tree node.
        yield '{}{}/{}('.format(
            indent,
            self.__class__.__name__,
            self.token_type)
        for token in self:
            if not hasattr(token, '_pp'):
                yield (indent + '    !! invalid element in token '
                                'list: {!r}'.format(token))
            else:
                yield from token._pp(indent+'    ')
        if self.defects:
            extra = ' Defects: {}'.format(self.defects)
        else:
            extra = ''
        yield '{}){}'.format(indent, extra)
class WhiteSpaceTokenList(TokenList):
    """A token list whose semantic value is a single space (e.g. CFWS)."""
    @property
    def value(self):
        return ' '
    @property
    def comments(self):
        return [x.content for x in self if x.token_type=='comment']
class UnstructuredTokenList(TokenList):
    """Parse tree for an RFC 5322 'unstructured' header value."""
    token_type = 'unstructured'
class Phrase(TokenList):
    """'phrase' production: 1*word / obs-phrase (RFC 5322)."""
    token_type = 'phrase'
class Word(TokenList):
    """'word' production: atom / quoted-string (RFC 5322)."""
    token_type = 'word'
class CFWSList(WhiteSpaceTokenList):
    """CFWS: comments and/or folding white space (RFC 5322)."""
    token_type = 'cfws'
class Atom(TokenList):
    """'atom' production: [CFWS] 1*atext [CFWS] (RFC 5322)."""
    token_type = 'atom'
class Token(TokenList):
    """MIME 'token' production (RFC 2045)."""
    token_type = 'token'
    encode_as_ew = False  # MIME tokens are never RFC 2047 encoded
class EncodedWord(TokenList):
    """A decoded RFC 2047 encoded-word ("=?charset?cte?text?=")."""
    token_type = 'encoded-word'
    cte = None      # original content-transfer-encoding text, set by parser
    charset = None  # charset name from the encoded word
    lang = None     # optional RFC 2231 language tag
class QuotedString(TokenList):
    """quoted-string = [CFWS] bare-quoted-string [CFWS] (RFC 5322)."""
    token_type = 'quoted-string'
    @property
    def content(self):
        # Unquoted content of the embedded bare-quoted-string, if any.
        for x in self:
            if x.token_type == 'bare-quoted-string':
                return x.value
    @property
    def quoted_value(self):
        # Like value, but keeps the quote marks on the quoted part.
        res = []
        for x in self:
            if x.token_type == 'bare-quoted-string':
                res.append(str(x))
            else:
                res.append(x.value)
        return ''.join(res)
    @property
    def stripped_value(self):
        # Content with surrounding CFWS dropped.
        for token in self:
            if token.token_type == 'bare-quoted-string':
                return token.value
class BareQuotedString(QuotedString):
    """A quoted-string without its surrounding CFWS; str() re-adds quoting."""
    token_type = 'bare-quoted-string'
    def __str__(self):
        return quote_string(''.join(str(x) for x in self))
    @property
    def value(self):
        # Unquoted text between the quote marks.
        return ''.join(str(x) for x in self)
class Comment(WhiteSpaceTokenList):
    """An RFC 5322 comment: "(" *ccontent ")"; semantically whitespace."""
    token_type = 'comment'
    def __str__(self):
        # Reassemble with parens, re-escaping special characters.
        return ''.join(sum([
            ["("],
            [self.quote(x) for x in self],
            [")"],
        ], []))
    def quote(self, value):
        # Nested comments are already quoted by their own __str__.
        if value.token_type == 'comment':
            return str(value)
        return str(value).replace('\\', '\\\\').replace(
                                  '(', r'\(').replace(
                                  ')', r'\)')
    @property
    def content(self):
        # Unquoted text of the comment (without the parens).
        return ''.join(str(x) for x in self)
    @property
    def comments(self):
        return [self.content]
class AddressList(TokenList):
    """address-list: comma-separated addresses (RFC 5322)."""
    token_type = 'address-list'
    @property
    def addresses(self):
        return [x for x in self if x.token_type=='address']
    @property
    def mailboxes(self):
        # Only valid mailboxes of all contained addresses.
        return sum((x.mailboxes
                    for x in self if x.token_type=='address'), [])
    @property
    def all_mailboxes(self):
        # Valid and invalid mailboxes alike.
        return sum((x.all_mailboxes
                    for x in self if x.token_type=='address'), [])
class Address(TokenList):
    """address = mailbox / group (RFC 5322); first child determines which."""
    token_type = 'address'
    @property
    def display_name(self):
        # Only groups carry a display name at this level.
        if self[0].token_type == 'group':
            return self[0].display_name
    @property
    def mailboxes(self):
        if self[0].token_type == 'mailbox':
            return [self[0]]
        elif self[0].token_type == 'invalid-mailbox':
            return []
        return self[0].mailboxes
    @property
    def all_mailboxes(self):
        if self[0].token_type == 'mailbox':
            return [self[0]]
        elif self[0].token_type == 'invalid-mailbox':
            return [self[0]]
        return self[0].all_mailboxes
class MailboxList(TokenList):
    """mailbox-list: comma-separated mailboxes (RFC 5322)."""
    token_type = 'mailbox-list'
    @property
    def mailboxes(self):
        return [x for x in self if x.token_type=='mailbox']
    @property
    def all_mailboxes(self):
        return [x for x in self
            if x.token_type in ('mailbox', 'invalid-mailbox')]
class GroupList(TokenList):
    """group-list: the mailbox-list (or CFWS) inside a group (RFC 5322)."""
    token_type = 'group-list'
    @property
    def mailboxes(self):
        if not self or self[0].token_type != 'mailbox-list':
            return []
        return self[0].mailboxes
    @property
    def all_mailboxes(self):
        if not self or self[0].token_type != 'mailbox-list':
            return []
        return self[0].all_mailboxes
class Group(TokenList):
    """group = display-name ":" [group-list] ";" [CFWS] (RFC 5322).

    Children are positional: [0] display-name, [1] the ':',
    [2] the group-list (when present).
    """
    token_type = "group"
    @property
    def mailboxes(self):
        if self[2].token_type != 'group-list':
            return []
        return self[2].mailboxes
    @property
    def all_mailboxes(self):
        if self[2].token_type != 'group-list':
            return []
        return self[2].all_mailboxes
    @property
    def display_name(self):
        return self[0].display_name
class NameAddr(TokenList):
    """name-addr = [display-name] angle-addr (RFC 5322).

    The angle-addr is always the last child; the display-name, when
    present, is the first.
    """
    token_type = 'name-addr'
    @property
    def display_name(self):
        if len(self) == 1:
            return None
        return self[0].display_name
    @property
    def local_part(self):
        return self[-1].local_part
    @property
    def domain(self):
        return self[-1].domain
    @property
    def route(self):
        return self[-1].route
    @property
    def addr_spec(self):
        return self[-1].addr_spec
class AngleAddr(TokenList):
    """angle-addr = [CFWS] "<" addr-spec ">" [CFWS] (RFC 5322)."""
    token_type = 'angle-addr'
    @property
    def local_part(self):
        for x in self:
            if x.token_type == 'addr-spec':
                return x.local_part
    @property
    def domain(self):
        for x in self:
            if x.token_type == 'addr-spec':
                return x.domain
    @property
    def route(self):
        for x in self:
            if x.token_type == 'obs-route':
                return x.domains
    @property
    def addr_spec(self):
        # Note: the trailing 'else' belongs to the *for* loop -- '<>' is
        # returned only when no addr-spec child is found (a null address).
        for x in self:
            if x.token_type == 'addr-spec':
                if x.local_part:
                    return x.addr_spec
                else:
                    # Empty local part: re-quote it explicitly.
                    return quote_string(x.local_part) + x.addr_spec
        else:
            return '<>'
class ObsRoute(TokenList):
    """obs-route: obsolete source-route list of domains (RFC 5322)."""
    token_type = 'obs-route'
    @property
    def domains(self):
        return [x.domain for x in self if x.token_type == 'domain']
class Mailbox(TokenList):
    """mailbox = name-addr / addr-spec (RFC 5322); one child, either kind."""
    token_type = 'mailbox'
    @property
    def display_name(self):
        if self[0].token_type == 'name-addr':
            return self[0].display_name
    @property
    def local_part(self):
        return self[0].local_part
    @property
    def domain(self):
        return self[0].domain
    @property
    def route(self):
        # Routes only occur inside name-addr (obs-route).
        if self[0].token_type == 'name-addr':
            return self[0].route
    @property
    def addr_spec(self):
        return self[0].addr_spec
class InvalidMailbox(TokenList):
    """Placeholder for an unparsable mailbox; all accessors return None."""
    token_type = 'invalid-mailbox'
    @property
    def display_name(self):
        return None
    # All address components share the same None-returning accessor.
    local_part = domain = route = addr_spec = display_name
class Domain(TokenList):
    """domain = dot-atom / domain-literal / obs-domain (RFC 5322)."""
    token_type = 'domain'
    as_ew_allowed = False  # domains must never be RFC 2047 encoded
    @property
    def domain(self):
        # Semantic value with all internal whitespace removed.
        return ''.join(super().value.split())
class DotAtom(TokenList):
    """dot-atom = [CFWS] dot-atom-text [CFWS] (RFC 5322)."""
    token_type = 'dot-atom'
class DotAtomText(TokenList):
    """dot-atom-text = 1*atext *("." 1*atext) (RFC 5322)."""
    token_type = 'dot-atom-text'
    as_ew_allowed = True
class AddrSpec(TokenList):
    """addr-spec = local-part "@" domain (RFC 5322).

    Children are positional: [0] local-part, [1] '@', [2] domain
    (the '@' and domain may be absent for defective input).
    """
    token_type = 'addr-spec'
    as_ew_allowed = False  # addr-specs must never be RFC 2047 encoded
    @property
    def local_part(self):
        return self[0].local_part
    @property
    def domain(self):
        if len(self) < 3:
            return None
        return self[-1].domain
    @property
    def value(self):
        if len(self) < 3:
            return self[0].value
        # Drop whitespace immediately around the '@'.
        return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
    @property
    def addr_spec(self):
        # Re-quote the local part if it contains characters that would
        # end a dot-atom (so the result is a valid addr-spec string).
        nameset = set(self.local_part)
        if len(nameset) > len(nameset-DOT_ATOM_ENDS):
            lp = quote_string(self.local_part)
        else:
            lp = self.local_part
        if self.domain is not None:
            return lp + '@' + self.domain
        return lp
class ObsLocalPart(TokenList):
    """obs-local-part = word *("." word) (RFC 5322 obsolete syntax)."""
    token_type = 'obs-local-part'
    as_ew_allowed = False
class DisplayName(Phrase):
    """display-name = phrase (RFC 5322), with quoting-aware value logic."""
    token_type = 'display-name'
    ew_combine_allowed = False
    @property
    def display_name(self):
        # The phrase value with leading/trailing CFWS stripped, whether the
        # CFWS is a direct child or nested inside the first/last child.
        res = TokenList(self)
        if res[0].token_type == 'cfws':
            res.pop(0)
        else:
            if res[0][0].token_type == 'cfws':
                res[0] = TokenList(res[0][1:])
        if res[-1].token_type == 'cfws':
            res.pop()
        else:
            if res[-1][-1].token_type == 'cfws':
                res[-1] = TokenList(res[-1][:-1])
        return res.value
    @property
    def value(self):
        # Quote the whole display name if it has defects or any part was
        # originally a quoted string; preserve a single space of padding
        # where CFWS bordered the name.
        quote = False
        if self.defects:
            quote = True
        else:
            for x in self:
                if x.token_type == 'quoted-string':
                    quote = True
        if quote:
            pre = post = ''
            if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
                pre = ' '
            if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
                post = ' '
            return pre+quote_string(self.display_name)+post
        else:
            return super().value
class LocalPart(TokenList):
    """local-part = dot-atom / quoted-string / obs-local-part (RFC 5322)."""
    token_type = 'local-part'
    as_ew_allowed = False
    @property
    def value(self):
        if self[0].token_type == "quoted-string":
            return self[0].quoted_value
        else:
            return self[0].value
    @property
    def local_part(self):
        # Strip whitespace from front, back, and around dots.
        # DOT sentinels bracket the scan so edge CFWS is handled uniformly;
        # they are sliced off again at the end.
        res = [DOT]
        last = DOT
        last_is_tl = False
        for tok in self[0] + [DOT]:
            if tok.token_type == 'cfws':
                continue
            if (last_is_tl and tok.token_type == 'dot' and
                    last[-1].token_type == 'cfws'):
                # Drop CFWS that immediately precedes a dot.
                res[-1] = TokenList(last[:-1])
            is_tl = isinstance(tok, TokenList)
            if (is_tl and last.token_type == 'dot' and
                    tok[0].token_type == 'cfws'):
                # Drop CFWS that immediately follows a dot.
                res.append(TokenList(tok[1:]))
            else:
                res.append(tok)
            last = res[-1]
            last_is_tl = is_tl
        res = TokenList(res[1:-1])
        return res.value
class DomainLiteral(TokenList):
    """domain-literal = [CFWS] "[" *dtext "]" [CFWS] (RFC 5322)."""
    token_type = 'domain-literal'
    as_ew_allowed = False
    @property
    def domain(self):
        return ''.join(super().value.split())
    @property
    def ip(self):
        # The dtext content between the brackets (typically an IP address).
        for x in self:
            if x.token_type == 'ptext':
                return x.value
class MIMEVersion(TokenList):
    """MIME-Version header value (RFC 2045)."""
    token_type = 'mime-version'
    major = None  # int major version, set by the parser
    minor = None  # int minor version, set by the parser
class Parameter(TokenList):
    """A single MIME parameter (attribute=value), RFC 2045/2231."""
    token_type = 'parameter'
    sectioned = False      # True for RFC 2231 name*N= continuation sections
    extended = False       # True for RFC 2231 name*= extended (encoded) form
    charset = 'us-ascii'   # charset declared in an extended parameter
    @property
    def section_number(self):
        # Because the first token, the attribute (name) eats CFWS, the second
        # token is always the section if there is one.
        return self[1].number if self.sectioned else 0
    @property
    def param_value(self):
        # This is part of the "handle quoted extended parameters" hack.
        # Dig through a possibly quoted structure to find the value token.
        for token in self:
            if token.token_type == 'value':
                return token.stripped_value
            if token.token_type == 'quoted-string':
                for token in token:
                    if token.token_type == 'bare-quoted-string':
                        for token in token:
                            if token.token_type == 'value':
                                return token.stripped_value
        return ''
class InvalidParameter(Parameter):
    """A MIME parameter that could not be parsed."""
    token_type = 'invalid-parameter'
class Attribute(TokenList):
    """The attribute (name) part of a MIME parameter."""
    token_type = 'attribute'
    @property
    def stripped_value(self):
        # Matches both 'attrtext' and 'extended-attrtext' tokens.
        for token in self:
            if token.token_type.endswith('attrtext'):
                return token.value
class Section(TokenList):
    """RFC 2231 section marker ('*N') of a continued parameter."""
    token_type = 'section'
    number = None  # int section number, set by the parser
class Value(TokenList):
    """The value part of a MIME parameter."""
    token_type = 'value'
    @property
    def stripped_value(self):
        # Skip leading CFWS, then delegate to the real value token if it
        # supports stripping; otherwise fall back to the raw value.
        token = self[0]
        if token.token_type == 'cfws':
            token = self[1]
        if token.token_type.endswith(
                ('quoted-string', 'attribute', 'extended-attribute')):
            return token.stripped_value
        return self.value
class MimeParameters(TokenList):
    """The full parameter list of a parameterized MIME header.

    Handles RFC 2231 parameter continuations and extended (charset-encoded)
    values, with error recovery for duplicates and numbering gaps.
    """
    token_type = 'mime-parameters'
    syntactic_break = False
    @property
    def params(self):
        """Yield (name, decoded_value) pairs for all parameters."""
        # The RFC specifically states that the ordering of parameters is not
        # guaranteed and may be reordered by the transport layer.  So we have
        # to assume the RFC 2231 pieces can come in any order.  However, we
        # output them in the order that we first see a given name, which gives
        # us a stable __str__.
        params = OrderedDict()
        for token in self:
            if not token.token_type.endswith('parameter'):
                continue
            if token[0].token_type != 'attribute':
                continue
            name = token[0].value.strip()
            if name not in params:
                params[name] = []
            params[name].append((token.section_number, token))
        for name, parts in params.items():
            parts = sorted(parts, key=itemgetter(0))
            first_param = parts[0][1]
            charset = first_param.charset
            # Our arbitrary error recovery is to ignore duplicate parameters,
            # to use appearance order if there are duplicate rfc 2231 parts,
            # and to ignore gaps.  This mimics the error recovery of get_param.
            if not first_param.extended and len(parts) > 1:
                if parts[1][0] == 0:
                    parts[1][1].defects.append(errors.InvalidHeaderDefect(
                        'duplicate parameter name; duplicate(s) ignored'))
                    parts = parts[:1]
                # Else assume the *0* was missing...note that this is different
                # from get_param, but we registered a defect for this earlier.
            value_parts = []
            i = 0
            for section_number, param in parts:
                if section_number != i:
                    # We could get fancier here and look for a complete
                    # duplicate extended parameter and ignore the second one
                    # seen.  But we're not doing that.  The old code didn't.
                    if not param.extended:
                        param.defects.append(errors.InvalidHeaderDefect(
                            'duplicate parameter name; duplicate ignored'))
                        continue
                    else:
                        param.defects.append(errors.InvalidHeaderDefect(
                            "inconsistent RFC2231 parameter numbering"))
                i += 1
                value = param.param_value
                if param.extended:
                    # %XX-encoded bytes; decode with the declared charset.
                    try:
                        value = urllib.parse.unquote_to_bytes(value)
                    except UnicodeEncodeError:
                        # source had surrogate escaped bytes.  What we do now
                        # is a bit of an open question.  I'm not sure this is
                        # the best choice, but it is what the old algorithm did
                        value = urllib.parse.unquote(value, encoding='latin-1')
                    else:
                        try:
                            value = value.decode(charset, 'surrogateescape')
                        except LookupError:
                            # XXX: there should really be a custom defect for
                            # unknown character set to make it easy to find,
                            # because otherwise unknown charset is a silent
                            # failure.
                            value = value.decode('us-ascii', 'surrogateescape')
                        if utils._has_surrogates(value):
                            param.defects.append(errors.UndecodableBytesDefect())
                value_parts.append(value)
            value = ''.join(value_parts)
            yield name, value
    def __str__(self):
        params = []
        for name, value in self.params:
            if value:
                params.append('{}={}'.format(name, quote_string(value)))
            else:
                params.append(name)
        params = '; '.join(params)
        return ' ' + params if params else ''
class ParameterizedHeaderValue(TokenList):
    """Base for header values that carry a MIME parameter list."""
    # Set this false so that the value doesn't wind up on a new line even
    # if it and the parameters would fit there but not on the first line.
    syntactic_break = False
    @property
    def params(self):
        # Searched in reverse: the mime-parameters token is near the end.
        for token in reversed(self):
            if token.token_type == 'mime-parameters':
                return token.params
        return {}
class ContentType(ParameterizedHeaderValue):
    """Content-Type header value (RFC 2045)."""
    token_type = 'content-type'
    as_ew_allowed = False
    maintype = 'text'   # defaults per RFC 2045; overwritten by the parser
    subtype = 'plain'
class ContentDisposition(ParameterizedHeaderValue):
    """Content-Disposition header value (RFC 2183)."""
    token_type = 'content-disposition'
    as_ew_allowed = False
    content_disposition = None  # 'inline'/'attachment'/..., set by the parser
class ContentTransferEncoding(TokenList):
    """Content-Transfer-Encoding header value (RFC 2045)."""
    token_type = 'content-transfer-encoding'
    as_ew_allowed = False
    cte = '7bit'  # default per RFC 2045; overwritten by the parser
class HeaderLabel(TokenList):
    """The header name plus the ':' separator."""
    token_type = 'header-label'
    as_ew_allowed = False
class Header(TokenList):
    """A complete header: header-label followed by its value."""
    token_type = 'header'
#
# Terminal classes and instances
#
class Terminal(str):
    """A leaf of the parse tree: a str annotated with parser metadata.

    Instances carry a ``token_type`` string and a ``defects`` list in
    addition to their text content.
    """
    as_ew_allowed = True
    ew_combine_allowed = True
    syntactic_break = True
    def __new__(cls, value, token_type):
        obj = str.__new__(cls, value)
        obj.token_type = token_type
        obj.defects = []
        return obj
    def __repr__(self):
        return "{}({})".format(type(self).__name__, str.__repr__(self))
    def pprint(self):
        """Print a one-line description of this terminal (debugging aid)."""
        print('{}/{}'.format(type(self).__name__, self.token_type))
    @property
    def all_defects(self):
        # Terminals have no children, so only their own defects (copied).
        return list(self.defects)
    def _pp(self, indent=''):
        # Single-line representation used by TokenList.ppstr/pprint.
        defect_note = ' {}'.format(self.defects) if self.defects else ''
        return ["{}{}/{}({}){}".format(indent,
                                       type(self).__name__,
                                       self.token_type,
                                       str.__repr__(self),
                                       defect_note)]
    def pop_trailing_ws(self):
        # This terminates the recursion.
        return None
    @property
    def comments(self):
        return []
    def __getnewargs__(self):
        # Support copy/pickle of the str subclass with its token_type.
        return str(self), self.token_type
class WhiteSpaceTerminal(Terminal):
    """A terminal whose semantic value is a single space (fws, ptext-ws)."""
    @property
    def value(self):
        return ' '
    def startswith_fws(self):
        return True
class ValueTerminal(Terminal):
    """A terminal whose semantic value is its own text."""
    @property
    def value(self):
        return self
    def startswith_fws(self):
        return False
class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
    """Whitespace between encoded words; invisible in both str() and value."""
    @property
    def value(self):
        return ''
    def __str__(self):
        return ''
# XXX these need to become classes and used as instances so
# that a program can't change them in a parse tree and screw
# up other parse trees.  Maybe should have tests for that, too.
# Shared singleton terminals used throughout the parser.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
#
# Parser
#
# Parse strings according to RFC822/2047/2822/5322 rules.
#
# This is a stateless parser. Each get_XXX function accepts a string and
# returns either a Terminal or a TokenList representing the RFC object named
# by the method and a string containing the remaining unparsed characters
# from the input. Thus a parser method consumes the next syntactic construct
# of a given type and returns a token representing the construct plus the
# unparsed remainder of the input string.
#
# For example, if the first element of a structured header is a 'phrase',
# then:
#
# phrase, value = get_phrase(value)
#
# returns the complete phrase from the start of the string value, plus any
# characters left in the string after the phrase is removed.
# Pre-compiled helpers.  The 'non-*-end' matchers match a run of characters
# up to (but not including) the first character that ends the given token;
# '\' and ']' are escaped since they are special inside a regex class.
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(ATOM_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
_non_token_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(TOKEN_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(ATTRIBUTE_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
    ''.join(EXTENDED_ATTRIBUTE_ENDS).replace(
                                    '\\','\\\\').replace(']',r'\]'))).match
def _validate_xtext(xtext):
    """If input token contains ASCII non-printables, register a defect."""
    # Mutates xtext.defects in place; returns nothing.
    non_printables = _non_printable_finder(xtext)
    if non_printables:
        xtext.defects.append(errors.NonPrintableDefect(non_printables))
    if utils._has_surrogates(xtext):
        xtext.defects.append(errors.UndecodableBytesDefect(
            "Non-ASCII characters found in header token"))
def _get_ptext_to_endchars(value, endchars):
    """Scan printables/quoted-pairs until endchars and return unquoted ptext.
    This function turns a run of qcontent, ccontent-without-comments, or
    dtext-with-quoted-printables into a single string by unquoting any
    quoted printables.  It returns the string, the remaining value, and
    a flag that is True iff there were any quoted printables decoded.
    """
    # Only scan up to the first whitespace run; the rest is passed back
    # unmodified via 'remainder'.
    fragment, *remainder = _wsp_splitter(value, 1)
    vchars = []
    escape = False       # True when the previous char was an unescaped '\'
    had_qp = False
    for pos in range(len(fragment)):
        if fragment[pos] == '\\':
            if escape:
                escape = False
                had_qp = True
            else:
                escape = True
                continue
        if escape:
            escape = False
        elif fragment[pos] in endchars:
            # Stop before the (unescaped) end character.
            break
        vchars.append(fragment[pos])
    else:
        # Loop ran to completion: point pos past the fragment's end so the
        # remainder slice below is empty.
        pos = pos + 1
    return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
def get_fws(value):
    """FWS = 1*WSP

    This isn't the RFC definition.  We're using fws to represent tokens
    where folding can be done, but when we are parsing the *un*folding has
    already been done so we don't need to watch out for CRLF.

    Returns a WhiteSpaceTerminal('fws') holding the leading whitespace of
    *value*, plus the remaining (left-stripped) string.
    """
    stripped = value.lstrip()
    ws_len = len(value) - len(stripped)
    leading_ws = WhiteSpaceTerminal(value[:ws_len], 'fws')
    return leading_ws, stripped
def get_encoded_word(value):
    """ encoded-word = "=?" charset "?" encoding "?" encoded-text "?="

    Returns an EncodedWord token containing the decoded text, plus the
    unparsed remainder of *value*.  Raises HeaderParseError if *value*
    does not start with a syntactically complete encoded word.
    """
    ew = EncodedWord()
    if not value.startswith('=?'):
        raise errors.HeaderParseError(
            "expected encoded word but found {}".format(value))
    tok, *remainder = value[2:].split('?=', 1)
    if tok == value[2:]:
        # No '?=' terminator anywhere in the input.
        raise errors.HeaderParseError(
            "expected encoded word but found {}".format(value))
    remstr = ''.join(remainder)
    if len(remstr) > 1 and remstr[0] in hexdigits and remstr[1] in hexdigits:
        # The ? after the CTE was followed by an encoded word escape (=XX).
        rest, *remainder = remstr.split('?=', 1)
        tok = tok + '?=' + rest
    if len(tok.split()) > 1:
        ew.defects.append(errors.InvalidHeaderDefect(
            "whitespace inside encoded word"))
    ew.cte = value
    value = ''.join(remainder)
    try:
        text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
    except ValueError:
        raise errors.HeaderParseError(
            "encoded word format invalid: '{}'".format(ew.cte))
    ew.charset = charset
    ew.lang = lang
    ew.defects.extend(defects)
    # Re-tokenize the decoded text into alternating fws/vtext tokens.
    while text:
        if text[0] in WSP:
            token, text = get_fws(text)
            ew.append(token)
            continue
        chars, *remainder = _wsp_splitter(text, 1)
        vtext = ValueTerminal(chars, 'vtext')
        _validate_xtext(vtext)
        ew.append(vtext)
        text = ''.join(remainder)
    return ew, value
def get_unstructured(value):
    """unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
       obs-unstruct = *((*LF *CR *(obs-utext) *LF *CR)) / FWS)
       obs-utext = %d0 / obs-NO-WS-CTL / LF / CR
    obs-NO-WS-CTL is control characters except WSP/CR/LF.
    So, basically, we have printable runs, plus control characters or nulls in
    the obsolete syntax, separated by whitespace.  Since RFC 2047 uses the
    obsolete syntax in its specification, but requires whitespace on either
    side of the encoded words, I can see no reason to need to separate the
    non-printable-non-whitespace from the printable runs if they occur, so we
    parse this into xtext tokens separated by WSP tokens.
    Because an 'unstructured' value must by definition constitute the entire
    value, this 'get' routine does not return a remaining value, only the
    parsed TokenList.
    """
    # XXX: but what about bare CR and LF?  They might signal the start or
    # end of an encoded word.  YAGNI for now, since our current parsers
    # will never send us strings with bare CR or LF.
    unstructured = UnstructuredTokenList()
    while value:
        if value[0] in WSP:
            token, value = get_fws(value)
            unstructured.append(token)
            continue
        if value.startswith('=?'):
            try:
                token, value = get_encoded_word(value)
            except errors.HeaderParseError:
                # XXX: Need to figure out how to register defects when
                # appropriate here.
                pass
            else:
                have_ws = True
                if len(unstructured) > 0:
                    if unstructured[-1].token_type != 'fws':
                        unstructured.defects.append(errors.InvalidHeaderDefect(
                            "missing whitespace before encoded word"))
                        have_ws = False
                if have_ws and len(unstructured) > 1:
                    if unstructured[-2].token_type == 'encoded-word':
                        # Whitespace between adjacent encoded words is
                        # semantically invisible (RFC 2047).
                        unstructured[-1] = EWWhiteSpaceTerminal(
                            unstructured[-1], 'fws')
                unstructured.append(token)
                continue
        # Plain run of non-whitespace characters.
        tok, *remainder = _wsp_splitter(value, 1)
        vtext = ValueTerminal(tok, 'vtext')
        _validate_xtext(vtext)
        unstructured.append(vtext)
        value = ''.join(remainder)
    return unstructured
def get_qp_ctext(value):
    r"""ctext = <printable ascii except \ ( )>
    This is not the RFC ctext, since we are handling nested comments in comment
    and unquoting quoted-pairs here.  We allow anything except the '()'
    characters, but if we find any ASCII other than the RFC defined printable
    ASCII, a NonPrintableDefect is added to the token's defects list.  Since
    quoted pairs are converted to their unquoted values, what is returned is
    a 'ptext' token.  In this case it is a WhiteSpaceTerminal, so it's value
    is ' '.
    """
    ptext, value, _ = _get_ptext_to_endchars(value, '()')
    ptext = WhiteSpaceTerminal(ptext, 'ptext')
    _validate_xtext(ptext)
    return ptext, value
def get_qcontent(value):
    """qcontent = qtext / quoted-pair
    We allow anything except the DQUOTE character, but if we find any ASCII
    other than the RFC defined printable ASCII, a NonPrintableDefect is
    added to the token's defects list.  Any quoted pairs are converted to their
    unquoted values, so what is returned is a 'ptext' token.  In this case it
    is a ValueTerminal.
    """
    ptext, value, _ = _get_ptext_to_endchars(value, '"')
    ptext = ValueTerminal(ptext, 'ptext')
    _validate_xtext(ptext)
    return ptext, value
def get_atext(value):
    """atext = <matches _atext_matcher>
    We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
    the token's defects list if we find non-atext characters.
    """
    m = _non_atom_end_matcher(value)
    if not m:
        raise errors.HeaderParseError(
            "expected atext but found '{}'".format(value))
    atext = m.group()
    value = value[len(atext):]
    atext = ValueTerminal(atext, 'atext')
    _validate_xtext(atext)
    return atext, value
def get_bare_quoted_string(value):
    """bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
    A quoted-string without the leading or trailing white space.  Its
    value is the text between the quote marks, with whitespace
    preserved and quoted pairs decoded.

    Returns a BareQuotedString token plus the unparsed remainder.
    Raises HeaderParseError if *value* does not start with '"'.
    """
    if value[0] != '"':
        raise errors.HeaderParseError(
            "expected '\"' but found '{}'".format(value))
    bare_quoted_string = BareQuotedString()
    value = value[1:]
    # Bug fix: guard against input that is exactly '"'.  After stripping the
    # opening quote, value may be empty and value[0] would raise IndexError.
    if value and value[0] == '"':
        # Immediately-closing quote (empty quoted string): record an explicit
        # empty qcontent token (get_qcontent returns '' without consuming).
        token, value = get_qcontent(value)
        bare_quoted_string.append(token)
    while value and value[0] != '"':
        if value[0] in WSP:
            token, value = get_fws(value)
        elif value[:2] == '=?':
            try:
                token, value = get_encoded_word(value)
                bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
                    "encoded word inside quoted string"))
            except errors.HeaderParseError:
                token, value = get_qcontent(value)
        else:
            token, value = get_qcontent(value)
        bare_quoted_string.append(token)
    if not value:
        # Input ended before the closing quote.
        bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
            "end of header inside quoted string"))
        return bare_quoted_string, value
    return bare_quoted_string, value[1:]
def get_comment(value):
    """comment = "(" *([FWS] ccontent) [FWS] ")"
       ccontent = ctext / quoted-pair / comment
    We handle nested comments here, and quoted-pair in our qp-ctext routine.

    Returns a Comment token plus the unparsed remainder; registers a defect
    (instead of raising) if the input ends before the closing paren.
    """
    if value and value[0] != '(':
        raise errors.HeaderParseError(
            "expected '(' but found '{}'".format(value))
    comment = Comment()
    value = value[1:]
    while value and value[0] != ")":
        if value[0] in WSP:
            token, value = get_fws(value)
        elif value[0] == '(':
            # Nested comment: recurse.
            token, value = get_comment(value)
        else:
            token, value = get_qp_ctext(value)
        comment.append(token)
    if not value:
        comment.defects.append(errors.InvalidHeaderDefect(
            "end of header inside comment"))
        return comment, value
    return comment, value[1:]
def get_cfws(value):
    """CFWS = (1*([FWS] comment) [FWS]) / FWS

    Collect any run of folding white space and comments into a CFWSList.
    """
    cfws = CFWSList()
    while value and value[0] in CFWS_LEADER:
        if value[0] in WSP:
            token, value = get_fws(value)
        else:
            token, value = get_comment(value)
        cfws.append(token)
    return cfws, value
def get_quoted_string(value):
    """quoted-string = [CFWS] <bare-quoted-string> [CFWS]
    'bare-quoted-string' is an intermediate class defined by this
    parser and not by the RFC grammar.  It is the quoted string
    without any attached CFWS.
    """
    quoted_string = QuotedString()
    # Optional leading CFWS.
    if value and value[0] in CFWS_LEADER:
        cfws, value = get_cfws(value)
        quoted_string.append(cfws)
    # The quoted string proper (raises if value does not start with '"').
    bqs, value = get_bare_quoted_string(value)
    quoted_string.append(bqs)
    # Optional trailing CFWS.
    if value and value[0] in CFWS_LEADER:
        cfws, value = get_cfws(value)
        quoted_string.append(cfws)
    return quoted_string, value
def get_atom(value):
    """atom = [CFWS] 1*atext [CFWS]
    An atom could be an rfc2047 encoded word.
    """
    atom = Atom()
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        atom.append(token)
    if value and value[0] in ATOM_ENDS:
        raise errors.HeaderParseError(
            "expected atom but found '{}'".format(value))
    if value.startswith('=?'):
        try:
            token, value = get_encoded_word(value)
        except errors.HeaderParseError:
            # XXX: need to figure out how to register defects when
            # appropriate here.
            token, value = get_atext(value)
    else:
        token, value = get_atext(value)
    atom.append(token)
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        atom.append(token)
    return atom, value
def get_dot_atom_text(value):
    """ dot-text = 1*atext *("." 1*atext)

    Raises HeaderParseError for a missing leading atom or a trailing dot.
    """
    dot_atom_text = DotAtomText()
    if not value or value[0] in ATOM_ENDS:
        raise errors.HeaderParseError("expected atom at a start of "
            "dot-atom-text but found '{}'".format(value))
    while value and value[0] not in ATOM_ENDS:
        token, value = get_atext(value)
        dot_atom_text.append(token)
        if value and value[0] == '.':
            dot_atom_text.append(DOT)
            value = value[1:]
    if dot_atom_text[-1] is DOT:
        raise errors.HeaderParseError("expected atom at end of dot-atom-text "
            "but found '{}'".format('.'+value))
    return dot_atom_text, value
def get_dot_atom(value):
    """ dot-atom = [CFWS] dot-atom-text [CFWS]
    Any place we can have a dot atom, we could instead have an rfc2047 encoded
    word.
    """
    dot_atom = DotAtom()
    if value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        dot_atom.append(token)
    if value.startswith('=?'):
        try:
            token, value = get_encoded_word(value)
        except errors.HeaderParseError:
            # XXX: need to figure out how to register defects when
            # appropriate here.
            token, value = get_dot_atom_text(value)
    else:
        token, value = get_dot_atom_text(value)
    dot_atom.append(token)
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        dot_atom.append(token)
    return dot_atom, value
def get_word(value):
    """word = atom / quoted-string
    Either atom or quoted-string may start with CFWS.  We have to peel off this
    CFWS first to determine which type of word to parse.  Afterward we splice
    the leading CFWS, if any, into the parsed sub-token.
    If neither an atom or a quoted-string is found before the next special, a
    HeaderParseError is raised.
    The token returned is either an Atom or a QuotedString, as appropriate.
    This means the 'word' level of the formal grammar is not represented in the
    parse tree; this is because having that extra layer when manipulating the
    parse tree is more confusing than it is helpful.
    """
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
    else:
        leader = None
    # Bug fix: get_cfws may consume the entire input (e.g. a bare comment);
    # without this guard the value[0] tests below raise IndexError.
    if not value:
        raise errors.HeaderParseError(
            "Expected 'atom' or 'quoted-string' but found nothing.")
    if value[0]=='"':
        token, value = get_quoted_string(value)
    elif value[0] in SPECIALS:
        raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
                                      "but found '{}'".format(value))
    else:
        token, value = get_atom(value)
    if leader is not None:
        # Splice the leading CFWS into the sub-token's own token list.
        token[:0] = [leader]
    return token, value
def get_phrase(value):
    """ phrase = 1*word / obs-phrase
        obs-phrase = word *(word / "." / CFWS)
    This means a phrase can be a sequence of words, periods, and CFWS in any
    order as long as it starts with at least one word.  If anything other than
    words is detected, an ObsoleteHeaderDefect is added to the token's defect
    list.  We also accept a phrase that starts with CFWS followed by a dot;
    this is registered as an InvalidHeaderDefect, since it is not supported by
    even the obsolete grammar.
    """
    phrase = Phrase()
    try:
        token, value = get_word(value)
        phrase.append(token)
    except errors.HeaderParseError:
        phrase.defects.append(errors.InvalidHeaderDefect(
            "phrase does not start with word"))
    while value and value[0] not in PHRASE_ENDS:
        if value[0]=='.':
            phrase.append(DOT)
            phrase.defects.append(errors.ObsoleteHeaderDefect(
                "period in 'phrase'"))
            value = value[1:]
        else:
            try:
                token, value = get_word(value)
            except errors.HeaderParseError:
                if value[0] in CFWS_LEADER:
                    # Bare CFWS (e.g. a comment) between words: obsolete
                    # syntax, but accepted with a defect.
                    token, value = get_cfws(value)
                    phrase.defects.append(errors.ObsoleteHeaderDefect(
                        "comment found without atom"))
                else:
                    raise
            phrase.append(token)
    return phrase, value
def get_local_part(value):
    """ local-part = dot-atom / quoted-string / obs-local-part

    Tries the modern grammar first, falling back to obs-local-part (with a
    defect registered) when the remainder indicates obsolete syntax.
    """
    local_part = LocalPart()
    leader = None
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
    if not value:
        raise errors.HeaderParseError(
            "expected local-part but found '{}'".format(value))
    try:
        token, value = get_dot_atom(value)
    except errors.HeaderParseError:
        try:
            token, value = get_word(value)
        except errors.HeaderParseError:
            if value[0] != '\\' and value[0] in PHRASE_ENDS:
                raise
            token = TokenList()
    if leader is not None:
        token[:0] = [leader]
    local_part.append(token)
    if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
        # More text follows that the modern grammar can't explain: re-parse
        # everything consumed so far plus the remainder as obs-local-part.
        obs_local_part, value = get_obs_local_part(str(local_part) + value)
        if obs_local_part.token_type == 'invalid-obs-local-part':
            local_part.defects.append(errors.InvalidHeaderDefect(
                "local-part is not dot-atom, quoted-string, or obs-local-part"))
        else:
            local_part.defects.append(errors.ObsoleteHeaderDefect(
                "local-part is not a dot-atom (contains CFWS)"))
        local_part[0] = obs_local_part
    try:
        local_part.value.encode('ascii')
    except UnicodeEncodeError:
        local_part.defects.append(errors.NonASCIILocalPartDefect(
                "local-part contains non-ASCII characters)"))
    return local_part, value
def get_obs_local_part(value):
    """ obs-local-part = word *("." word)

    Accepts dots, stray backslashes and CFWS between words, registering a
    defect for each deviation from the modern grammar.  If any defect is
    found, the returned token's type is 'invalid-obs-local-part'.
    """
    obs_local_part = ObsLocalPart()
    last_non_ws_was_dot = False
    while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
        if value[0] == '.':
            if last_non_ws_was_dot:
                obs_local_part.defects.append(errors.InvalidHeaderDefect(
                    "invalid repeated '.'"))
            obs_local_part.append(DOT)
            last_non_ws_was_dot = True
            value = value[1:]
            continue
        elif value[0]=='\\':
            # A backslash outside any quoting construct is invalid but kept.
            obs_local_part.append(ValueTerminal(value[0],
                                                'misplaced-special'))
            value = value[1:]
            obs_local_part.defects.append(errors.InvalidHeaderDefect(
                "'\\' character outside of quoted-string/ccontent"))
            last_non_ws_was_dot = False
            continue
        if obs_local_part and obs_local_part[-1].token_type != 'dot':
            obs_local_part.defects.append(errors.InvalidHeaderDefect(
                "missing '.' between words"))
        try:
            token, value = get_word(value)
            last_non_ws_was_dot = False
        except errors.HeaderParseError:
            if value[0] not in CFWS_LEADER:
                raise
            token, value = get_cfws(value)
        obs_local_part.append(token)
    # Bug fix: if nothing was parsed (e.g. empty input on a direct call),
    # the leading/trailing-dot checks below would raise IndexError.
    if not obs_local_part:
        raise errors.HeaderParseError(
            "expected obs-local-part but found '{}'".format(value))
    if (obs_local_part[0].token_type == 'dot' or
            obs_local_part[0].token_type=='cfws' and
            obs_local_part[1].token_type=='dot'):
        obs_local_part.defects.append(errors.InvalidHeaderDefect(
            "Invalid leading '.' in local part"))
    if (obs_local_part[-1].token_type == 'dot' or
            obs_local_part[-1].token_type=='cfws' and
            obs_local_part[-2].token_type=='dot'):
        obs_local_part.defects.append(errors.InvalidHeaderDefect(
            "Invalid trailing '.' in local part"))
    if obs_local_part.defects:
        obs_local_part.token_type = 'invalid-obs-local-part'
    return obs_local_part, value
def get_dtext(value):
    r""" dtext = <printable ascii except \ [ ]> / obs-dtext
        obs-dtext = obs-NO-WS-CTL / quoted-pair
    We allow anything except the excluded characters, but if we find any
    ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is
    added to the token's defects list.  Quoted pairs are converted to their
    unquoted values, so what is returned is a ptext token, in this case a
    ValueTerminal.  If there were quoted-printables, an ObsoleteHeaderDefect is
    added to the returned token's defect list.
    """
    ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
    ptext = ValueTerminal(ptext, 'ptext')
    if had_qp:
        ptext.defects.append(errors.ObsoleteHeaderDefect(
            "quoted printable found in domain-literal"))
    _validate_xtext(ptext)
    return ptext, value
def _check_for_early_dl_end(value, domain_literal):
    """Handle input that ends prematurely inside a domain-literal.

    If *value* is non-empty there is nothing to do and False is returned.
    Otherwise record an InvalidHeaderDefect on *domain_literal*, append a
    synthesized closing ']' terminal so the parse tree is well formed, and
    return True so the caller can stop parsing.
    """
    if value:
        return False
    # The defect must go on the defects list; appending the defect object to
    # the token list itself (as the original code did) puts a non-token in
    # the parse tree and makes all_defects raise AttributeError.
    domain_literal.defects.append(errors.InvalidHeaderDefect(
        "end of input inside domain-literal"))
    domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
    return True
def get_domain_literal(value):
    """ domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]

    Parse a bracketed domain literal, including any surrounding CFWS.
    Raises HeaderParseError if *value* does not contain a '[' (or a closing
    ']') where expected; a premature end of input inside the literal is
    recorded as a defect via _check_for_early_dl_end rather than raising.
    """
    domain_literal = DomainLiteral()
    if value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        domain_literal.append(token)
    if not value:
        raise errors.HeaderParseError("expected domain-literal")
    if value[0] != '[':
        raise errors.HeaderParseError("expected '[' at start of domain-literal "
                                      "but found '{}'".format(value))
    value = value[1:]
    if _check_for_early_dl_end(value, domain_literal):
        return domain_literal, value
    domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
    if value[0] in WSP:
        token, value = get_fws(value)
        domain_literal.append(token)
    # A single get_dtext call suffices for the *([FWS] dtext) in the grammar:
    # get_dtext consumes everything up to '[' or ']', including quoted pairs.
    token, value = get_dtext(value)
    domain_literal.append(token)
    if _check_for_early_dl_end(value, domain_literal):
        return domain_literal, value
    if value[0] in WSP:
        token, value = get_fws(value)
        domain_literal.append(token)
    if _check_for_early_dl_end(value, domain_literal):
        return domain_literal, value
    if value[0] != ']':
        raise errors.HeaderParseError("expected ']' at end of domain-literal "
                                      "but found '{}'".format(value))
    domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
    value = value[1:]
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        domain_literal.append(token)
    return domain_literal, value
def get_domain(value):
    """ domain = dot-atom / domain-literal / obs-domain
    obs-domain = atom *("." atom))

    Parse the domain part of an addr-spec.  Leading CFWS is captured first
    and re-attached to the front of whichever form is recognized.  The
    obsolete atom *("." atom) form is accepted but recorded as an
    ObsoleteHeaderDefect.
    """
    domain = Domain()
    leader = None
    # Capture leading CFWS so it can be prepended to the token parsed below.
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
    if not value:
        raise errors.HeaderParseError(
            "expected domain but found '{}'".format(value))
    if value[0] == '[':
        # Bracketed form, e.g. '[127.0.0.1]'.
        token, value = get_domain_literal(value)
        if leader is not None:
            token[:0] = [leader]
        domain.append(token)
        return domain, value
    try:
        token, value = get_dot_atom(value)
    except errors.HeaderParseError:
        token, value = get_atom(value)
    if leader is not None:
        token[:0] = [leader]
    domain.append(token)
    if value and value[0] == '.':
        # A '.' left over after dot-atom/atom parsing means there was CFWS
        # around a dot, i.e. the obsolete obs-domain form.
        domain.defects.append(errors.ObsoleteHeaderDefect(
            "domain is not a dot-atom (contains CFWS)"))
        if domain[0].token_type == 'dot-atom':
            # Flatten the dot-atom into the domain so the obs-domain atoms
            # and dots appended below all end up as siblings.
            domain[:] = domain[0]
        while value and value[0] == '.':
            domain.append(DOT)
            token, value = get_atom(value[1:])
            domain.append(token)
    return domain, value
def get_addr_spec(value):
    """ addr-spec = local-part "@" domain

    Parse a full address specification.  If the '@' (and therefore the
    domain) is missing, an InvalidHeaderDefect is recorded on the returned
    AddrSpec token instead of raising, and whatever was parsed as the local
    part is returned.
    """
    addr_spec = AddrSpec()
    token, value = get_local_part(value)
    addr_spec.append(token)
    if not value or value[0] != '@':
        # Fixed typo in the defect text: the production is named addr-spec,
        # not "add-spec".
        addr_spec.defects.append(errors.InvalidHeaderDefect(
            "addr-spec local part with no domain"))
        return addr_spec, value
    addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
    token, value = get_domain(value[1:])
    addr_spec.append(token)
    return addr_spec, value
def get_obs_route(value):
    """ obs-route = obs-domain-list ":"
    obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])

    Returns an obs-route token with the appropriate sub-tokens (that is,
    there is no obs-domain-list in the parse tree).
    """
    obs_route = ObsRoute()
    # Skip leading CFWS and empty list elements (bare commas).
    while value and (value[0]==',' or value[0] in CFWS_LEADER):
        if value[0] in CFWS_LEADER:
            token, value = get_cfws(value)
            obs_route.append(token)
        elif value[0] == ',':
            obs_route.append(ListSeparator)
            value = value[1:]
    if not value or value[0] != '@':
        raise errors.HeaderParseError(
            "expected obs-route domain but found '{}'".format(value))
    obs_route.append(RouteComponentMarker)
    token, value = get_domain(value[1:])
    obs_route.append(token)
    # Subsequent list elements; both the CFWS and the '@' domain are
    # optional, so empty entries (consecutive commas) are accepted.
    while value and value[0]==',':
        obs_route.append(ListSeparator)
        value = value[1:]
        if not value:
            break
        if value[0] in CFWS_LEADER:
            token, value = get_cfws(value)
            obs_route.append(token)
        # NOTE(review): if the CFWS above consumes the rest of the input,
        # value[0] here raises IndexError — confirm callers always supply a
        # terminating ':'.
        if value[0] == '@':
            obs_route.append(RouteComponentMarker)
            token, value = get_domain(value[1:])
            obs_route.append(token)
    if not value:
        raise errors.HeaderParseError("end of header while parsing obs-route")
    if value[0] != ':':
        raise errors.HeaderParseError( "expected ':' marking end of "
            "obs-route but found '{}'".format(value))
    obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
    return obs_route, value[1:]
def get_angle_addr(value):
    """ angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
    obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]

    Handles both the modern and the obsolete (routed) forms; use of the
    obsolete form records an ObsoleteHeaderDefect.  A missing closing '>'
    and SMTP's null '<>' form are recorded as InvalidHeaderDefects rather
    than raising.
    """
    angle_addr = AngleAddr()
    if value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        angle_addr.append(token)
    if not value or value[0] != '<':
        raise errors.HeaderParseError(
            "expected angle-addr but found '{}'".format(value))
    angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
    value = value[1:]
    # Although it is not legal per RFC5322, SMTP uses '<>' in certain
    # circumstances.
    if value[0] == '>':
        angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
        angle_addr.defects.append(errors.InvalidHeaderDefect(
            "null addr-spec in angle-addr"))
        value = value[1:]
        return angle_addr, value
    try:
        token, value = get_addr_spec(value)
    except errors.HeaderParseError:
        try:
            # Fall back to the obsolete routed form: obs-route addr-spec.
            token, value = get_obs_route(value)
            angle_addr.defects.append(errors.ObsoleteHeaderDefect(
                "obsolete route specification in angle-addr"))
        except errors.HeaderParseError:
            raise errors.HeaderParseError(
                "expected addr-spec or obs-route but found '{}'".format(value))
        angle_addr.append(token)
        token, value = get_addr_spec(value)
    angle_addr.append(token)
    if value and value[0] == '>':
        value = value[1:]
    else:
        # Tolerate a missing '>': record the defect and synthesize the
        # terminal so the parse tree is still well formed.
        angle_addr.defects.append(errors.InvalidHeaderDefect(
            "missing trailing '>' on angle-addr"))
    angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        angle_addr.append(token)
    return angle_addr, value
def get_display_name(value):
    """ display-name = phrase

    display-name is just an alias for phrase, so rather than returning a
    display-name token that wraps a phrase token, we return a DisplayName
    token whose contents (and defects) are copied from the parsed phrase.
    """
    phrase, value = get_phrase(value)
    display_name = DisplayName(phrase[:])
    display_name.defects = phrase.defects[:]
    return display_name, value
def get_name_addr(value):
    """ name-addr = [display-name] angle-addr

    The optional display-name and the angle-addr can both start with CFWS,
    so a leading CFWS run is captured up front and then prepended to
    whichever token actually follows it.
    """
    name_addr = NameAddr()
    # Both the optional display name and the angle-addr can start with cfws.
    leader = None
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
        if not value:
            raise errors.HeaderParseError(
                "expected name-addr but found '{}'".format(leader))
    if value[0] != '<':
        if value[0] in PHRASE_ENDS:
            raise errors.HeaderParseError(
                "expected name-addr but found '{}'".format(value))
        token, value = get_display_name(value)
        if not value:
            raise errors.HeaderParseError(
                "expected name-addr but found '{}'".format(token))
        if leader is not None:
            # Reattach the captured CFWS to the front of the display name's
            # first sub-token, then forget it so the angle-addr below does
            # not get it too.
            token[0][:0] = [leader]
            leader = None
        name_addr.append(token)
    token, value = get_angle_addr(value)
    if leader is not None:
        # No display name: the CFWS belongs to the angle-addr itself.
        token[:0] = [leader]
    name_addr.append(token)
    return name_addr, value
def get_mailbox(value):
    """ mailbox = name-addr / addr-spec

    The two productions can only be distinguished by attempting to parse
    each one in turn, so that is what we do.  If the parsed token carries
    any InvalidHeaderDefect the mailbox is marked as 'invalid-mailbox'.
    """
    mailbox = Mailbox()
    try:
        token, value = get_name_addr(value)
    except errors.HeaderParseError:
        try:
            token, value = get_addr_spec(value)
        except errors.HeaderParseError:
            raise errors.HeaderParseError(
                "expected mailbox but found '{}'".format(value))
    has_invalid = any(isinstance(defect, errors.InvalidHeaderDefect)
                      for defect in token.all_defects)
    if has_invalid:
        mailbox.token_type = 'invalid-mailbox'
    mailbox.append(token)
    return mailbox, value
def get_invalid_mailbox(value, endchars):
    """ Read everything up to one of the chars in endchars.

    This is outside the formal grammar.  The InvalidMailbox TokenList that is
    returned acts like a Mailbox, but the data attributes are None.
    """
    invalid_mailbox = InvalidMailbox()
    while value:
        char = value[0]
        if char in endchars:
            break
        if char in PHRASE_ENDS:
            invalid_mailbox.append(ValueTerminal(char, 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            invalid_mailbox.append(token)
    return invalid_mailbox, value
def get_mailbox_list(value):
    """ mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
    obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])

    For this routine we go outside the formal grammar in order to improve
    error handling.  We recognize the end of the mailbox list only at the end
    of the value or at a ';' (the group terminator).  This is so that we can
    turn invalid mailboxes into InvalidMailbox tokens and continue parsing
    any remaining valid mailboxes.  We also allow all mailbox entries to be
    null, and this condition is handled appropriately at a higher level.
    """
    mailbox_list = MailboxList()
    while value and value[0] != ';':
        try:
            token, value = get_mailbox(value)
            mailbox_list.append(token)
        except errors.HeaderParseError:
            # Couldn't parse a mailbox; salvage what we can so later entries
            # in the list are still recovered.
            leader = None
            if value[0] in CFWS_LEADER:
                leader, value = get_cfws(value)
                if not value or value[0] in ',;':
                    # Nothing but CFWS before the separator: an empty
                    # (obsolete) list entry.
                    mailbox_list.append(leader)
                    mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
                        "empty element in mailbox-list"))
                else:
                    token, value = get_invalid_mailbox(value, ',;')
                    if leader is not None:
                        token[:0] = [leader]
                    mailbox_list.append(token)
                    mailbox_list.defects.append(errors.InvalidHeaderDefect(
                        "invalid mailbox in mailbox-list"))
            elif value[0] == ',':
                mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
                    "empty element in mailbox-list"))
            else:
                token, value = get_invalid_mailbox(value, ',;')
                if leader is not None:
                    token[:0] = [leader]
                mailbox_list.append(token)
                mailbox_list.defects.append(errors.InvalidHeaderDefect(
                    "invalid mailbox in mailbox-list"))
        if value and value[0] not in ',;':
            # Crap after mailbox; treat it as an invalid mailbox.
            # The mailbox info will still be available.
            mailbox = mailbox_list[-1]
            mailbox.token_type = 'invalid-mailbox'
            token, value = get_invalid_mailbox(value, ',;')
            mailbox.extend(token)
            mailbox_list.defects.append(errors.InvalidHeaderDefect(
                "invalid mailbox in mailbox-list"))
        if value and value[0] == ',':
            mailbox_list.append(ListSeparator)
            value = value[1:]
    return mailbox_list, value
def get_group_list(value):
    """ group-list = mailbox-list / CFWS / obs-group-list
    obs-group-list = 1*([CFWS] ",") [CFWS]

    A group-list may legitimately consist of nothing but CFWS (an empty
    group); lists containing only empty entries are accepted but recorded
    as ObsoleteHeaderDefects.
    """
    group_list = GroupList()
    if not value:
        group_list.defects.append(errors.InvalidHeaderDefect(
            "end of header before group-list"))
        return group_list, value
    leader = None
    if value and value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
        if not value:
            # This should never happen in email parsing, since CFWS-only is a
            # legal alternative to group-list in a group, which is the only
            # place group-list appears.
            group_list.defects.append(errors.InvalidHeaderDefect(
                "end of header in group-list"))
            group_list.append(leader)
            return group_list, value
        if value[0] == ';':
            # CFWS-only group-list: the group is empty.
            group_list.append(leader)
            return group_list, value
    token, value = get_mailbox_list(value)
    if len(token.all_mailboxes)==0:
        # Nothing but empty/invalid entries: treat it as an obs-group-list.
        if leader is not None:
            group_list.append(leader)
        group_list.extend(token)
        group_list.defects.append(errors.ObsoleteHeaderDefect(
            "group-list with empty entries"))
        return group_list, value
    if leader is not None:
        token[:0] = [leader]
    group_list.append(token)
    return group_list, value
def get_group(value):
    """ group = display-name ":" [group-list] ";" [CFWS]

    Raises HeaderParseError if *value* does not start with a display-name
    followed by ':'.  Recoverable problems (such as the header ending before
    the terminating ';') are recorded as defects on the returned Group token.
    """
    group = Group()
    token, value = get_display_name(value)
    if not value or value[0] != ':':
        raise errors.HeaderParseError("expected ':' at end of group "
                                      "display name but found '{}'".format(value))
    group.append(token)
    group.append(ValueTerminal(':', 'group-display-name-terminator'))
    value = value[1:]
    if value and value[0] == ';':
        # Empty group: no group-list at all.
        group.append(ValueTerminal(';', 'group-terminator'))
        return group, value[1:]
    token, value = get_group_list(value)
    group.append(token)
    if not value:
        # Input ended before the terminating ';'.  Record the problem and
        # synthesize the terminator below instead of crashing with an
        # IndexError on value[0] (the original code fell through to the
        # value[0] != ';' test with an empty string).
        group.defects.append(errors.InvalidHeaderDefect(
            "end of header in group"))
    elif value[0] != ';':
        raise errors.HeaderParseError(
            "expected ';' at end of group but found {}".format(value))
    group.append(ValueTerminal(';', 'group-terminator'))
    value = value[1:]
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        group.append(token)
    return group, value
def get_address(value):
    """ address = mailbox / group

    Note that counter-intuitively, an address can be either a single address
    or a list of addresses (a group).  This is why the returned Address
    object has a 'mailboxes' attribute which treats a single address as a
    list of length one.  When you need to differentiate between the two
    cases, extract the single element, which is either a mailbox or a group
    token.
    """
    # The formal grammar is not much help here: mailbox and group (especially
    # with obsolete forms allowed) look the same until one of '@', '<', or
    # ':' is reached.  So we simply try each parser in turn, most likely
    # first.  Branching on a pre-parsed phrase would be a premature
    # optimization.
    address = Address()
    for parser in (get_group, get_mailbox):
        try:
            token, value = parser(value)
        except errors.HeaderParseError:
            continue
        break
    else:
        raise errors.HeaderParseError(
            "expected address but found '{}'".format(value))
    address.append(token)
    return address, value
def get_address_list(value):
    """ address_list = (address *("," address)) / obs-addr-list
    obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])

    We depart from the formal grammar here by continuing to parse until the end
    of the input, assuming the input to be entirely composed of an
    address-list.  This is always true in email parsing, and allows us
    to skip invalid addresses to parse additional valid ones.
    """
    address_list = AddressList()
    while value:
        try:
            token, value = get_address(value)
            address_list.append(token)
        except errors.HeaderParseError as err:
            # Couldn't parse an address; salvage what we can so later
            # entries in the list are still recovered.
            leader = None
            if value[0] in CFWS_LEADER:
                leader, value = get_cfws(value)
                if not value or value[0] == ',':
                    # Nothing but CFWS before the separator: an empty
                    # (obsolete) list entry.
                    address_list.append(leader)
                    address_list.defects.append(errors.ObsoleteHeaderDefect(
                        "address-list entry with no content"))
                else:
                    token, value = get_invalid_mailbox(value, ',')
                    if leader is not None:
                        token[:0] = [leader]
                    address_list.append(Address([token]))
                    address_list.defects.append(errors.InvalidHeaderDefect(
                        "invalid address in address-list"))
            elif value[0] == ',':
                address_list.defects.append(errors.ObsoleteHeaderDefect(
                    "empty element in address-list"))
            else:
                token, value = get_invalid_mailbox(value, ',')
                if leader is not None:
                    token[:0] = [leader]
                address_list.append(Address([token]))
                address_list.defects.append(errors.InvalidHeaderDefect(
                    "invalid address in address-list"))
        if value and value[0] != ',':
            # Crap after address; treat it as an invalid mailbox.
            # The mailbox info will still be available.
            mailbox = address_list[-1][0]
            mailbox.token_type = 'invalid-mailbox'
            token, value = get_invalid_mailbox(value, ',')
            mailbox.extend(token)
            address_list.defects.append(errors.InvalidHeaderDefect(
                "invalid address in address-list"))
        if value:  # Must be a , at this point.
            address_list.append(ValueTerminal(',', 'list-separator'))
            value = value[1:]
    return address_list, value
#
# XXX: As I begin to add additional header parsers, I'm realizing we probably
# have two levels of parser routines: the get_XXX methods that get a token in
# the grammar, and the parse_XXX methods that parse an entire field value.  So
# get_address_list above should really be a parse_ method, as probably should
# be get_unstructured.
#
def parse_mime_version(value):
    """ mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]

    Parse a MIME-Version header value.  Unlike the get_XXX routines this
    consumes the entire value; anything unparseable is kept in the tree as
    'xtext' terminals with defects recorded.  The major and minor numbers
    are stored on the returned token's .major/.minor attributes.
    """
    # The [CFWS] is implicit in the RFC 2045 BNF.
    # XXX: This routine is a bit verbose, should factor out a get_int method.
    mime_version = MIMEVersion()
    if not value:
        mime_version.defects.append(errors.HeaderMissingRequiredValue(
            "Missing MIME version number (eg: 1.0)"))
        return mime_version
    if value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        mime_version.append(token)
        if not value:
            mime_version.defects.append(errors.HeaderMissingRequiredValue(
                "Expected MIME version number but found only CFWS"))
    # Collect the major version: everything up to '.' or CFWS.
    digits = ''
    while value and value[0] != '.' and value[0] not in CFWS_LEADER:
        digits += value[0]
        value = value[1:]
    if not digits.isdigit():
        mime_version.defects.append(errors.InvalidHeaderDefect(
            "Expected MIME major version number but found {!r}".format(digits)))
        mime_version.append(ValueTerminal(digits, 'xtext'))
    else:
        mime_version.major = int(digits)
        mime_version.append(ValueTerminal(digits, 'digits'))
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        mime_version.append(token)
    if not value or value[0] != '.':
        if mime_version.major is not None:
            mime_version.defects.append(errors.InvalidHeaderDefect(
                "Incomplete MIME version; found only major number"))
        if value:
            # Keep whatever is left in the tree, unparsed.
            mime_version.append(ValueTerminal(value, 'xtext'))
        return mime_version
    mime_version.append(ValueTerminal('.', 'version-separator'))
    value = value[1:]
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        mime_version.append(token)
    if not value:
        if mime_version.major is not None:
            mime_version.defects.append(errors.InvalidHeaderDefect(
                "Incomplete MIME version; found only major number"))
        return mime_version
    # Collect the minor version: everything up to CFWS.
    digits = ''
    while value and value[0] not in CFWS_LEADER:
        digits += value[0]
        value = value[1:]
    if not digits.isdigit():
        mime_version.defects.append(errors.InvalidHeaderDefect(
            "Expected MIME minor version number but found {!r}".format(digits)))
        mime_version.append(ValueTerminal(digits, 'xtext'))
    else:
        mime_version.minor = int(digits)
        mime_version.append(ValueTerminal(digits, 'digits'))
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        mime_version.append(token)
    if value:
        mime_version.defects.append(errors.InvalidHeaderDefect(
            "Excess non-CFWS text after MIME version"))
        mime_version.append(ValueTerminal(value, 'xtext'))
    return mime_version
def get_invalid_parameter(value):
    """ Read everything up to the next ';'.

    This is outside the formal grammar.  The InvalidParameter TokenList that
    is returned acts like a Parameter, but the data attributes are None.
    """
    invalid_parameter = InvalidParameter()
    while value:
        char = value[0]
        if char == ';':
            break
        if char in PHRASE_ENDS:
            invalid_parameter.append(ValueTerminal(char, 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            invalid_parameter.append(token)
    return invalid_parameter, value
def get_ttext(value):
    """ttext = <matches _ttext_matcher>

    We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
    defects list if we find non-ttext characters.  We also register defects
    for *any* non-printables even though the RFC doesn't exclude all of them,
    because we follow the spirit of RFC 5322.
    """
    match = _non_token_end_matcher(value)
    if match is None:
        raise errors.HeaderParseError(
            "expected ttext but found '{}'".format(value))
    matched = match.group()
    token = ValueTerminal(matched, 'ttext')
    _validate_xtext(token)
    return token, value[len(matched):]
def get_token(value):
    """token = [CFWS] 1*ttext [CFWS]

    The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
    tspecials.  We also exclude tabs even though the RFC doesn't.  The RFC
    implies the CFWS but is not explicit about it in the BNF.
    """
    mtoken = Token()

    def _absorb_cfws():
        # Consume leading CFWS, if any, into the token.
        nonlocal value
        if value and value[0] in CFWS_LEADER:
            cfws, value = get_cfws(value)
            mtoken.append(cfws)

    _absorb_cfws()
    if value and value[0] in TOKEN_ENDS:
        raise errors.HeaderParseError(
            "expected token but found '{}'".format(value))
    ttext, value = get_ttext(value)
    mtoken.append(ttext)
    _absorb_cfws()
    return mtoken, value
def get_attrtext(value):
    """attrtext = 1*(any non-ATTRIBUTE_ENDS character)

    We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
    token's defects list if we find non-attrtext characters.  We also
    register defects for *any* non-printables even though the RFC doesn't
    exclude all of them, because we follow the spirit of RFC 5322.
    """
    match = _non_attribute_end_matcher(value)
    if match is None:
        raise errors.HeaderParseError(
            "expected attrtext but found {!r}".format(value))
    matched = match.group()
    token = ValueTerminal(matched, 'attrtext')
    _validate_xtext(token)
    return token, value[len(matched):]
def get_attribute(value):
    """ [CFWS] 1*attrtext [CFWS]

    This version of the BNF makes the CFWS explicit, and as usual we use a
    value terminal for the actual run of characters.  The RFC equivalent of
    attrtext is the token characters, with the subtraction of '*', "'", and
    '%'.  We include tab in the excluded set just as we do for token.
    """
    attribute = Attribute()

    def _absorb_cfws():
        nonlocal value
        if value and value[0] in CFWS_LEADER:
            cfws, value = get_cfws(value)
            attribute.append(cfws)

    _absorb_cfws()
    if value and value[0] in ATTRIBUTE_ENDS:
        raise errors.HeaderParseError(
            "expected token but found '{}'".format(value))
    text, value = get_attrtext(value)
    attribute.append(text)
    _absorb_cfws()
    return attribute, value
def get_extended_attrtext(value):
    """attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')

    This is a special parsing routine so that we get a value that includes
    % escapes as a single string (which we decode as a single string later).
    """
    match = _non_extended_attribute_end_matcher(value)
    if match is None:
        raise errors.HeaderParseError(
            "expected extended attrtext but found {!r}".format(value))
    matched = match.group()
    token = ValueTerminal(matched, 'extended-attrtext')
    _validate_xtext(token)
    return token, value[len(matched):]
def get_extended_attribute(value):
    """ [CFWS] 1*extended_attrtext [CFWS]

    This is like the non-extended version except we allow % characters, so
    that we can pick up an encoded value as a single string.
    """
    # XXX: should we have an ExtendedAttribute TokenList?
    attribute = Attribute()

    def _absorb_cfws():
        nonlocal value
        if value and value[0] in CFWS_LEADER:
            cfws, value = get_cfws(value)
            attribute.append(cfws)

    _absorb_cfws()
    if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
        raise errors.HeaderParseError(
            "expected token but found '{}'".format(value))
    text, value = get_extended_attrtext(value)
    attribute.append(text)
    _absorb_cfws()
    return attribute, value
def get_section(value):
    """ '*' digits

    The formal BNF is more complicated because leading 0s are not allowed.
    We check for that and add a defect.  We also assume no CFWS is allowed
    between the '*' and the digits, though the RFC is not crystal clear on
    that.  The caller should already have dealt with leading CFWS.
    """
    section = Section()
    if not value or value[0] != '*':
        raise errors.HeaderParseError("Expected section but found {}".format(
            value))
    section.append(ValueTerminal('*', 'section-marker'))
    value = value[1:]
    if not value or not value[0].isdigit():
        raise errors.HeaderParseError("Expected section number but "
                                      "found {}".format(value))
    digits = ''
    while value and value[0].isdigit():
        digits += value[0]
        value = value[1:]
    if digits[0] == '0' and digits != '0':
        # The original code raised AttributeError here: errors has no
        # InvalidHeaderError class.  InvalidHeaderDefect is the intended
        # defect type (and the message was missing a space).
        section.defects.append(errors.InvalidHeaderDefect(
            "section number has an invalid leading 0"))
    section.number = int(digits)
    section.append(ValueTerminal(digits, 'digits'))
    return section, value
def get_value(value):
    """ quoted-string / attribute

    Leading CFWS is captured and re-attached to the front of whichever form
    is parsed.
    """
    v = Value()
    if not value:
        raise errors.HeaderParseError("Expected value but found end of string")
    leader = None
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
        if not value:
            raise errors.HeaderParseError("Expected value but found "
                                          "only {}".format(leader))
    parse = get_quoted_string if value[0] == '"' else get_extended_attribute
    token, value = parse(value)
    if leader is not None:
        token[:0] = [leader]
    v.append(token)
    return v, value
def get_parameter(value):
    """ attribute [section] ["*"] [CFWS] "=" value

    The CFWS is implied by the RFC but not made explicit in the BNF.  This
    simplified form of the BNF from the RFC is made to conform with the RFC
    BNF through some extra checks.  We do it this way because it makes both
    error recovery and working with the resulting parse tree easier.

    Returns a Parameter token; RFC 2231 charset/lang markers are stored on
    its .charset/.lang attributes and section numbers on .sectioned /
    .section_number.
    """
    # It is possible CFWS would also be implicitly allowed between the section
    # and the 'extended-attribute' marker (the '*') , but we've never seen that
    # in the wild and we will therefore ignore the possibility.
    param = Parameter()
    token, value = get_attribute(value)
    param.append(token)
    if not value or value[0] == ';':
        param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
            "name ({}) but no value".format(token)))
        return param, value
    if value[0] == '*':
        try:
            token, value = get_section(value)
            param.sectioned = True
            param.append(token)
        except errors.HeaderParseError:
            pass
        if not value:
            raise errors.HeaderParseError("Incomplete parameter")
        if value[0] == '*':
            # RFC 2231 extended-parameter marker.
            param.append(ValueTerminal('*', 'extended-parameter-marker'))
            value = value[1:]
            param.extended = True
    if value[0] != '=':
        raise errors.HeaderParseError("Parameter not followed by '='")
    param.append(ValueTerminal('=', 'parameter-separator'))
    value = value[1:]
    leader = None
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        param.append(token)
    remainder = None
    appendto = param
    if param.extended and value and value[0] == '"':
        # Now for some serious hackery to handle the common invalid case of
        # double quotes around an extended value.  We also accept (with defect)
        # a value marked as encoded that isn't really.
        qstring, remainder = get_quoted_string(value)
        inner_value = qstring.stripped_value
        semi_valid = False
        if param.section_number == 0:
            if inner_value and inner_value[0] == "'":
                semi_valid = True
            else:
                token, rest = get_attrtext(inner_value)
                if rest and rest[0] == "'":
                    semi_valid = True
        else:
            try:
                token, rest = get_extended_attrtext(inner_value)
            except errors.HeaderParseError:
                # Narrowed from a bare 'except:': get_extended_attrtext only
                # raises HeaderParseError, and a bare except would also have
                # swallowed KeyboardInterrupt/SystemExit.
                pass
            else:
                if not rest:
                    semi_valid = True
        if semi_valid:
            param.defects.append(errors.InvalidHeaderDefect(
                "Quoted string value for extended parameter is invalid"))
            param.append(qstring)
            # Re-parse the encoded content inside the (emptied) quoted
            # string token.
            for t in qstring:
                if t.token_type == 'bare-quoted-string':
                    t[:] = []
                    appendto = t
                    break
            value = inner_value
        else:
            remainder = None
            param.defects.append(errors.InvalidHeaderDefect(
                "Parameter marked as extended but appears to have a "
                "quoted string value that is non-encoded"))
    if value and value[0] == "'":
        token = None
    else:
        token, value = get_value(value)
    if not param.extended or param.section_number > 0:
        if not value or value[0] != "'":
            appendto.append(token)
            if remainder is not None:
                assert not value, value
                value = remainder
            return param, value
        param.defects.append(errors.InvalidHeaderDefect(
            "Apparent initial-extended-value but attribute "
            "was not marked as extended or was not initial section"))
    if not value:
        # Assume the charset/lang is missing and the token is the value.
        param.defects.append(errors.InvalidHeaderDefect(
            "Missing required charset/lang delimiters"))
        appendto.append(token)
        if remainder is None:
            return param, value
    else:
        if token is not None:
            for t in token:
                if t.token_type == 'extended-attrtext':
                    break
            # Relabel the charset token.  The original code had
            # "t.token_type == 'attrtext'" — a comparison whose result was
            # discarded, leaving the token mislabeled; assignment is what
            # was intended.
            t.token_type = 'attrtext'
            appendto.append(t)
            param.charset = t.value
        if value[0] != "'":
            raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
                                          "delimiter, but found {!r}".format(
                                              value))
        appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
        value = value[1:]
        if value and value[0] != "'":
            token, value = get_attrtext(value)
            appendto.append(token)
            param.lang = token.value
            if not value or value[0] != "'":
                raise errors.HeaderParseError("Expected RFC2231 char/lang "
                                              "encoding delimiter, but found "
                                              "{}".format(value))
        appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
        value = value[1:]
    if remainder is not None:
        # Treat the rest of value as bare quoted string content.
        v = Value()
        while value:
            if value[0] in WSP:
                token, value = get_fws(value)
            else:
                token, value = get_qcontent(value)
            v.append(token)
        token = v
    else:
        token, value = get_value(value)
    appendto.append(token)
    if remainder is not None:
        assert not value, value
        value = remainder
    return param, value
def parse_mime_parameters(value):
    """ parameter *( ";" parameter )

    That BNF is meant to indicate this routine should only be called after
    finding and handling the leading ';'.  There is no corresponding rule in
    the formal RFC grammar, but it is more convenient for us for the set of
    parameters to be treated as its own TokenList.

    This is a 'parse' routine because it consumes the remaining value, but it
    would never be called to parse a full header.  Instead it is called to
    parse everything after the non-parameter value of a specific MIME header.
    """
    mime_parameters = MimeParameters()
    while value:
        try:
            token, value = get_parameter(value)
            mime_parameters.append(token)
        except errors.HeaderParseError as err:
            # Recover: skip CFWS, then either record an empty entry or
            # swallow everything up to the next ';' as an invalid parameter.
            leader = None
            if value[0] in CFWS_LEADER:
                leader, value = get_cfws(value)
            if not value:
                mime_parameters.append(leader)
                return mime_parameters
            if value[0] == ';':
                if leader is not None:
                    mime_parameters.append(leader)
                mime_parameters.defects.append(errors.InvalidHeaderDefect(
                    "parameter entry with no content"))
            else:
                token, value = get_invalid_parameter(value)
                if leader:
                    token[:0] = [leader]
                mime_parameters.append(token)
                mime_parameters.defects.append(errors.InvalidHeaderDefect(
                    "invalid parameter {!r}".format(token)))
        if value and value[0] != ';':
            # Junk after the otherwise valid parameter.  Mark it as
            # invalid, but it will have a value.
            param = mime_parameters[-1]
            param.token_type = 'invalid-parameter'
            token, value = get_invalid_parameter(value)
            param.extend(token)
            mime_parameters.defects.append(errors.InvalidHeaderDefect(
                "parameter with invalid trailing text {!r}".format(token)))
        if value:
            # Must be a ';' at this point.
            mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
            value = value[1:]
    return mime_parameters
def _find_mime_parameters(tokenlist, value):
    """Do our best to find the parameters in an invalid MIME header.

    Append phrase/misplaced-special tokens to *tokenlist* until a ';' is
    found, then append a parameter-separator terminal and the parsed
    parameters.  Modifies *tokenlist* in place and returns None.
    """
    while value:
        if value[0] == ';':
            tokenlist.append(ValueTerminal(';', 'parameter-separator'))
            tokenlist.append(parse_mime_parameters(value[1:]))
            return
        if value[0] in PHRASE_ENDS:
            tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            tokenlist.append(token)
def parse_content_type_header(value):
    """ maintype "/" subtype *( ";" parameter )

    The maintype and substype are tokens.  Theoretically they could
    be checked against the official IANA list + x-token, but we
    don't do that.

    Always consumes the whole value and returns a ContentType token; parse
    failures are recorded as defects and, where possible, the parameters are
    still recovered via _find_mime_parameters.
    """
    ctype = ContentType()
    # (Removed an unused local, "recover = False", that was never read.)
    if not value:
        ctype.defects.append(errors.HeaderMissingRequiredValue(
            "Missing content type specification"))
        return ctype
    try:
        token, value = get_token(value)
    except errors.HeaderParseError:
        ctype.defects.append(errors.InvalidHeaderDefect(
            "Expected content maintype but found {!r}".format(value)))
        _find_mime_parameters(ctype, value)
        return ctype
    ctype.append(token)
    # XXX: If we really want to follow the formal grammar we should make
    # mantype and subtype specialized TokenLists here.  Probably not worth it.
    if not value or value[0] != '/':
        ctype.defects.append(errors.InvalidHeaderDefect(
            "Invalid content type"))
        if value:
            _find_mime_parameters(ctype, value)
        return ctype
    ctype.maintype = token.value.strip().lower()
    ctype.append(ValueTerminal('/', 'content-type-separator'))
    value = value[1:]
    try:
        token, value = get_token(value)
    except errors.HeaderParseError:
        ctype.defects.append(errors.InvalidHeaderDefect(
            "Expected content subtype but found {!r}".format(value)))
        _find_mime_parameters(ctype, value)
        return ctype
    ctype.append(token)
    ctype.subtype = token.value.strip().lower()
    if not value:
        return ctype
    if value[0] != ';':
        ctype.defects.append(errors.InvalidHeaderDefect(
            "Only parameters are valid after content type, but "
            "found {!r}".format(value)))
        # The RFC requires that a syntactically invalid content-type be
        # treated as text/plain.  Perhaps we should postel this, but we
        # should probably only do that if we were checking the subtype
        # value against IANA.
        del ctype.maintype, ctype.subtype
        _find_mime_parameters(ctype, value)
        return ctype
    ctype.append(ValueTerminal(';', 'parameter-separator'))
    ctype.append(parse_mime_parameters(value[1:]))
    return ctype
def parse_content_disposition_header(value):
    """ disposition-type *( ";" parameter )
    Parse *value* into a ContentDisposition token list.  Parse problems
    are recorded as defects on the result instead of being raised.
    """
    disposition = ContentDisposition()
    if not value:
        disposition.defects.append(errors.HeaderMissingRequiredValue(
            "Missing content disposition"))
        return disposition
    try:
        token, value = get_token(value)
    except errors.HeaderParseError:
        disposition.defects.append(errors.InvalidHeaderDefect(
            "Expected content disposition but found {!r}".format(value)))
        _find_mime_parameters(disposition, value)
        return disposition
    disposition.append(token)
    disposition.content_disposition = token.value.strip().lower()
    if not value:
        return disposition
    if value[0] == ';':
        # The normal case: a parameter list follows the disposition type.
        disposition.append(ValueTerminal(';', 'parameter-separator'))
        disposition.append(parse_mime_parameters(value[1:]))
        return disposition
    # Anything other than a parameter list here is invalid; record the
    # defect and let _find_mime_parameters salvage what it can.
    disposition.defects.append(errors.InvalidHeaderDefect(
        "Only parameters are valid after content disposition, but "
        "found {!r}".format(value)))
    _find_mime_parameters(disposition, value)
    return disposition
def parse_content_transfer_encoding_header(value):
    """ mechanism
    Parse *value* into a ContentTransferEncoding token list, recording
    any syntax problems as defects rather than raising.
    """
    # We should probably validate the values, since the list is fixed.
    cte = ContentTransferEncoding()
    if not value:
        cte.defects.append(errors.HeaderMissingRequiredValue(
            "Missing content transfer encoding"))
        return cte
    try:
        token, value = get_token(value)
    except errors.HeaderParseError:
        cte.defects.append(errors.InvalidHeaderDefect(
            "Expected content transfer encoding but found {!r}".format(value)))
    else:
        cte.append(token)
        cte.cte = token.value.strip().lower()
    # Anything left over is junk: consume it one misplaced special or one
    # phrase at a time, recording a defect for each piece.
    while value:
        cte.defects.append(errors.InvalidHeaderDefect(
            "Extra text after content transfer encoding"))
        if value[0] not in PHRASE_ENDS:
            token, value = get_phrase(value)
            cte.append(token)
        else:
            cte.append(ValueTerminal(value[0], 'misplaced-special'))
            value = value[1:]
    return cte
#
# Header folding
#
# Header folding is complex, with lots of rules and corner cases. The
# following code does its best to obey the rules and handle the corner
# cases, but you can be sure there are a few bugs.  :)
#
# This folder generally canonicalizes as it goes, preferring the stringified
# version of each token. The tokens contain information that supports the
# folder, including which tokens can be encoded in which ways.
#
# Folded text is accumulated in a simple list of strings ('lines'), each
# one of which should be less than policy.max_line_length ('maxlen').
#
def _steal_trailing_WSP_if_exists(lines):
    """Pop and return the trailing whitespace char of the last line, if any.
    Mutates *lines* in place; returns '' when there is nothing to steal
    (no lines, empty last line, or last char not whitespace).
    """
    if not lines:
        return ''
    last = lines[-1]
    if not last or last[-1] not in WSP:
        return ''
    lines[-1] = last[:-1]
    return last[-1]
def _refold_parse_tree(parse_tree, *, policy):
    """Return string of contents of parse_tree folded according to RFC rules.
    """
    # max_line_length 0/None means no limit, ie: infinitely long.
    maxlen = policy.max_line_length or float("+inf")
    encoding = 'utf-8' if policy.utf8 else 'us-ascii'
    # 'lines' accumulates the folded output; lines[-1] is the line currently
    # being built.
    lines = ['']
    # Index into lines[-1] where the current run of encoded words starts, or
    # None; lets adjacent encoded words be merged by _fold_as_ew.
    last_ew = None
    # Depth counter: >0 while we are inside a token whose subparts must not
    # be wrapped as encoded words.
    wrap_as_ew_blocked = 0
    want_encoding = False
    # Sentinel pushed onto the work list to mark where an ew-blocked span ends.
    end_ew_not_allowed = Terminal('', 'wrap_as_ew_blocked')
    # Work list of tokens still to emit; subparts get pushed back on the front.
    parts = list(parse_tree)
    while parts:
        part = parts.pop(0)
        if part is end_ew_not_allowed:
            wrap_as_ew_blocked -= 1
            continue
        tstr = str(part)
        try:
            # Probe whether this token survives in the target encoding.
            tstr.encode(encoding)
            charset = encoding
        except UnicodeEncodeError:
            if any(isinstance(x, errors.UndecodableBytesDefect)
                   for x in part.all_defects):
                charset = 'unknown-8bit'
            else:
                # If policy.utf8 is false this should really be taken from a
                # 'charset' property on the policy.
                charset = 'utf-8'
            want_encoding = True
        if part.token_type == 'mime-parameters':
            # Mime parameter folding (using RFC2231) is extra special.
            _fold_mime_parameters(part, lines, maxlen, encoding)
            continue
        if want_encoding and not wrap_as_ew_blocked:
            if not part.as_ew_allowed:
                want_encoding = False
                last_ew = None
                if part.syntactic_break:
                    encoded_part = part.fold(policy=policy)[:-1]  # strip nl
                    if policy.linesep not in encoded_part:
                        # It fits on a single line
                        if len(encoded_part) > maxlen - len(lines[-1]):
                            # But not on this one, so start a new one.
                            newline = _steal_trailing_WSP_if_exists(lines)
                            # XXX what if encoded_part has no leading FWS?
                            lines.append(newline)
                        lines[-1] += encoded_part
                        continue
                # Either this is not a major syntactic break, so we don't
                # want it on a line by itself even if it fits, or it
                # doesn't fit on a line by itself.  Either way, fall through
                # to unpacking the subparts and wrapping them.
            if not hasattr(part, 'encode'):
                # It's not a Terminal, do each piece individually.
                parts = list(part) + parts
            else:
                # It's a terminal, wrap it as an encoded word, possibly
                # combining it with previously encoded words if allowed.
                last_ew = _fold_as_ew(tstr, lines, maxlen, last_ew,
                                      part.ew_combine_allowed, charset)
            want_encoding = False
            continue
        if len(tstr) <= maxlen - len(lines[-1]):
            # Plain token that fits on the current line: just append it.
            lines[-1] += tstr
            continue
        # This part is too long to fit.  The RFC wants us to break at
        # "major syntactic breaks", so unless we don't consider this
        # to be one, check if it will fit on the next line by itself.
        if (part.syntactic_break and
                len(tstr) + 1 <= maxlen):
            newline = _steal_trailing_WSP_if_exists(lines)
            if newline or part.startswith_fws():
                lines.append(newline + tstr)
                continue
        if not hasattr(part, 'encode'):
            # It's not a terminal, try folding the subparts.
            newparts = list(part)
            if not part.as_ew_allowed:
                # Subparts of this token may not be encoded-word wrapped;
                # bracket them with the sentinel so the counter is restored.
                wrap_as_ew_blocked += 1
                newparts.append(end_ew_not_allowed)
            parts = newparts + parts
            continue
        if part.as_ew_allowed and not wrap_as_ew_blocked:
            # It doesn't need CTE encoding, but encode it anyway so we can
            # wrap it.
            parts.insert(0, part)
            want_encoding = True
            continue
        # We can't figure out how to wrap it, so give up.
        newline = _steal_trailing_WSP_if_exists(lines)
        if newline or part.startswith_fws():
            lines.append(newline + tstr)
        else:
            # We can't fold it onto the next line either...
            lines[-1] += tstr
    return policy.linesep.join(lines) + policy.linesep
def _fold_as_ew(to_encode, lines, maxlen, last_ew, ew_combine_allowed, charset):
    """Fold string to_encode into lines as encoded word, combining if allowed.
    Return the new value for last_ew, or None if ew_combine_allowed is False.
    If there is already an encoded word in the last line of lines (indicated by
    a non-None value for last_ew) and ew_combine_allowed is true, decode the
    existing ew, combine it with to_encode, and re-encode.  Otherwise, encode
    to_encode.  In either case, split to_encode as necessary so that the
    encoded segments fit within maxlen.
    Fixes: the re-encode after trimming for excess length now passes the
    same charset as the first encode (previously it fell back to the
    utf-8 default, mislabeling e.g. 'unknown-8bit' text); empty-string
    guards avoid IndexError on degenerate input.
    """
    if last_ew is not None and ew_combine_allowed:
        # Pull the existing encoded word off the last line, decode it, and
        # merge it with the new text so the pair is re-encoded as one ew.
        to_encode = str(
            get_unstructured(lines[-1][last_ew:] + to_encode))
        lines[-1] = lines[-1][:last_ew]
    if to_encode and to_encode[0] in WSP:
        # We're joining this to non-encoded text, so don't encode
        # the leading blank.
        leading_wsp = to_encode[0]
        to_encode = to_encode[1:]
        if (len(lines[-1]) == maxlen):
            lines.append(_steal_trailing_WSP_if_exists(lines))
        lines[-1] += leading_wsp
    trailing_wsp = ''
    if to_encode and to_encode[-1] in WSP:
        # Likewise for the trailing space.
        trailing_wsp = to_encode[-1]
        to_encode = to_encode[:-1]
    new_last_ew = len(lines[-1]) if last_ew is None else last_ew
    while to_encode:
        remaining_space = maxlen - len(lines[-1])
        # The RFC2047 chrome takes up 7 characters plus the length
        # of the charset name.
        encode_as = 'utf-8' if charset == 'us-ascii' else charset
        text_space = remaining_space - len(encode_as) - 7
        if text_space <= 0:
            lines.append(' ')
            # XXX We'll get an infinite loop here if maxlen is <= 7
            continue
        first_part = to_encode[:text_space]
        ew = _ew.encode(first_part, charset=encode_as)
        excess = len(ew) - remaining_space
        if excess > 0:
            # encode always chooses the shortest encoding, so this
            # is guaranteed to fit at this point.
            first_part = first_part[:-excess]
            # Re-encode with the same charset; omitting it would silently
            # use encode()'s utf-8 default.
            ew = _ew.encode(first_part, charset=encode_as)
        lines[-1] += ew
        to_encode = to_encode[len(first_part):]
        if to_encode:
            lines.append(' ')
            new_last_ew = len(lines[-1])
    lines[-1] += trailing_wsp
    return new_last_ew if ew_combine_allowed else None
def _fold_mime_parameters(part, lines, maxlen, encoding):
    """Fold TokenList 'part' into the 'lines' list as mime parameters.
    Using the decoded list of parameters and values, format them according to
    the RFC rules, including using RFC2231 encoding if the value cannot be
    expressed in 'encoding' and/or the parameter+value is too long to fit
    within 'maxlen'.
    """
    # Special case for RFC2231 encoding: start from decoded values and use
    # RFC2231 encoding iff needed.
    #
    # Note that the 1 and 2s being added to the length calculations are
    # accounting for the possibly-needed spaces and semicolons we'll be adding.
    #
    for name, value in part.params:
        # XXX What if this ';' puts us over maxlen the first time through the
        # loop?  We should split the header value onto a newline in that case,
        # but to do that we need to recognize the need earlier or reparse the
        # header, so I'm going to ignore that bug for now.  It'll only put us
        # one character over.
        if not lines[-1].rstrip().endswith(';'):
            lines[-1] += ';'
        charset = encoding
        error_handler = 'strict'
        try:
            # If the value round-trips in the target encoding, plain
            # name=value form is enough; otherwise RFC2231 encoding is needed.
            value.encode(encoding)
            encoding_required = False
        except UnicodeEncodeError:
            encoding_required = True
            if utils._has_surrogates(value):
                charset = 'unknown-8bit'
                error_handler = 'surrogateescape'
            else:
                charset = 'utf-8'
        if encoding_required:
            encoded_value = urllib.parse.quote(
                value, safe='', errors=error_handler)
            # RFC2231 extended form: name*=charset''percent-encoded-value
            tstr = "{}*={}''{}".format(name, charset, encoded_value)
        else:
            tstr = '{}={}'.format(name, quote_string(value))
        if len(lines[-1]) + len(tstr) + 1 < maxlen:
            # Fits on the current line (plus the joining space).
            lines[-1] = lines[-1] + ' ' + tstr
            continue
        elif len(tstr) + 2 <= maxlen:
            # Fits on a continuation line by itself.
            lines.append(' ' + tstr)
            continue
        # We need multiple sections.  We are allowed to mix encoded and
        # non-encoded sections, but we aren't going to.  We'll encode them all.
        section = 0
        extra_chrome = charset + "''"
        while value:
            # chrome = name + '*' + section + '*=' + extra_chrome
            chrome_len = len(name) + len(str(section)) + 3 + len(extra_chrome)
            if maxlen <= chrome_len + 3:
                # We need room for the leading blank, the trailing semicolon,
                # and at least one character of the value.  If we don't
                # have that, we'd be stuck, so in that case fall back to
                # the RFC standard width.
                maxlen = 78
            splitpoint = maxchars = maxlen - chrome_len - 2
            while True:
                # Shrink the slice until its percent-encoded form fits.
                partial = value[:splitpoint]
                encoded_value = urllib.parse.quote(
                    partial, safe='', errors=error_handler)
                if len(encoded_value) <= maxchars:
                    break
                splitpoint -= 1
            lines.append(" {}*{}*={}{}".format(
                name, section, extra_chrome, encoded_value))
            # Only the first section carries the charset chrome.
            extra_chrome = ''
            section += 1
            value = value[splitpoint:]
            if value:
                lines[-1] += ';'
| 36.398155 | 85 | 0.592561 |
38c4d1c3cf9ded007c564944f77fb4d620c383d9 | 33,054 | py | Python | python/ccxt/lbank.py | sandutsar/ccxt | f27c187fa1626a6c261c6fa5caaae89cb657461d | [
"MIT"
] | null | null | null | python/ccxt/lbank.py | sandutsar/ccxt | f27c187fa1626a6c261c6fa5caaae89cb657461d | [
"MIT"
] | null | null | null | python/ccxt/lbank.py | sandutsar/ccxt | f27c187fa1626a6c261c6fa5caaae89cb657461d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import DDoSProtection
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class lbank(Exchange):
    def describe(self):
        """Return the static exchange description (capabilities, endpoints,
        timeframes, fees) merged over the base Exchange defaults."""
        return self.deep_extend(super(lbank, self).describe(), {
            'id': 'lbank',
            'name': 'LBank',
            'countries': ['CN'],
            'version': 'v1',
            # unified-API capability flags
            'has': {
                'CORS': None,
                'spot': True,
                'margin': False,
                'swap': False,
                'future': False,
                'option': False,
                'addMargin': False,
                'cancelOrder': True,
                'createOrder': True,
                'createReduceOnlyOrder': False,
                'createStopLimitOrder': False,
                'createStopMarketOrder': False,
                'createStopOrder': False,
                'fetchBalance': True,
                'fetchBorrowRate': False,
                'fetchBorrowRateHistories': False,
                'fetchBorrowRateHistory': False,
                'fetchBorrowRates': False,
                'fetchBorrowRatesPerSymbol': False,
                'fetchClosedOrders': True,
                'fetchFundingHistory': False,
                'fetchFundingRate': False,
                'fetchFundingRateHistory': False,
                'fetchFundingRates': False,
                'fetchIndexOHLCV': False,
                'fetchLeverage': False,
                'fetchLeverageTiers': False,
                'fetchMarkets': True,
                'fetchMarkOHLCV': False,
                'fetchOHLCV': True,
                'fetchOpenInterestHistory': False,
                'fetchOpenOrders': None,  # status 0 API doesn't work
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrders': True,
                'fetchPosition': False,
                'fetchPositions': False,
                'fetchPositionsRisk': False,
                'fetchPremiumIndexOHLCV': False,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTrades': True,
                'fetchTradingFee': False,
                'fetchTradingFees': False,
                'reduceMargin': False,
                'setLeverage': False,
                'setMarginMode': False,
                'setPositionMode': False,
                'withdraw': True,
            },
            # unified timeframe -> lbank kline 'type' parameter
            'timeframes': {
                '1m': 'minute1',
                '5m': 'minute5',
                '15m': 'minute15',
                '30m': 'minute30',
                '1h': 'hour1',
                '2h': 'hour2',
                '4h': 'hour4',
                '6h': 'hour6',
                '8h': 'hour8',
                '12h': 'hour12',
                '1d': 'day1',
                '1w': 'week1',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/38063602-9605e28a-3302-11e8-81be-64b1e53c4cfb.jpg',
                'api': 'https://api.lbank.info',
                'www': 'https://www.lbank.info',
                'doc': 'https://github.com/LBank-exchange/lbank-official-api-docs',
                'fees': 'https://www.lbank.info/fees.html',
                'referral': 'https://www.lbank.info/invitevip?icode=7QCY',
            },
            # raw REST endpoints; names are converted to implicit methods
            # (e.g. publicGetTicker, privatePostCreateOrder) by the base class
            'api': {
                'public': {
                    'get': [
                        'currencyPairs',
                        'ticker',
                        'depth',
                        'trades',
                        'kline',
                        'accuracy',
                    ],
                },
                'private': {
                    'post': [
                        'user_info',
                        'create_order',
                        'cancel_order',
                        'orders_info',
                        'orders_info_history',
                        'withdraw',
                        'withdrawCancel',
                        'withdraws',
                        'withdrawConfigs',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': self.parse_number('0.001'),
                    'taker': self.parse_number('0.001'),
                },
                'funding': {
                    'withdraw': {},
                },
            },
            # exchange-specific currency codes remapped to unified codes
            'commonCurrencies': {
                'GMT': 'GMT Token',
                'PNT': 'Penta',
                'VET_ERC20': 'VEN',
            },
            'options': {
                # convert self.secret to PEM once and cache it in
                # self.options['pem'] (see sign / convert_secret_to_pem)
                'cacheSecretAsPem': True,
            },
            'precisionMode': TICK_SIZE,
        })
def fetch_markets(self, params={}):
"""
retrieves data on all markets for lbank
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = self.publicGetAccuracy(params)
#
# [
# {
# "symbol": "btc_usdt",
# "quantityAccuracy": "4",
# "minTranQua": "0.0001",
# "priceAccuracy": "2"
# },
# ...
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = market['symbol']
parts = id.split('_')
baseId = None
quoteId = None
numParts = len(parts)
# lbank will return symbols like "vet_erc20_usdt"
if numParts > 2:
baseId = parts[0] + '_' + parts[1]
quoteId = parts[2]
else:
baseId = parts[0]
quoteId = parts[1]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': True,
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number(self.parse_precision(self.safe_string(market, 'quantityAccuracy'))),
'price': self.parse_number(self.parse_precision(self.safe_string(market, 'priceAccuracy'))),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_float(market, 'minTranQua'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': id,
})
return result
def parse_ticker(self, ticker, market=None):
#
# {
# "symbol":"btc_usdt",
# "ticker":{
# "high":43416.06,
# "vol":7031.7427,
# "low":41804.26,
# "change":1.33,
# "turnover":300302447.81,
# "latest":43220.4
# },
# "timestamp":1642201617747
# }
#
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market, '_')
symbol = market['symbol']
timestamp = self.safe_integer(ticker, 'timestamp')
info = ticker
ticker = info['ticker']
last = self.safe_string(ticker, 'latest')
percentage = self.safe_string(ticker, 'change')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_string(ticker, 'vol'),
'quoteVolume': self.safe_string(ticker, 'turnover'),
'info': info,
}, market)
def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the lbank api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetTicker(self.extend(request, params))
# {
# "symbol":"btc_usdt",
# "ticker":{
# "high":43416.06,
# "vol":7031.7427,
# "low":41804.26,
# "change":1.33,
# "turnover":300302447.81,
# "latest":43220.4
# },
# "timestamp":1642201617747
# }
return self.parse_ticker(response, market)
def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the lbank api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
request = {
'symbol': 'all',
}
response = self.publicGetTicker(self.extend(request, params))
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_order_book(self, symbol, limit=60, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the lbank api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
self.load_markets()
size = 60
if limit is not None:
size = min(limit, size)
request = {
'symbol': self.market_id(symbol),
'size': size,
}
response = self.publicGetDepth(self.extend(request, params))
return self.parse_order_book(response, symbol)
def parse_trade(self, trade, market=None):
market = self.safe_market(None, market)
timestamp = self.safe_integer(trade, 'date_ms')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
id = self.safe_string(trade, 'tid')
type = None
side = self.safe_string(trade, 'type')
side = side.replace('_market', '')
return {
'id': id,
'info': self.safe_value(trade, 'info', trade),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': None,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the lbank api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'size': 100,
}
if since is not None:
request['time'] = int(since)
if limit is not None:
request['size'] = limit
response = self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590969600,
# 0.02451657,
# 0.02452675,
# 0.02443701,
# 0.02447814,
# 238.38210000
# ]
#
return [
self.safe_timestamp(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=1000, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the lbank api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
self.load_markets()
market = self.market(symbol)
if since is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a `since` argument')
if limit is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a `limit` argument')
request = {
'symbol': market['id'],
'type': self.timeframes[timeframe],
'size': limit,
'time': int(since / 1000),
}
response = self.publicGetKline(self.extend(request, params))
#
# [
# [1590969600,0.02451657,0.02452675,0.02443701,0.02447814,238.38210000],
# [1590969660,0.02447814,0.02449883,0.02443209,0.02445973,212.40270000],
# [1590969720,0.02445973,0.02452067,0.02445909,0.02446151,266.16920000],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
info = self.safe_value(response, 'info', {})
free = self.safe_value(info, 'free', {})
freeze = self.safe_value(info, 'freeze', {})
asset = self.safe_value(info, 'asset', {})
currencyIds = list(free.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(free, currencyId)
account['used'] = self.safe_string(freeze, currencyId)
account['total'] = self.safe_string(asset, currencyId)
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the lbank api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
self.load_markets()
response = self.privatePostUserInfo(params)
#
# {
# "result":"true",
# "info":{
# "freeze":{
# "iog":"0.00000000",
# "ssc":"0.00000000",
# "eon":"0.00000000",
# },
# "asset":{
# "iog":"0.00000000",
# "ssc":"0.00000000",
# "eon":"0.00000000",
# },
# "free":{
# "iog":"0.00000000",
# "ssc":"0.00000000",
# "eon":"0.00000000",
# },
# }
# }
#
return self.parse_balance(response)
def parse_order_status(self, status):
statuses = {
'-1': 'cancelled', # cancelled
'0': 'open', # not traded
'1': 'open', # partial deal
'2': 'closed', # complete deal
'4': 'closed', # disposal processing
}
return self.safe_string(statuses, status)
def parse_order(self, order, market=None):
#
# {
# "symbol":"eth_btc",
# "amount":10.000000,
# "create_time":1484289832081,
# "price":5000.000000,
# "avg_price":5277.301200,
# "type":"sell",
# "order_id":"ab704110-af0d-48fd-a083-c218f19a4a55",
# "deal_amount":10.000000,
# "status":2
# }
#
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market, '_')
timestamp = self.safe_integer(order, 'create_time')
# Limit Order Request Returns: Order Price
# Market Order Returns: cny amount of market order
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'amount')
filled = self.safe_string(order, 'deal_amount')
average = self.safe_string(order, 'avg_price')
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'order_id')
type = self.safe_string(order, 'order_type')
side = self.safe_string(order, 'type')
return self.safe_order({
'id': id,
'clientOrderId': None,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'cost': None,
'amount': amount,
'filled': filled,
'remaining': None,
'trades': None,
'fee': None,
'info': self.safe_value(order, 'info', order),
'average': average,
}, market)
def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float|None price: the price at which the order is to be fullfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the lbank api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
market = self.market(symbol)
order = {
'symbol': market['id'],
'type': side,
'amount': amount,
}
if type == 'market':
order['type'] += '_market'
else:
order['price'] = price
response = self.privatePostCreateOrder(self.extend(order, params))
order = self.omit(order, 'type')
order['order_id'] = response['order_id']
order['type'] = side
order['order_type'] = type
order['create_time'] = self.milliseconds()
order['info'] = response
return self.parse_order(order, market)
def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str|None symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the lbank api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'order_id': id,
}
response = self.privatePostCancelOrder(self.extend(request, params))
return response
def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str|None symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the lbank api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
# Id can be a list of ids delimited by a comma
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'order_id': id,
}
response = self.privatePostOrdersInfo(self.extend(request, params))
data = self.safe_value(response, 'orders', [])
orders = self.parse_orders(data, market)
numOrders = len(orders)
if numOrders == 1:
return orders[0]
else:
return orders
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple orders made by the user
:param str|None symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of orde structures to retrieve
:param dict params: extra parameters specific to the lbank api endpoint
:returns [dict]: a list of [order structures]{@link https://docs.ccxt.com/en/latest/manual.html#order-structure
"""
self.load_markets()
if limit is None:
limit = 100
market = self.market(symbol)
request = {
'symbol': market['id'],
'current_page': 1,
'page_length': limit,
}
response = self.privatePostOrdersInfoHistory(self.extend(request, params))
data = self.safe_value(response, 'orders', [])
return self.parse_orders(data, None, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple closed orders made by the user
:param str|None symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
:param int|None limit: the maximum number of orde structures to retrieve
:param dict params: extra parameters specific to the lbank api endpoint
:returns [dict]: a list of [order structures]{@link https://docs.ccxt.com/en/latest/manual.html#order-structure
"""
self.load_markets()
if symbol is not None:
market = self.market(symbol)
symbol = market['symbol']
orders = self.fetch_orders(symbol, since, limit, params)
closed = self.filter_by(orders, 'status', 'closed')
canceled = self.filter_by(orders, 'status', 'cancelled') # cancelled orders may be partially filled
allOrders = self.array_concat(closed, canceled)
return self.filter_by_symbol_since_limit(allOrders, symbol, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the lbank api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
# mark and fee are optional params, mark is a note and must be less than 255 characters
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'assetCode': currency['id'],
'amount': amount,
'account': address,
}
if tag is not None:
request['memo'] = tag
response = self.privatePostWithdraw(self.extend(request, params))
#
# {
# 'result': 'true',
# 'withdrawId': 90082,
# 'fee':0.001
# }
#
return self.parse_transaction(response, currency)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# 'result': 'true',
# 'withdrawId': 90082,
# 'fee':0.001
# }
#
currency = self.safe_currency(None, currency)
return {
'id': self.safe_string_2(transaction, 'id', 'withdrawId'),
'txid': None,
'timestamp': None,
'datetime': None,
'network': None,
'addressFrom': None,
'address': None,
'addressTo': None,
'amount': None,
'type': None,
'currency': currency['code'],
'status': None,
'updated': None,
'tagFrom': None,
'tag': None,
'tagTo': None,
'comment': None,
'fee': None,
'info': transaction,
}
def convert_secret_to_pem(self, secret):
lineLength = 64
secretLength = len(secret) - 0
numLines = int(secretLength / lineLength)
numLines = self.sum(numLines, 1)
pem = "-----BEGIN PRIVATE KEY-----\n" # eslint-disable-line
for i in range(0, numLines):
start = i * lineLength
end = self.sum(start, lineLength)
pem += self.secret[start:end] + "\n" # eslint-disable-line
return pem + '-----END PRIVATE KEY-----'
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the url, body and headers for one request.
        Public endpoints get their params urlencoded into the query string.
        Private endpoints are signed: the sorted params(including api_key)
        are rawencoded, hashed(Exchange.hash default digest), uppercased,
        and RSA-signed('RS256') with the PEM-wrapped secret; the signature
        is sent as the 'sign' form field.
        """
        query = self.omit(params, self.extract_params(path))
        url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
        # Every endpoint ends with ".do"
        url += '.do'
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            # the signature covers the api_key plus all params, sorted by key
            query = self.keysort(self.extend({
                'api_key': self.apiKey,
            }, params))
            queryString = self.rawencode(query)
            message = self.hash(self.encode(queryString)).upper()
            cacheSecretAsPem = self.safe_value(self.options, 'cacheSecretAsPem', True)
            pem = None
            if cacheSecretAsPem:
                # convert the secret to PEM once and reuse it across requests
                pem = self.safe_value(self.options, 'pem')
                if pem is None:
                    pem = self.convert_secret_to_pem(self.secret)
                    self.options['pem'] = pem
            else:
                pem = self.convert_secret_to_pem(self.secret)
            sign = self.binary_to_base64(self.rsa(message, self.encode(pem), 'RS256'))
            query['sign'] = sign
            body = self.urlencode(query)
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
success = self.safe_string(response, 'result')
if success == 'false':
errorCode = self.safe_string(response, 'error_code')
message = self.safe_string({
'10000': 'Internal error',
'10001': 'The required parameters can not be empty',
'10002': 'verification failed',
'10003': 'Illegal parameters',
'10004': 'User requests are too frequent',
'10005': 'Key does not exist',
'10006': 'user does not exist',
'10007': 'Invalid signature',
'10008': 'This currency pair is not supported',
'10009': 'Limit orders can not be missing orders and the number of orders',
'10010': 'Order price or order quantity must be greater than 0',
'10011': 'Market orders can not be missing the amount of the order',
'10012': 'market sell orders can not be missing orders',
'10013': 'is less than the minimum trading position 0.001',
'10014': 'Account number is not enough',
'10015': 'The order type is wrong',
'10016': 'Account balance is not enough',
'10017': 'Abnormal server',
'10018': 'order inquiry can not be more than 50 less than one',
'10019': 'withdrawal orders can not be more than 3 less than one',
'10020': 'less than the minimum amount of the transaction limit of 0.001',
'10022': 'Insufficient key authority',
}, errorCode, self.json(response))
ErrorClass = self.safe_value({
'10002': AuthenticationError,
'10004': DDoSProtection,
'10005': AuthenticationError,
'10006': AuthenticationError,
'10007': AuthenticationError,
'10009': InvalidOrder,
'10010': InvalidOrder,
'10011': InvalidOrder,
'10012': InvalidOrder,
'10013': InvalidOrder,
'10014': InvalidOrder,
'10015': InvalidOrder,
'10016': InvalidOrder,
'10022': AuthenticationError,
}, errorCode, ExchangeError)
raise ErrorClass(message)
| 40.309756 | 155 | 0.516034 |
ede7ee218fb69349de1db96e1964e4f2501d3707 | 3,032 | py | Python | tests/unit/states/test_pyrax_queues.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 5 | 2017-02-07T05:39:29.000Z | 2020-06-13T02:07:33.000Z | tests/unit/states/test_pyrax_queues.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | tests/unit/states/test_pyrax_queues.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 11 | 2017-01-26T19:36:29.000Z | 2021-12-11T07:54:16.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
# Import Salt Libs
import salt.states.pyrax_queues as pyrax_queues
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PyraxQueuesTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.states.pyrax_queues
    '''
    def setup_loader_modules(self):
        # No special loader setup: run pyrax_queues with empty module globals.
        return {pyrax_queues: {}}
    # 'present' function tests: 1
    def test_present(self):
        '''
        Test to ensure the RackSpace queue exists.
        '''
        name = 'myqueue'
        provider = 'my-pyrax'
        # Expected state-return skeleton; 'comment'/'result' are mutated in
        # place before each assertion below.
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': ''}
        # cloud.action is mocked; each call consumes the next side_effect
        # entry in order: queue exists, then twice "not found" (plus a spare).
        mock_dct = MagicMock(side_effect=[{provider: {'salt': True}},
                                          {provider: {'salt': False}},
                                          {provider: {'salt': False}}, False])
        with patch.dict(pyrax_queues.__salt__, {'cloud.action': mock_dct}):
            # Queue already present: success with no changes.
            comt = ('{0} present.'.format(name))
            ret.update({'comment': comt})
            self.assertDictEqual(pyrax_queues.present(name, provider), ret)
            with patch.dict(pyrax_queues.__opts__, {'test': True}):
                # test=True: creation is only announced (result is None).
                comt = ('Rackspace queue myqueue is set to be created.')
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(pyrax_queues.present(name, provider), ret)
            with patch.dict(pyrax_queues.__opts__, {'test': False}):
                # test=False and the mock keeps reporting the queue missing,
                # so creation is considered failed (result False).
                comt = ('Failed to create myqueue Rackspace queue.')
                ret.update({'comment': comt, 'result': False})
                self.assertDictEqual(pyrax_queues.present(name, provider), ret)
    # 'absent' function tests: 1
    def test_absent(self):
        '''
        Test to ensure the named Rackspace queue is deleted.
        '''
        name = 'myqueue'
        provider = 'my-pyrax'
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': ''}
        # First lookup reports the queue missing, second reports it present.
        mock_dct = MagicMock(side_effect=[{provider: {'salt': False}},
                                          {provider: {'salt': True}}])
        with patch.dict(pyrax_queues.__salt__, {'cloud.action': mock_dct}):
            # Queue already absent: nothing to do.
            comt = ('myqueue does not exist.')
            ret.update({'comment': comt})
            self.assertDictEqual(pyrax_queues.absent(name, provider), ret)
            with patch.dict(pyrax_queues.__opts__, {'test': True}):
                # test=True: removal is only announced (result is None).
                comt = ('Rackspace queue myqueue is set to be removed.')
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(pyrax_queues.absent(name, provider), ret)
| 34.850575 | 79 | 0.569921 |
0c9dc45100793bda3971bd88c0faf7329d000152 | 1,436 | py | Python | examples/ultimate/ultimate.py | river8822/instabot | 5d3289cdda724a4b5df5cd0eaa72da5e664e5cc5 | [
"Apache-2.0"
] | 4 | 2017-04-25T10:56:27.000Z | 2021-03-19T06:47:58.000Z | examples/ultimate/ultimate.py | river8822/instabot | 5d3289cdda724a4b5df5cd0eaa72da5e664e5cc5 | [
"Apache-2.0"
] | null | null | null | examples/ultimate/ultimate.py | river8822/instabot | 5d3289cdda724a4b5df5cd0eaa72da5e664e5cc5 | [
"Apache-2.0"
] | 1 | 2020-12-01T20:24:32.000Z | 2020-12-01T20:24:32.000Z | """
ULTIMATE SCRIPT
It uses data written in files:
* follow_followers.txt
* follow_following.txt
* like_hashtags.txt
* like_users.txt
and do the job. This bot can be run 24/7.
"""
import os
import sys
import time
from random import shuffle
sys.path.append(os.path.join(sys.path[0], '../../'))
from instabot import Bot
# Log in once and reuse the session for all scheduled tasks.
bot = Bot()
bot.login()
print("Current script's schedule:")
# Each *.txt file holds one target (user or hashtag) per line; see
# Bot.read_list_from_file for the parsing rules.
follow_followers_list = bot.read_list_from_file("follow_followers.txt")
print("Going to follow followers of:", follow_followers_list)
follow_following_list = bot.read_list_from_file("follow_following.txt")
print("Going to follow following of:", follow_following_list)
like_hashtags_list = bot.read_list_from_file("like_hashtags.txt")
print("Going to like hashtags:", like_hashtags_list)
like_users_list = bot.read_list_from_file("like_users.txt")
print("Going to like users:", like_users_list)
# Build a flat work queue of (bound method, kwargs) pairs so every task can
# be executed uniformly below.  None means "use the bot's default amount".
tasks_list = []
for item in follow_followers_list:
    tasks_list.append((bot.follow_followers, {'user_id': item, 'nfollows': None}))
for item in follow_following_list:
    tasks_list.append((bot.follow_following, {'user_id': item}))
for item in like_hashtags_list:
    tasks_list.append((bot.like_hashtag, {'hashtag': item, 'amount': None}))
for item in like_users_list:
    tasks_list.append((bot.like_user, {'user_id': item, 'amount': None}))
# shuffle(tasks_list)
# Run every task sequentially in the order the files were read.
for func, arg in tasks_list:
    func(**arg)
| 31.217391 | 82 | 0.736072 |
08694a8e2241e58dc476222e5c00940f8bfb9e68 | 14,365 | py | Python | substance/substance_raffle_sql.py | Liu-0726/bili2.0 | 5320964b1f4fbb75ea9bccd4bb6fd3d15dfed0e0 | [
"MIT"
] | 1,081 | 2018-07-10T11:20:22.000Z | 2022-03-25T09:26:25.000Z | substance/substance_raffle_sql.py | Liu-0726/bili2.0 | 5320964b1f4fbb75ea9bccd4bb6fd3d15dfed0e0 | [
"MIT"
] | 440 | 2018-07-12T08:50:31.000Z | 2021-12-22T11:56:54.000Z | substance/substance_raffle_sql.py | Liu-0726/bili2.0 | 5320964b1f4fbb75ea9bccd4bb6fd3d15dfed0e0 | [
"MIT"
] | 280 | 2018-07-11T14:35:20.000Z | 2022-03-28T11:09:14.000Z | import sqlite3 # sqlite是个很灵活的东西,会自动转换,但是如果错误type且无法转换那么也不报错,传说中的沙雕feature https://www.sqlite.org/faq.html#q3
from os import path
from .bili_data_types import \
SubstanceRaffleStatus, SubstanceRaffleJoined, SubstanceRaffleResults, SubstanceRaffleLuckydog
# 设计理由是execute script from another directory时,保证仍然可以正确执行(与conf读取设计一致,后续config读取也将自己控制,不再由main控制)
conn = sqlite3.connect(f'{path.dirname(path.realpath(__file__))}/data.db')
class OthersTable:
    """Tiny key/value store for miscellaneous persistent state."""
    def __init__(self):
        # Both columns are TEXT; the key is the primary key, so upserts via
        # INSERT OR REPLACE are safe.
        ddl = (
            'CREATE TABLE IF NOT EXISTS others ('
            'key_word TEXT NOT NULL,'
            'value TEXT NOT NULL,'
            'PRIMARY KEY (key_word)'
            '); '
        )
        conn.execute(ddl)
        self.conn = conn
    def insert_or_replace(self, key_word, value):
        """Upsert one key/value pair; both sides are coerced to str."""
        with self.conn:
            self.conn.execute(
                'INSERT OR REPLACE INTO others (key_word, value) VALUES (?, ?)',
                (str(key_word), str(value)))
    def select_by_primary_key(self, key_word):
        """Return the 1-tuple (value,) for key_word, or None when missing."""
        cur = self.conn.execute(
            'SELECT value FROM others WHERE key_word=?', (str(key_word),))
        return cur.fetchone()
# Deletion order matters: remove rows from the joined table first, then
# from this status table.
class SubstanceRaffleStatusTable:
    # sqlite gateway for substanceraffle_status: one row per pending raffle,
    # keyed by (aid, number).  Shares the module-level connection `conn`.
    def __init__(self):
        # Times are stored as INTEGER so they compare directly in SQL.
        sql_create_table = (
            'CREATE TABLE IF NOT EXISTS substanceraffle_status ('
            'aid TEXT NOT NULL,'
            'number TEXT NOT NULL,'
            'describe TEXT NOT NULL,'
            'join_start_time INTEGER NOT NULL,'
            'join_end_time INTEGER NOT NULL, '
            'handle_status INTEGER NOT NULL,'
            'prize_cmt TEXT NOT NULL,'
            'PRIMARY KEY (aid, number)'
            '); '
        )
        conn.execute(sql_create_table)
        self.conn = conn
    def as_bili_data(self, row):
        # Convert a raw sqlite row into a SubstanceRaffleStatus; prize_cmt is
        # persisted as a single space-separated string.
        *info, prize_cmt = row
        list_prize_cmt = [i for i in prize_cmt.split(' ')]  # split on ASCII (half-width) spaces
        return SubstanceRaffleStatus(*info, list_prize_cmt)
    def insert_element(self, substance_raffle_status: SubstanceRaffleStatus):
        # Placeholders let sqlite adapt the value types; inlining values into
        # the SQL text would be fragile.
        with self.conn:
            self.conn.execute('INSERT INTO substanceraffle_status VALUES (?, ?, ?, ?, ?, ?, ?)',
                              substance_raffle_status.as_sql_values())
    def select_all(self):
        # Return every row, converted to SubstanceRaffleStatus objects.
        results = []
        for row in self.conn.execute('SELECT * FROM substanceraffle_status'):
            results.append(self.as_bili_data(row))
        return results
    def select_by_primary_key(self, aid, number):
        # Look up a single raffle by its composite key; None when absent.
        cursor = self.conn.execute(
            'SELECT * FROM substanceraffle_status WHERE aid=? AND number=?', (str(aid), str(number)))
        result = cursor.fetchone()
        if result is None:
            return None
        return self.as_bili_data(result)
    def del_by_primary_key(self, aid, number):
        # Transactional delete of one raffle row.
        with self.conn:
            self.conn.execute('DELETE FROM substanceraffle_status WHERE aid=? AND number=?', (str(aid), str(number)))
    # tuple_join_time_range is an ad-hoc Tuple[int, int] meaning "between
    # join_start_time and join_end_time".
    def select(self, handle_status, tuple_join_time_range, join_end_time_r):
        # Query raffles by handle_status with optional time filters; one of
        # four SQL variants is assembled depending on which filters are set.
        assert handle_status is not None
        results = []
        if tuple_join_time_range is None and join_end_time_r is not None:
            # Only an upper bound on join_end_time.
            sql = 'SELECT * FROM substanceraffle_status WHERE join_end_time <= ? AND handle_status = ?'
            parameters = (int(join_end_time_r), int(handle_status))
        elif tuple_join_time_range is not None and join_end_time_r is None:
            # Raffles whose join window spans the given range.
            sql = 'SELECT * FROM substanceraffle_status ' \
                  'WHERE join_start_time <= ? AND join_end_time >= ? AND handle_status = ?'
            join_start_time_r, join_end_time_l = tuple_join_time_range
            parameters = (int(join_start_time_r), int(join_end_time_l), int(handle_status))
        elif tuple_join_time_range is not None and join_end_time_r is not None:
            # Window check plus an additional cap on join_end_time.
            sql = 'SELECT * FROM substanceraffle_status ' \
                  'WHERE join_start_time <= ? AND (join_end_time BETWEEN ? AND ?) AND (handle_status = ?)'
            join_start_time_r, join_end_time_l = tuple_join_time_range
            parameters = (int(join_start_time_r), int(join_end_time_l), int(join_end_time_r), int(handle_status))
        else:
            # No time filters at all: status only.
            sql = 'SELECT * FROM substanceraffle_status WHERE handle_status = ?'
            parameters = (int(handle_status),)
        for row in self.conn.execute(sql, parameters):
            results.append(self.as_bili_data(row))
        return results
    # Same method name as the bili_statistics API, on purpose.
    def is_raffleid_duplicate(self, aid, number):
        # True when a row with this (aid, number) already exists.
        cursor = self.conn.execute(
            'SELECT 1 FROM substanceraffle_status WHERE aid=? AND number=?', (str(aid), str(number)))
        return bool(cursor.fetchone())
class SubstanceRaffleJoinedTable:
    """sqlite gateway recording which uid joined which raffle (aid, number)."""
    def __init__(self):
        # (uid, aid, number) is unique; raffle details such as followed users
        # live in the status table, so nothing is duplicated here.
        ddl = (
            'CREATE TABLE IF NOT EXISTS substanceraffle_joined ('
            'uid TEXT NOT NULL, '
            'aid TEXT NOT NULL, '
            'number TEXT NOT NULL,'
            'PRIMARY KEY (uid, aid, number)'
            '); '
        )
        conn.execute(ddl)
        self.conn = conn
    def as_bili_data(self, row):
        """Convert a raw sqlite row into a SubstanceRaffleJoined record."""
        return SubstanceRaffleJoined(*row)
    def insert_element(self, substance_raffle_joined: SubstanceRaffleJoined):
        """Persist one joined record inside a transaction."""
        with self.conn:
            self.conn.execute(
                'INSERT INTO substanceraffle_joined VALUES (?, ?, ?)',
                substance_raffle_joined.as_sql_values())
    def select_all(self):
        """Return every row as SubstanceRaffleJoined objects."""
        rows = self.conn.execute('SELECT * FROM substanceraffle_joined')
        return [self.as_bili_data(row) for row in rows]
    def select_by_primary_key(self, uid, aid, number):
        """Look up one record by its full key; None when absent."""
        row = self.conn.execute(
            'SELECT * FROM substanceraffle_joined WHERE uid = ? AND aid = ? AND number=?',
            (str(uid), str(aid), str(number))).fetchone()
        return None if row is None else self.as_bili_data(row)
    def del_by_primary_key(self, uid, aid, number):
        """Transactional delete of one record by its full key."""
        with self.conn:
            self.conn.execute(
                'DELETE FROM substanceraffle_joined WHERE uid = ? AND aid = ? AND number=?',
                (str(uid), str(aid), str(number)))
# substanceraffle_results stores final outcomes.  It overlaps a little with
# the status table, but keeping results separate avoids complicating that
# table — the data stored here does not need the same level of detail.
class SubstanceRaffleResultsTable:
    # Keyed by (aid, number); prize_cmt and prize_list are persisted as
    # space-separated strings.  Shares the module-level connection `conn`.
    def __init__(self):
        # Times are stored as INTEGER so they compare directly in SQL.
        sql_create_table = (
            'CREATE TABLE IF NOT EXISTS substanceraffle_results ('
            'aid TEXT NOT NULL,'
            'number TEXT NOT NULL,'
            'describe TEXT NOT NULL,'
            'join_start_time INTEGER NOT NULL,'
            'join_end_time INTEGER NOT NULL, '
            'prize_cmt TEXT NOT NULL,'
            'prize_list TEXT NOT NULL,'
            'PRIMARY KEY (aid, number)'
            '); '
        )
        conn.execute(sql_create_table)
        self.conn = conn
    def as_bili_data(self, row):
        # Convert a raw row; prize_cmt splits into strings, prize_list into
        # ints (both are space-separated in the database).
        *info, prize_cmt, prize_list = row
        list_prize_cmt = [i for i in prize_cmt.split(' ')]
        list_prize_list = [int(i) for i in prize_list.split(' ')]
        return SubstanceRaffleResults(*info, list_prize_cmt, list_prize_list)
    def insert_element(self, substance_raffle_results: SubstanceRaffleResults):
        # Placeholders let sqlite adapt the value types; inlining values into
        # the SQL text would be fragile.
        with self.conn:
            self.conn.execute(
                'INSERT INTO substanceraffle_results VALUES (?, ?, ?, ?, ?, ?, ?)',
                substance_raffle_results.as_sql_values()
            )
    def select_all(self):
        # Return every row, converted to SubstanceRaffleResults objects.
        results = []
        for row in self.conn.execute('SELECT * FROM substanceraffle_results'):
            results.append(self.as_bili_data(row))
        return results
    def select_by_primary_key(self, aid, number):
        # Look up one result row by its composite key; None when absent.
        cursor = self.conn.execute(
            'SELECT * FROM substanceraffle_results WHERE aid=? AND number=?', (str(aid), str(number)))
        result = cursor.fetchone()
        if result is None:
            return None
        return self.as_bili_data(result)
    def del_by_primary_key(self, aid, number):
        # Transactional delete of one result row.
        with self.conn:
            self.conn.execute('DELETE FROM substanceraffle_results WHERE aid=? AND number=?', (str(aid), str(number)))
class SubstanceRaffleLuckydogTable:
    """sqlite gateway recording which uid won which raffle (aid, number)."""
    def __init__(self):
        # (uid, aid, number) is unique; raffle details such as followed users
        # live in the status table, so nothing is duplicated here.
        ddl = (
            'CREATE TABLE IF NOT EXISTS substanceraffle_luckydog ('
            'uid TEXT NOT NULL, '
            'aid TEXT NOT NULL, '
            'number TEXT NOT NULL,'
            'PRIMARY KEY (uid, aid, number)'
            '); '
        )
        conn.execute(ddl)
        self.conn = conn
    def as_bili_data(self, row):
        """Convert a raw sqlite row into a SubstanceRaffleLuckydog record."""
        return SubstanceRaffleLuckydog(*row)
    def insert_element(self, substance_raffle_luckydog: SubstanceRaffleLuckydog):
        """Persist one luckydog record inside a transaction."""
        with self.conn:
            self.conn.execute(
                'INSERT INTO substanceraffle_luckydog VALUES (?, ?, ?)',
                substance_raffle_luckydog.as_sql_values())
    def select_all(self):
        """Return every row as SubstanceRaffleLuckydog objects."""
        rows = self.conn.execute('SELECT * FROM substanceraffle_luckydog')
        return [self.as_bili_data(row) for row in rows]
    def select_by_primary_key(self, uid, aid, number):
        """Look up one record by its full key; None when absent."""
        row = self.conn.execute(
            'SELECT * FROM substanceraffle_luckydog WHERE uid = ? AND aid = ? AND number=?',
            (str(uid), str(aid), str(number))).fetchone()
        return None if row is None else self.as_bili_data(row)
    def del_by_primary_key(self, uid, aid, number):
        """Transactional delete of one record by its full key."""
        with self.conn:
            self.conn.execute(
                'DELETE FROM substanceraffle_luckydog WHERE uid = ? AND aid = ? AND number=?',
                (str(uid), str(aid), str(number)))
# Module-level singletons: every importer shares these gateways (and thus the
# single sqlite connection created at import time).
substanceraffle_status_table = SubstanceRaffleStatusTable()
substanceraffle_joined_table = SubstanceRaffleJoinedTable()
substanceraffle_results_table = SubstanceRaffleResultsTable()
substanceraffle_luckydog_table = SubstanceRaffleLuckydogTable()
other_table = OthersTable()
# Thin module-level convenience wrappers around the table singletons above.
def insert_substanceraffle_status_table(substance_raffle_status: SubstanceRaffleStatus):
    """Insert one raffle status row."""
    substanceraffle_status_table.insert_element(substance_raffle_status)
def insert_substanceraffle_joined_table(substance_raffle_joined: SubstanceRaffleJoined):
    """Insert one joined row."""
    substanceraffle_joined_table.insert_element(substance_raffle_joined)
def insert_substanceraffle_results_table(substance_raffle_result: SubstanceRaffleResults):
    """Insert one result row."""
    substanceraffle_results_table.insert_element(substance_raffle_result)
def insert_substanceraffle_luckydog_table(substance_raffle_luckydog: SubstanceRaffleLuckydog):
    """Insert one luckydog row."""
    substanceraffle_luckydog_table.insert_element(substance_raffle_luckydog)
def select_by_primary_key_from_substanceraffle_joined_table(uid, aid, number):
    """Return the joined record for (uid, aid, number), or None."""
    return substanceraffle_joined_table.select_by_primary_key(uid, aid, number)
def del_from_substanceraffle_status_table(aid, number):
    """Delete one raffle status row by (aid, number)."""
    substanceraffle_status_table.del_by_primary_key(aid, number)
def is_raffleid_duplicate(aid, number):
    """True when the raffle (aid, number) is already tracked."""
    return substanceraffle_status_table.is_raffleid_duplicate(aid, number)
def del_from_substanceraffle_joind_table(uid, aid, number):
    # NOTE: "joind" typo is kept — renaming would break existing callers.
    """Delete one joined row by (uid, aid, number)."""
    substanceraffle_joined_table.del_by_primary_key(uid, aid, number)
# Determine the newest known raffle id from the three persisted sources.
def _max_aid_of(table_name: str) -> int:
    """Largest integer aid stored in *table_name*, or -1 when it is empty."""
    rows = conn.execute('SELECT aid FROM ' + table_name).fetchall()
    return max((int(row[0]) for row in rows), default=-1)
def init_id() -> int:
    """Return the highest raffle id seen so far, or -1 when nothing is stored.

    Sources consulted:
    * the cached 'init_id' entry of the others table,
    * the aid columns of substanceraffle_status and substanceraffle_results.

    The leftover debug ``print`` calls that dumped every raw row to stdout on
    startup were removed; the duplicated per-table query logic now lives in
    the private helper above.
    """
    result = other_table.select_by_primary_key('init_id')
    # select_by_primary_key returns a 1-tuple (value,) or None.
    init_docid0 = int(result[0]) if result is not None else -1
    init_docid1 = _max_aid_of('substanceraffle_status')
    init_docid2 = _max_aid_of('substanceraffle_results')
    return max(init_docid0, init_docid1, init_docid2)
def insert_or_replace_other_able(key_word, value):
    # NOTE: "other_able" typo is kept — renaming would break existing callers.
    """Upsert one key/value pair into the others table."""
    other_table.insert_or_replace(key_word, value)
def set_rafflestatus_handle_status(handle_status: int, aid, number):
    """Transactionally update handle_status of the raffle (aid, number)."""
    with conn:
        conn.execute('UPDATE substanceraffle_status SET handle_status = ? WHERE aid=? AND number=?', (handle_status, str(aid), str(number)))
def select_rafflestatus(handle_status, tuple_join_time_range=None, join_end_time_r=None):
    """Query raffles by status with optional time filters (see the table's select)."""
    return substanceraffle_status_table.select(handle_status, tuple_join_time_range, join_end_time_r)
'''
a = SubstanceRaffleStatus(
aid=3, number=4, describe='323', join_start_time=21, join_end_time=2332, prize_cmt=['12', 'we ds'], handle_status=0)
print(a)
# substanceraffle_status_table.insert_element(a)
a0 = substanceraffle_status_table.select_by_primary_key(aid=3, number=4)
print(a0)
print(a == a0)
b = SubstanceRaffleJoined(uid='1213', aid='3', number='4')
print(b)
# substanceraffle_joined_table.insert_element(b)
b0 = substanceraffle_joined_table.select_by_primary_key(uid=1213, aid=3, number=4)
print(b0)
print(b == b0)
c = SubstanceRaffleResults(
aid=3, number=4, describe='323', join_start_time=21, join_end_time=2332,
prize_cmt=['12', 'we ds'], prize_list=[12, 22])
print(c)
# substanceraffle_results_table.insert_element(c)
c0 = substanceraffle_results_table.select_by_primary_key(aid=3, number=4)
print(c0)
print(c == c0)
d = SubstanceRaffleLuckydog(uid='1213', aid='3', number='4')
print(d)
# substanceraffle_luckydog_table.insert_element(d)
d0 = substanceraffle_luckydog_table.select_by_primary_key(uid=1213, aid=3, number=4)
print(d0)
print(d == d0)
print(init_id())
'''
| 39.68232 | 142 | 0.651584 |
6af20796563b8156cd73ae768a550be0e89b4f82 | 71 | py | Python | area_intersect/config/prod.py | gfw-api/area-intersect | 72f0bd7ebabfa77aec60052bfcc32c7eee80a98b | [
"MIT"
] | 5 | 2017-11-01T21:13:19.000Z | 2022-03-03T09:26:19.000Z | area_intersect/config/prod.py | gfw-api/area-intersect | 72f0bd7ebabfa77aec60052bfcc32c7eee80a98b | [
"MIT"
] | 11 | 2018-01-22T09:05:23.000Z | 2019-09-20T11:55:57.000Z | proconfig/config/prod.py | gfw-api/gs-pro-config | cf43fb977189a025a0b5b6e1fe38616845021b03 | [
"MIT"
] | 4 | 2017-07-20T15:32:59.000Z | 2018-04-03T15:51:33.000Z | """-"""
# Production configuration consumed by the application; currently only the
# logging level is set.
SETTINGS = {
    'logging': {
        'level': 'INFO'
    }
}
| 8.875 | 23 | 0.338028 |
1e31d3764f7baeeeee80bfa5905b20c88dd1b2e2 | 38 | py | Python | tests/__init__.py | agateriver/django-make-app | 1c390b78284ee727c645e560c323b53f05629fd4 | [
"MIT"
] | 6 | 2021-12-29T08:04:09.000Z | 2022-03-19T02:41:20.000Z | tests/__init__.py | agateriver/django-make-app | 1c390b78284ee727c645e560c323b53f05629fd4 | [
"MIT"
] | 105 | 2015-04-15T23:19:18.000Z | 2019-02-11T19:41:55.000Z | {{cookiecutter.project_slug}}/src/web/tests.py | illagrenan/django-cookiecutter-template | d237c01bbb4b61aa4d777c6abb5461e1dadfa8d0 | [
"MIT"
] | 5 | 2018-07-31T15:49:17.000Z | 2022-02-27T16:37:50.000Z | # -*- encoding: utf-8 -*-
# ! python3
| 12.666667 | 25 | 0.5 |
7e067cb32130b0eae8c24dc34c648d013a1b41ad | 4,739 | py | Python | python/ray/tests/test_memory_scheduling.py | vermashresth/ray | 9aaaa508cacb90a5be714478970b2191aaa43170 | [
"Apache-2.0"
] | 4 | 2019-10-18T17:44:58.000Z | 2021-04-14T14:37:21.000Z | python/ray/tests/test_memory_scheduling.py | vermashresth/ray | 9aaaa508cacb90a5be714478970b2191aaa43170 | [
"Apache-2.0"
] | 3 | 2021-06-08T21:46:35.000Z | 2022-03-12T00:35:21.000Z | python/ray/tests/test_memory_scheduling.py | vermashresth/ray | 9aaaa508cacb90a5be714478970b2191aaa43170 | [
"Apache-2.0"
] | 2 | 2017-10-31T23:20:07.000Z | 2019-11-13T20:16:03.000Z | import numpy as np
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
MB = 1024 * 1024
@ray.remote(memory=100 * MB)
class Actor:
def __init__(self):
pass
def ping(self):
return "ok"
@ray.remote(object_store_memory=100 * MB)
class Actor2:
def __init__(self):
pass
def ping(self):
return "ok"
def train_oom(config, reporter):
ray.put(np.zeros(200 * 1024 * 1024))
reporter(result=123)
class TestMemoryScheduling(unittest.TestCase):
def testMemoryRequest(self):
try:
ray.init(num_cpus=1, memory=200 * MB)
# fits first 2
a = Actor.remote()
b = Actor.remote()
ok, _ = ray.wait(
[a.ping.remote(), b.ping.remote()],
timeout=60.0,
num_returns=2)
self.assertEqual(len(ok), 2)
# does not fit
c = Actor.remote()
ok, _ = ray.wait([c.ping.remote()], timeout=5.0)
self.assertEqual(len(ok), 0)
finally:
ray.shutdown()
def testObjectStoreMemoryRequest(self):
try:
ray.init(num_cpus=1, object_store_memory=300 * MB)
# fits first 2 (70% allowed)
a = Actor2.remote()
b = Actor2.remote()
ok, _ = ray.wait(
[a.ping.remote(), b.ping.remote()],
timeout=60.0,
num_returns=2)
self.assertEqual(len(ok), 2)
# does not fit
c = Actor2.remote()
ok, _ = ray.wait([c.ping.remote()], timeout=5.0)
self.assertEqual(len(ok), 0)
finally:
ray.shutdown()
def testTuneDriverHeapLimit(self):
try:
_register_all()
result = tune.run(
"PG",
stop={"timesteps_total": 10000},
config={
"env": "CartPole-v0",
"memory": 100 * 1024 * 1024, # too little
},
raise_on_failed_trial=False)
self.assertEqual(result.trials[0].status, "ERROR")
self.assertTrue(
"RayOutOfMemoryError: Heap memory usage for ray_PG_" in
result.trials[0].error_msg)
finally:
ray.shutdown()
def testTuneDriverStoreLimit(self):
try:
_register_all()
self.assertRaisesRegexp(
ray.tune.error.TuneError,
".*Insufficient cluster resources.*",
lambda: tune.run(
"PG",
stop={"timesteps_total": 10000},
config={
"env": "CartPole-v0",
# too large
"object_store_memory": 10000 * 1024 * 1024,
}))
finally:
ray.shutdown()
def testTuneWorkerHeapLimit(self):
try:
_register_all()
result = tune.run(
"PG",
stop={"timesteps_total": 10000},
config={
"env": "CartPole-v0",
"num_workers": 1,
"memory_per_worker": 100 * 1024 * 1024, # too little
},
raise_on_failed_trial=False)
self.assertEqual(result.trials[0].status, "ERROR")
self.assertTrue(
"RayOutOfMemoryError: Heap memory usage for ray_Rollout" in
result.trials[0].error_msg)
finally:
ray.shutdown()
def testTuneWorkerStoreLimit(self):
try:
_register_all()
self.assertRaisesRegexp(
ray.tune.error.TuneError,
".*Insufficient cluster resources.*",
lambda:
tune.run("PG", stop={"timesteps_total": 0}, config={
"env": "CartPole-v0",
"num_workers": 1,
# too large
"object_store_memory_per_worker": 10000 * 1024 * 1024,
}))
finally:
ray.shutdown()
def testTuneObjectLimitApplied(self):
try:
result = tune.run(
train_oom,
resources_per_trial={"object_store_memory": 150 * 1024 * 1024},
raise_on_failed_trial=False)
self.assertTrue(result.trials[0].status, "ERROR")
self.assertTrue("ObjectStoreFullError: Failed to put" in
result.trials[0].error_msg)
finally:
ray.shutdown()
# Allow running this file directly: delegate to pytest in verbose mode and
# propagate its exit code to the shell.
if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| 29.993671 | 79 | 0.49061 |
38e129538b64636454aa405e8f4440f4cb72f876 | 63,369 | py | Python | plotly/graph_objs/surface/_colorbar.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 1 | 2018-07-16T01:51:47.000Z | 2018-07-16T01:51:47.000Z | plotly/graph_objs/surface/_colorbar.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/graph_objs/surface/_colorbar.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class ColorBar(BaseTraceHierarchyType):
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['bgcolor']
@bgcolor.setter
def bgcolor(self, val):
self['bgcolor'] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['bordercolor']
@bordercolor.setter
def bordercolor(self, val):
self['bordercolor'] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['borderwidth']
@borderwidth.setter
def borderwidth(self, val):
self['borderwidth'] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
*log* and *date* axes. If the axis `type` is *log*, then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. *log* has several special values;
*L<f>*, where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = *L0.5* will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use *D1* (all
digits) or *D2* (only 2 and 5). `tick0` is ignored for *D1* and
*D2*. If the axis `type` is *date*, then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. *date* also has
special values *M<n>* gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to *2000-01-15* and `dtick` to
*M3*. To set ticks every 4 years, set `dtick` to *M48*
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self['dtick']
@dtick.setter
def dtick(self, val):
self['dtick'] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If *none*, it
appears as 1,000,000,000. If *e*, 1e+9. If *E*, 1E+9. If
*power*, 1x10^9 (with 9 in a super script). If *SI*, 1G. If
*B*, 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self['exponentformat']
@exponentformat.setter
def exponentformat(self, val):
self['exponentformat'] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['len']
@len.setter
def len(self, val):
self['len'] = val
# lenmode
# -------
@property
def lenmode(self):
    """
    Unit in which the color bar's length (`len`) is expressed.

    One of 'fraction' (of the plot) or 'pixels'.

    Returns
    -------
    Any
    """
    value = self['lenmode']
    return value

@lenmode.setter
def lenmode(self, val):
    # Enumeration validation is delegated to __setitem__.
    self['lenmode'] = val
# nticks
# ------
@property
def nticks(self):
    """
    Maximum number of ticks for this axis.

    The actual count is chosen automatically to be <= `nticks`.
    Only has an effect when `tickmode` is 'auto'. A non-negative
    integer.

    Returns
    -------
    int
    """
    value = self['nticks']
    return value

@nticks.setter
def nticks(self, val):
    # Integer-range validation is delegated to __setitem__.
    self['nticks'] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
    """
    Color of the axis line.

    Accepts a hex string (e.g. '#ff0000'), an rgb/rgba string,
    an hsl/hsla or hsv/hsva string, or any named CSS color.

    Returns
    -------
    str
    """
    value = self['outlinecolor']
    return value

@outlinecolor.setter
def outlinecolor(self, val):
    # Color-string validation is delegated to __setitem__.
    self['outlinecolor'] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
    """
    Width (in px) of the axis line.

    A number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    value = self['outlinewidth']
    return value

@outlinewidth.setter
def outlinewidth(self, val):
    # Numeric-range validation is delegated to __setitem__.
    self['outlinewidth'] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
    """
    Whether even 4-digit integers get thousands separators.

    A boolean.

    Returns
    -------
    bool
    """
    value = self['separatethousands']
    return value

@separatethousands.setter
def separatethousands(self, val):
    # Boolean validation is delegated to __setitem__.
    self['separatethousands'] = val
# showexponent
# ------------
@property
def showexponent(self):
    """
    Which tick labels display their exponent.

    One of 'all', 'first', 'last' or 'none'.

    Returns
    -------
    Any
    """
    value = self['showexponent']
    return value

@showexponent.setter
def showexponent(self, val):
    # Enumeration validation is delegated to __setitem__.
    self['showexponent'] = val
# showticklabels
# --------------
@property
def showticklabels(self):
    """
    Whether tick labels are drawn.

    A boolean.

    Returns
    -------
    bool
    """
    value = self['showticklabels']
    return value

@showticklabels.setter
def showticklabels(self, val):
    # Boolean validation is delegated to __setitem__.
    self['showticklabels'] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
    """
    Which tick labels display the prefix.

    One of 'all', 'first', 'last' or 'none' ('none' hides all
    prefixes).

    Returns
    -------
    Any
    """
    value = self['showtickprefix']
    return value

@showtickprefix.setter
def showtickprefix(self, val):
    # Enumeration validation is delegated to __setitem__.
    self['showtickprefix'] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
    """
    Same as `showtickprefix`, but for tick suffixes.

    One of 'all', 'first', 'last' or 'none'.

    Returns
    -------
    Any
    """
    value = self['showticksuffix']
    return value

@showticksuffix.setter
def showticksuffix(self, val):
    # Enumeration validation is delegated to __setitem__.
    self['showticksuffix'] = val
# thickness
# ---------
@property
def thickness(self):
    """
    Thickness of the color bar, excluding padding, ticks and
    labels.

    A number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    value = self['thickness']
    return value

@thickness.setter
def thickness(self, val):
    # Numeric-range validation is delegated to __setitem__.
    self['thickness'] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
    """
    Unit in which the color bar's `thickness` is expressed.

    One of 'fraction' (of the plot) or 'pixels'.

    Returns
    -------
    Any
    """
    value = self['thicknessmode']
    return value

@thicknessmode.setter
def thicknessmode(self, val):
    # Enumeration validation is delegated to __setitem__.
    self['thicknessmode'] = val
# tick0
# -----
@property
def tick0(self):
    """
    Placement of the first tick on this axis; used with `dtick`.

    For *log* axes supply the log of the starting tick (except
    with `dtick`=*L<f>*); for *date* axes a date string; for
    *category* axes the serial number of the category.

    Returns
    -------
    Any
    """
    value = self['tick0']
    return value

@tick0.setter
def tick0(self, val):
    # Accepts any type; validation is delegated to __setitem__.
    self['tick0'] = val
# tickangle
# ---------
@property
def tickangle(self):
    """
    Angle of the tick labels relative to the horizontal, in
    degrees (-90 draws them vertically).

    Values outside [-180, 180] are normalized to the equivalent
    angle (e.g. 270 becomes -90).

    Returns
    -------
    int|float
    """
    value = self['tickangle']
    return value

@tickangle.setter
def tickangle(self, val):
    # Angle normalization/validation is delegated to __setitem__.
    self['tickangle'] = val
# tickcolor
# ---------
@property
def tickcolor(self):
    """
    Color of the ticks.

    Accepts a hex string (e.g. '#ff0000'), an rgb/rgba string,
    an hsl/hsla or hsv/hsva string, or any named CSS color.

    Returns
    -------
    str
    """
    value = self['tickcolor']
    return value

@tickcolor.setter
def tickcolor(self, val):
    # Color-string validation is delegated to __setitem__.
    self['tickcolor'] = val
# tickfont
# --------
@property
def tickfont(self):
    """
    Font used for the color bar's tick labels.

    May be set to an instance of
    plotly.graph_objs.surface.colorbar.Tickfont or a dict of
    properties ('color', 'family', 'size') passed to its
    constructor.

    Returns
    -------
    plotly.graph_objs.surface.colorbar.Tickfont
    """
    value = self['tickfont']
    return value

@tickfont.setter
def tickfont(self, val):
    # Coercion of dicts to Tickfont happens in __setitem__.
    self['tickfont'] = val
# tickformat
# ----------
@property
def tickformat(self):
    """
    Tick label formatting rule, using d3 formatting
    mini-languages.

    For numbers see the d3-format locale_format docs; for dates
    the d3-time-format docs. Plotly adds *%{n}f* for fractional
    seconds with n digits, e.g. *%H~%M~%S.%2f* renders
    *2016-10-13 09:15:23.456* as *09~15~23.46*.

    Returns
    -------
    str
    """
    value = self['tickformat']
    return value

@tickformat.setter
def tickformat(self, val):
    # String coercion/validation is delegated to __setitem__.
    self['tickformat'] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
    """
    Per-zoom-level tick format overrides.

    A tuple of
    plotly.graph_objs.surface.colorbar.Tickformatstop instances
    (or dicts passed to that constructor), each carrying a
    'dtickrange' [min, max] and a 'value' format string.

    Returns
    -------
    tuple[plotly.graph_objs.surface.colorbar.Tickformatstop]
    """
    value = self['tickformatstops']
    return value

@tickformatstops.setter
def tickformatstops(self, val):
    # Element coercion/validation is delegated to __setitem__.
    self['tickformatstops'] = val
# ticklen
# -------
@property
def ticklen(self):
    """
    Tick length in px.

    A number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    value = self['ticklen']
    return value

@ticklen.setter
def ticklen(self, val):
    # Numeric-range validation is delegated to __setitem__.
    self['ticklen'] = val
# tickmode
# --------
@property
def tickmode(self):
    """
    Tick placement mode for this axis.

    'auto': count set via `nticks`; 'linear': placement from
    `tick0` and step `dtick` (the default when those are
    provided); 'array': positions from `tickvals`, labels from
    `ticktext` (the default when `tickvals` is provided).

    Returns
    -------
    Any
    """
    value = self['tickmode']
    return value

@tickmode.setter
def tickmode(self, val):
    # Enumeration validation is delegated to __setitem__.
    self['tickmode'] = val
# tickprefix
# ----------
@property
def tickprefix(self):
    """
    Prefix prepended to tick labels.

    A string (numbers are converted to strings).

    Returns
    -------
    str
    """
    value = self['tickprefix']
    return value

@tickprefix.setter
def tickprefix(self, val):
    # String coercion/validation is delegated to __setitem__.
    self['tickprefix'] = val
# ticks
# -----
@property
def ticks(self):
    """
    Whether and where ticks are drawn.

    'outside' or 'inside' draws ticks outside/inside the axis
    lines; the empty string '' hides them.

    Returns
    -------
    Any
    """
    value = self['ticks']
    return value

@ticks.setter
def ticks(self, val):
    # Enumeration validation is delegated to __setitem__.
    self['ticks'] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
    """
    Suffix appended to tick labels.

    A string (numbers are converted to strings).

    Returns
    -------
    str
    """
    value = self['ticksuffix']
    return value

@ticksuffix.setter
def ticksuffix(self, val):
    # String coercion/validation is delegated to __setitem__.
    self['ticksuffix'] = val
# ticktext
# --------
@property
def ticktext(self):
    """
    Text shown at the positions given by `tickvals`.

    Only has an effect when `tickmode` is 'array'. May be set
    from a tuple, list, numpy array or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    value = self['ticktext']
    return value

@ticktext.setter
def ticktext(self, val):
    # Array coercion/validation is delegated to __setitem__.
    self['ticktext'] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
    """
    Source reference on plot.ly for `ticktext`.

    A string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    value = self['ticktextsrc']
    return value

@ticktextsrc.setter
def ticktextsrc(self, val):
    # Source-reference validation is delegated to __setitem__.
    self['ticktextsrc'] = val
# tickvals
# --------
@property
def tickvals(self):
    """
    Values at which ticks appear on this axis.

    Only has an effect when `tickmode` is 'array'; used with
    `ticktext`. May be set from a tuple, list, numpy array or
    pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    value = self['tickvals']
    return value

@tickvals.setter
def tickvals(self, val):
    # Array coercion/validation is delegated to __setitem__.
    self['tickvals'] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
    """
    Source reference on plot.ly for `tickvals`.

    A string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    value = self['tickvalssrc']
    return value

@tickvalssrc.setter
def tickvalssrc(self, val):
    # Source-reference validation is delegated to __setitem__.
    self['tickvalssrc'] = val
# tickwidth
# ---------
@property
def tickwidth(self):
    """
    Tick width in px.

    A number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    value = self['tickwidth']
    return value

@tickwidth.setter
def tickwidth(self, val):
    # Numeric-range validation is delegated to __setitem__.
    self['tickwidth'] = val
# title
# -----
@property
def title(self):
    """
    Title of the color bar.

    A string (numbers are converted to strings).

    Returns
    -------
    str
    """
    value = self['title']
    return value

@title.setter
def title(self, val):
    # String coercion/validation is delegated to __setitem__.
    self['title'] = val
# titlefont
# ---------
@property
def titlefont(self):
    """
    Font used for the color bar's title.

    May be set to an instance of
    plotly.graph_objs.surface.colorbar.Titlefont or a dict of
    properties ('color', 'family', 'size') passed to its
    constructor.

    Returns
    -------
    plotly.graph_objs.surface.colorbar.Titlefont
    """
    value = self['titlefont']
    return value

@titlefont.setter
def titlefont(self, val):
    # Coercion of dicts to Titlefont happens in __setitem__.
    self['titlefont'] = val
# titleside
# ---------
@property
def titleside(self):
    """
    Location of the color bar title relative to the color bar.

    One of 'right', 'top' or 'bottom'.

    Returns
    -------
    Any
    """
    value = self['titleside']
    return value

@titleside.setter
def titleside(self, val):
    # Enumeration validation is delegated to __setitem__.
    self['titleside'] = val
# x
# -
@property
def x(self):
    """
    Horizontal position of the color bar, in plot fraction.

    A number in the interval [-2, 3].

    Returns
    -------
    int|float
    """
    value = self['x']
    return value

@x.setter
def x(self, val):
    # Numeric-range validation is delegated to __setitem__.
    self['x'] = val
# xanchor
# -------
@property
def xanchor(self):
    """
    Horizontal anchor: binds the `x` position to the 'left',
    'center' or 'right' of the color bar.

    Returns
    -------
    Any
    """
    value = self['xanchor']
    return value

@xanchor.setter
def xanchor(self, val):
    # Enumeration validation is delegated to __setitem__.
    self['xanchor'] = val
# xpad
# ----
@property
def xpad(self):
    """
    Padding (in px) along the x direction.

    A number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    value = self['xpad']
    return value

@xpad.setter
def xpad(self, val):
    # Numeric-range validation is delegated to __setitem__.
    self['xpad'] = val
# y
# -
@property
def y(self):
    """
    Vertical position of the color bar, in plot fraction.

    A number in the interval [-2, 3].

    Returns
    -------
    int|float
    """
    value = self['y']
    return value

@y.setter
def y(self, val):
    # Numeric-range validation is delegated to __setitem__.
    self['y'] = val
# yanchor
# -------
@property
def yanchor(self):
    """
    Vertical anchor: binds the `y` position to the 'top',
    'middle' or 'bottom' of the color bar.

    Returns
    -------
    Any
    """
    value = self['yanchor']
    return value

@yanchor.setter
def yanchor(self, val):
    # Enumeration validation is delegated to __setitem__.
    self['yanchor'] = val
# ypad
# ----
@property
def ypad(self):
    """
    Padding (in px) along the y direction.

    A number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    value = self['ypad']
    return value

@ypad.setter
def ypad(self, val):
    # Numeric-range validation is delegated to __setitem__.
    self['ypad'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'surface'
# Self properties description
# ---------------------------
# NOTE: the returned text is a single literal consumed verbatim by
# help()/error messages; it must stay in sync with the property
# docstrings above and is auto-generated — do not hand-edit.
@property
def _prop_descriptions(self):
    return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to *log* and *date* axes. If the axis `type`
is *log*, then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. *log* has several special
values; *L<f>*, where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = *L0.5* will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use *D1* (all digits) or *D2*
(only 2 and 5). `tick0` is ignored for *D1* and *D2*.
If the axis `type` is *date*, then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
*date* also has special values *M<n>* gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to *2000-01-15* and `dtick` to *M3*. To set
ticks every 4 years, set `dtick` to *M48*
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
*none*, it appears as 1,000,000,000. If *e*, 1e+9. If
*E*, 1E+9. If *power*, 1x10^9 (with 9 in a super
script). If *SI*, 1G. If *B*, 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot *fraction* or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
*auto*.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If *all*, all exponents are shown besides their
significands. If *first*, only the exponent of the
first tick is shown. If *last*, only the exponent of
the last tick is shown. If *none*, no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If *all*, all tick labels are displayed with a prefix.
If *first*, only the first tick is displayed with a
prefix. If *last*, only the last tick is displayed with
a suffix. If *none*, tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot *fraction* or in *pixels*. Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is *log*, then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is *date*, it should be a date string, like
date data. If the axis `type` is *category*, it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see: https://github.com/d3/d3-form
at/blob/master/README.md#locale_format And for dates
see: https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We add one
item to d3's date formatter: *%{n}f* for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat *%H~%M~%S.%2f* would
display *09~15~23.46*
tickformatstops
plotly.graph_objs.surface.colorbar.Tickformatstop
instance or dict with compatible properties
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If *auto*, the number
of ticks is set via `nticks`. If *linear*, the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` (*linear* is
the default value if `tick0` and `dtick` are provided).
If *array*, the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. (*array* is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If **, this
axis' ticks are not drawn. If *outside* (*inside*),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
*array*. Used with `tickvals`.
ticktextsrc
Sets the source reference on plot.ly for ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to *array*.
Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for tickvals .
tickwidth
Sets the tick width (in px).
title
Sets the title of the color bar.
titlefont
Sets this color bar's title font.
titleside
Determines the location of the colorbar title with
respect to the color bar.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the *left*, *center*
or *right* of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the *top*, *middle* or
*bottom* of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.surface.ColorBar
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to *log* and *date* axes. If the axis `type`
is *log*, then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. *log* has several special
values; *L<f>*, where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = *L0.5* will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use *D1* (all digits) or *D2*
(only 2 and 5). `tick0` is ignored for *D1* and *D2*.
If the axis `type` is *date*, then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
*date* also has special values *M<n>* gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to *2000-01-15* and `dtick` to *M3*. To set
ticks every 4 years, set `dtick` to *M48*
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
*none*, it appears as 1,000,000,000. If *e*, 1e+9. If
*E*, 1E+9. If *power*, 1x10^9 (with 9 in a super
script). If *SI*, 1G. If *B*, 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot *fraction* or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
*auto*.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If *all*, all exponents are shown besides their
significands. If *first*, only the exponent of the
first tick is shown. If *last*, only the exponent of
the last tick is shown. If *none*, no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If *all*, all tick labels are displayed with a prefix.
If *first*, only the first tick is displayed with a
prefix. If *last*, only the last tick is displayed with
a suffix. If *none*, tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot *fraction* or in *pixels*. Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is *log*, then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is *date*, it should be a date string, like
date data. If the axis `type` is *category*, it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see: https://github.com/d3/d3-form
at/blob/master/README.md#locale_format And for dates
see: https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We add one
item to d3's date formatter: *%{n}f* for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat *%H~%M~%S.%2f* would
display *09~15~23.46*
tickformatstops
plotly.graph_objs.surface.colorbar.Tickformatstop
instance or dict with compatible properties
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If *auto*, the number
of ticks is set via `nticks`. If *linear*, the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` (*linear* is
the default value if `tick0` and `dtick` are provided).
If *array*, the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. (*array* is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If **, this
axis' ticks are not drawn. If *outside* (*inside*),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
*array*. Used with `tickvals`.
ticktextsrc
Sets the source reference on plot.ly for ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to *array*.
Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for tickvals .
tickwidth
Sets the tick width (in px).
title
Sets the title of the color bar.
titlefont
Sets this color bar's title font.
titleside
Determines the location of the colorbar title with
respect to the color bar.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the *left*, *center*
or *right* of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the *top*, *middle* or
*bottom* of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__('colorbar')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.surface.ColorBar
constructor must be a dict or
an instance of plotly.graph_objs.surface.ColorBar"""
)
# Import validators
# -----------------
from plotly.validators.surface import (colorbar as v_colorbar)
# Initialize validators
# ---------------------
self._validators['bgcolor'] = v_colorbar.BgcolorValidator()
self._validators['bordercolor'] = v_colorbar.BordercolorValidator()
self._validators['borderwidth'] = v_colorbar.BorderwidthValidator()
self._validators['dtick'] = v_colorbar.DtickValidator()
self._validators['exponentformat'
] = v_colorbar.ExponentformatValidator()
self._validators['len'] = v_colorbar.LenValidator()
self._validators['lenmode'] = v_colorbar.LenmodeValidator()
self._validators['nticks'] = v_colorbar.NticksValidator()
self._validators['outlinecolor'] = v_colorbar.OutlinecolorValidator()
self._validators['outlinewidth'] = v_colorbar.OutlinewidthValidator()
self._validators['separatethousands'
] = v_colorbar.SeparatethousandsValidator()
self._validators['showexponent'] = v_colorbar.ShowexponentValidator()
self._validators['showticklabels'
] = v_colorbar.ShowticklabelsValidator()
self._validators['showtickprefix'
] = v_colorbar.ShowtickprefixValidator()
self._validators['showticksuffix'
] = v_colorbar.ShowticksuffixValidator()
self._validators['thickness'] = v_colorbar.ThicknessValidator()
self._validators['thicknessmode'] = v_colorbar.ThicknessmodeValidator()
self._validators['tick0'] = v_colorbar.Tick0Validator()
self._validators['tickangle'] = v_colorbar.TickangleValidator()
self._validators['tickcolor'] = v_colorbar.TickcolorValidator()
self._validators['tickfont'] = v_colorbar.TickfontValidator()
self._validators['tickformat'] = v_colorbar.TickformatValidator()
self._validators['tickformatstops'
] = v_colorbar.TickformatstopsValidator()
self._validators['ticklen'] = v_colorbar.TicklenValidator()
self._validators['tickmode'] = v_colorbar.TickmodeValidator()
self._validators['tickprefix'] = v_colorbar.TickprefixValidator()
self._validators['ticks'] = v_colorbar.TicksValidator()
self._validators['ticksuffix'] = v_colorbar.TicksuffixValidator()
self._validators['ticktext'] = v_colorbar.TicktextValidator()
self._validators['ticktextsrc'] = v_colorbar.TicktextsrcValidator()
self._validators['tickvals'] = v_colorbar.TickvalsValidator()
self._validators['tickvalssrc'] = v_colorbar.TickvalssrcValidator()
self._validators['tickwidth'] = v_colorbar.TickwidthValidator()
self._validators['title'] = v_colorbar.TitleValidator()
self._validators['titlefont'] = v_colorbar.TitlefontValidator()
self._validators['titleside'] = v_colorbar.TitlesideValidator()
self._validators['x'] = v_colorbar.XValidator()
self._validators['xanchor'] = v_colorbar.XanchorValidator()
self._validators['xpad'] = v_colorbar.XpadValidator()
self._validators['y'] = v_colorbar.YValidator()
self._validators['yanchor'] = v_colorbar.YanchorValidator()
self._validators['ypad'] = v_colorbar.YpadValidator()
# Populate data dict with properties
# ----------------------------------
v = arg.pop('bgcolor', None)
self.bgcolor = bgcolor if bgcolor is not None else v
v = arg.pop('bordercolor', None)
self.bordercolor = bordercolor if bordercolor is not None else v
v = arg.pop('borderwidth', None)
self.borderwidth = borderwidth if borderwidth is not None else v
v = arg.pop('dtick', None)
self.dtick = dtick if dtick is not None else v
v = arg.pop('exponentformat', None)
self.exponentformat = exponentformat if exponentformat is not None else v
v = arg.pop('len', None)
self.len = len if len is not None else v
v = arg.pop('lenmode', None)
self.lenmode = lenmode if lenmode is not None else v
v = arg.pop('nticks', None)
self.nticks = nticks if nticks is not None else v
v = arg.pop('outlinecolor', None)
self.outlinecolor = outlinecolor if outlinecolor is not None else v
v = arg.pop('outlinewidth', None)
self.outlinewidth = outlinewidth if outlinewidth is not None else v
v = arg.pop('separatethousands', None)
self.separatethousands = separatethousands if separatethousands is not None else v
v = arg.pop('showexponent', None)
self.showexponent = showexponent if showexponent is not None else v
v = arg.pop('showticklabels', None)
self.showticklabels = showticklabels if showticklabels is not None else v
v = arg.pop('showtickprefix', None)
self.showtickprefix = showtickprefix if showtickprefix is not None else v
v = arg.pop('showticksuffix', None)
self.showticksuffix = showticksuffix if showticksuffix is not None else v
v = arg.pop('thickness', None)
self.thickness = thickness if thickness is not None else v
v = arg.pop('thicknessmode', None)
self.thicknessmode = thicknessmode if thicknessmode is not None else v
v = arg.pop('tick0', None)
self.tick0 = tick0 if tick0 is not None else v
v = arg.pop('tickangle', None)
self.tickangle = tickangle if tickangle is not None else v
v = arg.pop('tickcolor', None)
self.tickcolor = tickcolor if tickcolor is not None else v
v = arg.pop('tickfont', None)
self.tickfont = tickfont if tickfont is not None else v
v = arg.pop('tickformat', None)
self.tickformat = tickformat if tickformat is not None else v
v = arg.pop('tickformatstops', None)
self.tickformatstops = tickformatstops if tickformatstops is not None else v
v = arg.pop('ticklen', None)
self.ticklen = ticklen if ticklen is not None else v
v = arg.pop('tickmode', None)
self.tickmode = tickmode if tickmode is not None else v
v = arg.pop('tickprefix', None)
self.tickprefix = tickprefix if tickprefix is not None else v
v = arg.pop('ticks', None)
self.ticks = ticks if ticks is not None else v
v = arg.pop('ticksuffix', None)
self.ticksuffix = ticksuffix if ticksuffix is not None else v
v = arg.pop('ticktext', None)
self.ticktext = ticktext if ticktext is not None else v
v = arg.pop('ticktextsrc', None)
self.ticktextsrc = ticktextsrc if ticktextsrc is not None else v
v = arg.pop('tickvals', None)
self.tickvals = tickvals if tickvals is not None else v
v = arg.pop('tickvalssrc', None)
self.tickvalssrc = tickvalssrc if tickvalssrc is not None else v
v = arg.pop('tickwidth', None)
self.tickwidth = tickwidth if tickwidth is not None else v
v = arg.pop('title', None)
self.title = title if title is not None else v
v = arg.pop('titlefont', None)
self.titlefont = titlefont if titlefont is not None else v
v = arg.pop('titleside', None)
self.titleside = titleside if titleside is not None else v
v = arg.pop('x', None)
self.x = x if x is not None else v
v = arg.pop('xanchor', None)
self.xanchor = xanchor if xanchor is not None else v
v = arg.pop('xpad', None)
self.xpad = xpad if xpad is not None else v
v = arg.pop('y', None)
self.y = y if y is not None else v
v = arg.pop('yanchor', None)
self.yanchor = yanchor if yanchor is not None else v
v = arg.pop('ypad', None)
self.ypad = ypad if ypad is not None else v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
| 36.50288 | 93 | 0.568764 |
17a46e5785fea2d0dc78b910c72d39820072f002 | 1,668 | py | Python | bdd100k/common/typing.py | Anthuang/bdd100k | b7e1781317784317e4e715ab325515ade73978a9 | [
"BSD-3-Clause"
] | 193 | 2020-09-22T09:48:17.000Z | 2022-03-31T20:49:24.000Z | bdd100k/common/typing.py | Anthuang/bdd100k | b7e1781317784317e4e715ab325515ade73978a9 | [
"BSD-3-Clause"
] | 60 | 2020-09-28T15:44:40.000Z | 2022-03-31T07:58:58.000Z | bdd100k/common/typing.py | Anthuang/bdd100k | b7e1781317784317e4e715ab325515ade73978a9 | [
"BSD-3-Clause"
] | 41 | 2020-09-27T02:52:20.000Z | 2022-02-21T03:33:39.000Z | """Common type definitions.
The annotation files in BDD100K format have additional annotations
('other person', 'other vehicle' and 'trail') besides the considered
categories ('car', 'pedestrian', 'truck', etc.) to indicate the uncertain
regions. Given the different ways of handling these additional classes, we
provide three options to process the labels when converting them into COCO
format.
1. Ignore the labels. This is the default setting and is often used for
evaluation. CocoAPIs have native support for ignored annotations.
2. Remove the annotations from the label file. By setting
`remove-ignored=True`, the script will remove all the ignored annotations.
3. Use `ignored` as a separate class and the user can decide how to utilize
the annotations in `ignored` class. To achieve this, setting
`ignored-as-class=True`.
"""
import sys
from typing import Dict, List, Optional
from pydantic import BaseModel
from scalabel.common.typing import NDArrayU8
from scalabel.label.typing import Config
if sys.version_info >= (3, 8):
from typing import TypedDict # pylint: disable=no-name-in-module
else:
from typing_extensions import TypedDict
class InstanceType(TypedDict, total=False):
    """Define types of annotations in GT.

    ``total=False`` makes every key optional, so partially filled
    annotation dicts are still valid instances.
    """
    instance_id: int  # numeric id of the instance -- presumably stable across frames; TODO confirm
    category_id: int  # numeric category label
    truncated: bool  # instance extends beyond the image border
    occluded: bool  # instance is partially hidden by another object
    crowd: bool  # annotation covers a group of objects as one region
    ignored: bool  # excluded from evaluation (see module docstring)
    mask: NDArrayU8  # segmentation mask stored as a uint8 array
    bbox: List[float]  # bounding box coordinates; exact format not shown here -- TODO confirm
    area: float  # area of the instance (pixels)
class BDD100KConfig(BaseModel):
    """Extend metadata for BDD100K.

    Wraps the base Scalabel ``Config`` and adds the label-conversion
    options described in the module docstring.
    """
    scalabel: Config  # underlying Scalabel dataset configuration
    remove_ignored: bool = False  # drop ignored annotations entirely (option 2)
    ignored_as_class: bool = False  # keep 'ignored' as its own class (option 3)
    ignored_mapping: Optional[Dict[str, str]]  # mapping for categories treated as ignored -- TODO confirm direction
    name_mapping: Optional[Dict[str, str]]  # category renames applied during conversion -- TODO confirm
| 31.471698 | 75 | 0.751199 |
63c8ca3f8ec4d8ad1a6b685879f713834207d64f | 1,894 | py | Python | tests/gold_tests/timeout/timeout.test.py | zds05/trafficserver | 258c69b7628f5a4b90488e147c244a582222b5c8 | [
"Apache-2.0"
] | null | null | null | tests/gold_tests/timeout/timeout.test.py | zds05/trafficserver | 258c69b7628f5a4b90488e147c244a582222b5c8 | [
"Apache-2.0"
] | null | null | null | tests/gold_tests/timeout/timeout.test.py | zds05/trafficserver | 258c69b7628f5a4b90488e147c244a582222b5c8 | [
"Apache-2.0"
] | null | null | null | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = 'Testing ATS TCP handshake timeout'
ts = Test.MakeATSProcess("ts")
# Origin server replies only after a 15 s delay -- longer than the 5 s
# connect_attempts_timeout configured below, so ATS is expected to give up.
server = Test.MakeOriginServer("server", delay=15)
dns = Test.MakeDNServer("dns", ip='127.0.0.1', default=['127.0.0.1'])
request_header = {"headers": "GET /file HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "5678", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "5678", "body": ""}
server.addResponse("sessionfile.log", request_header, response_header)
# Point ATS at the local test DNS and cap origin connect attempts at 5 s.
ts.Disk.records_config.update({
    'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
    'proxy.config.dns.resolv_conf': 'NULL',
    'proxy.config.url_remap.remap_required': 0,
    'proxy.config.http.connect_attempts_timeout': 5
})
tr = Test.AddTestRun("tr")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.StartBefore(dns)
tr.Processes.Default.Command = 'curl -i -x http://127.0.0.1:{0} http://127.0.0.1:{1}/file'.format(ts.Variables.port, server.Variables.Port)
# curl's stdout is compared against the recorded (timeout) response.
tr.Processes.Default.Streams.stdout = "timeout.gold"
| 44.046512 | 139 | 0.729673 |
eba4b4459dc532ee4c8b3f12893b43bce03f8f5d | 464 | py | Python | apps/data/migrations/0029_auto_20200318_1146.py | moogoo78/portal20 | 085d598af0448ad34e22aa55981f7e82a042a893 | [
"MIT"
] | 7 | 2020-12-31T13:57:58.000Z | 2022-02-07T08:05:15.000Z | apps/data/migrations/0029_auto_20200318_1146.py | moogoo78/portal20 | 085d598af0448ad34e22aa55981f7e82a042a893 | [
"MIT"
] | 75 | 2020-03-16T07:18:28.000Z | 2022-03-31T09:55:15.000Z | apps/data/migrations/0029_auto_20200318_1146.py | moogoo78/portal20 | 085d598af0448ad34e22aa55981f7e82a042a893 | [
"MIT"
] | 9 | 2020-01-06T14:08:51.000Z | 2022-01-03T05:19:54.000Z | # Generated by Django 3.0.4 on 2020-03-18 03:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Restricts Dataset.status to two explicit choices
    # ('Public' / 'Private') via the choices argument.

    dependencies = [
        ('data', '0028_merge_20200318_1145'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dataset',
            name='status',
            field=models.CharField(choices=[('Public', 'Public'), ('Private', 'Private')], max_length=10, verbose_name='status'),
        ),
    ]
| 24.421053 | 129 | 0.605603 |
a08c9cf31e42ae73fa22b93ffd937f188f9a1f81 | 417 | py | Python | scripts/component_graph/test/server/util/test_logging.py | opensource-assist/fuschia | 66646c55b3d0b36aae90a4b6706b87f1a6261935 | [
"BSD-3-Clause"
] | 3 | 2020-08-02T04:46:18.000Z | 2020-08-07T10:10:53.000Z | scripts/component_graph/test/server/util/test_logging.py | opensource-assist/fuschia | 66646c55b3d0b36aae90a4b6706b87f1a6261935 | [
"BSD-3-Clause"
] | null | null | null | scripts/component_graph/test/server/util/test_logging.py | opensource-assist/fuschia | 66646c55b3d0b36aae90a4b6706b87f1a6261935 | [
"BSD-3-Clause"
] | 1 | 2020-08-07T10:11:49.000Z | 2020-08-07T10:11:49.000Z | #!/usr/bin/env python3
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from server.util.logging import *
class TestLogging(unittest.TestCase):
    """Smoke test for the server.util.logging helpers."""

    def test_logger_not_none(self):
        logger = get_logger(__name__)
        self.assertNotEqual(logger, None)


if __name__ == '__main__':
    unittest.main()
| 23.166667 | 72 | 0.743405 |
c5fa2a6a10ecb522ba506bebdcfacedf23f3c559 | 393 | py | Python | 1019 - Conversao de Tempo.py | le16bits/URI---Python | 9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db | [
"Apache-2.0"
] | null | null | null | 1019 - Conversao de Tempo.py | le16bits/URI---Python | 9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db | [
"Apache-2.0"
] | null | null | null | 1019 - Conversao de Tempo.py | le16bits/URI---Python | 9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''Author: Alessandra Souza
Date: 06/05/2017
Purpose: read an integer number of seconds and print it in the
hour:minute:second format.
URI Online Judge ID: 1019'''


def format_hms(total_seconds):
    """Return *total_seconds* formatted as "H:M:S".

    Two divmod calls replace the original branching: the old ``else``
    of ``if h > 0`` was unreachable (h is always > 0 when N >= 3600),
    and the float divisions relied on ``%d`` truncation.
    """
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "%d:%d:%d" % (hours, minutes, seconds)


if __name__ == '__main__':
    # Same stdin/stdout contract as before; the guard only prevents the
    # prompt from firing when the module is imported.
    N = int(input())
    print(format_hms(N))
| 17.086957 | 84 | 0.519084 |
cf97788225c7cf0a3663cc57ab055b384ad6c1ef | 571 | py | Python | projects/migrations/0020_auto_20200711_1356.py | peppasd/LIT | 80e256e7678be3cf3ad72d152005cdb7778545d5 | [
"MIT"
] | 2 | 2020-06-05T14:49:11.000Z | 2021-07-19T17:50:05.000Z | projects/migrations/0020_auto_20200711_1356.py | peppasd/LIT | 80e256e7678be3cf3ad72d152005cdb7778545d5 | [
"MIT"
] | 50 | 2020-05-29T11:15:33.000Z | 2020-07-29T15:30:53.000Z | projects/migrations/0020_auto_20200711_1356.py | peppasd/LIT | 80e256e7678be3cf3ad72d152005cdb7778545d5 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-07-11 11:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Loosens Photo: 'photo' becomes an optional FileField and 'title'
    # gains an empty-string default.

    dependencies = [
        ('projects', '0019_remove_project_labeled_photos'),
    ]
    operations = [
        migrations.AlterField(
            model_name='photo',
            name='photo',
            field=models.FileField(blank=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='photo',
            name='title',
            field=models.CharField(default='', max_length=100),
        ),
    ]
| 23.791667 | 63 | 0.579685 |
85c56299f145204f99d2d3ae39ddeaba01b737f0 | 3,045 | py | Python | senseye_cameras/input/camera_usb.py | senseye-inc/senseye-cameras | 9d9cdb95e64aaa8d08aa56bd9a79641263e65940 | [
"BSD-3-Clause"
] | 5 | 2020-03-20T17:07:35.000Z | 2022-01-25T23:48:52.000Z | senseye_cameras/input/camera_usb.py | senseye-inc/senseye-cameras | 9d9cdb95e64aaa8d08aa56bd9a79641263e65940 | [
"BSD-3-Clause"
] | 5 | 2020-03-05T20:55:06.000Z | 2022-03-24T22:41:56.000Z | senseye_cameras/input/camera_usb.py | senseye-inc/senseye-cameras | 9d9cdb95e64aaa8d08aa56bd9a79641263e65940 | [
"BSD-3-Clause"
] | null | null | null | import cv2
import time
import logging
from . input import Input
log = logging.getLogger(__name__)
class CameraUsb(Input):
    '''
    Opens a usb camera or video using OpenCV.

    Args:
        id (int OR str): id of the camera, or path to a video file.
        config (dict): Configuration dictionary. Accepted keywords:
            res (tuple): frame size
            codec (str)
            fps (int)
            use_dshow (int): if truthy, use the DirectShow capture
                backend (Windows; integer device ids only)
            channels (int): color channels per frame
    '''
    def __init__(self, id=0, config={}):
        # Defaults are merged with the user config by Input.__init__.
        defaults = {
            'fps': 30,
            'codec': 'MJPG',
            'use_dshow': 0,
            'channels': 3,
            'format': 'rawvideo',
        }
        Input.__init__(self, id=id, config=config, defaults=defaults)

    def configure(self):
        '''
        Configures the camera using a config.
        Supported configurations: fps, codec, res
        Fills self.config with the attribute values the driver actually
        accepted (these may differ from what was requested).
        '''
        if 'fps' in self.config:
            self.input.set(cv2.CAP_PROP_FPS, self.config.get('fps'))
        if 'codec' in self.config:
            self.input.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*self.config.get('codec')))
        if 'res' in self.config:
            self.input.set(cv2.CAP_PROP_FRAME_WIDTH, self.config.get('res')[0])
            self.input.set(cv2.CAP_PROP_FRAME_HEIGHT, self.config.get('res')[1])
        # Read back what the camera reports.
        width = int(self.input.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(self.input.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.config['res'] = (width, height, 3)
        self.config['width'] = width
        self.config['height'] = height
        self.config['fps'] = int(self.input.get(cv2.CAP_PROP_FPS))
        self.config['codec'] = int(self.input.get(cv2.CAP_PROP_FOURCC))

    def open(self):
        '''Opens the capture device, configures it, and primes it.'''
        # DirectShow is selected by adding cv2.CAP_DSHOW to an *integer*
        # device index. BUGFIX: the previous check (`type(self.id) is
        # str`) guaranteed a TypeError whenever it fired, because
        # CAP_DSHOW is an int and cannot be added to a string; DSHOW
        # never applies to file-path (string) ids.
        if self.config['use_dshow'] and isinstance(self.id, int):
            self.id += cv2.CAP_DSHOW
        self.input = cv2.VideoCapture(self.id)
        if not self.input.isOpened():
            raise Exception(f'USB Camera {self.id} failed to open.')
        else:
            self.configure()
        # the first read is usually delayed on linux/windows by ~0.4 seconds
        # prime the opencv object for delayless reads
        self.input.read()

    def read(self):
        '''
        Reads in frames.
        Converts frames from BGR to the more commonly used RGB format.

        Returns:
            tuple: (rgb_frame, capture timestamp); re-raises on failure
            after logging.
        '''
        frame = None
        try:
            ret, frame = self.input.read()
            if not ret:
                raise Exception(f'Opencv VideoCapture ret error: {ret}')
            # bgr to rgb
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        except Exception as e:
            log.error(f'{str(self)} read error: {e}')
            raise
        return frame, time.time()

    def close(self):
        '''Releases the underlying VideoCapture, if open.'''
        if self.input:
            self.input.release()
            self.input = None
| 31.71875 | 127 | 0.58555 |
d5fdb200d1cfbe99b3c54ff271cc7ed2a3ba7588 | 555 | py | Python | servicios_profesionales/manage.py | acs-um/ServiciosProfesionales | b29d67cda42f3d975a8abaf58203d92c9d1a3f57 | [
"MIT"
] | 1 | 2018-05-24T23:33:02.000Z | 2018-05-24T23:33:02.000Z | servicios_profesionales/manage.py | acs-um/ServiciosProfesionales | b29d67cda42f3d975a8abaf58203d92c9d1a3f57 | [
"MIT"
] | 22 | 2018-05-07T20:46:27.000Z | 2018-06-10T23:59:49.000Z | servicios_profesionales/manage.py | acs-um/ServiciosProfesionales | b29d67cda42f3d975a8abaf58203d92c9d1a3f57 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
def main():
    """Run Django administrative tasks for this project."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "servicios_profesionales.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| 34.6875 | 87 | 0.695495 |
5edc1c9824570d8d7f267678df70dc479291422e | 366 | py | Python | dukaan/apps/udaan_app/routers.py | deepdik/udaan | 0315212072d98d9a5d487b2e212ecb77e34740b7 | [
"BSD-2-Clause"
] | null | null | null | dukaan/apps/udaan_app/routers.py | deepdik/udaan | 0315212072d98d9a5d487b2e212ecb77e34740b7 | [
"BSD-2-Clause"
] | null | null | null | dukaan/apps/udaan_app/routers.py | deepdik/udaan | 0315212072d98d9a5d487b2e212ecb77e34740b7 | [
"BSD-2-Clause"
] | null | null | null | """
"""
# URL routing for the udaan_app API: a DRF SimpleRouter generates the
# routes for the quiz and question viewsets.
from django.urls import path, include
from rest_framework.routers import SimpleRouter
from dukaan.apps.udaan_app.views import (QuizViewSet,
                                         QuestionViewSet)
router = SimpleRouter()
router.register(r'quiz', QuizViewSet, basename='quiz')
router.register(r'question', QuestionViewSet, basename='question')
# No hand-written patterns; everything comes from the router below.
urlpatterns = [
]
urlpatterns += router.urls
| 17.428571 | 66 | 0.765027 |
3d238b28d22aa4aa30cdb0fdaa1619eea3a44f3f | 7,587 | py | Python | python/classifier.py | guilhermeleobas/recastd | 385795df308fdea2f1dc02863dce7c7547167efc | [
"MIT"
] | null | null | null | python/classifier.py | guilhermeleobas/recastd | 385795df308fdea2f1dc02863dce7c7547167efc | [
"MIT"
] | null | null | null | python/classifier.py | guilhermeleobas/recastd | 385795df308fdea2f1dc02863dce7c7547167efc | [
"MIT"
] | null | null | null | import os
from random import shuffle
from collections import deque
import networkx as nx
import numpy as np
import graph_methods as gm
# Relationship classes assigned by the classifier. The ``global``
# statements that used to precede these assignments were no-ops at
# module scope and have been removed. (The name ``random`` shadows the
# stdlib module name; kept because the rest of this module relies on it.)
random = 0
social = 1
friends = 2
acquaintance = 3
bridges = 4
def get_delta(temporal_graphs):
    """Total 'delta' (tf - ti) accumulated per edge over all snapshots.

    Returns a dict mapping each (source, target) pair to the sum of its
    edge 'delta' attribute across every temporal graph it appears in.
    """
    totals = dict()
    for snapshot in temporal_graphs.itervalues():
        for (src, dst) in snapshot.edges_iter():
            value = snapshot.get_edge_data(src, dst)['delta']
            if (src, dst) in totals:
                totals[(src, dst)] += value
            else:
                totals[(src, dst)] = value
    return totals
def set_delta(random_graphs, temporal_graphs):
    """Transplant shuffled 'delta' values from temporal_graphs onto
    the edges of random_graphs, preserving the delta distribution of
    the observed networks in the randomized ones."""
    l = deque()
    aux = deque()
    # Collect every delta twice: `l` is consumed first; `aux` is an
    # independently shuffled refill used if `l` runs out.
    for graph in temporal_graphs.itervalues():
        for (s, t) in graph.edges_iter():
            l.append( graph.get_edge_data(s, t)['delta'] )
            aux.append( graph.get_edge_data(s, t)['delta'] )
    # print ("before len(t) = {}".format(len(l)))
    shuffle(l)
    shuffle(aux)
    for graph in random_graphs.itervalues():
        nx.set_edge_attributes (graph, 'delta', None)
        for (s, t) in graph.edges_iter():
            try:
                x = l.popleft()
            except:
                # Out of fresh values: switch to the second shuffled
                # copy, so deltas may repeat from this point on.
                # NOTE(review): bare except also masks unrelated errors.
                l = aux
                x = l.popleft()
            graph[s][t]['delta'] = x
    # print ("after len(t) = {}".format(len(l)))
def calc_ecdf(observations, linspace=None):
    """Empirical *complementary* CDF of ``observations``.

    Returns a list of ``(fraction, x)`` pairs where ``fraction`` is the
    share of observations >= x, evaluated on ``linspace`` if given,
    otherwise on 100 points spanning the observed range.
    """
    obs = np.array(observations)
    obs.sort()
    total = len(obs)

    def survival(x):
        # observations >= x, as a fraction of the sample
        remaining = total - np.searchsorted(obs, x)
        return float(remaining) / total

    if linspace is None:
        grid = np.linspace(obs.min(), obs.max(), 100)
    else:
        grid = linspace
    return [(survival(x), x) for x in grid]
def edge_persistence(l_graphs):
    """Fraction of temporal snapshots in which each edge appears.

    Both orientations (x, y) and (y, x) are stored so lookups work in
    either direction. PORTABILITY FIX: the internal accumulator was
    iterated with ``dict.iteritems()``, which exists only on Python 2
    and broke the function there even for callers whose graph
    containers duck-type the Py2 API; plain key iteration behaves
    identically on Python 2 and also runs on Python 3. ``l_graphs``
    itself is still accessed through ``.iteritems()``, matching its
    callers.
    """
    counts = dict()
    total = len(l_graphs)
    for _key, snapshot in l_graphs.iteritems():
        for edge in snapshot.edges_iter():
            x, y = edge[0], edge[1]
            if edge in counts:
                counts[(x, y)] += 1
                counts[(y, x)] += 1
            else:
                counts[(x, y)] = 1
                counts[(y, x)] = 1
    for key in counts:
        counts[key] = float(counts[key]) / total
    return counts
def topological_overlap(agg_tgraph):
    """Jaccard similarity of the endpoint neighborhoods of each edge.

    For every edge (i, j) of the aggregated graph, computes
    |N(i) & N(j)| / |N(i) | N(j)| and stores the value under both
    (i, j) and (j, i). Edges whose endpoints have no neighbors at all
    map to 0.0.
    """
    overlap = dict()
    for edge in agg_tgraph.edges_iter():
        i, j = edge[0], edge[1]
        neighbors_i = set(agg_tgraph.neighbors(i))
        neighbors_j = set(agg_tgraph.neighbors(j))
        shared = len(neighbors_i & neighbors_j)
        combined = len(neighbors_i | neighbors_j)
        value = 0.0 if combined <= 0 else float(shared) / combined
        overlap[(i, j)] = overlap[(j, i)] = value
    return overlap
def get_x(d, p_rnd):
    """First grid value x whose complementary-ECDF fraction drops to
    ``p_rnd`` or below; falls back to the last grid point."""
    curve = calc_ecdf(d.values())
    for fraction, x in curve:
        if fraction <= p_rnd:
            return x
    return curve[-1][1]
def classify_to(to, random_to, p_rnd):
    """Label each edge's topological overlap as ``social`` or ``random``.

    The threshold is taken from the null-model distribution
    ``random_to`` at level ``p_rnd``: overlaps strictly above it are
    social, everything else random.
    """
    threshold = get_x(random_to, p_rnd)
    labels = dict()
    for edge, value in to.iteritems():
        labels[edge] = social if value > threshold else random
    return labels
def classify_ep(ep, random_ep, p_rnd):
    """Label each edge's persistence as ``social`` or ``random``.

    Persistence values strictly above the null-model threshold (from
    ``random_ep`` at level ``p_rnd``) are social; the rest are random.
    """
    threshold = get_x(random_ep, p_rnd)
    labels = dict()
    for edge, value in ep.iteritems():
        labels[edge] = social if value > threshold else random
    return labels
def classify_relationships(rep, rto):
    """Combine persistence and overlap labels into relationship types.

    social persistence + social overlap -> friends
    random persistence + social overlap -> acquaintance
    social persistence + random overlap -> bridges
    random persistence + random overlap -> random

    ``rto`` is assumed to cover at least every edge present in ``rep``.
    """
    relations = dict()
    for edge in rep.iterkeys():
        pair = (rep[edge], rto[edge])
        if pair == (social, social):
            relations[edge] = friends
        elif pair == (random, social):
            relations[edge] = acquaintance
        elif pair == (social, random):
            relations[edge] = bridges
        else:
            relations[edge] = random
    return relations
def get_ep(temporal_graphs):
    """Convenience wrapper: edge persistence of the temporal snapshots."""
    return edge_persistence(temporal_graphs)
def get_to(aggregate_graph):
    """Convenience wrapper: topological overlap of the aggregated graph."""
    return topological_overlap(aggregate_graph)
def classify(ep, random_ep, to, random_to, p_rnd):
    """Classify every edge in one pass using both null-model thresholds.

    Equivalent to feeding classify_ep and classify_to through
    classify_relationships, but computed directly.
    """
    ep_threshold = get_x(random_ep, p_rnd)
    to_threshold = get_x(random_to, p_rnd)
    relations = dict()
    for edge in ep.iterkeys():
        persistent = ep[edge] > ep_threshold
        overlapping = to[edge] > to_threshold
        if persistent and overlapping:
            relations[edge] = friends
        elif overlapping:
            relations[edge] = acquaintance
        elif persistent:
            relations[edge] = bridges
        else:
            relations[edge] = random
    return relations
def combine_relations(relations):
    """Count how many edges fall into each relationship class."""
    counts = {friends: 0, acquaintance: 0, bridges: 0, random: 0}
    for _edge, relation in relations.iteritems():
        counts[relation] += 1
    return counts
def combine_relations_with_time(relations):
    """Count edges per temporal relationship signature.

    Keys are the eight three-letter codes over {p, n}, one letter per
    time period (e.g. 'ppn'). Codes absent from ``relations`` stay 0.
    """
    codes = ['ppp', 'ppn', 'pnp', 'pnn', 'npp', 'npn', 'nnp', 'nnn']
    counts = dict.fromkeys(codes, 0)
    for _edge, relation in relations.iteritems():
        counts[relation] += 1
    return counts
def main():
    """Placeholder entry point; always reports success."""
    return 0


if __name__ == '__main__':
    main()
| 24.713355 | 132 | 0.550811 |
f98b530cc2c8a75f55155f49ece37e5ff087050a | 1,303 | py | Python | app/core/tests/test_admin.py | wamauri/recipe-app-api | 953e62cf71c80b4a6615c93f894110c6f9922a48 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | wamauri/recipe-app-api | 953e62cf71c80b4a6615c93f894110c6f9922a48 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | wamauri/recipe-app-api | 953e62cf71c80b4a6615c93f894110c6f9922a48 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    # Exercises the Django admin against the project's custom user
    # model (email-based, with a `name` field).

    def setUp(self):
        # Log the client in as a superuser once; self.user is the plain
        # account the admin pages are checked against.
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@meuq.com.br',
            password='password123')
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='test@meuq.com.br',
            password='password123',
            name='Test user full name')

    def test_users_listed(self):
        """Test that users are listed on user page"""
        url = reverse('admin:core_user_changelist')
        res = self.client.get(url)
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)

    def test_user_change_page(self):
        """Test that the user edit page works"""
        url = reverse('admin:core_user_change', args=[self.user.id])
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)

    def test_create_user_page(self):
        """Test that the create user page works"""
        url = reverse('admin:core_user_add')
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
| 32.575 | 68 | 0.647736 |
89fd50bee85d252ba62d968767949eb1058af0bf | 2,568 | py | Python | image_cluster/pipeline/reader.py | kowaalczyk/image-cluster | 2d034dd154491da8b22ee6662c9dcf9500283186 | [
"BSD-2-Clause"
] | 1 | 2019-05-09T10:30:23.000Z | 2019-05-09T10:30:23.000Z | image_cluster/pipeline/reader.py | kowaalczyk/image-cluster | 2d034dd154491da8b22ee6662c9dcf9500283186 | [
"BSD-2-Clause"
] | null | null | null | image_cluster/pipeline/reader.py | kowaalczyk/image-cluster | 2d034dd154491da8b22ee6662c9dcf9500283186 | [
"BSD-2-Clause"
] | null | null | null | from typing import Union, Iterable
from pathlib import Path
from sklearn.base import BaseEstimator, TransformerMixin
import cv2 as cv
import numpy as np
from image_cluster.pipeline.utils import NoFitMixin, VerboseMixin
from image_cluster.types import ImageData
class MetadataReader(BaseEstimator, TransformerMixin, NoFitMixin):
    """
    Reads metadata, yielding ImageData.
    Metadata path can be specified either
    during construction or during transform,
    for easier experimenting.
    """
    def __init__(self, meta_path: Union[str, Path] = None):
        # Parse eagerly when a path is given; otherwise metadata_ stays
        # unset until transform() receives one.
        self._store_meta(meta_path)

    def transform(
            self,
            meta_path: Union[str, Path] = None
    ) -> Iterable[ImageData]:
        # Re-parses only when a new path is supplied. Called with
        # meta_path=None it returns the previously parsed metadata_
        # (AttributeError if no path was ever provided).
        self._store_meta(meta_path)
        return self.metadata_

    def _store_meta(self, meta_path: Union[str, Path]):
        # No-op when meta_path is None so transform(None) reuses state.
        if meta_path is not None:
            with open(meta_path, 'r') as f:
                self.metadata_ = list(
                    map(self.parse_meta_line, f.readlines())
                )

    def parse_meta_line(self, line: str) -> ImageData:
        # Each metadata line is an image path; the file name doubles as
        # the image identifier.
        img_path = Path(line.strip())
        return ImageData(
            img_path.name,
            img_path.resolve()
        )
class BaseImageReader(
        BaseEstimator,
        TransformerMixin,
        NoFitMixin,
        VerboseMixin
):
    """
    Common scaffolding for image readers: subclasses implement
    ``strategy`` for a single image and ``transform`` applies it to
    every item, with progress reporting when verbose.
    """
    def __init__(self, verbose: bool = False):
        self.verbose = verbose

    def transform(
            self,
            image_data: Iterable[ImageData]
    ) -> Iterable[ImageData]:
        loaded = []
        for item in self._progress(image_data):
            loaded.append(self.strategy(item))
        self.images_ = loaded
        return self.images_

    def strategy(self, image_data: ImageData) -> ImageData:
        # Subclasses must implement the per-image read/convert step.
        raise NotImplementedError()
class MaskImageReader(BaseImageReader):
    """
    Reads image to binary mask, 0 = white, 1 = black.
    """
    def strategy(self, image_data):
        img = cv.imread(str(image_data.path), cv.IMREAD_GRAYSCALE)
        # Otsu's method chooses the threshold automatically; pixels
        # above it become 255, the rest 0.
        _, img = cv.threshold(
            img,
            0,
            255,
            cv.THRESH_BINARY+cv.THRESH_OTSU
        )
        # Map 255 (light) -> 0 and 0 (dark) -> 1 to get the 0/1 mask.
        image_data.image = (1 - (img // 255)).astype(np.uint8)
        return image_data
class GreyscaleImageReader(BaseImageReader):
    """
    Reads image as greyscale, 0. = white, 1. = black.
    """
    def strategy(self, image_data):
        img = cv.imread(str(image_data.path), cv.IMREAD_GRAYSCALE)
        # Invert and rescale intensities to floats in [0, 1]; darker
        # pixels map to larger values. ``float`` replaces ``np.float``,
        # which was a deprecated alias of the builtin and was removed
        # in NumPy 1.24 -- the result dtype (float64) is unchanged.
        image_data.image = 1 - img.astype(float) / 255
        return image_data
| 27.319149 | 66 | 0.614875 |
b0e0e72cb37c5fc6bf10ae98e596b4da1e354268 | 1,055 | py | Python | opencypher/ast/function.py | globality-corp/opencypher | b60bf526fb6d5ea6c731aab867f714f3e10f629b | [
"Apache-2.0"
] | 6 | 2019-01-31T18:55:46.000Z | 2020-12-02T14:53:45.000Z | opencypher/ast/function.py | globality-corp/opencypher | b60bf526fb6d5ea6c731aab867f714f3e10f629b | [
"Apache-2.0"
] | 1 | 2020-12-04T00:18:20.000Z | 2020-12-04T00:18:20.000Z | opencypher/ast/function.py | globality-corp/opencypher | b60bf526fb6d5ea6c731aab867f714f3e10f629b | [
"Apache-2.0"
] | 1 | 2019-03-17T03:46:26.000Z | 2019-03-17T03:46:26.000Z | from dataclasses import dataclass
from typing import Iterable
from opencypher.ast.expression import Expression
from opencypher.ast.parameter import Parameter, Parameterized
from opencypher.ast.collection import NonEmptySequence
from opencypher.ast.formatting import str_join
from opencypher.ast.naming import FunctionName
@dataclass(frozen=True)
class FunctionInvocation(Parameterized):
    """
    FunctionInvocation = FunctionName, [SP], '(', [SP], [(D,I,S,T,I,N,C,T), [SP]], [Expression, [SP], { ',', [SP], Expression, [SP] }], ')' ; # noqa: E501
    """
    name: FunctionName
    expressions: NonEmptySequence[Expression]
    distinct: bool = False

    def __str__(self) -> str:
        modifier = "DISTINCT " if self.distinct else ""
        return f"{self.name}({modifier}{str_join(self.expressions)})"

    def iter_parameters(self) -> Iterable[Parameter]:
        if not self.expressions:
            return
        for expression in self.expressions:
            yield from expression.iter_parameters()
| 34.032258 | 155 | 0.686256 |
88b8a430146fd652d05d34e12a31e39afc5faaa1 | 1,866 | py | Python | onnxruntime_extensions/pnp/_base.py | TruscaPetre/onnxruntime-extensions | d0ff193eec0d9b5bc07c961ebc451c34d0228741 | [
"MIT"
] | null | null | null | onnxruntime_extensions/pnp/_base.py | TruscaPetre/onnxruntime-extensions | d0ff193eec0d9b5bc07c961ebc451c34d0228741 | [
"MIT"
] | null | null | null | onnxruntime_extensions/pnp/_base.py | TruscaPetre/onnxruntime-extensions | d0ff193eec0d9b5bc07c961ebc451c34d0228741 | [
"MIT"
] | null | null | null | import io
import onnx
import torch
from typing import Any
from onnx.onnx_pb import TensorProto
class ProcessingModule(torch.nn.Module):
    """Base class for torch modules exported to ONNX with the custom
    ai.onnx.contrib operators registered."""

    @staticmethod
    def _argsort(g, x, dim, descending):
        # Symbolic mapping of torch argsort onto the contrib ArgSort op.
        # NOTE(review): ``descending`` is not forwarded here — confirm
        # the contrib op's default ordering matches callers' intent.
        return g.op('ai.onnx.contrib::ArgSort', x, dim)

    @classmethod
    def register_customops(cls):
        """Register the custom symbolic functions, once per class."""
        if hasattr(cls, 'loaded'):
            return True
        torch.onnx.register_custom_op_symbolic('::argsort', cls._argsort, 1)
        # ... more
        cls.loaded = True
        return True

    def export(self, opset_version, *args, **kwargs):
        """Export this module and return the ONNX ModelProto.

        Keyword-only extras (popped before forwarding to torch):
          script_mode -- export via torch.jit.script instead of tracing.
          ofname      -- if given, also save the model to this path.
        """
        mod = self
        script_model = kwargs.pop('script_mode', False)
        if script_model:
            mod = torch.jit.script(mod)
        ofname = kwargs.pop('ofname', None)
        with io.BytesIO() as f:
            torch.onnx.export(mod, args, f,
                              training=torch.onnx.TrainingMode.EVAL,
                              opset_version=opset_version,
                              **kwargs)
            mdl = onnx.load_model(io.BytesIO(f.getvalue()))
            if ofname is not None:
                # BUGFIX: the original called ofname.replace('.onnx',
                # '.1.onnx') and discarded the result — str.replace is
                # not in-place, so that line was a no-op and has been
                # removed. The model is saved to the path as given.
                onnx.save_model(mdl, ofname)
            return mdl
class ProcessingScriptModule(ProcessingModule):
    """A ProcessingModule that is always exported in TorchScript
    script mode (torch.jit.script) rather than traced."""

    def export(self, opset_version, *args, **kwargs):
        # Delegate to the base exporter, forcing script-mode export.
        return ProcessingModule.export(
            self, opset_version, *args, script_mode=True, **kwargs)
class CustomFunction(torch.autograd.Function):
    # Base class for autograd functions that export as ai.onnx.contrib
    # ONNX ops; the op name is derived from the subclass name.
    @staticmethod
    def jvp(ctx: Any, *grad_inputs: Any) -> Any:
        # Forward-mode derivative intentionally left unimplemented.
        pass
    @staticmethod
    def backward(ctx: Any, *grad_outputs: Any) -> Any:
        # Identity gradient: pass upstream gradients straight through.
        return grad_outputs
    @classmethod
    def forward(cls, ctx: Any, *args: Any, **kwargs: Any) -> Any:
        # Placeholder; concrete subclasses supply the computation.
        pass
    @classmethod
    def symbolic(cls, g, *args):
        # Map this function onto a contrib ONNX op named after the class.
        return g.op('ai.onnx.contrib::' + cls.__name__, *args)
tensor_data_type = TensorProto
| 27.043478 | 79 | 0.594855 |
20dc99a9763ac7f7eaaf35c4943ad06e7a923d91 | 1,832 | py | Python | data/p2DJ/New/program/cirq/startCirq153.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/cirq/startCirq153.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/cirq/startCirq153.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=11
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Assemble the fixed demo circuit on *input_qubit* and finish with
    a joint measurement keyed 'result'. (*n* is currently unused.)"""
    circuit = cirq.Circuit()  # circuit begin
    circuit.append(cirq.H.on(input_qubit[0]))  # number=1
    circuit.append(cirq.H.on(input_qubit[1]))  # number=6
    circuit.append(cirq.CZ.on(input_qubit[0], input_qubit[1]))  # number=7
    circuit.append(cirq.H.on(input_qubit[1]))  # number=8
    circuit.append(cirq.Y.on(input_qubit[1]))  # number=2
    circuit.append(cirq.CNOT.on(input_qubit[0], input_qubit[1]))  # number=4
    circuit.append(cirq.Y.on(input_qubit[1]))  # number=3
    circuit.append(cirq.CNOT.on(input_qubit[1], input_qubit[0]))  # number=9
    circuit.append(cirq.CNOT.on(input_qubit[1], input_qubit[0]))  # number=10
    # circuit end
    circuit.append(cirq.measure(*input_qubit, key='result'))
    return circuit
def bitstring(bits):
    """Render a sequence of bit-like values as a '0'/'1' string."""
    digits = [str(int(bit)) for bit in bits]
    return "".join(digits)
if __name__ == '__main__':
    # Build the demo circuit, simulate it, and dump the measurement
    # histogram plus the circuit itself to a CSV report file.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for Google's Sycamore gate set (sqrt-iswap decomposition).
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    # Fold each repetition's bits into a '0'/'1' string for the histogram.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq153.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
2c2549cb783f7ce8e6fbbda4f0b6c43980de256c | 40,822 | py | Python | mailpile/plugins/setup_magic.py | jmvrbanac/Mailpile | 9a74950c1b6b1e7da8c1f8dc396f9d1ce330d9e8 | [
"Apache-2.0"
] | null | null | null | mailpile/plugins/setup_magic.py | jmvrbanac/Mailpile | 9a74950c1b6b1e7da8c1f8dc396f9d1ce330d9e8 | [
"Apache-2.0"
] | null | null | null | mailpile/plugins/setup_magic.py | jmvrbanac/Mailpile | 9a74950c1b6b1e7da8c1f8dc396f9d1ce330d9e8 | [
"Apache-2.0"
] | 1 | 2020-04-13T17:46:12.000Z | 2020-04-13T17:46:12.000Z | import os
import random
import sys
import datetime
from urllib import urlencode
import mailpile.auth
from mailpile.defaults import CONFIG_RULES
from mailpile.i18n import ListTranslations, ActivateTranslation, gettext
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.plugins import PluginManager
from mailpile.plugins import PLUGINS
from mailpile.plugins.contacts import AddProfile
from mailpile.plugins.contacts import ListProfiles
from mailpile.plugins.migrate import Migrate
from mailpile.plugins.tags import AddTag
from mailpile.commands import Command
from mailpile.config import SecurePassphraseStorage
from mailpile.crypto.gpgi import GnuPG, SignatureInfo, EncryptionInfo
from mailpile.crypto.gpgi import GnuPGKeyGenerator, GnuPGKeyEditor
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
from mailpile.smtp_client import SendMail, SendMailError
from mailpile.urlmap import UrlMap
from mailpile.ui import Session, SilentInteraction
from mailpile.util import *
_ = lambda s: s
_plugins = PluginManager(builtin=__file__)
##[ Commands ]################################################################
class SetupMagic(Command):
    """Perform initial setup"""
    SYNOPSIS = (None, None, None, None)
    ORDER = ('Internals', 0)
    LOG_PROGRESS = True
    # Default tag set created on first run; keys are tag names, values
    # are the attributes applied to each tag after creation.
    TAGS = {
        'New': {
            'type': 'unread',
            'label': False,
            'display': 'invisible',
            'icon': 'icon-new',
            'label_color': '03-gray-dark',
            'name': _('New'),
        },
        'Inbox': {
            'type': 'inbox',
            'display': 'priority',
            'display_order': 2,
            'icon': 'icon-inbox',
            'label_color': '06-blue',
            'name': _('Inbox'),
        },
        'Blank': {
            'type': 'blank',
            'flag_editable': True,
            'display': 'invisible',
            'name': _('Blank'),
        },
        'Drafts': {
            'type': 'drafts',
            'flag_editable': True,
            'display': 'priority',
            'display_order': 1,
            'icon': 'icon-compose',
            'label_color': '03-gray-dark',
            'name': _('Drafts'),
        },
        'Outbox': {
            'type': 'outbox',
            'display': 'priority',
            'display_order': 3,
            'icon': 'icon-outbox',
            'label_color': '06-blue',
            'name': _('Outbox'),
        },
        'Sent': {
            'type': 'sent',
            'display': 'priority',
            'display_order': 4,
            'icon': 'icon-sent',
            'label_color': '03-gray-dark',
            'name': _('Sent'),
        },
        'Spam': {
            'type': 'spam',
            'flag_hides': True,
            'display': 'priority',
            'display_order': 5,
            'icon': 'icon-spam',
            'label_color': '10-orange',
            'name': _('Spam'),
        },
        'MaybeSpam': {
            'display': 'invisible',
            'icon': 'icon-spam',
            'label_color': '10-orange',
            'name': _('MaybeSpam'),
        },
        'Ham': {
            'type': 'ham',
            'display': 'invisible',
            'name': _('Ham'),
        },
        'Trash': {
            'type': 'trash',
            'flag_hides': True,
            'display': 'priority',
            'display_order': 6,
            'icon': 'icon-trash',
            'label_color': '13-brown',
            'name': _('Trash'),
        },
        # These are magical tags that perform searches and show
        # messages in contextual views.
        'All Mail': {
            'type': 'tag',
            'icon': 'icon-logo',
            'label_color': '06-blue',
            'search_terms': 'all:mail',
            'name': _('All Mail'),
            'display_order': 1000,
        },
        'Photos': {
            'type': 'tag',
            'icon': 'icon-photos',
            'label_color': '08-green',
            'search_terms': 'att:jpg',
            'name': _('Photos'),
            'template': 'photos',
            'display_order': 1001,
        },
        'Files': {
            'type': 'tag',
            'icon': 'icon-document',
            'label_color': '06-blue',
            'search_terms': 'has:attachment',
            'name': _('Files'),
            'template': 'files',
            'display_order': 1002,
        },
        'Links': {
            'type': 'tag',
            'icon': 'icon-links',
            'label_color': '12-red',
            'search_terms': 'http',
            'name': _('Links'),
            'display_order': 1003,
        },
        # These are internal tags, used for tracking user actions on
        # messages, as input for machine learning algorithms. These get
        # automatically added, and may be automatically removed as well
        # to keep the working sets reasonably small.
        'mp_rpl': {'type': 'replied', 'label': False, 'display': 'invisible'},
        'mp_fwd': {'type': 'fwded', 'label': False, 'display': 'invisible'},
        'mp_tag': {'type': 'tagged', 'label': False, 'display': 'invisible'},
        'mp_read': {'type': 'read', 'label': False, 'display': 'invisible'},
        'mp_ham': {'type': 'ham', 'label': False, 'display': 'invisible'},
    }
    def basic_app_config(self, session,
                         save_and_update_workers=True,
                         want_daemons=True):
        """Apply the baseline configuration: local mailbox, the default
        tags, crypto status tags, core plugins and vcard importers."""
        # Create local mailboxes
        session.config.open_local_mailbox(session)
        # Create standard tags and filters
        created = []
        for t in self.TAGS:
            if not session.config.get_tag_id(t):
                AddTag(session, arg=[t]).run(save=False)
                created.append(t)
            session.config.get_tag(t).update(self.TAGS[t])
        # One invisible attribute tag per signature/encryption status.
        for stype, statuses in (('sig', SignatureInfo.STATUSES),
                                ('enc', EncryptionInfo.STATUSES)):
            for status in statuses:
                tagname = 'mp_%s-%s' % (stype, status)
                if not session.config.get_tag_id(tagname):
                    AddTag(session, arg=[tagname]).run(save=False)
                    created.append(tagname)
                session.config.get_tag(tagname).update({
                    'type': 'attribute',
                    'display': 'invisible',
                    'label': False,
                })
        if 'New' in created:
            session.ui.notify(_('Created default tags'))
        # Import all the basic plugins
        reload_config = False
        for plugin in PLUGINS:
            if plugin not in session.config.sys.plugins:
                session.config.sys.plugins.append(plugin)
                reload_config = True
        for plugin in session.config.plugins.WANTED:
            if plugin in session.config.plugins.available():
                session.config.sys.plugins.append(plugin)
        if reload_config:
            with session.config._lock:
                session.config.save()
                session.config.load(session)
        try:
            # If spambayes is not installed, this will fail
            import mailpile.plugins.autotag_sb
            if 'autotag_sb' not in session.config.sys.plugins:
                session.config.sys.plugins.append('autotag_sb')
                session.ui.notify(_('Enabling spambayes autotagger'))
        except ImportError:
            session.ui.warning(_('Please install spambayes '
                                 'for super awesome spam filtering'))
        vcard_importers = session.config.prefs.vcard.importers
        if not vcard_importers.gravatar:
            vcard_importers.gravatar.append({'active': True})
            session.ui.notify(_('Enabling gravatar image importer'))
        gpg_home = os.path.expanduser('~/.gnupg')
        if os.path.exists(gpg_home) and not vcard_importers.gpg:
            vcard_importers.gpg.append({'active': True,
                                        'gpg_home': gpg_home})
            session.ui.notify(_('Importing contacts from GPG keyring'))
        # Wire spambayes up as the default spam autotagger.
        if ('autotag_sb' in session.config.sys.plugins and
                len(session.config.prefs.autotag) == 0):
            session.config.prefs.autotag.append({
                'match_tag': 'spam',
                'unsure_tag': 'maybespam',
                'tagger': 'spambayes',
                'trainer': 'spambayes'
            })
            session.config.prefs.autotag[0].exclude_tags[0] = 'ham'
        if save_and_update_workers:
            session.config.save()
            session.config.prepare_workers(session, daemons=want_daemons)
    def setup_command(self, session, do_gpg_stuff=False):
        """Run first-time setup: migrations, basic app config and,
        optionally, GnuPG key discovery plus profile creation."""
        do_gpg_stuff = do_gpg_stuff or ('do_gpg_stuff' in self.args)
        # Stop the workers...
        want_daemons = session.config.cron_worker is not None
        session.config.stop_workers()
        # Perform any required migrations
        Migrate(session).run(before_setup=True, after_setup=False)
        # Basic app config, tags, plugins, etc.
        self.basic_app_config(session,
                              save_and_update_workers=False,
                              want_daemons=want_daemons)
        # Assumption: If you already have secret keys, you want to
        # use the associated addresses for your e-mail.
        # If you don't already have secret keys, you should have
        # one made for you, if GnuPG is available.
        # If GnuPG is not available, you should be warned.
        if do_gpg_stuff:
            gnupg = GnuPG(None)
            accepted_keys = []
            if gnupg.is_available():
                keys = gnupg.list_secret_keys()
                cutoff = (datetime.date.today() + datetime.timedelta(days=365)
                          ).strftime("%Y-%m-%d")
                for key, details in keys.iteritems():
                    # Ignore revoked/expired/disabled keys.
                    revoked = details.get('revocation_date')
                    expired = details.get('expiration_date')
                    if (details.get('disabled') or
                            (revoked and revoked <= cutoff) or
                            (expired and expired <= cutoff)):
                        continue
                    accepted_keys.append(key)
                    for uid in details["uids"]:
                        if "email" not in uid or uid["email"] == "":
                            continue
                        if uid["email"] in [x["email"]
                                            for x in session.config.profiles]:
                            # Don't set up the same e-mail address twice.
                            continue
                        # FIXME: Add route discovery mechanism.
                        profile = {
                            "email": uid["email"],
                            "name": uid["name"],
                        }
                        session.config.profiles.append(profile)
                    # First usable encryption key becomes the config
                    # encryption recipient.
                    if (session.config.prefs.gpg_recipient in (None, '', '!CREATE')
                            and details["capabilities_map"]["encrypt"]):
                        session.config.prefs.gpg_recipient = key
                        session.ui.notify(_('Encrypting config to %s') % key)
                    if session.config.prefs.crypto_policy == 'none':
                        session.config.prefs.crypto_policy = 'openpgp-sign'
                if len(accepted_keys) == 0:
                    # FIXME: Start background process generating a key once a
                    # user has supplied a name and e-mail address.
                    pass
            else:
                session.ui.warning(_('Oh no, PGP/GPG support is unavailable!'))
        # If we have a GPG key, but no master key, create it
        self.make_master_key()
        # Perform any required migrations
        Migrate(session).run(before_setup=False, after_setup=True)
        session.config.save()
        session.config.prepare_workers(session, daemons=want_daemons)
        return self._success(_('Performed initial Mailpile setup'))
    def make_master_key(self):
        """Generate and store the Mailpile master secret once a GnuPG
        recipient is configured; returns True when a key was made."""
        session = self.session
        if (session.config.prefs.gpg_recipient not in (None, '', '!CREATE')
                and not session.config.master_key
                and not session.config.prefs.obfuscate_index):
            #
            # This secret is arguably the most critical bit of data in the
            # app, it is used as an encryption key and to seed hashes in
            # a few places. As such, the user may need to type this in
            # manually as part of data recovery, so we keep it reasonably
            # sized and devoid of confusing chars.
            #
            # The strategy below should give about 281 bits of randomness:
            #
            # import math
            # math.log((25 + 25 + 8) ** (12 * 4), 2) == 281.183...
            #
            secret = ''
            chars = 12 * 4
            while len(secret) < chars:
                secret = sha512b64(os.urandom(1024),
                                   '%s' % session.config,
                                   '%s' % time.time())
                secret = CleanText(secret,
                                   banned=CleanText.NONALNUM + 'O01l'
                                   ).clean[:chars]
            session.config.master_key = secret
            if self._idx() and self._idx().INDEX:
                session.ui.warning(_('Unable to obfuscate search index '
                                     'without losing data. Not indexing '
                                     'encrypted mail.'))
            else:
                session.config.prefs.obfuscate_index = True
                session.config.prefs.index_encrypted = True
                session.ui.notify(_('Obfuscating search index and enabling '
                                    'indexing of encrypted e-mail. Yay!'))
            return True
        else:
            return False
    def command(self, *args, **kwargs):
        # Entry point; refuses to run while the app is in lockdown.
        session = self.session
        if session.config.sys.lockdown:
            return self._error(_('In lockdown, doing nothing.'))
        return self.setup_command(session, *args, **kwargs)
class TestableWebbable(SetupMagic):
    # Shared plumbing for the web-facing setup commands: testing-mode
    # helpers plus redirection to the next step of the setup flow.
    HTTP_AUTH_REQUIRED = 'Maybe'
    HTTP_CALLABLE = ('GET', )
    HTTP_QUERY_VARS = {
        '_path': 'Redirect path'
    }
    HTTP_POST_VARS = {
        'testing': 'Yes or No, if testing',
        'advance': 'Yes or No, advance setup flow',
    }
    TRUTHY = {
        '0': False, 'no': False, 'fuckno': False, 'false': False,
        '1': True, 'yes': True, 'hellyeah': True, 'true': True,
    }
    def _advance(self):
        # Redirect the browser to the next incomplete setup step,
        # carrying along the non-POST query variables.
        path = self.data.get('_path', [None])[0]
        data = dict([(k, v) for k, v in self.data.iteritems()
                     if k not in self.HTTP_POST_VARS
                     and k not in ('_method',)])
        nxt = Setup.Next(self.session.config, None, needed_auth=False)
        if nxt:
            url = '/%s/' % nxt.SYNOPSIS[2]
        elif path and path != '/%s/' % Setup.SYNOPSIS[2]:
            # Use the same redirection logic as the Authenticator
            # NOTE(review): this branch never assigns `url`; it appears
            # to rely on RedirectBack raising an exception -- confirm,
            # otherwise the code below hits an unbound local.
            mailpile.auth.Authenticate.RedirectBack(path, data)
        else:
            url = '/'
        qs = urlencode([(k, v) for k, vl in data.iteritems() for v in vl])
        raise UrlRedirectException(''.join([url, '?%s' % qs if qs else '']))
    def _success(self, message, result=True, advance=False):
        # Optionally jump to the next setup step before reporting success.
        if (advance or
                self.TRUTHY.get(self.data.get('advance', ['no'])[0].lower())):
            self._advance()
        return SetupMagic._success(self, message, result=result)
    def _testing(self):
        # True if the 'testing' POST variable was supplied at all.
        self._testing_yes(lambda: True)
        return (self.testing is not None)
    def _testing_yes(self, method, *args, **kwargs):
        # In testing mode, return the forced (or random) boolean instead
        # of invoking `method`; otherwise run `method` for real.
        testination = self.data.get('testing')
        if testination:
            self.testing = random.randint(0, 1)
            if testination[0].lower() in self.TRUTHY:
                self.testing = self.TRUTHY[testination[0].lower()]
            return self.testing
        self.testing = None
        return method(*args, **kwargs)
    def _testing_data(self, method, tdata, *args, **kwargs):
        # Like _testing_yes, but substitutes the canned `tdata` when
        # testing mode says "yes".
        result = self._testing_yes(method, *args, **kwargs) or []
        return (result
                if (self.testing is None) else
                (self.testing and tdata or []))
    def setup_command(self, session):
        # Subclasses must implement the actual setup step.
        raise Exception('FIXME')
class SetupGetEmailSettings(TestableWebbable):
    """Guess server details for an e-mail address"""
    SYNOPSIS = (None, 'setup/email_servers', 'setup/email_servers', None)
    HTTP_CALLABLE = ('GET', )
    HTTP_QUERY_VARS = dict_merge(TestableWebbable.HTTP_QUERY_VARS, {
        'email': 'E-mail address'
    })
    # Canned response used when the command runs in testing mode.
    TEST_DATA = {
        'imap_host': 'imap.wigglebonk.com',
        'imap_port': 993,
        'imap_tls': True,
        'pop3_host': 'pop3.wigglebonk.com',
        'pop3_port': 110,
        'pop3_tls': False,
        'smtp_host': 'smtp.wigglebonk.com',
        'smtp_port': 465,
        'smtp_tls': False
    }
    def _get_domain_settings(self, domain):
        # Real autodiscovery is not implemented yet.
        raise Exception('FIXME')
    def setup_command(self, session):
        """Look up (or fake, in testing mode) mail-server settings for
        each address given via CLI args or the 'email' query variable."""
        results = {}
        # BUGFIX: default to [] -- self.data.get('email') is None when
        # the query variable is absent, making the '+' raise TypeError.
        for email in list(self.args) + self.data.get('email', []):
            settings = self._testing_data(self._get_domain_settings,
                                          self.TEST_DATA, email)
            if settings:
                results[email] = settings
        if results:
            # BUGFIX: interpolate the address count and return the
            # result (the original dropped both the %d argument and
            # the return statement).
            return self._success(_('Found settings for %d addresses')
                                 % len(results), results)
        else:
            return self._error(_('No settings found'))
class SetupWelcome(TestableWebbable):
    # Step 0 of the setup flow: choose a language and kick off the
    # background initialization of the app configuration.
    SYNOPSIS = (None, None, 'setup/welcome', None)
    HTTP_CALLABLE = ('GET', 'POST')
    HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
        'language': 'Language selection'
    })
    def bg_setup_stage_1(self):
        # Wait a bit, so the user has something to look at before we
        # block the web server and do real work.
        time.sleep(2)
        # Initial configuration of app goes here...
        if not self.session.config.tags:
            with BLOCK_HTTPD_LOCK, Idle_HTTPD(allowed=0):
                self.basic_app_config(self.session)
        # Next, if we have any secret GPG keys, extract all the e-mail
        # addresses and create a profile for each one.
        with BLOCK_HTTPD_LOCK, Idle_HTTPD(allowed=0):
            SetupProfiles(self.session).auto_create_profiles()
    def setup_command(self, session):
        # POST: record the chosen UI language and schedule stage-1
        # background setup. Always report the available languages.
        config = session.config
        if self.data.get('_method') == 'POST' or self._testing():
            language = self.data.get('language', [''])[0]
            if language:
                try:
                    i18n = lambda: ActivateTranslation(session, config,
                                                       language)
                    if not self._testing_yes(i18n):
                        raise ValueError('Failed to configure i18n')
                    config.prefs.language = language
                    if not self._testing():
                        self._background_save(config=True)
                except ValueError:
                    return self._error(_('Invalid language: %s') % language)
            config.slow_worker.add_unique_task(
                session, 'Setup, Stage 1', lambda: self.bg_setup_stage_1())
        results = {
            'languages': ListTranslations(config),
            'language': config.prefs.language
        }
        return self._success(_('Welcome to Mailpile!'), results)
class SetupCrypto(TestableWebbable):
    # Step 1 of the setup flow: choose or create a PGP key, verify the
    # passphrase and set the encryption-related preferences.
    SYNOPSIS = (None, None, 'setup/crypto', None)
    HTTP_CALLABLE = ('GET', 'POST')
    HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
        'choose_key': 'Select an existing key to use',
        'passphrase': 'Specify a passphrase',
        'passphrase_confirm': 'Confirm the passphrase',
        'index_encrypted': 'y/n: index encrypted mail?',
#        'obfuscate_index': 'y/n: obfuscate keywords?',  # Omitted do to DANGER
        'encrypt_mail': 'y/n: encrypt locally stored mail?',
        'encrypt_index': 'y/n: encrypt search index?',
        'encrypt_vcards': 'y/n: encrypt vcards?',
        'encrypt_events': 'y/n: encrypt event log?',
        'encrypt_misc': 'y/n: encrypt plugin and misc data?'
    })
    TEST_DATA = {}
    def list_secret_keys(self):
        # Return the usable secret keys: not disabled, not revoked or
        # expiring within a year, and able to both encrypt and sign.
        cutoff = (datetime.date.today() + datetime.timedelta(days=365)
                  ).strftime("%Y-%m-%d")
        keylist = {}
        for key, details in self._gnupg().list_secret_keys().iteritems():
            # Ignore (soon to be) revoked/expired/disabled keys.
            revoked = details.get('revocation_date')
            expired = details.get('expiration_date')
            if (details.get('disabled') or
                    (revoked and revoked <= cutoff) or
                    (expired and expired <= cutoff)):
                continue
            # Ignore keys that cannot both encrypt and sign
            caps = details["capabilities_map"]
            if not caps["encrypt"] or not caps["sign"]:
                continue
            keylist[key] = details
        return keylist
    def gpg_key_ready(self, gpg_keygen):
        # Callback fired when background key generation completes.
        if not gpg_keygen.failed:
            self.session.config.prefs.gpg_recipient = gpg_keygen.generated_key
            self.make_master_key()
            self._background_save(config=True)
            self.save_profiles_to_key()
    def save_profiles_to_key(self, key_id=None, add_all=False, now=False,
                             profiles=None):
        # Mirror the user's profiles onto the PGP key as UIDs, either
        # immediately or once in-flight key generation/editing finishes.
        if key_id is None:
            if (Setup.KEY_CREATING_THREAD and
                    not Setup.KEY_CREATING_THREAD.failed):
                key_id = Setup.KEY_CREATING_THREAD.generated_key
                add_all = True
            if not add_all:
                self.session.ui.warning('FIXME: Not updating GPG key!')
                return
        if key_id is not None:
            uids = []
            data = ListProfiles(self.session).run().result
            for profile in data['profiles']:
                uids.append({
                    'name': profile["fn"],
                    'email': profile["email"][0]["email"],
                    'comment': profile.get('note', '')
                })
            if not uids:
                return
            editor = GnuPGKeyEditor(key_id, set_uids=uids,
                                    sps=self.session.config.gnupg_passphrase,
                                    deletes=max(10, 2*len(uids)))
            def start_editor(*unused_args):
                with Setup.KEY_WORKER_LOCK:
                    Setup.KEY_EDITING_THREAD = editor
                editor.start()
            # Serialize with any other key worker that may be running.
            with Setup.KEY_WORKER_LOCK:
                if now:
                    start_editor()
                elif Setup.KEY_EDITING_THREAD is not None:
                    Setup.KEY_EDITING_THREAD.on_complete('edit keys',
                                                         start_editor)
                elif Setup.KEY_CREATING_THREAD is not None:
                    Setup.KEY_CREATING_THREAD.on_complete('edit keys',
                                                          start_editor)
                else:
                    start_editor()
    def setup_command(self, session):
        """Handle the crypto setup page: key choice, passphrase
        verification, master/GPG key creation and preference toggles."""
        changed = authed = False
        results = {
            'secret_keys': self.list_secret_keys(),
        }
        error_info = None
        if self.data.get('_method') == 'POST' or self._testing():
            # 1st, are we choosing or creating a new key?
            choose_key = self.data.get('choose_key', [''])[0]
            if choose_key and not error_info:
                if (choose_key not in results['secret_keys'] and
                        choose_key != '!CREATE'):
                    error_info = (_('Invalid key'), {
                        'invalid_key': True,
                        'chosen_key': choose_key
                    })
            # 2nd, check authentication...
            #
            # FIXME: Creating a new key will allow a malicious actor to
            #        bypass authentication and change settings.
            #
            try:
                passphrase = self.data.get('passphrase', [''])[0]
                passphrase2 = self.data.get('passphrase_confirm', [''])[0]
                chosen_key = ((not error_info) and choose_key
                              ) or session.config.prefs.gpg_recipient
                if not error_info:
                    assert(passphrase == passphrase2)
                    if chosen_key == '!CREATE':
                        assert(passphrase != '')
                        sps = SecurePassphraseStorage(passphrase)
                    elif chosen_key:
                        sps = mailpile.auth.VerifyAndStorePassphrase(
                            session.config,
                            passphrase=passphrase,
                            key=chosen_key)
                    else:
                        sps = mailpile.auth.VerifyAndStorePassphrase(
                            session.config, passphrase=passphrase)
                    if not chosen_key:
                        choose_key = '!CREATE'
                    results['updated_passphrase'] = True
                    session.config.gnupg_passphrase.data = sps.data
                    mailpile.auth.SetLoggedIn(self)
            except AssertionError:
                error_info = (_('Invalid passphrase'), {
                    'invalid_passphrase': True,
                    'chosen_key': session.config.prefs.gpg_recipient
                })
            # 3rd, if necessary master key and/or GPG key
            with BLOCK_HTTPD_LOCK, Idle_HTTPD():
                if choose_key and not error_info:
                    session.config.prefs.gpg_recipient = choose_key
                    # FIXME: This should probably only happen if the GPG
                    #        key was successfully created.
                    self.make_master_key()
                    changed = True
                with Setup.KEY_WORKER_LOCK:
                    if ((not error_info) and
                            (session.config.prefs.gpg_recipient
                             == '!CREATE') and
                            (Setup.KEY_CREATING_THREAD is None or
                             Setup.KEY_CREATING_THREAD.failed)):
                        gk = GnuPGKeyGenerator(
                            sps=session.config.gnupg_passphrase,
                            on_complete=('notify',
                                         lambda: self.gpg_key_ready(gk)))
                        Setup.KEY_CREATING_THREAD = gk
                        Setup.KEY_CREATING_THREAD.start()
            # Finally we update misc. settings
            for key in self.HTTP_POST_VARS.keys():
                # FIXME: This should probably only happen if the GPG
                #        key was successfully created.
                # Continue iff all is well...
                if error_info:
                    break
                if key in (['choose_key', 'passphrase', 'passphrase_confirm'] +
                           TestableWebbable.HTTP_POST_VARS.keys()):
                    continue
                try:
                    val = self.data.get(key, [''])[0]
                    if val:
                        session.config.prefs[key] = self.TRUTHY[val.lower()]
                        changed = True
                except (ValueError, KeyError):
                    error_info = (_('Invalid preference'), {
                        'invalid_setting': True,
                        'variable': key
                    })
        results.update({
            'creating_key': (Setup.KEY_CREATING_THREAD is not None and
                             Setup.KEY_CREATING_THREAD.running),
            'creating_failed': (Setup.KEY_CREATING_THREAD is not None and
                                Setup.KEY_CREATING_THREAD.failed),
            'chosen_key': session.config.prefs.gpg_recipient,
            'prefs': {
                'index_encrypted': session.config.prefs.index_encrypted,
                'obfuscate_index': session.config.prefs.obfuscate_index,
                'encrypt_mail': session.config.prefs.encrypt_mail,
                'encrypt_index': session.config.prefs.encrypt_index,
                'encrypt_vcards': session.config.prefs.encrypt_vcards,
                'encrypt_events': session.config.prefs.encrypt_events,
                'encrypt_misc': session.config.prefs.encrypt_misc
            }
        })
        if changed:
            self._background_save(config=True)
        if error_info:
            return self._error(error_info[0],
                               info=error_info[1], result=results)
        elif changed:
            return self._success(_('Updated crypto preferences'), results)
        else:
            return self._success(_('Configure crypto preferences'), results)
class SetupProfiles(SetupCrypto):
    # Step 2: create and list e-mail profiles; cache account passwords.
    SYNOPSIS = (None, None, 'setup/profiles', None)
    HTTP_AUTH_REQUIRED = True
    HTTP_CALLABLE = ('GET', 'POST')
    HTTP_QUERY_VARS = dict_merge(TestableWebbable.HTTP_QUERY_VARS, {
    })
    HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
        'email': 'Create a profile for this e-mail address',
        'name': 'Name associated with this e-mail',
        'note': 'Profile note',
        'pass': 'Password for remote accounts',
        'route_id': 'Route ID for sending mail',
    })
    TEST_DATA = {}
    # This is where we cache the passwords we are given, for use later.
    # This is deliberately made a singleton on the class.
    PASSWORD_CACHE = {}
    def _auto_configurable(self, email):
        # FIXME: Actually look things up, this is super lame
        return email.endswith('@gmail.com')
    def get_profiles(self, secret_keys=None):
        # Build a rid -> profile-info mapping from the vcard store,
        # annotating each entry with any matching secret PGP keys.
        data = ListProfiles(self.session).run().result
        profiles = {}
        for rid, ofs in data["rids"].iteritems():
            profile = data["profiles"][ofs]
            email = profile["email"][0]["email"]
            name = profile["fn"]
            note = profile.get('note', '')
            profiles[rid] = {
                "name": name,
                "note": note,
                "pgp_keys": [],  # FIXME
                "email": email,
                "route_id": profile.get('x-mailpile-profile-route', ''),
                "photo": profile.get('photo', [{}])[0].get('photo', ''),
                "auto_configurable": self._auto_configurable(email)
            }
        for key, info in (secret_keys or {}).iteritems():
            for uid in info['uids']:
                email = uid.get('email')
                # NOTE(review): `profiles` is keyed by rid above but is
                # looked up by e-mail address here -- confirm this
                # branch ever matches.
                if email in profiles:
                    profiles[email]["pgp_keys"].append(key)
        return profiles
    def discover_new_email_addresses(self, profiles):
        # Collect e-mail addresses from secret-key UIDs which do not yet
        # have a matching profile.
        addresses = {}
        existing = set([p['email'] for p in profiles.values()])
        for key, info in self.list_secret_keys().iteritems():
            for uid in info['uids']:
                email = uid.get('email')
                note = uid.get('comment')
                if email:
                    if email in existing:
                        continue
                    if email not in addresses:
                        addresses[email] = {'pgp_keys': [],
                                            'name': '', 'note': ''}
                    ai = addresses[email]
                    name = uid.get('name')
                    ai['name'] = name if name else ai['name']
                    ai['note'] = note if note else ai['note']
                    ai['pgp_keys'].append(key)
        # FIXME: Scan Thunderbird and MacMail for e-mails, other apps...
        return addresses
    def auto_create_profiles(self):
        # Create one profile for every newly-discovered address.
        new_emails = self.discover_new_email_addresses(self.get_profiles())
        for email, info in new_emails.iteritems():
            AddProfile(self.session, data={
                '_method': 'POST',
                'email': [email],
                'note': [info["note"]],
                'name': [info['name']]
            }).run()
    def _result(self):
        # Snapshot of profile state for the setup UI.
        profiles = self.get_profiles()
        return {
            'new_emails': self.discover_new_email_addresses(profiles),
            'profiles': profiles,
            'routes': self.session.config.routes,
            'default_email': self.session.config.prefs.default_email
        }
    def setup_command(self, session):
        # POST: add a profile (caching its password); choose a default
        # e-mail when none is set. GET: just report current state.
        changed = False
        if self.data.get('_method') == 'POST' or self._testing():
            name, email, note, pwd = (self.data.get(k, [None])[0] for k in
                                      ('name', 'email', 'note', 'pass'))
            if email:
                rv = AddProfile(session, data=self.data).run()
                if rv.status == 'success':
                    #
                    # FIXME: We need to fire off a background process to
                    #        try and auto-discover routes and sources.
                    #
                    if not session.config.prefs.default_email:
                        session.config.prefs.default_email = email
                        changed = True
                    self.save_profiles_to_key()
                else:
                    return self._error(_('Failed to add profile'),
                                       info=rv.error_info,
                                       result=self._result())
            if email and pwd:
                sps = SecurePassphraseStorage(pwd)
                SetupProfiles.PASSWORD_CACHE[email] = sps
            result = self._result()
            if not result['default_email']:
                # Prefer the profile with the most PGP keys (and the
                # longest name as a tie-breaker) as the default.
                profiles = result['profiles'].values()
                profiles.sort(key=lambda p: (len(p['pgp_keys']),
                                             len(p['name'])))
                e = result['default_email'] = profiles[-1]['email']
                session.config.prefs.default_email = e
                changed = True
        else:
            result = self._result()
        if changed:
            self._background_save(config=True)
        return self._success(_('Your profiles'), result)
class SetupConfigureKey(SetupProfiles):
    # Step 2b (stub): associate profiles with a PGP key.
    SYNOPSIS = (None, None, 'setup/configure_key', None)
    HTTP_AUTH_REQUIRED = True
    HTTP_CALLABLE = ('GET', 'POST')
    HTTP_QUERY_VARS = dict_merge(TestableWebbable.HTTP_QUERY_VARS, {
    })
    HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
    })
    TEST_DATA = {}
    def _result(self):
        # Secret keys plus the profiles annotated with those keys.
        keylist = self.list_secret_keys()
        profiles = self.get_profiles(secret_keys=keylist)
        return {
            'secret_keys': keylist,
            'profiles': profiles,
        }
    def setup_command(self, session):
        # FIXME!
        return self._success(_('Configuring a key'), self._result())
class SetupTestRoute(SetupProfiles):
    # Verify an outgoing mail route by performing a dry-run send.
    SYNOPSIS = (None, None, 'setup/test_route', None)
    HTTP_AUTH_REQUIRED = True
    HTTP_CALLABLE = ('POST', )
    HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS,
                                dict((k, v[0]) for k, v in
                                     CONFIG_RULES['routes'][1].iteritems()),
                                {'route_id': 'ID of existing route'})
    TEST_DATA = {}
    def setup_command(self, session):
        # Accept either an existing route_id or loose route settings in
        # the POST data, then attempt a test-only SendMail with it.
        if self.args:
            route_id = self.args[0]
        elif 'route_id' in self.data:
            route_id = self.data['route_id'][0]
        else:
            route_id = None
        if route_id:
            route = self.session.config.routes[route_id]
            assert(route)
        else:
            # Assemble a transient route dict from the POSTed variables,
            # coercing integer-typed settings per the config rules.
            route = {}
            for k in CONFIG_RULES['routes'][1]:
                if k not in self.data:
                    pass
                elif CONFIG_RULES['routes'][1][k][1] in (int, 'int'):
                    route[k] = int(self.data[k][0])
                else:
                    route[k] = self.data[k][0]
        # Derive a plausible From address: route username, then the
        # default profile, then a synthetic fallback.
        fromaddr = route.get('username', '')
        if '@' not in fromaddr:
            fromaddr = self.session.config.get_profile()['email']
        if not fromaddr or '@' not in fromaddr:
            fromaddr = '%s@%s' % (route.get('username', 'test'),
                                  route.get('host', 'example.com'))
        assert(fromaddr)
        error_info = {'error': _('Unknown error')}
        try:
            assert(SendMail(self.session, None,
                            [(fromaddr,
                              [fromaddr, 'test@mailpile.is'],
                              None,
                              [self.event])],
                            test_only=True, test_route=route))
            return self._success(_('Route is working'),
                                 result=route)
        except OSError:
            error_info = {'error': _('Invalid command'),
                          'invalid_command': True}
        except SendMailError, e:
            error_info = {'error': e.message,
                          'sendmail_error': True}
            error_info.update(e.error_info)
        except:
            import traceback
            traceback.print_exc()
        return self._error(_('Route is not working'),
                           result=route, info=error_info)
class Setup(TestableWebbable):
    """Enter setup flow"""
    SYNOPSIS = (None, 'setup', 'setup', '[do_gpg_stuff]')
    ORDER = ('Internals', 0)
    LOG_PROGRESS = True
    HTTP_CALLABLE = ('GET',)
    HTTP_AUTH_REQUIRED = True
    # These are a global, may be modified...
    KEY_WORKER_LOCK = CryptoRLock()
    KEY_CREATING_THREAD = None
    KEY_EDITING_THREAD = None
    @classmethod
    def _check_profiles(self, config):
        # Profiles are "done" when every profile has an e-mail address
        # and a route which actually exists in the config.
        session = Session(config)
        session.ui = SilentInteraction(config)
        session.ui.block()
        data = ListProfiles(session).run().result
        okay = routes = bad = 0
        for rid, ofs in data["rids"].iteritems():
            profile = data["profiles"][ofs]
            if profile.get('email', None):
                okay += 1
                route_id = profile.get('x-mailpile-profile-route', '')
                if route_id:
                    if route_id in config.routes:
                        routes += 1
                    else:
                        bad += 1
            else:
                bad += 1
        return (routes > 0) and (okay > 0) and (bad == 0)
    @classmethod
    def _CHECKPOINTS(self, config):
        # Ordered setup stages: (name, completed-predicate, handler).
        return [
            # Stage 0: Welcome: Choose app language
            ('language', lambda: config.prefs.language, SetupWelcome),
            # Stage 1: Crypto: Configure our master key stuff
            ('crypto', lambda: config.prefs.gpg_recipient, SetupCrypto),
            # Stage 2: Identity (via. single page install flow)
            ('profiles', lambda: self._check_profiles(config), Setup),
            # Stage 3: Routes (via. single page install flow)
            ('routes', lambda: config.routes, Setup),
            # Stage 4: Sources (via. single page install flow)
            ('sources', lambda: config.sources, Setup),
            # Stage 5: Is All Complete
            ('complete', lambda: config.web.setup_complete, Setup),
            # FIXME: Check for this too?
            #(lambda: config.prefs.crypto_policy != 'none', SetupConfigureKey),
        ]
    @classmethod
    def Next(cls, config, default, needed_auth=True):
        # Return the handler for the first incomplete setup stage that
        # the caller may access, or `default` when setup is complete.
        if not config.loaded_config:
            return default
        for name, guard, step in cls._CHECKPOINTS(config):
            auth_required = (step.HTTP_AUTH_REQUIRED is True
                             or (config.prefs.gpg_recipient and
                                 step.HTTP_AUTH_REQUIRED == 'Maybe'))
            if not guard():
                if (not needed_auth) or (not auth_required):
                    return step
        return default
    def setup_command(self, session):
        # With a _method present, report per-stage completion flags;
        # otherwise fall back to the command-line setup behaviour.
        if '_method' in self.data:
            return self._success(_('Entering setup flow'), result=dict(
                ((c[0], c[1]() and True or False)
                 for c in self._CHECKPOINTS(session.config)
                )))
        else:
            return SetupMagic.setup_command(self, session)
# Rebind "_" to the real gettext function (presumably it was used as a
# plain marker for translatable strings above -- verify against module top).
_ = gettext

# Make all setup-related commands known to the plugin framework.
_plugins.register_commands(SetupMagic,
                           SetupGetEmailSettings,
                           SetupWelcome,
                           SetupCrypto,
                           SetupProfiles,
                           SetupConfigureKey,
                           SetupTestRoute,
                           Setup)
| 38.915157 | 83 | 0.519303 |
082ae7f80e885af2872489a5d575515ce52eed68 | 38,807 | py | Python | datalogger/DataLogger.py | gunny26/datalogger | 7bd29ab88f2e2749284d80a6a834c94c0955a7e0 | [
"Apache-2.0"
] | null | null | null | datalogger/DataLogger.py | gunny26/datalogger | 7bd29ab88f2e2749284d80a6a834c94c0955a7e0 | [
"Apache-2.0"
] | null | null | null | datalogger/DataLogger.py | gunny26/datalogger | 7bd29ab88f2e2749284d80a6a834c94c0955a7e0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# pylint: disable=line-too-long
"""
Module to handle global DataLogger things, like reading from raw data,
dumping cache files, and so on
"""
import os
import glob
import json
import logging
import datetime
import calendar
import time
import gzip
import base64
import pwd
# own modules
from TimeseriesArray import TimeseriesArray as TimeseriesArray
from TimeseriesArrayStats import TimeseriesArrayStats as TimeseriesArrayStats
from TimeseriesStats import TimeseriesStats as TimeseriesStats
from Quantile import QuantileArray as QuantileArray
from CustomExceptions import *
class DataLogger(object):
"""
class to handle same work around Datalogger Files
either precalculate Timeseries, TimeseriesArray, TimeseriesStats and TimeseriesArrayStats
or simple return this precalculated data
most of the time the pre-calculation will be done with the first call for this kind of data
"""
    def __init__(self, basedir, configfilename="datalogger.json"):
        """
        Load the global datalogger configuration.

        parameters:
            basedir        <str> path to base directory for all projects
            configfilename <str> name of the global JSON configuration file,
                           expected inside basedir

        raises:
            AttributeError if basedir or the configured cachedir do not exist
        """
        self.__basedir = basedir
        # checking and loading config
        if not os.path.isdir(self.__basedir):
            raise AttributeError("global Base Directory %s does not exist" % self.__basedir)
        self.__config_filename = os.path.join(basedir, configfilename)
        # loading global configuration
        with open(self.__config_filename, "rt") as infile:
            self.__config = json.load(infile)
        if "cachedir" not in self.__config:
            logging.info("you should define 'cachedir' in main configuration file, fallback to global_cache subdir under basedir")
            self.__config["cachedir"] = os.path.join(basedir, "global_cache") # TODO: remove fallback in future
        else:
            # cachedir in the config file is relative to basedir
            self.__config["cachedir"] = os.path.join(basedir, self.__config["cachedir"])
        if not os.path.isdir(self.__config["cachedir"]):
            raise AttributeError("global Cache Directory %s does not exist" % self.__config["cachedir"])
        # project/tablename specific state is initialized later by setup()
        self.__datestring = None
        self.__project = None
        self.__tablename = None
        self.__timedelta = None
        self.__meta = None
    def setup(self, project, tablename, datestring, timedelta=0.0):
        """
        set datalogger to some specific project/tablename/datestring combination

        project <str> has to be in defined projects
        tablename <str> has to be in defined tablenames of project
        datestring <str> some datestring like 2018-12-31 in the past
        timedelta <int> seconds to add to every raw timestamp, defaults to 0

        raises:
            DataLoggerLiveDataError if datestring is today
            AttributeError on unknown project/tablename or missing directories
        """
        # reading today's (still growing) raw data is explicitly forbidden
        try:
            assert datetime.date.today().isoformat() != datestring
        except AssertionError:
            raise DataLoggerLiveDataError("Reading from live data is not allowed")
        self.__datestring = datestring
        self.__project = project
        self.__tablename = tablename
        self.__timedelta = timedelta
        if project not in self.__config["projects"]:
            raise AttributeError("called project %s is not defined in main configuration file" % project)
        if tablename not in self.__config["projects"][project]:
            raise AttributeError("called tablename %s is not defined in project" % tablename)
        # loading specific configuration, first look for project directory
        projectdir = os.path.join(self.__basedir, project)
        if not os.path.isdir(projectdir):
            raise AttributeError("project Directory %s does not exist" % projectdir)
        # load table definition from meta subdir, dir and subdir must exist
        # define some working directories
        metadir = os.path.join(projectdir, "meta")
        if not os.path.isdir(metadir):
            raise AttributeError("project meta directory %s does not exist" % metadir)
        metafile = os.path.join(metadir, "%s.json" % self.__tablename)
        if not os.path.isfile(metafile):
            raise AttributeError("table definition file %s does not exist" % metafile)
        with open(metafile, "rt") as infile:
            self.__meta = json.load(infile)
        # where to find raw input files
        self.__meta["raw_basedir"] = os.path.join(projectdir, "raw")
        if not os.path.isdir(self.__meta["raw_basedir"]):
            raise AttributeError("project raw input directory %s does not exist" % self.__meta["raw_basedir"])
        # convert to tuple
        self.__meta["headers"] = tuple(self.__meta["headers"])
        # transitional hook to implement datatypes without correcting
        # all meta files at once
        if isinstance(self.__meta["value_keynames"], dict):
            # new style: value_keynames maps keyname -> datatype
            self.__meta["datatypes"] = self.__meta["value_keynames"]
            self.__meta["value_keynames"] = tuple(self.__meta["value_keynames"].keys())
        elif isinstance(self.__meta["value_keynames"], list):
            # if old stype, keep all datatype asis, and print warning
            logging.error("You should define value_keynames as dict with datatypes")
            self.__meta["datatypes"] = dict(zip(self.__meta["value_keynames"], ("asis",) * len(self.__meta["value_keynames"])))
            self.__meta["value_keynames"] = tuple(self.__meta["value_keynames"])
        self.__meta["index_keynames"] = tuple(self.__meta["index_keynames"])
        self.__meta["blacklist"] = tuple(self.__meta["blacklist"])
        # add available Statistical function names to meta structure
        self.__meta["stat_func_names"] = list(TimeseriesStats.stat_funcs.keys())
        # make some assertions
        # every index_keyname has to be in headers
        assert all((key in self.__meta["headers"] for key in self.__meta["index_keynames"]))
        # ever value_keyname has to be in headers
        assert all((key in self.__meta["headers"] for key in self.__meta["value_keynames"]))
        # ts_keyname has to be in headers
        assert self.__meta["ts_keyname"] in self.__meta["headers"]
def __str__(self):
ret = {
"basedir" : self.__basedir,
"project" : self.__project,
"tablename" : self.__tablename,
"config" : self.__config,
"meta" : self.__meta,
}
return json.dumps(ret, indent=4)
    @property
    def project(self):
        """project is the highest entity"""
        return self.__project

    @property
    def tablename(self):
        """under every project there are one or some tablenames"""
        return self.__tablename

    @property
    def datestring(self):
        """isodate string of the day this instance is set up for"""
        return self.__datestring

    @property
    def timedelta(self):
        """seconds added to every raw timestamp while parsing"""
        return self.__timedelta

    @property
    def delimiter(self):
        """delimiter to use to read raw input"""
        return self.__meta["delimiter"]

    @property
    def ts_keyname(self):
        """keyname of timestamp"""
        return self.__meta["ts_keyname"]

    @property
    def headers(self):
        """all headers, order matters"""
        return self.__meta["headers"]

    @property
    def value_keynames(self):
        """keynames of value fields, have to be float convertible"""
        return self.__meta["value_keynames"]

    @property
    def datatypes(self):
        """dictionary of datatypes"""
        return self.__meta["datatypes"]

    @property
    def index_keynames(self):
        """keynames of value fields, are treated as strings"""
        return self.__meta["index_keynames"]

    @property
    def blacklist(self):
        """keynames to ignore from raw input file"""
        return self.__meta["blacklist"]

    @property
    def raw_basedir(self):
        """subdirectory under wich to find raw inout files"""
        return self.__meta["raw_basedir"]

    @property
    def global_cachedir(self):
        """subdirectory where to put caches"""
        return self.__config["cachedir"]

    @property
    def meta(self):
        """definition of this particular project/tablename configuration"""
        return self.__meta

    @property
    def os_user(self):
        """return OS user to use for file permissions, defined in datalogger.json"""
        return self.__config["user"]

    @property
    def os_group(self):
        """return OS group to use for file permissions, defined in datalogger.json"""
        return self.__config["group"]
    @property
    def cachedir(self):
        """
        return specific subdirectory to store cache files
        if this directory does not exist, it will be created

        layout: <global cachedir>/<datestring>/<project>/<tablename>

        returns:
            <str> directory path
        """
        subdir = os.path.join(self.__config["cachedir"], self.datestring, self.project, self.tablename)
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        # try to set ownership of created directories; this is best-effort:
        # an unknown user only produces a log message, not an error
        username = self.os_user
        try:
            uid = pwd.getpwnam(username).pw_uid
            gid = pwd.getpwnam(username).pw_gid
            os.chown(os.path.join(self.__config["cachedir"], self.datestring), uid, gid)
            os.chown(os.path.join(self.__config["cachedir"], self.datestring, self.project), uid, gid)
            os.chown(os.path.join(self.__config["cachedir"], self.datestring, self.project, self.tablename), uid, gid)
        except KeyError as exc:
            logging.exception(exc)
            logging.error("User %s does not exist on this systemi, default permission will be applied to created directories", username)
        return subdir

    @property
    def interval(self):
        """return defined interval of timestamps defined in configuration"""
        return self.__meta["interval"]
def __getitem__(self, *args):
"""
super overloaded __getitem__ function could be either
use this method to get to plain stored data, without any sophisticated processing
["caches"] -> <dict>
["tsa"] -> return TimeseriesArray
["tsa", <key>] -> return Timeseries
["tsastats"] -> return TimeseriesArrayStats
["tsastats", <key>] -> return TimeseriesStats
["qa"] -> return QuantileArray
["qa", <key>] -> return <dict> Quantile
["total_stats"] -> return <dict> total_stats
"""
if isinstance(args[0], str):
kind = args[0]
if kind == "tsa":
return self.load_tsa()
if kind == "tsastats":
return self.load_tsastats()
if kind == "qa":
return self.load_quantile()
if kind == "caches":
return self.get_caches()
if kind == "total_stats":
return self.load_total_stats()
if isinstance(args[0], tuple):
kind, subkey = args[0]
if kind == "tsa":
return self.load_tsa()[subkey]
if kind == "tsastats":
return self.load_tsastats()[subkey]
if kind == "qa":
return self.load_quantile()[subkey]
else:
raise KeyError("unknown datatype")
    def __parse_line(self, row):
        """
        specialized method to parse a single line read from raw CSV

        parameters:
            row <str> result from readline()

        returns:
            <dict> keys from headers; the timestamp field is converted to
            int and shifted by self.timedelta
        """
        data = dict(zip(self.headers, row.split(self.delimiter)))
        try:
            data[self.ts_keyname] = int(float(data[self.ts_keyname]) + self.timedelta)
        except ValueError as exc:
            logging.exception(exc)
            # NOTE(review): despite the "skipping" wording, the dict is still
            # returned below with an unconverted timestamp -- the caller's
            # range check is expected to filter it; verify this is intended
            logging.error("ValueError on row skipping this data: %s", str(data))
        except KeyError as exc:
            logging.exception(exc)
            logging.error("KeyError on row, skipping this data: %s", str(data))
        return data
# @staticmethod
# def __get_file_handle(filename, mode):
# """
# return filehandle either for gzip or normal uncompressed file
#
# parameters:
# filename <str> fileanme
# mode <str> as used in open(<filename>, <mode>)
#
# returns:
# <file> handle to opened file, either gzip.open or normal open
# """
# if filename.endswith(".gz"):
# return gzip.open(filename, mode)
# return open(filename, mode)
def __get_raw_filename(self):
"""
return filename of raw input file, if one is available
otherwise raise Exception
parameters:
datestring <str>
"""
filename = os.path.join(self.raw_basedir, "%s_%s.csv" % (self.tablename, self.datestring))
if not os.path.isfile(filename):
filename += ".gz" # try gz version
if not os.path.isfile(filename):
raise DataLoggerRawFileMissing("No Raw Input File named %s (or .gz) found", filename)
return filename
    def __read_raw_dict(self):
        """
        generator to return parsed lines from raw file of one specific datestring

        skips the header line, comment lines starting with "#" and rows whose
        timestamp falls outside the configured day; logs and skips rows that
        fail to parse

        yields:
            <dict> of every row
        """
        filename = self.__get_raw_filename()
        logging.debug("reading raw data from file %s", filename)
        start_ts, stop_ts = self.get_ts_for_datestring(self.__datestring) # get first and last timestamp of this date
        logging.debug("appropriate timestamps for this date are between %s and %s", start_ts, stop_ts)
        if filename.endswith(".gz"):
            filehandle = gzip.open(filename, "rt")
        else:
            filehandle = open(filename, "rt")
        with filehandle as infile:
            next(infile) # skip header line
            for lineno, row in enumerate(infile):
                if not row or row[0] == "#":
                    continue
                try:
                    data = self.__parse_line(row)
                    if self.ts_keyname not in data:
                        logging.info("Format Error in row: %s, got %s", row, data)
                        continue
                    if not start_ts <= data[self.ts_keyname] <= stop_ts:
                        logging.debug("Skipping line, ts %s not between %s and %s", data[self.ts_keyname], start_ts, stop_ts)
                        continue
                    yield data
                except KeyError as exc:
                    logging.exception(exc)
                    logging.error("KeyError in File %s, line %s, on row: %s, skipping", filename, lineno, row)
                except IndexError as exc:
                    logging.exception(exc)
                    logging.error("IndexError in File %s, line %s, on row: %s, skipping", filename, lineno, row)
                except UnicodeDecodeError as exc:
                    logging.exception(exc)
                    logging.error("UnicodeDecodeError in File %s, line %s, on row: %s, skipping", filename, lineno, row)
def get_projects(self):
"""return available project, defined in datalogger.json"""
return list(self.__config["projects"].keys())
def get_tablenames(self, project):
"""return available tablenames for projects, defined in datalogger.json"""
return list(self.__config["projects"][project].keys())
    def raw_reader(self):
        """
        kind of debugging method to read from raw file, like load_tsa does,
        but report every line as is, only converted into dict

        yields:
            <dict> every parsed raw row, unfiltered
        """
        for row in self.__read_raw_dict():
            yield row
def delete_caches(self):
"""delete pre calculates caches"""
for entry in os.listdir(self.cachedir):
absfile = os.path.join(self.cachedir, entry)
if entry.startswith("tsa_") or entry.startswith("ts_") or entry.startswith("tsastat_") or entry.startswith("tsstat_") or entry.startswith("quantile") or entry.startswith("total_stats"):
logging.debug("deleting cached file %s", entry)
os.unlink(absfile)
    def get_caches(self):
        """
        search for available cachefiles
        mainly to check if the raw data of this datestring is prosessed already
        pattern is mainly used only to find the correct files, more for internal use

        returns:
            <dict> mapping cache type ("tsa", "ts", "tsastat", "tsstat",
            "quantile", "total_stats") to found filenames / existence flags
        """
        caches = {
            "tsa" : {
                "pattern" : "tsa_*.json",
                "keys" : {},
                "raw" : None,
            },
            "ts" : {
                "pattern" : "ts_*.csv.gz",
                "keys" : {},
            },
            "tsastat" : {
                "pattern" : "tsastat_*.json",
                "keys" : {},
            },
            "tsstat" : {
                "pattern" : "tsstat_*.json",
                "keys" : {},
            },
            "quantile" : {
                "pattern" : "quantile.json",
                "exists" : False,
            },
            "total_stats" : {
                "pattern" : "total_stats.json",
                "exists" : False,
            }
        }
        # the original raw file could be deleted, and only the
        # calculated TSA/TSASTATS and so on are available. In this case
        # define None
        try:
            caches["tsa"]["raw"] = self.__get_raw_filename() # raises exception if no file was found
        except DataLoggerRawFileMissing:
            caches["tsa"]["raw"] = None
        except Exception as exc:
            logging.exception(exc)
            raise
        # collect available per-key cache files, keyed by their decoded index
        for cachetype in ("tsa", "ts", "tsastat", "tsstat"):
            file_pattern = os.path.join(self.cachedir, caches[cachetype]["pattern"])
            for abs_filename in glob.glob(file_pattern):
                filename = os.path.basename(abs_filename)
                key = self.__decode_filename(filename)
                caches[cachetype]["keys"][str(key)] = filename
        # add quantile part
        caches["quantile"]["exists"] = os.path.isfile(os.path.join(self.cachedir, "quantile.json"))
        # add total_stats part
        caches["total_stats"]["exists"] = os.path.isfile(os.path.join(self.cachedir, "total_stats.json"))
        return caches
def import_tsa(self, tsa):
"""
store tsa given in parameter in global_cache to make the data available
usually this could be modfied existing tsa extended by some keys, or filtered or ...
the structure has to be predefined in meta data
the tsa can afterwards be accessed via normal frontends (web, api)
parameters:
tsa <TimeseriesArray> object
"""
if self.index_keynames != tsa.index_keynames:
raise AssertionError("provided index_keynames does not match defined index_keynames")
if self.value_keynames != tuple(tsa.value_keynames):
raise AssertionError("provided value_keynames does not match defined value_keynames")
cachefilename = os.path.join(self.cachedir, TimeseriesArray.get_dumpfilename(tsa.index_keynames))
if not os.path.isfile(cachefilename):
tsa.dump(self.cachedir)
tsastats = TimeseriesArrayStats(tsa)
tsastats.dump(self.cachedir)
qantile = QuantileArray(tsa, tsastats)
qantile.dump(self.cachedir)
else:
raise Exception("TSA Archive %s exists already in cache" % cachefilename)
def load_tsa(self, filterkeys=None, index_pattern=None):
"""
caching version to load_tsa_raw
if never called, get ts from load_tsa_raw, and afterwards dump_tsa
on every consecutive call read from cached version
parameters:
datestring <str>
filterkeys <tuple> or None default None
index_pattern <str> or None default None
HINT:
use delete_caches to delete all precalculated files
use setup to define some sort of timedelta to use
returns
<TimeseriesArray> object read from cachefile or from raw data
"""
cachefilename = os.path.join(self.cachedir, TimeseriesArray.get_dumpfilename(self.index_keynames))
def fallback():
"""
fallback method to use, if reading from cache data is not possible
"""
tsa = self.load_tsa_raw()
tsa.dump(self.cachedir) # save full data
# read the data afterwards to make sure there is no problem,
# TODO: is this the fastest way?
# corrected 2017-09-21 reread stored data to convert data to correct type
# if validate is True:
tsa = TimeseriesArray.load(self.cachedir, self.index_keynames, filterkeys=filterkeys, index_pattern=index_pattern, datatypes=self.datatypes)
return tsa
if not os.path.isfile(cachefilename):
logging.info("cachefile %s does not exist, fallback read from raw data file", cachefilename)
return fallback()
logging.debug("loading stored TimeseriesArray object file %s", cachefilename)
try:
tsa = TimeseriesArray.load(self.cachedir, self.index_keynames, filterkeys=filterkeys, index_pattern=index_pattern, datatypes=self.datatypes)
return tsa
except IOError:
logging.error("IOError while reading from %s, using fallback", cachefilename)
os.unlink(cachefilename)
return fallback()
except EOFError:
logging.error("EOFError while reading from %s, using fallback", cachefilename)
os.unlink(cachefilename)
return fallback()
def load_tsastats(self, filterkeys=None):
"""
caching version to load_tsa_raw
if never called, get ts from load_tsa_raw, and afterwards dump_tsa
on every consecutive call read from cached version
use cleancache to remove caches
parameters:
datestring <str>
timedelta <int>
cleancache <bool>
returns
<TimeseriesArray> object read from cachefile or from raw data
"""
cachefilename = os.path.join(self.cachedir, TimeseriesArrayStats.get_dumpfilename(self.index_keynames))
def fallback():
"""
fallback method to use, if reading from cache data is not possible
"""
tsa = self.load_tsa(filterkeys=None) # load full tsa, and generate statistics
tsastats = TimeseriesArrayStats(tsa) # generate full Stats
tsastats.dump(self.cachedir) # save it for future usage
tsastats = TimeseriesArrayStats.load(self.cachedir, self.index_keynames, filterkeys=filterkeys) # read specific
return tsastats
if not os.path.isfile(cachefilename):
logging.info("cachefile %s does not exist, fallback read from tsa archive", cachefilename)
return fallback()
logging.debug("loading stored TimeseriesArray object file %s", cachefilename)
try:
tsastats = TimeseriesArrayStats.load(self.cachedir, self.index_keynames, filterkeys=filterkeys)
return tsastats
except IOError:
logging.error("IOError while reading from %s, using fallback", cachefilename)
os.unlink(cachefilename)
return fallback()
except EOFError:
logging.error("EOFError while reading from %s, using fallback", cachefilename)
os.unlink(cachefilename)
return fallback()
    def load_quantile(self):
        """
        returns quantile for this specific tsa, either load cache version,
        or recreate from tsa

        returns:
            <QuantileArray>
        """
        cachefilename = QuantileArray.get_dumpfilename(self.cachedir)
        quantile_array = None
        if os.path.isfile(cachefilename):
            quantile_array = QuantileArray.load(self.cachedir)
        else:
            logging.info("cachefile %s does not exist, fallback read from tsa archive", cachefilename)
            tsa = self["tsa"]
            tsa.cache = True # to enable in memory caching of timeseries
            # huge performance improvement, from 500s to 70s
            tsastats = self["tsastats"]
            quantile_array = QuantileArray(tsa, tsastats)
            quantile_array.dump(self.cachedir)
        return quantile_array
    def load_total_stats(self):
        """
        aggregates all TimeseriesStats available in TimeseriesArrayStats to total_stats dict
        result is cached in total_stats.json

        returns:
            <dict> of statistical functions, and values
        """
        # per-statistic aggregation functions; statistics for which no
        # meaningful cross-key aggregate exists are pinned to 0.0
        aggregator = {
            'median': lambda a, b: 0.0, # median of medians
            'avg': lambda a, b: a + b,
            'last': lambda a, b: 0.0,
            'diff': lambda a, b: 0.0,
            'max': lambda a, b: max(a, b),
            'first': lambda a, b: 0.0,
            'min': lambda a, b: min(a, b),
            'std': lambda a, b: 0.0,
            'count': lambda a, b: a + b,
            'mean': lambda a, b: 0.0,
            'dec': lambda a, b: a + b,
            'inc': lambda a, b: a + b,
            'sum': lambda a, b: a + b,
            'total_count' : lambda a, b: a # to be consistent
        }
        cachefilename = os.path.join(self.cachedir, "total_stats.json")
        if not os.path.isfile(cachefilename):
            tsastats = self["tsastats"]
            stats_data = {}
            for value_keyname in self.value_keynames:
                stats_data[value_keyname] = dict((key, 0.0) for key in aggregator.keys()) # prefill with 0.0
                for index_key in tsastats.keys():
                    stats = tsastats[index_key]
                    for stat_func in stats[value_keyname].keys():
                        stats_data[value_keyname][stat_func] = aggregator[stat_func](stats_data[value_keyname][stat_func], stats[value_keyname][stat_func])
                    # count every aggregated index key once
                    stats_data[value_keyname]["total_count"] += 1
                if stats_data[value_keyname]["total_count"] > 0:
                    stats_data[value_keyname]["total_avg"] = stats_data[value_keyname]["sum"] / stats_data[value_keyname]["total_count"]
                    stats_data[value_keyname]["avg"] /= stats_data[value_keyname]["total_count"]
                else:
                    stats_data[value_keyname]["total_avg"] = 0.0
                    stats_data[value_keyname]["avg"] = 0.0
            with open(cachefilename, "wt") as outfile:
                json.dump(stats_data, outfile, indent=4)
            return stats_data
        else:
            with open(cachefilename, "rt") as infile:
                stats_data = json.load(infile)
            return stats_data
    @staticmethod
    def __decode_filename(filename):
        """
        return parameters from encoded filename (basename) in form of
        <prefix identifier>_<base64 encoded key>.<endings>

        SECURITY NOTE(review): the decoded payload is passed to eval(),
        so a crafted cache filename could execute arbitrary code -- the
        cache directory must only contain trusted, self-generated files
        (consider ast.literal_eval as a safer replacement)

        parameters:
            filename <str> basename of file, without path

        returns:
            <tuple> decoded key (eval(base64.b64decode(key)))

        raises:
            DataLoggerFilenameDecodeError on any decode failure
        """
        try:
            parts = filename.split(".")[0].split("_")
            key_encoded = "_".join(parts[1:]) # there could be more than 2 parts
            # the first part ist something like tsa_, tsastats_, ts_,
            # tsstats_ and so on.
            #_, key_and_ending = filename.split("_")
            #key_encoded = key_and_ending.split(".")[0]
            key = None
            try:
                # TODO: there are some problems to decode b64string with
                # urlsafe_b64decode if unicode,
                # try to use b64decode instead
                try:
                    key = eval(base64.urlsafe_b64decode(str(key_encoded)))
                except TypeError as exc:
                    logging.exception(exc)
                    key = eval(base64.b64decode(key_encoded))
                assert isinstance(key, tuple)
                return key
            except Exception as exc:
                logging.exception(exc)
                raise DataLoggerFilenameDecodeError("filename %s could not be decoded to tuple, result: %s" % (filename, key))
        except Exception as exc:
            logging.exception(exc)
            raise DataLoggerFilenameDecodeError("Something went wrong while decoding filensme %s" % filename)
    def load_tsa_raw(self):
        """
        read data from raw input files and return TimeseriesArray object

        returns:
            <TimeseriesArray> object wich holds all data of this day

        raises:
            ValueError / AssertionError re-raised from TimeseriesArray.add
        """
        tsa = TimeseriesArray(self.index_keynames, self.value_keynames, datatypes=self.datatypes)
        for rowdict in self.__read_raw_dict():
            try:
                tsa.add(rowdict)
            except ValueError as exc:
                logging.exception(exc)
                logging.error("ValueError by adding this data to TimeseriesArray: %s", rowdict)
                raise exc
            except AssertionError as exc:
                logging.exception(exc)
                logging.error("AssertionError by adding this data to TimeseriesArray: %s", rowdict)
                raise exc
        return tsa
    # backwards compatible alias for load_tsa_raw
    read_day = load_tsa_raw
# def old_tsa_group_by(self, tsa, subkeys, group_func):
# """
# TODO: make this method static, inteval should be in tsa
# group given tsa by subkeys, and use group_func to aggregate data
# first all Timeseries will be aligned in time, to get proper points in timeline
#
# parameters:
# tsa <TimeseriesArray>
# subkey <tuple> could also be empty, to aggregate everything
# group_func <func> like lambda a,b : (a+b)/2 to get averages
# slotlength <int> interval in seconds to correct every timeseries to
#
# returns:
# <TimeseriesArray>
# """
# # intermediated tsa
# tsa2 = TimeseriesArray(index_keys=subkeys, value_keys=tsa.value_keys, ts_key=tsa.ts_key, datatypes=tsa.datatypes)
# start_ts, _ = DataLogger.get_ts_for_datestring(self.__datestring)
# ts_keyname = tsa.ts_key
# for data in tsa.export():
# # align timestamp
# nearest_slot = round((data[ts_keyname] - start_ts) / self.__interval)
# data[ts_keyname] = int(start_ts + nearest_slot * self.__interval)
# #data[ts_keyname] = align_timestamp(data[ts_keyname])
# tsa2.group_add(data, group_func)
# return tsa2
# group_by = tsa_group_by
# @staticmethod
# def old_tsastat_group_by(tsastat, subkey):
# """
# group given tsastat array by some subkey
#
# parameters:
# tsastat <TimeseriesArrayStats>
# subkey <tuple> subkey to group by
#
# returns:
# <dict>
# """
# # how to aggregate statistical values
# group_funcs = {
# u'count' : lambda a, b: a + b,
# u'std' : lambda a, b: (a + b)/2,
# u'avg': lambda a, b: (a + b)/2,
# u'last' : lambda a, b: -1.0, # theres no meaning
# u'min' : min,
# u'max' : max,
# u'sum' : lambda a, b: (a + b) / 2,
# u'median' : lambda a, b: (a + b)/2,
# u'mean' : lambda a, b: (a + b)/2,
# u'diff' : lambda a, b: (a + b)/2,
# u'dec' : lambda a, b: (a + b)/2,
# u'inc' : lambda a, b: (a + b)/2,
# u'first' : lambda a, b: -1.0, # theres no meaning
# }
# # create new empty TimeseriesArrayStats Object
# tsastats_new = TimeseriesArrayStats.__new__(TimeseriesArrayStats)
# tsastats_new.index_keys = subkey # only subkey
# tsastats_new.value_keys = tsastat.value_keys # same oas original
# newdata = {}
# for index_key, tsstat in tsastat.items():
# key_dict = dict(zip(tsastat.index_keynames, index_key))
# newkey = None
# if len(subkey) == 0: # no subkey means total aggregation
# newkey = ("__total__", )
# else:
# newkey = tuple([key_dict[key] for key in subkey])
# if newkey not in newdata:
# newdata[newkey] = {}
# for value_key in tsastat.value_keynames:
# if value_key not in newdata[newkey]:
# newdata[newkey][value_key] = dict(tsstat[value_key])
# else:
# for stat_funcname in tsstat[value_key].keys():
# existing = float(newdata[newkey][value_key][stat_funcname])
# to_group = float(tsstat[value_key][stat_funcname])
# newdata[newkey][value_key][stat_funcname] = group_funcs[stat_funcname](existing, to_group)
# tsastats_new.stats = newdata
# return tsastats_new
# @staticmethod
# def old_get_scatter_data(tsa, value_keys, stat_func):
# """
# get data structure to use for highgraph scatter plots,
# [
# {
# name : str(<key>),
# data : [stat_func(tsa[key][value_keys[0]]), stat_func(tsa[key][value_keys[1]], ]
# },
# ...
# ]
#
# parameters:
# tsa <TimeseriesArray>
# value_keys <tuple> with len 2, represents x- and y-axis
# stat_fuc <str> statistical function to use to aggregate xcolumn and ycolumns
# must exist in Timeseries object
#
# returns:
# <list> of <dict> data structure to use directly in highgraph scatter plots, when json encoded
# """
# assert len(value_keys) == 2
# highchart_data = []
# for key in tsa.keys():
# stats = tsa[key].get_stat(stat_func)
# highchart_data.append({
# "name" : key[0],
# "data" : [[stats[value_keys[0]], stats[value_keys[1]]],]
# })
# return highchart_data
@staticmethod
def datestring_to_date(datestring):
"""function to convert datestring to datetime object"""
year, month, day = datestring.split("-")
return datetime.date(int(year), int(month), int(day))
@staticmethod
def datewalker(datestring_start, datestring_stop):
"""
function to walk from beginning datestring to end datestring,
in steps of one day
"""
start_date = DataLogger.datestring_to_date(datestring_start)
stop_date = DataLogger.datestring_to_date(datestring_stop)
while start_date <= stop_date:
yield start_date.isoformat()
start_date = start_date + datetime.timedelta(days=1)
@staticmethod
def monthwalker(monthdatestring):
"""
function to walf from first day to last day in given month
"""
year, month = monthdatestring.split("-")
lastday = calendar.monthrange(int(year), int(month))[1]
start = "%04d-%02d-01" % (int(year), int(month))
stop = "%04d-%02d-%02d" % (int(year), int(month), lastday)
return DataLogger.datewalker(start, stop)
    def get_tsastats_longtime_hc(self, monthstring, key, value_key):
        """
        TODO: do this in webapp, not here, too special
        method to get longtime data from stored TimeseriesArrayStats objects
        and return data usable as higcharts input

        NOTE(review): the loop walks every datestring of the month, but
        load_tsastats() reads the datestring fixed by setup() and never
        receives the loop variable -- it looks like the same day is loaded
        repeatedly; verify whether setup() should be re-run per datestring

        parameters:
            monthstring <str> month in YYYY-MM form
            key <tuple> index key to filter for
            value_key <str> value column to report

        returns:
            <dict> stat function name -> list of (datestring, value) tuples
        """
        # datalogger = DataLogger(BASEDIR, project, tablename)
        filterkeys = dict(zip(self.index_keynames, key))
        logging.debug("build filterkeys %s", filterkeys)
        ret_data = {}
        for datestring in self.monthwalker(monthstring):
            logging.debug("getting tsatstats for %s", monthstring)
            try:
                tsastats = self.load_tsastats(filterkeys)
                for funcname in tsastats[key][value_key].keys():
                    if funcname in ret_data:
                        ret_data[funcname].append((datestring, tsastats[key][value_key][funcname]))
                    else:
                        ret_data[funcname] = [(datestring, tsastats[key][value_key][funcname]), ]
            except DataLoggerRawFileMissing as exc:
                logging.exception(exc)
                logging.error("No Input File for datestring %s found, skipping this date", datestring)
            except DataLoggerLiveDataError as exc:
                logging.exception(exc)
                logging.error("Reading from live data is not allowed, skipping this data, and ending loop")
                break
        return ret_data
    @staticmethod
    def get_ts_for_datestring(datestring):
        """
        returns first and last available timestamp of this date

        parameters:
            datestring <str> in isodate format like 2015-12-31

        returns:
            <float> first -> 2015-12-31 00:00:00.0
            <float> last -> 2015-12-31 23:59:59.0
            (floats, because the time.time()/datetime offset correction
            below is fractional)
        """
        def datetime_to_ts(datetime_object):
            """
            return unix timestamp from given datetime object

            parameters:
                datetime_object <datetime>

            returns:
                <int> timestamp of this datetime
            """
            return int((datetime_object - datetime.datetime.fromtimestamp(0)).total_seconds())
        year, month, day = (int(part) for part in datestring.split("-"))
        start = datetime.datetime(year, month, day, 0, 0, 0)
        start_ts = datetime_to_ts(start)
        stop = datetime.datetime(year, month, day, 23, 59, 59)
        stop_ts = datetime_to_ts(stop)
        # time.time() differs from datetime.datetime.now()
        # correct the naive datetime based timestamps by that offset
        time_to_datetime_delta = time.time() - (datetime.datetime.now() - datetime.datetime.fromtimestamp(0)).total_seconds()
        return (start_ts + time_to_datetime_delta, stop_ts + time_to_datetime_delta)
@staticmethod
def get_yesterday_datestring():
"""return datestring from yesterday (24h ago)"""
return datetime.date.fromtimestamp(time.time() - 60 * 60 * 24).isoformat()
@staticmethod
def get_last_business_day_datestring():
"""
returns last businessday datestring, ignoring Feiertage
"""
last_business_day = datetime.date.today()
shift = datetime.timedelta(max(1, (last_business_day.weekday() + 6) % 7 - 3))
last_business_day = last_business_day - shift
return last_business_day.isoformat()
| 41.638412 | 197 | 0.601721 |
d93e5cff539e46d714ced9bde8adbbad5043eda7 | 4,368 | py | Python | streamlit/page_tec.py | kartikkumar7/News-Big-Data-Analysis | 3f9a306284a5dca28c033bfec16c69ce4fdd30df | [
"MIT"
] | null | null | null | streamlit/page_tec.py | kartikkumar7/News-Big-Data-Analysis | 3f9a306284a5dca28c033bfec16c69ce4fdd30df | [
"MIT"
] | null | null | null | streamlit/page_tec.py | kartikkumar7/News-Big-Data-Analysis | 3f9a306284a5dca28c033bfec16c69ce4fdd30df | [
"MIT"
] | 1 | 2021-12-11T22:57:49.000Z | 2021-12-11T22:57:49.000Z | import streamlit as st
import pandas as pd
import spacy_streamlit
import spacy
from spacy import displacy
import requests
from requests.structures import CaseInsensitiveDict
nlp = spacy.load('en_core_web_sm')
def main():
    """Route between the article-selection page and the dashboard page."""
    page_registry = {
        "Article Selection": page_second,
        "Dashboard": page_third,
    }
    # Seed the Streamlit session state exactly once per browser session.
    if "page" not in st.session_state:
        defaults = {
            "page": "Article Selection",
            "int": 0,
            "options": ["NER", "Summarization", "Sentiment", "Tokenize"],
            "radio": "NER",
        }
        st.session_state.update(defaults)
    with st.sidebar:
        selected = st.radio("Select your page", tuple(page_registry.keys()))
    page_registry[selected]()
def page_second():
    """Render the article-selection page: list the scraped tech articles,
    let the user pick one row, and display that article with TTS audio."""
    st.header("TECHNOLOGY")
    DATA_URL="https://storage.googleapis.com/news_articles_scraped/CNN/tech.csv"
    # st.cache wraps read_csv so the CSV is only downloaded once per session
    data = st.cache(pd.read_csv)(DATA_URL, nrows=1000)
    data_pol = data[['title',"datetime"]]
    st.write('### Full Dataset', data_pol)
    # key="int" keeps the chosen row in session state for the dashboard page
    int_val = st.number_input('Select a row for the article', min_value=0, max_value=49, step=1, key="int")
    title = st.header(data["title"][int_val])
    audio_backend = f'https://news-analysis-px7gwe6txq-ue.a.run.app/tech/{int_val}/text-to-speech'
    # ask the backend to synthesize the article; only then embed the mp3
    audio = process_tts(audio_backend)
    if audio:
        st.audio(f'https://storage.googleapis.com/audio-output/tech/{int_val}.mp3', format='audio/ogg')
    author = st.write("Author "+data["author"][int_val])
    datetime = st.write(data["datetime"][int_val])
    body = st.write(data["body"][int_val])
    article_url = st.write(data["url"][int_val])
def page_third():
    """Render the NLP dashboard for the article selected on the other page:
    NER, tokenization, sentiment and summarization services."""
    # article row chosen on the selection page, persisted in session state
    x=st.session_state.int
    st.session_state.int = x
    DATA_URL="https://storage.googleapis.com/news_articles_scraped/CNN/tech.csv"
    data = st.cache(pd.read_csv)(DATA_URL)
    nlp_option = st.radio("Services", st.session_state["options"], key="radio")
    if nlp_option=="NER":
        st.write("# NER")
        # run spaCy locally and visualize named entities
        doc=nlp(data["body"][x])
        spacy_streamlit.visualize_ner(doc,labels=nlp.get_pipe('ner').labels, show_table=False)
    if nlp_option=="Tokenize":
        st.write("# Text Tokenization")
        doc=nlp(data["body"][x])
        spacy_streamlit.visualize_tokens(doc, attrs=["text", "pos_", "dep_", "ent_type_"])
    if nlp_option=="Sentiment":
        st.write("# Sentiment")
        # sentiment is computed by the remote backend, not locally
        backend = f'https://news-analysis-px7gwe6txq-ue.a.run.app/tech/{x}/sentiment'
        sentiment = process_sentiment(backend)
        st.write(sentiment ["Sentiment"])
        st.write(sentiment["Subjectivity"])
    if nlp_option=="Summarization":
        st.write("# Summarization")
        backend = f'https://news-analysis-px7gwe6txq-ue.a.run.app/tech/{x}/summarizer'
        summarize = process_summarization(backend)
        st.write(summarize)
def process_sentiment(server_url: str):
    """POST to the backend sentiment endpoint and return its parsed result.

    parameters:
    server_url <str> full URL of the backend sentiment route

    returns:
    <dict> with keys "Sentiment" and "Subjectivity"
    """
    headers = CaseInsensitiveDict()
    headers["accept"] = "application/json"
    # SECURITY: verify=False disables TLS certificate checking; kept for
    # compatibility with the existing backend but should be removed.
    resp = requests.post(server_url, headers=headers, data='', verify=False, timeout=8000)
    result = resp.json()
    sentiment_data = result['sentiment']
    return {
        "Sentiment": sentiment_data["Sentiment"],
        "Subjectivity": sentiment_data["dataframe"]["value"]["1"],
    }
def process_tts(server_url: str):
    """POST to the text-to-speech endpoint; return True when audio was saved.

    parameters:
    server_url <str> full URL of the backend text-to-speech route

    returns:
    <bool> True when the backend reports {"Save": "Successful"}
    """
    headers = CaseInsensitiveDict()
    headers["accept"] = "application/json"
    # SECURITY: verify=False disables TLS certificate checking.
    resp = requests.post(server_url, headers=headers, data='', verify=False, timeout=8000)
    result = resp.json()
    # boolean expression instead of `True if ... else False`
    return result['Save'] == "Successful"
def process_summarization(server_url: str):
    """POST to the summarizer endpoint and return the summary text."""
    request_headers = CaseInsensitiveDict()
    request_headers["accept"] = "application/json"
    response = requests.post(server_url, headers=request_headers, data='', verify=False, timeout=8000)
    payload = response.json()
    return payload["summary"][0]["summary_text"]
if __name__ == "__main__":
main() | 34.125 | 107 | 0.641026 |
24ba8be15d787abea1aa0ca787ceb1cd378f5cf2 | 120 | py | Python | code/chapter-2/exercise2_3.py | Kevin-Oudai/python-solutions | d67f6b14723b000fec0011c3e8156b805eb288f7 | [
"MIT"
] | null | null | null | code/chapter-2/exercise2_3.py | Kevin-Oudai/python-solutions | d67f6b14723b000fec0011c3e8156b805eb288f7 | [
"MIT"
] | null | null | null | code/chapter-2/exercise2_3.py | Kevin-Oudai/python-solutions | d67f6b14723b000fec0011c3e8156b805eb288f7 | [
"MIT"
] | null | null | null | feet = eval(input("Enter a value for feet: "))
meters = feet * 0.305
print("{} feet is {} meters".format(feet, meters))
| 30 | 50 | 0.65 |
6459b5dd917b9ad35653860e6a9307091e61b1d3 | 741 | py | Python | tpcdsDataGenerator/dataParser/warehouse.py | bomeng/smartbench | 349f202cedacc96bb66c2306bb22beb8ab7112b5 | [
"Apache-2.0"
] | null | null | null | tpcdsDataGenerator/dataParser/warehouse.py | bomeng/smartbench | 349f202cedacc96bb66c2306bb22beb8ab7112b5 | [
"Apache-2.0"
] | null | null | null | tpcdsDataGenerator/dataParser/warehouse.py | bomeng/smartbench | 349f202cedacc96bb66c2306bb22beb8ab7112b5 | [
"Apache-2.0"
] | null | null | null | from pyspark.sql import Row
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import HiveContext
import os
# ETL script: load the TPC-DS pipe-delimited warehouse.dat file from HDFS,
# map each line onto the warehouse schema and persist it as a Hive table.
conf = SparkConf()
sc = SparkContext(conf=conf)
spark = HiveContext(sc)
# DATA_HDFS must point at the directory holding the generated .dat files
textDataRDD = sc.textFile(os.environ["DATA_HDFS"] + "warehouse.dat")
# split each "|"-delimited line into the 14 warehouse columns (all strings)
textDataDF = textDataRDD.map(lambda x: x.split("|")).map(lambda x: Row(w_warehouse_sk = x[0],w_warehouse_id = x[1],w_warehouse_name = x[2],w_warehouse_sq_ft = x[3],w_street_number = x[4],w_street_name = x[5],w_street_type = x[6],w_suite_number = x[7],w_city = x[8],w_county = x[9],w_state = x[10],w_zip = x[11],w_country = x[12],w_gmt_offset = x[13])).toDF()
textDataDF.write.saveAsTable("tpcds.warehouse", format="parquet", mode="overwrite")
| 52.928571 | 358 | 0.74359 |
5c6faf11c9060fa540180387172212f505fb7407 | 23,972 | py | Python | tensorboardX/summary.py | n0whereRuoxi/rl-starter-files | b2ae68d544f4665a62b3d782c44008ef050e9b62 | [
"MIT"
] | 10 | 2021-05-31T07:18:08.000Z | 2022-03-19T09:20:11.000Z | tensorboardX/summary.py | n0whereRuoxi/rl-starter-files | b2ae68d544f4665a62b3d782c44008ef050e9b62 | [
"MIT"
] | 1 | 2021-08-03T12:23:01.000Z | 2021-08-10T08:35:22.000Z | tensorboardX/summary.py | n0whereRuoxi/rl-starter-files | b2ae68d544f4665a62b3d782c44008ef050e9b62 | [
"MIT"
] | 2 | 2021-12-09T07:23:21.000Z | 2022-03-31T06:13:10.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import os
import re as _re
# pylint: disable=unused-import
from six.moves import range
from .proto.summary_pb2 import Summary
from .proto.summary_pb2 import HistogramProto
from .proto.summary_pb2 import SummaryMetadata
from .proto.tensor_pb2 import TensorProto
from .proto.tensor_shape_pb2 import TensorShapeProto
from .proto.plugin_pr_curve_pb2 import PrCurvePluginData
from .proto.plugin_text_pb2 import TextPluginData
from .proto.plugin_mesh_pb2 import MeshPluginData
from .proto import layout_pb2
from .x2num import make_np
from .utils import _prepare_video, convert_to_HWC
_INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\w\.]')
def _clean_tag(name):
    """Sanitize a summary tag: replace illegal characters with ``_`` and
    strip leading slashes, logging when the tag had to be changed.

    Historically the first argument to summary ops was a free-form tag;
    node names allow a much smaller character set, hence this cleanup.
    A ``None`` name passes through unchanged.
    """
    if name is None:
        return None
    cleaned = _INVALID_TAG_CHARACTERS.sub('_', name).lstrip('/')
    if cleaned != name:
        logging.info(
            'Summary name %s is illegal; using %s instead.' % (name, cleaned))
        return cleaned
    return name
def _draw_single_box(image, xmin, ymin, xmax, ymax, display_str, color='black', color_text='black', thickness=2):
    """Draw one rectangle (and an optional label) onto a PIL image in place.

    The label, when given, is rendered inside a filled box anchored to the
    bottom-left corner of the rectangle. Returns the same image object.
    """
    from PIL import ImageDraw, ImageFont
    # NOTE(review): ImageFont.getsize was removed in Pillow 10 -- this call
    # presumably needs getbbox()/getlength() on modern Pillow; confirm the
    # supported Pillow range.
    font = ImageFont.load_default()
    draw = ImageDraw.Draw(image)
    (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
    # closed polyline tracing the four box edges
    draw.line([(left, top), (left, bottom), (right, bottom),
               (right, top), (left, top)], width=thickness, fill=color)
    if display_str:
        text_bottom = bottom
        # Reverse list and print from bottom to top.
        text_width, text_height = font.getsize(display_str)
        margin = np.ceil(0.05 * text_height)
        # filled background behind the text so it stays readable
        draw.rectangle(
            [(left, text_bottom - text_height - 2 * margin),
             (left + text_width, text_bottom)], fill=color
        )
        draw.text(
            (left + margin, text_bottom - text_height - margin),
            display_str, fill=color_text, font=font
        )
    return image
def hparams(hparam_dict=None, metric_dict=None):
    """Build the three summaries the TensorBoard hparams plugin expects.

    Args:
        hparam_dict: mapping hparam name -> value (str, bool, int, float or
            callable; None values are skipped, anything else is typed UNSET).
        metric_dict: mapping metric tag -> value; only the tags are used here.

    Returns:
        (experiment, session_start_info, session_end_info) Summary protobufs.
    """
    from tensorboardX.proto.plugin_hparams_pb2 import HParamsPluginData, SessionEndInfo, SessionStartInfo
    from tensorboardX.proto.api_pb2 import Experiment, HParamInfo, MetricInfo, MetricName, Status, DataType
    from six import string_types
    PLUGIN_NAME = 'hparams'
    PLUGIN_DATA_VERSION = 0
    EXPERIMENT_TAG = '_hparams_/experiment'
    SESSION_START_INFO_TAG = '_hparams_/session_start_info'
    SESSION_END_INFO_TAG = '_hparams_/session_end_info'
    # TODO: expose other parameters in the future.
    # hp = HParamInfo(name='lr',display_name='learning rate', type=DataType.DATA_TYPE_FLOAT64, domain_interval=Interval(min_value=10, max_value=100)) # noqa E501
    # mt = MetricInfo(name=MetricName(tag='accuracy'), display_name='accuracy', description='', dataset_type=DatasetType.DATASET_VALIDATION) # noqa E501
    # exp = Experiment(name='123', description='456', time_created_secs=100.0, hparam_infos=[hp], metric_infos=[mt], user='tw') # noqa E501
    hps = []
    ssi = SessionStartInfo()
    # Each value type maps to a dedicated proto field plus an HParamInfo entry.
    for k, v in hparam_dict.items():
        if v is None:
            continue
        if isinstance(v, string_types):
            ssi.hparams[k].string_value = v
            hps.append(HParamInfo(name=k, type=DataType.Value("DATA_TYPE_STRING")))
            continue
        # bool must be checked before int: bool is a subclass of int.
        if isinstance(v, bool):
            ssi.hparams[k].bool_value = v
            hps.append(HParamInfo(name=k, type=DataType.Value("DATA_TYPE_BOOL")))
            continue
        if isinstance(v, int) or isinstance(v, float):
            v = make_np(v)[0]
            ssi.hparams[k].number_value = v
            hps.append(HParamInfo(name=k, type=DataType.Value("DATA_TYPE_FLOAT64")))
            continue
        # callables are recorded by name so e.g. activation fns are readable
        if callable(v):
            ssi.hparams[k].string_value = getattr(v, '__name__', str(v))
            hps.append(HParamInfo(name=k, type=DataType.Value("DATA_TYPE_STRING")))
            continue
        hps.append(HParamInfo(name=k, type=DataType.Value("DATA_TYPE_UNSET")))
    content = HParamsPluginData(session_start_info=ssi, version=PLUGIN_DATA_VERSION)
    smd = SummaryMetadata(plugin_data=SummaryMetadata.PluginData(plugin_name=PLUGIN_NAME,
                                                                 content=content.SerializeToString()))
    ssi = Summary(value=[Summary.Value(tag=SESSION_START_INFO_TAG, metadata=smd)])
    mts = [MetricInfo(name=MetricName(tag=k)) for k in metric_dict.keys()]
    exp = Experiment(hparam_infos=hps, metric_infos=mts)
    content = HParamsPluginData(experiment=exp, version=PLUGIN_DATA_VERSION)
    smd = SummaryMetadata(plugin_data=SummaryMetadata.PluginData(plugin_name=PLUGIN_NAME,
                                                                 content=content.SerializeToString()))
    exp = Summary(value=[Summary.Value(tag=EXPERIMENT_TAG, metadata=smd)])
    sei = SessionEndInfo(status=Status.Value("STATUS_SUCCESS"))
    content = HParamsPluginData(session_end_info=sei, version=PLUGIN_DATA_VERSION)
    smd = SummaryMetadata(plugin_data=SummaryMetadata.PluginData(plugin_name=PLUGIN_NAME,
                                                                 content=content.SerializeToString()))
    sei = Summary(value=[Summary.Value(tag=SESSION_END_INFO_TAG, metadata=smd)])
    return exp, ssi, sei
def scalar(name, scalar, display_name="", summary_description=""):
    """Create a `Summary` protobuf holding a single scalar value.

    Args:
        name: Tag for the value; also the series name in TensorBoard.
        scalar: A real numeric value (anything `make_np` accepts) that
            squeezes to a 0-d array.
        display_name: Plot title; falls back to `name` when empty.
        summary_description: Text shown behind the info icon in TensorBoard.

    Returns:
        A `Summary` protobuf with one simple value.

    Raises:
        AssertionError: If the input is not 0-dimensional after squeezing.
    """
    tag = _clean_tag(name)
    arr = make_np(scalar)
    assert arr.squeeze().ndim == 0, 'scalar should be 0D'
    value = float(arr)
    if display_name == "" and summary_description == "":
        return Summary(value=[Summary.Value(tag=tag, simple_value=value)])
    metadata = SummaryMetadata(display_name=display_name,
                               summary_description=summary_description)
    return Summary(value=[Summary.Value(tag=tag, simple_value=value, metadata=metadata)])
def histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_counts):
    """Build a `Summary` protobuf from pre-computed histogram statistics.

    Args:
        name: Series name in TensorBoard.
        min: Minimum value (float or int).
        max: Maximum value (float or int).
        num: Number of values.
        sum: Sum of all values.
        sum_squares: Sum of squares of all values.
        bucket_limits: Upper bound per bucket.
        bucket_counts: Count per bucket.

    Returns:
        A `Summary` protobuf containing the histogram.
    """
    proto = HistogramProto(min=min,
                           max=max,
                           num=num,
                           sum=sum,
                           sum_squares=sum_squares,
                           bucket_limit=bucket_limits,
                           bucket=bucket_counts)
    return Summary(value=[Summary.Value(tag=name, histo=proto)])
def histogram(name, values, bins, max_bins=None):
    """Build a `Summary` protobuf containing a histogram of `values`.

    Args:
        name: Series name in TensorBoard (sanitized via `_clean_tag`).
        values: A real numeric tensor/array of any shape.
        bins: Binning specification forwarded to `numpy.histogram`.
        max_bins: Optional cap on the number of emitted buckets.

    Returns:
        A `Summary` protobuf containing the histogram.
    """
    tag = _clean_tag(name)
    data = make_np(values)
    proto = make_histogram(data.astype(float), bins, max_bins)
    return Summary(value=[Summary.Value(tag=tag, histo=proto)])
def make_histogram(values, bins, max_bins=None):
    """Convert values into a histogram proto using logic from histogram.cc.

    When `max_bins` is set and exceeded, adjacent buckets are merged in
    groups of `num_bins // max_bins`. Leading/trailing empty buckets are
    trimmed so only the support of the distribution is stored.
    """
    if values.size == 0:
        raise ValueError('The input has no element.')
    values = values.reshape(-1)
    counts, limits = np.histogram(values, bins=bins)
    num_bins = len(counts)
    if max_bins is not None and num_bins > max_bins:
        subsampling = num_bins // max_bins
        subsampling_remainder = num_bins % subsampling
        # pad so the counts split evenly into groups of `subsampling`
        if subsampling_remainder != 0:
            counts = np.pad(counts, pad_width=[[0, subsampling - subsampling_remainder]],
                            mode="constant", constant_values=0)
        counts = counts.reshape(-1, subsampling).sum(axis=-1)
        # keep every `subsampling`-th limit; the last limit stays exact
        new_limits = np.empty((counts.size + 1,), limits.dtype)
        new_limits[:-1] = limits[:-1:subsampling]
        new_limits[-1] = limits[-1]
        limits = new_limits
    # Find the first and the last bin defining the support of the histogram:
    cum_counts = np.cumsum(np.greater(counts, 0, dtype=np.int32))
    start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side="right")
    start = int(start)
    end = int(end) + 1
    del cum_counts
    # TensorBoard only includes the right bin limits. To still have the leftmost limit
    # included, we include an empty bin left.
    # If start == 0, we need to add an empty one left, otherwise we can just include the bin left to the
    # first nonzero-count bin:
    counts = counts[start - 1:end] if start > 0 else np.concatenate([[0], counts[:end]])
    limits = limits[start:end + 1]
    if counts.size == 0 or limits.size == 0:
        raise ValueError('The histogram is empty, please file a bug report.')
    sum_sq = values.dot(values)
    return HistogramProto(min=values.min(),
                          max=values.max(),
                          num=len(values),
                          sum=values.sum(),
                          sum_squares=sum_sq,
                          bucket_limit=limits.tolist(),
                          bucket=counts.tolist())
def image(tag, tensor, rescale=1, dataformats='CHW'):
    """Build a `Summary` protobuf containing a single image.

    The input is converted to HWC layout; channel counts of 1, 3 and 4 are
    interpreted as grayscale, RGB and RGBA respectively.

    Args:
        tag: Series name in TensorBoard.
        tensor: Image data in the layout given by `dataformats`. Either
            float values in [0, 1] or uint8 values in [0, 255].
        rescale: Display scale factor applied when encoding.
        dataformats: Layout string of the input, e.g. 'CHW' or 'HWC'.

    Returns:
        A `Summary` protobuf containing the encoded image.
    """
    tag = _clean_tag(tag)
    arr = convert_to_HWC(make_np(tensor), dataformats)
    # Do not assume callers pass [0, 255]; detect the scale from the dtype.
    if arr.dtype != np.uint8:
        arr = (arr * 255.0).astype(np.uint8)
    encoded = make_image(arr, rescale=rescale)
    return Summary(value=[Summary.Value(tag=tag, image=encoded)])
def image_boxes(tag, tensor_image, tensor_boxes, rescale=1, dataformats='CHW', labels=None):
    '''Build a `Summary` protobuf with an image overlaid with bounding boxes.'''
    img = convert_to_HWC(make_np(tensor_image), dataformats)
    boxes = make_np(tensor_boxes)
    # floats in [0, 1] are scaled up; uint8 data passes through unchanged
    if img.dtype != np.uint8:
        img = (img * 255.0).astype(np.uint8)
    rendered = make_image(img,
                          rescale=rescale,
                          rois=boxes, labels=labels)
    return Summary(value=[Summary.Value(tag=tag, image=rendered)])
def draw_boxes(disp_image, boxes, labels=None):
    """Draw every bounding box (xyxy format) onto `disp_image` in red.

    Args:
        disp_image: PIL image to draw on.
        boxes: array with one (xmin, ymin, xmax, ymax) row per box.
        labels: optional sequence of per-box label strings.

    Returns:
        The image with all boxes (and labels, when given) drawn.
    """
    # enumerate over rows instead of `for i in range(shape[0])` + indexing
    for i, box in enumerate(boxes):
        disp_image = _draw_single_box(disp_image,
                                      box[0],
                                      box[1],
                                      box[2],
                                      box[3],
                                      display_str=None if labels is None else labels[i],
                                      color='Red')
    return disp_image
def make_image(tensor, rescale=1, rois=None, labels=None):
    """Convert an HWC uint8 numpy image to an Image protobuf.

    Args:
        tensor: uint8 array of shape (height, width, channels).
        rescale: scale factor applied to the encoded image dimensions.
        rois: optional (num_boxes, 4) xyxy boxes drawn before encoding.
        labels: optional per-box label strings (used with `rois`).

    Returns:
        A `Summary.Image` protobuf with the PNG-encoded bytes.
    """
    from PIL import Image
    height, width, channel = tensor.shape
    scaled_height = int(height * rescale)
    scaled_width = int(width * rescale)
    image = Image.fromarray(tensor)
    if rois is not None:
        image = draw_boxes(image, rois, labels=labels)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    image = image.resize((scaled_width, scaled_height), Image.LANCZOS)
    import io
    output = io.BytesIO()
    image.save(output, format='PNG')
    image_string = output.getvalue()
    output.close()
    return Summary.Image(height=height,
                         width=width,
                         colorspace=channel,
                         encoded_image_string=image_string)
def video(tag, tensor, fps=4):
    """Build a `Summary` protobuf containing an animated-GIF video."""
    tag = _clean_tag(tag)
    frames = _prepare_video(make_np(tensor))
    # If user passes in uint8, then we don't need to rescale by 255
    if frames.dtype != np.uint8:
        frames = (frames * 255.0).astype(np.uint8)
    encoded = make_video(frames, fps)
    return Summary(value=[Summary.Value(tag=tag, image=encoded)])
def make_video(tensor, fps):
    """Encode a (t, h, w, c) uint8 frame stack as a GIF Image protobuf.

    Returns None (with a printed hint) when moviepy or its runtime
    dependencies are missing.
    """
    try:
        import moviepy  # noqa: F401
    except ImportError:
        print('add_video needs package moviepy')
        return
    try:
        from moviepy import editor as mpy
    except ImportError:
        print("moviepy is installed, but can't import moviepy.editor.",
              "Some packages could be missing [imageio, requests]")
        return
    import tempfile
    t, h, w, c = tensor.shape
    # encode sequence of images into gif string
    clip = mpy.ImageSequenceClip(list(tensor), fps=fps)
    # delete=False: the file must survive the close so write_gif can reopen it
    filename = tempfile.NamedTemporaryFile(suffix='.gif', delete=False).name
    # moviepy >= 1.0.0 use logger=None to suppress output.
    try:
        clip.write_gif(filename, verbose=False, logger=None)
    except TypeError:
        # older moviepy does not accept the logger kwarg
        logging.warning('Upgrade to moviepy >= 1.0.0 to supress the progress bar.')
        clip.write_gif(filename, verbose=False)
    with open(filename, 'rb') as f:
        tensor_string = f.read()
    try:
        os.remove(filename)
    except OSError:
        logging.warning('The temporary file used by moviepy cannot be deleted.')
    return Summary.Image(height=h, width=w, colorspace=c, encoded_image_string=tensor_string)
def audio(tag, tensor, sample_rate=44100):
    """
    Args:
        tensor: A 2-D float Tensor of shape `[frames, channels]` where `channels` is 1 or 2.
            The values should between [-1, 1]. We also accepts 1-D tensor.
    """
    import io
    import soundfile
    data = make_np(tensor)
    # clamp out-of-range samples rather than letting the encoder wrap them
    if abs(data).max() > 1:
        print('warning: audio amplitude out of range, auto clipped.')
        data = data.clip(-1, 1)
    if data.ndim == 1:  # old API, which expects single channel audio
        data = np.expand_dims(data, axis=1)
    assert data.ndim == 2, 'Input tensor should be 2 dimensional.'
    length_frames, num_channels = data.shape
    assert num_channels in (1, 2), 'The second dimension should be 1 or 2.'
    with io.BytesIO() as fio:
        soundfile.write(fio, data, samplerate=sample_rate, format='wav')
        encoded = fio.getvalue()
    proto = Summary.Audio(sample_rate=sample_rate,
                          num_channels=num_channels,
                          length_frames=length_frames,
                          encoded_audio_string=encoded,
                          content_type='audio/wav')
    return Summary(value=[Summary.Value(tag=tag, audio=proto)])
def custom_scalars(layout):
    """Build the config summary for TensorBoard's CUSTOM SCALARS dashboard.

    Args:
        layout: dict mapping category title -> dict mapping chart title to a
            (type, tags) pair; type 'Margin' expects tags = [value, lower,
            upper], anything else produces a multiline chart from the tags.

    Returns:
        A `Summary` protobuf holding the serialized layout under the
        reserved 'custom_scalars__config__' tag.
    """
    # Removed unused locals `categoriesnames` and `layouts`, renamed the
    # misspelled `chart_meatadata`, and no longer rebind the `layout` param.
    categories = []
    for category_title, charts_spec in layout.items():
        charts = []
        for chart_name, chart_metadata in charts_spec.items():
            tags = chart_metadata[1]
            if chart_metadata[0] == 'Margin':
                # margin charts need exactly value/lower/upper tags
                assert len(tags) == 3
                mgcc = layout_pb2.MarginChartContent(series=[layout_pb2.MarginChartContent.Series(value=tags[0],
                                                                                                  lower=tags[1],
                                                                                                  upper=tags[2])])
                chart = layout_pb2.Chart(title=chart_name, margin=mgcc)
            else:
                mlcc = layout_pb2.MultilineChartContent(tag=tags)
                chart = layout_pb2.Chart(title=chart_name, multiline=mlcc)
            charts.append(chart)
        categories.append(layout_pb2.Category(title=category_title, chart=charts))
    layout_proto = layout_pb2.Layout(category=categories)
    plugin_data = SummaryMetadata.PluginData(plugin_name='custom_scalars')
    smd = SummaryMetadata(plugin_data=plugin_data)
    tensor = TensorProto(dtype='DT_STRING',
                         string_val=[layout_proto.SerializeToString()],
                         tensor_shape=TensorShapeProto())
    return Summary(value=[Summary.Value(tag='custom_scalars__config__', tensor=tensor, metadata=smd)])
def text(tag, text):
    """Build a `Summary` protobuf for the TensorBoard text plugin.

    Args:
        tag: Data identifier; '/text_summary' is appended for the plugin.
        text: The string to display (stored UTF-8 encoded).

    Returns:
        A `Summary` protobuf holding the text as a string tensor.
    """
    # removed unused `import json`
    plugin_data = SummaryMetadata.PluginData(
        plugin_name='text', content=TextPluginData(version=0).SerializeToString())
    smd = SummaryMetadata(plugin_data=plugin_data)
    tensor = TensorProto(dtype='DT_STRING',
                         string_val=[text.encode(encoding='utf_8')],
                         tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]))
    return Summary(value=[Summary.Value(tag=tag + '/text_summary', metadata=smd, tensor=tensor)])
def pr_curve_raw(tag, tp, fp, tn, fn, precision, recall, num_thresholds=127, weights=None):
    """Build a PR-curve summary from pre-computed counts and metrics.

    Args:
        tag: Data identifier.
        tp, fp, tn, fn: per-threshold true/false positive/negative counts.
        precision, recall: per-threshold precision and recall values.
        num_thresholds: number of thresholds (clamped to 127).
        weights: unused; kept for signature compatibility.

    Returns:
        A `Summary` protobuf for the pr_curves plugin.
    """
    # weird, value > 127 breaks protobuf; clamp the same way `pr_curve` does
    num_thresholds = min(num_thresholds, 127)
    data = np.stack((tp, fp, tn, fn, precision, recall))
    pr_curve_plugin_data = PrCurvePluginData(
        version=0, num_thresholds=num_thresholds).SerializeToString()
    PluginData = SummaryMetadata.PluginData(
        plugin_name='pr_curves', content=pr_curve_plugin_data)
    smd = SummaryMetadata(plugin_data=PluginData)
    tensor = TensorProto(dtype='DT_FLOAT',
                         float_val=data.reshape(-1).tolist(),
                         tensor_shape=TensorShapeProto(
                             dim=[TensorShapeProto.Dim(size=data.shape[0]), TensorShapeProto.Dim(size=data.shape[1])]))
    return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
def pr_curve(tag, labels, predictions, num_thresholds=127, weights=None):
    """Build a PR-curve summary computed from labels and predictions."""
    # weird, value > 127 breaks protobuf
    num_thresholds = min(num_thresholds, 127)
    curve = compute_curve(labels, predictions,
                          num_thresholds=num_thresholds, weights=weights)
    plugin_content = PrCurvePluginData(
        version=0, num_thresholds=num_thresholds).SerializeToString()
    plugin_data = SummaryMetadata.PluginData(
        plugin_name='pr_curves', content=plugin_content)
    smd = SummaryMetadata(plugin_data=plugin_data)
    shape = TensorShapeProto(
        dim=[TensorShapeProto.Dim(size=curve.shape[0]), TensorShapeProto.Dim(size=curve.shape[1])])
    tensor = TensorProto(dtype='DT_FLOAT',
                         float_val=curve.reshape(-1).tolist(),
                         tensor_shape=shape)
    return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py
def compute_curve(labels, predictions, num_thresholds=None, weights=None):
    """Compute PR-curve data as stacked (tp, fp, tn, fn, precision, recall).

    Mirrors tensorboard/plugins/pr_curve/summary.py.

    Args:
        labels: binary ground-truth array (0/1).
        predictions: scores in [0, 1], same shape as `labels`.
        num_thresholds: number of thresholds/buckets to evaluate.
        weights: optional per-example weights (defaults to 1.0).

    Returns:
        np.ndarray of shape (6, num_thresholds).
    """
    _MINIMUM_COUNT = 1e-7

    if weights is None:
        weights = 1.0

    # Compute bins of true positives and false positives.
    bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
    # `np.float` was removed in NumPy 1.24; the builtin float is equivalent.
    float_labels = labels.astype(float)
    histogram_range = (0, num_thresholds - 1)
    tp_buckets, _ = np.histogram(
        bucket_indices,
        bins=num_thresholds,
        range=histogram_range,
        weights=float_labels * weights)
    fp_buckets, _ = np.histogram(
        bucket_indices,
        bins=num_thresholds,
        range=histogram_range,
        weights=(1.0 - float_labels) * weights)

    # Obtain the reverse cumulative sum.
    tp = np.cumsum(tp_buckets[::-1])[::-1]
    fp = np.cumsum(fp_buckets[::-1])[::-1]
    tn = fp[0] - fp
    fn = tp[0] - tp
    # _MINIMUM_COUNT guards against division by zero in empty buckets
    precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
    recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
    return np.stack((tp, fp, tn, fn, precision, recall))
def _get_tensor_summary(tag, tensor, content_type, json_config):
    """Wrap one rank-3 mesh tensor into a `Summary.Value` for the mesh plugin."""
    plugin_payload = MeshPluginData(
        version=0,
        name=tag,
        content_type=content_type,
        json_config=json_config,
        shape=tensor.shape,
    ).SerializeToString()
    smd = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(
            plugin_name='mesh',
            content=plugin_payload))
    dims = [
        TensorShapeProto.Dim(size=tensor.shape[0]),
        TensorShapeProto.Dim(size=tensor.shape[1]),
        TensorShapeProto.Dim(size=tensor.shape[2]),
    ]
    tensor_proto = TensorProto(dtype='DT_FLOAT',
                               float_val=tensor.reshape(-1).tolist(),
                               tensor_shape=TensorShapeProto(dim=dims))
    # the content type is appended so each tensor gets a distinct tag
    return Summary.Value(
        tag='{}_{}'.format(tag, content_type),
        tensor=tensor_proto,
        metadata=smd,
    )
def mesh(tag, vertices, colors, faces, config_dict=None):
    """Build a merged `Summary` for the TensorBoard mesh plugin.

    Args:
        tag: base data identifier; per-tensor tags get a content-type suffix.
        vertices: rank-3 vertex tensor (required by `_get_tensor_summary`).
        colors: per-vertex color tensor, or None to skip.
        faces: face-index tensor, or None to skip.
        config_dict: optional scene configuration serialized as JSON.

    Returns:
        A `Summary` protobuf with one value per provided tensor.
    """
    import json
    # hoist the loop-invariant config serialization out of the loop
    json_config = json.dumps(config_dict, sort_keys=True)
    summaries = []
    # content_type codes: 1 = vertices, 2 = faces, 3 = colors
    tensors = [
        (vertices, 1),
        (faces, 2),
        (colors, 3)
    ]
    for tensor, content_type in tensors:
        if tensor is None:
            continue
        summaries.append(
            _get_tensor_summary(tag, make_np(tensor), content_type, json_config))
    return Summary(value=summaries)
| 41.618056 | 162 | 0.643793 |
0e6bb685e45102d9bb9c633320ae0aeb4cb480e9 | 8,981 | py | Python | tests/extras/datasets/pandas/test_feather_dataset.py | daniel-falk/kedro | 19187199339ddc4a757aaaa328f319ec4c1e452a | [
"Apache-2.0"
] | 2,047 | 2022-01-10T15:22:12.000Z | 2022-03-31T13:38:56.000Z | tests/extras/datasets/pandas/test_feather_dataset.py | daniel-falk/kedro | 19187199339ddc4a757aaaa328f319ec4c1e452a | [
"Apache-2.0"
] | 170 | 2022-01-10T12:44:31.000Z | 2022-03-31T17:01:24.000Z | tests/extras/datasets/pandas/test_feather_dataset.py | daniel-falk/kedro | 19187199339ddc4a757aaaa328f319ec4c1e452a | [
"Apache-2.0"
] | 112 | 2022-01-10T19:15:24.000Z | 2022-03-30T11:20:52.000Z | from pathlib import Path, PurePosixPath
import pandas as pd
import pytest
from fsspec.implementations.http import HTTPFileSystem
from fsspec.implementations.local import LocalFileSystem
from gcsfs import GCSFileSystem
from pandas.testing import assert_frame_equal
from s3fs.core import S3FileSystem
from kedro.extras.datasets.pandas import FeatherDataSet
from kedro.io import DataSetError
from kedro.io.core import PROTOCOL_DELIMITER, Version
@pytest.fixture
def filepath_feather(tmp_path):
    # POSIX-style path to a per-test temporary .feather file
    return (tmp_path / "test.feather").as_posix()
@pytest.fixture
def feather_data_set(filepath_feather, load_args, fs_args):
    # unversioned data set; load_args/fs_args come from (indirect) parametrize
    return FeatherDataSet(
        filepath=filepath_feather, load_args=load_args, fs_args=fs_args
    )
@pytest.fixture
def versioned_feather_data_set(filepath_feather, load_version, save_version):
    # versioned variant of the data set under test
    return FeatherDataSet(
        filepath=filepath_feather, version=Version(load_version, save_version)
    )
@pytest.fixture
def dummy_dataframe():
    # small deterministic frame used for save/load round-trip checks
    return pd.DataFrame({"col1": [1, 2], "col2": [4, 5], "col3": [5, 6]})
class TestFeatherDataSet:
def test_save_and_load(self, feather_data_set, dummy_dataframe):
"""Test saving and reloading the data set."""
feather_data_set.save(dummy_dataframe)
reloaded = feather_data_set.load()
assert_frame_equal(dummy_dataframe, reloaded)
def test_exists(self, feather_data_set, dummy_dataframe):
"""Test `exists` method invocation for both existing and
nonexistent data set."""
assert not feather_data_set.exists()
feather_data_set.save(dummy_dataframe)
assert feather_data_set.exists()
@pytest.mark.parametrize(
"load_args", [{"k1": "v1", "index": "value"}], indirect=True
)
def test_load_extra_params(self, feather_data_set, load_args):
"""Test overriding the default load arguments."""
for key, value in load_args.items():
assert feather_data_set._load_args[key] == value
@pytest.mark.parametrize(
"load_args,save_args",
[
({"storage_options": {"a": "b"}}, {}),
({}, {"storage_options": {"a": "b"}}),
({"storage_options": {"a": "b"}}, {"storage_options": {"x": "y"}}),
],
)
def test_storage_options_dropped(self, load_args, save_args, caplog, tmp_path):
filepath = str(tmp_path / "test.csv")
ds = FeatherDataSet(filepath=filepath, load_args=load_args, save_args=save_args)
records = [r for r in caplog.records if r.levelname == "WARNING"]
expected_log_message = (
f"Dropping `storage_options` for {filepath}, "
f"please specify them under `fs_args` or `credentials`."
)
assert records[0].getMessage() == expected_log_message
assert "storage_options" not in ds._save_args
assert "storage_options" not in ds._load_args
def test_load_missing_file(self, feather_data_set):
"""Check the error when trying to load missing file."""
pattern = r"Failed while loading data from data set FeatherDataSet\(.*\)"
with pytest.raises(DataSetError, match=pattern):
feather_data_set.load()
    @pytest.mark.parametrize(
        "filepath,instance_type,load_path",
        [
            ("s3://bucket/file.feather", S3FileSystem, "s3://bucket/file.feather"),
            ("file:///tmp/test.feather", LocalFileSystem, "/tmp/test.feather"),
            ("/tmp/test.feather", LocalFileSystem, "/tmp/test.feather"),
            ("gcs://bucket/file.feather", GCSFileSystem, "gcs://bucket/file.feather"),
            (
                "https://example.com/file.feather",
                HTTPFileSystem,
                "https://example.com/file.feather",
            ),
        ],
    )
    def test_protocol_usage(self, filepath, instance_type, load_path, mocker):
        """The URL scheme selects the fsspec filesystem and the stored path."""
        data_set = FeatherDataSet(filepath=filepath)
        assert isinstance(data_set._fs, instance_type)
        # The protocol prefix is stripped from the stored filepath.
        path = filepath.split(PROTOCOL_DELIMITER, 1)[-1]
        assert str(data_set._filepath) == path
        assert isinstance(data_set._filepath, PurePosixPath)
        # load() must delegate to pandas.read_feather with the full load_path.
        mock_pandas_call = mocker.patch("pandas.read_feather")
        data_set.load()
        assert mock_pandas_call.call_count == 1
        assert mock_pandas_call.call_args_list[0][0][0] == load_path
    def test_catalog_release(self, mocker):
        """release() must invalidate the fsspec cache for the file path."""
        fs_mock = mocker.patch("fsspec.filesystem").return_value
        filepath = "test.feather"
        data_set = FeatherDataSet(filepath=filepath)
        data_set.release()
        fs_mock.invalidate_cache.assert_called_once_with(filepath)
class TestFeatherDataSetVersioned:
    """Tests for FeatherDataSet with versioning enabled."""
    def test_version_str_repr(self, load_version, save_version):
        """Test that version is in string representation of the class instance
        when applicable."""
        filepath = "test.feather"
        ds = FeatherDataSet(filepath=filepath)
        ds_versioned = FeatherDataSet(
            filepath=filepath, version=Version(load_version, save_version)
        )
        assert filepath in str(ds)
        assert "version" not in str(ds)
        assert filepath in str(ds_versioned)
        ver_str = f"version=Version(load={load_version}, save='{save_version}')"
        assert ver_str in str(ds_versioned)
        assert "FeatherDataSet" in str(ds_versioned)
        assert "FeatherDataSet" in str(ds)
        assert "protocol" in str(ds_versioned)
        assert "protocol" in str(ds)
    def test_save_and_load(self, versioned_feather_data_set, dummy_dataframe):
        """Test that saved and reloaded data matches the original one for
        the versioned data set."""
        versioned_feather_data_set.save(dummy_dataframe)
        reloaded_df = versioned_feather_data_set.load()
        assert_frame_equal(dummy_dataframe, reloaded_df)
    def test_no_versions(self, versioned_feather_data_set):
        """Check the error if no versions are available for load."""
        pattern = r"Did not find any versions for FeatherDataSet\(.+\)"
        with pytest.raises(DataSetError, match=pattern):
            versioned_feather_data_set.load()
    def test_exists(self, versioned_feather_data_set, dummy_dataframe):
        """Test `exists` method invocation for versioned data set."""
        assert not versioned_feather_data_set.exists()
        versioned_feather_data_set.save(dummy_dataframe)
        assert versioned_feather_data_set.exists()
    def test_prevent_overwrite(self, versioned_feather_data_set, dummy_dataframe):
        """Check the error when attempting to overwrite the data set if the
        corresponding feather file for a given save version already exists."""
        versioned_feather_data_set.save(dummy_dataframe)
        pattern = (
            r"Save path \`.+\` for FeatherDataSet\(.+\) must "
            r"not exist if versioning is enabled\."
        )
        with pytest.raises(DataSetError, match=pattern):
            versioned_feather_data_set.save(dummy_dataframe)
    @pytest.mark.parametrize(
        "load_version", ["2019-01-01T23.59.59.999Z"], indirect=True
    )
    @pytest.mark.parametrize(
        "save_version", ["2019-01-02T00.00.00.000Z"], indirect=True
    )
    def test_save_version_warning(
        self, versioned_feather_data_set, load_version, save_version, dummy_dataframe
    ):
        """Check the warning when saving to the path that differs from
        the subsequent load path."""
        pattern = (
            rf"Save version `{save_version}` did not match load version "
            rf"`{load_version}` for FeatherDataSet\(.+\)"
        )
        with pytest.warns(UserWarning, match=pattern):
            versioned_feather_data_set.save(dummy_dataframe)
    def test_http_filesystem_no_versioning(self):
        """Versioning must be rejected for HTTP(s) file systems."""
        pattern = r"HTTP\(s\) DataSet doesn't support versioning\."
        with pytest.raises(DataSetError, match=pattern):
            FeatherDataSet(
                filepath="https://example.com/file.feather", version=Version(None, None)
            )
    def test_versioning_existing_dataset(
        self, feather_data_set, versioned_feather_data_set, dummy_dataframe
    ):
        """Check the error when attempting to save a versioned dataset on top of an
        already existing (non-versioned) dataset."""
        feather_data_set.save(dummy_dataframe)
        assert feather_data_set.exists()
        assert feather_data_set._filepath == versioned_feather_data_set._filepath
        pattern = (
            f"(?=.*file with the same name already exists in the directory)"
            f"(?=.*{versioned_feather_data_set._filepath.parent.as_posix()})"
        )
        with pytest.raises(DataSetError, match=pattern):
            versioned_feather_data_set.save(dummy_dataframe)
        # Remove non-versioned dataset and try again
        Path(feather_data_set._filepath.as_posix()).unlink()
        versioned_feather_data_set.save(dummy_dataframe)
        assert versioned_feather_data_set.exists()
| 40.638009 | 88 | 0.675649 |
7ba36b240bf2093ac4ee7d24a01accfa8ea0620b | 1,165 | py | Python | src/tournament_poller/polls/migrations/0001_initial.py | jorses/tfg | 8cf15997378d782a2f2bdcff929830af9b3d9840 | [
"MIT"
] | null | null | null | src/tournament_poller/polls/migrations/0001_initial.py | jorses/tfg | 8cf15997378d782a2f2bdcff929830af9b3d9840 | [
"MIT"
] | null | null | null | src/tournament_poller/polls/migrations/0001_initial.py | jorses/tfg | 8cf15997378d782a2f2bdcff929830af9b3d9840 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-03-20 21:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First (schema-creating) migration of the `polls` app; auto-generated.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        # Django splits the FK out of CreateModel('Choice') because the
        # referenced Question table is only created later in this list.
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
        ),
    ]
| 31.486486 | 114 | 0.577682 |
f5e5ae09f5eaf30e134d44e6c0ca4f0bb76a10c1 | 2,818 | py | Python | modules/s3db/tr.py | sungkomp/sambro | 4618d785d03424d122206d88d9ebfb6971486e2c | [
"MIT"
] | 1 | 2017-10-06T23:18:01.000Z | 2017-10-06T23:18:01.000Z | modules/s3db/tr.py | sungkomp/sambro | 4618d785d03424d122206d88d9ebfb6971486e2c | [
"MIT"
] | null | null | null | modules/s3db/tr.py | sungkomp/sambro | 4618d785d03424d122206d88d9ebfb6971486e2c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" Turkey-specific Tables
@copyright: 2015-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3TurkeyIdentityModel",)
from gluon import *
from ..s3 import *
# =============================================================================
class S3TurkeyIdentityModel(S3Model):
    """ Model for Turkish Identity Cards """
    # Table names this model defines (Sahana S3Model convention).
    names = ("tr_identity",)
    def model(self):
        # Translation helper for field labels.
        T = current.T
        # -------------------------------------------------------------------------
        # Turkish Identity
        #
        tablename = "tr_identity"
        # One identity record per person, with an L1-L3 administrative
        # location (no map widget) and the card's registry numbers.
        self.define_table(tablename,
                          self.pr_person_id(),
                          self.gis_location_id(
                              widget = S3LocationSelector(levels=("L1", "L2", "L3"),
                                                          show_map=False,
                                                          ),
                              ),
                          Field("volume_no",
                                label = T("Volume No"),
                                ),
                          Field("family_order_no", "integer",
                                label = T("Family Order No"),
                                ),
                          Field("order_no", "integer",
                                label = T("Order No"),
                                ),
                          *s3_meta_fields()
                          )
        # ---------------------------------------------------------------------
        # Return global names to s3.*
        #
        return {}
# END =========================================================================
| 38.081081 | 83 | 0.481192 |
b170485e34a29f215d5b9e2be85f509fe1ae2367 | 4,103 | py | Python | policy_eval/dual_dice.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | policy_eval/dual_dice.py | davidfitzek/google-research | eb2b142f26e39aac1dcbb768417465ae9d4e5af6 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | policy_eval/dual_dice.py | davidfitzek/google-research | eb2b142f26e39aac1dcbb768417465ae9d4e5af6 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of DualDICE."""
import typing
import tensorflow as tf
from tensorflow_addons import optimizers as tfa_optimizers
import tqdm
from policy_eval.q_fitter import CriticNet
class DualDICE(object):
  """Implementation of DualDICE."""
  def __init__(self, state_dim, action_dim, weight_decay):
    # nu and zeta are the two function approximators of the DualDICE
    # saddle-point objective; each maps (state, action) -> scalar.
    self.nu = CriticNet(state_dim, action_dim)
    self.zeta = CriticNet(state_dim, action_dim)
    # Separate AdamW optimizers; zeta uses a 10x larger learning rate.
    self.nu_optimizer = tfa_optimizers.AdamW(
        learning_rate=1e-4, beta_1=0.0, beta_2=0.99, weight_decay=weight_decay)
    self.zeta_optimizer = tfa_optimizers.AdamW(
        learning_rate=1e-3, beta_1=0.0, beta_2=0.99, weight_decay=weight_decay)
  @tf.function
  def update(self, initial_states, initial_actions,
             initial_weights, states, actions,
             next_states, next_actions, masks,
             weights, discount):
    """Updates parameters.
    Args:
      initial_states: A batch of states.
      initial_actions: A batch of actions sampled from target policy.
      initial_weights: A batch of weights for the initial states.
      states: A batch of states.
      actions: A batch of actions sampled from behavior policy.
      next_states: A batch of next states.
      next_actions: A batch of next actions sampled from target policy.
      masks: A batch of masks indicating the end of the episodes.
      weights: A batch of weights.
      discount: An MDP discount factor.
    Returns:
      Critic loss.
    """
    # Persistent tape: gradients are taken twice (nu and zeta) below.
    with tf.GradientTape(
        watch_accessed_variables=False, persistent=True) as tape:
      tape.watch(self.nu.trainable_variables)
      tape.watch(self.zeta.trainable_variables)
      nu = self.nu(states, actions)
      nu_next = self.nu(next_states, next_actions)
      nu_0 = self.nu(initial_states, initial_actions)
      zeta = self.zeta(states, actions)
      # Weighted saddle-point objective: the Bellman residual of nu paired
      # with zeta minus a zeta^2/2 regularizer, minus the (1 - discount)
      # initial-state term; both expectations are importance-weighted.
      nu_loss = (
          tf.reduce_sum(weights * (
              (nu - discount * masks * nu_next) * zeta - tf.square(zeta) / 2)) /
          tf.reduce_sum(weights) -
          tf.reduce_sum(initial_weights *
                        (1 - discount) * nu_0) / tf.reduce_sum(initial_weights))
      # zeta is trained adversarially: it maximizes what nu minimizes.
      zeta_loss = -nu_loss
    nu_grads = tape.gradient(nu_loss, self.nu.trainable_variables)
    zeta_grads = tape.gradient(zeta_loss, self.zeta.trainable_variables)
    self.nu_optimizer.apply_gradients(
        zip(nu_grads, self.nu.trainable_variables))
    self.zeta_optimizer.apply_gradients(
        zip(zeta_grads, self.zeta.trainable_variables))
    # Release the persistent tape explicitly.
    del tape
    tf.summary.scalar(
        'train/nu loss', nu_loss, step=self.nu_optimizer.iterations)
    tf.summary.scalar(
        'train/zeta loss', zeta_loss, step=self.zeta_optimizer.iterations)
    return nu_loss
  @tf.function
  def estimate_returns(
      self,
      tf_dataset_iter,
      num_samples = 100):
    """Estimated returns for a target policy.
    Args:
      tf_dataset_iter: Iterator over the dataset.
      num_samples: Number of samples used to estimate the returns.
    Returns:
      Estimated returns.
    """
    pred_returns = 0.0
    pred_ratio = 0.0
    for _ in tqdm.tqdm(range(num_samples), desc='Estimating Returns'):
      states, actions, _, rewards, _, weights, _ = next(tf_dataset_iter)
      zeta = self.zeta(states, actions)
      # pred_ratio averages the learned correction zeta itself
      # (presumably a sanity check that it integrates to ~1 -- TODO confirm).
      pred_ratio += tf.reduce_sum(weights * zeta) / tf.reduce_sum(weights)
      # Returns are the zeta-reweighted average rewards.
      pred_returns += tf.reduce_sum(
          weights * zeta * rewards) / tf.reduce_sum(weights)
    return pred_returns / num_samples, pred_ratio / num_samples
| 35.068376 | 80 | 0.694857 |
5eeb13d562dd5935ed0ea16ef5600441d328f67b | 9,778 | py | Python | grasping/annotation/utils.py | wangyan-hlab/wrs | 8f81cdd33a419d5b4ffe18d13cd4cbf9f258bc7c | [
"MIT"
] | null | null | null | grasping/annotation/utils.py | wangyan-hlab/wrs | 8f81cdd33a419d5b4ffe18d13cd4cbf9f258bc7c | [
"MIT"
] | null | null | null | grasping/annotation/utils.py | wangyan-hlab/wrs | 8f81cdd33a419d5b4ffe18d13cd4cbf9f258bc7c | [
"MIT"
] | null | null | null | import math
import pickle
import numpy as np
import basis.robot_math as rm
def define_grasp(hnd_s,
                 objcm,
                 gl_jaw_center_pos,
                 gl_jaw_center_z,
                 gl_jaw_center_y,
                 jaw_width,
                 toggle_flip=True,
                 toggle_debug=False):
    """
    :param hnd_s: gripper/hand instance used to pose and collision-check grasps
    :param objcm: collision model of the target object
    :param gl_jaw_center_pos: global position of the jaw center
    :param gl_jaw_center_z: hand approaching direction
    :param gl_jaw_center_y: normal direction of thumb's contact surface
    :param jaw_width: jaw opening for this grasp
    :param toggle_flip: also try the grasp with the y axis negated (180-deg flip)
    :param toggle_debug: render collided (red) and valid (green) hand meshes;
        NOTE(review): debug rendering uses a global `base` (panda3d world)
        that must already exist -- confirm with caller.
    :return: a list like [[jaw_width, gl_jaw_center_pos, pos, rotmat], ...]
    author: chenhao, revised by weiwei
    date: 20200104
    """
    grasp_info_list = []
    collided_grasp_info_list = []
    grasp_info = hnd_s.grip_at_with_jczy(gl_jaw_center_pos, gl_jaw_center_z, gl_jaw_center_y, jaw_width)
    # Keep only grasps where the hand mesh does not collide with the object.
    if not hnd_s.is_mesh_collided([objcm]):
        grasp_info_list.append(grasp_info)
    else:
        collided_grasp_info_list.append(grasp_info)
    if toggle_flip:
        grasp_info = hnd_s.grip_at_with_jczy(gl_jaw_center_pos, gl_jaw_center_z, -gl_jaw_center_y, jaw_width)
        if not hnd_s.is_mesh_collided([objcm]):
            grasp_info_list.append(grasp_info)
        else:
            collided_grasp_info_list.append(grasp_info)
    if toggle_debug:
        # Red: collided candidates; green: accepted candidates.
        for grasp_info in collided_grasp_info_list:
            jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
            hnd_s.fix_to(hnd_pos, hnd_rotmat)
            hnd_s.jaw_to(jaw_width)
            hnd_s.gen_meshmodel(rgba=[1, 0, 0, .3]).attach_to(base)
        for grasp_info in grasp_info_list:
            jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
            hnd_s.fix_to(hnd_pos, hnd_rotmat)
            hnd_s.jaw_to(jaw_width)
            hnd_s.gen_meshmodel(rgba=[0, 1, 0, .3]).attach_to(base)
    return grasp_info_list
def define_grasp_with_rotation(hnd_s,
                               objcm,
                               gl_jaw_center_pos,
                               gl_jaw_center_z,
                               gl_jaw_center_y,
                               jaw_width,
                               gl_rotation_ax,
                               rotation_interval=math.radians(60),
                               rotation_range=(math.radians(-180), math.radians(180)),
                               toggle_flip=True,
                               toggle_debug=False):
    """
    Sample grasps by rotating the approach/thumb axes about gl_rotation_ax.

    :param hnd_s: gripper/hand instance used to pose and collision-check grasps
    :param objcm: collision model of the target object
    :param gl_jaw_center_pos: global position of the jaw center
    :param gl_jaw_center_z: hand approaching direction
    :param gl_jaw_center_y: normal direction of thumb's contact surface
    :param jaw_width: jaw opening for the grasps
    :param gl_rotation_ax: global axis about which the grasp pose is rotated
    :param rotation_interval: angular step between sampled grasps (radians)
    :param rotation_range: (start, end) angles of the sweep (radians)
    :param toggle_flip: also sample grasps with the y axis negated
    :param toggle_debug: render collided (red) and valid (green) hand meshes;
        uses a global `base` that must already exist.
    :return: a list [[jaw_width, gl_jaw_center_pos, pos, rotmat], ...]
    author: chenhao, revised by weiwei
    date: 20200104
    """
    grasp_info_list = []
    collided_grasp_info_list = []
    for rotate_angle in np.arange(rotation_range[0], rotation_range[1], rotation_interval):
        # Rotate both jaw axes by the same angle about the given axis.
        tmp_rotmat = rm.rotmat_from_axangle(gl_rotation_ax, rotate_angle)
        gl_jaw_center_z_rotated = np.dot(tmp_rotmat, gl_jaw_center_z)
        gl_jaw_center_y_rotated = np.dot(tmp_rotmat, gl_jaw_center_y)
        grasp_info = hnd_s.grip_at_with_jczy(gl_jaw_center_pos, gl_jaw_center_z_rotated, gl_jaw_center_y_rotated,
                                             jaw_width)
        if not hnd_s.is_mesh_collided([objcm]):
            grasp_info_list.append(grasp_info)
        else:
            collided_grasp_info_list.append(grasp_info)
    if toggle_flip:
        # Same sweep with the thumb axis negated (gripper flipped 180 deg).
        for rotate_angle in np.arange(rotation_range[0], rotation_range[1], rotation_interval):
            tmp_rotmat = rm.rotmat_from_axangle(gl_rotation_ax, rotate_angle)
            gl_jaw_center_z_rotated = np.dot(tmp_rotmat, gl_jaw_center_z)
            gl_jaw_center_y_rotated = np.dot(tmp_rotmat, -gl_jaw_center_y)
            grasp_info = hnd_s.grip_at_with_jczy(gl_jaw_center_pos, gl_jaw_center_z_rotated, gl_jaw_center_y_rotated,
                                                 jaw_width)
            if not hnd_s.is_mesh_collided([objcm]):
                grasp_info_list.append(grasp_info)
            else:
                collided_grasp_info_list.append(grasp_info)
    if toggle_debug:
        # Red: collided candidates; green: accepted candidates.
        for grasp_info in collided_grasp_info_list:
            jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
            hnd_s.fix_to(hnd_pos, hnd_rotmat)
            hnd_s.jaw_to(jaw_width)
            hnd_s.gen_meshmodel(rgba=[1, 0, 0, .3]).attach_to(base)
        for grasp_info in grasp_info_list:
            jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
            hnd_s.fix_to(hnd_pos, hnd_rotmat)
            hnd_s.jaw_to(jaw_width)
            hnd_s.gen_meshmodel(rgba=[0, 1, 0, .3]).attach_to(base)
    return grasp_info_list
def define_pushing(hnd_s,
                   objcm,
                   gl_surface_pos,
                   gl_surface_normal,
                   cone_angle=math.radians(30),
                   icosphere_level=2,
                   local_rotation_interval=math.radians(45),
                   toggle_debug=False):
    """
    :param hnd_s: hand instance used to pose and collision-check pushes
    :param objcm: collision model of the target object
    :param gl_surface_pos: used as cone tip
    :param gl_surface_normal: used as cone's main axis
    :param cone_angle: pushing poses will be randomized in this cone
    :param icosphere_level: subdivision level of the icosphere used to sample
        pushing directions (default 2)
    :param local_rotation_interval: discretize the rotation around the local axis of each pushing pose
    :param toggle_debug: render collided (red) and valid (green) hand meshes
        and block by entering the render loop (base.run() never returns)
    :return: list of [gl_tip_pos, gl_tip_rotmat, hnd_pos, hnd_rotmat]
    author: weiwei
    date: 20220308
    """
    push_info_list = []
    collided_push_info_list = []
    # Candidate orientations: icosphere directions cropped to the cone around
    # the surface normal, each discretized about its own axis.
    pushing_icorotmats = rm.gen_icorotmats(icolevel=icosphere_level,
                                           crop_angle=cone_angle,
                                           crop_normal=gl_surface_normal,
                                           rotation_interval=local_rotation_interval,
                                           toggle_flat=True)
    for pushing_rotmat in pushing_icorotmats:
        push_info = hnd_s.push_at(gl_push_pos=gl_surface_pos, gl_push_rotmat=pushing_rotmat)
        if not hnd_s.is_mesh_collided([objcm]):
            push_info_list.append(push_info)
        else:
            collided_push_info_list.append(push_info)
    if toggle_debug:
        for push_info in collided_push_info_list:
            gl_tip_pos, gl_tip_rotmat, hnd_pos, hnd_rotmat = push_info
            hnd_s.fix_to(hnd_pos, hnd_rotmat)
            hnd_s.gen_meshmodel(rgba=[1, 0, 0, .3]).attach_to(base)
        for push_info in push_info_list:
            gl_tip_pos, gl_tip_rotmat, hnd_pos, hnd_rotmat = push_info
            hnd_s.fix_to(hnd_pos, hnd_rotmat)
            hnd_s.gen_meshmodel(rgba=[0, 1, 0, .3]).attach_to(base)
        # NOTE(review): base.run() enters the render loop and never returns,
        # so in debug mode the function does not reach the return below.
        base.run()
    return push_info_list
def write_pickle_file(objcm_name, grasp_info_list, root=None, file_name='preannotated_grasps.pickle', append=False):
    """
    Save the grasp list of a model into a pickled dict keyed by model name.

    If model_name was saved, replace the old grasp info (or extend it when
    append=True); if model_name was never saved, additionally save it.

    :param objcm_name: dictionary key identifying the object/model
    :param grasp_info_list: list of grasp info entries to store
    :param root: directory holding the pickle file (default: current dir)
    :param file_name: name of the pickle file
    :param append: if True, extend any previously stored list instead of
                   replacing it
    :return: None
    author: chenhao, revised by weiwei
    date: 20200104
    """
    if root is None:
        directory = "./"
    else:
        directory = root + "/"
    file_path = directory + file_name
    try:
        # Context manager closes the handle (the original open() leaked it).
        with open(file_path, 'rb') as pickle_file:
            data = pickle.load(pickle_file)
    except (OSError, EOFError, pickle.UnpicklingError):
        # Missing or unreadable file: start a fresh dictionary. The narrowed
        # exception list no longer swallows programming errors (bare except).
        print("load failed, create new data.")
        data = {}
    if append:
        # setdefault fixes a KeyError the original raised when appending a
        # model name that had never been saved before.
        data.setdefault(objcm_name, []).extend(grasp_info_list)
    else:
        data[objcm_name] = grasp_info_list
    for k, v in data.items():
        print(k, len(v))
    with open(file_path, 'wb') as pickle_file:
        pickle.dump(data, pickle_file)
def load_pickle_file(objcm_name, root=None, file_name='preannotated_grasps.pickle'):
    """
    Load the pre-annotated grasp list of a model from the pickle file.

    :param objcm_name: dictionary key of the model whose grasps are wanted
    :param root: directory holding the pickle file (default: current dir)
    :param file_name: name of the pickle file
    :return: the stored grasp info list
    :raises ValueError: if the file cannot be read or the name is absent
    author: chenhao, revised by weiwei
    date: 20200105
    """
    if root is None:
        directory = "./"
    else:
        directory = root + "/"
    try:
        # Context manager closes the handle (the original open() leaked it).
        with open(directory + file_name, 'rb') as pickle_file:
            data = pickle.load(pickle_file)
        for k, v in data.items():
            print(k, len(v))
        return data[objcm_name]
    except (OSError, EOFError, pickle.UnpicklingError, KeyError) as err:
        # Keep the original ValueError contract for callers, but chain the
        # cause and stop hiding unrelated bugs behind a bare except.
        raise ValueError("File or data not found!") from err
if __name__ == '__main__':
    # Demo: annotate rotational grasps for a block object with an XArm
    # gripper and render every accepted grasp pose.
    import os
    import basis
    import robot_sim.end_effectors.gripper.xarm_gripper.xarm_gripper as xag
    import modeling.collision_model as cm
    import visualization.panda.world as wd
    base = wd.World(cam_pos=[.5, .5, .3], lookat_pos=[0, 0, 0])
    gripper_s = xag.XArmGripper(enable_cc=True)
    objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')
    objcm = cm.CollisionModel(objpath)
    objcm.attach_to(base)
    objcm.show_localframe()
    # Sweep the grasp about the object's z axis at the origin.
    grasp_info_list = define_grasp_with_rotation(gripper_s,
                                                 objcm,
                                                 gl_jaw_center_pos=np.array([0, 0, 0]),
                                                 gl_jaw_center_z=np.array([1, 0, 0]),
                                                 gl_jaw_center_y=np.array([0, 1, 0]),
                                                 jaw_width=.04,
                                                 gl_rotation_ax=np.array([0, 0, 1]))
    # Render a gripper copy at every accepted grasp pose.
    for grasp_info in grasp_info_list:
        jaw_width, jaw_center_pos, jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
        gic = gripper_s.copy()
        gic.grip_at_with_jcpose(jaw_center_pos, jaw_center_rotmat, jaw_width)
        gic.gen_meshmodel().attach_to(base)
    base.run()
| 40.404959 | 117 | 0.6126 |
67d205d1d91d0b893686b0fddb40425f02464428 | 1,285 | py | Python | apps/reports/migrations/0001_initial.py | sonnhfit/supermarket | 9b455c1354007cccb6cec40c04008c72bd4d7316 | [
"MIT"
] | null | null | null | apps/reports/migrations/0001_initial.py | sonnhfit/supermarket | 9b455c1354007cccb6cec40c04008c72bd4d7316 | [
"MIT"
] | null | null | null | apps/reports/migrations/0001_initial.py | sonnhfit/supermarket | 9b455c1354007cccb6cec40c04008c72bd4d7316 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-02-18 10:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First (schema-creating) migration of the `reports` app; auto-generated.
    initial = True
    dependencies = [
        # Report has a FK to the (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('report_name', models.CharField(max_length=200, verbose_name='Tên báo cáo')),
                ('report_time', models.DateTimeField(auto_now_add=True, verbose_name='Thời gian tạo báo cáo')),
                ('tienhang', models.IntegerField(default=0, verbose_name='Tổng tiền hàng')),
                ('thu', models.IntegerField(default=0, verbose_name='Tổng số tiền đã thu')),
                ('chi', models.IntegerField(default=0, verbose_name='Tổng số tiền đã chi')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Người tạo báo cáo')),
            ],
            options={
                'verbose_name_plural': 'Báo cáo',
            },
        ),
    ]
| 38.939394 | 152 | 0.624125 |
e85b4bbc58f6029805438a97be15f5f6b6c0b954 | 6,311 | py | Python | tests/test_validate.py | cclauss/vergeml | 3403574db9c1df232809ddf85bbc415c60d35c7c | [
"MIT"
] | 1 | 2018-11-05T02:02:14.000Z | 2018-11-05T02:02:14.000Z | tests/test_validate.py | cclauss/vergeml | 3403574db9c1df232809ddf85bbc415c60d35c7c | [
"MIT"
] | null | null | null | tests/test_validate.py | cclauss/vergeml | 3403574db9c1df232809ddf85bbc415c60d35c7c | [
"MIT"
] | null | null | null | from vergeml import VergeMLError
from vergeml.validate import ValidateDevice, ValidateData, apply_config, yaml_find_definition, _display_err, _normalize
from vergeml.plugins import _DictPluginManager
from vergeml.sources.image import ImageSource
from vergeml.operations.augment import AugmentOperation
import pytest
def test_normalize():
    """Dotted keys are expanded into nested dicts; plain dicts pass through."""
    VALIDATORS = {'device': ValidateDevice()}
    assert _normalize({
        'device.id': 'gpu',
        'device.memory': '20%',
        'some.thing.else': 1,
        'this': {
            'is': 'normal'
        }
    } , VALIDATORS) == {
        'device': {
            'id': 'gpu',
            'memory': '20%'
        },
        'some': {
            'thing': {
                'else': 1
            }
        },
        'this': {
            'is': 'normal'
        }
    }
def test_normalize_aliases():
    """A scalar 'device' value is treated as an alias for 'device.id'."""
    VALIDATORS = {'device': ValidateDevice()}
    assert _normalize({
        'device': 'gpu:1',
        'device.memory': '20%'
    } , VALIDATORS) == {
        'device': {
            'id': 'gpu:1',
            'memory': '20%'
        },
    }
def test_apply_empty_config():
    """An empty config leaves the validator populated with its defaults."""
    VALIDATORS = {'device': ValidateDevice()}
    assert apply_config({}, VALIDATORS) == {}
    assert VALIDATORS['device'].values == {
        'device': {
            'id': 'auto',
            'memory': 'auto',
            'grow-memory': False
        }
    }
def test_apply_config():
    """Known keys are consumed by the validator; unknown ones are returned."""
    VALIDATORS = {'device': ValidateDevice()}
    assert apply_config({'device': 'gpu', 'model': 'inception-v3'}, VALIDATORS) == {'model': 'inception-v3'}
    assert VALIDATORS['device'].values == {
        'device': {
            'id': 'gpu:0',
            'memory': 'auto',
            'grow-memory': False
        }
    }
def test_input_output():
    """Input and output type declarations are accepted for a known plugin."""
    PLUGINS = _DictPluginManager()
    PLUGINS.set('vergeml.io', 'image', ImageSource)
    VALIDATORS = {'data': ValidateData('image', plugins=PLUGINS)}
    apply_config({
        'data': {
            'input': {
                'type': 'image'
            },
            'output': {
                'type': 'image'
            }
        }
    }, validators=VALIDATORS)
    assert VALIDATORS['data'].values['data']['input']['type'] == 'image'
    assert VALIDATORS['data'].values['data']['output']['type'] == 'image'
def test_validate_preprocess():
    """A valid preprocessing op is kept; other keys fall back to defaults."""
    PLUGINS = _DictPluginManager()
    PLUGINS.set('vergeml.operation', 'augment', AugmentOperation)
    VALIDATORS = {'data': ValidateData(plugins=PLUGINS)}
    apply_config({
        'data': {
            'preprocess': [
                {'op': 'augment',
                 'variants': 4}
            ]
        }
    }, VALIDATORS)
    assert VALIDATORS['data'].values == {
        'data': {
            'cache': '*auto*',
            'input': {
                'type': None
            },
            'output': {
                'type': None
            },
            'preprocess': [
                {'op': 'augment',
                 'variants': 4}
            ]
        }
    }
def test_validate_preprocess_invalid():
    """A misspelled op argument triggers a did-you-mean suggestion."""
    PLUGINS = _DictPluginManager()
    PLUGINS.set('vergeml.operation', 'augment', AugmentOperation)
    VALIDATORS = {'data': ValidateData(plugins=PLUGINS)}
    with pytest.raises(VergeMLError, match=r".*Did you mean 'variants'.*"):
        apply_config({
            'data': {
                'preprocess': [
                    {'op': 'augment',
                     'variantz': 4}
                ]
            }
        }, VALIDATORS)
def test_config_dict():
    """A nested dict form of the device option is consumed by the validator."""
    validators = {'device': ValidateDevice()}
    leftover = apply_config({'device': {'id': 'cpu'}}, validators)
    assert leftover == {}
    assert validators['device'].values['device']['id'] == 'cpu'
def test_config_invalid():
    """An unknown device sub-key must raise VergeMLError."""
    validators = {'device': ValidateDevice()}
    bad_config = {'device': {'id': 'cpu', 'invalid': 'true'}}
    with pytest.raises(VergeMLError):
        apply_config(bad_config, validators)
TEST_YAML = """\
data:
input:
type: imagez
preprocess:
- op: center-crop
width: 30
height: 30
- op: flip-horizontalz
- op: rgb
"""
def test_find_definition_key():
    """The key token of data.input.type sits at line 2, column 8, length 5."""
    assert yaml_find_definition(TEST_YAML, 'data.input.type', 'key') == (2, 8, 5)
def test_find_definition_val():
    """The value token of data.input.type sits at line 2, column 14, length 6."""
    assert yaml_find_definition(TEST_YAML, 'data.input.type', 'value') == (2, 14, 6)
def test_find_definition_arr_key():
    """Array-indexed lookup: the 'op' key of preprocess[1] is at (9, 10, 3)."""
    assert yaml_find_definition(TEST_YAML, 'data.preprocess.1.op', 'key') == (9, 10, 3)
def test_find_definition_arr_val():
    """Array-indexed lookup: the 'op' value of preprocess[1] is at (9, 14, 16)."""
    assert yaml_find_definition(TEST_YAML, 'data.preprocess.1.op', 'value') == (9, 14, 16)
def test_display_err():
    """_display_err renders a caret-annotated snippet around the bad value."""
    line, column, length = yaml_find_definition(TEST_YAML, 'data.preprocess.1.op', 'value')
    res = _display_err("vergeml.yaml",
                       line,
                       column,
                       "Invalid preprocessing operation 'flip-horizontalz'. Did you mean 'flip-horizontal'?",
                       length,
                       3,
                       TEST_YAML)
    res = "Error! " + res
    assert res == """\
Error! File vergeml.yaml, line 10:15
------------------------------------
  height: 30
  - op: flip-horizontalz
        ^^^^^^^^^^^^^^^^
Invalid preprocessing operation 'flip-horizontalz'. Did you mean 'flip-horizontal'?"""
def test_apply_config_image():
    """A valid image input config is fully consumed and stored with defaults."""
    PLUGINS = _DictPluginManager()
    PLUGINS.set('vergeml.io', 'image', ImageSource)
    VALIDATORS = {'data': ValidateData(plugins=PLUGINS)}
    assert apply_config({'data': {'input': {'type': 'image', 'input-patterns': '*.jpg'}}}, VALIDATORS) == {}
    assert VALIDATORS['data'].values == {
        'data': {
            'input': {
                'type': 'image',
                'input-patterns': '*.jpg'
            },
            'output': {
                'type': None
            },
            'cache': '*auto*',
            'preprocess': []
        }
    }
def test_apply_config_image_invalid():
    """A misspelled image input option ('input-patternz') must raise."""
    PLUGINS = _DictPluginManager()
    PLUGINS.set('vergeml.io', 'image', ImageSource)
    VALIDATORS = {'data': ValidateData(plugins=PLUGINS)}
    with pytest.raises(VergeMLError):
        assert apply_config({'data': {'input': {'type': 'image', 'input-patternz': '*.jpg'}}}, VALIDATORS) == {}
| 28.048889 | 119 | 0.522263 |
890ae78197f63a96033d28375f585dd41eebfab1 | 119 | py | Python | flaskHelloWorld.py | rowrodney/Automation | b6aa8bc802fa369a974868764c941c80f9edd2d2 | [
"MIT"
] | null | null | null | flaskHelloWorld.py | rowrodney/Automation | b6aa8bc802fa369a974868764c941c80f9edd2d2 | [
"MIT"
] | null | null | null | flaskHelloWorld.py | rowrodney/Automation | b6aa8bc802fa369a974868764c941c80f9edd2d2 | [
"MIT"
] | null | null | null |
"""Minimal Flask app that serves a static greeting on port 5000."""
from flask import Flask

app = Flask(__name__)


@app.route('/')
def home():
    """Handle GET / with a static greeting."""
    return "hello World!"


if __name__ == '__main__':
    # Guard the dev server behind __main__ so importing this module (e.g.
    # from a WSGI server or a test) no longer starts listening as a side
    # effect; running the script directly still serves on port 5000.
    app.run(port=5000)
157d5db804b4037a4b8fddc253715eff0b72497f | 1,625 | py | Python | _includes/output_saving/activities/output_saving_jython.py | NEUBIAS/training-resources | 7a26674f328953ef83f9e731fc5ae69086169ec8 | [
"CC-BY-4.0"
] | 14 | 2020-04-29T14:18:57.000Z | 2022-03-27T22:06:28.000Z | _includes/output_saving/activities/output_saving_jython.py | NEUBIAS/training-resources | 7a26674f328953ef83f9e731fc5ae69086169ec8 | [
"CC-BY-4.0"
] | 299 | 2020-04-23T13:45:25.000Z | 2022-03-27T08:40:33.000Z | _includes/output_saving/activities/output_saving_jython.py | NEUBIAS/training-resources | 7a26674f328953ef83f9e731fc5ae69086169ec8 | [
"CC-BY-4.0"
] | 9 | 2020-05-16T13:28:48.000Z | 2022-03-15T08:47:09.000Z | # import classes
from ij import IJ, ImagePlus, WindowManager
from ij.io import FileSaver
from ij.plugin.filter import ParticleAnalyzer
from ij.plugin.frame import RoiManager
from ij.measure import ResultsTable, Measurements
from ij.process import ImageProcessor
import os
#@String outputdir
# specify settings (size in pixels, circularity in [0, 1])
min_size = 100
max_size = 500
min_circ = 0.5
max_circ = 1
# Initialize Roi Manager and empty results table, close other open images
rm = RoiManager().getInstance()
rm.reset()
IJ.run("Close All")
# create blob mask
blobs = IJ.openImage("http://imagej.net/images/blobs.gif")
blobs.getProcessor().setAutoThreshold("Default", 1, 1)
blobs_mask = ImagePlus("blobs mask", blobs.getProcessor().createMask())
blobs_mask.show()
# Configure and run particle analyzer
results = ResultsTable() # construct empty resultstable
# Bug fix: pass min_circ/max_circ (defined in the settings above) instead of
# the hard-coded literals 0.5 and 1, so editing the settings takes effect.
# ParticleAnalyzer(options, measurements, rt, minSize, maxSize, minCirc, maxCirc)
pa = ParticleAnalyzer((ParticleAnalyzer.ADD_TO_MANAGER + ParticleAnalyzer.SHOW_ROI_MASKS),(Measurements.AREA + Measurements.CENTROID + Measurements.CENTER_OF_MASS + Measurements.PERIMETER + Measurements.RECT), results, min_size, max_size, min_circ, max_circ)
pa.analyze(blobs_mask) # run the particle analyzer on the image
results.show("Results")
# Save results, label mask, and ROIs
results.save(os.path.join(outputdir, "blob_results_jython.txt")) # save results table
labelmask = WindowManager.getImage("Count Masks of blobs mask")
IJ.run(labelmask, "Glasbey", "") # set glasbey LUT
FileSaver(labelmask).saveAsTiff(os.path.join(outputdir, "blob_labels_jython.tif")) # save the label mask
rm.runCommand("Select All")
rm.runCommand("Save", os.path.join(outputdir, "blob_ROIset_jython.zip")) # save the ROIs
81467f6373e153d3bdb98a827d22aecf0994b64b | 200 | py | Python | start_on_PAW.py | Robert565/pxollypy | 4874ca9625feed7d3b318a6aa3c0c8c5d4e895ea | [
"MIT"
] | 6 | 2021-11-05T17:34:28.000Z | 2022-01-28T12:33:11.000Z | start_on_PAW.py | RobertMeow/pxollypy | a1d47eee514cee2b0cb475cd9777c17ba9378f59 | [
"MIT"
] | null | null | null | start_on_PAW.py | RobertMeow/pxollypy | a1d47eee514cee2b0cb475cd9777c17ba9378f59 | [
"MIT"
] | 2 | 2021-11-05T17:33:46.000Z | 2021-11-06T08:34:53.000Z | from Application.webhook import main, app
import os
import sys
for module in ['flask']:
if module not in sys.modules:
os.system('python3 -m pip install {} --user'.format(module))
main()
| 20 | 68 | 0.685 |
6d0b0d1a923a9a5b3954065de970ef957709b33f | 1,625 | py | Python | src/oneNeuron/Perceptron.py | umangtank/oneNeuron_pypi | c975170f4ce5814e6cf6e44519da288a7edecef9 | [
"MIT"
] | null | null | null | src/oneNeuron/Perceptron.py | umangtank/oneNeuron_pypi | c975170f4ce5814e6cf6e44519da288a7edecef9 | [
"MIT"
] | null | null | null | src/oneNeuron/Perceptron.py | umangtank/oneNeuron_pypi | c975170f4ce5814e6cf6e44519da288a7edecef9 | [
"MIT"
] | null | null | null | import numpy as np
import logging
from tqdm import tqdm
class Perceptron:
    """Single-neuron perceptron trained with the classic perceptron rule.

    Weights (two inputs plus a bias) start as small random values; the bias
    is modelled as an extra constant -1 column appended in fit/predict.
    """
    def __init__(self, eta, epochs):
        # Fixed seed -> reproducible initial weights.
        np.random.seed(2)
        # 3 weights: two input features plus the bias column added in fit().
        self.weights = np.random.randn(3) * 1e-4
        logging.info(f"initial weights before training: {self.weights}")
        self.eta = eta  # learning rate
        self.epochs = epochs  # number of passes over the training data
    def Activationfunction(self, inputs, weights):
        """Step activation: 1 where inputs @ weights > 0, else 0."""
        z = np.dot(inputs, weights)
        return np.where(z > 0, 1, 0)
    def fit(self, X, y):
        """Train on features X and binary labels y with the perceptron rule."""
        self.X = X
        self.y = y
        # Append a constant -1 column so the bias is learned as weights[-1].
        X_with_bias = np.c_[self.X, -np.ones((len(self.X), 1))]
        logging.info(f"X with bias: \n{X_with_bias}")
        for epoch in tqdm(range(self.epochs), total = self.epochs, desc = "Training the Epochs"):
            logging.info("---"*10)
            logging.info(f"for epoch \n{epoch}")
            logging.info("---"*10)
            y_hat = self.Activationfunction(X_with_bias, self.weights)
            logging.info(f"predicted values after forward pass: \n{y_hat}")
            self.error = self.y - y_hat
            logging.info(f"error: \n{self.error}")
            # Perceptron update: w += eta * X^T (y - y_hat)
            self.weights = self.weights + self.eta * \
                np.dot(X_with_bias.T, self.error)
            logging.info(
                f"updated weights after epoch: \n{epoch}/{self.epochs}: {self.weights}")
            logging.info("####"*10)
    def predict(self, X):
        """Return 0/1 predictions for X using the trained weights."""
        X_with_bias = np.c_[X, -np.ones((len(X), 1))]
        return self.Activationfunction(X_with_bias, self.weights)
    def total_loss(self):
        """Sum of the last epoch's errors; only valid after fit() has run."""
        total_loss = np.sum(self.error)
        logging.info(f"total loss : {total_loss}")
        return total_loss
71804327bc60d47763d9235db76627a38c0bc2e5 | 692 | py | Python | src/dc/core/miners/qryptonight7/CNv1PoWValidator.py | dc-blockchain/dc-core | fc6af8ce04d7b52f94c069f6ec05b0e419e07d70 | [
"MIT"
] | 1 | 2021-03-05T14:24:32.000Z | 2021-03-05T14:24:32.000Z | src/dc/core/miners/qryptonight7/CNv1PoWValidator.py | dc-blockchain/dc-core | fc6af8ce04d7b52f94c069f6ec05b0e419e07d70 | [
"MIT"
] | null | null | null | src/dc/core/miners/qryptonight7/CNv1PoWValidator.py | dc-blockchain/dc-core | fc6af8ce04d7b52f94c069f6ec05b0e419e07d70 | [
"MIT"
] | null | null | null | # coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import functools
import threading
from pyqryptonight.pyqryptonight import PoWHelper
from dc.core.Singleton import Singleton
class CNv1PoWValidator(object, metaclass=Singleton):
def __init__(self):
self.lock = threading.Lock()
self._powv = PoWHelper()
def verify_input(self, mining_blob, target):
return self._verify_input_cached(mining_blob, target)
@functools.lru_cache(maxsize=5)
def _verify_input_cached(self, mining_blob, target):
return self._powv.verifyInput(mining_blob, target)
| 30.086957 | 69 | 0.75578 |
99147b19f9fd7f5aacbb0fb6c66861f85dad07e5 | 529 | py | Python | Logger/LoggerSetup.py | actuatech/fuel-tourism | 60e6953cdcccf164e5cd03916a1c3b3c2b071a85 | [
"MIT"
] | null | null | null | Logger/LoggerSetup.py | actuatech/fuel-tourism | 60e6953cdcccf164e5cd03916a1c3b3c2b071a85 | [
"MIT"
] | null | null | null | Logger/LoggerSetup.py | actuatech/fuel-tourism | 60e6953cdcccf164e5cd03916a1c3b3c2b071a85 | [
"MIT"
] | null | null | null | import logging
def setup_logger(logger_name, log_file, level=logging.INFO, stream: bool = True):
logger = logging.getLogger(logger_name)
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
fileHandler = logging.FileHandler(log_file, mode='w')
fileHandler.setFormatter(formatter)
logger.setLevel(level)
logger.addHandler(fileHandler)
if stream:
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler) | 35.266667 | 81 | 0.722117 |
1e7d7675d7ea3ef9689e568f80fb395fdc7d9a95 | 3,088 | py | Python | examples/extensions/three_sat.py | NunoEdgarGFlowHub/cvxpy | 43270fcc8af8fc4742f1b3519800b0074f2e6693 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/extensions/three_sat.py | NunoEdgarGFlowHub/cvxpy | 43270fcc8af8fc4742f1b3519800b0074f2e6693 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/extensions/three_sat.py | NunoEdgarGFlowHub/cvxpy | 43270fcc8af8fc4742f1b3519800b0074f2e6693 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy import *
from mixed_integer import *
import random
import numpy as np
# TODO this is wrong because Boolean.u and Boolean.z aren't reset
# between iterations.
random.seed(1)
np.random.seed(1)
# 3-SAT problem solved with non-convex ADMM
# TODO initialize z's at 0.5
EPSILON = 1e-8
MAX_ITER = 10
# Randomly generate a feasible 3-SAT problem.
VARIABLES = 250
CLAUSES_PER_VARIABLE = 3
# The 3-SAT solution.
solution = [random.random() < 0.5 for i in range(VARIABLES)]
# The 3-SAT clauses.
clauses = []
for i in range(VARIABLES*CLAUSES_PER_VARIABLE):
clause_vars = random.sample(range(VARIABLES), 3)
# Which variables are negated in the clause?
while True:
negated = [random.random() < 0.5 for j in range(3)]
# Must be consistent with the solution.
result = False
for index, negation in zip(clause_vars,negated):
result |= negation ^ solution[index]
if result:
break
clauses.append( (clause_vars, negated) )
print "Generated %d clauses." % len(clauses)
# The 3-SAT variables.
vars = [Boolean() for i in range(VARIABLES)]
# The 3-SAT constraints.
constraints = []
for clause_vars, negated in clauses:
terms = []
for index,negation in zip(clause_vars,negated):
if negation:
terms.append( (1-vars[index]) )
else:
terms.append(vars[index])
constraints.append(sum(terms) >= 1)
best_values = VARIABLES*[0]
best_match = 0
best_rho = 0
for i in range(MAX_ITER):
p = Problem(Minimize(0), constraints)
rho = random.random()
print rho
result = p.solve(method="admm", rho=rho,
iterations=2, solver=ECOS)
print result
# Store the result.
values = [vars[i].value for i in range(VARIABLES)]
# What percentage of the clauses were satisfied?
satisfied = []
for clause_vars,negated in clauses:
result = False
for index, negation in zip(clause_vars,negated):
if negation:
result |= vars[index].value <= EPSILON
else:
result |= vars[index].value > EPSILON
satisfied.append(result)
if sum(satisfied) > best_match:
best_values = values
best_match = sum(satisfied)
best_rho = rho
if best_match == len(clauses): break
percent_satisfied = 100*best_match/len(clauses)
print "%s%% of the clauses were satisfied." % percent_satisfied
| 29.409524 | 68 | 0.676813 |
0a2da4802433fef7b6b619c69887595ff87cdf2f | 4,570 | py | Python | yarn/tests/test_yarn.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2020-08-08T02:01:01.000Z | 2020-08-08T02:01:01.000Z | yarn/tests/test_yarn.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2018-08-15T05:50:17.000Z | 2018-08-15T05:50:17.000Z | yarn/tests/test_yarn.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2018-08-15T05:45:42.000Z | 2018-08-15T05:45:42.000Z | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from requests.exceptions import SSLError
from datadog_checks.yarn import YarnCheck
from datadog_checks.yarn.yarn import (
SERVICE_CHECK_NAME, YARN_QUEUE_METRICS, YARN_APP_METRICS
)
from .common import (
YARN_CONFIG,
YARN_CONFIG_EXCLUDING_APP,
YARN_AUTH_CONFIG,
YARN_APP_METRICS_TAGS,
YARN_CLUSTER_METRICS_TAGS,
YARN_NODE_METRICS_TAGS,
YARN_ROOT_QUEUE_METRICS_TAGS,
YARN_QUEUE_METRICS_TAGS,
YARN_QUEUE_NOFOLLOW_METRICS_TAGS,
YARN_CLUSTER_METRICS_VALUES,
YARN_APP_METRICS_VALUES,
YARN_NODE_METRICS_VALUES,
YARN_ROOT_QUEUE_METRICS_VALUES,
YARN_QUEUE_METRICS_VALUES,
YARN_SSL_VERIFY_TRUE_CONFIG,
YARN_SSL_VERIFY_FALSE_CONFIG,
RM_ADDRESS,
CUSTOM_TAGS,
)
def test_check(aggregator, mocked_request):
# Instantiate YarnCheck
yarn = YarnCheck('yarn', {}, {})
# Run the check once
yarn.check(YARN_CONFIG['instances'][0])
aggregator.assert_service_check(
SERVICE_CHECK_NAME,
status=YarnCheck.OK,
tags=YARN_CLUSTER_METRICS_TAGS + CUSTOM_TAGS + ['url:{}'.format(RM_ADDRESS)],
)
# Check the YARN Cluster Metrics
for metric, value in YARN_CLUSTER_METRICS_VALUES.iteritems():
aggregator.assert_metric(metric, value=value, tags=YARN_CLUSTER_METRICS_TAGS + CUSTOM_TAGS, count=1)
# Check the YARN App Metrics
for metric, value in YARN_APP_METRICS_VALUES.iteritems():
aggregator.assert_metric(metric, value=value, tags=YARN_APP_METRICS_TAGS + CUSTOM_TAGS, count=1)
# Check the YARN Node Metrics
for metric, value in YARN_NODE_METRICS_VALUES.iteritems():
aggregator.assert_metric(metric, value=value, tags=YARN_NODE_METRICS_TAGS + CUSTOM_TAGS, count=1)
# Check the YARN Root Queue Metrics
for metric, value in YARN_ROOT_QUEUE_METRICS_VALUES.iteritems():
aggregator.assert_metric(metric, value=value, tags=YARN_ROOT_QUEUE_METRICS_TAGS + CUSTOM_TAGS, count=1)
# Check the YARN Custom Queue Metrics
for metric, value in YARN_QUEUE_METRICS_VALUES.iteritems():
aggregator.assert_metric(metric, value=value, tags=YARN_QUEUE_METRICS_TAGS + CUSTOM_TAGS, count=1)
# Check the YARN Queue Metrics from excluded queues are absent
for metric, value in YARN_QUEUE_METRICS.values():
aggregator.assert_metric(metric, tags=YARN_QUEUE_NOFOLLOW_METRICS_TAGS + CUSTOM_TAGS, count=0)
aggregator.assert_all_metrics_covered()
def test_check_excludes_app_metrics(aggregator, mocked_request):
# Instantiate YarnCheck
yarn = YarnCheck('yarn', {}, {})
# Run the check once
yarn.check(YARN_CONFIG_EXCLUDING_APP['instances'][0])
# Check that the YARN App metrics is empty
for metric, type in YARN_APP_METRICS.values():
aggregator.assert_metric(metric, count=0)
# Check that our service is up
aggregator.assert_service_check(
SERVICE_CHECK_NAME,
status=YarnCheck.OK,
tags=YARN_CLUSTER_METRICS_TAGS + CUSTOM_TAGS + ['url:{}'.format(RM_ADDRESS)],
count=3,
)
def test_auth(aggregator, mocked_auth_request):
# Instantiate YarnCheck
yarn = YarnCheck('yarn', {}, {})
# Run the check once
yarn.check(YARN_AUTH_CONFIG['instances'][0])
# Make sure check is working
aggregator.assert_service_check(
SERVICE_CHECK_NAME,
status=YarnCheck.OK,
tags=YARN_CLUSTER_METRICS_TAGS + CUSTOM_TAGS + ['url:{}'.format(RM_ADDRESS)],
count=4,
)
def test_ssl_verification(aggregator, mocked_bad_cert_request):
# Instantiate YarnCheck
yarn = YarnCheck('yarn', {}, {})
# Run the check on a config with a badly configured SSL certificate
try:
yarn.check(YARN_SSL_VERIFY_TRUE_CONFIG['instances'][0])
except SSLError:
aggregator.assert_service_check(
SERVICE_CHECK_NAME,
status=YarnCheck.CRITICAL,
tags=YARN_CLUSTER_METRICS_TAGS + CUSTOM_TAGS + ['url:{}'.format(RM_ADDRESS)],
count=1
)
pass
else:
assert False, "Should have thrown an SSLError due to a badly configured certificate"
# Run the check on the same configuration, but with verify=False. We shouldn't get an exception.
yarn.check(YARN_SSL_VERIFY_FALSE_CONFIG['instances'][0])
aggregator.assert_service_check(
SERVICE_CHECK_NAME,
status=YarnCheck.OK,
tags=YARN_CLUSTER_METRICS_TAGS + CUSTOM_TAGS + ['url:{}'.format(RM_ADDRESS)],
count=4,
)
| 33.602941 | 111 | 0.717943 |
7ad248b88d640fce6d650de73dfe568e7a204cc1 | 4,004 | py | Python | homeassistant/components/gogogate2/sensor.py | DoctorU/core | 5b218d7e1c4164e32d41473977459cbaf23adf42 | [
"Apache-2.0"
] | 5 | 2020-10-08T12:59:44.000Z | 2021-12-28T06:46:25.000Z | homeassistant/components/gogogate2/sensor.py | DoctorU/core | 5b218d7e1c4164e32d41473977459cbaf23adf42 | [
"Apache-2.0"
] | 87 | 2020-07-06T22:22:54.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/gogogate2/sensor.py | winning1120xx/home-assistant | 53d4c0ce2d374b5e97bbdc37742656c27adf8eea | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """Support for Gogogate2 garage Doors."""
from __future__ import annotations
from itertools import chain
from ismartgate.common import AbstractDoor, get_configured_doors
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .common import (
DeviceDataUpdateCoordinator,
GoGoGate2Entity,
get_data_update_coordinator,
sensor_unique_id,
)
SENSOR_ID_WIRED = "WIRE"
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the config entry."""
data_update_coordinator = get_data_update_coordinator(hass, config_entry)
sensors = chain(
[
DoorSensorBattery(config_entry, data_update_coordinator, door)
for door in get_configured_doors(data_update_coordinator.data)
if door.sensorid and door.sensorid != SENSOR_ID_WIRED
],
[
DoorSensorTemperature(config_entry, data_update_coordinator, door)
for door in get_configured_doors(data_update_coordinator.data)
if door.sensorid and door.sensorid != SENSOR_ID_WIRED
],
)
async_add_entities(sensors)
class DoorSensorBattery(GoGoGate2Entity, SensorEntity):
"""Battery sensor entity for gogogate2 door sensor."""
def __init__(
self,
config_entry: ConfigEntry,
data_update_coordinator: DeviceDataUpdateCoordinator,
door: AbstractDoor,
) -> None:
"""Initialize the object."""
unique_id = sensor_unique_id(config_entry, door, "battery")
super().__init__(config_entry, data_update_coordinator, door, unique_id)
@property
def name(self):
"""Return the name of the door."""
return f"{self._get_door().name} battery"
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_BATTERY
@property
def native_value(self):
"""Return the state of the entity."""
door = self._get_door()
return door.voltage # This is a percentage, not an absolute voltage
@property
def extra_state_attributes(self):
"""Return the state attributes."""
door = self._get_door()
if door.sensorid is not None:
return {"door_id": door.door_id, "sensor_id": door.sensorid}
return None
class DoorSensorTemperature(GoGoGate2Entity, SensorEntity):
"""Temperature sensor entity for gogogate2 door sensor."""
def __init__(
self,
config_entry: ConfigEntry,
data_update_coordinator: DeviceDataUpdateCoordinator,
door: AbstractDoor,
) -> None:
"""Initialize the object."""
unique_id = sensor_unique_id(config_entry, door, "temperature")
super().__init__(config_entry, data_update_coordinator, door, unique_id)
@property
def name(self):
"""Return the name of the door."""
return f"{self._get_door().name} temperature"
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_TEMPERATURE
@property
def native_value(self):
"""Return the state of the entity."""
door = self._get_door()
return door.temperature
@property
def native_unit_of_measurement(self):
"""Return the unit_of_measurement."""
return TEMP_CELSIUS
@property
def extra_state_attributes(self):
"""Return the state attributes."""
door = self._get_door()
if door.sensorid is not None:
return {"door_id": door.door_id, "sensor_id": door.sensorid}
return None
| 30.8 | 80 | 0.682817 |
112e79939ab7b123b6d3a710fa0db4240e78933c | 1,206 | py | Python | research/cv/PDarts/src/my_utils.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/PDarts/src/my_utils.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/PDarts/src/my_utils.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# less required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define some utils."""
import numpy as np
def print_trainable_params_count(network):
params = network.trainable_params()
trainable_params_count = 0
for param in enumerate(params):
shape = param[1].data.shape
size = np.prod(shape)
trainable_params_count += size
print("trainable_params_count:" + str(trainable_params_count))
def drop_path(div, mul, x, drop_prob, mask):
if drop_prob > 0.:
keep_prob = 1. - drop_prob
x = div(x, keep_prob)
x = mul(x, mask)
return x
| 34.457143 | 78 | 0.664179 |
0c2505f9a0eab29c759845da289564ca21a4c3ef | 9,057 | py | Python | IG02_Scripts/neglect_mvpa/nperlabel_equal/redos_ROIperms/submit_MVPA_ROI_perm_redo_LinN_c2.py | DVS-Lab/duke-archive | 8d127f18d5930803410e716a4b62aac7d4aa4beb | [
"MIT"
] | null | null | null | IG02_Scripts/neglect_mvpa/nperlabel_equal/redos_ROIperms/submit_MVPA_ROI_perm_redo_LinN_c2.py | DVS-Lab/duke-archive | 8d127f18d5930803410e716a4b62aac7d4aa4beb | [
"MIT"
] | null | null | null | IG02_Scripts/neglect_mvpa/nperlabel_equal/redos_ROIperms/submit_MVPA_ROI_perm_redo_LinN_c2.py | DVS-Lab/duke-archive | 8d127f18d5930803410e716a4b62aac7d4aa4beb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys,os,time,re,datetime,smtplib
#########user section#########################
#user specific constants
username = "smith" #your cluster login name (use what shows up in qstatall)
useremail = "smith@biac.duke.edu" #email to send job notices to
template_f = file("MVPA_ROI_perm_redo_Lin.sh") #job template location (on head node)
experiment = "Imagene.02" #experiment name for qsub
nodes = 400 #number of nodes on cluster
maintain_n_jobs = 250 #leave one in q to keep them moving through
min_jobs = 10 #minimum number of jobs to keep in q even when crowded
n_fake_jobs = 25 #during business hours, pretend there are extra jobs to try and leave a few spots open
sleep_time = 2 #pause time (sec) between job count checks
max_run_time = 999999 #maximum time any job is allowed to run in minutes
max_run_hours = 999999 #maximum number of hours submission script can run
warning_time = 999999 #send out a warning after this many hours informing you that the deamon is still running
delayt = 2 #delay time between job submissions
combos = [ "2" ]
datatypes = [ "normed" ]
classifiers = [ "LinearNuSVMC" ]
reps = [ "1", "2", "3", "4", "5"]
#masks = [ "old" ]
#combos = [ "2" ]
# make it more selective: & (line.find("HiRes") > 0)
###############################################
def daemonize(stdin='/dev/null',stdout='/dev/null',stderr='/dev/null'):
try:
#try first fork
pid=os.fork()
if pid>0:
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno,e.strerror))
sys.exit(1)
os.chdir("/")
os.umask(0)
os.setsid()
try:
#try second fork
pid=os.fork()
if pid>0:
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
for f in sys.stdout, sys.stderr: f.flush()
si=file(stdin,'r')
so=file(stdout,'a+')
se=file(stderr,'a+',0)
os.dup2(si.fileno(),sys.stdin.fileno())
os.dup2(so.fileno(),sys.stdout.fileno())
os.dup2(se.fileno(),sys.stderr.fileno())
start_dir = os.getcwd()
daemonize('/dev/null',os.path.join(start_dir,'daemon.log'),os.path.join(start_dir,'daemon.log'))
sys.stdout.close()
os.chdir(start_dir)
temp=time.localtime()
hour,minute,second=temp[3],temp[4],temp[5]
prev_hr=temp[3]
t0=str(hour)+':'+str(minute)+':'+str(second)
log_name=os.path.join(start_dir,'daemon.log')
log=file(log_name,'w')
log.write('Daemon started at %s with pid %d\n' %(t0,os.getpid()))
log.write('To kill this process type "kill %s" at the head node command line\n' % os.getpid())
log.close()
t0=time.time()
master_clock=0
#build allowed timedelta
kill_time_limit = datetime.timedelta(minutes=max_run_time)
def _check_jobs(username, kill_time_limit, n_fake_jobs):
#careful, looks like all vars are global
#see how many jobs we have in
#set number of jobs to maintain based on time of day.
cur_time = datetime.datetime.now() #get current time #time.localtime() #get current time
if (cur_time.weekday > 4) | (cur_time.hour < 8) | (cur_time.hour > 17):
n_other_jobs = 0
else: #its a weekday, fake an extra 6 jobs to leave 5 nodes open
n_other_jobs = n_fake_jobs
n_jobs = 0
status = os.popen("qstat -u '*'")
status_list = status.readlines()
for line in status_list:
#are these active or q'd jobs?
if (line.find(" r ") > -1):
running = 1
elif (line.find("qw") > -1): #all following jobs are in queue not running
running = 0
#if job is mine
if (line.find(username) > 0) & (line.find("interact.q") < 0): #name is in the line, not including first spot
n_jobs = n_jobs + 1
if running == 1: #if active job, check how long its been running and delete it if too long
job_info = line.split() #get job information
start_date = job_info[5].split("/") #split job start date
start_time = job_info[6].split(":") #split time from hours:minutes:seconds format
started = datetime.datetime(int(start_date[2]), int(start_date[0]), int(start_date[1]),
int(start_time[0]), int(start_time[1]), int(start_time[2]))
if ((cur_time - started) > kill_time_limit) & (line.find("stalled") == -1): #if the active job is over max run time, delete it
#os.system("qdel %s" % (job_info[0])) #delete the run away job
print("Job %s was deleted because it ran for more than the maximum time." % (job_info[0]))
# if line starts " ###" and isnt an interactive job
elif bool(re.match( "^\d+", line )) & (line.find("interact") < 0) & (line.find("(Error)") < 0):
n_other_jobs = n_other_jobs + 1
return n_jobs, n_other_jobs
#make a directory to write job files to and store the start directory
tmp_dir = str(os.getpid())
os.mkdir(tmp_dir)
#read in template
template = template_f.read()
template_f.close()
os.chdir(tmp_dir)
#neglect_combo2_LinearCSVMC_old_CV_performance_rawdata_missingROIs.txt
#neglect_combo2_LinearNuSVMC_old_CV_performance_rawdata_missingROIs.txt
#neglect_combo2_SMLR_old_CV_performance_rawdata_missingROIs.txt
#------START SCRIPT HERE----------------
for combo in combos:
for rep in reps:
for classifier in classifiers:
for datatype in datatypes:
fname = ("/home/%s/Imagene.02/neglect_mvpa/nperlabel_equal/redos_ROIperms/missing_lists/combo%s/missingROIs_combo%s_%s_%sdata_rep%s.txt") % (username,combo,combo,classifier,datatype,rep)
if os.path.isfile(fname):
combo_f = open(fname,"r")
combo_list = combo_f.readlines()
combo_f.close()
else:
continue
for line in combo_list:
c = line.split()
ROI = c[0]
perm = c[1]
tmp_job_file = template.replace( "SUB_USEREMAIL_SUB", useremail )
tmp_job_file = tmp_job_file.replace("SUB_ROI_SUB", ROI )
tmp_job_file = tmp_job_file.replace("SUB_COMBO_SUB", combo )
tmp_job_file = tmp_job_file.replace("SUB_DATATYPE_SUB", datatype )
tmp_job_file = tmp_job_file.replace("SUB_CLASSIFIER_SUB", classifier )
tmp_job_file = tmp_job_file.replace("SUB_PERM_SUB", perm )
tmp_job_file = tmp_job_file.replace("SUB_REPEAT_SUB", rep )
tmp_job_fname = "_".join( [ "redo", combo, classifier, datatype, ROI, rep, perm] )
tmp_job_f = file( tmp_job_fname, "w" )
tmp_job_f.write(tmp_job_file)
tmp_job_f.close()
#wait to submit the job until we have fewer than maintain in q
n_jobs = maintain_n_jobs
while n_jobs >= maintain_n_jobs:
#count jobs
n_jobs, n_other_jobs = _check_jobs(username, kill_time_limit, n_fake_jobs) #count jobs, delete jobs that are too old
#adjust job submission by how may jobs are submitted
#set to minimum number if all nodes are occupied
#should still try to leave # open on weekdays
if ((n_other_jobs+ n_jobs) > (nodes+1)):
n_jobs = maintain_n_jobs - (min_jobs - n_jobs)
if n_jobs >= maintain_n_jobs:
time.sleep(sleep_time)
elif n_jobs < maintain_n_jobs:
cmd = "qsub -l h_rt=00:10:00 -l h_vmem=2G -v EXPERIMENT=%s %s" % ( experiment, tmp_job_fname )
dummy, f = os.popen2(cmd)
#need to pause after each job submission
time.sleep(delayt)
os.remove(tmp_job_fname) #need to clean up as i go.
#Check what how long daemon has been running
#don't need to do this every loop
t1=time.time()
hour=(t1-t0)/3600
log=file(log_name,'a+')
log.write('Daemon has been running for %s hours\n' % hour)
log.close()
now_hr=time.localtime()[3]
if now_hr>prev_hr:
master_clock=master_clock+1
prev_hr=now_hr
serverURL="email.biac.duke.edu"
if master_clock==warning_time:
headers="From: %s\r\nTo: %s\r\nSubject: Daemon job still running!\r\n\r\n" % (useremail,useremail)
text="""Your daemon job has been running for %d hours. It will be killed after %d.
To kill it now, log onto the head node and type kill %d""" % (warning_time,max_run_hours,os.getpid())
message=headers+text
mailServer=smtplib.SMTP(serverURL)
mailServer.sendmail(useremail,useremail,message)
mailServer.quit()
elif master_clock==max_run_hours:
headers="From: %s\r\nTo: %s\r\nSubject: Daemon job killed!\r\n\r\n" % (useremail,useremail)
text="Your daemon job has been killed. It has run for the maximum time alotted"
message=headers+text
mailServer=smtplib.SMTP(serverURL)
mailServer.sendmail(useremail,useremail,message)
mailServer.quit()
ID=os.getpid()
os.system('kill '+str(ID))
#wait for jobs to complete
#delete them if they run too long
n_jobs = 1
while n_jobs > 0:
n_jobs, n_other_jobs = _check_jobs(username, kill_time_limit, n_fake_jobs)
time.sleep(sleep_time)
#remove tmp job files move to start dir and delete tmpdir
#terminated jobs will prevent this from executing
#you will then have to clean up a "#####" directory with
# ".job" files written in it.
cmd = "rm *.job"
os.system(cmd)
os.chdir(start_dir)
os.rmdir(tmp_dir)
| 36.373494 | 190 | 0.670531 |
ef99970570788e0007bed73e38f2b860290645b7 | 3,156 | py | Python | libs/gui/testlist.py | albertoccelli/vortex | 54bf15d2f74967d3dfeddec670b32868fbb2c51b | [
"MIT"
] | null | null | null | libs/gui/testlist.py | albertoccelli/vortex | 54bf15d2f74967d3dfeddec670b32868fbb2c51b | [
"MIT"
] | null | null | null | libs/gui/testlist.py | albertoccelli/vortex | 54bf15d2f74967d3dfeddec670b32868fbb2c51b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'testlist.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Testlist_DIalog(object):
def setupUi(self, Testlist_DIalog):
Testlist_DIalog.setObjectName("Testlist_DIalog")
Testlist_DIalog.resize(360, 245)
Testlist_DIalog.setMinimumSize(QtCore.QSize(360, 245))
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(Testlist_DIalog)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem = QtWidgets.QSpacerItem(218, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.pushButton_3 = QtWidgets.QPushButton(Testlist_DIalog)
self.pushButton_3.setObjectName("pushButton_3")
self.horizontalLayout_2.addWidget(self.pushButton_3)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_2 = QtWidgets.QPushButton(Testlist_DIalog)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.pushButton = QtWidgets.QPushButton(Testlist_DIalog)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.horizontalLayout_2.addLayout(self.horizontalLayout)
self.gridLayout.addLayout(self.horizontalLayout_2, 2, 0, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(20, 25, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 1, 0, 1, 1)
self.tableWidget = QtWidgets.QTableWidget(Testlist_DIalog)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.gridLayout.addWidget(self.tableWidget, 0, 0, 1, 1)
self.horizontalLayout_3.addLayout(self.gridLayout)
self.retranslateUi(Testlist_DIalog)
QtCore.QMetaObject.connectSlotsByName(Testlist_DIalog)
def retranslateUi(self, Testlist_DIalog):
_translate = QtCore.QCoreApplication.translate
Testlist_DIalog.setWindowTitle(_translate("Testlist_DIalog", "Dialog"))
self.pushButton_3.setText(_translate("Testlist_DIalog", "Cancel"))
self.pushButton_2.setText(_translate("Testlist_DIalog", "Default"))
self.pushButton.setText(_translate("Testlist_DIalog", "Ok"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Testlist_DIalog = QtWidgets.QDialog()
ui = Ui_Testlist_DIalog()
ui.setupUi(Testlist_DIalog)
Testlist_DIalog.show()
sys.exit(app.exec_())
| 46.411765 | 115 | 0.734791 |
9b96dca698c8caebbfd00e55cb89755d94fb55ce | 126 | py | Python | Lab4/lab4_2_Boiko.py | Nickas47/python_savka | 31101bba6a7e75bc398136d01e5e0cb9d68df097 | [
"Apache-2.0"
] | null | null | null | Lab4/lab4_2_Boiko.py | Nickas47/python_savka | 31101bba6a7e75bc398136d01e5e0cb9d68df097 | [
"Apache-2.0"
] | null | null | null | Lab4/lab4_2_Boiko.py | Nickas47/python_savka | 31101bba6a7e75bc398136d01e5e0cb9d68df097 | [
"Apache-2.0"
] | null | null | null | while(1 == 1):
s = str(input())
k=0
for x in s:
if(x in '1234567890'):
k = k + 1
print(k)
| 15.75 | 30 | 0.388889 |
4a1045bf2cfebffff6d30bc1fdbbacdfa74f3d17 | 5,493 | py | Python | offb_posctl/scripts/GPM_test.py | SensenLiu/aggrecup | 0c381ee259b388684205c1fa5fc41265a7e849b3 | [
"MIT"
] | null | null | null | offb_posctl/scripts/GPM_test.py | SensenLiu/aggrecup | 0c381ee259b388684205c1fa5fc41265a7e849b3 | [
"MIT"
] | null | null | null | offb_posctl/scripts/GPM_test.py | SensenLiu/aggrecup | 0c381ee259b388684205c1fa5fc41265a7e849b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
import socket
from scipy.optimize import minimize
import numpy as np
import time
from numba import jit, float64
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import datetime
# global variable start >>>
n = 7
t0 = 0
tf = 2
discretized_point_persecond = 50
pointnumber = tf * discretized_point_persecond # 离散点数
currentupdateflag = False # 是否计算控制量
k = np.array([50, 50])
c = np.array([0, 0]) # air drag effect in x & z
co = 0.5 * (tf - t0)
g = 9.8
px_ini = -3
pz_ini = 0
vx_ini = 0
vz_ini = 0
va_ini = 0 # absolute velocity of plane
# ini = np.array([[px_ini], [pz_ini], [vx_ini], [vz_ini], [va_ini]])
state_get_flag = False
# global variable start >>>
# D matrix
D = np.loadtxt(open("../data/D.csv", "rb"), delimiter=",", skiprows=0) # array
# Gauss weights
omega = np.loadtxt(open("../data/omega.csv", "rb"), delimiter=",", skiprows=0) # array
# Lagrange coefficient of x
L1 = np.loadtxt(open("../data/L1.csv", "rb"), delimiter=",", skiprows=0) # array
# Lagrange coefficient of u
L2 = np.loadtxt(open("../data/L2.csv", "rb"), delimiter=",", skiprows=0) # array
# Objective
@jit(float64(float64[:]), nopython=True)
def J(x):
X1 = x[0: n]
X2 = x[n: 2 * n]
U1 = x[5 * n: 6 * n]
U2 = x[6 * n: 7 * n]
return co * 0.5 * np.dot(omega, (
0.5 * (U1 - 9.8) ** 2 + 0.5 * U2 ** 2 + k[0] * (X1 + 3) ** 2 + k[1] * (X1 * U2 + X2) ** 2))
# the derivative of objective function J
@jit(float64[:](float64[:]), nopython=True)
def fast_jac(x):
h = 1e-11
N = x.shape[0]
jac = np.zeros_like(x)
f_0 = J(x)
for i in range(N):
x_d = np.copy(x)
x_d[i] += h
f_d = J(x_d)
jac[i] = (f_d - f_0) / h
return jac
# Constraint
@jit(float64[:](float64[:]), nopython=True)
def mycon(x):
    # Equality constraints of the Gauss pseudospectral transcription:
    # D @ [state_ini; X] == co * f(X, U) for every state, where f is the
    # planar point-mass dynamics with gravity g and drag coefficients c.
    #
    # Fixes vs. original: removed a leftover debug print and a no-op
    # ``global`` declaration (the initial-condition globals are only read
    # here, so no declaration is needed).
    X1 = x[0: n]           # horizontal position
    X2 = x[n: 2 * n]       # vertical position
    X3 = x[2 * n: 3 * n]   # horizontal velocity
    X4 = x[3 * n: 4 * n]   # vertical velocity
    X5 = x[4 * n: 5 * n]   # absolute velocity (drag state)
    U1 = x[5 * n: 6 * n]   # control 1 (vertical acceleration / thrust term)
    U2 = x[6 * n: 7 * n]   # control 2 (horizontal acceleration scaled by g)
    Ceq1 = np.dot(D, np.append(px_ini, X1)) - co * X3
    Ceq2 = np.dot(D, np.append(pz_ini, X2)) - co * X4
    Ceq3 = np.dot(D, np.append(vx_ini, X3)) - co * (g * U2 - c[0] * X5)
    Ceq4 = np.dot(D, np.append(vz_ini, X4)) - co * (U1 - g - c[1] * X5)
    Ceq5 = np.dot(D, np.append(va_ini, X5)) - co * (g * U2 - c[0] * X5)
    return np.hstack((Ceq1, Ceq2, Ceq3, Ceq4, Ceq5))
def do_process(result):
    """Convert an SLSQP ``result`` into sampled state/control trajectories.

    The optimizer's flat decision vector is reshaped to (7, n); the five
    state rows are prepended with the initial conditions and fitted through
    the Lagrange coefficient matrices L1 (states) and L2 (controls), then
    the fitted polynomials are evaluated on the global ``tau`` grid.

    Fixes vs. original: removed a leftover debug print, an unused ``x5``
    evaluation (computed and then discarded), and a ``global tau``
    declaration that was unnecessary because ``tau`` is only read here.

    Returns:
        numpy array of shape (6, len(tau)); rows are x1, x2, x3, x4, u1, u2.
    """
    x = result.x.reshape(7, n)
    ini = np.array([[px_ini], [pz_ini], [vx_ini], [vz_ini], [va_ini]])
    poly_x = np.dot(np.hstack((ini, x[0:5, :])), L1)  # polynomial coefficients of the states
    poly_u = np.dot(x[5:7, :], L2)  # polynomial coefficients of the controls
    # Evaluate the fitted polynomials on the tau grid.
    x1 = np.polyval(poly_x[0], tau)
    x2 = np.polyval(poly_x[1], tau)
    x3 = np.polyval(poly_x[2], tau)
    x4 = np.polyval(poly_x[3], tau)
    u1 = np.polyval(poly_u[0], tau)
    u2 = np.polyval(poly_u[1], tau)
    return np.vstack((x1, x2, x3, x4, u1, u2))
def parse(data):  # parse the px/pz/... state data received from the client
    """Scan ``data`` (bytes) for 'LEN'-prefixed frames and extract the newest state.

    Frame layout: b'LEN' + 3 ASCII digits (payload length) + payload, where
    the payload is the textual repr of the state message.  Sets the module
    flag ``state_get_flag`` when a complete 6-element state is seen.

    NOTE(review): ``eval`` on network data is unsafe — consider
    ``ast.literal_eval`` or json; confirm the sender format first.
    NOTE(review): if 'LEN' never matches, ``Length``/``msg`` are unbound and
    the final ``return`` relies on the bare ``except`` to fall back to b''.
    """
    global state_get_flag
    if len(data) > 6:  # must contain at least one frame header
        for i in range(len(data)):
            if data[i:i + 3].decode() == 'LEN':
                Length = int(data[i + 3:i + 6].decode())
                # print('data:{}'.format(data))
                # print('time now:{}'.format(time.time()))
                if len(data[i:]) >= (Length + 6):  # message contains header + state payload
                    msg = eval(data[i + 6:i + 6 + Length].decode())  # keep only the newest msg
                    print('msg:{}'.format(msg))
                    if len(msg) == 6:
                        state_get_flag = True
                    if len(data[i + 6+Length:]) < Length + 6:  # leftover shorter than one message
                        break
                else:
                    break
        try:
            return data[Length + i + 6:], msg  # return leftover bytes that cannot form a full frame
        except:
            print('----data:{}----'.format(data))
            return b''
    else:
        return b''
    pass
def main():
    """Solve one GPM trajectory optimization for a fixed initial state and plot x1.

    The socket/loop machinery is commented out; the initial conditions are
    hard-coded for this offline test.
    """
    global currentupdateflag, discretized_point_persecond, tau, state_get_flag, msg
    global px_ini, pz_ini, vx_ini, vz_ini, va_ini
    constraint = [dict(type='eq', fun=mycon)]
    tau = np.linspace(-1, 1, pointnumber)
    # t = 0.5 * (tf - t0) * tau + 0.5 * (tf + t0)
    # while True:
    state_get_flag = True
    if state_get_flag:
        # Hard-coded initial state (would normally come from parse()).
        px_ini = -3.5
        pz_ini = 0.0
        vx_ini = -0.1
        vz_ini = 0
        va_ini = 0
        print('px_ini:{}; pz_ini:{}; vx_ini:{}; vz_ini:{}; va_ini:{};'.format(px_ini, pz_ini, vx_ini, vz_ini, va_ini))
        start = time.time()
        # core calculate code
        result = minimize(J, np.zeros((7 * n)), method='SLSQP', tol=1e-4, constraints=constraint, jac=fast_jac)
        print(result)
        res = do_process(result)
        # print(res)
        # core calculate code
        end = time.time()
        running_time = end - start
        print('time cost : %.5f sec' % running_time)
        ## core part 1 >>>>
        time_now = time.time()
        # First 20 samples of controls and states, plus a timestamp (message payload).
        thrust_pitch_x1234 = [res[4, 0:20].tolist(), res[5, 0:20].tolist(), res[0, 0:20].tolist(), res[1, 0:20].tolist(),
                              res[2, 0:20].tolist(), res[3, 0:20].tolist(), time_now]
        # Plot x1 over physical time (tau mapped from [-1, 1] back to [t0, tf]).
        plt.plot(tau*(tf-t0)/2.0+(tf+t0), res[0, 0:100])
        plt.show()
if __name__ == '__main__':  # main entry point
    main()
| 30.687151 | 121 | 0.532132 |
78ac3ce5bb7c365b752e76fb8b9a54167062c140 | 524 | py | Python | stream.py | HaydenPWoods/radio-stream-to-spotify | 748f4a8a08210edf916fafcad620dfa2e64fa940 | [
"MIT"
] | null | null | null | stream.py | HaydenPWoods/radio-stream-to-spotify | 748f4a8a08210edf916fafcad620dfa2e64fa940 | [
"MIT"
] | 4 | 2020-09-26T13:45:00.000Z | 2021-03-20T18:31:02.000Z | stream.py | HaydenPWoods/radio-stream-to-spotify | 748f4a8a08210edf916fafcad620dfa2e64fa940 | [
"MIT"
] | null | null | null | class Stream:
def __init__(self, name, url, encoding, regex, separator, order, playlist_id, include_remixes):
self.name = name
self.url = url
self.encoding = encoding
self.regex = regex
self.separator = separator
self.order = order
self.playlist_id = playlist_id
self.include_remixes = include_remixes
@classmethod
def build_from_list(cls, v_v):
obj = cls(v_v[0], v_v[1], v_v[2], v_v[3], v_v[4], v_v[5], v_v[6], v_v[7])
return obj
| 32.75 | 99 | 0.610687 |
47a98d0b292136b1f757d222525ed3fc373b6662 | 2,335 | py | Python | test/test_integralequations.py | neitzke/stokes-numerics | 8845aef7598ca245d095cca690bf48568758a8c9 | [
"MIT"
] | 1 | 2020-08-03T16:24:06.000Z | 2020-08-03T16:24:06.000Z | test/test_integralequations.py | neitzke/stokes-numerics | 8845aef7598ca245d095cca690bf48568758a8c9 | [
"MIT"
] | null | null | null | test/test_integralequations.py | neitzke/stokes-numerics | 8845aef7598ca245d095cca690bf48568758a8c9 | [
"MIT"
] | null | null | null | import logging
import logconfig
logconfig.logconfig(filename=None)
logconfig.loglevel(logging.INFO)
from integralequations import *
def test_integralequations(theoryname = "A1A2", R = 0.4, oper = True):
    """Smoke test: compute Xars with both quadrature methods and print results.

    Runs ``computeXar`` with the 'fourier' and 'simps' methods, reports the
    cluster X-variables of each, then prints approximate clusters for
    nterms = 0, 1.  Results are also stashed in module globals for
    interactive inspection.
    """
    steps = 256
    tolerance = 1e-11
    def report(xar):
        """Print a few quantities derived from given xar"""
        print("cluster X-variables at theta = 0.00: %s" % xar.getCluster())
        print("cluster X-variables at theta = 0.10: %s" % xar.getCluster(theta = 0.10))
        print("cluster X-variables at theta = 0.20: %s" % xar.getCluster(theta = 0.20))
    print("Computing xars in theory %s with R = %0.8f, steps = %d, tolerance = %s" % (theoryname,R,steps,tolerance))
    # Keep results reachable after the test for interactive sessions.
    global xarfourier,xarsfourier
    xarfourier = computeXar(theoryname = theoryname, R = R, oper = oper, tolerance = tolerance, steps = steps, method = "fourier")
    global xarsimps,xarssimps
    xarsimps = computeXar(theoryname = theoryname, R = R, oper = oper, tolerance = tolerance, steps = steps, method = "simps")
    print("----------------------------------------------------------------------")
    print(" FOURIER METHOD")
    print("----------------------------------------------------------------------")
    report(xarfourier)
    print("----------------------------------------------------------------------")
    print(" SIMPS METHOD")
    print("----------------------------------------------------------------------")
    report(xarsimps)
    print("----------------------------------------------------------------------")
    print(" APPROXIMATE CLUSTER ")
    print("----------------------------------------------------------------------")
    for nterms in range(2):
        print("cluster X-variables at theta = 0.00, nterms = %d: %s" % (nterms,getApproxCluster(theoryname = theoryname, R = R, theta = 0, nterms = nterms, oper = oper)))
        if not oper:
            # Nonzero theta values are only meaningful in the non-oper case.
            print("cluster X-variables at theta = 0.10, nterms = %d: %s" % (nterms,getApproxCluster(theoryname = theoryname, R = R, theta = 0.10, nterms = nterms, oper = oper)))
            print("cluster X-variables at theta = 0.20, nterms = %d: %s" % (nterms,getApproxCluster(theoryname = theoryname, R = R, theta = 0.20, nterms = nterms, oper = oper)))
| 54.302326 | 177 | 0.500642 |
32eee3dbf1825f0cf3a62ddb1dcd99d401a35cca | 25,160 | py | Python | bgx/validator-bgx/sawtooth_validator/execution/context_manager.py | sparsov/DGT-Kawartha-demo | edfbc18f2c70e813805ec23c28fbc35bf7866ffc | [
"Apache-2.0"
] | null | null | null | bgx/validator-bgx/sawtooth_validator/execution/context_manager.py | sparsov/DGT-Kawartha-demo | edfbc18f2c70e813805ec23c28fbc35bf7866ffc | [
"Apache-2.0"
] | 10 | 2020-05-12T06:58:15.000Z | 2022-02-26T23:59:35.000Z | bgx/validator-bgx/sawtooth_validator/execution/context_manager.py | DGT-Network/DGT-Mississauga | 52b5f1f4015db2aa7196e727a25b399de5fbf3c3 | [
"Apache-2.0"
] | 1 | 2021-03-09T03:36:36.000Z | 2021-03-09T03:36:36.000Z | # Copyright NTRLab 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import re
from collections import deque
from threading import Lock
from queue import Queue
from sawtooth_validator.concurrent.thread import InstrumentedThread
from sawtooth_validator.state.merkle import MerkleDatabase
from sawtooth_validator.execution.execution_context import AuthorizationException
from sawtooth_validator.execution.execution_context import ExecutionContext
LOGGER = logging.getLogger(__name__)


class CreateContextException(Exception):
    """Raised when a context cannot be created (e.g. an invalid address)."""
    pass


class SquashException(Exception):
    """Defined for squash failures; not raised in this module — presumably
    used by callers.  TODO confirm."""
    pass


# Placed on the internal queues to tell the reader/writer threads to exit.
_SHUTDOWN_SENTINEL = -1
class ContextManager(object):
    """Tracks ExecutionContexts for in-flight transactions.

    Contexts form a DAG (each context may be based on prior contexts);
    reads fall back through that chain and finally to the merkle tree.
    Two background threads prefetch values from state: a reader pulls
    (context_id, state_hash, addresses) tuples and a writer stores the
    resulting values back into the contexts.
    """

    def __init__(self, database):
        """
        Args:
            database (database.Database subclass): the subclass/implementation
                of the Database
        """
        self._database = database
        self._first_merkle_root = None
        self._contexts = _ThreadsafeContexts()
        # 70 hex chars = full address; up to 35 hex byte-pairs = namespace prefix.
        self._address_regex = re.compile('^[0-9a-f]{70}$')
        self._namespace_regex = re.compile('^([0-9a-f]{2}){0,35}$')
        self._address_queue = Queue()
        self._inflated_addresses = Queue()
        # Background prefetch pipeline: reader resolves addresses from state,
        # writer stores the resolved values into the owning context.
        self._context_reader = _ContextReader(database, self._address_queue,
                                              self._inflated_addresses)
        self._context_reader.start()
        self._context_writer = _ContextWriter(self._inflated_addresses,
                                              self._contexts)
        self._context_writer.start()

    def get_first_root(self):
        """Return (and cache) the merkle root of the underlying database."""
        if self._first_merkle_root is not None:
            return self._first_merkle_root
        self._first_merkle_root = MerkleDatabase(
            self._database).get_merkle_root()
        return self._first_merkle_root

    def address_is_valid(self, address):
        """True if ``address`` is a full 70-hex-character state address."""
        return self._address_regex.match(address) is not None

    def namespace_is_valid(self, namespace):
        """True if ``namespace`` is a valid even-length hex prefix (<= 70 chars)."""
        return self._namespace_regex.match(namespace) is not None

    def create_context(self, state_hash, base_contexts, inputs, outputs):
        """Create a ExecutionContext to run a transaction against.

        Args:
            state_hash: (str): Merkle root to base state on.
            base_contexts (list of str): Context ids of contexts that will
                have their state applied to make this context.
            inputs (list of str): Addresses that can be read from.
            outputs (list of str): Addresses that can be written to.

        Returns:
            context_id (str): the unique context_id of the session

        Raises:
            CreateContextException: if an input/output is not a valid
                address or namespace.
            KeyError: if a base context id is unknown.
        """
        LOGGER.debug('create_context: STATE=%s',state_hash[:8])
        for address in inputs:
            if not self.namespace_is_valid(address):
                raise CreateContextException(
                    "Address or namespace {} listed in inputs is not "
                    "valid".format(address))
        for address in outputs:
            if not self.namespace_is_valid(address):
                raise CreateContextException(
                    "Address or namespace {} listed in outputs is not "
                    "valid".format(address))
        # Only fully-qualified addresses can be prefetched; namespace
        # prefixes are left for on-demand resolution.
        addresses_to_find = [add for add in inputs if len(add) == 70]
        address_values, reads = self._find_address_values_in_chain(
            base_contexts=base_contexts,
            addresses_to_find=addresses_to_find)
        context = ExecutionContext(
            state_hash=state_hash,
            read_list=inputs,
            write_list=outputs,
            base_context_ids=base_contexts)
        contexts_asked_not_found = [cid for cid in base_contexts
                                    if cid not in self._contexts]
        if contexts_asked_not_found:
            raise KeyError(
                "Basing a new context off of context ids {} "
                "that are not in context manager".format(
                    contexts_asked_not_found))
        context.create_initial(address_values)
        self._contexts[context.session_id] = context
        if reads:
            # Anything not found in the context chain is fetched from the
            # merkle tree asynchronously by the reader thread.
            context.create_prefetch(reads)
            self._address_queue.put_nowait(
                (context.session_id, state_hash, reads))
        return context.session_id

    def _find_address_values_in_chain(self, base_contexts, addresses_to_find):
        """Breadth first search through the chain of contexts searching for
        the bytes values at the addresses in addresses_to_find.

        Args:
            base_contexts (list of str): The context ids to start with.
            addresses_to_find (list of str): Addresses to find values in the
                chain of contexts.

        Returns:
            tuple of found address_values and still not found addresses
        """
        contexts_in_chain = deque()
        contexts_in_chain.extend(base_contexts)
        reads = list(addresses_to_find)
        address_values = []
        context_ids_already_searched = []
        context_ids_already_searched.extend(base_contexts)
        # There are two loop exit conditions, either all the addresses that
        # are being searched for have been found, or we run out of contexts
        # in the chain of contexts.
        while reads:
            try:
                current_c_id = contexts_in_chain.popleft()
            except IndexError:
                # There aren't any more contexts known about.
                break
            current_context = self._contexts[current_c_id]
            # First, check for addresses that have been deleted.
            deleted_addresses = current_context.get_if_deleted(reads)
            for address in deleted_addresses:
                if address is not None:
                    address_values.append((address, None))
            reads = list(set(reads) - set(deleted_addresses))
            # Second, check for addresses that have been set in the context,
            # and remove those addresses from being asked about again. Here
            # any value of None means the address hasn't been set.
            values = current_context.get_if_set(reads)
            addresses_not_found = []
            for address, value in zip(reads, values):
                if value is not None:
                    address_values.append((address, value))
                else:
                    addresses_not_found.append(address)
            reads = addresses_not_found
            # Next check for addresses that might be in a context
            # because they were inputs.
            addresses_in_inputs = [address for address in reads
                                   if address in current_context]
            values = current_context.get_if_not_set(addresses_in_inputs)
            address_values.extend(list(zip(addresses_in_inputs, values)))
            for add in addresses_in_inputs:
                reads.remove(add)
            for c_id in current_context.base_contexts:
                if c_id not in context_ids_already_searched:
                    contexts_in_chain.append(c_id)
                    context_ids_already_searched.append(c_id)
        return address_values, reads

    def delete_contexts(self, context_id_list):
        """Delete contexts from the ContextManager.

        Args:
            context_id_list (list): a list of context ids

        Returns:
            None
        """
        for c_id in context_id_list:
            if c_id in self._contexts:
                del self._contexts[c_id]

    def delete(self, context_id, address_list):
        """Delete the values associated with list of addresses, for a specific
        context referenced by context_id.

        Args:
            context_id (str): the return value of create_context, referencing
                a particular context.
            address_list (list): a list of address strs

        Returns:
            (bool): True if the operation is successful, False if
                the context_id doesn't reference a known context.

        Raises:
            AuthorizationException: Raised when an address in address_list is
                not authorized either by not being in the inputs for the
                txn associated with this context, or it is under a namespace
                but the characters that are under the namespace are not valid
                address characters.
        """
        if context_id not in self._contexts:
            return False
        context = self._contexts[context_id]
        for add in address_list:
            if not self.address_is_valid(address=add):
                raise AuthorizationException(address=add)
        context.delete_direct(address_list)
        return True

    def get(self, context_id, address_list):
        """Get the values associated with list of addresses, for a specific
        context referenced by context_id.

        Args:
            context_id (str): the return value of create_context, referencing
                a particular context.
            address_list (list): a list of address strs

        Returns:
            values_list (list): a list of (address, value) tuples

        Raises:
            AuthorizationException: Raised when an address in address_list is
                not authorized either by not being in the inputs for the
                txn associated with this context, or it is under a namespace
                but the characters that are under the namespace are not valid
                address characters.
        """
        if context_id not in self._contexts:
            return []
        for add in address_list:
            if not self.address_is_valid(address=add):
                raise AuthorizationException(address=add)
        context = self._contexts[context_id]
        addresses_in_ctx = [add for add in address_list if add in context]
        addresses_not_in_ctx = list(set(address_list) - set(addresses_in_ctx))
        values = context.get(addresses_in_ctx)
        values_list = list(zip(addresses_in_ctx, values))
        if addresses_not_in_ctx:
            # Validate the addresses that won't be validated by a direct get on
            # the context.
            for address in addresses_not_in_ctx:
                context.validate_read(address)
            address_values, reads = self._find_address_values_in_chain(
                base_contexts=[context_id],
                addresses_to_find=addresses_not_in_ctx)
            values_list.extend(address_values)
            if reads:
                # Anything not in the context chain is read from state.
                tree = MerkleDatabase(self._database, context.merkle_root)
                add_values = []
                for add in reads:
                    value = None
                    try:
                        value = tree.get(add)
                    except KeyError:
                        # The address is not in the radix tree/merkle tree
                        pass
                    add_values.append((add, value))
                values_list.extend(add_values)
        # Preserve the caller's requested ordering.
        values_list.sort(key=lambda x: address_list.index(x[0]))
        return values_list

    def set(self, context_id, address_value_list):
        """Within a context, sets addresses to a value.

        Args:
            context_id (str): the context id returned by create_context
            address_value_list (list): list of {address: value} dicts

        Returns:
            (bool): True if the operation is successful, False if
                the context_id doesn't reference a known context.

        Raises:
            AuthorizationException if an address is given in the
            address_value_list that was not in the original
            transaction's outputs, or was under a namespace but the
            characters after the namespace are not valid address
            characters.
        """
        if context_id not in self._contexts:
            LOGGER.warning("Context_id not in contexts, %s", context_id)
            return False
        context = self._contexts.get(context_id)
        add_value_dict = {}
        for d in address_value_list:
            for add, val in d.items():
                if not self.address_is_valid(address=add):
                    raise AuthorizationException(address=add)
                add_value_dict[add] = val
        # check ADDR for testing
        #LOGGER.debug("context.set_direct %s",add_value_dict)
        context.set_direct(add_value_dict)
        return True

    def get_context_handlers(self):
        """Return a dict of DAG-specific state helper callables.

        Keys: 'recompute_state', 'update_state', 'merkle_root',
        'update_merkle_root', 'check_merkle' (the last one is a debugging
        aid).  These operate directly on the underlying database.
        """
        # list of handlers for DAG version
        def _recompute_state_hash(state_root,context=None):
            # for DAG only - recompute state from saved updates/deletes
            state_hash = None
            try:
                tree = MerkleDatabase(self._database, state_root)
                state_hash = tree.update(context['updates'],context['deletes'], virtual=True)
                LOGGER.debug('_recompute_state_hash: STATE=%s->%s\n',state_root[:8],state_hash[:8])
            except :
                LOGGER.debug('_recompute_state_hash: BAD STATE=%s\n',state_root[:8])
            return state_hash
        def _update_state(old,new):
            # for DAG only - make virtual root state correction for block state using mapping
            #LOGGER.debug('_update_state: OLD STATE=%s\n',old[:8])
            if old in self._database:
                LOGGER.debug('_update_state:THERE IS MAPPING FOR STATE=%s!\n\n',old[:8])
            else:
                LOGGER.debug('_update_state:ADD MAPPING FOR STATE=%s->%s\n',old[:8],new[:8])
                if new in self._database:
                    # Follow an existing alias so the mapping points at the
                    # real state root, not at another alias.
                    ref = self._database[new]
                    if isinstance(ref,str):
                        new = ref
                        LOGGER.debug('_update_state: TIGHT REF ON REAL STATE=%s\n',ref[:8])
                self._database.put(old,new)
        def get_merkle_root():
            return MerkleDatabase.get_real_merkle_root(self._database)
        def update_merkle_root(key_hash):
            return MerkleDatabase.update_merkle_root(self._database,key_hash)
        def _check_merkle(state_root,context=''):
            # for testing
            # check state for testing
            try:
                tree = MerkleDatabase(self._database, state_root)
            except :
                LOGGER.debug('_CHECK: BAD STATE=%s ROOT %s\n',state_root[:8],context)
                return
            try:
                tree._get_by_addr("449095bc5d9deba00a635d8db93c9deeb043416204f494b9f07862e9445559f0185109")
                LOGGER.debug('_CHECK: ADDRESS YES CHECK STATE=%s %s\n',state_root[:8],context)
            except :
                LOGGER.debug('_CHECK: ADDRESS NO CHECK STATE=%s %s\n',state_root[:8],context)
        return {'recompute_state':_recompute_state_hash,'update_state':_update_state,'merkle_root':get_merkle_root,'update_merkle_root':update_merkle_root,'check_merkle':_check_merkle}

    def get_squash_handler(self):
        """Return a closure that squashes a chain of contexts into state.

        The returned ``_squash(state_root, context_ids, persist, clean_up)``
        merges the updates/deletes of the context chain, applies them to the
        merkle tree (virtually unless ``persist``), optionally deletes the
        visited contexts, and returns ``(state_hash, updates, deletes)``.
        """
        def _squash(state_root, context_ids, persist, clean_up):
            contexts_in_chain = deque()
            contexts_in_chain.extend(context_ids)
            context_ids_already_searched = []
            context_ids_already_searched.extend(context_ids)
            """
            # for testing
            # check state for testing
            tree = MerkleDatabase(self._database, state_root)
            try:
                tree._get_by_addr("449095bc5d9deba00a635d8db93c9deeb043416204f494b9f07862e9445559f0185109")
                LOGGER.debug('_SQUASH: ADDRESS YES BEFORE\n')
            except :
                LOGGER.debug('_SQUASH: ADDRESS NO BEFORE\n')
            """
            # There is only one exit condition and that is when all the
            # contexts have been accessed once.
            #LOGGER.debug('_SQUASH: persist=%s clean_up=%s \n',persist,clean_up)
            updates = dict()
            deletes = set()
            while contexts_in_chain:
                current_c_id = contexts_in_chain.popleft()
                current_context = self._contexts[current_c_id]
                if not current_context.is_read_only():
                    current_context.make_read_only()
                addresses_w_values = current_context.get_all_if_set()
                for add, val in addresses_w_values.items():
                    # Since we are moving backwards through the graph of
                    # contexts, only update if the address hasn't been set
                    # or deleted
                    if add not in updates and add not in deletes:
                        updates[add] = val
                addresses_w_values = current_context.get_all_if_deleted()
                for add, _ in addresses_w_values.items():
                    # Since we are moving backwards through the graph of
                    # contexts, only add to deletes if the address hasn't been
                    # previously deleted or set in the graph
                    if add not in updates and add not in deletes:
                        deletes.add(add)
                for c_id in current_context.base_contexts:
                    if c_id not in context_ids_already_searched:
                        contexts_in_chain.append(c_id)
                        context_ids_already_searched.append(c_id)
            tree = MerkleDatabase(self._database, state_root) # was here
            """
            # check state for testing
            try:
                tree._get_by_addr("449095bc5d9deba00a635d8db93c9deeb043416204f494b9f07862e9445559f0185109")
                LOGGER.debug('_SQUASH: ADDRESS YES STATE=%s\n',state_root[:8] if state_root is not None else None)
            except :
                LOGGER.debug('_SQUASH: ADDRESS NO STATE=%s\n',state_root[:8] if state_root is not None else None)
            """
            # filter the delete list to just those items in the tree
            deletes = [addr for addr in deletes if addr in tree]
            if not updates and not deletes:
                state_hash = state_root
            else:
                virtual = not persist
                # for compute new state - we can save updates, deletes for recompute it for DAG
                state_hash = tree.update(updates, deletes, virtual=virtual)
                #LOGGER.debug('_SQUASH: virtual=%s updates=%s deletes=%s STATE=%s\n',virtual,updates,deletes,state_hash[:8])
            if clean_up:
                self.delete_contexts(context_ids_already_searched)
            """
            # check state for testing
            try:
                tree._get_by_addr("449095bc5d9deba00a635d8db93c9deeb043416204f494b9f07862e9445559f0185109")
                LOGGER.debug('_SQUASH: ADDRESS YES AFTER STATE=%s\n',state_root[:8] if state_root is not None else None)
            except :
                LOGGER.debug('_SQUASH: ADDRESS NO AFTER STATE=%s\n',state_root[:8] if state_root is not None else None)
            """
            return (state_hash,updates,deletes) # for DAG
        return _squash

    def stop(self):
        """Signal the reader and writer threads to exit their loops."""
        self._address_queue.put_nowait(_SHUTDOWN_SENTINEL)
        self._inflated_addresses.put_nowait(_SHUTDOWN_SENTINEL)

    def add_execution_data(self, context_id, data):
        """Within a context, append data to the execution result.

        Args:
            context_id (str): the context id returned by create_context
            data (bytes): data to append

        Returns:
            (bool): True if the operation is successful, False if
                the context_id doesn't reference a known context.
        """
        if context_id not in self._contexts:
            LOGGER.warning("Context_id not in contexts, %s", context_id)
            return False
        context = self._contexts.get(context_id)
        context.add_execution_data(data)
        return True

    def add_execution_event(self, context_id, event):
        """Within a context, append an event to the execution result.

        Args:
            context_id (str): the context id returned by create_context
            event: event to append

        Returns:
            (bool): True if the operation is successful, False if
                the context_id doesn't reference a known context.
        """
        if context_id not in self._contexts:
            LOGGER.warning("Context_id not in contexts, %s", context_id)
            return False
        context = self._contexts.get(context_id)
        context.add_execution_event(event)
        return True

    def get_execution_results(self, context_id):
        """Return copies of (sets, deletes, events, data) for a context."""
        context = self._contexts.get(context_id)
        return (context.get_all_if_set().copy(),
                context.get_all_if_deleted().copy(),
                context.get_execution_events().copy(),
                context.get_execution_data().copy())
class _ContextReader(InstrumentedThread):
    """Background thread that resolves addresses from the merkle tree.

    Attributes:
        _addresses (queue.Queue): each item is a tuple
            (context_id, state_hash, address_list)
        _inflated_addresses (queue.Queue): each item is a tuple
            (context_id, [(address, value), ...
    """

    def __init__(self, database, address_queue, inflated_addresses):
        super(_ContextReader, self).__init__(name='_ContextReader')
        self._database = database
        self._addresses = address_queue
        self._inflated_addresses = inflated_addresses

    def run(self):
        # Runs for the process lifetime; a _SHUTDOWN_SENTINEL on the input
        # queue ends the loop.
        #LOGGER.debug('_ContextReader: run \n')
        while True:
            context_state_addresslist_tuple = self._addresses.get(block=True)
            if context_state_addresslist_tuple is _SHUTDOWN_SENTINEL:
                break
            c_id, state_hash, address_list = context_state_addresslist_tuple
            #LOGGER.debug('_ContextReader: run state_hash=%s\n',state_hash)
            tree = MerkleDatabase(self._database, state_hash)
            """
            # for testing only
            # check state for testing
            try:
                tree._get_by_addr("449095bc5d9deba00a635d8db93c9deeb043416204f494b9f07862e9445559f0185109")
                LOGGER.debug('_ContextReader: ADDRESS YES \n')
            except :
                LOGGER.debug('_ContextReader: ADDRESS NO \n')
            """
            return_values = []
            for address in address_list:
                value = None
                try:
                    value = tree.get(address)
                except KeyError:
                    # Address absent from state: report it as None.
                    pass
                return_values.append((address, value))
            self._inflated_addresses.put((c_id, return_values))
class _ContextWriter(InstrumentedThread):
    """Reads off of a shared queue from _ContextReader and writes values
    to the contexts shared with the ContextManager.
    """

    def __init__(self, inflated_addresses, contexts):
        """
        Args:
            inflated_addresses (queue.Queue): Contains the context id of the
                context to write to, and the address-value pairs.
            contexts (_ThreadsafeContexts): The datastructures to write the
                address-value pairs to.
        """
        super(_ContextWriter, self).__init__(name='_ContextWriter')
        self._inflated_addresses = inflated_addresses
        self._contexts = contexts

    def run(self):
        # Runs for the process lifetime; a _SHUTDOWN_SENTINEL on the input
        # queue ends the loop.
        while True:
            context_id_list_tuple = self._inflated_addresses.get(
                block=True)
            if context_id_list_tuple is _SHUTDOWN_SENTINEL:
                break
            c_id, inflated_address_list = context_id_list_tuple
            inflated_value_map = {k: v for k, v in inflated_address_list}
            # The context may already have been deleted; drop the result then.
            if c_id in self._contexts:
                self._contexts[c_id].set_from_tree(inflated_value_map)
class _ThreadsafeContexts(object):
    """A minimal lock-guarded mapping from context id to context.

    Note: unlike ``dict.get``, :meth:`get` raises ``KeyError`` for a
    missing key, mirroring ``__getitem__``.
    """

    def __init__(self):
        self._mutex = Lock()
        self._store = {}

    def __getitem__(self, item):
        return self.get(item)

    def __setitem__(self, key, value):
        with self._mutex:
            self._store[key] = value

    def __contains__(self, item):
        with self._mutex:
            return item in self._store

    def get(self, item):
        with self._mutex:
            return self._store[item]

    def __delitem__(self, key):
        with self._mutex:
            del self._store[key]
| 39.068323 | 184 | 0.607313 |
1858bb59f33697a44cc76ca5fc34da62a33abb95 | 128 | py | Python | terraform_projects/ecs_django_template/project/util.py | romelBen/aws-projects | 3fa94c4d6367a17349c1162350d05733be32a4b6 | [
"MIT"
] | null | null | null | terraform_projects/ecs_django_template/project/util.py | romelBen/aws-projects | 3fa94c4d6367a17349c1162350d05733be32a4b6 | [
"MIT"
] | null | null | null | terraform_projects/ecs_django_template/project/util.py | romelBen/aws-projects | 3fa94c4d6367a17349c1162350d05733be32a4b6 | [
"MIT"
] | null | null | null | version https://git-lfs.github.com/spec/v1
oid sha256:d81f425793ed3587346b8bbe9bbfce56ab89626cc5c8ecf92dcf262aed2d26d8
size 560
| 32 | 75 | 0.882813 |
df54f550d2609e4c3fb30a586b6e703a19de6b76 | 615 | py | Python | tests/test_linalg.py | roytu/ml-dft-hubbard | 86d703720a0c6513d69de2ad1135602eaadaf0e6 | [
"MIT"
] | null | null | null | tests/test_linalg.py | roytu/ml-dft-hubbard | 86d703720a0c6513d69de2ad1135602eaadaf0e6 | [
"MIT"
] | null | null | null | tests/test_linalg.py | roytu/ml-dft-hubbard | 86d703720a0c6513d69de2ad1135602eaadaf0e6 | [
"MIT"
] | null | null | null |
import numpy as np
from numpy import linalg as LA
from mldfthubbard.hubbard import HubbardInstance
def test_linalg():
    """Check LA.eigh eigenvalues against reference values from Wolfram Alpha."""
    cases = [
        (
            np.array([[0, 1, 2],
                      [1, 0, 3],
                      [2, 3, 0]]),
            np.array([-3.20191, -0.911179, 4.11309]),
        ),
        (
            np.array([[0, -1, -2],
                      [-1, 0, -3],
                      [-2, -3, 0]]),
            np.array([-4.11309, 0.911179, 3.20191]),
        ),
    ]
    for matrix, expected in cases:
        eigenvalues, _ = LA.eigh(matrix)
        assert np.allclose(eigenvalues, expected)
| 16.621622 | 58 | 0.508943 |
bce33ebee24f9957221ff3a929dd03664bbbc82d | 922 | py | Python | cryptoportfolio/interfaces/miningpools/f2pool.py | a1fred/cryptoportfolio | e49436324ccaa7325c3daa74e02fcc1b6b9aeae1 | [
"MIT"
] | 7 | 2018-02-26T13:37:09.000Z | 2022-02-09T03:44:26.000Z | cryptoportfolio/interfaces/miningpools/f2pool.py | a1fred/cryptoportfolio | e49436324ccaa7325c3daa74e02fcc1b6b9aeae1 | [
"MIT"
] | null | null | null | cryptoportfolio/interfaces/miningpools/f2pool.py | a1fred/cryptoportfolio | e49436324ccaa7325c3daa74e02fcc1b6b9aeae1 | [
"MIT"
] | null | null | null | from decimal import Decimal
import requests
from cryptoportfolio.interfaces.base import Address
class F2PoolWallet(Address):
    """Balance provider for a worker account on the f2pool mining pool.

    Queries the pool's public HTTP API and reports the account's balance
    as a single (symbol, Decimal) holding.
    """

    decimal_places = 18
    # Resolved per-instance in __init__ from the currency name.
    symbol = None

    # NOTE: the attribute name keeps its historical typo ("currecnices")
    # because it is part of the public class interface.
    f2pool_currecnices_mapping = {
        'bitcoin': "BTC",
        'litecoin': "LTC",
        'etc': "ETC",
        'eth': "ETH",
        'zec': "ZEC",
        'sc': "SC",
        'monero': "XMR",
        'dash': "DASH",
    }

    def __init__(self, currency, user, **kwargs):
        """
        Args:
            currency: f2pool currency name; must be a key of
                ``f2pool_currecnices_mapping``.
            user: pool account/worker name interpolated into the API URL.

        Raises:
            ValueError: if ``currency`` is unsupported.  (Previously an
                ``assert``, which is silently stripped under ``python -O``.)
        """
        if currency not in self.f2pool_currecnices_mapping:
            raise ValueError(
                "Unsupported f2pool currency: %r (expected one of %s)"
                % (currency, sorted(self.f2pool_currecnices_mapping)))
        self.symbol = self.f2pool_currecnices_mapping[currency]
        self.currency = currency
        self.user = user
        super(F2PoolWallet, self).__init__(**kwargs)

    def _get_addr_coins_and_tokens_balance(self):
        """Fetch the unpaid balance from the f2pool API.

        Returns:
            list with one ``(symbol, Decimal balance)`` tuple.
        """
        result = requests.get(
            "http://api.f2pool.com/%s/%s" % (self.currency, self.user)).json()
        return [
            (self.symbol, Decimal(result['balance']))
        ]
| 26.342857 | 96 | 0.600868 |
dbfd23c37de189cbd2c656db4819eed0f163acd6 | 550 | py | Python | test/data/fair-crcc-send-data/workflow/scripts/gen_rename_index.py | crs4/snakemake-crate | d10167eee64b5fd9f7c09b50093a4b630b48db54 | [
"Apache-2.0"
] | null | null | null | test/data/fair-crcc-send-data/workflow/scripts/gen_rename_index.py | crs4/snakemake-crate | d10167eee64b5fd9f7c09b50093a4b630b48db54 | [
"Apache-2.0"
] | null | null | null | test/data/fair-crcc-send-data/workflow/scripts/gen_rename_index.py | crs4/snakemake-crate | d10167eee64b5fd9f7c09b50093a4b630b48db54 | [
"Apache-2.0"
] | null | null | null |
from typing import List, TextIO
from uuid import uuid4
def write_index_file(f: TextIO, mapping: dict) -> None:
for new_fname in sorted(mapping):
original_name = mapping[new_fname]
f.write(f"{new_fname}\t{original_name}\n")
def gen_rename_index(source_items: List[str], output_path: str):
mapping = {str(uuid4()) + '.c4gh': original_name for original_name in source_items}
with open(output_path, 'w') as f:
write_index_file(f, mapping)
gen_rename_index(snakemake.params.source_items, snakemake.output.index)
| 28.947368 | 87 | 0.721818 |
2279b89acabf2f8fa4aa51eb5f011498a1f36e79 | 48,569 | py | Python | src/sage/combinat/skew_tableau.py | felix-salfelder/sage | 5d8b2ff4794c44c7fa7a9d86ec567ecfa337e566 | [
"BSL-1.0"
] | 2 | 2021-08-20T00:30:35.000Z | 2021-11-17T10:54:00.000Z | src/sage/combinat/skew_tableau.py | felix-salfelder/sage | 5d8b2ff4794c44c7fa7a9d86ec567ecfa337e566 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/skew_tableau.py | felix-salfelder/sage | 5d8b2ff4794c44c7fa7a9d86ec567ecfa337e566 | [
"BSL-1.0"
] | null | null | null | r"""
Skew Tableaux
"""
#*****************************************************************************
# Copyright (C) 2007 Mike Hansen <mhansen@gmail.com>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.rings.all import Integer, QQ, ZZ
from sage.misc.misc import uniq
from sage.functions.all import factorial
from sage.matrix.all import zero_matrix
import partition
import sage.combinat.tableau
import skew_partition
import partition
import copy
from combinat import CombinatorialObject, CombinatorialClass, InfiniteAbstractCombinatorialClass
from integer_vector import IntegerVectors
from sage.combinat.words.words import Words
def SkewTableau(st=None, expr=None):
"""
Returns the skew tableau object corresponding to st.
Note that Sage uses the English convention for partitions and
tableaux.
EXAMPLES::
sage: st = SkewTableau([[None, 1],[2,3]]); st
[[None, 1], [2, 3]]
sage: st.inner_shape()
[1]
sage: st.outer_shape()
[2, 2]
The expr form of a skew tableau consists of the inner partition
followed by a list of the entries in row from bottom to top.
::
sage: SkewTableau(expr=[[1,1],[[5],[3,4],[1,2]]])
[[None, 1, 2], [None, 3, 4], [5]]
"""
if isinstance(st, SkewTableau_class):
return st
if expr is not None:
return from_expr(expr)
for row in st:
if not isinstance(row, list):
raise TypeError, "each element of the skew tableau must be a list"
if row == []:
raise TypeError, "a skew tableau cannot have an empty list for a row"
return SkewTableau_class(st)
class SkewTableau_class(CombinatorialObject):
    # A skew tableau, stored as a list of rows; empty cells are None.
    def __init__(self, t):
        """
        Initialize ``self`` from the list of rows ``t``.

        TESTS::

            sage: st = SkewTableau([[None, 1],[2,3]])
            sage: st == loads(dumps(st))
            True
        """
        CombinatorialObject.__init__(self,t)
def _repr_diagram(self):
"""
Return a string representation of ``self`` as a diagram.
EXAMPLES::
sage: print SkewTableau([[None,2,3],[None,4],[5]])._repr_diagram()
. 2 3
. 4
5
"""
none_str = lambda x: " ." if x is None else "%3s"%str(x)
new_rows = ["".join(map(none_str, row)) for row in self]
return '\n'.join(new_rows)
    def pp(self):
        """
        Pretty-print the tableau (prints the diagram; returns nothing).

        EXAMPLES::

            sage: SkewTableau([[None,2,3],[None,4],[5]]).pp()
              .  2  3
              .  4
              5
        """
        print self._repr_diagram()
    def _ascii_art_(self):
        """
        Return an ascii-art object for ``self`` (used by Sage's display hook).

        TESTS::

            sage: ascii_art(RibbonTableaux([[2,1],[]],[1,1,1],1).list())
            [   1  3    1  2 ]
            [   2   ,   3    ]
        """
        from sage.misc.ascii_art import AsciiArt
        return AsciiArt(self._repr_diagram().splitlines())
def outer_shape(self):
"""
Returns the outer shape of the tableau.
EXAMPLES::
sage: SkewTableau([[None,1,2],[None,3],[4]]).outer_shape()
[3, 2, 1]
"""
return partition.Partition([len(row) for row in self])
def inner_shape(self):
"""
Returns the inner shape of the tableau.
EXAMPLES::
sage: SkewTableau([[None,1,2],[None,3],[4]]).inner_shape()
[1, 1]
"""
return partition.Partition(filter(lambda x: x != 0, [len(filter(lambda x: x is None, row)) for row in self]))
    def shape(self):
        r"""
        Return the shape of a tableau t, as the pair (outer, inner).

        EXAMPLES::

            sage: SkewTableau([[None,1,2],[None,3],[4]]).shape()
            [[3, 2, 1], [1, 1]]
        """
        return skew_partition.SkewPartition([self.outer_shape(), self.inner_shape()])
    def outer_size(self):
        """
        Return the size (number of cells) of the outer shape of the skew
        tableau.

        EXAMPLES::

            sage: SkewTableau([[None, 2, 4], [None, 3], [1]]).outer_size()
            6
            sage: SkewTableau([[None, 2], [1, 3]]).outer_size()
            4
        """
        return self.outer_shape().size()
    def inner_size(self):
        """
        Return the size (number of cells) of the inner shape of the skew
        tableau.

        EXAMPLES::

            sage: SkewTableau([[None, 2, 4], [None, 3], [1]]).inner_size()
            2
            sage: SkewTableau([[None, 2], [1, 3]]).inner_size()
            1
        """
        return self.inner_shape().size()
def size(self):
"""
Returns the number of cells in the skew tableau.
EXAMPLES::
sage: SkewTableau([[None, 2, 4], [None, 3], [1]]).size()
4
sage: SkewTableau([[None, 2], [1, 3]]).size()
3
"""
return sum([len(filter(lambda x: x is not None,row)) for row in self])
def conjugate(self):
"""
Returns the conjugate of the skew tableau.
EXAMPLES::
sage: SkewTableau([[None,1],[2,3]]).conjugate()
[[None, 2], [1, 3]]
"""
conj_shape = self.outer_shape().conjugate()
conj = [[None]*row_length for row_length in conj_shape]
for i in range(len(conj)):
for j in range(len(conj[i])):
conj[i][j] = self[j][i]
return SkewTableau(conj)
def to_word_by_row(self):
"""
Returns a word obtained from a row reading of the skew tableau.
EXAMPLES::
sage: s = SkewTableau([[None,1],[2,3]])
sage: s.pp()
. 1
2 3
sage: s.to_word_by_row()
word: 231
sage: s = SkewTableau([[None, 2, 4], [None, 3], [1]])
sage: s.pp()
. 2 4
. 3
1
sage: s.to_word_by_row()
word: 1324
TESTS::
sage: SkewTableau([[None, None, None], [None]]).to_word_by_row()
word:
sage: SkewTableau([]).to_word_by_row()
word:
"""
word = []
for row in self:
word = row + word
return Words("positive integers")([i for i in word if i is not None])
    def to_word_by_column(self):
        """
        Return the word obtained from a column reading of the skew
        tableau (columns read bottom to top, leftmost column first).

        EXAMPLES::

            sage: s = SkewTableau([[None,1],[2,3]])
            sage: s.pp()
              .  1
              2  3
            sage: s.to_word_by_column()
            word: 132

        ::

            sage: s = SkewTableau([[None, 2, 4], [None, 3], [1]])
            sage: s.pp()
              .  2  4
              .  3
              1
            sage: s.to_word_by_column()
            word: 4231
        """
        # Column reading of self == row reading of the transpose.
        return self.conjugate().to_word_by_row()
    # Default reading word is the row reading.
    to_word = to_word_by_row
    def to_permutation(self):
        """
        Return a permutation with the entries of ``self`` obtained by reading
        ``self`` row by row, from the bottommost to the topmost row, with
        each row being read from left to right, in English convention.
        See :meth:`to_word_by_row()`.

        EXAMPLES::

            sage: SkewTableau([[None,2],[3,4],[None],[1]]).to_permutation()
            [1, 3, 4, 2]
            sage: SkewTableau([[None]]).to_permutation()
            []
        """
        from sage.combinat.permutation import Permutation
        return Permutation(self.to_word())
def evaluation(self):
"""
Returns the evaluation of the word from skew tableau.
EXAMPLES::
sage: SkewTableau([[1,2],[3,4]]).evaluation()
[1, 1, 1, 1]
"""
ed = self.to_word().evaluation_dict()
entries = ed.keys()
m = max(entries) + 1 if entries else -1
return [ed.get(k,0) for k in range(1,m)]
weight = evaluation
def is_standard(self):
"""
Returns True if self is a standard skew tableau and False
otherwise.
EXAMPLES::
sage: SkewTableau([[None, 2], [1, 3]]).is_standard()
True
sage: SkewTableau([[None, 2], [2, 4]]).is_standard()
False
sage: SkewTableau([[None, 3], [2, 4]]).is_standard()
False
sage: SkewTableau([[None, 2], [2, 4]]).is_standard()
False
"""
#Check to make sure that it is filled with 1...size
w = self.to_word()
if sorted(w) != range(1, self.size()+1):
return False
else:
return self.is_semistandard()
def is_semistandard(self):
"""
Returns True if self is a semistandard skew tableau and False
otherwise.
EXAMPLES::
sage: SkewTableau([[None, 2, 2], [1, 3]]).is_semistandard()
True
sage: SkewTableau([[None, 2], [2, 4]]).is_semistandard()
True
sage: SkewTableau([[None, 3], [2, 4]]).is_semistandard()
True
sage: SkewTableau([[None, 2], [1, 2]]).is_semistandard()
False
"""
t = self
#Check to make sure it is weakly increasing along the rows
for row in t:
for i in range(1, len(row)):
if row[i-1] is not None and row[i] < row[i-1]:
return False
#Check to make sure it is strictly increasing along the columns
conj = t.conjugate()
for row in conj:
for i in range(1, len(row)):
if row[i-1] is not None and row[i] <= row[i-1]:
return False
return True
def to_tableau(self):
"""
Returns a tableau with the same filling. This only works if the
inner shape of the skew tableau has size zero.
EXAMPLES::
sage: SkewTableau([[1,2],[3,4]]).to_tableau()
[[1, 2], [3, 4]]
"""
if self.inner_size() != 0:
raise ValueError, "the inner size of the skew tableau must be 0"
else:
return sage.combinat.tableau.Tableau(self[:])
def restrict(self, n):
"""
Returns the restriction of the (semi)standard skew tableau to all
the numbers less than or equal to n.
EXAMPLES::
sage: SkewTableau([[None,1],[2],[3]]).restrict(2)
[[None, 1], [2]]
sage: SkewTableau([[None,1],[2],[3]]).restrict(1)
[[None, 1]]
sage: SkewTableau([[None,1],[1],[2]]).restrict(1)
[[None, 1], [1]]
"""
t = self[:]
return SkewTableau( filter(lambda z: z != [], map(lambda x: filter(lambda y: y is None or y <= n, x), t)) )
    def to_chain(self):
        """
        Return the chain of partitions corresponding to the (semi)standard
        skew tableau.

        EXAMPLES::

            sage: SkewTableau([[None,1],[2],[3]]).to_chain()
            [[1], [2], [2, 1], [2, 1, 1]]
            sage: SkewTableau([[None,1],[1],[2]]).to_chain()
            [[1], [2, 1], [2, 1, 1]]
        """
        # Restrict to each distinct entry value in turn (0 gives the inner shape)
        # and record the outer shape of each restriction.
        weights = [0] + uniq(sorted(self.to_word()))
        return [ self.restrict(x).shape()[0] for x in weights]
    def slide(self, corner=None):
        """
        Apply a jeu-de-taquin slide to ``self`` on the specified inner
        corner and return the new tableau (``self`` is not modified).
        If no corner is given an arbitrary inner corner is chosen.

        Fulton, William. 'Young Tableaux'. p12-13

        EXAMPLES::

            sage: st = SkewTableau([[None, None, None, None,2],[None, None, None, None,6], [None, 2, 4, 4], [2, 3, 6], [5,5]])
            sage: st.slide((2,0))
            [[None, None, None, None, 2], [None, None, None, None, 6], [2, 2, 4, 4], [3, 5, 6], [5]]

        TESTS::

            sage: st
            [[None, None, None, None, 2], [None, None, None, None, 6], [None, 2, 4, 4], [2, 3, 6], [5, 5]]
        """
        # Work on a row-wise copy so self stays unchanged.
        new_st = [x[:] for x in self]
        inner_corners = self.inner_shape().corners()
        outer_corners = self.outer_shape().corners()
        if corner is not None:
            if tuple(corner) not in inner_corners:
                raise ValueError, "corner must be an inner corner"
        else:
            if len(inner_corners) == 0:
                return self
            else:
                corner = inner_corners[0]
        # (spotl, spotc) is the current position of the hole; move it until
        # it reaches an outer corner.
        spotl, spotc = corner
        while (spotl, spotc) not in outer_corners:
            #Check to see if there is nothing to the right
            if spotc == len(new_st[spotl]) - 1:
                #Swap the hole with the cell below
                new_st[spotl][spotc] = new_st[spotl+1][spotc]
                new_st[spotl+1][spotc] = None
                spotl += 1
                continue
            #Check to see if there is nothing below
            if (spotl == len(new_st) - 1) or (len(new_st[spotl+1]) <= spotc):
                #Swap the hole with the cell to the right
                new_st[spotl][spotc] = new_st[spotl][spotc+1]
                new_st[spotl][spotc+1] = None
                spotc += 1
                continue
            #If we get to this stage, we need to compare
            below = new_st[spotl+1][spotc]
            right = new_st[spotl][spotc+1]
            if below <= right:
                #Swap with the cell below
                new_st[spotl][spotc] = new_st[spotl+1][spotc]
                new_st[spotl+1][spotc] = None
                spotl += 1
                continue
            else:
                #Swap with the cell to the right
                new_st[spotl][spotc] = new_st[spotl][spotc+1]
                new_st[spotl][spotc+1] = None
                spotc += 1
                continue
        #Clean up to remove the "None" at an outside corner
        #Remove the last row if there is nothing left in it
        new_st[spotl].pop()
        if len(new_st[spotl]) == 0:
            new_st.pop()
        return SkewTableau(new_st)
def rectify(self):
"""
Returns a Tableau formed by applying the jeu de taquin process to
self.
Fulton, William. 'Young Tableaux'. p15
EXAMPLES::
sage: s = SkewTableau([[None,1],[2,3]])
sage: s.rectify()
[[1, 3], [2]]
sage: SkewTableau([[None, None, None, 4],[None,None,1,6],[None,None,5],[2,3]]).rectify()
[[1, 3, 4, 6], [2, 5]]
TESTS::
sage: s
[[None, 1], [2, 3]]
"""
rect = copy.deepcopy(self)
inner_corners = rect.inner_shape().corners()
while len(inner_corners) > 0:
rect = rect.slide()
inner_corners = rect.inner_shape().corners()
return rect.to_tableau()
    def standardization(self, check=True):
        r"""
        Return the standardization of ``self``, assuming ``self`` is a
        semistandard skew tableau.

        The standardization of a semistandard skew tableau `T` is the standard
        skew tableau `\mathrm{st}(T)` of the same shape as `T` whose
        reversed reading word is the standardization of the reversed reading
        word of `T`.

        The standardization of a word `w` can be formed by replacing all `1`'s
        in `w` by `1, 2, \ldots, k_1` from left to right, all `2`'s in `w` by
        `k_1 + 1, k_1 + 2, \ldots, k_2`, and repeating for all letters that
        appear in `w`.
        See also :meth:`Word.standard_permutation()`.

        INPUT:

        - ``check`` -- (Default: ``True``) Check to make sure ``self`` is
          semistandard. Set to ``False`` to avoid this check.

        EXAMPLES::

            sage: t = SkewTableau([[None,None,3,4,7,19],[None,4,4,8],[None,5,16,17],[None],[2],[3]])
            sage: t.standardization()
            [[None, None, 3, 6, 8, 12], [None, 4, 5, 9], [None, 7, 10, 11], [None], [1], [2]]

        Standard skew tableaux are fixed under standardization::

            sage: p = Partition([4,3,3,2])
            sage: q = Partitions(3).random_element()
            sage: all((t == t.standardization() for t in StandardSkewTableaux([p, q])))
            True

        The reading word of the standardization is the
        standardization of the reading word::

            sage: t = SkewTableau([[None,3,4,4],[None,6,10],[7,7,11],[18]])
            sage: t.to_word().standard_permutation() == t.standardization().to_permutation()
            True

        TESTS:

        Some corner cases::

            sage: t = SkewTableau([[None,None],[None]])
            sage: t.standardization()
            [[None, None], [None]]
            sage: t = SkewTableau([])
            sage: t.standardization()
            []
        """
        if check and not self.is_semistandard():
            raise ValueError("the skew tableau must be semistandard")
        # Standardize the reading word and refill the same shape with it.
        # This should be a SkewStandardTableau
        return from_shape_and_word(self.shape(), self.to_word_by_row().standard_permutation())
    def bender_knuth_involution(self, k, rows=None, check=True):
        r"""
        Return the image of ``self`` under the `k`-th Bender--Knuth
        involution, assuming ``self`` is a skew semistandard tableau.

        Let `T` be a tableau, then a *lower free `k` in `T`* means a cell of
        `T` which is filled with the integer `k` and whose direct lower
        neighbor is not filled with the integer `k + 1` (in particular,
        this lower neighbor might not exist at all). Let an *upper free `k + 1`
        in `T`* mean a cell of `T` which is filled with the integer `k + 1`
        and whose direct upper neighbor is not filled with the integer `k`
        (in particular, this neighbor might not exist at all). It is clear
        that for any row `r` of `T`, the lower free `k`'s and the upper
        free `k + 1`'s in `r` together form a contiguous interval or `r`.

        The *`k`-th Bender--Knuth switch at row `i`* changes the entries of
        the cells in this interval in such a way that if it used to have
        `a` entries of `k` and `b` entries of `k + 1`, it will now
        have `b` entries of `k` and `a` entries of `k + 1`. For fixed `k`, the
        `k`-th Bender--Knuth switches for different `i` commute. The
        composition of the `k`-th Bender--Knuth switches for all rows is
        called the *`k`-th Bender--Knuth involution*. This is used to show that
        the Schur functions defined by semistandard (skew) tableaux are
        symmetric functions.

        INPUT:

        - ``k`` -- an integer

        - ``rows`` -- (Default ``None``) When set to ``None``, the method
          computes the `k`-th Bender--Knuth involution as defined above.
          When an iterable, this computes the composition of the `k`-th
          Bender--Knuth switches at row `i` over all `i` in ``rows``. When set
          to an integer `i`, the method computes the `k`-th Bender--Knuth
          switch at row `i`. Note the indexing of the rows starts with `1`.

        - ``check`` -- (Default: ``True``) Check to make sure ``self`` is
          semistandard. Set to ``False`` to avoid this check.

        OUTPUT:

        The image of ``self`` under either the `k`-th Bender--Knuth
        involution, the `k`-th Bender--Knuth switch at a certain row, or
        the composition of such switches, as detailed in the INPUT section.

        EXAMPLES::

            sage: t = SkewTableau([[None,None,None,4,4,5,6,7],[None,2,4,6,7,7,7],[None,4,5,8,8,9],[None,6,7,10],[None,8,8,11],[None],[4]])
            sage: t
            [[None, None, None, 4, 4, 5, 6, 7], [None, 2, 4, 6, 7, 7, 7], [None, 4, 5, 8, 8, 9], [None, 6, 7, 10], [None, 8, 8, 11], [None], [4]]
            sage: t.bender_knuth_involution(1)
            [[None, None, None, 4, 4, 5, 6, 7], [None, 1, 4, 6, 7, 7, 7], [None, 4, 5, 8, 8, 9], [None, 6, 7, 10], [None, 8, 8, 11], [None], [4]]
            sage: t.bender_knuth_involution(4)
            [[None, None, None, 4, 5, 5, 6, 7], [None, 2, 4, 6, 7, 7, 7], [None, 5, 5, 8, 8, 9], [None, 6, 7, 10], [None, 8, 8, 11], [None], [5]]
            sage: t.bender_knuth_involution(5)
            [[None, None, None, 4, 4, 5, 6, 7], [None, 2, 4, 5, 7, 7, 7], [None, 4, 6, 8, 8, 9], [None, 5, 7, 10], [None, 8, 8, 11], [None], [4]]
            sage: t.bender_knuth_involution(6)
            [[None, None, None, 4, 4, 5, 6, 6], [None, 2, 4, 6, 6, 7, 7], [None, 4, 5, 8, 8, 9], [None, 6, 7, 10], [None, 8, 8, 11], [None], [4]]
            sage: t.bender_knuth_involution(666) == t
            True
            sage: t.bender_knuth_involution(4, 2) == t
            True
            sage: t.bender_knuth_involution(4, 3)
            [[None, None, None, 4, 4, 5, 6, 7], [None, 2, 4, 6, 7, 7, 7], [None, 5, 5, 8, 8, 9], [None, 6, 7, 10], [None, 8, 8, 11], [None], [4]]

        The Bender--Knuth involution is an involution::

            sage: t = SkewTableau([[None,3,4,4],[None,6,10],[7,7,11],[18]])
            sage: all(t.bender_knuth_involution(k).bender_knuth_involution(k) == t for k in range(1,4))
            True

        The same for the single switches::

            sage: all(t.bender_knuth_involution(k, j).bender_knuth_involution(k, j) == t for k in range(1,5) for j in range(1, 5))
            True

        Locality of the Bender--Knuth involutions::

            sage: all(t.bender_knuth_involution(k).bender_knuth_involution(l) == t.bender_knuth_involution(l).bender_knuth_involution(k) for k in range(1,5) for l in range(1,5) if abs(k - l) > 1)
            True

        Coxeter relation of the Bender--Knuth involutions (they have the form
        `(ab)^6 = 1`)::

            sage: p = lambda t, k: t.bender_knuth_involution(k).bender_knuth_involution(k + 1)
            sage: all(p(p(p(p(p(p(t,k),k),k),k),k),k) == t for k in range(1,5))
            True

        TESTS::

            sage: t = SkewTableau([])
            sage: t.bender_knuth_involution(3)
            []
            sage: t = SkewTableau([[None,None],[None]])
            sage: t.bender_knuth_involution(3)
            [[None, None], [None]]

        AUTHORS:

        - Darij Grinberg (2013-05-14)
        """
        if check and not self.is_semistandard():
            raise ValueError("the skew tableau must be semistandard")
        l = len(self) # l is the number of rows of self.
        # Sanitizing the rows input so that it always becomes a list of
        # nonnegative integers. We also subtract 1 from these integers
        # because the i-th row of a tableau T is T[i - 1].
        if rows is None:
            rows = range(l)
        elif rows in ZZ:
            rows = [rows - 1]
        else:
            rows = [i - 1 for i in rows]
        # Now, rows should be iterable.
        # result_tab is going to be the result tableau (as a list of lists);
        # we will build it up step by step, starting with a deep copy of self.
        result_tab = [row[:] for row in self]
        for i in rows:
            if i >= l:
                continue
            # Setup the previous and next rows
            if i == 0:
                prev_row = [None] * len(result_tab[i])
            else:
                prev_row = result_tab[i-1]
            if i == l - 1:
                next_row = [None] * len(result_tab[i])
            else:
                next_row = result_tab[i+1] + [None] * (len(result_tab[i]) - len(result_tab[i+1]))
            # Scan row i: a counts lower free k's, b counts upper free (k+1)'s;
            # sk / sk1 remember where each kind first occurs.
            a = 0
            b = 0
            sk = None # The first entry of k
            sk1 = None # The first entry of k+1
            for j, val in enumerate(result_tab[i]):
                if val == k and next_row[j] != k + 1:
                    if sk is None:
                        sk = j
                    a += 1
                elif val == k + 1 and prev_row[j] != k:
                    if sk1 is None:
                        sk1 = j
                    b += 1
            # Rewrite the free interval so it has b entries of k and a of k+1.
            if sk1 is not None:
                if a > b:
                    for j in range(sk1-(a-b), sk1):
                        result_tab[i][j] = k + 1
                elif a < b:
                    for j in range(sk1, sk1+b-a):
                        result_tab[i][j] = k
            elif sk is not None:
                for j in range(sk, sk+a):
                    result_tab[i][j] = k + 1
        return SkewTableau(result_tab) # This should be a SkewSemistandardTableau
def to_expr(self):
"""
The first list in a result corresponds to the inner partition of
the skew shape. The second list is a list of the rows in the skew
tableau read from the bottom up.
Provided for compatibility with MuPAD-Combinat. In MuPAD-Combinat,
if t is a skew tableau, then to_expr gives the same result as
expr(t) would give in MuPAD-Combinat.
EXAMPLES::
sage: SkewTableau([[None,1,1,3],[None,2,2],[1]]).to_expr()
[[1, 1], [[1], [2, 2], [1, 1, 3]]]
sage: SkewTableau([]).to_expr()
[[], []]
"""
rows = self.filling()
rows.reverse()
return [self.inner_shape(), rows]
def is_ribbon(self):
"""
Returns True if and only if self is a ribbon, that is if it has no
2x2 boxes.
EXAMPLES::
sage: SkewTableau([[None,1],[2,3]]).is_ribbon()
True
sage: SkewTableau([[None,1,2],[3,4,5]]).is_ribbon()
False
"""
outer = list(self.outer_shape())
inner = list(self.inner_shape())
inner += [0]*(len(outer)-len(inner))
for i in range(1, len(outer)):
if outer[i] > inner[i-1]+1:
return False
return True
def to_ribbon(self):
"""
Returns the ribbon version of self.
EXAMPLES::
sage: SkewTableau([[None,1],[2,3]]).to_ribbon()
[[1], [2, 3]]
"""
if not self.is_ribbon():
raise ValueError, "self must be a ribbon"
import ribbon
r = [ [i for i in row if i is not None] for row in self]
return ribbon.Ribbon_class(r)
    def filling(self):
        """
        Return a list of the non-empty entries in self, row by row.

        EXAMPLES::

            sage: t = SkewTableau([[None,1],[2,3]])
            sage: t.filling()
            [[1], [2, 3]]
        """
        return [ [i for i in row if i is not None] for row in self ]
    def cells_by_content(self, c):
        """
        Return the coordinates of the cells in self with content c,
        i.e. the cells (i,j) with j - i == c.

        EXAMPLES::

            sage: s = SkewTableau([[None,1,2],[3,4,5],[6]])
            sage: s.cells_by_content(0)
            [(1, 1)]
            sage: s.cells_by_content(1)
            [(0, 1), (1, 2)]
            sage: s.cells_by_content(2)
            [(0, 2)]
            sage: s.cells_by_content(-1)
            [(1, 0)]
            sage: s.cells_by_content(-2)
            [(2, 0)]
        """
        if len(self) == 0:
            return []
        # Find the starting cell of the diagonal of content c.
        if c >= 0:
            if c >= len(self[0]):
                return []
            i,j = 0,c
        else:
            c = -c
            if c >= len(self):
                return []
            i,j = c,0
        # Walk down the diagonal, skipping empty (None) cells.
        res = []
        while True:
            if self[i][j] is not None:
                res.append((i,j))
            i,j = i+1, j+1
            if i >= len(self) or j >= len(self[i]):
                break
        return res
    def entries_by_content(self, c):
        """
        Return the entries in self with content c.

        EXAMPLES::

            sage: s = SkewTableau([[None,1,2],[3,4,5],[6]])
            sage: s.entries_by_content(0)
            [4]
            sage: s.entries_by_content(1)
            [1, 5]
            sage: s.entries_by_content(2)
            [2]
            sage: s.entries_by_content(-1)
            [3]
            sage: s.entries_by_content(-2)
            [6]
        """
        return [self[i][j] for i,j in self.cells_by_content(c)]
def cells(self):
"""
Returns the cells in self.
EXAMPLES::
sage: s = SkewTableau([[None,1,2],[3],[6]])
sage: s.cells()
[(0, 1), (0, 2), (1, 0), (2, 0)]
"""
res = []
for i in range(len(self)):
for j in range(len(self[i])):
if self[i][j] is not None:
res.append( (i,j) )
return res
def _label_skew(list, sk):
"""
Returns a filled in a standard skew tableaux given an ordered list
of the coordinates to filled in.
EXAMPLES::
sage: import sage.combinat.skew_tableau as skew_tableau
sage: l = [ '0,0', '1,1', '1,0', '0,1' ]
sage: empty = [[None,None],[None,None]]
sage: skew_tableau._label_skew(l, empty)
[[1, 4], [3, 2]]
"""
i = 1
skew = copy.deepcopy(sk)
for coordstring in list:
coords = coordstring.split(",")
row = int(coords[0])
column = int(coords[1])
skew[row][column] = i
i += 1
return skew
def StandardSkewTableaux(skp=None):
    """
    Return the combinatorial class of standard skew tableaux of shape
    ``skp`` (where ``skp`` is a skew partition), of size ``skp`` (when it
    is an integer), or of all shapes and sizes (when it is ``None``).

    EXAMPLES::

        sage: StandardSkewTableaux([[3, 2, 1], [1, 1]]).list()
        [[[None, 1, 2], [None, 3], [4]],
         [[None, 1, 2], [None, 4], [3]],
         [[None, 1, 3], [None, 2], [4]],
         [[None, 1, 4], [None, 2], [3]],
         [[None, 1, 3], [None, 4], [2]],
         [[None, 1, 4], [None, 3], [2]],
         [[None, 2, 3], [None, 4], [1]],
         [[None, 2, 4], [None, 3], [1]]]
    """
    if skp is None:
        return StandardSkewTableaux_all()
    if isinstance(skp, (int, Integer)):
        return StandardSkewTableaux_size(skp)
    if skp in skew_partition.SkewPartitions():
        return StandardSkewTableaux_skewpartition(skew_partition.SkewPartition(skp))
    raise TypeError
class StandardSkewTableaux_all(InfiniteAbstractCombinatorialClass):
    # The infinite class of all standard skew tableaux.
    def __repr__(self):
        """
        EXAMPLES::

            sage: StandardSkewTableaux() #indirect doctest
            Standard skew tableaux
        """
        return "Standard skew tableaux"
    def __contains__(self, x):
        """
        EXAMPLES::

            sage: [[None, 2], [1, 3]] in StandardSkewTableaux()
            True
            sage: [[None, 2], [2, 4]] in StandardSkewTableaux()
            False
            sage: [[None, 3], [2, 4]] in StandardSkewTableaux()
            False
            sage: [[None, 2], [2, 4]] in StandardSkewTableaux()
            False
        """
        if isinstance(x, SkewTableau_class):
            return True
        # Anything coercible to a skew tableau is tested for standardness.
        try:
            x = SkewTableau(x)
        except TypeError:
            return False
        return x.is_standard()
    def _infinite_cclass_slice(self, n):
        """
        Needed by InfiniteAbstractCombinatorialClass to build __iter__.

        TESTS::

            sage: StandardSkewTableaux()._infinite_cclass_slice(4) == StandardSkewTableaux(4)
            True
            sage: it = iter(StandardSkewTableaux()) # indirect doctest
            sage: [it.next() for i in range(10)]
            [[], [[1]], [[1, 2]], [[1], [2]], [[None, 1], [2]], [[None, 2], [1]], [[1, 2, 3]], [[1, 2], [3]], [[1, 3], [2]], [[None, 1, 2], [3]]]
        """
        return StandardSkewTableaux_size(n)
class StandardSkewTableaux_size(CombinatorialClass):
    # Standard skew tableaux with a fixed number of cells.
    def __init__(self, n):
        """
        EXAMPLES::

            sage: s = StandardSkewTableaux(3)
            sage: s == loads(dumps(s))
            True
        """
        self.n = n
    def __repr__(self):
        """
        EXAMPLES::

            sage: StandardSkewTableaux(3) #indirect doctest
            Standard skew tableaux of size 3
        """
        return "Standard skew tableaux of size %s"%self.n
    def cardinality(self):
        """
        EXAMPLES::

            sage: StandardSkewTableaux(1).cardinality()
            1
            sage: StandardSkewTableaux(2).cardinality()
            4
            sage: StandardSkewTableaux(3).cardinality()
            24
            sage: StandardSkewTableaux(4).cardinality()
            194
        """
        # Sum the counts over all skew shapes of size n.
        return sum(StandardSkewTableaux_skewpartition(skp).cardinality()
                   for skp in skew_partition.SkewPartitions(self.n))
    def __iter__(self):
        """
        EXAMPLES::

            sage: StandardSkewTableaux(2).list() #indirect doctest
            [[[1, 2]], [[1], [2]], [[None, 1], [2]], [[None, 2], [1]]]
            sage: StandardSkewTableaux(3).list() #indirect doctest
            [[[1, 2, 3]],
             [[1, 2], [3]], [[1, 3], [2]],
             [[None, 1, 2], [3]], [[None, 1, 3], [2]],
             [[None, 2, 3], [1]],
             [[None, 1], [2, 3]], [[None, 2], [1, 3]],
             [[None, None, 1], [2, 3]], [[None, None, 2], [1, 3]], [[None, None, 3], [1, 2]],
             [[1], [2], [3]],
             [[None, 1], [None, 2], [3]], [[None, 1], [None, 3], [2]], [[None, 2], [None, 3], [1]],
             [[None, 1], [2], [3]], [[None, 2], [1], [3]], [[None, 3], [1], [2]],
             [[None, None, 1], [None, 2], [3]], [[None, None, 1], [None, 3], [2]],
             [[None, None, 2], [None, 1], [3]], [[None, None, 3], [None, 1], [2]],
             [[None, None, 2], [None, 3], [1]], [[None, None, 3], [None, 2], [1]]]
        """
        for skp in skew_partition.SkewPartitions(self.n):
            for sst in StandardSkewTableaux_skewpartition(skp):
                yield sst
class StandardSkewTableaux_skewpartition(CombinatorialClass):
    # Standard skew tableaux of a fixed skew shape.
    Element = SkewTableau_class
    def __init__(self, skp):
        """
        TESTS::

            sage: S = StandardSkewTableaux([[3, 2, 1], [1, 1]])
            sage: S == loads(dumps(S))
            True
        """
        self.skp = skp
    def list(self):
        """
        Return a list of all the standard skew tableaux with shape of the
        skew partition skp. The standard skew tableaux are ordered
        lexicographically by the word obtained from their row reading.

        EXAMPLES::

            sage: StandardSkewTableaux([[3, 2, 1], [1, 1]]).list()
            [[[None, 1, 2], [None, 3], [4]],
             [[None, 1, 2], [None, 4], [3]],
             [[None, 1, 3], [None, 2], [4]],
             [[None, 1, 4], [None, 2], [3]],
             [[None, 1, 3], [None, 4], [2]],
             [[None, 1, 4], [None, 3], [2]],
             [[None, 2, 3], [None, 4], [1]],
             [[None, 2, 4], [None, 3], [1]]]
        """
        return [st for st in self]
    def cardinality(self):
        """
        Return the number of standard skew tableaux with shape of the skew
        partition skp. This uses a formula due to Aitken
        (see Cor. 7.16.3 of [Sta1999]_).

        EXAMPLES::

            sage: StandardSkewTableaux([[3, 2, 1], [1, 1]]).cardinality()
            8
        """
        outer, inner = self.skp
        m = len(outer)
        n = sum(outer) - sum(inner)
        outer = list(outer)
        inner = list(inner) + [0]*(m-len(inner))
        # Aitken's determinant: a[i,j] = 1 / (outer_i - inner_j - i + j)!
        a = zero_matrix(QQ, m)
        for i in range(m):
            for j in range(m):
                v = outer[i] - inner[j] - i + j
                if v < 0:
                    a[i,j] = 0
                else:
                    a[i,j] = 1/factorial(v)
        return ZZ(factorial(n) * a.det())
    def __iter__(self):
        """
        An iterator for all the standard skew tableaux with shape of the
        skew partition skp. The standard skew tableaux are ordered
        lexicographically by the word obtained from their row reading.

        EXAMPLES::

            sage: [st for st in StandardSkewTableaux([[3, 2, 1], [1, 1]])] # indirect doctest
            [[[None, 1, 2], [None, 3], [4]],
             [[None, 1, 2], [None, 4], [3]],
             [[None, 1, 3], [None, 2], [4]],
             [[None, 1, 4], [None, 2], [3]],
             [[None, 1, 3], [None, 4], [2]],
             [[None, 1, 4], [None, 3], [2]],
             [[None, 2, 3], [None, 4], [1]],
             [[None, 2, 4], [None, 3], [1]]]
        """
        # Each linear extension of the shape's poset gives one filling.
        skp = self.skp
        dag = skp.to_dag()
        le_list = list(dag.topological_sort_generator())
        empty = [[None]*row_length for row_length in skp.outer()]
        for le in le_list:
            yield SkewTableau(_label_skew(le, empty))
def SemistandardSkewTableaux(p=None, mu=None):
"""
Returns a combinatorial class of semistandard skew tableaux.
EXAMPLES::
sage: SemistandardSkewTableaux()
Semistandard skew tableaux
::
sage: SemistandardSkewTableaux(3)
Semistandard skew tableaux of size 3
::
sage: SemistandardSkewTableaux([[2,1],[]])
Semistandard skew tableaux of shape [[2, 1], []]
::
sage: SemistandardSkewTableaux([[2,1],[]],[2,1])
Semistandard skew tableaux of shape [[2, 1], []] and weight [2, 1]
::
sage: SemistandardSkewTableaux(3, [2,1])
Semistandard skew tableaux of size 3 and weight [2, 1]
"""
if p is None and mu is None:
return SemistandardSkewTableaux_all()
if p is None:
raise ValueError, "you must specify either a size or shape"
if isinstance(p, (int, Integer)):
if mu is None:
return SemistandardSkewTableaux_size(p)
else:
return SemistandardSkewTableaux_size_weight(p, mu)
if p in skew_partition.SkewPartitions():
p = skew_partition.SkewPartition(p)
if mu is None:
return SemistandardSkewTableaux_shape(p)
else:
return SemistandardSkewTableaux_shape_weight(p, mu)
class SemistandardSkewTableaux_all(CombinatorialClass):
    # The class of all semistandard skew tableaux (repr only; no iteration).
    def __repr__(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux().__repr__()
            'Semistandard skew tableaux'
        """
        return "Semistandard skew tableaux"
class SemistandardSkewTableaux_size(CombinatorialClass):
    # Semistandard skew tableaux with a fixed number of cells.
    def __init__(self, n):
        """
        EXAMPLES::

            sage: s = SemistandardSkewTableaux(3)
            sage: s == loads(dumps(s))
            True
        """
        self.n = n
    def __repr__(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux(3).__repr__()
            'Semistandard skew tableaux of size 3'
        """
        return "Semistandard skew tableaux of size %s"%self.n
    def cardinality(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux(2).cardinality()
            8
        """
        # Sum the shape-wise counts over all skew shapes of size n.
        return sum(SemistandardSkewTableaux_shape(p).cardinality()
                   for p in skew_partition.SkewPartitions(self.n))
    def __iter__(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux(2).list() # indirect doctest
            [[[1, 1]],
             [[1, 2]],
             [[2, 2]],
             [[1], [2]],
             [[None, 1], [1]],
             [[None, 2], [1]],
             [[None, 1], [2]],
             [[None, 2], [2]]]
        """
        for p in skew_partition.SkewPartitions(self.n):
            for ssst in SemistandardSkewTableaux_shape(p):
                yield ssst
class SemistandardSkewTableaux_size_weight(CombinatorialClass):
    # Semistandard skew tableaux with fixed size and fixed weight.
    def __init__(self, n, mu):
        """
        EXAMPLES::

            sage: s = SemistandardSkewTableaux(3,[2,1])
            sage: s == loads(dumps(s))
            True
        """
        self.n = n
        self.mu = mu
    def __repr__(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux(3,[2,1]).__repr__()
            'Semistandard skew tableaux of size 3 and weight [2, 1]'
        """
        return "Semistandard skew tableaux of size %s and weight %s"%(self.n,self.mu)
    def cardinality(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux(2,[1,1]).cardinality()
            4
        """
        # Sum the shape-and-weight counts over all skew shapes of size n.
        return sum(SemistandardSkewTableaux_shape_weight(p, self.mu).cardinality()
                   for p in skew_partition.SkewPartitions(self.n))
    def __iter__(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux(2,[1,1]).list() # indirect doctest
            [[[1, 2]], [[1], [2]], [[None, 2], [1]], [[None, 1], [2]]]
        """
        for p in skew_partition.SkewPartitions(self.n):
            for ssst in SemistandardSkewTableaux_shape_weight(p, self.mu):
                yield ssst
class SemistandardSkewTableaux_shape(CombinatorialClass):
    # Semistandard skew tableaux of a fixed skew shape, any weight.
    def __init__(self, p):
        """
        EXAMPLES::

            sage: s = SemistandardSkewTableaux([[2,1],[]])
            sage: s == loads(dumps(s))
            True
        """
        self.p = skew_partition.SkewPartition(p)
    def __repr__(self):
        """
        EXAMPLES::

            sage: repr(SemistandardSkewTableaux([[2,1],[]]))
            'Semistandard skew tableaux of shape [[2, 1], []]'
        """
        return "Semistandard skew tableaux of shape %s"%self.p
    def cardinality(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux([[2,1],[]]).cardinality()
            8
        """
        # Entries are bounded by the number of cells, so it suffices to sum
        # over all weights of that length.
        return sum(SemistandardSkewTableaux_shape_weight(self.p, mu).cardinality()
                   for mu in IntegerVectors(self.p.size(), self.p.size()))
    def __iter__(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux([[2,1],[]]).list() #indirect test
            [[[1, 1], [2]],
             [[1, 1], [3]],
             [[1, 2], [2]],
             [[1, 3], [2]],
             [[1, 2], [3]],
             [[1, 3], [3]],
             [[2, 2], [3]],
             [[2, 3], [3]]]
        """
        for mu in IntegerVectors(self.p.size(), self.p.size()):
            for ssst in SemistandardSkewTableaux_shape_weight(self.p, mu):
                yield ssst
class SemistandardSkewTableaux_shape_weight(CombinatorialClass):
    # Semistandard skew tableaux of a fixed skew shape and fixed weight.
    def __init__(self, p, mu):
        """
        EXAMPLES::

            sage: s = SemistandardSkewTableaux([[2,1],[]],[2,1])
            sage: s == loads(dumps(s))
            True
        """
        self.p = p
        self.mu = mu
    def __repr__(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux([[2,1],[]],[2,1]).__repr__()
            'Semistandard skew tableaux of shape [[2, 1], []] and weight [2, 1]'
        """
        return "Semistandard skew tableaux of shape %s and weight %s"%(self.p, self.mu)
    def list(self):
        """
        EXAMPLES::

            sage: SemistandardSkewTableaux([[2,1],[]],[2,1]).list()
            [[[1, 1], [2]]]
        """
        # Delegates to ribbon tableaux of length 1, which are exactly the
        # semistandard skew tableaux of this shape and weight.
        import ribbon_tableau
        res = ribbon_tableau.RibbonTableaux_shapeweightlength(self.p, self.mu, 1).list()
        return [ SkewTableau_class(x._list) for x in res]
def from_expr(expr):
    """
    Return a SkewTableau built from a MuPAD-Combinat expr.

    The first list in ``expr`` is the inner shape of the skew tableau; the
    second list holds the entries of the rows, given from bottom to top.
    Provided primarily for compatibility with MuPAD-Combinat.
    EXAMPLES::
        sage: import sage.combinat.skew_tableau as skew_tableau
        sage: sage.combinat.skew_tableau.from_expr([[1,1],[[5],[3,4],[1,2]]])
        [[None, 1, 2], [None, 3, 4], [5]]
    """
    inner = expr[0]
    outer = expr[1]
    # Pad the inner shape with zeros so there is one offset per row.
    offsets = inner + [0] * (len(outer) - len(inner))
    # outer lists rows bottom-to-top, so pair each offset with the matching
    # row taken from the end of outer.
    rows = []
    for offset, row in zip(offsets, reversed(outer)):
        rows.append([None] * offset + row)
    return SkewTableau(rows)
def from_shape_and_word(shape, word):
    """
    Return the skew tableau of skew shape ``shape`` whose row reading word
    is ``word``.
    EXAMPLES::
        sage: import sage.combinat.skew_tableau as skew_tableau
        sage: t = SkewTableau([[None, 1, 3], [None, 2], [4]])
        sage: shape = t.shape()
        sage: word = t.to_word()
        sage: skew_tableau.from_shape_and_word(shape, word)
        [[None, 1, 3], [None, 2], [4]]
    """
    outer, inner = shape[0], shape[1]
    rows = [[None] * width for width in outer]
    letters = iter(word)
    # The reading word lists rows bottom-to-top, left-to-right; fill the rows
    # in reverse order, skipping any cell that lies inside the inner shape.
    for i in reversed(range(len(outer))):
        for j in range(outer[i]):
            if i >= len(inner) or j >= inner[i]:
                rows[i][j] = next(letters)
    return SkewTableau(rows)
# Deprecation of internal classes seems to be unnecessarily painful...
from sage.misc.superseded import deprecation
def SemistandardSkewTableaux_n(*args, **kargs):
    """
    EXAMPLES::
        sage: sage.combinat.skew_tableau.SemistandardSkewTableaux_n(3)
        doctest:1: DeprecationWarning: this class is deprecated. Use SemistandardSkewTableaux_size instead
        See http://trac.sagemath.org/9265 for details.
        Semistandard skew tableaux of size 3
    """
    # Deprecated alias kept for backward compatibility (trac #9265);
    # warns, then delegates to the SemistandardSkewTableaux factory.
    deprecation(9265,'this class is deprecated. Use SemistandardSkewTableaux_size instead')
    return SemistandardSkewTableaux(*args, **kargs)
def SemistandardSkewTableaux_nmu(*args, **kargs):
    """
    EXAMPLES::
        sage: sage.combinat.skew_tableau.SemistandardSkewTableaux_nmu(3,[2,1])
        doctest:1: DeprecationWarning: this class is deprecated. Use SemistandardSkewTableaux_size_weight instead
        See http://trac.sagemath.org/9265 for details.
        Semistandard skew tableaux of size 3 and weight [2, 1]
    """
    # Deprecated alias kept for backward compatibility (trac #9265);
    # warns, then delegates to the SemistandardSkewTableaux factory.
    deprecation(9265,'this class is deprecated. Use SemistandardSkewTableaux_size_weight instead')
    return SemistandardSkewTableaux(*args, **kargs)
def SemistandardSkewTableaux_p(*args, **kargs):
    """
    EXAMPLES::
        sage: sage.combinat.skew_tableau.SemistandardSkewTableaux_p([[2,1],[]])
        doctest:1: DeprecationWarning: this class is deprecated. Use SemistandardSkewTableaux_shape instead
        See http://trac.sagemath.org/9265 for details.
        Semistandard skew tableaux of shape [[2, 1], []]
    """
    # Deprecated alias kept for backward compatibility (trac #9265);
    # warns, then constructs the renamed class directly.
    deprecation(9265,'this class is deprecated. Use SemistandardSkewTableaux_shape instead')
    return SemistandardSkewTableaux_shape(*args, **kargs)
def SemistandardSkewTableaux_pmu(*args, **kargs):
    """
    EXAMPLES::
        sage: sage.combinat.skew_tableau.SemistandardSkewTableaux_pmu([[2,1],[]],[2,1])
        doctest:1: DeprecationWarning: this class is deprecated. Use SemistandardSkewTableaux_shape_weight instead
        See http://trac.sagemath.org/9265 for details.
        Semistandard skew tableaux of shape [[2, 1], []] and weight [2, 1]
    """
    # Deprecated alias kept for backward compatibility (trac #9265);
    # warns, then constructs the renamed class directly.
    deprecation(9265,'this class is deprecated. Use SemistandardSkewTableaux_shape_weight instead')
    return SemistandardSkewTableaux_shape_weight(*args, **kargs)
def StandardSkewTableaux_n(*args, **kargs):
    """
    EXAMPLES::
        sage: sage.combinat.skew_tableau.StandardSkewTableaux_n(2)
        doctest:1: DeprecationWarning: this class is deprecated. Use StandardSkewTableaux_size instead
        See http://trac.sagemath.org/9265 for details.
        Standard skew tableaux of size 2
    """
    # Deprecated alias kept for backward compatibility (trac #9265);
    # warns, then delegates to the StandardSkewTableaux factory.
    deprecation(9265,'this class is deprecated. Use StandardSkewTableaux_size instead')
    return StandardSkewTableaux(*args, **kargs)
# October 2012: fixing outdated pickles which use the classes being deprecated
from sage.structure.sage_object import register_unpickle_override
# Map each legacy class name that may be stored in old pickles onto its
# renamed successor so unpickling keeps working.
register_unpickle_override('sage.combinat.skew_tableau', 'StandardSkewTableaux_n', StandardSkewTableaux_size)
register_unpickle_override('sage.combinat.skew_tableau', 'SemistandardSkewTableaux_n', SemistandardSkewTableaux_size)
register_unpickle_override('sage.combinat.skew_tableau', 'SemistandardSkewTableaux_nmu', SemistandardSkewTableaux_size_weight)
register_unpickle_override('sage.combinat.skew_tableau', 'SemistandardSkewTableaux_p', SemistandardSkewTableaux_shape)
register_unpickle_override('sage.combinat.skew_tableau', 'SemistandardSkewTableaux_pmu', SemistandardSkewTableaux_shape_weight)
| 32.379333 | 195 | 0.527641 |
49e85201ca3defbbe323879bd603ac5b593bc71d | 294 | py | Python | 01_WordTokenize.py | pemagrg1/SPACY-for-Beginners | b4647a5b7b1a784cfe86d594cb045337bef198fc | [
"MIT"
] | 4 | 2019-03-16T04:16:27.000Z | 2020-02-15T13:46:26.000Z | 01_WordTokenize.py | pemagrg1/Natural-Language-Processing-NLP-using-Spacy | b4647a5b7b1a784cfe86d594cb045337bef198fc | [
"MIT"
] | null | null | null | 01_WordTokenize.py | pemagrg1/Natural-Language-Processing-NLP-using-Spacy | b4647a5b7b1a784cfe86d594cb045337bef198fc | [
"MIT"
] | 1 | 2019-10-17T14:55:41.000Z | 2019-10-17T14:55:41.000Z | import spacy
# Tokenize a short sample text with spaCy's English pipeline and print the
# resulting token strings.
nlp = spacy.load("en")
text = """Most of the outlay will be at home. No surprise there, either. While Samsung has expanded overseas, South Korea is still host to most of its factories and research engineers. """
document = nlp(text)
print([token.text for token in document])
180cd6e2be29877fdf4552aac8ea4b2218a6a411 | 2,500 | py | Python | cli.py | banditsbeware/wikitools | 1c08b47bf111007c674a9a73c9968a7b57fdf83b | [
"MIT"
] | 1 | 2020-11-26T03:39:50.000Z | 2020-11-26T03:39:50.000Z | cli.py | banditsbeware/wikitools | 1c08b47bf111007c674a9a73c9968a7b57fdf83b | [
"MIT"
] | null | null | null | cli.py | banditsbeware/wikitools | 1c08b47bf111007c674a9a73c9968a7b57fdf83b | [
"MIT"
] | null | null | null | '''
WikiTools -- cli.py
author: david rademacher
'''
from wikitools import *
from random import sample
# Mapping from one-letter command to the help text shown by the 'h' command.
comm = dict()
comm['c'] = 'list page categories'
comm['l'] = 'view table of contents'
comm['r'] = 'find related pages'
comm['a'] = 'list directly linked pages'
comm['n'] = 'get a new page'
comm['j'] = 'take a journey'
comm['h'] = 'display commands'
comm['q'] = 'quit'
def print_commands():
    """Print each one-letter command together with its description."""
    for letter, description in comm.items():
        print(f' {letter} -> {description}')
def prompt():
    # Show the current page title and read the next command from the user.
    global user_pg
    return input(f'\n▶︎ {user_pg} ◀︎\n .: ')
def print_cats():
    """List the categories of the current page, one per line."""
    print('\nyour page\'s categories: ')
    for category in user_pg.cats:
        print(f' {category}')
def print_toc():
    """Show the current page's table of contents and offer to read a section.

    Prompts for a 1-based section number; entering 0 (or anything that is
    not an integer) skips reading.
    """
    global user_pg
    toc = user_pg.toc
    if toc is not None:
        for i in range(len(toc)):
            # Indent an entry that shares its top-level number with the
            # previous one.  NOTE(review): at i == 0 this compares against
            # toc[-1]; presumably harmless, but confirm it is intended.
            if toc[i][0][0] == toc[i-1][0][0]:
                print(' ', end='')
            print(f'{i+1}. {toc[i][1]}')
        # Fix: the original crashed with ValueError on non-numeric input.
        try:
            sec = int(input('\nread: '))
        except ValueError:
            sec = 0  # treat non-numeric input as "don't read anything"
        if sec:
            print(read_section(user_pg, sec-1))
    else:
        print('no table of contents found.')
def print_related():
    """Offer to generate, then print, the current page's related pages."""
    global user_pg
    if user_pg.related is None:
        answer = input('generate related pages? (y/n) ')
        if answer == 'n':
            return
        user_pg.generate_related()
    answer = input(f'print all {len(user_pg.related)}? (y/n) ')
    if answer == 'y':
        for related_page in user_pg.related:
            print(f' {related_page}')
def print_linked():
    """Print every page directly linked from the current page."""
    for linked_title in user_pg.links:
        print(f' ・{linked_title}')
def new_page():
    """Replace the current page with a random one or a search result.

    Re-prompts until a valid y/n answer is given; for searches, keeps
    prompting until a page is found, showing suggestions on each miss.
    """
    global user_pg
    n = input('\nrandom page? (y/n) ')
    # Loop instead of the original recursion; same prompts, no stack growth.
    while n not in ['y', 'n']:
        n = input('\nrandom page? (y/n) ')
    if n == 'y':
        user_pg = page(page_rand())
    else:
        qry = input('query: ')
        res = search(qry)
        while isinstance(res, list):
            print(f'\nno page found for \'{qry}\'.\nsome suggestions:')
            for sg in res:
                print(f' ・{sg}')
            # Fix: the original never updated qry here, so the "no page
            # found" message kept quoting the first query forever.
            qry = input('\nquery: ')
            res = search(qry)
        user_pg = page(res)
def take_journey():
    """Walk a chain of linked pages and optionally stay at the destination."""
    global user_pg
    n = input('how far? (integer) ')
    k = input('one way ticket? (y/n) ')
    print()
    # Fix: the original crashed with ValueError on non-numeric input.
    try:
        steps = int(n)
    except ValueError:
        print('invalid distance; journey cancelled.')
        return
    dest = journey(user_pg.title, steps)[-1]
    if k == 'y':
        user_pg = page(dest)
print('WikiTools CLI')
user_pg = []
new_page()
print('enter \'h\' for help ✔︎')

# Dispatch table: command letter -> handler function.  'q' is handled
# separately because it terminates the loop.
handlers = {
    'c': print_cats,
    'l': print_toc,
    'r': print_related,
    'a': print_linked,
    'n': new_page,
    'h': print_commands,
    'j': take_journey,
}
while True:
    n = prompt()
    if n == 'q':
        break
    action = handlers.get(n)
    if action is not None:
        action()
| 25.252525 | 71 | 0.5416 |
39f387bd3857597d07f53089c06ed7673ccf0f2c | 3,663 | py | Python | results/migrations/0001_initial.py | dreardon/megamillions-group | 7c9fcd4c6fe0a151a9f2f48113406154bedd0a2f | [
"MIT"
] | null | null | null | results/migrations/0001_initial.py | dreardon/megamillions-group | 7c9fcd4c6fe0a151a9f2f48113406154bedd0a2f | [
"MIT"
] | 5 | 2021-04-08T18:28:50.000Z | 2022-03-05T21:38:41.000Z | results/migrations/0001_initial.py | dreardon/megamillions-group | 7c9fcd4c6fe0a151a9f2f48113406154bedd0a2f | [
"MIT"
] | 1 | 2019-09-05T22:35:34.000Z | 2019-09-05T22:35:34.000Z | # Generated by Django 2.0.4 on 2018-04-11 22:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the results app: lottery number records, drawings,
    # group tickets, prizes won, and payout bookkeeping.

    initial = True

    dependencies = [
    ]

    operations = [
        # A period covered by one group agreement, with its signed document.
        migrations.CreateModel(
            name='AgreementPeriod',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('periodName', models.CharField(default=0, max_length=14)),
                ('agreementFile', models.FileField(blank=True, null=True, upload_to='attachments')),
                ('startDate', models.DateField(blank=True, null=True)),
                ('endDate', models.DateField(blank=True, null=True)),
            ],
            options={
                'verbose_name_plural': 'Agreement Periods',
            },
        ),
        # Base record for a set of numbers plus the Mega Ball; Drawing and
        # GroupTicket below both inherit from it (multi-table inheritance).
        migrations.CreateModel(
            name='MegaNumbers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('numbers', models.CharField(default=0, max_length=14)),
                ('megaBall', models.IntegerField(default=0)),
            ],
        ),
        # Amount paid out during a given agreement period.
        migrations.CreateModel(
            name='PaidOut',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('prizeAmount', models.IntegerField(default=0)),
                ('agreementPeriod', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='results.AgreementPeriod')),
            ],
            options={
                'verbose_name_plural': 'Paid Out',
            },
        ),
        # A prize won by a ticket in a drawing; FKs are added further below
        # because Drawing/GroupTicket are created after this model.
        migrations.CreateModel(
            name='PrizesWon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('groupPrizeAmount', models.IntegerField()),
            ],
            options={
                'verbose_name_plural': 'Prizes Won',
            },
        ),
        # An official drawing: inherits numbers from MegaNumbers via a
        # parent-link OneToOneField.
        migrations.CreateModel(
            name='Drawing',
            fields=[
                ('meganumbers_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='results.MegaNumbers')),
                ('multiplier', models.IntegerField()),
                ('drawingDate', models.DateField()),
            ],
            bases=('results.meganumbers',),
        ),
        # A ticket bought by the group, also inheriting from MegaNumbers.
        migrations.CreateModel(
            name='GroupTicket',
            fields=[
                ('meganumbers_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='results.MegaNumbers')),
                ('autoPick', models.BooleanField()),
                ('agreementPeriod', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='results.AgreementPeriod')),
            ],
            options={
                'verbose_name_plural': 'Group Tickets',
            },
            bases=('results.meganumbers',),
        ),
        # Deferred foreign keys linking each PrizesWon row to its drawing
        # and to the winning ticket.
        migrations.AddField(
            model_name='prizeswon',
            name='drawing',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='results.Drawing'),
        ),
        migrations.AddField(
            model_name='prizeswon',
            name='ticket',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='results.GroupTicket'),
        ),
    ]
| 41.157303 | 201 | 0.567568 |
d6957c24e81a185f1ef6db4207a6b2b903475d8d | 2,491 | py | Python | tests/test_forms.py | Egregors/django-rest-framework-filters | a56327618676db698f162b2c38f69a4e5d6f3a00 | [
"Unlicense"
] | null | null | null | tests/test_forms.py | Egregors/django-rest-framework-filters | a56327618676db698f162b2c38f69a4e5d6f3a00 | [
"Unlicense"
] | null | null | null | tests/test_forms.py | Egregors/django-rest-framework-filters | a56327618676db698f162b2c38f69a4e5d6f3a00 | [
"Unlicense"
] | null | null | null | from django import forms
from django.test import TestCase
from rest_framework_filters import FilterSet, filters
from .testapp.filters import PostFilter
from .testapp.models import Post, User
class FilterSetFormTests(TestCase):
    # Behavior of the form generated by a FilterSet: custom form classes,
    # subset handling, bound/unbound field visibility, related filtersets,
    # and validation errors.
    def test_form_inheritance(self):
        # A Meta.form override must be the class actually instantiated.
        class MyForm(forms.Form):
            pass
        class F(FilterSet):
            class Meta:
                model = Post
                fields = []
                form = MyForm
        self.assertIsInstance(F().form, MyForm)
    def test_subset_disabled_form_fields(self):
        # Form fields should reliably display when the subset is disabled
        class F(FilterSet):
            class Meta:
                model = Post
                fields = ['title', 'content']
        F = F.disable_subset()
        form = F({}).form
        self.assertEqual(list(form.fields), ['title', 'content'])
    def test_unbound_form_fields(self):
        # With subsetting active, an unbound form exposes no fields.
        class F(FilterSet):
            class Meta:
                model = Post
                fields = ['title', 'content']
        form = F().form
        self.assertEqual(list(form.fields), [])
    def test_bound_form_fields(self):
        # Only the fields present in the bound data appear on the form.
        class F(FilterSet):
            class Meta:
                model = Post
                fields = ['title', 'content']
        form = F({}).form
        self.assertEqual(list(form.fields), [])
        form = F({'title': 'foo'}).form
        self.assertEqual(list(form.fields), ['title'])
    def test_related_form_fields(self):
        # FilterSet form should not contain fields from related filtersets
        class F(FilterSet):
            author = filters.RelatedFilter(
                'tests.testapp.filters.UserFilter',
                queryset=User.objects.all(),
            )
            class Meta:
                model = Post
                fields = ['title', 'author']
        form = F({'title': '', 'author': '', 'author__email': ''}).form
        self.assertEqual(list(form.fields), ['title', 'author'])
        # The related filterset's own form holds the traversed field.
        form = F({'title': '', 'author': '', 'author__email': ''}).related_filtersets['author'].form
        self.assertEqual(list(form.fields), ['email'])
    def test_validation_errors(self):
        # Invalid values surface as standard Django form errors, including
        # across related-filter lookups.
        f = PostFilter({
            'publish_date__year': 'foo',
            'author__last_login__date': 'bar',
        })
        self.assertEqual(f.form.errors, {
            'publish_date__year': ['Enter a number.'],
            'author__last_login__date': ['Enter a valid date.'],
        })
| 29.654762 | 100 | 0.556002 |
9dcdf50e77ec4a6acca3ae6fa0a30dba1d208c10 | 2,127 | py | Python | wedge/robot.py | TeamIllusion/RapidReact | 638b0b8c0192b7e71ed5cb608586eca12f1e8f00 | [
"BSD-3-Clause"
] | null | null | null | wedge/robot.py | TeamIllusion/RapidReact | 638b0b8c0192b7e71ed5cb608586eca12f1e8f00 | [
"BSD-3-Clause"
] | 1 | 2022-01-29T16:42:23.000Z | 2022-01-29T16:42:23.000Z | wedge/robot.py | TeamIllusion/RapidReact | 638b0b8c0192b7e71ed5cb608586eca12f1e8f00 | [
"BSD-3-Clause"
] | 4 | 2022-01-28T20:05:26.000Z | 2022-01-31T21:05:40.000Z | #!/usr/bin/env python3
"""
This is a demo program showing how to use Mecanum control with the
MecanumDrive class.
Since the team has decided to build a wedge, this code is going to
have to be modified to use only three motors.
The theory for such a design can be found at
https://www.mdpi.com/2073-8994/11/10/1268
"""
import wpilib
import rev
from wpilib.drive import MecanumDrive
class MyRobot(wpilib.TimedRobot):
    """Mecanum-drive demo robot: one joystick mapped onto four wheel motors."""

    # Channels on the roboRIO that the motor controllers are plugged in to
    frontLeftChannel = 2
    rearLeftChannel = 3
    frontRightChannel = 1
    rearRightChannel = 0

    # The channel on the driver station that the joystick is connected to
    joystickChannel = 0

    def robotInit(self):
        """Robot initialization function"""
        # Fix: the original passed the bare name ``kBrushless``, which is
        # undefined and raised a NameError at startup.  The motor-type
        # constant lives on rev.CANSparkMax.MotorType.
        brushless = rev.CANSparkMax.MotorType.kBrushless
        self.frontLeftMotor = rev.CANSparkMax(self.frontLeftChannel, brushless)
        self.rearLeftMotor = rev.CANSparkMax(self.rearLeftChannel, brushless)
        self.frontRightMotor = rev.CANSparkMax(self.frontRightChannel, brushless)
        self.rearRightMotor = rev.CANSparkMax(self.rearRightChannel, brushless)

        # invert the left side motors
        # you may need to change or remove this to match your robot
        self.frontLeftMotor.setInverted(True)
        self.rearLeftMotor.setInverted(True)

        self.drive = MecanumDrive(
            self.frontLeftMotor,
            self.rearLeftMotor,
            self.frontRightMotor,
            self.rearRightMotor,
        )
        self.drive.setExpiration(0.1)

        self.stick = wpilib.Joystick(self.joystickChannel)

    def teleopInit(self):
        # Re-enable the motor-safety watchdog whenever teleop starts.
        self.drive.setSafetyEnabled(True)

    def teleopPeriodic(self):
        """Runs the motors with Mecanum drive."""
        # Use the joystick X axis for lateral movement, Y axis for forward movement, and Z axis for rotation.
        # This sample does not use field-oriented drive, so the gyro input is set to zero.
        self.drive.driveCartesian(
            self.stick.getX(), self.stick.getY(), self.stick.getZ(), 0
        )
if __name__ == "__main__":
    # Hand control to the WPILib framework, which instantiates MyRobot.
    wpilib.run(MyRobot)
| 31.746269 | 109 | 0.67654 |
6f173477abc503cdfe1596163403900ea50db311 | 2,442 | py | Python | ros2doctor/test/test_cli.py | craigh92/ros2cli | 6c1af39c728145942346b40e998b8a3984f1b6c1 | [
"Apache-2.0"
] | 90 | 2017-06-06T22:16:23.000Z | 2022-03-08T20:16:05.000Z | ros2doctor/test/test_cli.py | craigh92/ros2cli | 6c1af39c728145942346b40e998b8a3984f1b6c1 | [
"Apache-2.0"
] | 586 | 2017-06-12T18:00:16.000Z | 2022-03-29T21:17:13.000Z | ros2doctor/test/test_cli.py | craigh92/ros2cli | 6c1af39c728145942346b40e998b8a3984f1b6c1 | [
"Apache-2.0"
] | 108 | 2017-07-11T16:20:12.000Z | 2022-03-27T21:30:22.000Z | # Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import Namespace
import unittest
import unittest.mock as mock
from launch import LaunchDescription
from launch.actions import ExecuteProcess
import launch_testing.actions
import launch_testing.markers
import pytest
from ros2doctor.verb.hello import HelloVerb
from ros2doctor.verb.hello import SummaryTable
@pytest.mark.rostest
@launch_testing.markers.keep_alive
def generate_test_description():
    # Stop any running ROS 2 daemon before the test starts, so the test
    # observes a clean discovery state; signal readiness once it has exited.
    return LaunchDescription([
        ExecuteProcess(
            cmd=['ros2', 'daemon', 'stop'],
            name='daemon-stop',
            on_exit=[
                launch_testing.actions.ReadyToTest()
            ]
        )
    ])
def _generate_expected_summary_table():
    """Generate expected summary table for one emit period on a single host."""
    expected = SummaryTable()
    # Exactly one publish and one multicast send occur per default emit period.
    expected.increment_pub()
    expected.increment_send()
    return expected
class TestROS2DoctorCLI(unittest.TestCase):
    def test_hello_single_host(self):
        """Run HelloVerb for one emit period on a single host."""
        args = Namespace()
        args.topic = '/canyouhearme'
        args.emit_period = 0.1
        args.print_period = 1.0
        args.ttl = None
        args.once = True  # run a single emit period, then stop
        # Patch the hostname with characters that are illegal in a ROS node
        # name, so the verb's name-sanitizing path is exercised.
        with mock.patch('socket.gethostname', return_value='!nv@lid-n*de-n4me'):
            summary = SummaryTable()
            hello_verb = HelloVerb()
            hello_verb.main(args=args, summary_table=summary)
            expected_summary = _generate_expected_summary_table()
            # Compare every counter against the expected single-period table.
            self.assertEqual(summary._pub, expected_summary._pub)
            self.assertEqual(summary._sub, expected_summary._sub)
            self.assertEqual(summary._send, expected_summary._send)
            self.assertEqual(summary._receive, expected_summary._receive)
c57dd653d59ec79af2b65737f3b839661b746109 | 1,551 | py | Python | fn_kafka/setup.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | fn_kafka/setup.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | fn_kafka/setup.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='fn_kafka',
version='1.0.0',
license='MIT',
author='Mark Scherfling',
author_email='Resilient Labs',
url='https://github.com/ibmresilient/resilient-community-apps',
description="Resilient Circuits Components for 'fn_kafka'",
long_description="""Support the ability to produce and consume Kafka messages over a nunber of brokers.
Key features:
* Ability to define multiple brokers for producing and consuming messages
* Send to Kafka allows key/value or just value transmissions on a topic
* Poller for listening on broker topics with configurable templates""",
install_requires=[
'resilient_circuits>=30.0.0',
'kafka>=1.3.5',
'resilient-lib',
'resilient'
],
packages=find_packages(),
include_package_data=True,
platforms='any',
classifiers=[
'Programming Language :: Python',
],
entry_points={
"resilient.circuits.components": [
"KafkaSendFunctionComponent = fn_kafka.components.kafka_send:FunctionComponent",
"KafkaListenerComponent = fn_kafka.components.kafka_listener:KafkaListenerComponent"
],
"resilient.circuits.configsection": ["gen_config = fn_kafka.util.config:config_section_data"],
"resilient.circuits.customize": ["customize = fn_kafka.util.customize:customization_data"],
"resilient.circuits.selftest": ["selftest = fn_kafka.util.selftest:selftest_function"]
}
) | 38.775 | 107 | 0.698904 |
e082e8b381f72c80d52788ae169bfc14b15d03fb | 1,930 | py | Python | marathoner/commands/multi_test.py | Mimino666/tc-marathoner | ab388f8706d671f6bdd8667c9e863c37a2cb21ae | [
"MIT"
] | 14 | 2015-04-21T14:04:37.000Z | 2021-12-23T01:40:02.000Z | marathoner/commands/multi_test.py | Mimino666/tc-marathoner | ab388f8706d671f6bdd8667c9e863c37a2cb21ae | [
"MIT"
] | 3 | 2015-04-23T13:33:28.000Z | 2015-09-17T14:54:53.000Z | marathoner/commands/multi_test.py | Mimino666/tc-marathoner | ab388f8706d671f6bdd8667c9e863c37a2cb21ae | [
"MIT"
] | 1 | 2021-12-23T01:40:09.000Z | 2021-12-23T01:40:09.000Z | import re
from six import print_
from six.moves import xrange
from marathoner.commands.base import BaseCommand
class Command(BaseCommand):
    # Run the solution against every seed in [seed1, seed2], recording scores.
    syntax = '<seed1> <seed2> [<vis params>]'
    help = 'run batch of tests from interval [seed1, seed2]'
    # Two positive integers, optionally followed by visualizer parameters
    # (which must not start with a digit, to disambiguate from the seeds).
    cmd_re = re.compile(r'^\s*(\d+)\s+(\d+)(?:\s+([^\d\s].*))?\s*$', re.IGNORECASE)
    def is_match(self, command):
        # Non-None match object means this command handles the input.
        return self.cmd_re.match(command)
    def handle(self, command):
        """Run the seeds, record scores, and notify the contest hooks."""
        match = self.cmd_re.match(command)
        seed1 = int(match.group(1))
        seed2 = int(match.group(2))
        vis_params = match.group(3) or ''
        if seed1 < 1:
            print_('Error: seed1 has to be at least 1.')
            return
        if seed2 < seed1:
            print_('Error: seed1 can\'t be larger than seed2.')
            return
        # Pin the current source hash for the duration of the batch.
        self.project.source_hash_transaction_begin()
        tag = self.project.current_tag
        self.contest.multiple_tests_starting(seed2-seed1+1)
        tests_run = 0
        # Allow the user to abort long batches by killing the solution.
        self.executor.kill_solution_listener_start()
        for seed in xrange(seed1, seed2+1):
            self.contest.one_test_starting(seed)
            current_score, visualizer_stdout, solution_stderr = self.executor.run(seed, False, vis_params)
            # None score signals that the run was interrupted/killed.
            if current_score is None:
                print_('Stopping execution...')
                break
            if seed:
                self.project.scores[seed] = current_score
                if tag:
                    tag.scores[seed] = current_score
            self.contest.one_test_ending(seed, visualizer_stdout, solution_stderr,
                self.project.scores[seed], current_score)
            tests_run += 1
        self.executor.kill_solution_listener_stop()
        # Persist scores even when the batch was stopped early.
        self.project.scores.save()
        if tag:
            tag.scores.save()
        self.contest.multiple_tests_ending(tests_run)
        self.project.source_hash_transaction_end()
| 35.740741 | 106 | 0.599482 |
9aa5d84ec6f6c069d11eddee09e6b05374cd5877 | 1,034 | py | Python | tests/extmod/uctypes_sizeof_native.py | TG-Techie/circuitpython | 390295dd218fb705fe652de77132dea472adf1ed | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 3 | 2020-01-09T21:50:22.000Z | 2020-01-15T08:27:48.000Z | tests/extmod/uctypes_sizeof_native.py | TG-Techie/circuitpython | 390295dd218fb705fe652de77132dea472adf1ed | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | null | null | null | tests/extmod/uctypes_sizeof_native.py | TG-Techie/circuitpython | 390295dd218fb705fe652de77132dea472adf1ed | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 1 | 2020-01-11T12:42:41.000Z | 2020-01-11T12:42:41.000Z | try:
import uctypes
except ImportError:
print("SKIP")
raise SystemExit
# Empty struct descriptor occupies no bytes.
S1 = {}
assert uctypes.sizeof(S1) == 0
# Single byte field at offset 0.
S2 = {"a": uctypes.UINT8 | 0}
assert uctypes.sizeof(S2) == 1
# Two consecutive byte fields.
S3 = {"a": uctypes.UINT8 | 0, "b": uctypes.UINT8 | 1}
assert uctypes.sizeof(S3) == 2
# Size is determined by the field with the largest end offset, rounded up
# to the struct's alignment (UINT32 here).
S4 = {"a": uctypes.UINT8 | 0, "b": uctypes.UINT32 | 4, "c": uctypes.UINT8 | 8}
assert uctypes.sizeof(S4) == 12
# Overlapping fields (d at offset 0) and an embedded sub-struct at offset 4
# do not extend the overall size beyond the largest end offset.
S5 = {
    "a": uctypes.UINT8 | 0,
    "b": uctypes.UINT32 | 4,
    "c": uctypes.UINT8 | 8,
    "d": uctypes.UINT32 | 0,
    "sub": (4, {"b0": uctypes.UINT8 | 0, "b1": uctypes.UINT8 | 1}),
}
assert uctypes.sizeof(S5) == 12
# sizeof also works on an instantiated struct and on its sub-struct.
s5 = uctypes.struct(0, S5)
assert uctypes.sizeof(s5) == 12
assert uctypes.sizeof(s5.sub) == 2
# Pointer size depends on the architecture word size.
S6 = {"ptr": (uctypes.PTR | 0, uctypes.UINT8)}
# As if there're no other arch bitnesses
assert uctypes.sizeof(S6) in (4, 8)
# Array of 5 bytes.
S7 = {"arr": (uctypes.ARRAY | 0, uctypes.UINT8 | 5)}
assert uctypes.sizeof(S7) == 5
# Array of 3 sub-structs, each 8 bytes (UINT32-aligned).
S8 = {"arr": (uctypes.ARRAY | 0, 3, {"a": uctypes.UINT32 | 0, "b": uctypes.UINT8 | 4})}
assert uctypes.sizeof(S8) == 24
| 24.619048 | 87 | 0.609284 |
5ae79a10452200d27b2e174fc9ea163e8646f30f | 1,757 | py | Python | beanbot/parser.py | kevinzg/beanbot | 4558804f539720895f9fea2bb46bcdbfe886b034 | [
"MIT"
] | 4 | 2019-09-30T10:04:45.000Z | 2021-09-18T05:39:08.000Z | beanbot/parser.py | kevinzg/beanbot | 4558804f539720895f9fea2bb46bcdbfe886b034 | [
"MIT"
] | 2 | 2021-07-30T02:13:56.000Z | 2021-07-30T02:14:02.000Z | beanbot/parser.py | kevinzg/beanbot | 4558804f539720895f9fea2bb46bcdbfe886b034 | [
"MIT"
] | 1 | 2021-03-23T10:40:37.000Z | 2021-03-23T10:40:37.000Z | from decimal import Decimal, InvalidOperation
from .errors import UserError
from .models import Action, Event
# Parser
def parse_message(message: str) -> Event:
    """Transform a user message into an event that can be handled by the database.

    Message forms handled, in order:
      * ``#text``        -> SET_INFO with the remaining text
      * ``+N`` / ``-N``  -> FIX_AMOUNT when the whole message parses as Decimal
      * ``+info N``      -> ADD with parsed info/amount
      * ``info N``       -> NEW with parsed info/amount
    Raises UserError on empty info or an unparseable amount.
    """
    message = message.strip()
    def inner_parse():
        # Closure over ``message``: note that the ADD branch below strips the
        # leading '+' from ``message`` *before* calling this, so the closure
        # sees the updated value.
        try:
            # Last whitespace-separated token is the amount, the rest is info.
            *info, amount = message.split()
            info = ' '.join(info).strip()
            if not info:
                # UserError is not in the except tuple, so it propagates as-is.
                raise UserError("Info can't be empty")
            return dict(
                info=info,
                amount=Decimal(amount),
            )
        except (ValueError, InvalidOperation) as ex:
            # Covers both a failed unpack (too few tokens) and a bad Decimal.
            raise UserError("Invalid amount") from ex
    if message.startswith('#'):
        info = message[1:].strip()
        if not info:
            raise UserError("Info can't be empty")
        return Event(Action.SET_INFO, info)
    if message.startswith('+') or message.startswith('-'):
        # If the whole message is a signed number, treat it as a correction.
        diff = None
        try:
            diff = Decimal(message)
        except (ValueError, InvalidOperation):
            pass
        if diff is not None:
            return Event(Action.FIX_AMOUNT, diff)
        if message.startswith('+'):
            # Strip the '+' so inner_parse sees only "info amount".
            message = message[1:]
            return Event(Action.ADD, inner_parse())
    # Default: a new entry.  A leading '-' (non-numeric) falls through here
    # with the '-' still attached to the info text.
    return Event(Action.NEW, inner_parse())
def parse_keyboard_data(data: str) -> Event:
    """Translate an inline-keyboard callback payload into an Event.

    Recognized payloads: ``delete``, ``commit``, ``cur_<i>`` (set currency)
    and ``acc_<i>`` (set credit account), where ``<i>`` is an integer index.

    Raises ValueError for an unrecognized key prefix.
    """
    if data == 'delete':
        return Event(Action.DELETE, None)
    elif data == 'commit':
        return Event(Action.COMMIT, None)
    key, index = data.rsplit('_', maxsplit=1)
    index = int(index)
    if key == 'cur':
        return Event(Action.SET_CURRENCY, index)
    elif key == 'acc':
        return Event(Action.SET_CREDIT_ACCOUNT, index)
    # Fix: the original f-string read '${key}', a JS-template leftover that
    # rendered a literal '$' in the error message.
    raise ValueError(f'Invalid key {key}')
| 27.453125 | 85 | 0.587934 |
06465814b17d5c9db050b362fc8305b4da7a3650 | 1,223 | py | Python | app/recipe/serializers.py | mossj77/recipe-app-api | 62cc41bdc258185491dabadba6e99588f455558c | [
"MIT"
] | null | null | null | app/recipe/serializers.py | mossj77/recipe-app-api | 62cc41bdc258185491dabadba6e99588f455558c | [
"MIT"
] | null | null | null | app/recipe/serializers.py | mossj77/recipe-app-api | 62cc41bdc258185491dabadba6e99588f455558c | [
"MIT"
] | null | null | null | from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
    """Serialize Tag objects; id is exposed but never writable."""
    class Meta:
        model = Tag
        fields = ('id', 'name')
        read_only_fields = ('id', )
class IngredientSerializer(serializers.ModelSerializer):
    """Serialize Ingredient objects; id is exposed but never writable."""
    class Meta:
        model = Ingredient
        fields = ('id', 'name')
        read_only_fields = ('id', )
class RecipeSerializer(serializers.ModelSerializer):
    """Serialize recipes, referencing ingredients and tags by primary key."""
    ingredients = serializers.PrimaryKeyRelatedField(
        many=True,
        queryset=Ingredient.objects.all()
    )
    tags = serializers.PrimaryKeyRelatedField(
        many=True,
        queryset=Tag.objects.all()
    )
    class Meta:
        model = Recipe
        fields = (
            'id', 'title', 'ingredients', 'tags', 'time_minutes', 'price',
            'link'
        )
        read_only_fields = ('id', )
class RecipeDetailSerializer(RecipeSerializer):
    """Recipe serializer that nests full ingredient/tag objects (read-only)."""
    ingredients = IngredientSerializer(many=True, read_only=True)
    tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(RecipeSerializer):
    """Serializer used solely for uploading an image to a recipe."""
    class Meta:
        model = Recipe
        fields = ('id', 'image')
        read_only_fields = ('id',)
| 23.519231 | 74 | 0.634505 |
f297ac64e19011a31a3090cd2a4d6ca0a3a1763d | 3,238 | py | Python | experiment_scripts/train_poisson_grad_img.py | zongzi13545329/siren | 6b6f4b047bc5cb92c88374eb53299880f0ccdd56 | [
"MIT"
] | 1,245 | 2020-06-24T22:01:02.000Z | 2022-03-31T04:30:01.000Z | experiment_scripts/train_poisson_grad_img.py | zongzi13545329/siren | 6b6f4b047bc5cb92c88374eb53299880f0ccdd56 | [
"MIT"
] | 50 | 2020-06-25T09:16:45.000Z | 2022-02-15T14:49:07.000Z | experiment_scripts/train_poisson_grad_img.py | zongzi13545329/siren | 6b6f4b047bc5cb92c88374eb53299880f0ccdd56 | [
"MIT"
] | 203 | 2020-06-24T23:43:02.000Z | 2022-03-29T05:44:58.000Z | '''Reproduces Paper Sec. 4.1, Supplement Sec. 3, reconstruction from gradient.
'''
# Enable import from parent package
import sys
import os
sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )
import dataio, meta_modules, utils, training, loss_functions, modules
from torch.utils.data import DataLoader
import configargparse
# Command-line options (values may also come from a config file via -c).
p = configargparse.ArgumentParser()
p.add('-c', '--config_filepath', required=False, is_config_file=True, help='Path to config file.')

p.add_argument('--logging_root', type=str, default='./logs', help='root for logging')
p.add_argument('--experiment_name', type=str, required=True,
               help='Name of subdirectory in logging_root where summaries and checkpoints will be saved.')

# General training options
p.add_argument('--batch_size', type=int, default=16384)
p.add_argument('--lr', type=float, default=1e-4, help='learning rate. default=5e-5')
p.add_argument('--num_epochs', type=int, default=10000,
               help='Number of epochs to train for.')
p.add_argument('--epochs_til_ckpt', type=int, default=25,
               help='Time interval in seconds until checkpoint is saved.')
p.add_argument('--steps_til_summary', type=int, default=100,
               help='Time interval in seconds until tensorboard summary is saved.')
p.add_argument('--dataset', type=str, choices=['camera','bsd500'], default='camera',
               help='Dataset: choices=[camera,bsd500].')
p.add_argument('--model_type', type=str, default='sine',
               help='Options are "sine" (all sine activations) and "mixed" (first layer sine, other layers tanh)')
p.add_argument('--checkpoint_path', default=None, help='Checkpoint to trained model.')
opt = p.parse_args()

# Build a 256x256 image dataset that also provides ground-truth gradients
# (compute_diff='gradients'), since training supervises on image gradients.
if opt.dataset == 'camera':
    img_dataset = dataio.Camera()
    coord_dataset = dataio.Implicit2DWrapper(img_dataset, sidelength=256, compute_diff='gradients')
elif opt.dataset == 'bsd500':
    # you can select the image your like in idx to sample
    img_dataset = dataio.BSD500ImageDataset(in_folder='../data/BSD500/train',
                                            idx_to_sample=[19])
    coord_dataset = dataio.Implicit2DWrapper(img_dataset, sidelength=256, compute_diff='gradients')

dataloader = DataLoader(coord_dataset, shuffle=True, batch_size=opt.batch_size, pin_memory=True, num_workers=0)

# Define the model.  'rbf'/'nerf' select input encodings on top of a ReLU MLP.
if opt.model_type == 'sine' or opt.model_type == 'relu' or opt.model_type == 'tanh' or opt.model_type == 'softplus':
    model = modules.SingleBVPNet(type=opt.model_type, mode='mlp', sidelength=(256, 256))
elif opt.model_type == 'rbf' or opt.model_type == 'nerf':
    model = modules.SingleBVPNet(type='relu', mode=opt.model_type, sidelength=(256, 256))
else:
    raise NotImplementedError
model.cuda()

# Define the loss & summary functions: supervise the model's spatial
# gradients against the image gradients (Poisson reconstruction).
loss_fn = loss_functions.gradients_mse
summary_fn = utils.write_gradients_summary

root_path = os.path.join(opt.logging_root, opt.experiment_name)

training.train(model=model, train_dataloader=dataloader, epochs=opt.num_epochs, lr=opt.lr,
               steps_til_summary=opt.steps_til_summary, epochs_til_checkpoint=opt.epochs_til_ckpt,
               model_dir=root_path, loss_fn=loss_fn, summary_fn=summary_fn, double_precision=False)
| 46.927536 | 116 | 0.722359 |
2985ee754f28f4194058129fed33139057a48e40 | 11,794 | py | Python | autodc/components/transfer_learning/tlbo/models/abstract_model.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | 27 | 2021-07-19T09:03:34.000Z | 2022-03-31T06:19:23.000Z | autodc/components/transfer_learning/tlbo/models/abstract_model.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | 4 | 2021-07-15T12:17:10.000Z | 2022-01-26T17:16:58.000Z | autodc/components/transfer_learning/tlbo/models/abstract_model.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | 17 | 2020-05-12T20:24:50.000Z | 2021-07-11T03:31:38.000Z | import typing
import numpy as np
from typing import List, Optional, Tuple, Union
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.exceptions import NotFittedError
from ..config_space import ConfigurationSpace
from ..utils.constants import VERY_SMALL_NUMBER
from ..utils.logging import PickableLoggerAdapter
class AbstractModel(object):
    """Abstract implementation of the Surrogate Model API.

    **Note:** The input dimensionality of Y for training and the output dimensions
    of all predictions (also called ``n_objectives``) depends on the concrete
    implementation of this abstract class.

    Attributes
    ----------
    instance_features : np.ndarray(I, K)
        Contains the K dimensional instance features
        of the I different instances
    pca : sklearn.decomposition.PCA
        Object to perform PCA
    pca_components : float
        Number of components to keep or None
    n_feats : int
        Number of instance features
    n_params : int
        Number of parameters in a configuration (only available after train has
        been called)
    scaler : sklearn.preprocessing.MinMaxScaler
        Object to scale data to be within [0, 1]
    var_threshold : float
        Lower bound for variance. If estimated variance < var_threshold, it is
        set to var_threshold
    types : list
        If set, contains a list with feature types (cat, const) of input vector
    """

    def __init__(self,
                 configspace: ConfigurationSpace,
                 types: np.ndarray,
                 bounds: typing.List[typing.Tuple[float, float]],
                 seed: int,
                 instance_features: np.ndarray=None,
                 pca_components: float=None,
                 return_normalized_y=False
                 ):
        """Constructor

        Parameters
        ----------
        configspace : ConfigurationSpace
            Configuration space to tune for.
        types : np.ndarray (D)
            Specifies the number of categorical values of an input dimension where
            the i-th entry corresponds to the i-th input dimension. Let's say we
            have 2 dimension where the first dimension consists of 3 different
            categorical choices and the second dimension is continuous than we
            have to pass np.array([3, 0]). Note that we count starting from 0.
        bounds : list
            bounds of input dimensions: (lower, upper) for continuous dims;
            (n_cat, np.nan) for categorical dims
        seed : int
            The seed that is passed to the model library.
        instance_features : np.ndarray (I, K)
            Contains the K dimensional instance features
            of the I different instances
        pca_components : float
            Number of components to keep when using PCA to reduce
            dimensionality of instance features. Requires to
            set n_feats (> pca_dims).
        return_normalized_y : bool
            If True, predictions are expected to stay in the normalized
            target space (see ``_normalize_y``/``_untransform_y``).
        """
        self.configspace = configspace
        self.seed = seed
        self.instance_features = instance_features
        self.pca_components = pca_components
        self.return_normalized_y = return_normalized_y

        if instance_features is not None:
            self.n_feats = instance_features.shape[1]
        else:
            self.n_feats = 0

        self.n_params = None  # will be updated on train()

        # PCA is only applied when there are more instance features than
        # requested components; otherwise both stay None.
        self.pca = None
        self.scaler = None
        if self.pca_components and self.n_feats > self.pca_components:
            self.pca = PCA(n_components=self.pca_components)
            self.scaler = MinMaxScaler()

        # Never use a lower variance than this
        self.var_threshold = VERY_SMALL_NUMBER
        self.bounds = bounds
        self.types = types
        # Initial types array which is used to reset the type array at every call to train()
        self._initial_types = types.copy()

        self.logger = PickableLoggerAdapter(self.__module__ + "." + self.__class__.__name__)

    def train(self, X: np.ndarray, Y: np.ndarray) -> 'AbstractModel':
        """Trains the EPM on X and Y.

        Parameters
        ----------
        X : np.ndarray [n_samples, n_features (config + instance features)]
            Input data points.
        Y : np.ndarray [n_samples, n_objectives]
            The corresponding target values. n_objectives must match the
            number of target names specified in the constructor.

        Returns
        -------
        self : AbstractEPM
        """
        # Reset to the original type vector: a previous train() call may have
        # replaced the instance-feature part with PCA components.
        self.types = self._initial_types.copy()

        if len(X.shape) != 2:
            raise ValueError('Expected 2d array, got %dd array!' % len(X.shape))
        if X.shape[1] != len(self.types):
            raise ValueError('Feature mismatch: X should have %d features, but has %d' % (len(self.types), X.shape[1]))
        if X.shape[0] != Y.shape[0]:
            raise ValueError('X.shape[0] (%s) != y.shape[0] (%s)' % (X.shape[0], Y.shape[0]))

        self.n_params = X.shape[1] - self.n_feats

        # reduce dimensionality of features of larger than PCA_DIM
        if self.pca and X.shape[0] > self.pca.n_components:
            X_feats = X[:, -self.n_feats:]
            # scale features
            X_feats = self.scaler.fit_transform(X_feats)
            X_feats = np.nan_to_num(X_feats)  # if features with max == min
            # PCA
            X_feats = self.pca.fit_transform(X_feats)
            X = np.hstack((X[:, :self.n_params], X_feats))
            if hasattr(self, "types"):
                # for RF, adapt types list
                # if X_feats.shape[0] < self.pca, X_feats.shape[1] ==
                # X_feats.shape[0]
                self.types = np.array(
                    np.hstack((self.types[:self.n_params], np.zeros((X_feats.shape[1])))),
                    dtype=np.uint,
                )
        return self._train(X, Y)

    def _train(self, X: np.ndarray, Y: np.ndarray) -> 'AbstractModel':
        """Trains the random forest on X and y.

        Parameters
        ----------
        X : np.ndarray [n_samples, n_features (config + instance features)]
            Input data points.
        Y : np.ndarray [n_samples, n_objectives]
            The corresponding target values. n_objectives must match the
            number of target names specified in the constructor.

        Returns
        -------
        self
        """
        raise NotImplementedError

    def predict(self, X: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:
        """
        Predict means and variances for given X.

        Parameters
        ----------
        X : np.ndarray of shape = [n_samples, n_features (config + instance features)]
            Training samples

        Returns
        -------
        means : np.ndarray of shape = [n_samples, n_objectives]
            Predictive mean
        vars : np.ndarray of shape = [n_samples, n_objectives]
            Predictive variance
        """
        if len(X.shape) != 2:
            raise ValueError('Expected 2d array, got %dd array!' % len(X.shape))
        if X.shape[1] != len(self._initial_types):
            raise ValueError('Rows in X should have %d entries but have %d!' % (len(self._initial_types), X.shape[1]))

        if self.pca:
            try:
                X_feats = X[:, -self.n_feats:]
                X_feats = self.scaler.transform(X_feats)
                X_feats = self.pca.transform(X_feats)
                X = np.hstack((X[:, :self.n_params], X_feats))
            except NotFittedError:
                pass  # PCA not fitted if only one training sample

        if X.shape[1] != len(self.types):
            raise ValueError('Rows in X should have %d entries but have %d!' % (len(self.types), X.shape[1]))

        mean, var = self._predict(X)

        # Guarantee 2d output even if the concrete model returns 1d arrays.
        if len(mean.shape) == 1:
            mean = mean.reshape((-1, 1))
        if len(var.shape) == 1:
            var = var.reshape((-1, 1))

        return mean, var

    def _predict(self, X: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:
        """
        Predict means and variances for given X.

        Parameters
        ----------
        X : np.ndarray
            [n_samples, n_features (config + instance features)]

        Returns
        -------
        means : np.ndarray of shape = [n_samples, n_objectives]
            Predictive mean
        vars : np.ndarray of shape = [n_samples, n_objectives]
            Predictive variance
        """
        raise NotImplementedError()

    def predict_marginalized_over_instances(self, X: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:
        """Predict mean and variance marginalized over all instances.

        Returns the predictive mean and variance marginalised over all
        instances for a set of configurations.

        Parameters
        ----------
        X : np.ndarray
            [n_samples, n_features (config)]

        Returns
        -------
        means : np.ndarray of shape = [n_samples, 1]
            Predictive mean
        vars : np.ndarray of shape = [n_samples, 1]
            Predictive variance
        """
        if len(X.shape) != 2:
            raise ValueError('Expected 2d array, got %dd array!' % len(X.shape))
        # Bugfix: ``bounds`` is declared as a list of tuples, which has no
        # ``.shape`` attribute; ``len()`` works for both lists and arrays.
        if X.shape[1] != len(self.bounds):
            raise ValueError('Rows in X should have %d entries but have %d!' %
                             (len(self.bounds),
                              X.shape[1]))

        if self.instance_features is None or \
                len(self.instance_features) == 0:
            mean, var = self.predict(X)
            var[var < self.var_threshold] = self.var_threshold
            var[np.isnan(var)] = self.var_threshold
            return mean, var
        else:
            n_instances = len(self.instance_features)

            mean = np.zeros(X.shape[0])
            var = np.zeros(X.shape[0])
            for i, x in enumerate(X):
                # Evaluate configuration x on every instance at once.
                X_ = np.hstack(
                    (np.tile(x, (n_instances, 1)), self.instance_features))
                means, vars_ = self.predict(X_)
                # VAR[1/n (X_1 + ... + X_n)] =
                # 1/n^2 * ( VAR(X_1) + ... + VAR(X_n))
                # for independent X_1 ... X_n
                var_x = np.sum(vars_) / (len(vars_) ** 2)
                if var_x < self.var_threshold:
                    var_x = self.var_threshold

                var[i] = var_x
                mean[i] = np.mean(means)

            if len(mean.shape) == 1:
                mean = mean.reshape((-1, 1))
            if len(var.shape) == 1:
                var = var.reshape((-1, 1))

            return mean, var

    def _normalize_y(self, y: np.ndarray) -> np.ndarray:
        """Normalize data to zero mean unit standard deviation.

        Parameters
        ----------
        y : np.ndarray
            Targets for the Gaussian process

        Returns
        -------
        np.ndarray
        """
        self.mean_y_ = np.mean(y)
        self.std_y_ = np.std(y)
        if self.std_y_ == 0:
            # Avoid division by zero for constant targets.
            self.std_y_ = 1
        return (y - self.mean_y_) / self.std_y_

    def _untransform_y(
            self,
            y: np.ndarray,
            var: Optional[np.ndarray] = None,
    ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
        """Transform zero-mean unit-standard-deviation data back into the regular space.

        This function should be used after a prediction with the Gaussian process which was trained on normalized data.

        Parameters
        ----------
        y : np.ndarray
            Normalized data.
        var : np.ndarray (optional)
            Normalized variance

        Returns
        -------
        np.ndarray or Tuple[np.ndarray, np.ndarray]
        """
        y = y * self.std_y_ + self.mean_y_
        if var is not None:
            # Variance scales with the square of the standard deviation.
            var = var * self.std_y_ ** 2
            return y, var
        return y
| 35.739394 | 119 | 0.573427 |
390e8272d9e4887ffbe08743f191d22b9fbba26a | 106 | py | Python | exercicios-turtle/.history/2_equation_20210624122516.py | Aleff13/poo-ufsc | bc1574df26f840a3c0fd5b1e0c72e5d69f61493d | [
"MIT"
] | 1 | 2021-11-28T18:49:21.000Z | 2021-11-28T18:49:21.000Z | exercicios-turtle/.history/2_equation_20210624122516.py | Aleff13/poo-ufsc | bc1574df26f840a3c0fd5b1e0c72e5d69f61493d | [
"MIT"
] | null | null | null | exercicios-turtle/.history/2_equation_20210624122516.py | Aleff13/poo-ufsc | bc1574df26f840a3c0fd5b1e0c72e5d69f61493d | [
"MIT"
] | null | null | null | print('A seguir digite os valores referentes a Ax² + Bx + c = 0')
A=float(input('Digite o valor de A: ')) | 35.333333 | 65 | 0.669811 |
ae14a8345c37caed683ac71316b3588f14adfb72 | 3,590 | py | Python | wheat/consensus/default_constants.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | null | null | null | wheat/consensus/default_constants.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | null | null | null | wheat/consensus/default_constants.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | null | null | null | from wheat.util.ints import uint64
from .constants import ConsensusConstants
testnet_kwargs = {
"SLOT_BLOCKS_TARGET": 32,
"MIN_BLOCKS_PER_CHALLENGE_BLOCK": 16, # Must be less than half of SLOT_BLOCKS_TARGET
"MAX_SUB_SLOT_BLOCKS": 128, # Must be less than half of SUB_EPOCH_BLOCKS
"NUM_SPS_SUB_SLOT": 64, # Must be a power of 2
"SUB_SLOT_ITERS_STARTING": 2 ** 27,
# DIFFICULTY_STARTING is the starting difficulty for the first epoch, which is then further
# multiplied by another factor of DIFFICULTY_CONSTANT_FACTOR, to be used in the VDF iter calculation formula.
"DIFFICULTY_CONSTANT_FACTOR": 2 ** 66,
"DIFFICULTY_STARTING": 7,
"DIFFICULTY_CHANGE_MAX_FACTOR": 3, # The next difficulty is truncated to range [prev / FACTOR, prev * FACTOR]
# These 3 constants must be changed at the same time
"SUB_EPOCH_BLOCKS": 384, # The number of blocks per sub-epoch, mainnet 384
"EPOCH_BLOCKS": 4608, # The number of blocks per epoch, mainnet 4608. Must be multiple of SUB_EPOCH_SB
"SIGNIFICANT_BITS": 8, # The number of bits to look at in difficulty and min iters. The rest are zeroed
"DISCRIMINANT_SIZE_BITS": 1024, # Max is 1024 (based on ClassGroupElement int size)
"NUMBER_ZERO_BITS_PLOT_FILTER": 9, # H(plot signature of the challenge) must start with these many zeroes
"MIN_PLOT_SIZE": 32, # 32 for mainnet
"MAX_PLOT_SIZE": 50,
"SUB_SLOT_TIME_TARGET": 600, # The target number of seconds per slot, mainnet 600
"NUM_SP_INTERVALS_EXTRA": 3, # The number of sp intervals to add to the signage point
"MAX_FUTURE_TIME": 5 * 60, # The next block can have a timestamp of at most these many seconds in the future
"NUMBER_OF_TIMESTAMPS": 11, # Than the average of the last NUMBER_OF_TIMESTAMPS blocks
# Used as the initial cc rc challenges, as well as first block back pointers, and first SES back pointer
# We override this value based on the chain being run (testnet0, testnet1, mainnet, etc)
# Default used for tests is std_hash(b'')
"GENESIS_CHALLENGE": bytes.fromhex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
# Forks of wheat should change this value to provide replay attack protection. This is set to mainnet genesis chall
"AGG_SIG_ME_ADDITIONAL_DATA": bytes.fromhex("2504307e5ea08f9edefb3a002990417c1b8ebec055bbe8cf673e7f56a0601511"),
"GENESIS_PRE_FARM_POOL_PUZZLE_HASH": bytes.fromhex(
"7417e0f9ad2b186d46c5ad00be271fddcae6461e374213e8862c31a7b79b13d7"
),
"GENESIS_PRE_FARM_FARMER_PUZZLE_HASH": bytes.fromhex(
"7417e0f9ad2b186d46c5ad00be271fddcae6461e374213e8862c31a7b79b13d7"
),
"MAX_VDF_WITNESS_SIZE": 64,
# Size of mempool = 50x the size of block
"MEMPOOL_BLOCK_BUFFER": 50,
# Max coin amount, fits into 64 bits
"MAX_COIN_AMOUNT": uint64((1 << 64) - 1),
# Max block cost in clvm cost units
"MAX_BLOCK_COST_CLVM": 11000000000,
# The cost per byte of generator program
"COST_PER_BYTE": 12000,
"WEIGHT_PROOF_THRESHOLD": 2,
"BLOCKS_CACHE_SIZE": 4608 + (128 * 4),
"WEIGHT_PROOF_RECENT_BLOCKS": 1000,
"MAX_BLOCK_COUNT_PER_REQUESTS": 32, # Allow up to 32 blocks per request
"INITIAL_FREEZE_END_TIMESTAMP": 1626182291, # 2021-07-13 21:18:11 GMT+8000
"NETWORK_TYPE": 0,
"MAX_GENERATOR_SIZE": 1000000,
"MAX_GENERATOR_REF_LIST_SIZE": 512, # Number of references allowed in the block generator ref list
"POOL_SUB_SLOT_ITERS": 37600000000, # iters limit * NUM_SPS
}
DEFAULT_CONSTANTS = ConsensusConstants(**testnet_kwargs) # type: ignore
| 57.903226 | 119 | 0.741504 |
373eff4f4128c09db6deaaa8361a8219f4c2682b | 11,372 | py | Python | src/softfab/render.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | 20 | 2019-02-07T17:03:04.000Z | 2020-03-16T20:45:19.000Z | src/softfab/render.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | 36 | 2019-02-11T08:57:16.000Z | 2020-09-29T05:32:08.000Z | src/softfab/render.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD-3-Clause
'''
Module to render the page
'''
from typing import ClassVar, Optional, Type, cast
import logging
from twisted.cred.error import LoginFailed, Unauthorized
from twisted.internet.defer import ensureDeferred
from twisted.internet.error import ConnectionClosed
from twisted.python.failure import Failure
from twisted.web.server import NOT_DONE_YET, Request as TwistedRequest
from softfab.FabPage import FabPage
from softfab.Page import (
FabResource, InternalError, InvalidRequest, PageProcessor,
PresentableError, ProcT, Redirect, Redirector, Responder, logPageException
)
from softfab.UIPage import UIPage, UIResponder
from softfab.pageargs import ArgsCorrected, ArgsInvalid, ArgsT, Query, dynamic
from softfab.request import Request
from softfab.response import NotModified, Response, ResponseHeaders
from softfab.users import AccessDenied, UnknownUser, User
from softfab.utils import abstract
from softfab.webgui import docLink
from softfab.xmlgen import XMLContent, xhtml
# Profiling options:
# These are developer-only switches; both default to False and must be
# edited in source to enable.
_timeRender = False
"""Print the time it took to render the page."""
_profileRender = False
"""Profile the rendering and create a file with profile data.
Note that capturing profile data adds considerable overhead, so don't
attach any value to the absolute times while doing that. The useful
information is in where most time gets spent relatively.
"""
# Import the profiling helpers only when the corresponding flag is set,
# so production runs pay no import cost.
if _timeRender:
    from time import time
if _profileRender:
    from cProfile import Profile
class ErrorPage(UIPage[ProcT]):
    """Abstract base class for error pages.

    Subclasses must define `status` (the HTTP status code) and `title`
    (the human-readable page title) and implement `presentContent`.
    """
    status: ClassVar[int] = abstract
    title: ClassVar[str] = abstract

    def __init__(self, messageText: Optional[str] = None):
        """Create an error page; `messageText` defaults to the class title."""
        super().__init__()
        if messageText is None:
            messageText = self.title
        self.messageText = messageText

    def pageTitle(self, proc: ProcT) -> str:
        return self.title

    def writeHTTPHeaders(self, response: ResponseHeaders) -> None:
        # Set the HTTP status line before the base class writes the rest
        # of the headers.
        response.setStatus(self.status, self.messageText)
        super().writeHTTPHeaders(response)

    def presentContent(self, **kwargs: object) -> XMLContent:
        raise NotImplementedError
class BadRequestPage(ErrorPage[ProcT]):
    '''400 error page.

    Carries both a plain-text message (for the HTTP status line) and an
    XHTML fragment shown in the page body.
    '''
    status = 400
    title = 'Bad Request'

    def __init__(self, messageText: str, messageHTML: XMLContent):
        super().__init__(messageText)
        self.messageHTML = messageHTML

    def presentContent(self, **kwargs: object) -> XMLContent:
        return self.messageHTML
class ForbiddenPage(ErrorPage[ProcT]):
    '''403 error page: shown when access is denied.
    '''
    status = 403
    title = 'Access Denied'

    def presentContent(self, **kwargs: object) -> XMLContent:
        return xhtml.p[ f'Access denied: {self.messageText}.' ]
class NotFoundPage(ErrorPage[ProcT]):
    '''404 error page.

    TODO: When there is a directory in the URL, the style sheets and images
          are not properly referenced.
    '''
    status = 404
    title = 'Page Not Found'

    def presentContent(self, **kwargs: object) -> XMLContent:
        # Offer a link back to the home page so the user is not stranded.
        return (
            xhtml.p[ 'The page you requested was not found on this server.' ],
            xhtml.p[ xhtml.a(href = 'Home')[ 'Back to Home' ] ]
            )
class InternalErrorPage(ErrorPage[ProcT]):
    '''500 error page: shown when an internal error occurred.
    '''
    status = 500
    title = 'Internal Error'

    def presentContent(self, **kwargs: object) -> XMLContent:
        # Show the error message and point the user at the bug-report docs.
        return (
            xhtml.p[ f'Internal error: {self.messageText}.' ],
            xhtml.p[ 'Please ', docLink('/reference/contact/')[
                'report this as a bug' ], '.' ]
            )
class _PlainTextResponder(Responder):
    """Responder that writes a fixed status code and plain-text message."""

    def __init__(self, status: int, message: str):
        super().__init__()
        self.__status = status
        self.__message = message

    async def respond(self, response: Response) -> None:
        # The message doubles as the HTTP status reason phrase and the body.
        response.setStatus(self.__status, self.__message)
        response.setContentType('text/plain')
        response.write(self.__message + '\n')
def renderAuthenticated(page: FabResource, request: TwistedRequest) -> object:
    """Entry point for Twisted: render `page` asynchronously.

    Returns NOT_DONE_YET so Twisted keeps the request open until the
    deferred created here finishes (or fails) and closes it.
    """
    def done(result: object) -> None: # pylint: disable=unused-argument
        request.finish()
    def failed(reason: Failure) -> None:
        ex = reason.value
        if isinstance(ex, ConnectionClosed):
            # The client went away; log at debug level only.
            logging.debug(
                'Connection closed while presenting "%s": %s',
                request.path.decode(errors='replace'), ex
                )
        else:
            request.processingFailed(reason)
        # Returning None (implicitly) because the error is handled.
        # Otherwise, it will be logged twice.
    d = ensureDeferred(renderAsync(page, request))
    d.addCallback(done).addErrback(failed)
    return NOT_DONE_YET
def _unauthorizedResponder(ex: Exception) -> Responder:
    """Build a plain-text 403 responder from an authorization failure.

    Uses the exception's first argument as the message when present,
    otherwise falls back to a generic denial text.
    """
    if ex.args:
        message = ex.args[0]
    else:
        message = "You are not authorized to perform this operation"
    return _PlainTextResponder(403, message)
async def renderAsync(page: FabResource, request: TwistedRequest) -> None:
    """Authenticate the request, run the page, and write the response.

    Orchestrates the full request pipeline: authentication, parse/process
    (via `_parseAndProcess`), and presentation (via `_present`). Redirects
    and internal errors raised anywhere in the pipeline are converted into
    the appropriate responder here.
    """
    req: Request = Request(request)
    authenticator = page.authenticator
    response = Response(request,
                        authenticator.project.frameAncestors,
                        req.userAgent)
    try:
        try:
            user: User = await authenticator.authenticate(req)
        except LoginFailed as ex:
            if req.getSubPath() is None:
                responder = authenticator.askForAuthentication(
                    req, ex.args[0] if ex.args else None
                    )
            else:
                # Widget requests should just fail immediately instead of
                # asking for authentication.
                responder = _unauthorizedResponder(ex)
        except Unauthorized as ex:
            responder = _unauthorizedResponder(ex)
        else:
            responder = await _parseAndProcess(page, req, user)
    except Redirect as ex:
        responder = Redirector(ex.url)
    except InternalError as ex:
        logging.error(
            'Internal error processing %s: %s', page.name, str(ex)
            )
        # Build a minimal processor so the error page can be rendered even
        # though normal processing never happened.
        responder = UIResponder(
            InternalErrorPage[PageProcessor](str(ex)),
            PageProcessor(page, req, FabResource.Arguments(), UnknownUser())
            )
    await _present(responder, response)
def _checkActive(
        page: FabResource[ArgsT, PageProcessor[ArgsT]],
        args: ArgsT
        ) -> None:
    """Redirect to the parent page when an inactive FabPage is requested.

    Resources that are not FabPages are left untouched.
    """
    pageIsInactive = isinstance(page, FabPage) and not page.isActive()
    if pageIsInactive:
        raise Redirect(page.getParentURL(args))
async def _parseAndProcess(page: FabResource[ArgsT, PageProcessor[ArgsT]],
                           req: Request[ArgsT],
                           user: User
                           ) -> Responder:
    '''Parse step: determine values for page arguments.
    Processing step: database interaction.

    Returns the Responder that will write the reply: either the page's own
    responder on success, or an error/redirect responder when access checks,
    argument parsing or processing fail.
    '''
    # We might hit an error before argument parsing completes, for example
    # if access is denied at the page level or if the argument parsing
    # itself raises an exception.
    # TODO: There should be a way to respond without having a processing
    #       result, or to construct a processing result without arguments.
    args = cast(ArgsT, None)
    # TODO: Create processor in the processing step.
    #       This is currently not possible because the error handlers
    #       need a PageProcessor instance.
    proc: PageProcessor[ArgsT] = page.Processor(page, req, args, user)
    try:
        # Page-level authorization.
        # It is possible for additional access checks to fail during the
        # processing step.
        page.checkAccess(user)

        # Argument parsing.
        try:
            args = req.parseArgs(cast(Type[ArgsT], page.Arguments))
        except ArgsCorrected as ex:
            if req.method == 'GET':
                raise
            else:
                # We can't correct args using redirection if args may have
                # come from the request body instead of the URL.
                args = cast(ArgsCorrected[ArgsT], ex).correctedArgs
        req.args = args
        proc.args = args

        _checkActive(page, args)

        # Processing step.
        try:
            await proc.process(req, user)
        except PresentableError as ex:
            # The processor decided the error can be shown to the user;
            # record it so the page can present it.
            proc.error = ex.args[0]
        else:
            # All dynamic argument defaults must have been resolved by now.
            assert all(
                value is not dynamic
                for name_, value in args.items()
                ), 'unhandled dynamic defaults: ' + ', '.join(
                name
                for name, value in args.items()
                if value is dynamic
                )
            proc.processTables()
    except AccessDenied as ex:
        forbiddenPage: ErrorPage[PageProcessor[ArgsT]] = ForbiddenPage(
            f"You don't have permission to {str(ex) or 'access this page'}"
            )
        responder: Responder = UIResponder(forbiddenPage, proc)
    except ArgsCorrected as ex:
        # GET requests with correctable arguments are redirected to the
        # canonical URL instead of being served directly.
        subPath = req.getSubPath()
        query = Query.fromArgs(ex.correctedArgs)
        if subPath is None:
            url = f'{page.name}?{query.toURL()}'
        else:
            url = f'{page.name}/{subPath}?{query.toURL()}'
        responder = Redirector(url)
    except ArgsInvalid as ex:
        badRequestPage: ErrorPage[PageProcessor[ArgsT]] = BadRequestPage(
            str(ex),
            ( xhtml.p[ 'Invalid arguments:' ],
              xhtml.dl[(
                  ( xhtml.dt[ name ], xhtml.dd[ message ] )
                  for name, message in ex.errors.items()
                  )]
            )
            )
        responder = UIResponder(badRequestPage, proc)
    except InvalidRequest as ex:
        badRequestPage = BadRequestPage(
            str(ex),
            xhtml.p[ 'Invalid request: ', str(ex) ]
            )
        responder = UIResponder(badRequestPage, proc)
    except Exception as ex:
        # Anything unexpected: log with traceback and let the page decide
        # how to respond to the failure.
        logPageException(req, 'Unexpected exception processing request')
        responder = page.errorResponder(ex, proc)
    else:
        try:
            responder = page.getResponder(req.getSubPath(), proc)
        except KeyError:
            # Unknown sub-path within the page.
            notFoundPage: ErrorPage[PageProcessor[ArgsT]] = NotFoundPage()
            responder = UIResponder(notFoundPage, proc)
    req.processEnd()
    return responder
async def _present(responder: Responder, response: Response) -> None:
    """Presentation step: write a response based on the processing results.

    Honors the module-level `_timeRender` and `_profileRender` developer
    flags; NotModified from the responder is treated as a normal outcome.
    """
    if _timeRender:
        start = time()
    if _profileRender:
        profile = Profile()
        # Note: This will only profile the execution until the first
        #       'await' in the responder. However, a lot of pages do
        #       their whole presentation in one go, so in many cases
        #       this is good enough.
        try:
            await profile.runcall(responder.respond, response)
        except NotModified:
            pass
        profile.dump_stats('request.prof')
    else:
        try:
            await responder.respond(response)
        except NotModified:
            pass
    if _timeRender:
        end = time()
        print('Responding took %1.3f seconds' % (end - start))
    response.finish()
| 34.776758 | 78 | 0.623373 |
4b2d37754ae48228e4e8d649224d0d7ce072bd1f | 4,360 | py | Python | eoxserver/resources/coverages/management/commands/eoxs_collection_purge.py | constantinius/eoxserver_combined | 68f261133fed65a4e8a6ddba82b0d2845171e4bf | [
"OML"
] | null | null | null | eoxserver/resources/coverages/management/commands/eoxs_collection_purge.py | constantinius/eoxserver_combined | 68f261133fed65a4e8a6ddba82b0d2845171e4bf | [
"OML"
] | null | null | null | eoxserver/resources/coverages/management/commands/eoxs_collection_purge.py | constantinius/eoxserver_combined | 68f261133fed65a4e8a6ddba82b0d2845171e4bf | [
"OML"
] | null | null | null | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2016 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from optparse import make_option
from django.core.management import call_command
from django.core.management.base import CommandError, BaseCommand
from eoxserver.resources.coverages import models
from eoxserver.resources.coverages.management.commands import (
CommandOutputMixIn, nested_commit_on_success
)
class Command(CommandOutputMixIn, BaseCommand):
    """Django management command: purge a Collection's contents.

    Deregisters all datasets contained in the given collection; with
    ``--recursive`` also descends into sub-collections, and with
    ``--delete`` removes the collection object(s) themselves.
    """

    option_list = BaseCommand.option_list + (
        make_option("-i", "--identifier",
            dest="identifier", action="store", default=None,
            help=("Collection identifier.")
        ),
        make_option("-r", "--recursive", "--recursive-purge",
            dest="recursive", action="store_true", default=False,
            help=("Optional. Purge all contained collections.")
        ),
        make_option("-d", "--delete",
            dest="delete", action="store_true", default=False,
            help=("Optional. Delete the collection as-well.")
        )
    )

    # Bugfix: the usage string referred to a nonexistent "-f" flag;
    # the delete option is "-d".
    args = "-i <collection-id> [-r] [-d]"

    help = """
        Purges a Collection, by deleting all containing items.
        By default, this command does not purge sub-collections contained in the
        specified collection.
        If the `--delete` option is set, then the collection(s) will even be
        removed as-well.
    """

    @nested_commit_on_success
    def handle(self, *args, **kwargs):
        """Look up the collection and purge it, reporting the total count.

        Raises CommandError when the identifier is missing, the collection
        does not exist, or the purge itself fails.
        """
        identifier = kwargs['identifier']
        if not identifier:
            raise CommandError("Missing the mandatory collection identifier.")

        try:
            collection = models.Collection.objects.get(identifier=identifier)
        except models.Collection.DoesNotExist:
            raise CommandError("Collection '%s' does not exist." % identifier)

        try:
            count = self._purge_collection(
                collection, kwargs["recursive"], kwargs["delete"]
            )
        # Bugfix: `except Exception, e` is Python-2-only syntax; the
        # `as` form works on Python 2.6+ and Python 3.
        except Exception as e:
            self.print_traceback(e, kwargs)
            raise CommandError("Purge of the collection failed: %s" % e)

        self.print_msg("Successfully purged %d collections." % count)

    def _purge_collection(self, collection, recursive, delete):
        """Purge one collection; returns the number of collections purged.

        Recurses into sub-collections when `recursive` is set, deregisters
        all contained datasets, and optionally deletes the collection.
        """
        collection = collection.cast()
        count = 1

        if recursive:
            sub_collections = collection.eo_objects.filter(
                collection__isnull=False
            )
            for sub_collection in sub_collections:
                count += self._purge_collection(
                    sub_collection, recursive, delete
                )

        # Deregister all plain datasets (objects that are not collections).
        identifiers = collection.eo_objects.filter(
            collection__isnull=True
        ).values_list("identifier", flat=True)
        if identifiers:
            call_command("eoxs_dataset_deregister", *identifiers)

        if delete:
            call_command("eoxs_collection_delete",
                identifier=collection.identifier
            )

        return count
| 37.586207 | 80 | 0.629587 |
d5108e610cda0269dfadaac70cf164cfe5e1092c | 7,693 | py | Python | astrorapid/get_training_data.py | kschamplin/astrorapid | acfd2cf953efe9ed89834a3347cf34f79693bf2f | [
"MIT"
] | 12 | 2019-01-15T19:40:11.000Z | 2020-12-17T11:36:18.000Z | astrorapid/get_training_data.py | kschamplin/astrorapid | acfd2cf953efe9ed89834a3347cf34f79693bf2f | [
"MIT"
] | 9 | 2019-07-19T15:29:19.000Z | 2022-03-12T00:59:37.000Z | astrorapid/get_training_data.py | kschamplin/astrorapid | acfd2cf953efe9ed89834a3347cf34f79693bf2f | [
"MIT"
] | 11 | 2019-02-19T20:35:08.000Z | 2021-07-16T05:56:28.000Z | import os
import pickle
import numpy as np
from astrorapid.read_snana_fits import read_light_curves_from_snana_fits_files
from astrorapid.helpers import delete_indexes
from astrorapid.process_light_curves import InputLightCurve
def get_data(get_data_func, class_num, data_dir, save_dir, passbands, known_redshift=True, nprocesses=1, redo=False,
             calculate_t0=True):
    """Load light-curve training data via a user-supplied reader function.

    Thin dispatcher: forwards all arguments (positionally, in order) to
    `get_data_func` and returns its result unchanged.

    Parameters
    ----------
    get_data_func : func
        Function that returns light_curves; called as
        get_data_func(class_num, data_dir, save_dir, passbands,
        known_redshift, nprocesses, redo, calculate_t0).
    class_num : int
        Class number, e.g. SNIa is 1 (see helpers.py for the lookup table).
    data_dir : str
        Directory where the raw data is stored, e.g. 'data/ZTF_20190512/'.
    save_dir : str
        Directory in which processed data is cached,
        e.g. 'data/saved_light_curves/'.
    passbands : tuple
        Passbands to use, e.g. ('g', 'r').
    known_redshift : bool
        Whether to correct the light curves for cosmological time dilation.
    nprocesses : int or None
        Number of worker processes to use.
    redo : bool
        Whether to re-read the data and overwrite the cached output.
    calculate_t0 : bool
        Whether to calculate t0 during preprocessing.

    Returns
    -------
    light_curves : dict of astropy.table.Table objects
        Mapping from object id to a table with columns
        (passband, time, flux, fluxErr, photflag).
    """
    reader_args = (class_num, data_dir, save_dir, passbands,
                   known_redshift, nprocesses, redo, calculate_t0)
    return get_data_func(*reader_args)
def get_data_from_snana_fits(class_num, data_dir='data/ZTF_20190512/', save_dir='data/saved_light_curves/',
                             passbands=('g', 'r'), known_redshift=True, nprocesses=1, redo=False, calculate_t0=True):
    """
    Get data from SNANA fits data files.

    Looks for a cached pickle in `save_dir` first (keyed on `class_num`);
    only when the cache is absent or `redo` is True are the SNANA
    HEAD/PHOT FITS file pairs read and the result cached.

    Returns
    -------
    light_curves : dict
        Mapping from object id to its processed light curve table.
    """
    save_lc_filepath = os.path.join(save_dir, f"lc_classnum_{class_num}.pickle")

    if os.path.exists(save_lc_filepath) and not redo:
        # Fast path: reuse the previously processed light curves.
        with open(save_lc_filepath, "rb") as fp:  # Unpickling
            light_curves = pickle.load(fp)
    else:
        class_dir = os.path.join(data_dir, 'ZTF_MSIP_MODEL{:02d}'.format(class_num))
        head_files = []
        phot_files = []
        # Robustness: sort for a deterministic file order (os.listdir order
        # is platform-dependent), keeping HEAD/PHOT pairs aligned.
        for file in sorted(os.listdir(class_dir)):
            filepath = os.path.join(class_dir, file)
            if filepath.endswith('HEAD.FITS'):
                head_files.append(filepath)
                # Each HEAD file has a matching PHOT file by naming convention.
                phot_files.append(filepath.replace('_HEAD.FITS', '_PHOT.FITS'))
            print(filepath)

        light_curves = read_light_curves_from_snana_fits_files(head_files, phot_files, passbands,
                                                               known_redshift=known_redshift, nprocesses=nprocesses,
                                                               calculate_t0=calculate_t0)

        # Robustness: make sure the cache directory exists before writing.
        os.makedirs(save_dir, exist_ok=True)
        with open(save_lc_filepath, "wb") as fp:  # Pickling
            pickle.dump(light_curves, fp)

    return light_curves
def get_real_ztf_training_data(class_name, data_dir='data/real_ZTF_data_from_osc',
                               save_dir='data/saved_light_curves/', pbs=('g', 'r'),
                               known_redshift=True, nprocesses=1, redo=False, calculate_t0=True):
    """
    Get data from saved real ZTF data with names and types from the Open Supernova Catalog.

    Loads the raw per-object photometry pickle for ``class_name``, converts
    magnitudes to fluxes, cleans each light curve, preprocesses it with
    ``InputLightCurve`` and caches the result as a pickle in ``save_dir``
    (reused unless ``redo`` is True).

    Returns a dict mapping object ID to preprocessed light curve.
    """
    save_lc_filepath = os.path.join(save_dir, f"lc_classnum_{class_name}.pickle")
    if os.path.exists(save_lc_filepath) and not redo:
        with open(save_lc_filepath, "rb") as fp:  # Unpickling
            light_curves = pickle.load(fp)
    else:
        light_curves = {}
        data_filepath = os.path.join(data_dir, f"ZTF_data_{class_name}_osc-6-May-2020.pickle")
        with open(data_filepath, "rb") as fp:
            mjds, passbands, mags, magerrs, photflags, zeropoints, dc_mags, dc_magerrs, magnrs, \
            sigmagnrs, isdiffposs, ras, decs, objids, redshifts, mwebvs = pickle.load(fp)
        for i, objid in enumerate(objids):
            # A known-redshift model cannot use objects without a redshift.
            if known_redshift and (redshifts[i] is None or np.isnan(redshifts[i])):
                print(f"Skipping {objid} because redshift is unknown and known_redshift model is selected")
                continue
            # Convert magnitudes to fluxes using the per-point zeropoints,
            # and propagate the errors: dF = F * ln(10)/2.5 * dmag.
            flux = 10. ** (-0.4 * (mags[i] - zeropoints[i]))
            fluxerr = np.abs(flux * magerrs[i] * (np.log(10.) / 2.5))
            # Map numeric filter codes 1/2 to passband names 'g'/'r' (in place).
            passbands[i] = np.where((passbands[i] == 1) | (passbands[i] == '1'), 'g', passbands[i])
            passbands[i] = np.where((passbands[i] == 2) | (passbands[i] == '2'), 'r', passbands[i])
            # photflag 4096 marks detections; re-tag the first detection epoch as 6144.
            mjd_first_detection = min(mjds[i][photflags[i] == 4096])
            photflags[i][np.where(mjds[i] == mjd_first_detection)] = 6144
            # Drop filter code 3, post-first-detection non-detections, and NaN fluxes.
            deleteindexes = np.where(((passbands[i] == 3) | (passbands[i] == '3')) | ((mjds[i] > mjd_first_detection) & (photflags[i] == 0)) | (np.isnan(flux)))
            if deleteindexes[0].size > 0:
                print("Deleting indexes {} at mjd {} and passband {}".format(deleteindexes, mjds[i][deleteindexes], passbands[i][deleteindexes]))
            mjd, passband, flux, fluxerr, zeropoint, photflag = delete_indexes(deleteindexes, mjds[i], passbands[i], flux, fluxerr, zeropoints[i], photflags[i])
            # Epoch of maximum observed flux, passed on as a training-set parameter.
            peakmjd = mjd[np.argmax(flux)]
            inputlightcurve = InputLightCurve(mjd, flux, fluxerr, passband, photflag,
                                              ras[i], decs[i], objid, redshifts[i], mwebvs[i],
                                              known_redshift=known_redshift,
                                              training_set_parameters={'class_number': class_name,
                                                                       'peakmjd': peakmjd},
                                              calculate_t0=calculate_t0)
            light_curves[objid] = inputlightcurve.preprocess_light_curve()
        with open(save_lc_filepath, "wb") as fp:
            pickle.dump(light_curves, fp)
    return light_curves
| 48.689873 | 160 | 0.55856 |
ad80e463d1bb13e5227621b11f8926f1e3f4de29 | 1,341 | py | Python | var/spack/repos/builtin/packages/commons-lang/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/commons-lang/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/commons-lang/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class CommonsLang(Package):
    """The standard Java libraries fail to provide enough methods for
    manipulation of its core classes. Apache Commons Lang provides these
    extra methods.
    Lang provides a host of helper utilities for the java.lang API, notably
    String manipulation methods, basic numerical methods, object reflection,
    concurrency, creation and serialization and System properties. Additionally
    it contains basic enhancements to java.util.Date and a series of utilities
    dedicated to help with building methods, such as hashCode, toString and
    equals."""
    homepage = "https://commons.apache.org/proper/commons-lang/"
    url = "https://archive.apache.org/dist/commons/lang/binaries/commons-lang-2.6-bin.tar.gz"
    # Known releases with the sha256 checksums of their binary tarballs.
    version('2.6', sha256='ff6a244bb71a9a1c859e81cb744d0ce698c20e04f13a7ef7dbffb99c8122752c')
    version('2.4', sha256='00e6b3174e31196d726c14302c8e7e9ba9b8409d57a8a9821c7648beeda31c5e')
    # Pure-Java jar: extends the jdk package and needs a JVM only at run time.
    extends('jdk')
    depends_on('java@2:', type='run')
    def install(self, spec, prefix):
        # No build step: copy the versioned jar straight into the install prefix.
        install('commons-lang-{0}.jar'.format(self.version), prefix)
| 41.90625 | 98 | 0.751678 |
604ae340d4bce02eada005d14e01320049188d03 | 5,568 | py | Python | translate.py | GregoryZeng/nlu-cw2 | 6e92126f918b4f82a7f7a0afa6c4023ffd25706d | [
"MIT"
] | null | null | null | translate.py | GregoryZeng/nlu-cw2 | 6e92126f918b4f82a7f7a0afa6c4023ffd25706d | [
"MIT"
] | null | null | null | translate.py | GregoryZeng/nlu-cw2 | 6e92126f918b4f82a7f7a0afa6c4023ffd25706d | [
"MIT"
] | null | null | null | import os
import logging
import argparse
import numpy as np
from tqdm import tqdm
import torch
from torch.serialization import default_restore_location
from seq2seq import models, utils
from seq2seq.data.dictionary import Dictionary
from seq2seq.data.dataset import Seq2SeqDataset, BatchSampler
def get_args():
    """Build the command-line parser for generation and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser('Sequence to Sequence Model')
    # Runtime options
    arg_parser.add_argument('--cuda', default=False, help='Use a GPU')
    arg_parser.add_argument('--seed', default=42, type=int, help='pseudo random number generator seed')
    # Add data arguments
    arg_parser.add_argument('--data', default='data-bin', help='path to data directory')
    arg_parser.add_argument('--checkpoint-path', default='checkpoints/checkpoint_best.pt', help='path to the model file')
    arg_parser.add_argument('--batch-size', default=None, type=int, help='maximum number of sentences in a batch')
    arg_parser.add_argument('--output', default='model_translations.txt', type=str,
                            help='path to the output file destination')
    arg_parser.add_argument('--max-len', default=25, type=int, help='maximum length of generated sequence')
    return arg_parser.parse_args()
def main(args):
    """Greedy-decode the test set with a trained seq2seq model and write translations.

    Loads the checkpoint at ``args.checkpoint_path``, restores the
    hyper-parameters stored inside it (preserving the command-line ``--cuda``
    flag), decodes each test sentence token-by-token up to ``args.max_len``
    tokens, and writes one hypothesis per line to ``args.output``.
    """
    # Load arguments from checkpoint
    torch.manual_seed(args.seed)
    state_dict = torch.load(args.checkpoint_path, map_location=lambda s, l: default_restore_location(s, 'cpu'))
    # Keep the command-line --cuda value: the namespace merge below overwrites
    # args with the hyper-parameters saved in the checkpoint.
    if_cuda = args.cuda
    args = argparse.Namespace(**{**vars(args), **vars(state_dict['args'])})
    args.cuda = if_cuda
    utils.init_logging(args)
    # Load dictionaries
    src_dict = Dictionary.load(os.path.join(args.data, 'dict.{:s}'.format(args.source_lang)))
    logging.info('Loaded a source dictionary ({:s}) with {:d} words'.format(args.source_lang, len(src_dict)))
    tgt_dict = Dictionary.load(os.path.join(args.data, 'dict.{:s}'.format(args.target_lang)))
    logging.info('Loaded a target dictionary ({:s}) with {:d} words'.format(args.target_lang, len(tgt_dict)))
    # Load dataset
    test_dataset = Seq2SeqDataset(
        src_file=os.path.join(args.data, 'test.{:s}'.format(args.source_lang)),
        tgt_file=os.path.join(args.data, 'test.{:s}'.format(args.target_lang)),
        src_dict=src_dict, tgt_dict=tgt_dict)
    # Batch sampler with a huge max-token budget so batching is limited only
    # by --batch-size; shuffling is off to keep output order deterministic.
    test_loader = torch.utils.data.DataLoader(test_dataset, num_workers=1, collate_fn=test_dataset.collater,
                                              batch_sampler=BatchSampler(test_dataset, 9999999,
                                                                         args.batch_size, 1, 0, shuffle=False,
                                                                         seed=args.seed))
    # Build model and criterion
    model = models.build_model(args, src_dict, tgt_dict)
    if args.cuda:
        model = model.cuda()
    model.eval()
    model.load_state_dict(state_dict['model'])
    logging.info('Loaded a model from checkpoint {:s}'.format(args.checkpoint_path))
    progress_bar = tqdm(test_loader, desc='| Generation', leave=False)
    # Iterate over the test set
    all_hyps = {}
    for i, sample in enumerate(progress_bar):
        # NOTE(review): args.cuda is compared against the *string* 'True' here,
        # while `model.cuda()` above only checks truthiness — any non-empty
        # string (including 'False') moves the model but not the batch.
        # Confirm the intended flag handling.
        if args.cuda == 'True':
            sample = utils.move_to_cuda(sample)
        with torch.no_grad():
            # Compute the encoder output
            encoder_out = model.encoder(sample['src_tokens'], sample['src_lengths'])
        # Seed every hypothesis with the end-of-sentence index.
        go_slice = \
            torch.ones(sample['src_tokens'].shape[0], 1).fill_(tgt_dict.eos_idx).type_as(sample['src_tokens'])
        prev_words = go_slice
        next_words = None
        for _ in range(args.max_len):
            with torch.no_grad():
                # Compute the decoder output by repeatedly feeding it the decoded sentence prefix
                decoder_out, _ = model.decoder(prev_words, encoder_out)
            # Suppress <UNK>s: wherever the best candidate is the unknown-word
            # index, fall back to the second-best candidate instead.
            _, next_candidates = torch.topk(decoder_out, 2, dim=-1)
            best_candidates = next_candidates[:, :, 0]
            backoff_candidates = next_candidates[:, :, 1]
            next_words = torch.where(best_candidates == tgt_dict.unk_idx, backoff_candidates, best_candidates)
            prev_words = torch.cat([go_slice, next_words], dim=1)
        # Segment into sentences
        # NOTE(review): .numpy() requires a CPU tensor — this would fail if the
        # batch was moved to the GPU above; a .cpu() call may be missing.
        decoded_batch = next_words.numpy()
        output_sentences = [decoded_batch[row, :] for row in range(decoded_batch.shape[0])]
        assert(len(output_sentences) == len(sample['id'].data))
        # Remove padding: truncate each sentence at its first end-of-sentence token.
        temp = list()
        for sent in output_sentences:
            first_eos = np.where(sent == tgt_dict.eos_idx)[0]
            if len(first_eos) > 0:
                temp.append(sent[:first_eos[0]])
            else:
                temp.append([])
        output_sentences = temp
        # Convert arrays of indices into strings of words
        output_sentences = [tgt_dict.string(sent) for sent in output_sentences]
        # Save translations
        assert(len(output_sentences) == len(sample['id'].data))
        for ii, sent in enumerate(output_sentences):
            all_hyps[int(sample['id'].data[ii])] = sent
    # Write to file
    if args.output is not None:
        with open(args.output, 'w') as out_file:
            # Hypotheses are keyed by dense sentence ids 0..N-1.
            for sent_id in range(len(all_hyps.keys())):
                out_file.write(all_hyps[sent_id] + '\n')
if __name__ == '__main__':
    # Script entry point: parse the command line and run translation.
    args = get_args()
    main(args)
| 42.830769 | 117 | 0.636135 |
fa660e6159278f49001d05669dba6c4ceafd2774 | 6,661 | py | Python | tests/test_dual_ga.py | RAufschlaeger/tfga | 1607381fdcb3b479fc90b3fbb3af44b739ca720a | [
"MIT"
] | 30 | 2020-05-24T13:14:15.000Z | 2021-12-21T21:10:57.000Z | tests/test_dual_ga.py | RAufschlaeger/tfga | 1607381fdcb3b479fc90b3fbb3af44b739ca720a | [
"MIT"
] | 11 | 2020-06-06T13:16:14.000Z | 2021-12-27T08:05:48.000Z | tests/test_dual_ga.py | RAufschlaeger/tfga | 1607381fdcb3b479fc90b3fbb3af44b739ca720a | [
"MIT"
] | 7 | 2020-05-30T12:20:26.000Z | 2022-01-20T08:12:36.000Z | import unittest as ut
import tensorflow as tf
from tfga import GeometricAlgebra
# Make tensorflow not take over the entire GPU memory
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)
# Dual-number algebra: a single basis vector "0" with a degenerate metric
# (its square is 0, as exercised by the tests below).
dual_metric = [0]
dual_bases = ["0"]
dual_blades = ["", "0"]  # scalar blade "" plus the pseudoscalar blade "0"
dual_blade_degrees = [len(blade) for blade in dual_blades]
class TestDualGeometricAlgebraMultiply(ut.TestCase):
    """Geometric products in the dual algebra (one basis vector squaring to zero)."""
    def assertTensorsEqual(self, a, b):
        """Assert that two tensors are element-wise equal."""
        message = "%s not equal to %s" % (a, b)
        self.assertTrue(tf.reduce_all(a == b), message)
    def test_mul_mv_mv(self):
        """Products of multivector operands follow the dual-number rules."""
        ga = GeometricAlgebra(metric=dual_metric)
        zero = ga.from_scalar(0.0)
        one = ga.from_scalar(1.0)
        eps = ga.from_tensor_with_kind(tf.ones(1), kind="pseudoscalar")
        ten = ga.from_scalar(10.0)
        expectations = [
            (eps, eps, zero),  # eps^2 = 0 in the degenerate metric
            (one, one, one),
            (zero, one, zero),
            (one, zero, zero),
            (one, eps, eps),
            (eps, one, eps),
            (zero, zero, zero),
            (ten, zero, zero),
            (zero, ten, zero),
        ]
        for lhs, rhs, expected in expectations:
            self.assertTensorsEqual(ga.geom_prod(lhs, rhs), expected)
        # (10 * eps) * eps collapses to zero as well.
        self.assertTensorsEqual(
            ga.geom_prod(ga.geom_prod(ten, eps), eps),
            zero
        )
        self.assertTensorsEqual(ga.geom_prod(ten, one), ten)
        self.assertTensorsEqual(ga.geom_prod(one, ten), ten)
    def test_mul_tf_mv(self):
        """Raw tf tensors mix with multivectors on either side of geom_prod."""
        ga = GeometricAlgebra(metric=dual_metric)
        zero = ga.from_scalar(0.0)
        one = ga.from_scalar(1.0)
        eps = ga.from_tensor_with_kind(tf.ones(1), kind="pseudoscalar")
        ten = ga.from_scalar(10.0)
        # Plain-tensor counterparts in blade order [scalar, pseudoscalar].
        zero_tf = tf.convert_to_tensor([0, 0], dtype=tf.float32)
        one_tf = tf.convert_to_tensor([1, 0], dtype=tf.float32)
        eps_tf = tf.convert_to_tensor([0, 1], dtype=tf.float32)
        ten_tf = tf.convert_to_tensor([10, 0], dtype=tf.float32)
        expectations = [
            (one, one_tf, one),
            (one_tf, one, one),
            (zero, one_tf, zero),
            (one_tf, zero, zero),
            (zero_tf, one, zero),
            (one, zero_tf, zero),
            (one_tf, eps, eps),
            (eps, one_tf, eps),
            (zero_tf, zero, zero),
            (zero, zero_tf, zero),
            (ten_tf, zero, zero),
            (zero, ten_tf, zero),
            (ten, zero_tf, zero),
            (zero_tf, ten, zero),
        ]
        for lhs, rhs, expected in expectations:
            self.assertTensorsEqual(ga.geom_prod(lhs, rhs), expected)
        self.assertTensorsEqual(
            ga.geom_prod(ga.geom_prod(ten_tf, eps), eps),
            zero
        )
        self.assertTensorsEqual(ga.geom_prod(ten_tf, one), ten)
        self.assertTensorsEqual(ga.geom_prod(one, ten_tf), ten)
        self.assertTensorsEqual(ga.geom_prod(ten, one_tf), ten)
        self.assertTensorsEqual(ga.geom_prod(one_tf, ten), ten)
class TestDualGeometricAlgebraMisc(ut.TestCase):
    """Dual-number forward-mode differentiation and multivector inversion tests."""
    def assertTensorsEqual(self, a, b):
        # Element-wise equality over the full tensors; failure shows both operands.
        self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b))
    def test_auto_diff_square(self):
        """Test automatic differentiation using
        dual numbers for the square function.
        f(x) = x^2
        f'(x) = d/dx f(x) = 2x
        """
        ga = GeometricAlgebra(metric=dual_metric)
        one = ga.from_scalar(1.0)
        five = ga.from_scalar(5.0)
        eps = ga.from_tensor_with_kind(tf.ones(1), kind="pseudoscalar")
        # Evaluate at x + eps: the scalar blade carries f(x), the dual blade f'(x).
        x = one + eps
        # f(1) = 1^2 = 1, f'(1) = 2
        x_squared = ga.geom_prod(x, x)
        self.assertTensorsEqual(ga.select_blades_with_name(x_squared, ""), 1.0)
        self.assertTensorsEqual(ga.select_blades_with_name(x_squared, "0"), 2.0)
        y = five + eps
        # f(5) = 5^2 = 25, f'(5) = 10
        y_squared = ga.geom_prod(y, y)
        self.assertTensorsEqual(ga.select_blades_with_name(y_squared, ""), 25.0)
        self.assertTensorsEqual(ga.select_blades_with_name(y_squared, "0"), 10.0)
    def test_batched_auto_diff_square(self):
        """Test automatic differentiation using
        dual numbers for the square function.
        Use batch with identical elements.
        f(x) = x^2
        f'(x) = d/dx f(x) = 2x
        """
        ga = GeometricAlgebra(metric=dual_metric)
        one = ga.from_tensor_with_kind(tf.ones([3, 4, 1]), kind="scalar")
        five = ga.from_tensor_with_kind(tf.fill([3, 4, 1], 5.0), kind="scalar")
        eps = ga.from_tensor_with_kind(tf.ones([3, 4, 1]), kind="pseudoscalar")
        x = one + eps
        # f(1) = 1^2 = 1, f'(1) = 2
        x_squared = ga.geom_prod(x, x)
        self.assertTensorsEqual(ga.select_blades_with_name(x_squared, ""), 1.0)
        self.assertTensorsEqual(ga.select_blades_with_name(x_squared, "0"), 2.0)
        y = five + eps
        # f(5) = 5^2 = 25, f'(5) = 10
        y_squared = ga.geom_prod(y, y)
        self.assertTensorsEqual(ga.select_blades_with_name(y_squared, ""), 25.0)
        self.assertTensorsEqual(ga.select_blades_with_name(y_squared, "0"), 10.0)
    def test_mul_inverse(self):
        """Scalar inverses commute; non-invertible multivectors need the general inverse."""
        ga = GeometricAlgebra(metric=dual_metric)
        # a = 2
        a = ga.from_tensor_with_kind(tf.fill([1], 2.0), kind="scalar")
        # b = 3 + 3e0
        b = ga.from_tensor_with_kind(tf.fill([2], 3.0), kind="mv")
        # a * b = 2 * (3 + 3e0) = 6 + 6e0
        c = ga.geom_prod(a, b)
        self.assertTensorsEqual(c, ga.from_scalar(6.0) + 6.0 * ga.e("0"))
        # a^-1 = 1 / 2
        a_inv = ga.inverse(a)
        self.assertTensorsEqual(ga.select_blades_with_name(a_inv, ""), 0.5)
        # c = a * b
        # => a_inv * c = b
        self.assertTensorsEqual(ga.geom_prod(a_inv, c), b)
        # Since a is scalar, should commute too.
        # => c * a_inv = b
        self.assertTensorsEqual(ga.geom_prod(c, a_inv), b)
        # b is not simply invertible (because it does not square to a scalar)
        # and will throw an exception
        self.assertRaises(Exception, ga.simple_inverse, b)
        # b is invertible with the shirokov inverse
        b_inv = ga.inverse(b)
        self.assertTensorsEqual(ga.geom_prod(b, b_inv), 1 * ga.e(""))
1a6d6c40ed0fdd98f7f6df63154320225f51bc13 | 40,726 | py | Python | test/with_dummyserver/test_https.py | verhovsky/urllib3 | 86d7193783a5e5cf8c1eb07e34244910bbfe77d1 | [
"MIT"
] | null | null | null | test/with_dummyserver/test_https.py | verhovsky/urllib3 | 86d7193783a5e5cf8c1eb07e34244910bbfe77d1 | [
"MIT"
] | null | null | null | test/with_dummyserver/test_https.py | verhovsky/urllib3 | 86d7193783a5e5cf8c1eb07e34244910bbfe77d1 | [
"MIT"
] | null | null | null | import datetime
import logging
import os.path
import shutil
import ssl
import sys
import tempfile
import warnings
from pathlib import Path
from test import (
LONG_TIMEOUT,
SHORT_TIMEOUT,
TARPIT_HOST,
notSecureTransport,
requires_network,
requires_ssl_context_keyfile_password,
resolvesLocalhostFQDN,
)
from test.conftest import ServerConfig
from typing import List, Optional
from unittest import mock
import pytest
import trustme
import urllib3.util as util
import urllib3.util.ssl_
from dummyserver.server import (
DEFAULT_CA,
DEFAULT_CA_KEY,
DEFAULT_CERTS,
encrypt_key_pem,
)
from dummyserver.testcase import HTTPSDummyServerTestCase
from urllib3 import HTTPSConnectionPool
from urllib3.connection import RECENT_DATE, VerifiedHTTPSConnection
from urllib3.exceptions import (
ConnectTimeoutError,
InsecureRequestWarning,
MaxRetryError,
ProtocolError,
SSLError,
SystemTimeWarning,
)
from urllib3.util.ssl_match_hostname import CertificateError
from urllib3.util.timeout import Timeout
from .. import has_alpn
# Retry failed tests
pytestmark = pytest.mark.flaky
# Stream urllib3 connection-pool logs to stdout so failing runs show handshake detail.
log = logging.getLogger("urllib3.connectionpool")
log.setLevel(logging.NOTSET)
log.addHandler(logging.StreamHandler(sys.stdout))
# Per-protocol copies of the default server certificates. ssl_version is None
# when the running Python build does not expose that PROTOCOL_* constant.
TLSv1_CERTS = DEFAULT_CERTS.copy()
TLSv1_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1", None)
TLSv1_1_CERTS = DEFAULT_CERTS.copy()
TLSv1_1_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1_1", None)
TLSv1_2_CERTS = DEFAULT_CERTS.copy()
TLSv1_2_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1_2", None)
TLSv1_3_CERTS = DEFAULT_CERTS.copy()
# There is no PROTOCOL_TLSv1_3; PROTOCOL_TLS negotiates the highest version.
TLSv1_3_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLS", None)
# File names of the client-certificate fixtures written by TestHTTPS.setup_class.
CLIENT_INTERMEDIATE_PEM = "client_intermediate.pem"
CLIENT_NO_INTERMEDIATE_PEM = "client_no_intermediate.pem"
CLIENT_INTERMEDIATE_KEY = "client_intermediate.key"
PASSWORD_CLIENT_KEYFILE = "client_password.key"
CLIENT_CERT = CLIENT_INTERMEDIATE_PEM
class TestHTTPS(HTTPSDummyServerTestCase):
tls_protocol_name: Optional[str] = None
def tls_protocol_not_default(self) -> bool:
return self.tls_protocol_name in {"TLSv1", "TLSv1.1"}
def tls_version(self) -> "ssl.TLSVersion":
if self.tls_protocol_name is None:
return pytest.skip("Skipping base test class")
try:
from ssl import TLSVersion
except ImportError:
return pytest.skip("ssl.TLSVersion isn't available")
return TLSVersion[self.tls_protocol_name.replace(".", "_")]
def ssl_version(self) -> int:
if self.tls_protocol_name is None:
return pytest.skip("Skipping base test class")
attribute = f"PROTOCOL_{self.tls_protocol_name.replace('.', '_')}"
ssl_version = getattr(ssl, attribute, None)
if ssl_version is None:
return pytest.skip(f"ssl.{attribute} isn't available")
return ssl_version # type: ignore[no-any-return]
    @classmethod
    def setup_class(cls) -> None:
        """Generate the client-certificate fixtures (intermediate-CA chain) in a temp dir."""
        super().setup_class()
        cls.certs_dir = tempfile.mkdtemp()
        # Start from existing root CA as we don't want to change the server certificate yet
        with open(DEFAULT_CA, "rb") as crt, open(DEFAULT_CA_KEY, "rb") as key:
            root_ca = trustme.CA.from_pem(crt.read(), key.read())
        # Generate another CA to test verification failure
        bad_ca = trustme.CA()
        cls.bad_ca_path = os.path.join(cls.certs_dir, "ca_bad.pem")
        bad_ca.cert_pem.write_to_path(cls.bad_ca_path)
        # client cert chain: root CA -> intermediate CA -> client cert
        intermediate_ca = root_ca.create_child_ca()
        cert = intermediate_ca.issue_cert("example.com")
        # Password-protected copy of the key for the key_password tests.
        encrypted_key = encrypt_key_pem(cert.private_key_pem, b"letmein")
        cert.private_key_pem.write_to_path(
            os.path.join(cls.certs_dir, CLIENT_INTERMEDIATE_KEY)
        )
        encrypted_key.write_to_path(
            os.path.join(cls.certs_dir, PASSWORD_CLIENT_KEYFILE)
        )
        # Write the client cert and the intermediate CA
        client_cert = os.path.join(cls.certs_dir, CLIENT_INTERMEDIATE_PEM)
        cert.cert_chain_pems[0].write_to_path(client_cert)
        cert.cert_chain_pems[1].write_to_path(client_cert, append=True)
        # Write only the client cert
        cert.cert_chain_pems[0].write_to_path(
            os.path.join(cls.certs_dir, CLIENT_NO_INTERMEDIATE_PEM)
        )
    @classmethod
    def teardown_class(cls) -> None:
        """Remove the temporary certificate directory created in setup_class."""
        super().teardown_class()
        shutil.rmtree(cls.certs_dir)
    def test_simple(self) -> None:
        """A verified GET against the dummy server returns 200."""
        with HTTPSConnectionPool(
            self.host,
            self.port,
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            r = https_pool.request("GET", "/")
            assert r.status == 200, r.data
    @resolvesLocalhostFQDN()
    def test_dotted_fqdn(self) -> None:
        """A trailing dot on the host (absolute FQDN form) still verifies and connects."""
        with HTTPSConnectionPool(
            self.host + ".",
            self.port,
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as pool:
            r = pool.request("GET", "/")
            assert r.status == 200, r.data
    def test_client_intermediate(self) -> None:
        """Check that certificate chains work well with client certs
        We generate an intermediate CA from the root CA, and issue a client certificate
        from that intermediate CA. Since the server only knows about the root CA, we
        need to send it the certificate *and* the intermediate CA, so that it can check
        the whole chain.
        """
        with HTTPSConnectionPool(
            self.host,
            self.port,
            key_file=os.path.join(self.certs_dir, CLIENT_INTERMEDIATE_KEY),
            cert_file=os.path.join(self.certs_dir, CLIENT_INTERMEDIATE_PEM),
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            r = https_pool.request("GET", "/certificate")
            subject = r.json()
            # The /certificate endpoint reports the presented client cert's subject.
            assert subject["organizationalUnitName"].startswith("Testing cert")
    def test_client_no_intermediate(self) -> None:
        """Check that missing links in certificate chains indeed break
        The only difference with test_client_intermediate is that we don't send the
        intermediate CA to the server, only the client cert.
        """
        with HTTPSConnectionPool(
            self.host,
            self.port,
            cert_file=os.path.join(self.certs_dir, CLIENT_NO_INTERMEDIATE_PEM),
            key_file=os.path.join(self.certs_dir, CLIENT_INTERMEDIATE_KEY),
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            # The server cannot build the chain to the root CA, so the request
            # fails during (or right after) the TLS handshake.
            with pytest.raises((SSLError, ProtocolError)):
                https_pool.request("GET", "/certificate", retries=False)
    @requires_ssl_context_keyfile_password()
    def test_client_key_password(self) -> None:
        """An encrypted client key loads when the correct key_password is supplied."""
        with HTTPSConnectionPool(
            self.host,
            self.port,
            ca_certs=DEFAULT_CA,
            key_file=os.path.join(self.certs_dir, PASSWORD_CLIENT_KEYFILE),
            cert_file=os.path.join(self.certs_dir, CLIENT_CERT),
            key_password="letmein",
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            r = https_pool.request("GET", "/certificate")
            subject = r.json()
            assert subject["organizationalUnitName"].startswith("Testing cert")
    @requires_ssl_context_keyfile_password()
    def test_client_encrypted_key_requires_password(self) -> None:
        """Omitting key_password for an encrypted client key fails with an SSLError."""
        with HTTPSConnectionPool(
            self.host,
            self.port,
            key_file=os.path.join(self.certs_dir, PASSWORD_CLIENT_KEYFILE),
            cert_file=os.path.join(self.certs_dir, CLIENT_CERT),
            key_password=None,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            with pytest.raises(MaxRetryError, match="password is required") as e:
                https_pool.request("GET", "/certificate")
            assert isinstance(e.value.reason, SSLError)
    def test_verified(self) -> None:
        """CERT_REQUIRED with the trusted CA succeeds and emits no warnings."""
        with HTTPSConnectionPool(
            self.host,
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            conn = https_pool._new_conn()
            assert conn.__class__ == VerifiedHTTPSConnection
            with warnings.catch_warnings(record=True) as w:
                r = https_pool.request("GET", "/")
                assert r.status == 200
            assert w == []
    def test_verified_with_context(self) -> None:
        """A caller-supplied SSLContext with CA locations loaded verifies cleanly."""
        ctx = util.ssl_.create_urllib3_context(
            cert_reqs=ssl.CERT_REQUIRED, ssl_minimum_version=self.tls_version()
        )
        ctx.load_verify_locations(cafile=DEFAULT_CA)
        with HTTPSConnectionPool(self.host, self.port, ssl_context=ctx) as https_pool:
            conn = https_pool._new_conn()
            assert conn.__class__ == VerifiedHTTPSConnection
            with mock.patch("warnings.warn") as warn:
                r = https_pool.request("GET", "/")
                assert r.status == 200
                assert not warn.called, warn.call_args_list
    def test_context_combines_with_ca_certs(self) -> None:
        """ca_certs given to the pool is applied on top of a caller-supplied SSLContext."""
        ctx = util.ssl_.create_urllib3_context(
            cert_reqs=ssl.CERT_REQUIRED, ssl_minimum_version=self.tls_version()
        )
        with HTTPSConnectionPool(
            self.host, self.port, ca_certs=DEFAULT_CA, ssl_context=ctx
        ) as https_pool:
            conn = https_pool._new_conn()
            assert conn.__class__ == VerifiedHTTPSConnection
            with mock.patch("warnings.warn") as warn:
                r = https_pool.request("GET", "/")
                assert r.status == 200
                assert not warn.called, warn.call_args_list
    @notSecureTransport()  # SecureTransport does not support cert directories
    def test_ca_dir_verified(self, tmp_path: Path) -> None:
        """Verification works with ca_cert_dir (OpenSSL hashed-name lookup)."""
        # OpenSSL looks up certificates by the hash for their name, see c_rehash
        # TODO infer the bytes using `cryptography.x509.Name.public_bytes`.
        # https://github.com/pyca/cryptography/pull/3236
        shutil.copyfile(DEFAULT_CA, str(tmp_path / "81deb5f7.0"))
        with HTTPSConnectionPool(
            self.host,
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_cert_dir=str(tmp_path),
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            conn = https_pool._new_conn()
            assert conn.__class__ == VerifiedHTTPSConnection
            with warnings.catch_warnings(record=True) as w:
                r = https_pool.request("GET", "/")
                assert r.status == 200
            assert w == []
    def test_invalid_common_name(self) -> None:
        """Connecting by IP when the cert names 'localhost' fails the hostname check."""
        with HTTPSConnectionPool(
            "127.0.0.1",
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            with pytest.raises(MaxRetryError) as e:
                https_pool.request("GET", "/", retries=0)
            assert isinstance(e.value.reason, SSLError)
            # Message differs between the stdlib ssl module and other backends.
            assert "doesn't match" in str(
                e.value.reason
            ) or "certificate verify failed" in str(e.value.reason)
    def test_verified_with_bad_ca_certs(self) -> None:
        """Verification against a CA that did not sign the server cert fails."""
        with HTTPSConnectionPool(
            self.host,
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=self.bad_ca_path,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            with pytest.raises(MaxRetryError) as e:
                https_pool.request("GET", "/")
            assert isinstance(e.value.reason, SSLError)
            assert (
                "certificate verify failed" in str(e.value.reason)
                # PyPy is more specific
                or "self signed certificate in certificate chain" in str(e.value.reason)
            ), f"Expected 'certificate verify failed', instead got: {e.value.reason!r}"
    def test_wrap_socket_failure_resource_leak(self) -> None:
        """When the TLS handshake fails the raw socket stays reachable for cleanup."""
        with HTTPSConnectionPool(
            self.host,
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=self.bad_ca_path,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            conn = https_pool._get_conn()
            try:
                with pytest.raises(ssl.SSLError):
                    conn.connect()
                # The underlying socket must still be attached so it can be closed.
                assert conn.sock
            finally:
                conn.close()
    def test_verified_without_ca_certs(self) -> None:
        """CERT_REQUIRED with no trust roots configured must fail verification."""
        # default is cert_reqs=None which is ssl.CERT_NONE
        with HTTPSConnectionPool(
            self.host,
            self.port,
            cert_reqs="CERT_REQUIRED",
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            with pytest.raises(MaxRetryError) as e:
                https_pool.request("GET", "/")
            assert isinstance(e.value.reason, SSLError)
            # there is a different error message depending on whether or
            # not pyopenssl is injected
            assert (
                "No root certificates specified" in str(e.value.reason)
                # PyPy is more specific
                or "self signed certificate in certificate chain" in str(e.value.reason)
                # PyPy sometimes uses all-caps here
                or "certificate verify failed" in str(e.value.reason).lower()
                or "invalid certificate chain" in str(e.value.reason)
            ), (
                "Expected 'No root certificates specified', "
                "'certificate verify failed', or "
                "'invalid certificate chain', "
                "instead got: %r" % e.value.reason
            )
    def test_no_ssl(self) -> None:
        """With no connection class available, both direct and retried use raise SSLError."""
        with HTTPSConnectionPool(self.host, self.port) as pool:
            pool.ConnectionCls = None  # type: ignore[assignment]
            with pytest.raises(SSLError):
                pool._new_conn()
            with pytest.raises(MaxRetryError) as cm:
                pool.request("GET", "/", retries=0)
            assert isinstance(cm.value.reason, SSLError)
    def test_unverified_ssl(self) -> None:
        """Test that bare HTTPSConnection can connect, make requests"""
        with HTTPSConnectionPool(
            self.host,
            self.port,
            cert_reqs=ssl.CERT_NONE,
            ssl_minimum_version=self.tls_version(),
        ) as pool:
            with mock.patch("warnings.warn") as warn:
                r = pool.request("GET", "/")
                assert r.status == 200
                # Disabling verification must warn the user.
                assert warn.called
                # Modern versions of Python, or systems using PyOpenSSL, only emit
                # the unverified warning. Older systems may also emit other
                # warnings, which we want to ignore here.
                calls = warn.call_args_list
                assert InsecureRequestWarning in [x[0][1] for x in calls]
    def test_ssl_unverified_with_ca_certs(self) -> None:
        """CERT_NONE still warns even when (untrusted) CA certs are configured."""
        with HTTPSConnectionPool(
            self.host,
            self.port,
            cert_reqs="CERT_NONE",
            ca_certs=self.bad_ca_path,
            ssl_minimum_version=self.tls_version(),
        ) as pool:
            with mock.patch("warnings.warn") as warn:
                r = pool.request("GET", "/")
                assert r.status == 200
                assert warn.called
                # Modern versions of Python, or systems using PyOpenSSL, only emit
                # the unverified warning. Older systems may also emit other
                # warnings, which we want to ignore here.
                calls = warn.call_args_list
                category = calls[0][0][1]
                assert category == InsecureRequestWarning
    def test_assert_hostname_false(self) -> None:
        """assert_hostname=False disables the hostname check while verifying the cert."""
        with HTTPSConnectionPool(
            "localhost",
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            https_pool.assert_hostname = False
            https_pool.request("GET", "/")
    def test_assert_specific_hostname(self) -> None:
        """An explicit assert_hostname matching the certificate succeeds."""
        with HTTPSConnectionPool(
            "localhost",
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            https_pool.assert_hostname = "localhost"
            https_pool.request("GET", "/")
    def test_server_hostname(self) -> None:
        """server_hostname overrides the pool host for SNI/verification purposes."""
        with HTTPSConnectionPool(
            "127.0.0.1",
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=DEFAULT_CA,
            server_hostname="localhost",
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            conn = https_pool._new_conn()
            conn.request("GET", "/")
            # Assert the wrapping socket is using the passed-through SNI name.
            # pyopenssl doesn't let you pull the server_hostname back off the
            # socket, so only add this assertion if the attribute is there (i.e.
            # the python ssl module).
            if hasattr(conn.sock, "server_hostname"):
                assert conn.sock.server_hostname == "localhost"
    def test_assert_fingerprint_md5(self) -> None:
        """A correct MD5 certificate fingerprint pin allows the request."""
        with HTTPSConnectionPool(
            "localhost",
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            https_pool.assert_fingerprint = (
                "55:39:BF:70:05:12:43:FA:1F:D1:BF:4E:E8:1B:07:1D"
            )
            https_pool.request("GET", "/")
    def test_assert_fingerprint_sha1(self) -> None:
        """A correct SHA-1 certificate fingerprint pin allows the request."""
        with HTTPSConnectionPool(
            "localhost",
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            https_pool.assert_fingerprint = (
                "72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
            )
            https_pool.request("GET", "/")
    def test_assert_fingerprint_sha256(self) -> None:
        """A correct SHA-256 certificate fingerprint pin allows the request."""
        with HTTPSConnectionPool(
            "localhost",
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            https_pool.assert_fingerprint = (
                "E3:59:8E:69:FF:C5:9F:C7:88:87:44:58:22:7F:90:8D:D9:BC:12:C4:90:79:D5:"
                "DC:A8:5D:4F:60:40:1E:A6:D2"
            )
            https_pool.request("GET", "/")
    def test_assert_invalid_fingerprint(self) -> None:
        """Mismatched or malformed fingerprint pins fail with descriptive SSLErrors."""
        def _test_request(pool: HTTPSConnectionPool) -> SSLError:
            # Issue a request and return the SSLError that made it fail.
            with pytest.raises(MaxRetryError) as cm:
                pool.request("GET", "/", retries=0)
            assert isinstance(cm.value.reason, SSLError)
            return cm.value.reason
        with HTTPSConnectionPool(
            self.host,
            self.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=DEFAULT_CA,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            https_pool.assert_fingerprint = (
                "AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA"
            )
            e = _test_request(https_pool)
            expected = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            got = "728b554c9afc1e88a11cad1bb2e7cc3edbc8f98a"
            assert (
                str(e)
                == f'Fingerprints did not match. Expected "{expected}", got "{got}"'
            )
            # Uneven length
            https_pool.assert_fingerprint = "AA:A"
            e = _test_request(https_pool)
            assert "Fingerprint of invalid length:" in str(e)
            # Invalid length
            https_pool.assert_fingerprint = "AA"
            e = _test_request(https_pool)
            assert "Fingerprint of invalid length:" in str(e)
def test_verify_none_and_bad_fingerprint(self) -> None:
        """With CERT_NONE, a bad pinned fingerprint must still fail the request.

        Fingerprint pinning is enforced independently of certificate
        verification, so disabling CA checks does not bypass the pin.
        """
        with HTTPSConnectionPool(
            "127.0.0.1",
            self.port,
            cert_reqs="CERT_NONE",
            ca_certs=self.bad_ca_path,
            # Pin the negotiated TLS version like every sibling test in this
            # class so the per-protocol subclasses (TestHTTPS_TLSv1 etc.)
            # actually exercise their parametrized protocol here too.
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            # SHA-1-length fingerprint that does not match the server cert.
            https_pool.assert_fingerprint = (
                "AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA"
            )
            with pytest.raises(MaxRetryError) as cm:
                https_pool.request("GET", "/", retries=0)
            assert isinstance(cm.value.reason, SSLError)
def test_verify_none_and_good_fingerprint(self) -> None:
        """With CERT_NONE and a bad CA bundle, a matching pin still succeeds."""
        with HTTPSConnectionPool(
            "127.0.0.1",
            self.port,
            cert_reqs="CERT_NONE",
            ca_certs=self.bad_ca_path,
            ssl_minimum_version=self.tls_version(),
        ) as https_pool:
            # Correct SHA-1 fingerprint of the test server's certificate:
            # pinning alone is sufficient for the request to go through.
            https_pool.assert_fingerprint = (
                "72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
            )
            https_pool.request("GET", "/")
@notSecureTransport()
def test_good_fingerprint_and_hostname_mismatch(self) -> None:
# This test doesn't run with SecureTransport because we don't turn off
# hostname validation without turning off all validation, which this
# test doesn't do (deliberately). We should revisit this if we make
# new decisions.
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_fingerprint = (
"72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
)
https_pool.request("GET", "/")
@requires_network()
def test_https_timeout(self) -> None:
timeout = Timeout(total=None, connect=SHORT_TIMEOUT)
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=timeout,
retries=False,
cert_reqs="CERT_REQUIRED",
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/")
timeout = Timeout(read=0.01)
with HTTPSConnectionPool(
self.host,
self.port,
timeout=timeout,
retries=False,
cert_reqs="CERT_REQUIRED",
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.ca_certs = DEFAULT_CA
https_pool.assert_fingerprint = (
"72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
)
timeout = Timeout(total=None)
with HTTPSConnectionPool(
self.host,
self.port,
timeout=timeout,
cert_reqs="CERT_NONE",
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.request("GET", "/")
def test_tunnel(self) -> None:
"""test the _tunnel behavior"""
timeout = Timeout(total=None)
with HTTPSConnectionPool(
self.host,
self.port,
timeout=timeout,
cert_reqs="CERT_NONE",
ssl_minimum_version=self.tls_version(),
) as https_pool:
conn = https_pool._new_conn()
try:
conn.set_tunnel(self.host, self.port)
with mock.patch.object(
conn, "_tunnel", create=True, return_value=None
) as conn_tunnel:
https_pool._make_request(conn, "GET", "/")
conn_tunnel.assert_called_once_with()
finally:
conn.close()
@requires_network()
def test_enhanced_timeout(self) -> None:
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(connect=SHORT_TIMEOUT),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
conn = https_pool._new_conn()
try:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/")
with pytest.raises(ConnectTimeoutError):
https_pool._make_request(conn, "GET", "/")
finally:
conn.close()
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(connect=LONG_TIMEOUT),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/", timeout=Timeout(connect=SHORT_TIMEOUT))
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(total=None),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
conn = https_pool._new_conn()
try:
with pytest.raises(ConnectTimeoutError):
https_pool.request(
"GET", "/", timeout=Timeout(total=None, connect=SHORT_TIMEOUT)
)
finally:
conn.close()
def test_enhanced_ssl_connection(self) -> None:
fingerprint = "72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
assert_fingerprint=fingerprint,
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200
def test_ssl_correct_system_time(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.cert_reqs = "CERT_REQUIRED"
https_pool.ca_certs = DEFAULT_CA
w = self._request_without_resource_warnings("GET", "/")
assert [] == w
def test_ssl_wrong_system_time(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.cert_reqs = "CERT_REQUIRED"
https_pool.ca_certs = DEFAULT_CA
with mock.patch("urllib3.connection.datetime") as mock_date:
mock_date.date.today.return_value = datetime.date(1970, 1, 1)
w = self._request_without_resource_warnings("GET", "/")
assert len(w) == 1
warning = w[0]
assert SystemTimeWarning == warning.category
assert isinstance(warning.message, Warning)
assert str(RECENT_DATE) in warning.message.args[0]
def _request_without_resource_warnings(
        self, method: str, url: str
    ) -> List[warnings.WarningMessage]:
        """Issue one HTTPS request and return every warning it emitted,
        with ResourceWarnings filtered out (they are noise for these tests)."""
        with warnings.catch_warnings(record=True) as captured:
            # Record every warning, even ones already seen in this process.
            warnings.simplefilter("always")
            with HTTPSConnectionPool(
                self.host,
                self.port,
                ca_certs=DEFAULT_CA,
                ssl_minimum_version=self.tls_version(),
            ) as pool:
                pool.request(method, url)
        return [
            entry
            for entry in captured
            if not isinstance(entry.message, ResourceWarning)
        ]
def test_set_ssl_version_to_tls_version(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA
) as https_pool:
https_pool.ssl_version = self.certs["ssl_version"]
r = https_pool.request("GET", "/")
assert r.status == 200, r.data
def test_set_cert_default_cert_required(self) -> None:
conn = VerifiedHTTPSConnection(self.host, self.port)
conn.set_cert()
assert conn.cert_reqs == ssl.CERT_REQUIRED
def test_tls_protocol_name_of_socket(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
conn = https_pool._get_conn()
try:
conn.connect()
if not hasattr(conn.sock, "version"):
pytest.skip("SSLSocket.version() not available")
assert conn.sock.version() == self.tls_protocol_name
finally:
conn.close()
def test_ssl_version_is_deprecated(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA, ssl_version=self.ssl_version()
) as https_pool:
conn = https_pool._get_conn()
try:
with warnings.catch_warnings(record=True) as w:
conn.connect()
finally:
conn.close()
assert len(w) >= 1
assert any(x.category == DeprecationWarning for x in w)
assert any(
str(x.message)
== (
"'ssl_version' option is deprecated and will be removed in "
"a future release of urllib3 2.x. Instead use 'ssl_minimum_version'"
)
for x in w
)
@pytest.mark.parametrize(
"ssl_version", [None, ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_CLIENT]
)
def test_ssl_version_with_protocol_tls_or_client_not_deprecated(
self, ssl_version: Optional[int]
) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
if self.tls_protocol_not_default():
pytest.skip(
f"Skipping because '{self.tls_protocol_name}' isn't set by default"
)
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA, ssl_version=ssl_version
) as https_pool:
conn = https_pool._get_conn()
try:
with warnings.catch_warnings(record=True) as w:
conn.connect()
finally:
conn.close()
assert w == []
def test_no_tls_version_deprecation_with_ssl_context(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
ctx = util.ssl_.create_urllib3_context(ssl_minimum_version=self.tls_version())
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_context=ctx,
) as https_pool:
conn = https_pool._get_conn()
try:
with warnings.catch_warnings(record=True) as w:
conn.connect()
finally:
conn.close()
assert w == []
def test_tls_version_maximum_and_minimum(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
from ssl import TLSVersion
min_max_versions = [
(self.tls_version(), self.tls_version()),
(TLSVersion.MINIMUM_SUPPORTED, self.tls_version()),
(TLSVersion.MINIMUM_SUPPORTED, TLSVersion.MAXIMUM_SUPPORTED),
]
for minimum_version, maximum_version in min_max_versions:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=minimum_version,
ssl_maximum_version=maximum_version,
) as https_pool:
conn = https_pool._get_conn()
try:
conn.connect()
assert conn.sock.version() == self.tls_protocol_name
finally:
conn.close()
@pytest.mark.skipif(sys.version_info < (3, 8), reason="requires python 3.8+")
def test_sslkeylogfile(
self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
) -> None:
if not hasattr(util.SSLContext, "keylog_filename"):
pytest.skip("requires OpenSSL 1.1.1+")
keylog_file = tmp_path / "keylogfile.txt"
monkeypatch.setenv("SSLKEYLOGFILE", str(keylog_file))
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200, r.data
assert keylog_file.is_file(), "keylogfile '%s' should exist" % str(
keylog_file
)
assert keylog_file.read_text().startswith(
"# TLS secrets log file"
), "keylogfile '%s' should start with '# TLS secrets log file'" % str(
keylog_file
)
@pytest.mark.parametrize("sslkeylogfile", [None, ""])
def test_sslkeylogfile_empty(
self, monkeypatch: pytest.MonkeyPatch, sslkeylogfile: Optional[str]
) -> None:
# Assert that an HTTPS connection doesn't error out when given
# no SSLKEYLOGFILE or an empty value (ie 'SSLKEYLOGFILE=')
if sslkeylogfile is not None:
monkeypatch.setenv("SSLKEYLOGFILE", sslkeylogfile)
else:
monkeypatch.delenv("SSLKEYLOGFILE", raising=False)
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as pool:
r = pool.request("GET", "/")
assert r.status == 200, r.data
def test_alpn_default(self) -> None:
"""Default ALPN protocols are sent by default."""
if not has_alpn() or not has_alpn(ssl.SSLContext):
pytest.skip("ALPN-support not available")
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as pool:
r = pool.request("GET", "/alpn_protocol", retries=0)
assert r.status == 200
assert r.data.decode("utf-8") == util.ALPN_PROTOCOLS[0]
def test_default_ssl_context_ssl_min_max_versions(self) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context()
assert ctx.minimum_version == ssl.TLSVersion.TLSv1_2
assert ctx.maximum_version == ssl.TLSVersion.MAXIMUM_SUPPORTED
def test_ssl_context_ssl_version_uses_ssl_min_max_versions(self) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context(ssl_version=self.ssl_version())
assert ctx.minimum_version == self.tls_version()
assert ctx.maximum_version == self.tls_version()
@pytest.mark.usefixtures("requires_tlsv1")
class TestHTTPS_TLSv1(TestHTTPS):
    """Re-run the whole TestHTTPS suite pinned to the TLSv1 protocol."""
    tls_protocol_name = "TLSv1"
    certs = TLSv1_CERTS
@pytest.mark.usefixtures("requires_tlsv1_1")
class TestHTTPS_TLSv1_1(TestHTTPS):
    """Re-run the whole TestHTTPS suite pinned to the TLSv1.1 protocol."""
    tls_protocol_name = "TLSv1.1"
    certs = TLSv1_1_CERTS
@pytest.mark.usefixtures("requires_tlsv1_2")
class TestHTTPS_TLSv1_2(TestHTTPS):
    """Re-run the whole TestHTTPS suite pinned to the TLSv1.2 protocol."""
    tls_protocol_name = "TLSv1.2"
    certs = TLSv1_2_CERTS
@pytest.mark.usefixtures("requires_tlsv1_3")
class TestHTTPS_TLSv1_3(TestHTTPS):
    """Re-run the whole TestHTTPS suite pinned to the TLSv1.3 protocol."""
    tls_protocol_name = "TLSv1.3"
    certs = TLSv1_3_CERTS
class TestHTTPS_Hostname:
def test_can_validate_san(self, san_server: ServerConfig) -> None:
"""Ensure that urllib3 can validate SANs with IP addresses in them."""
with HTTPSConnectionPool(
san_server.host,
san_server.port,
cert_reqs="CERT_REQUIRED",
ca_certs=san_server.ca_certs,
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200
def test_common_name_without_san_fails(self, no_san_server: ServerConfig) -> None:
with HTTPSConnectionPool(
no_san_server.host,
no_san_server.port,
cert_reqs="CERT_REQUIRED",
ca_certs=no_san_server.ca_certs,
) as https_pool:
with pytest.raises(
MaxRetryError,
) as e:
https_pool.request("GET", "/")
assert "mismatch, certificate is not valid" in str(
e.value
) or "no appropriate subjectAltName" in str(e.value)
def test_common_name_without_san_with_different_common_name(
self, no_san_server_with_different_commmon_name: ServerConfig
) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context()
try:
ctx.hostname_checks_common_name = True
except AttributeError:
pytest.skip("Couldn't set 'SSLContext.hostname_checks_common_name'")
with HTTPSConnectionPool(
no_san_server_with_different_commmon_name.host,
no_san_server_with_different_commmon_name.port,
cert_reqs="CERT_REQUIRED",
ca_certs=no_san_server_with_different_commmon_name.ca_certs,
ssl_context=ctx,
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/")
assert "mismatch, certificate is not valid for 'localhost'" in str(
e.value
) or "hostname 'localhost' doesn't match 'example.com'" in str(e.value)
@pytest.mark.parametrize("use_assert_hostname", [True, False])
def test_hostname_checks_common_name_respected(
self, no_san_server: ServerConfig, use_assert_hostname: bool
) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context()
if not hasattr(ctx, "hostname_checks_common_name"):
pytest.skip("Test requires 'SSLContext.hostname_checks_common_name'")
ctx.load_verify_locations(no_san_server.ca_certs)
try:
ctx.hostname_checks_common_name = True
except AttributeError:
pytest.skip("Couldn't set 'SSLContext.hostname_checks_common_name'")
err: Optional[MaxRetryError]
try:
with HTTPSConnectionPool(
no_san_server.host,
no_san_server.port,
cert_reqs="CERT_REQUIRED",
ssl_context=ctx,
assert_hostname=no_san_server.host if use_assert_hostname else None,
) as https_pool:
https_pool.request("GET", "/")
except MaxRetryError as e:
err = e
else:
err = None
# commonName is only valid for DNS names, not IP addresses.
if no_san_server.host == "localhost":
assert err is None
# IP addresses should fail for commonName.
else:
assert err is not None
assert type(err.reason) == SSLError
assert isinstance(
err.reason.args[0], (ssl.SSLCertVerificationError, CertificateError)
)
class TestHTTPS_IPV4SAN:
    """Certificate validation against an IPv4 subjectAltName."""
    def test_can_validate_ip_san(self, ipv4_san_server: ServerConfig) -> None:
        """Ensure that urllib3 can validate SANs with IP addresses in them."""
        with HTTPSConnectionPool(
            ipv4_san_server.host,
            ipv4_san_server.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=ipv4_san_server.ca_certs,
        ) as https_pool:
            # Full verification against the fixture's CA must succeed.
            r = https_pool.request("GET", "/")
            assert r.status == 200
class TestHTTPS_IPV6SAN:
    """Certificate validation against an IPv6 subjectAltName."""
    @pytest.mark.parametrize("host", ["::1", "[::1]"])
    def test_can_validate_ipv6_san(
        self, ipv6_san_server: ServerConfig, host: str
    ) -> None:
        """Ensure that urllib3 can validate SANs with IPv6 addresses in them."""
        # Parametrized over both the bare and the bracketed IPv6 literal form.
        with HTTPSConnectionPool(
            host,
            ipv6_san_server.port,
            cert_reqs="CERT_REQUIRED",
            ca_certs=ipv6_san_server.ca_certs,
        ) as https_pool:
            r = https_pool.request("GET", "/")
            assert r.status == 200
| 37.057325 | 91 | 0.598807 |
6fc40710af7f570cc480ed80d1ed294d3275015a | 2,304 | py | Python | scripts/sanitize_resources.py | DavidLegg/StockSim | b5da697dc5fa0c4126baf79d9a3285762937a2d4 | [
"MIT"
] | 1 | 2020-07-13T09:03:36.000Z | 2020-07-13T09:03:36.000Z | scripts/sanitize_resources.py | DavidLegg/StockSim | b5da697dc5fa0c4126baf79d9a3285762937a2d4 | [
"MIT"
] | null | null | null | scripts/sanitize_resources.py | DavidLegg/StockSim | b5da697dc5fa0c4126baf79d9a3285762937a2d4 | [
"MIT"
] | null | null | null | from glob import glob
import os, sys, re
USAGE="""sanitize_resources [PATTERN]...
Processes files matching PATTERN(s), scrubbing timestamps to be ints, and prices to be floats.
"""
# Unix Timestamp,Date,Symbol,Open,High,Low,Close,Volume
INT_COL_NAMES = ['Unix Timestamp']
FLOAT_COL_NAMES = ['Open', 'High', 'Low', 'Close', 'Volume']
def main():
    """CLI entry point: sanitize every file matching the given glob patterns.

    Prints usage and returns on missing arguments or -h/--help.
    """
    patterns = sys.argv[1:]
    if not patterns:
        # Explicit check instead of `assert`, which is stripped under `python -O`.
        print('Must provide at least 1 pattern')
        print(USAGE)
        return
    if patterns[0].lower() in {'-h', '--help'}:
        print(USAGE)
        return
    print('Finding files...')
    # Set comprehension de-duplicates files matched by more than one pattern.
    fns = {fn for p in patterns for fn in glob(p)}
    total = len(fns)
    print('Found {} files. Processing...{:5.1f}%'.format(total, 0.0), end='', flush=True)
    for i, filename in enumerate(fns, start=1):
        # Sanitize into a sibling temp file, then replace the original.
        output_filename = filename + '.in-progress'
        process(filename, output_filename)
        os.rename(output_filename, filename)
        # Six backspaces rewind over the 6-character "xxx.x%" field.
        # Count from 1 so the display reaches 100.0% on the last file
        # (the original used `i`, which topped out at (total-1)/total).
        print("\b\b\b\b\b\b{:5.1f}%".format(100.0 * i / total), end='', flush=True)
    print(" - Done.")
def process(in_file, out_file, int_cols=None, float_cols=None):
    """Rewrite the CSV *in_file* into *out_file*, normalizing numeric columns.

    Cells in *int_cols* columns are rewritten as ints (via float, so
    "123.0" becomes "123"); cells in *float_cols* columns are rewritten as
    floats. Unparseable cells and short rows are left untouched. The column
    lists default to the module-level INT_COL_NAMES / FLOAT_COL_NAMES, so
    existing callers are unaffected.
    """
    if int_cols is None:
        int_cols = INT_COL_NAMES
    if float_cols is None:
        float_cols = FLOAT_COL_NAMES
    with open(in_file) as f_in, open(out_file, 'w') as f_out:
        # First line is the header; map configured column names to indices,
        # silently skipping any that this file doesn't have.
        header = next(f_in).strip().split(',')
        intCols = [header.index(name) for name in int_cols if name in header]
        floatCols = [header.index(name) for name in float_cols if name in header]
        print(','.join(header), file=f_out)
        for line in f_in:
            parts = line.strip().split(',')
            for i in intCols:
                try:
                    # float() first so values like "1609459200.0" coerce cleanly.
                    parts[i] = str(int(float(parts[i])))
                except (ValueError, IndexError):
                    pass  # non-numeric cell or short row: leave as-is
            for i in floatCols:
                try:
                    parts[i] = str(float(parts[i]))
                except (ValueError, IndexError):
                    pass
            print(','.join(parts), file=f_out)
| 33.882353 | 98 | 0.515191 |
bc447937bbff8d62a97087071c7ff8bd624adb01 | 1,145 | py | Python | src/python/pants/core/goals/style_request.py | jperkelens/pants | b7ad997b5ef9175cc5e22e36574d8590bc8da120 | [
"Apache-2.0"
] | null | null | null | src/python/pants/core/goals/style_request.py | jperkelens/pants | b7ad997b5ef9175cc5e22e36574d8590bc8da120 | [
"Apache-2.0"
] | null | null | null | src/python/pants/core/goals/style_request.py | jperkelens/pants | b7ad997b5ef9175cc5e22e36574d8590bc8da120 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from abc import ABCMeta
from dataclasses import dataclass
from typing import ClassVar, Generic, Iterable, Optional, Type, TypeVar
from pants.engine.collection import Collection
from pants.engine.fs import Snapshot
from pants.engine.target import FieldSetWithOrigin
from pants.util.meta import frozen_after_init
_FS = TypeVar("_FS", bound=FieldSetWithOrigin)
@frozen_after_init
@dataclass(unsafe_hash=True)
class StyleRequest(Generic[_FS], metaclass=ABCMeta):
    """A request to style or lint a collection of `FieldSet`s.
    Should be subclassed for a particular style engine in order to support autoformatting or
    linting.
    """
    # Set by each subclass to the concrete FieldSet type it operates on.
    field_set_type: ClassVar[Type[_FS]]
    field_sets: Collection[_FS]
    # Snapshot produced by a formatter that ran earlier, if any — presumably
    # used to chain formatters so each runs on the previous result (confirm
    # against the fmt goal's callers).
    prior_formatter_result: Optional[Snapshot] = None
    def __init__(
        self, field_sets: Iterable[_FS], *, prior_formatter_result: Optional[Snapshot] = None,
    ) -> None:
        # Coerce any iterable into the engine's Collection type before
        # @frozen_after_init freezes the instance against further mutation.
        self.field_sets = Collection[_FS](field_sets)
        self.prior_formatter_result = prior_formatter_result
4673e8d603ddaf8ae66fa1c73718d10927d11f27 | 14,958 | py | Python | test/functional/interface_rest.py | tachacoin/tachacoin | a40df0f17ac1c201d3e339944c8c74e668c61a1b | [
"MIT"
] | null | null | null | test/functional/interface_rest.py | tachacoin/tachacoin | a40df0f17ac1c201d3e339944c8c74e668c61a1b | [
"MIT"
] | null | null | null | test/functional/interface_rest.py | tachacoin/tachacoin | a40df0f17ac1c201d3e339944c8c74e668c61a1b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
import json
from struct import pack, unpack
import http.client
import urllib.parse
from test_framework.tachacoinconfig import COINBASE_MATURITY, INITIAL_BLOCK_REWARD
from test_framework.tachacoin import convert_btc_address_to_tachacoin
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
hex_str_to_bytes,
)
from test_framework.messages import CBlockHeader
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
class ReqType(Enum):
    """Response-format suffix to append to a /rest URI (.json/.bin/.hex)."""
    JSON = 1
    BIN = 2
    HEX = 3
class RetType(Enum):
    """How test_rest_request should hand back the HTTP response."""
    OBJ = 1  # the raw http.client response object (caller reads it)
    BYTES = 2  # the response body as bytes
    JSON = 3  # the body parsed as JSON (floats as Decimal)
def filter_output_indices_by_value(vouts, value):
    """Yield the vout index (``n``) of every output whose ``value`` matches."""
    yield from (entry['n'] for entry in vouts if entry['value'] == value)
class RESTTest (BitcoinTestFramework):
    def set_test_params(self):
        # Two fresh nodes: node0 runs with -rest enabled and is the node under
        # test; node1 is a plain peer used for mining without touching node0.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [["-rest"], []]
    def skip_test_if_missing_module(self):
        # The test sends wallet transactions, so a wallet-enabled build is required.
        self.skip_if_no_wallet()
    def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
        """Issue one request against node0's /rest endpoint and return the
        response in the form selected by *ret_type*, after asserting the
        HTTP status equals *status*."""
        # The requested body format is selected via the URI suffix.
        rest_uri = '/rest' + uri
        if req_type == ReqType.JSON:
            rest_uri += '.json'
        elif req_type == ReqType.BIN:
            rest_uri += '.bin'
        elif req_type == ReqType.HEX:
            rest_uri += '.hex'
        conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
        self.log.debug('%s %s %s', http_method, rest_uri, body)
        if http_method == 'GET':
            conn.request('GET', rest_uri)
        elif http_method == 'POST':
            conn.request('POST', rest_uri, body)
        resp = conn.getresponse()
        assert_equal(resp.status, status)
        if ret_type == RetType.OBJ:
            # Caller is responsible for reading/closing the raw response.
            return resp
        elif ret_type == RetType.BYTES:
            return resp.read()
        elif ret_type == RetType.JSON:
            # parse_float=Decimal keeps monetary amounts exact for comparisons.
            return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)
def run_test(self):
self.url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mine blocks and send Bitcoin to node 1")
# Random address so node1's balance doesn't increase
not_related_address = convert_btc_address_to_tachacoin("2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generatetoaddress(COINBASE_MATURITY, not_related_address)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), INITIAL_BLOCK_REWARD)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.log.info("Test the /tx URI")
json_obj = self.test_rest_request("/tx/{}".format(txid))
assert_equal(json_obj['txid'], txid)
# Check hex format response
hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
json_obj['size']*2)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
self.log.info("Query an unspent TXO using the /getutxos URI")
self.nodes[1].generatetoaddress(1, not_related_address)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
# Check chainTip response
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))
self.log.info("Query a spent TXO using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
# Check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
# Check bitmap
assert_equal(json_obj['bitmap'], "0")
self.log.info("Query two TXOs using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
self.log.info("Query the TXOs using the /getutxos URI with a binary response")
bin_request = b'\x01\x02'
for txid, n in [spending, spent]:
bin_request += hex_str_to_bytes(txid)
bin_request += pack("i", n)
bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
output = BytesIO(bin_response)
chain_height, = unpack("i", output.read(4))
response_hash = binascii.hexlify(output.read(32)[::-1]).decode('ascii')
assert_equal(bb_hash, response_hash) # check if getutxo's chaintip during calculation was fine
assert_equal(chain_height, COINBASE_MATURITY+2) # chain height must be 102
self.log.info("Test the /getutxos URI with and without /checkmempool")
# Create a transaction, check that it's found with /checkmempool, but
# not found without. Then confirm the transaction and check that it's
# found with or without /checkmempool.
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_obj = self.test_rest_request("/tx/{}".format(txid))
# get the spent output to later check for utxo (should be spent by then)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 0)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 0)
self.nodes[0].generate(1)
self.sync_all()
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
# Do some invalid requests
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
# Test limits
long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
self.nodes[0].generate(1) # generate block to not affect upcoming tests
self.sync_all()
self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
bb_hash = self.nodes[0].getbestblockhash()
# Check result if block does not exists
assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)
# Check result if block is not in the active chain
self.nodes[0].invalidateblock(bb_hash)
assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
self.test_rest_request('/block/{}'.format(bb_hash))
self.nodes[0].reconsiderblock(bb_hash)
# Check binary format
response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
response_bytes = response.read()
# Compare with block header
response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_equal(int(response_header.getheader('content-length')), 181)
response_header_bytes = response_header.read()
assert_equal(response_bytes[:181], response_header_bytes)
# Check block hex format
response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_hex_bytes = response_hex.read().strip(b'\n')
assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
# Compare with hex block header
response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)
# Check json format
block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
assert_equal(block_json_obj['hash'], bb_hash)
assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
# Check hex/bin format
resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
blockhash = binascii.hexlify(resp_bytes[::-1]).decode('utf-8')
assert_equal(blockhash, bb_hash)
# Check invalid blockhashbyheight requests
resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: -1")
self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
# Compare with json block header
json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
assert_equal(len(json_obj), 1) # ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same
# Compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
assert_equal(json_obj[0][key], rpc_block_json[key])
# See if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
assert_equal(len(json_obj), 5) # now we should have 5 header objects
self.log.info("Test tx inclusion in the /mempool and /block URIs")
# Make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
self.sync_all()
# Check that there are exactly 3 transactions in the TX memory pool before generating the block
json_obj = self.test_rest_request("/mempool/info")
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# Check that there are our submitted transactions in the TX memory pool
json_obj = self.test_rest_request("/mempool/contents")
for i, tx in enumerate(txs):
assert tx in json_obj
assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
assert_equal(json_obj[tx]['depends'], txs[i - 1:i])
# Now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
# Check if the 3 tx show up in the new block
json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
# Check the same but without tx details
json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
for tx in txs:
assert tx in json_obj['tx']
self.log.info("Test the /chaininfo URI")
bb_hash = self.nodes[0].getbestblockhash()
json_obj = self.test_rest_request("/chaininfo")
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest().main()
| 44.918919 | 153 | 0.665731 |
dfed13a266993f7ed87183dcac1ecce1e05e4f99 | 30,341 | py | Python | tensorflow/python/eager/def_function_xla_jit_test.py | TL-Rubick/tensorflow | 6cf1ccf6060a95aad3ccc84544d0aa166990ec72 | [
"Apache-2.0"
] | 3 | 2020-12-27T23:28:09.000Z | 2022-03-26T02:10:18.000Z | tensorflow/python/eager/def_function_xla_jit_test.py | TL-Rubick/tensorflow | 6cf1ccf6060a95aad3ccc84544d0aa166990ec72 | [
"Apache-2.0"
] | 3 | 2021-08-25T15:06:34.000Z | 2022-02-10T02:50:24.000Z | tensorflow/python/eager/def_function_xla_jit_test.py | TL-Rubick/tensorflow | 6cf1ccf6060a95aad3ccc84544d0aa166990ec72 | [
"Apache-2.0"
] | 2 | 2017-10-10T02:34:56.000Z | 2019-04-29T15:13:56.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DefFunctionTest(xla_test.XLATestCase):
def testAutoclusteringWithTfFunction(self):
if 'tpu' in self.device.lower():
self.skipTest('Autoclustering does not run on TPU')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=False)
def outer(a, b, c):
return a * inner(b, c) + c
@def_function.function(jit_compile=True)
def inner(b, c):
return b + c * b
i1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
i2 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
i3 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
with context.collect_graphs(optimized=True) as graphs:
outer(i1, i2, i3)
if test_util.is_xla_enabled():
self.assertIn('_XlaRun', [n.op for n in graphs[0].node])
else:
self.assertNotIn('_XlaRun', [n.op for n in graphs[0].node])
def testBasic(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x, a):
return x + a
func = def_function.function(fn, jit_compile=False)
xla_func = def_function.function(fn, jit_compile=True)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([2, 3, 3, 4, 4], func(inputs, 1))
self.assertAllClose([2, 3, 3, 4, 4], xla_func(inputs, 1))
  def testBasicInt32(self):
    """A jit-compiled function handles explicit int32 inputs."""
    with ops.device('device:{}:0'.format(self.device)):
      @def_function.function(jit_compile=True)
      def fn(x, a):
        return x + a
      # Force int32 rather than the default inferred dtype.
      inputs = constant_op.constant([1, 2, 2, 3, 3], dtype=dtypes.int32)
      self.assertAllClose([2, 3, 3, 4, 4], fn(inputs, 1))
  def testDerivative(self):
    """Gradients flow through a jit-compiled function, and the
    must-compile attribute propagates to the generated derivative
    functions."""
    with ops.device('device:{}:0'.format(self.device)):
      def fn(x, a):
        return 2 * x + a
      xla_func = def_function.function(fn, jit_compile=True)
      with backprop.GradientTape() as tape:
        inputs = constant_op.constant([1., 2., 2., 3., 3.])
        tape.watch(inputs)
        outputs = xla_func(inputs, 1)
      # d/dx (2x + a) == 2 for every element.
      self.assertAllClose([2, 2, 2, 2, 2], tape.gradient(outputs, inputs))
      # pylint: disable=protected-access
      (forward, backward) = xla_func.get_concrete_function(
          inputs, 1)._delayed_rewrite_functions.forward_backward()
      # Check that the must-compile attribute gets correctly propagated to the
      # created derivatives.
      self.assertTrue(backward.function_def.attr['_XlaMustCompile'])
      self.assertTrue(forward.definition.attr['_XlaMustCompile'])
# Calling function with jit_compile=True from
# jit_compile=False should compile the inner func.
def testNestedCall(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162800687: Inner function runs on host')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x, a):
return x + a
@def_function.function(jit_compile=False)
def fn2(x, a):
return fn(x, a)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([2, 3, 3, 4, 4], fn2(inputs, 1))
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns'
' wrong status type')
def testNestedCallUnsupportedOps(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x):
return array_ops.unique(x).y
xla_func = def_function.function(fn, jit_compile=True)
def fn2(x):
return xla_func(x)
func = def_function.function(fn2, jit_compile=False)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError,
'not compilable'):
func(inputs)
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns'
' wrong status type')
def testUnsupportedOps(self):
if 'tpu' in self.device.lower():
self.skipTest('XLA TPU supports tf.unique')
with ops.device('device:{}:0'.format(self.device)):
def fn(x):
return array_ops.unique(x).y # Unique is not supported by XLA
func = def_function.function(fn, jit_compile=False)
xla_func = def_function.function(fn, jit_compile=True)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([1, 2, 3], func(inputs))
with self.assertRaisesRegex(errors.InvalidArgumentError,
'not compilable'):
xla_func(inputs)
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonLocationInMetadata(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x, y):
return x + y
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertIn('def_function_xla_jit_test',
fn.experimental_get_compiler_ir(inputs, inputs)())
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonLocationNestedInMetadata(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x, y):
return x + y
@def_function.function(jit_compile=True)
def g(x, y):
return f(x, y)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertIn('def_function_xla_jit_test',
g.experimental_get_compiler_ir(inputs, inputs)())
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonStackTrace(self):
if 'tpu' in self.device.lower():
self.skipTest('XLA TPU supports tf.unique')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x):
return array_ops.unique(x).y # COMMENT2
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'COMMENT2'):
fn(inputs)
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonStackTraceControlFlow(self):
if 'tpu' in self.device.lower():
self.skipTest('XLA TPU supports tf.unique')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
x = ops.convert_to_tensor(x)
def body(i, a):
return i + 1 + array_ops.unique([i]).y[0], \
control_flow_ops.cond(i > 2, lambda: a + (x**2), lambda: a + 3)
return control_flow_ops.while_loop(
lambda i, *_: i < 10,
body, (constant_op.constant(0), constant_op.constant(3.)),
maximum_iterations=10)[1]
with self.assertRaisesRegex(errors.InvalidArgumentError, r'\.y\[0\]'):
f(constant_op.constant(100.0))
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonStackTraceUncompiledWithinCompiled(self):
if 'tpu' in self.device.lower():
self.skipTest('XLA TPU supports tf.unique')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function
def fn(x):
return array_ops.unique(x).y # COMMENT3
@def_function.function(jit_compile=True)
def outer(x):
return fn(x)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'COMMENT3'):
outer(inputs)
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonStackTraceCompiledWithinUncompiled(self):
if 'tpu' in self.device.lower():
self.skipTest('XLA TPU supports tf.unique')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x):
return array_ops.unique(x).y # COMMENT1
@def_function.function
def outer(x):
return fn(x)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'COMMENT1'):
outer(inputs)
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testPythonStackTraceCompiledWithinCompiled(self):
if 'tpu' in self.device.lower():
self.skipTest('XLA TPU supports tf.unique')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x):
return array_ops.unique(x).y # COMMENT4
@def_function.function
def outer(x):
return fn(x)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError, 'COMMENT4'):
outer(inputs)
def testFunctionGradient(self):
with ops.device('device:{}:0'.format(self.device)):
v = resource_variable_ops.ResourceVariable(2.0)
def fn(x):
return v * x
func = def_function.function(fn, jit_compile=False)
xla_func = def_function.function(fn, jit_compile=True)
def run_and_check(test_func):
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = test_func(x)
dy = tape.gradient(y, v)
self.assertAllClose(6.0, y)
self.assertAllClose(3.0, dy)
run_and_check(func)
run_and_check(xla_func)
@test_util.disable_mlir_bridge('TODO(b/162521846): MLIR bridge fails'
' msan, function library not found')
def testControlFlow(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
assert control_flow_util.GraphOrParentsInXlaContext(
ops.get_default_graph())
x = ops.convert_to_tensor(x)
def body(i, a):
return i + 1, control_flow_ops.cond(i > 2, lambda: a + (x**2),
lambda: a + 3)
return control_flow_ops.while_loop(
lambda i, *_: i < 10,
body, (constant_op.constant(0), constant_op.constant(3.)),
maximum_iterations=10)[1]
@def_function.function(jit_compile=True)
def g(x):
x = ops.convert_to_tensor(x)
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
return y, tape.gradient(y, x)
# Test that XLA context gets correctly propagated.
g._get_concrete_function_garbage_collected(2.0)(2.0)
self.assertAllClose(40.0, f(2.0))
self.assertAllClose([40.0, 28.0], g(2.0))
self.assertAllClose(40.0, f.get_concrete_function(2.0)(2.0))
self.assertAllClose([40.0, 28.0], g.get_concrete_function(2.0)(2.0))
def testMethodCompilation(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@def_function.function(jit_compile=True)
def f1(self, x, a):
return x + a
inputs = constant_op.constant([1, 2, 2, 3, 3])
c = C()
self.assertAllClose([2, 3, 3, 4, 4], c.f1(inputs, 1))
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns '
' wrong status type')
def testMethodCompilationUnsupportedFunc(self):
if 'tpu' in self.device.lower():
self.skipTest('XLA TPU supports tf.unique')
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@def_function.function(jit_compile=True)
def f1(self, x):
return array_ops.unique(x).y
inputs = constant_op.constant([1, 2, 2, 3, 3])
c = C()
with self.assertRaisesRegex(errors.InvalidArgumentError,
'not compilable'):
c.f1(inputs)
def testMustBeConstantPropagation(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162799319: Cannot resolve constant on TPU')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f():
return constant_op.constant([0, 2, 1], dtype=dtypes.int32)
@def_function.function(jit_compile=True)
def g(a, b):
return array_ops.transpose(a, b)
@def_function.function
def z():
return g(array_ops.ones([3, 4, 3], dtype=dtypes.float32), f())
z()
@test_util.disable_mlir_bridge('TODO(b/162271237): argmax gives different'
' results in MLIR-based bridge')
def testArgMinMax(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def argmax(x):
return math_ops.argmax(x)
@def_function.function(jit_compile=True)
def argmin(x):
return math_ops.argmin(x)
self.assertAllClose(0, argmax(array_ops.ones([10], dtype=dtypes.float32)))
self.assertAllClose(0, argmax(array_ops.ones([10])))
self.assertAllClose(0, argmin(array_ops.ones([10], dtype=dtypes.float32)))
self.assertAllClose(0, argmin(array_ops.ones([10])))
@test_util.disable_mlir_bridge('TensorArray support not implemented')
def testErrorMessagePassingTensorArray(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=1, element_shape=[])
ta = ta.write(0, 2 * x)
y = ta.read(0)
return y
x = constant_op.constant(3.14)
with backprop.GradientTape() as tape:
tape.watch(x)
with self.assertRaisesRegex(errors.UnimplementedError,
'TensorList crossing the XLA/TF boundary'):
y = f(x)
tape.gradient(y, x)
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(jit_compile=True)(f)
inputs = constant_op.constant([3.14, 2.68, 7.69])
self.assertAllClose([6.28, 5.36, 15.38, 9.42, 8.04, 23.07], f(inputs))
self.assertAllClose(compiled_f(inputs), f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2Multidim(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3, 2])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(jit_compile=True)(f)
inputs = constant_op.constant([[3.14, 21.1], [2.68, 22.2], [7.69, 23.3]])
self.assertAllClose(f(inputs), compiled_f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2Scalars(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[1])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(jit_compile=True)(f)
inputs = constant_op.constant([3.14])
self.assertAllClose(f(inputs), compiled_f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatGrad(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
def g():
x = constant_op.constant([3.14, 2.68, 7.69])
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
return tape.gradient(y, x)
compiled_g = def_function.function(jit_compile=True)(g)
self.assertAllClose([5.0, 5.0, 5.0], g())
self.assertAllClose(compiled_g(), g())
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatGradNestedCompile(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
@def_function.function(jit_compile=True)
def g():
x = constant_op.constant([3.14, 2.68, 7.69])
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
out = tape.gradient(y, x)
return out
self.assertAllClose([5.0, 5.0, 5.0], g())
  def testCumsum(self):
    """float64 cumsum compiles and yields the running sums."""
    # TPU lacks the 64-bit rewrite this op needs (see bug below).
    if 'tpu' in self.device.lower():
      self.skipTest('b/162771302: 64bit rewrite of cumsum not supported')
    with ops.device('device:{}:0'.format(self.device)):
      @def_function.function(jit_compile=True)
      def f(x):
        return math_ops.cumsum(x)
      f64_input = constant_op.constant([1.1, 2.2, 3.3], dtype=dtypes.float64)
      self.assertAllClose([1.1, 3.3, 6.6], f(f64_input))
def testNoExcessiveRetracing(self):
with ops.device('device:{}:0'.format(self.device)):
inner_retracings = 0
@def_function.function(jit_compile=True)
def inner(a, b):
nonlocal inner_retracings
inner_retracings += 1
return a * b + a
def outer(a, b):
return inner(a, b)
func_input = random_ops.random_normal([10, 10])
for _ in range(2):
def_function.function(outer)(func_input, func_input)
self.assertEqual(inner_retracings, 1)
def testUpdateVariable(self):
with ops.device('device:{}:0'.format(self.device)):
on_gpu = 'gpu' in self.device.lower()
v = variables.Variable([3.1, 3.2])
@def_function.function(jit_compile=True)
def update_var(a, b):
v.assign_add(a * b)
arg1 = random_ops.random_normal([2])
arg2 = random_ops.random_normal([2])
initial_usage = context.context().get_total_memory_usage(
v.device) if on_gpu else 0
update_var(arg1, arg2)
final_usage = context.context().get_total_memory_usage(
v.device) if on_gpu else 0
self.assertEqual(initial_usage, final_usage)
@test_util.disable_mlir_bridge('TODO(b/162381930): MLIR bridge renames '
' functions')
def testUpdateVariableInClass(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@def_function.function(jit_compile=True)
def update_var(self, a, b):
if not hasattr(self, 'v'):
self.v = variables.Variable(3.1)
self.v.assign_add(a * b)
c = C()
@def_function.function
def outer():
c.update_var(constant_op.constant(0.7), constant_op.constant(0.6))
outer()
self.assertAllClose(c.v, 3.52)
@test_util.disable_mlir_bridge('TODO(b/162801728): MLIR bridge causes '
' invalid free on TPUs')
def testUpdateVariableMultipleOutputs(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable(3.1)
@def_function.function(jit_compile=True)
def update_var(a, b):
v.assign_add(a * b)
return a * b + v
out = update_var(constant_op.constant(0.7), constant_op.constant(0.6))
self.assertAllClose(v, 3.52)
self.assertAllClose(out, 3.94)
def testReturnIdentity(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(a, b):
return (a, b)
a = random_ops.random_normal([10, 10])
b = random_ops.random_normal([10, 10])
on_gpu = 'gpu' in self.device.lower()
initial_usage = context.context().get_total_memory_usage(
b.backing_device) if on_gpu else 0
f(a, b)
final_usage = context.context().get_total_memory_usage(
b.backing_device) if on_gpu else 0
self.assertEqual(initial_usage, final_usage)
def testGetCompilerIrConstants(self):
if 'tpu' in self.device.lower():
self.skipTest('TPU generates different HLO')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(a, b):
return array_ops.transpose(a, b)
a = array_ops.ones([3, 4, 3], dtype=dtypes.float32)
b = constant_op.constant([0, 2, 1], dtype=dtypes.int32)
self.assertIn('{1,2,0}',
f.experimental_get_compiler_ir(a, b)(stage='optimized_hlo'))
@test_util.disable_mlir_bridge('TODO(b/168732524): MLIR bridge does not '
' optimize single-element tuples to scalars')
def testGetCompilerIrResourceVars(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([3.1, 3.2])
@def_function.function(jit_compile=True)
def f(a, b):
v.assign_add(a * b)
a = random_ops.random_normal([2])
b = random_ops.random_normal([2])
self.assertIn('input_output_alias={ {}: (2, {}, may-alias) }',
f.experimental_get_compiler_ir(a, b)('optimized_hlo'))
def testGetCompilerIrNotCompiled(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function
def f(x):
return x + 1
a = random_ops.random_normal([10, 10])
with self.assertRaisesRegex(ValueError,
'marked with \'jit_compile'):
f.experimental_get_compiler_ir(a)()
def testGetCompilerIrNested(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def fn(x, a):
return x + a
@def_function.function(jit_compile=False)
def fn2(x, a):
fn.experimental_get_compiler_ir(x, a)()
return fn(x, a)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(TypeError, '"Graph" tensor'):
fn2(inputs, 1)
def testGetCompilerIrKwargs(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([0.1, 0.1])
@def_function.function(jit_compile=True)
def f(a, b):
return (a + b) * v
a = constant_op.constant([1.1, 1.1])
b = constant_op.constant([2.2, 2.2])
self.assertIn('multiply',
f.experimental_get_compiler_ir(b=a, a=b)(stage='hlo'))
def testGetCompilerIrDot(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(a, b):
return a + b
a = constant_op.constant([1.1, 1.1])
b = constant_op.constant([2.2, 2.2])
self.assertIn(
'label',
f.experimental_get_compiler_ir(a, b)(stage='optimized_hlo_dot'))
def testGetCompilerIrNoDevicePlacement(self):
if 'gpu' not in self.device.lower():
self.skipTest('Testing get_compiler_ir on GPUs without placement')
@def_function.function(jit_compile=True)
def f(a, b):
return a + b
a = constant_op.constant([1.1, 1.1])
b = constant_op.constant([2.2, 2.2])
self.assertIn(
'label',
f.experimental_get_compiler_ir(a, b)(stage='optimized_hlo_dot'))
def testGetCompilerIrNonTensors(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(l):
return l[0] + l[1]
l = [constant_op.constant(1.1), constant_op.constant(2.2)]
self.assertIn('tuple',
f.experimental_get_compiler_ir(l)())
def testConstantOnWrongDevice(self):
with ops.device('device:{}:0'.format(self.device)):
s = random_ops.random_uniform([2], 1, 10, dtypes.int32)
l = random_ops.random_normal([s[0] * s[1]])
@def_function.function(jit_compile=True)
def f(l):
return array_ops.reshape(l, s)
self.assertIn('tuple',
f.experimental_get_compiler_ir(l)())
@test_util.disable_mlir_bridge('TODO(b/172845417): MLIR bridge does not '
'support getting constants out of resources')
def testGetConstantOutOfResourceVariable(self):
with ops.device('device:{}:0'.format(self.device)):
# Use floats to force device placement.
a = variables.Variable(50.0)
b = variables.Variable(2.0)
@def_function.function(jit_compile=True)
def f(x):
return array_ops.reshape(
x, [math_ops.cast(a, dtypes.int32),
math_ops.cast(b, dtypes.int32)])
# OK since the value is known at compile time.
out = f(random_ops.random_normal([10, 10]))
self.assertEqual(out.shape[0], 50)
self.assertEqual(out.shape[1], 2)
@test_util.disable_mlir_bridge('TODO(b/172845417): MLIR bridge does not '
'support getting constants out of resources')
def testGetConstantOutOfResourceVariableAfterWrite(self):
with ops.device('device:{}:0'.format(self.device)):
# Use floats to force device placement.
a = variables.Variable(50.0)
b = variables.Variable(2.0)
@def_function.function(jit_compile=True)
def f(x, val1, val2):
a.assign(math_ops.cast(val1, dtypes.float32))
b.assign(math_ops.cast(val2, dtypes.float32))
return array_ops.reshape(
x, [math_ops.cast(a, dtypes.int32),
math_ops.cast(b, dtypes.int32)])
val1 = constant_op.constant(2)
val2 = constant_op.constant(50)
# Returns an error, since the value known at compile time was overriden.
with self.assertRaisesRegex(errors.InvalidArgumentError,
'concrete values at compile time'):
f(random_ops.random_normal([10, 10]), val1, val2)
@test_util.disable_mlir_bridge('TODO(b/172845417): MLIR bridge does not '
'support getting constants out of resources')
def testGetConstantOutOfResourceVariableBeforeWrite(self):
with ops.device('device:{}:0'.format(self.device)):
# Use floats to force device placement.
a = variables.Variable(50.0)
b = variables.Variable(2.0)
@def_function.function(jit_compile=True)
def f(x, val1, val2):
out = array_ops.reshape(
x, [math_ops.cast(a, dtypes.int32),
math_ops.cast(b, dtypes.int32)])
a.assign(math_ops.cast(val1, dtypes.float32))
b.assign(math_ops.cast(val2, dtypes.float32))
return out
val1 = constant_op.constant(2)
val2 = constant_op.constant(50)
# OK since the write happens after the reshape.
out = f(random_ops.random_normal([10, 10]), val1, val2)
self.assertEqual(out.shape[0], 50)
self.assertEqual(out.shape[1], 2)
def testTfAssert(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f(x):
control_flow_ops.Assert(x == 1, ['Wrong value'])
f(constant_op.constant(1))
@test_util.disable_mlir_bridge('TODO(b/155782411): MLIR bridge does not'
'support stack traces')
def testTensorArrayErrorMessage(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(jit_compile=True)
def f():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
size=2,
dynamic_size=True,
element_shape=(None,))
return ta.concat() # EXPECTED_MESSAGE
with self.assertRaisesRegex(errors.InvalidArgumentError,
'EXPECTED_MESSAGE'):
f()
if __name__ == '__main__':
  # The tests above rely on TF2-style eager semantics.
  ops.enable_eager_execution()
  test.main()
| 33.862723 | 80 | 0.62727 |
dea18888240dd83f462a4e5c60d7ec0dfd7093de | 2,373 | py | Python | aws_marketplace/using_algorithms/autogluon/src/algorithm_arns.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 5 | 2019-01-19T23:53:35.000Z | 2022-01-29T14:04:31.000Z | aws_marketplace/using_algorithms/autogluon/src/algorithm_arns.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 2 | 2021-08-25T16:15:24.000Z | 2022-02-10T02:49:50.000Z | aws_marketplace/using_algorithms/autogluon/src/algorithm_arns.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 7 | 2020-03-04T22:23:51.000Z | 2021-07-13T14:05:46.000Z | class AlgorithmArnProvider:
@staticmethod
def get_algorithm_arn(current_region):
mapping = {
"ap-northeast-1" : "arn:aws:sagemaker:ap-northeast-1:977537786026:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"ap-northeast-2" : "arn:aws:sagemaker:ap-northeast-2:745090734665:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"ap-southeast-1" : "arn:aws:sagemaker:ap-southeast-1:192199979996:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"ap-southeast-2" : "arn:aws:sagemaker:ap-southeast-2:666831318237:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"us-east-1" : "arn:aws:sagemaker:us-east-1:865070037744:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"eu-central-1" : "arn:aws:sagemaker:eu-central-1:446921602837:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"ap-south-1" : "arn:aws:sagemaker:ap-south-1:077584701553:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"sa-east-1" : "arn:aws:sagemaker:sa-east-1:270155090741:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"ca-central-1" : "arn:aws:sagemaker:ca-central-1:470592106596:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"eu-west-1" : "arn:aws:sagemaker:eu-west-1:985815980388:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"eu-west-2" : "arn:aws:sagemaker:eu-west-2:856760150666:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"eu-west-3" : "arn:aws:sagemaker:eu-west-3:843114510376:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"eu-north-1" : "arn:aws:sagemaker:eu-north-1:136758871317:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"us-west-1" : "arn:aws:sagemaker:us-west-1:382657785993:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"us-east-2" : "arn:aws:sagemaker:us-east-2:057799348421:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9",
"us-west-2" : "arn:aws:sagemaker:us-west-2:594846645681:algorithm/autogluon-tabular-v3-3-3b713280f53340e66804815d6707dbc9"
}
return mapping[current_region] | 103.173913 | 143 | 0.748841 |
d882617a2750f600393bd53539ed8a04e06f8413 | 3,207 | py | Python | jupytext/languages.py | hwasiti/jupytext | b531be1e1933531ac6ffef488232285766f05cea | [
"MIT"
] | null | null | null | jupytext/languages.py | hwasiti/jupytext | b531be1e1933531ac6ffef488232285766f05cea | [
"MIT"
] | null | null | null | jupytext/languages.py | hwasiti/jupytext | b531be1e1933531ac6ffef488232285766f05cea | [
"MIT"
] | null | null | null | """Determine notebook or cell language"""
import re
# Languages that Jupyter can run via a %%lang cell magic.
_JUPYTER_LANGUAGES = ['R', 'bash', 'sh', 'python', 'python2', 'python3', 'javascript', 'js', 'perl']
# One pre-compiled '^%%lang' matcher per language, in the same order.
_JUPYTER_LANGUAGES_RE = [re.compile(r"^%%{}\s*".format(lang)) for lang in _JUPYTER_LANGUAGES]
# Script extension -> language name and single-line comment prefix.
_SCRIPT_EXTENSIONS = {'.py': {'language': 'python', 'comment': '#'},
                      '.R': {'language': 'R', 'comment': '#'},
                      '.r': {'language': 'R', 'comment': '#'},
                      '.jl': {'language': 'julia', 'comment': '#'},
                      '.cpp': {'language': 'c++', 'comment': '//'},
                      '.ss': {'language': 'scheme', 'comment': ';;'},
                      '.sh': {'language': 'bash', 'comment': '#'}}
def default_language_from_metadata_and_ext(notebook, ext):
    """Return the default language for a notebook read from a file with
    the given extension.

    The kernelspec language wins, then jupytext's main_language, then
    the language implied by the extension ('python' as the fallback).
    'C++' kernel names are normalized to lowercase 'c++'.
    """
    metadata = notebook.metadata
    candidates = (
        metadata.get('kernelspec', {}).get('language'),
        metadata.get('jupytext', {}).get('main_language'),
        _SCRIPT_EXTENSIONS.get(ext, {}).get('language', 'python'),
    )
    language = next(lang for lang in candidates if lang)
    return 'c++' if language.startswith('C++') else language
def set_main_and_cell_language(metadata, cells, ext):
    """Set main language for the given collection of cells, and
    use magics for cells that use other languages

    The main language is taken from the kernelspec or jupytext metadata
    when available; otherwise it is inferred as the most frequent cell
    language and stored in ``metadata['jupytext']['main_language']``.
    Cells in a different Jupyter language lose their ``language``
    metadata entry and gain a ``%%lang`` magic line instead (mutates
    ``metadata`` and ``cells`` in place)."""
    default_from_ext = _SCRIPT_EXTENSIONS.get(ext, {}).get('language', 'python')
    main_language = (metadata.get('kernelspec', {}).get('language') or
                     metadata.get('jupytext', {}).get('main_language'))
    if main_language is None:
        # Seed the vote with half a point so the extension's language is
        # picked when no cell declares a language, but loses to any
        # language that appears in at least one cell.
        languages = {default_from_ext: 0.5}
        for cell in cells:
            if 'language' in cell['metadata']:
                language = cell['metadata']['language']
                languages[language] = languages.get(language, 0.0) + 1
        main_language = max(languages, key=languages.get)
        # save main language when no kernel is set
        if 'language' not in metadata.get('kernelspec', {}):
            metadata.setdefault('jupytext', {})['main_language'] = main_language
    # Remove 'language' meta data and add a magic if not main language
    for cell in cells:
        if 'language' in cell['metadata']:
            language = cell['metadata'].pop('language')
            if language != main_language and language in _JUPYTER_LANGUAGES:
                if 'magic_args' in cell['metadata']:
                    magic_args = cell['metadata'].pop('magic_args')
                    cell['source'] = u'%%{} {}\n'.format(language, magic_args) + cell['source']
                else:
                    cell['source'] = u'%%{}\n'.format(language) + cell['source']
def cell_language(source):
    """Return the language of a code cell, plus any magic arguments.

    If the first line of *source* is a cell magic like ``%%R`` or
    ``%%bash args``, that line is removed from *source* (in place) and
    ``(language, magic_args)`` is returned; otherwise ``(None, None)``.

    :param source: the cell source, as a list of lines
    :return: a ``(language, magic_args)`` tuple, or ``(None, None)``
    """
    if source:
        line = source[0]
        for lang in _JUPYTER_LANGUAGES:
            magic = '%%' + lang
            # The magic name must end the line or be followed by
            # whitespace: the previous regex check ('^%%{lang}\\s*')
            # matched with zero whitespace, so '%%python2' was wrongly
            # detected as language 'python' with magic args '2'.
            if line.startswith(magic) and (
                    len(line) == len(magic) or line[len(magic)].isspace()):
                source.pop(0)
                return lang, line[len(magic):].strip()
    return None, None
| 41.115385 | 100 | 0.57125 |
a2def25547a2155ad851fe7988932a060076ace7 | 3,171 | py | Python | frangiclave/compendium/slot_specification.py | DeSevilla/frangiclave-compendium | 2389fb0c5492e76b2de06227c31ff19e247d8376 | [
"CC0-1.0"
] | 6 | 2018-07-09T13:01:12.000Z | 2020-04-18T15:10:43.000Z | frangiclave/compendium/slot_specification.py | DeSevilla/frangiclave-compendium | 2389fb0c5492e76b2de06227c31ff19e247d8376 | [
"CC0-1.0"
] | 1 | 2018-07-20T00:00:05.000Z | 2018-09-24T10:54:35.000Z | frangiclave/compendium/slot_specification.py | Lyrositor/frangiclave | 5aae57fc3161e47a7bc5fe84712aa06ec87d6b78 | [
"CC0-1.0"
] | null | null | null | from typing import TYPE_CHECKING, Any, Dict, List
from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Table
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from frangiclave.compendium.base import Base
from frangiclave.compendium.game_content import GameContents
from frangiclave.compendium.utils import get, to_bool
if TYPE_CHECKING:
from frangiclave.compendium.verb import Verb
class SlotSpecification:
    """Declarative mixin for a slot specification: a labelled slot with
    required/forbidden element collections, behaviour flags and an
    optional associated verb.

    Concrete subclasses supply ``__tablename__`` (and an ``id`` primary
    key column); the ``secondary`` helper derives the per-table
    association-table names from it."""
    # Placeholder; must be overridden by the concrete subclass.
    __tablename__ = None
    label: str = Column(String)
    description: str = Column(String)
    @declared_attr
    def required(self) -> List['SlotSpecificationItem']:
        """Elements (with quantities) that must be present in the slot."""
        return relationship(
            'SlotSpecificationItem',
            secondary=lambda: self.secondary('required')
        )
    @declared_attr
    def forbidden(self) -> List['SlotSpecificationItem']:
        """Elements (with quantities) that may not be placed in the slot."""
        return relationship(
            'SlotSpecificationItem',
            secondary=lambda: self.secondary('forbidden')
        )
    @classmethod
    def secondary(cls, attr: str):
        """Build the association table linking this table's rows to
        ``slot_specification_items`` for the given collection name
        ('required' or 'forbidden')."""
        return Table(
            cls.__tablename__ + '_' + attr + '_items_associations',
            Base.metadata,
            Column(
                'slot_specification_id',
                Integer,
                ForeignKey(cls.__tablename__ + '.id')
            ),
            Column(
                'item_id',
                Integer,
                ForeignKey('slot_specification_items.id')
            )
        )
    greedy: bool = Column(Boolean)
    consumes: bool = Column(Boolean)
    no_animation: bool = Column(Boolean)
    @declared_attr
    def for_verb_id(self) -> Column:
        return Column(Integer, ForeignKey('verbs.id'))
    @declared_attr
    def for_verb(self) -> 'Verb':
        return relationship('Verb', foreign_keys=self.for_verb_id)
    @classmethod
    def from_data(
            cls,
            data: Dict[str, Any],
            game_contents: GameContents
    ) -> 'SlotSpecification':
        """Build a slot specification from a raw game-data dict,
        resolving element and verb ids through *game_contents*.

        Missing keys fall back to defaults via :func:`get`; the
        'greedy'/'consumes'/'noanim' values are coerced with
        :func:`to_bool`."""
        s = cls()
        # NOTE(review): ``element`` is not declared on this mixin —
        # presumably defined by the concrete subclass; confirm.
        s.element = game_contents.get_element(data['id'])
        s.label = get(data, 'label', s.element.element_id)
        s.description = get(data, 'description', '')
        s.required = [
            SlotSpecificationItem(
                element=game_contents.get_element(element_id),
                quantity=quantity
            ) for element_id, quantity in get(data, 'required', {}).items()
        ]
        s.forbidden = [
            SlotSpecificationItem(
                element=game_contents.get_element(element_id),
                quantity=quantity
            ) for element_id, quantity in get(data, 'forbidden', {}).items()
        ]
        s.greedy = get(data, 'greedy', False, to_bool)
        s.consumes = get(data, 'consumes', False, to_bool)
        s.no_animation = get(data, 'noanim', False, to_bool)
        s.for_verb = game_contents.get_verb(get(data, 'actionId', None))
        return s
class SlotSpecificationItem(Base):
    """Association row linking a slot specification to an element,
    together with the quantity of that element involved."""
    __tablename__ = 'slot_specification_items'
    id = Column(Integer, primary_key=True)
    element_id = Column(Integer, ForeignKey('elements.id'))
    element = relationship('Element')
    # Number of copies of the element required/forbidden in the slot.
    quantity = Column(Integer)
8dc94760ae7fb3ce64fafd58eb24271514ad1688 | 4,724 | py | Python | 2021/05-day/modules.py | smv7/Advent_Of_Code | ee22bc08ab663b5aa1f42deb50774b493a4cf7ba | [
"MIT"
] | null | null | null | 2021/05-day/modules.py | smv7/Advent_Of_Code | ee22bc08ab663b5aa1f42deb50774b493a4cf7ba | [
"MIT"
] | null | null | null | 2021/05-day/modules.py | smv7/Advent_Of_Code | ee22bc08ab663b5aa1f42deb50774b493a4cf7ba | [
"MIT"
] | null | null | null | '''Functions to Advent Code 2021, day 5'''
def create_dots_map(x: int, y: int) -> list:
    """Return a 2D grid of '.' markers with (y + 1) rows of (x + 1) columns.

    The arguments are the largest reachable 0-based coordinates, so the
    grid is one unit larger than each coordinate in both dimensions.
    Each row is an independent list, so cells can be mutated safely.
    """
    # A nested comprehension replaces the original manual while-loop;
    # a fresh inner list is built per row so rows are not aliased.
    return [['.' for _ in range(x + 1)] for _ in range(y + 1)]
def find_largest_coordinate(sequence: list) -> tuple:
    """Return a tuple with the largest 'x' coordinate (index 0) and the
    largest 'y' coordinate (index 1) found in the nested coordinate pairs.

    The result is floored at 0, so an empty input yields (0, 0).
    """
    all_x = [int(pair[0]) for group in sequence for pair in group]
    all_y = [int(pair[1]) for group in sequence for pair in group]
    # Seed each max with 0 to mirror the original accumulator start value.
    return (max([0, *all_x]), max([0, *all_y]))
def mark_coordinates(coords_list: list, dots_map: list, diagonal: bool = None) -> list:
    """Return a new dots map marked with the vent lines x1,y1 -> x2,y2.

    Each covered cell goes from '.' to 1 on its first hit and is
    incremented on every later hit. Horizontal and vertical lines are
    always drawn; 45-degree diagonal lines are drawn only when
    ``diagonal`` is truthy, otherwise they are skipped.

    Fixes over the previous version:
    - rows are copied (``row[:]``) so the caller's ``dots_map`` is no
      longer mutated (``list(dots_map)`` only copied the outer list);
    - dead ``horizontal_range``/``vertical_range`` assignments in the
      horizontal/vertical branches were removed.
    """
    new_dots_map = [row[:] for row in dots_map]

    def _mark(x, y):
        # First hit turns '.' into 1; later hits increment the counter.
        if new_dots_map[y][x] == '.':
            new_dots_map[y][x] = 1
        else:
            new_dots_map[y][x] += 1

    for start_point, end_point in coords_list:
        x1, y1 = int(start_point[0]), int(start_point[1])
        x2, y2 = int(end_point[0]), int(end_point[1])

        if x1 == x2:
            # Vertical vent line (inclusive of both endpoints).
            for y in range(min(y1, y2), max(y1, y2) + 1):
                _mark(x1, y)
        elif y1 == y2:
            # Horizontal vent line (inclusive of both endpoints).
            for x in range(min(x1, x2), max(x1, x2) + 1):
                _mark(x, y1)
        elif diagonal:
            # 45-degree diagonal: step each axis toward its endpoint.
            dx = 1 if x2 > x1 else -1
            dy = 1 if y2 > y1 else -1
            for x, y in zip(range(x1, x2 + dx, dx), range(y1, y2 + dy, dy)):
                _mark(x, y)
        # Diagonal lines are ignored entirely when `diagonal` is not set.
    return new_dots_map
| 38.721311 | 113 | 0.565411 |
087547aca64f868848f4ad6fa815668c51002dcb | 593 | py | Python | Elevator.py | Idbruh/Elevator | a895cd0ac1bdd79eeec281be90e4a85dea84f0de | [
"MIT"
] | null | null | null | Elevator.py | Idbruh/Elevator | a895cd0ac1bdd79eeec281be90e4a85dea84f0de | [
"MIT"
] | null | null | null | Elevator.py | Idbruh/Elevator | a895cd0ac1bdd79eeec281be90e4a85dea84f0de | [
"MIT"
] | null | null | null | from random import choice
class Elevator:
    """Supplies random floor numbers (0-2) for the two elevators and the
    call panel, used to simulate elevator activity."""

    def __init__(self):
        # Each list holds the floors a random pick may return.
        self.__left_elevator = [0, 1, 2]
        self.__right_elevator = [0, 1, 2]
        self.__call_elevator = [0, 1, 2]

    def get_random_left_elevator_floor(self) -> int:
        """Pick a random floor for the left elevator."""
        return choice(self.__left_elevator)

    def get_random_right_elevator_floor(self) -> int:
        """Pick a random floor for the right elevator."""
        return choice(self.__right_elevator)

    def get_random_elevator_call(self) -> int:
        """Pick a random floor from which an elevator is being called."""
        return choice(self.__call_elevator)
c89048b2ef5c768434002868bdd22ab8f9e36189 | 1,423 | py | Python | lambda/main.py | Invicton-Labs/terraform-aws-lambda-shell | ac21d5e733e7e63726f01f32fe71d88fcd008585 | [
"CC-BY-4.0"
] | null | null | null | lambda/main.py | Invicton-Labs/terraform-aws-lambda-shell | ac21d5e733e7e63726f01f32fe71d88fcd008585 | [
"CC-BY-4.0"
] | null | null | null | lambda/main.py | Invicton-Labs/terraform-aws-lambda-shell | ac21d5e733e7e63726f01f32fe71d88fcd008585 | [
"CC-BY-4.0"
] | null | null | null | import subprocess
import uuid
import os
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def lambda_handler(event, context):
    """Run a command (optionally feeding it a script file) and return its result.

    :param event: dict with keys 'interpreter' (argv list), 'command'
        (script text or None) and 'fail_on_error' (bool); the optional
        '__IL_TF_LS_SKIP_RUN' key short-circuits execution.
    :param context: Lambda context (unused).
    :return: dict with 'exitstatus', 'stdout' and 'stderr'.
    :raises subprocess.SubprocessError: on non-zero exit when
        'fail_on_error' is set.
    """
    logger.debug("Input event: {}".format(event))

    # If a special flag is set, skip the execution
    if '__IL_TF_LS_SKIP_RUN' in event:
        return {}

    # Copy the interpreter argv so appending the script path below never
    # mutates the caller's event payload (the original appended in place).
    cmd = list(event['interpreter'])

    scriptpath = None
    try:
        if event['command'] is not None:
            # Create a unique file for the script to be temporarily stored in
            scriptpath = "/tmp/botoform-{}".format(uuid.uuid1())
            with open(scriptpath, "x") as f:
                f.write(event['command'])
            cmd.append(scriptpath)

        # Run the command as a subprocess (no shell: argv is passed as a list).
        logger.info("Running command: {}".format(cmd))
        result = subprocess.run(cmd, shell=False, capture_output=True)
        logger.debug("Result: {}".format(result))
    finally:
        # Always remove the temporary script, even if the run raised.
        if scriptpath is not None and os.path.exists(scriptpath):
            os.remove(scriptpath)

    stdout = result.stdout.decode('utf-8')
    stderr = result.stderr.decode('utf-8')
    if result.returncode != 0 and event['fail_on_error']:
        raise subprocess.SubprocessError(
            "Command returned non-zero exit code ({}) with stdout '{}' and stderr '{}'".format(result.returncode, stdout, stderr))

    return {
        'exitstatus': result.returncode,
        'stdout': stdout,
        'stderr': stderr
    }
| 29.645833 | 131 | 0.619115 |
e9812a61daa3bf5fc894e00be066d8238fc7d04c | 276 | py | Python | app/talk/views.py | Deteriorator/Alog | bdb7b3199d13c28030987275fe020955e3bf7b74 | [
"MIT"
] | null | null | null | app/talk/views.py | Deteriorator/Alog | bdb7b3199d13c28030987275fe020955e3bf7b74 | [
"MIT"
] | null | null | null | app/talk/views.py | Deteriorator/Alog | bdb7b3199d13c28030987275fe020955e3bf7b74 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the index view as a plain-text HTTP response.

    NOTE(review): the message text says "about page" -- looks like a
    copy-paste from the `about` view below; confirm the intended wording
    before changing the user-facing string.
    """
    return HttpResponse("Here is the about page. www.liangz.org")
def about(request):
return HttpResponse("Here is the about page. www.liangz.org")
| 21.230769 | 65 | 0.753623 |
9caa7695b3227fc4da4a1fd4aaacbb37988be800 | 6,349 | py | Python | mri/operators/proximity/ordered_weighted_l1_norm.py | mathrip/pysap-mri | d6acae5330c684b66a99d0fbf13e44b0b66046cf | [
"CECILL-B"
] | null | null | null | mri/operators/proximity/ordered_weighted_l1_norm.py | mathrip/pysap-mri | d6acae5330c684b66a99d0fbf13e44b0b66046cf | [
"CECILL-B"
] | null | null | null | mri/operators/proximity/ordered_weighted_l1_norm.py | mathrip/pysap-mri | d6acae5330c684b66a99d0fbf13e44b0b66046cf | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
##########################################################################
# pySAP - Copyright (C) CEA, 2017 - 2018
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
from modopt.opt.proximity import ProximityParent, OrderedWeightedL1Norm
import numpy as np
from joblib import Parallel, delayed
class OWL(ProximityParent):
    """Proximity operator applying the Ordered Weighted L1 (OWL) norm.

    This class handles reshaping coefficients based on `mode` and feeds
    them in the right format to modopt's OrderedWeightedL1Norm.

    Fixes over the previous version: string modes are compared with `==`
    instead of `is` (identity comparison on literals only works through
    CPython interning and raises a SyntaxWarning on modern Pythons), and
    the docstring/error-message typos were corrected.

    Parameters
    ----------
    alpha: float
        value of alpha for parameterizing weights
    beta: float
        value of beta for parameterizing weights
    bands_shape: list of tuples
        the shape of all bands, this corresponds to linear_op.coeffs_shape
    n_coils: int
        number of channels
    mode: string 'all' | 'band_based' | 'coeff_based', default 'band_based'
        Mode of operation of proximity:
        all -> on all coefficients in all channels
        band_based -> on all coefficients in each band
        coeff_based -> on all coefficients but across each channel
    n_jobs: int, default 1
        number of cores to be used for operation
    """
    def __init__(self, alpha, beta, bands_shape, n_coils,
                 mode='band_based', n_jobs=1):
        self.mode = mode
        self.n_jobs = n_jobs
        self.n_coils = n_coils
        if n_coils < 1:
            raise ValueError('Number of channels must be strictly positive')
        elif n_coils > 1:
            # Multi-channel data: every channel shares the per-band shapes
            # of the first channel.
            self.band_shape = bands_shape[0]
        else:
            self.band_shape = bands_shape
        if self.mode == 'all':
            # One weight vector spanning every coefficient of every channel.
            data_shape = 0
            for band_shape in self.band_shape:
                data_shape += np.prod(band_shape)
            weights = np.reshape(
                self._oscar_weights(alpha, beta, data_shape * self.n_coils),
                (self.n_coils, data_shape)
            )
            self.owl_operator = OrderedWeightedL1Norm(weights)
        elif self.mode == 'band_based':
            # One OrderedWeightedL1Norm (and weight vector) per band.
            self.owl_operator = []
            for band_shape in self.band_shape:
                weights = self._oscar_weights(
                    alpha,
                    beta,
                    self.n_coils * np.prod(band_shape),
                )
                self.owl_operator.append(OrderedWeightedL1Norm(weights))
        elif self.mode == 'coeff_based':
            # One weight vector across channels, applied coefficient by
            # coefficient.
            weights = self._oscar_weights(alpha, beta, self.n_coils)
            self.owl_operator = OrderedWeightedL1Norm(weights)
        else:
            raise ValueError('Unknown mode, please choose between `all` or '
                             '`band_based` or `coeff_based`')
        self.weights = self.owl_operator
        self.op = self._op_method
        self.cost = self._cost_method

    @staticmethod
    def _oscar_weights(alpha, beta, size):
        """Build OSCAR-parameterized weights: w_i = alpha + beta * rank_i,
        sorted in decreasing order as OWL requires."""
        w = np.arange(size - 1, -1, -1, dtype=np.float64)
        w *= beta
        w += alpha
        return w

    def _reshape_band_based(self, data):
        """Flatten each band across all channels; one 1D array per band."""
        output = []
        start = 0
        n_channel = data.shape[0]
        for band_shape_idx in self.band_shape:
            n_coeffs = np.prod(band_shape_idx)
            stop = start + n_coeffs
            output.append(np.reshape(
                data[:, start: stop],
                (n_channel * n_coeffs)))
            start = stop
        return output

    def _op_method(self, data, extra_factor=1.0):
        """Apply the OWL proximity operator.

        Based on mode, reshape the coefficients and call
        OrderedWeightedL1Norm.

        Parameters
        ----------
        data: np.ndarray
            Input array of data
        extra_factor: float, default 1.0
            Multiplicative factor forwarded to the underlying operator.
        """
        if self.mode == 'all':
            output = np.reshape(
                self.owl_operator.op(data.flatten(), extra_factor),
                data.shape
            )
            return output
        elif self.mode == 'band_based':
            data_r = self._reshape_band_based(data)
            output = Parallel(n_jobs=self.n_jobs)(
                delayed(self.owl_operator[i].op)(
                    data_band,
                    extra_factor)
                for i, data_band in enumerate(data_r))
            reshaped_data = np.zeros(data.shape, dtype=data.dtype)
            start = 0
            n_channel = data.shape[0]
            for band_shape_idx, band_data in zip(self.band_shape, output):
                stop = start + np.prod(band_shape_idx)
                reshaped_data[:, start:stop] = np.reshape(
                    band_data,
                    (n_channel, np.prod(band_shape_idx)))
                start = stop
            # NOTE(review): this transpose is undone by the final transposed
            # return below, so band_based effectively returns reshaped_data
            # unchanged -- confirm the intended orientation.
            output = np.asarray(reshaped_data).T
        elif self.mode == 'coeff_based':
            output = Parallel(n_jobs=self.n_jobs)(
                delayed(self.owl_operator.op)(
                    data[:, i],
                    extra_factor)
                for i in range(data.shape[1]))
        return np.asarray(output).T

    def _cost_method(self, data):
        """Cost function of the proximable part.

        Based on mode, reshape the incoming data and call cost in
        OrderedWeightedL1Norm.

        Parameters
        ----------
        data: np.ndarray
            Input array of the sparse code.

        Returns
        -------
        The cost of this sparse code
        """
        if self.mode == 'all':
            cost = self.owl_operator.cost(data)
        elif self.mode == 'band_based':
            data_r = self._reshape_band_based(data)
            output = Parallel(n_jobs=self.n_jobs)(
                delayed(self.owl_operator[i].cost)(
                    data_band)
                for i, data_band in enumerate(data_r))
            cost = np.sum(output)
        elif self.mode == 'coeff_based':
            output = Parallel(n_jobs=self.n_jobs)(
                delayed(self.owl_operator.cost)(
                    data[:, i])
                for i in range(data.shape[1]))
            cost = np.sum(output)
        return cost
| 37.347059 | 78 | 0.554576 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.