| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
"""Test Logex Initization and Error Handling"""
import subprocess
from unittest import TestCase
from samples import app, api, logex
from samples import bp_app, api_v1, api_v2, bp_logex
class BaseTestCase(TestCase):
DEBUG = True
__blueprints__ = False
@classmethod
def setUpClass(cls):
cls.app = app
cls.api = api
cls.logex = logex
if cls.__blueprints__:
cls.app = bp_app
cls.api = [api_v1, api_v2]
cls.logex = bp_logex
# App test client, config, and context
cls.log_name = cls.app.name + ".log"
cls.app.config['DEBUG'] = cls.DEBUG
cls.ac = cls.app.app_context()
cls.test_client = cls.app.test_client()
cls.test_client.testing = True
cls.ctx = cls.app.test_request_context()
cls.ctx.push()
@classmethod
def tearDownClass(cls):
subprocess.call(['rm', '-rf', 'logs'])
def setUp(self):
with self.ac:
self.logs = self.logex.logs
def tearDown(self):
pass
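# --- Hedged usage sketch (not in the original file) ---
# A minimal sketch of how BaseTestCase is presumably consumed: a concrete test
# case flips __blueprints__ to exercise the blueprint-based app. The class name
# and assertion below are illustrative, not taken from the repository.
class BlueprintLogexTestCase(BaseTestCase):
    __blueprints__ = True

    def test_logs_available(self):
        # self.logs is populated in setUp() from the logex extension
        self.assertIsNotNone(self.logs)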
| pinntech/flask-logex | tests/base.py | Python | mit | 1,058 |
#!/bin/python
# InteractiveWidgets.py
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.lines import Line2D
import ipywidgets as widgets
from IPython.display import display
import numpy as np
from itertools import cycle
plt.style.use("fivethirtyeight")
class PlotContainerGUI:
""" PlotContainerGUI
An object that contains meta-objects that can
be used as a general pyplot interface with
interactive widgets to control the plotting
styles.
Only requires initial input of a Pandas dataframe,
everything should be self-explanatory from then
onwards.
Two sub-classes are used in PlotContainerGUI:
1. FigureSetup
Container with the general figure settings
like style sheet and axis labels.
2. ClassReferences
Objects that will control the plot settings
for individual plots. This is a child of the
object MainTabs, which houses each ClassReference
object as tabs.
General way this was written is
"""
def __init__(self, DataFrame):
self.DataFrame = DataFrame
# Generate a list of what the tabs will be called
self.PlotTabs = [str(Key) for Key in DataFrame.keys()]
# For each tab, generate a PlotSettings object (tab menu objects)
self.ClassReferences = [self.PlotSettings(Key) for Key in self.PlotTabs]
# Generate a list of references to the tabs
self.TabReferences = [Key.SettingsContainer for Key in self.ClassReferences]
# Populate the tab menu with tabs
self.MainTabs = widgets.Tab(children=self.TabReferences)
self.FigureSetup = self.FigureSettings()
self.PlotSettings = dict() # Needs and wants of each plot
self.DatatoPlot = dict() # Dictionary of instances of plots
display(self.MainTabs)
display(self.FigureSetup.Container)
self.InitialisePlots()
plt.show()
self.FigureSetup.UpdateFigure.on_click(self.UpdatePlot)
self.UpdateNames()
def UpdateNames(self):
for Index, Key in enumerate(self.PlotTabs):
self.MainTabs.set_title(Index, Key)
def UpdateFigureSettings(self):
self.Settings = dict()
self.Settings = self.FigureSetup.GetSettings()
plt.xlabel(self.Settings["XLabel"])
plt.ylabel(self.Settings["YLabel"])
plt.title(self.Settings["PlotTitle"])
plt.style.use(self.Settings["Style"])
def DefineColours(self):
""" Method for generating a colour palette.
This is done by checking how many plots are
actually going to be plotted by their Booleans,
then generating a 1D array with the RGB values
for a specific colourmap.
"""
# Count the total number of plots we're going to make; sum of boolean values
PlotList = []
for Key in self.ClassReferences:
self.PlotSettings[Key.Name.value] = Key.GetSettings()
PlotList.append(self.PlotSettings[Key.Name.value]["PlotBoolean"])
self.PlotCount = np.sum(PlotList)
try:
ColourMap = cm.__dict__[self.FigureSetup.PlotColours.value]
except KeyError:
ColourMap = cm.Spectral # Default to spectral, good for several plots
if self.PlotCount <= 2:
ColourGenerator = cycle(["red", "green", "blue"])
self.Colours = [Colour for Colour, Key in zip(ColourGenerator,
self.DataFrame.keys())] # if there's only one plot, make it red.
elif self.PlotCount > 2:
self.Colours = ColourMap(np.linspace(0, 1, self.PlotCount)) # This generates enough colours
def InitialisePlots(self):
""" Method for plotting data. This will reference a figure
called "Main", and generate plots in that figure.
Before the plotting is done, the figure settings are
retrieved from FigureSetup, called in UpdateFigureSettings.
"""
plt.figure("Main", figsize=(12,6))
self.UpdateFigureSettings()
self.DefineColours() # Generates colour palette as well as retrieves plot settings
for Key, Colour in zip(self.ClassReferences, self.Colours):
if self.PlotSettings[Key.Name.value]["PlotColour"] == "Default":
self.DatatoPlot[Key.Name.value] = plt.plot(self.DataFrame.index,
self.DataFrame[Key.DataReference],
label=Key.Name.value,
marker=self.PlotSettings[Key.Name.value]["PlotType"],
alpha=0.8,
linestyle=":",
markersize=10,
)
else:
self.DatatoPlot[Key.Name.value] = plt.plot(self.DataFrame.index,
self.DataFrame[Key.DataReference],
label=Key.Name.value,
marker=self.PlotSettings[Key.Name.value]["PlotType"],
alpha=0.8,
linestyle=":",
markersize=10,
c=Colour
)
plt.legend()
def UpdatePlot(self, Blank):
""" This method is called each time
the Update Plot button is clicked.
It sets the current figure to "Main",
clears it and re-plots with the latest
figure settings.
After plotting, it checks if the PlotBoolean
checkbox is clicked; if it is then we make the
plot visible. Otherwise, we make it invisible.
"""
plt.figure("Main")
plt.clf()
self.InitialisePlots()
for Key in self.ClassReferences:
if not self.PlotSettings[Key.Name.value]["PlotBoolean"]:
plt.setp(self.DatatoPlot[Key.Name.value], visible=False)
else:
plt.setp(self.DatatoPlot[Key.Name.value], visible=True)
plt.legend()
plt.draw()
class FigureSettings:
""" General plotting settings, such as colours and whatnot """
def __init__(self):
ColourMaps = ["Default", "viridis", "inferno", "magma",
"Spectral", "Pastel1", "coolwarm"]
self.Style = widgets.Dropdown(description="Plot Style Sheet",
value="seaborn-pastel",
options=plt.style.available)
self.PlotColours = widgets.Dropdown(description="Plot Colours",
options=ColourMaps,
value="Default")
self.UpdateFigure = widgets.Button(description="Update plot")
self.XLabel = widgets.Text(description="X Axis Label",
value="X Axis",
width=120)
self.YLabel = widgets.Text(description="Y Axis Label",
value="Y Axis",
width=120)
self.PlotTitle = widgets.Text(description="Plot Title",
value="Main",
width=120)
self.Container = widgets.HBox(children=[self.UpdateFigure,
self.Style,
self.PlotColours,
self.XLabel,
self.YLabel,
self.PlotTitle
],
padding=0)
def GetSettings(self):
""" Method for retrieving a dictionary
of all of the plot settings.
"""
self.Settings = dict()
for Setting in self.__dict__:
try:
self.Settings[Setting] = self.__dict__[Setting].value
except AttributeError: # Ignore what we can't get!
pass
return self.Settings
class PlotSettings:
""" Interactive widgets for each key in a dataframe to plot """
def __init__(self, Key):
Types = Line2D.filled_markers
self.DataReference = Key
self.Name = widgets.Text(description="Label", # This only changes
value=Key) # the plot label!
self.PlotColour = widgets.Dropdown(description="Plot Colour",
options=["red",
"blue",
"green",
"cyan",
"magenta",
"black"])
self.PlotType = widgets.Dropdown(description="Plot Type",
options=Types,
value="o"
)
self.PlotBoolean = widgets.Checkbox(description="Show Plot?",
value=False)
self.SettingsContainer = widgets.HBox(children=[self.PlotBoolean,
self.PlotType,
#self.PlotColour,
self.Name
],
padding=20)
def GetSettings(self):
""" Returns a dictionary of the plot settings """
self.Settings = dict()
for Setting in self.__dict__:
try:
self.Settings[Setting] = self.__dict__[Setting].value
except AttributeError:
pass
return self.Settings
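# --- Hedged usage sketch (not in the original file) ---
# A rough illustration of how PlotContainerGUI is presumably driven from a
# Jupyter notebook: build a pandas DataFrame and hand it to the constructor,
# which displays the tab widgets and the "Main" figure. The column names and
# data are invented for the example; pandas is an assumed extra dependency.
if __name__ == "__main__":
    import pandas as pd
    x = np.linspace(0, 10, 200)
    df = pd.DataFrame({"signal_a": np.sin(x), "signal_b": np.cos(x)})
    gui = PlotContainerGUI(df)  # widgets only render in an IPython frontend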
| laserkelvin/IPython-Notebook-Tools | InteractiveWidgets.py | Python | gpl-3.0 | 11,014 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateAgent
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3_generated_Agents_CreateAgent_sync]
from google.cloud import dialogflowcx_v3
def sample_create_agent():
# Create a client
client = dialogflowcx_v3.AgentsClient()
# Initialize request argument(s)
agent = dialogflowcx_v3.Agent()
agent.display_name = "display_name_value"
agent.default_language_code = "default_language_code_value"
agent.time_zone = "time_zone_value"
request = dialogflowcx_v3.CreateAgentRequest(
parent="parent_value",
agent=agent,
)
# Make the request
response = client.create_agent(request=request)
# Handle the response
print(response)
# [END dialogflow_v3_generated_Agents_CreateAgent_sync]
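# --- Hedged usage note (not part of the generated snippet) ---
# A minimal sketch of running the sample directly; it assumes Application
# Default Credentials are configured and that "parent_value" above has been
# replaced with a real location path, e.g. "projects/<project>/locations/<loc>".
if __name__ == "__main__":
    sample_create_agent()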
| googleapis/python-dialogflow-cx | samples/generated_samples/dialogflow_v3_generated_agents_create_agent_sync.py | Python | apache-2.0 | 1,645 |
import os
from django import forms
from django.contrib.staticfiles.storage import staticfiles_storage
from django.db.models import fields
from django.db.models.fields import files
from django.utils.six import text_type
try:
from django.core import checks
except ImportError: pass
from smartfields.settings import KEEP_ORPHANS
from smartfields.managers import FieldManager
from smartfields.models import SmartfieldsModelMixin
from smartfields.utils import VALUE_NOT_SET
__all__ = [
'BigIntegerField', 'BinaryField', 'BooleanField', 'CharField',
'CommaSeparatedIntegerField', 'DateField', 'DateTimeField', 'DecimalField',
'DurationField', 'EmailField', 'Field', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
'UUIDField', 'FileField', 'ImageField',
]
class SmartfieldsDescriptor(object):
field = None
def __init__(self, field):
self.field = field
def __set__(self, instance, value):
if self.field.manager is not None:
value = self.field.manager.pre_process(instance, value)
if self.field.manager.should_process:
previous_value = instance.__dict__.get(self.field.name)
if previous_value is not VALUE_NOT_SET:
self.field.manager.stash_previous_value(previous_value)
instance.__dict__[self.field.name] = self.field.to_python(value)
def __get__(self, instance=None, model=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, model.__name__))
return instance.__dict__[self.field.name]
class Field(fields.Field):
descriptor_class = SmartfieldsDescriptor
manager_class = FieldManager
manager = None
def __init__(self, verbose_name=None, name=None, dependencies=None, **kwargs):
if dependencies is not None:
self.manager = self.manager_class(self, dependencies)
self._dependencies = dependencies
super(Field, self).__init__(verbose_name=verbose_name, name=name, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
super(Field, self).contribute_to_class(cls, name, **kwargs)
if not issubclass(cls, SmartfieldsModelMixin):
cls.__bases__ = (SmartfieldsModelMixin,) + cls.__bases__
if not hasattr(cls, '_smartfields_managers'):
cls._smartfields_managers = {}
if self.manager is not None:
if not isinstance(self, FileField):
# FileField will itself set the descriptor
setattr(cls, name, self.descriptor_class(self))
self.manager.contribute_to_model(cls, name)
def get_status(self, instance):
if self.manager is not None:
return self.manager.get_status(instance)
def pre_save(self, model_instance, add):
value = super(Field, self).pre_save(model_instance, add)
if self.manager is not None:
self.manager.process(model_instance)
value = getattr(model_instance, self.attname)
return value
class BooleanField(Field, fields.BooleanField):
pass
class NullBooleanField(Field, fields.NullBooleanField):
pass
class SmallIntegerField(Field, fields.SmallIntegerField):
pass
class IntegerField(Field, fields.IntegerField):
pass
class BigIntegerField(Field, fields.BigIntegerField):
pass
class PositiveIntegerField(Field, fields.PositiveIntegerField):
pass
class PositiveSmallIntegerField(Field, fields.PositiveSmallIntegerField):
pass
class FloatField(Field, fields.FloatField):
pass
class DecimalField(Field, fields.DecimalField):
pass
if hasattr(fields, 'BinaryField'):
# Django>=1.6
class BinaryField(Field, getattr(fields, 'BinaryField')):
pass
else:
BinaryField = None
class CharField(Field, fields.CharField):
pass
class TextField(Field, fields.TextField):
pass
class CommaSeparatedIntegerField(Field, fields.CommaSeparatedIntegerField):
pass
class DateField(Field, fields.DateField):
pass
class DateTimeField(Field, fields.DateTimeField):
pass
class TimeField(Field, fields.TimeField):
pass
class IPAddressField(Field, fields.IPAddressField):
pass
class GenericIPAddressField(Field, fields.GenericIPAddressField):
pass
class EmailField(Field, fields.EmailField):
pass
class URLField(Field, fields.URLField):
pass
class SlugField(Field, fields.SlugField):
pass
##################
# FILES
##################
class FilePathField(Field, fields.FilePathField):
pass
class FieldFile(files.FieldFile):
def __init__(self, *args, **kwargs):
self.is_static = kwargs.pop('is_static', False)
super(FieldFile, self).__init__(*args, **kwargs)
if self.is_static:
self.storage = staticfiles_storage
def save(self, name, content, save=True, instance_update=True):
# prevent static files from being modified
if self.is_static:
return
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content)
if instance_update:
# omit descriptor to prevent stashing the same file
self.instance.__dict__[self.field.name] = self.name
self._size = content.size
self._committed = True
if save and instance_update:
self.instance.save()
save.alters_data = True
def delete(self, save=True, instance_update=True):
# prevent static files from being deleted
if self.is_static or not self:
return
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
if instance_update:
# omit descriptor to prevent stashing the same file
self.instance.__dict__[self.field.name] = self.name
if hasattr(self, '_size'):
del self._size
self._committed = False
if instance_update and getattr(self.field, 'manager', None) is not None:
self.field.manager.cleanup(self.instance)
if save and instance_update:
self.instance.save()
delete.alters_data = True
@property
def state(self):
if getattr(self.field, 'manager', None) is not None:
return self.field.manager._get_status(self.instance)[1]['state']
@property
def name_base(self):
if self:
return os.path.split(self.name)[1]
return ""
@property
def html_tag(self):
if self:
return text_type(getattr(self.instance, "%s_html_tag" % self.field.name, ""))
return ""
class FileDescriptor(files.FileDescriptor):
def __set__(self, instance, value):
if self.field.manager is not None:
value = self.field.manager.pre_process(instance, value)
previous_value = self.__get__(instance)
if previous_value is not VALUE_NOT_SET and previous_value._committed and \
previous_value != value:
# make sure form saving doesn't replace current file with itself
self.field.manager.stash_previous_value(previous_value)
super(FileDescriptor, self).__set__(instance, value)
class FileField(Field, files.FileField):
attr_class = FieldFile
descriptor_class = FileDescriptor
def __init__(self, verbose_name=None, name=None, keep_orphans=KEEP_ORPHANS,
dependencies=None, **kwargs):
self.keep_orphans = keep_orphans
if not keep_orphans and dependencies is None:
# make sure there is a manager so orphans will get cleaned up
self.manager = self.manager_class(self, [])
super(FileField, self).__init__(verbose_name, name,
dependencies=dependencies, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(FileField, self).deconstruct()
if self.keep_orphans != KEEP_ORPHANS:
kwargs['keep_orphans'] = self.keep_orphans
return name, path, args, kwargs
class ImageFieldFile(FieldFile, files.ImageFieldFile):
pass
def _get_width(image, **kwargs):
if image:
return image.width
def _get_height(image, **kwargs):
if image:
return image.height
class ImageField(FileField):
attr_class = ImageFieldFile
def _get_dim_dependency(self, dim):
from smartfields.dependencies import Dependency
field = getattr(self, "%s_field" % dim)
# ugly, but lambdas and bound methods are not picklable
getter = globals()["_get_%s" % dim]
return Dependency(attname=field, processor=getter, uid='ImageField._%s' % dim)
def __init__(self, verbose_name=None, name=None, dependencies=None,
width_field=None, height_field=None, **kwargs):
dependencies = [d for d in dependencies or []]
self.width_field, self.height_field = width_field, height_field
if self.width_field:
dependencies.append(self._get_dim_dependency('width'))
if self.height_field:
dependencies.append(self._get_dim_dependency('height'))
dependencies = dependencies or None
super(ImageField, self).__init__(verbose_name, name,
dependencies=dependencies, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install Pillow".'),
obj=self,
id='fields.E210',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ImageField, self).deconstruct()
dependencies = kwargs.get('dependencies', None)
if dependencies:
dims = map(self._get_dim_dependency, ['width', 'height'])
dependencies = filter(lambda d: d not in dims, dependencies)
kwargs['dependencies'] = dependencies
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
# future added fields
if hasattr(fields, 'DurationField'):
class DurationField(Field, getattr(fields, 'DurationField')):
pass
else:
DurationField = None
if hasattr(fields, 'UUIDField'):
class UUIDField(Field, getattr(fields, 'UUIDField')):
pass
else:
UUIDField = None
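# --- Hedged usage sketch (not part of the original module) ---
# A rough example of how these fields are presumably declared on a model:
# ImageField wires width_field/height_field up as dependencies, and
# FileField(keep_orphans=False) gets a manager so replaced files are cleaned
# up. The model and field names are invented, and this sketch belongs in an
# app's models.py rather than here.
#
#   from django.db import models
#   from smartfields import fields as smart_fields
#
#   class Profile(models.Model):
#       avatar = smart_fields.ImageField(upload_to='avatars', blank=True,
#                                        width_field='avatar_width',
#                                        height_field='avatar_height')
#       avatar_width = models.IntegerField(null=True, blank=True)
#       avatar_height = models.IntegerField(null=True, blank=True)
#       resume = smart_fields.FileField(upload_to='resumes', blank=True,
#                                       keep_orphans=False)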
| un33k/django-smartfields | smartfields/fields/__init__.py | Python | mit | 11,464 |
# -*- coding: utf-8 -*-
"""
B2HANDLE utilities
"""
import os
from b2stage.apis.commons.endpoint import EudatEndpoint
try:
from b2handle.handleclient import EUDATHandleClient as b2handle
from b2handle.clientcredentials import PIDClientCredentials as credentials
from b2handle import handleexceptions
except BaseException:
b2handle, credentials, handleexceptions = [None] * 3
from restapi.utilities.htmlcodes import hcodes
from b2stage.apis.commons import path
from restapi.utilities.logs import log
class PIDgenerator(object):
pid_separator = '/'
def pid_name_fix(self, irule_output):
pieces = irule_output.split(self.pid_separator)
pid = self.pid_separator.join([pieces[0], pieces[1].lower()])
log.debug("Parsed PID: {}", pid)
return pid
def pid_request(self, icom, ipath):
""" EUDAT RULE for PID """
outvar = 'newPID'
inputs = {
'*path': '"{}"'.format(ipath),
'*fixed': '"true"',
# empty variables
'*parent_pid': '""',
'*ror': '""',
'*fio': '""',
}
body = """
EUDATCreatePID(*parent_pid, *path, *ror, *fio, *fixed, *{});
writeLine("stdout", *{});
""".format(
outvar,
outvar,
)
rule_output = icom.rule('get_pid', body, inputs, output=True)
return self.pid_name_fix(rule_output)
def parse_pid_dataobject_path(self, metadata, key='URL'):
""" Parse url / irods path """
url = metadata.get(key)
if url is None:
return url
# NOTE: this only works as long as the protocol is unchanged
url = url.replace('irods://', '')
# path_pieces = url.split(path.os.sep)[1:]
path_pieces = url.split(path.os.sep)
path_pieces[0] = path.os.sep
# TEMPORARY FIX, waiting to decide final PID structure
try:
if path_pieces[3] == 'api' and path_pieces[4] == 'registered':
path_pieces[0] = "/"
path_pieces[1] = "/"
path_pieces[2] = "/"
path_pieces[3] = "/"
path_pieces[4] = "/"
except BaseException:
log.error("Error parsing URL, not enough tokens? {}", path_pieces)
# print("pieces", path_pieces)
ipath = str(path.build(path_pieces))
log.verbose("Data object: {}", ipath)
return ipath
class B2HandleEndpoint(EudatEndpoint, PIDgenerator):
"""
Handling PID requests.
It includes some methods to connect to B2HANDLE.
FIXME: it should become a dedicated service in rapydo.
This way the client could be registered in memory with credentials
only if the provided credentials are working.
It should be read only access otherwise.
"""
eudat_pid_fields = [
"URL",
"EUDAT/CHECKSUM",
"EUDAT/UNPUBLISHED",
"EUDAT/UNPUBLISHED_DATE",
"EUDAT/UNPUBLISHED_REASON",
]
eudat_internal_fields = ["EUDAT/FIXED_CONTENT", 'PID']
def connect_client(self, force_no_credentials=False, disable_logs=False):
if getattr(self, '_handle_client', None) is None:
if disable_logs:
import logging
logging.getLogger('b2handle').setLevel(logging.WARNING)
# With credentials
if force_no_credentials:
self._handle_client = b2handle.instantiate_for_read_access()
log.debug("HANDLE client connected [w/out credentials]")
else:
found = False
file = os.environ.get('HANDLE_CREDENTIALS', None)
if file is not None:
credentials_path = path.build(file)
found = path.file_exists_and_nonzero(credentials_path)
if not found:
log.warning("B2HANDLE credentials file not found {}", file)
if found:
self._handle_client = b2handle.instantiate_with_credentials(
credentials.load_from_JSON(file)
)
log.debug("HANDLE client connected [w/ credentials]")
return self._handle_client, True
return self._handle_client, False
def check_pid_content(self, pid):
# from b2handle.handleclient import EUDATHandleClient as b2handle
# client = b2handle.instantiate_for_read_access()
client, authenticated = self.connect_client(
force_no_credentials=True, disable_logs=True
)
return client.retrieve_handle_record(pid)
def handle_pid_fields(self, client, pid):
""" Perform B2HANDLE request: retrieve URL from handle """
import requests
data = {}
try:
for field in self.eudat_pid_fields:
value = client.get_value_from_handle(pid, field)
log.info("B2HANDLE: {}={}", field, value)
data[field] = value
except handleexceptions.HandleSyntaxError as e:
return data, e, hcodes.HTTP_BAD_REQUEST
except handleexceptions.HandleNotFoundException as e:
return data, e, hcodes.HTTP_BAD_NOTFOUND
except handleexceptions.GenericHandleError as e:
return data, e, hcodes.HTTP_SERVER_ERROR
except handleexceptions.HandleAuthenticationError as e:
return data, e, hcodes.HTTP_BAD_UNAUTHORIZED
except requests.exceptions.ConnectionError as e:
log.warning("No connection available...")
return data, e, hcodes.HTTP_SERVER_ERROR
except BaseException as e:
log.error("Generic:\n{}({})", e.__class__.__name__, e)
return data, e, hcodes.HTTP_SERVER_ERROR
return data, None, hcodes.HTTP_FOUND
def get_pid_metadata(self, pid, head_method=False):
# First test: check if credentials exists and works
client, authenticated = self.connect_client(
force_no_credentials=True, disable_logs=True
)
# client, authenticated = self.connect_client()
data, error, code = self.handle_pid_fields(client, pid)
# If credentials were found but they gave error
# TODO: this should be tested at server startup!
if error is not None and authenticated:
log.error("B2HANDLE credentials problem: {}", error)
client, _ = self.connect_client(force_no_credentials=True)
data, error, code = self.handle_pid_fields(client, pid)
# Still getting error? Raise any B2HANDLE library problem
if error is not None:
log.error("B2HANDLE problem: {}", error)
return (
data,
self.send_errors(
message='B2HANDLE: {}'.format(error), code=code, head_method=head_method
),
)
else:
return data, None
| EUDAT-B2STAGE/http-api | projects/b2stage/backend/apis/commons/b2handle.py | Python | mit | 6,987 |
# python imports
import unittest
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from foreman.application import make_app
class MockSession(dict):
def __init__(self, sid):
self.sid = sid
def should_save(self):
return True
class MockSessionStore(object):
def __init__(self):
sid = 'testsid'
self.session = MockSession(sid)
def get(self, uid):
return self.session
def new(self):
return self.session
def save(self, session):
pass
class URLTestCase(unittest.TestCase):
client = None
resp = None
def setUp(self):
self.session_store = MockSessionStore()
app = make_app(self.session_store)
self.client = Client(app, BaseResponse, use_cookies=True)
def tearDown(self):
pass
def _check_url(self, url_to_test, user_id, expected_code=200):
if user_id is not None:
self.login_user(user_id)
self.resp = self.client.get(url_to_test)
self.assertEqual(self.resp.status_code, expected_code)
if user_id is not None:
self.logout_user()
def login_user(self, user_id):
self.session_store.session['userid'] = user_id
def logout_user(self):
if 'userid' in self.session_store.session:
del self.session_store.session['userid']
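# --- Hedged usage sketch (not in the original file) ---
# A minimal example of how URLTestCase is presumably extended: a concrete test
# class drives _check_url() anonymously and as a logged-in user. The URL and
# the user id are hypothetical, not taken from the foreman code base.
class ExampleURLTests(URLTestCase):
    def test_index_anonymous(self):
        self._check_url('/', None)

    def test_index_logged_in(self):
        self._check_url('/', user_id=1, expected_code=200)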
| ubunteroz/foreman | foreman/tests/url_tests/base_tester.py | Python | gpl-3.0 | 1,395 |
"""
Test whether a process started by lldb has no extra file descriptors open.
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test import lldbutil
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
def python_leaky_fd_version(test):
import sys
# Python random module leaks file descriptors on some versions.
if sys.version_info >= (2, 7, 8) and sys.version_info < (2, 7, 10):
return "Python random module leaks file descriptors in this python version"
return None
class AvoidsFdLeakTestCase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
@expectedFailure(python_leaky_fd_version, "bugs.freebsd.org/197376")
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr25624 still failing with Python 2.7.10")
# The check for descriptor leakage needs to be implemented differently
# here.
@skipIfWindows
@skipIfTargetAndroid() # Android has some other file descriptors opened by the shell
@skipIfDarwinEmbedded # <rdar://problem/33888742> # debugserver on ios has an extra fd open on launch
def test_fd_leak_basic(self):
self.do_test([])
@expectedFailure(python_leaky_fd_version, "bugs.freebsd.org/197376")
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr25624 still failing with Python 2.7.10")
# The check for descriptor leakage needs to be implemented differently
# here.
@skipIfWindows
@skipIfTargetAndroid() # Android has some other file descriptors opened by the shell
@skipIfDarwinEmbedded # <rdar://problem/33888742> # debugserver on ios has an extra fd open on launch
def test_fd_leak_log(self):
self.do_test(["log enable -f '/dev/null' lldb commands"])
def do_test(self, commands):
self.build()
exe = self.getBuildArtifact("a.out")
for c in commands:
self.runCmd(c)
target = self.dbg.CreateTarget(exe)
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
self.assertTrue(
process.GetState() == lldb.eStateExited,
"Process should have exited.")
self.assertTrue(
process.GetExitStatus() == 0,
"Process returned non-zero status. Were incorrect file descriptors passed?")
@expectedFailure(python_leaky_fd_version, "bugs.freebsd.org/197376")
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr25624 still failing with Python 2.7.10")
# The check for descriptor leakage needs to be implemented differently
# here.
@skipIfWindows
@skipIfTargetAndroid() # Android has some other file descriptors opened by the shell
@skipIfDarwinEmbedded # <rdar://problem/33888742> # debugserver on ios has an extra fd open on launch
def test_fd_leak_multitarget(self):
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
breakpoint = target.BreakpointCreateBySourceRegex(
'Set breakpoint here', lldb.SBFileSpec("main.c", False))
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process1 = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process1, PROCESS_IS_VALID)
self.assertTrue(
process1.GetState() == lldb.eStateStopped,
"Process should have been stopped.")
target2 = self.dbg.CreateTarget(exe)
process2 = target2.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process2, PROCESS_IS_VALID)
self.assertTrue(
process2.GetState() == lldb.eStateExited,
"Process should have exited.")
self.assertTrue(
process2.GetExitStatus() == 0,
"Process returned non-zero status. Were incorrect file descriptors passed?")
| youtube/cobalt | third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/avoids-fd-leak/TestFdLeak.py | Python | bsd-3-clause | 4,059 |
# -*- coding: utf8 -*-
__module__ = "test_Array.py"
__author__ = "Jonathan D. Lettvin"
__copyright__ = "\
Copyright(C) 2016 Jonathan D. Lettvin, All Rights Reserved"
__credits__ = [ "Jonathan D. Lettvin" ]
__license__ = "GPLv3"
__version__ = "0.0.1"
__maintainer__ = "Jonathan D. Lettvin"
__email__ = "jlettvin@gmail.com"
__contact__ = "jlettvin@gmail.com"
__status__ = "Demonstration"
__date__ = "20161107"
import unittest2
import inspect
import sys
sys.path.append('.')
sys.path.append('..')
from Self import ( Self )
from Array import ( Array )
class ArrayTestCase(unittest2.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_1x1(self):
"""
Construct a zero-initialized one member array.
Check for expected initialization.
"""
data = [[0]]
expect = {'shape': (1, 1), 'data': data, 'size': 1}
uniarray = Array((1,1), 0)
self.assertEquals(uniarray, expect, Self.doc())
def test_1x2(self):
"""
Construct a -1 initialized two member array.
Check for expected initialization.
"""
data = [[-1, -1]]
expect = {'shape': (1, 2), 'data': data, 'size': 2}
uniarray = Array((1,2), -1)
self.assertEquals(uniarray, expect, Self.doc())
def test_2x2x2(self):
"""
Construct an 8-initialized eight member array.
Check for expected initialization.
"""
data = [[[8, 8], [8, 8]], [[8, 8], [8, 8]]]
expect = {'shape': (2, 2, 2), 'data': data, 'size': 8}
uniarray = Array((2, 2, 2), 8)
self.assertEquals(uniarray, expect, Self.doc())
def test_2x2x2_modify_element(self):
"""
Construct an 8-initialized eight member array.
Modify last element by index.
"""
data = [[[8, 8], [8, 8]], [[8, 8], [8, 9]]]
expect = {'shape': (2, 2, 2), 'data': data, 'size': 8}
uniarray = Array((2, 2, 2), 8)
uniarray[1,1,1] = 9
self.assertEquals(uniarray, expect, Self.doc())
def test_2x2x2_modify_pair(self):
"""
Construct an 8 initialized eight member array.
Modify a pair.
"""
data = [[[8, 8], [8, 8]], [[8, 8], [7, 9]]]
expect = {'shape': (2, 2, 2), 'data': data, 'size': 8}
uniarray = Array((2, 2, 2), 8)
uniarray[1,1] = [7, 9]
self.assertEquals(uniarray, expect, Self.doc())
def test_2x2x2_modify_block(self):
"""
Construct a 0 initialized eight member array.
Modify a top-level block.
"""
data = [[[0, 0], [0, 0]], [[5, 6], [7, 8]]]
expect = {'shape': (2, 2, 2), 'data': data, 'size': 8}
uniarray = Array((2, 2, 2), 0)
#uniarray[1] = [[5, 6], [7, 8]]
#self.assertEquals(uniarray, expect, Self.doc())
def test_2x3_convert_4x1(self):
"""
Construct a 2-initialized six member array.
Convert it to a 4x1 array and check the result.
"""
before = [[2, 2, 2], [2, 2, 2]]
after = [[5], [5], [5], [5]]
expect = {'shape': (4, 1), 'data': after, 'size': 4}
uniarray = Array((2,3), 2)
uniarray(**expect)
self.assertEquals(uniarray, expect, Self.doc())
| jlettvin/Unicode | py2/test/test_Array.py | Python | gpl-3.0 | 3,301 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic test utilities."""
from ironic.common import states
def get_test_ipmi_info():
return {
"ipmi_address": "1.2.3.4",
"ipmi_username": "admin",
"ipmi_password": "fake"
}
def get_test_ipmi_bridging_parameters():
return {
"ipmi_bridging": "dual",
"ipmi_local_address": "0x20",
"ipmi_transit_channel": "0",
"ipmi_transit_address": "0x82",
"ipmi_target_channel": "7",
"ipmi_target_address": "0x72"
}
def get_test_ssh_info(auth_type='password'):
result = {
"ssh_address": "1.2.3.4",
"ssh_username": "admin",
"ssh_port": 22,
"ssh_virt_type": "vbox",
}
if 'password' == auth_type:
result['ssh_password'] = 'fake'
elif 'file' == auth_type:
result['ssh_key_filename'] = '/not/real/file'
elif 'key' == auth_type:
result['ssh_key_contents'] = '--BEGIN PRIVATE ...blah'
elif 'too_many' == auth_type:
result['ssh_password'] = 'fake'
result['ssh_key_filename'] = '/not/real/file'
else:
# No auth details (is invalid)
pass
return result
def get_test_pxe_driver_info():
return {
"pxe_deploy_kernel": "glance://deploy_kernel_uuid",
"pxe_deploy_ramdisk": "glance://deploy_ramdisk_uuid",
}
def get_test_pxe_instance_info():
return {
"image_source": "glance://image_uuid",
"root_gb": 100,
}
def get_test_seamicro_info():
return {
"seamicro_api_endpoint": "http://1.2.3.4",
"seamicro_username": "admin",
"seamicro_password": "fake",
"seamicro_server_id": "0/0",
}
def get_test_ilo_info():
return {
"ilo_address": "1.2.3.4",
"ilo_username": "admin",
"ilo_password": "fake",
}
def get_test_drac_info():
return {
"drac_host": "1.2.3.4",
"drac_port": "443",
"drac_path": "/wsman",
"drac_protocol": "https",
"drac_username": "admin",
"drac_password": "fake",
}
def get_test_agent_instance_info():
return {
'image_source': 'fake-image',
'image_url': 'http://image',
'image_checksum': 'checksum'
}
def get_test_agent_driver_info():
return {
'agent_url': 'http://127.0.0.1/foo',
'deploy_kernel': 'glance://deploy_kernel_uuid',
'deploy_ramdisk': 'glance://deploy_ramdisk_uuid',
}
def get_test_iboot_info():
return {
"iboot_address": "1.2.3.4",
"iboot_username": "admin",
"iboot_password": "fake",
}
def get_test_snmp_info(**kw):
result = {
"snmp_driver": kw.get("snmp_driver", "teltronix"),
"snmp_address": kw.get("snmp_address", "1.2.3.4"),
"snmp_port": kw.get("snmp_port", "161"),
"snmp_outlet": kw.get("snmp_outlet", "1"),
"snmp_version": kw.get("snmp_version", "1")
}
if result["snmp_version"] in ("1", "2c"):
result["snmp_community"] = kw.get("snmp_community", "public")
elif result["snmp_version"] == "3":
result["snmp_security"] = kw.get("snmp_security", "public")
return result
def get_test_node(**kw):
properties = {
"cpu_arch": "x86_64",
"cpus": "8",
"local_gb": "10",
"memory_mb": "4096",
}
fake_info = {"foo": "bar"}
return {
'id': kw.get('id', 123),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'),
'chassis_id': kw.get('chassis_id', 42),
'power_state': kw.get('power_state', states.NOSTATE),
'target_power_state': kw.get('target_power_state', states.NOSTATE),
'provision_state': kw.get('provision_state', states.NOSTATE),
'target_provision_state': kw.get('target_provision_state',
states.NOSTATE),
'provision_updated_at': kw.get('provision_updated_at'),
'last_error': kw.get('last_error'),
'instance_uuid': kw.get('instance_uuid'),
'instance_info': kw.get('instance_info', fake_info),
'driver': kw.get('driver', 'fake'),
'driver_info': kw.get('driver_info', fake_info),
'properties': kw.get('properties', properties),
'reservation': kw.get('reservation'),
'maintenance': kw.get('maintenance', False),
'console_enabled': kw.get('console_enabled', False),
'extra': kw.get('extra', {}),
'updated_at': kw.get('created_at'),
'created_at': kw.get('updated_at'),
}
def get_test_port(**kw):
return {
'id': kw.get('id', 987),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c781'),
'node_id': kw.get('node_id', 123),
'address': kw.get('address', '52:54:00:cf:2d:31'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def get_test_chassis(**kw):
return {
'id': kw.get('id', 42),
'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'),
'extra': kw.get('extra', {}),
'description': kw.get('description', 'data-center-1-chassis'),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def get_test_conductor(**kw):
return {
'id': kw.get('id', 6),
'hostname': kw.get('hostname', 'test-conductor-node'),
'drivers': kw.get('drivers', ['fake-driver', 'null-driver']),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
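# --- Hedged usage sketch (not part of the original module) ---
# The helpers above share a kwargs-override pattern: every field has a default
# and any field can be replaced per test. A rough illustration (the driver name
# and MAC address are made up):
if __name__ == '__main__':
    node = get_test_node(driver='fake_ipmitool',
                         driver_info=get_test_ipmi_info(),
                         maintenance=True)
    port = get_test_port(node_id=node['id'], address='52:54:00:00:00:01')
    print(node['driver'], node['maintenance'], port['address'])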
| faizan-barmawer/openstack_ironic | ironic/tests/db/utils.py | Python | apache-2.0 | 6,199 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import logging
import random
import shlex
from eventlet.green import subprocess
from eventlet import greenthread
from nova.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
def execute(*cmd, **kwargs):
"""
Helper method to shell out and execute a command through subprocess with
optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
:type process_input: string
:param check_exit_code: Defaults to 0. Will raise
:class:`ProcessExecutionError`
if the command exits without returning this value
as a returncode
:type check_exit_code: int
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix all cmd's with
:type root_helper: string
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', 0)
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
if len(kwargs):
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root:
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=True)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if (isinstance(check_exit_code, int) and
not isinstance(check_exit_code, bool) and
_returncode != check_exit_code):
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls; without
# it, two execute calls in a row hang the second one
greenthread.sleep(0)
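# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the execute() contract documented above: stdout and
# stderr come back as a tuple, and a non-zero exit raises ProcessExecutionError
# unless check_exit_code says otherwise. The commands are examples only.
if __name__ == '__main__':
    out, err = execute('echo', 'hello')
    try:
        execute('false')  # exits 1, so this raises
    except ProcessExecutionError as exc:
        LOG.debug('command failed as expected: %s', exc)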
| zestrada/nova-cs498cc | nova/openstack/common/processutils.py | Python | apache-2.0 | 5,489 |
from lsd.models import LSDRun, RunTrees, RunTaxonDates, RunOutGroups
from lsd.exceptions.RunParserException import RunParserException
from datetime import datetime
from django.utils import timezone
from django.db import transaction
import re
import tempfile
class LSDRunParser:
"""A parser of request that returns a LSDRun saved in DB"""
@staticmethod
def parseFile(f):
tree="";
for chunk in f.chunks():
tree=tree+chunk
return(tree);
@staticmethod
def parse(request):
tree=request.POST['inputtreestring'];
if tree=="":
raise RunParserException("Information is missing","Input tree file is missing")
dates=""
if 'inputdate' in request.FILES and request.POST['datesornot']=="yes":
dates=LSDRunParser.parseFile(request.FILES['inputdate']);
rate=request.POST.get('substrate','None')
if rate != 'None' and rate != '' :
substrate=float(rate)
else:
substrate=-1
outgroup = request.POST.get("outgroupornot")=="yes"
outgroups= request.POST['outgrouplist']
# if 'outgroups' in request.FILES:
# outgroups=LSDRunParser.parseFile(request.FILES['outgroups']);
if request.POST['rootdate'] == '':
rootdate=-1
else:
rootdate=float(request.POST['rootdate'])
if request.POST['tipsdate'] == '':
tipsdate=-1
else:
tipsdate=float(request.POST['tipsdate'])
if request.POST['varianceparam'] == '':
varianceparam=-1
else:
varianceparam=int(request.POST['varianceparam'])
if request.POST['lowboundrate'] == '':
lowboundrate=-1
else:
lowboundrate=float(request.POST['lowboundrate'])
if request.POST['seqlength'] == '':
seqlength = -1
else:
seqlength = int(request.POST['seqlength'])
if request.POST['nb_samples'] == '':
nb_samples = 0
else:
nb_samples=int(request.POST['nb_samples'])
confinterval=request.POST.get('with_conf_int', False)
if confinterval == 'on':
confinterval = True
if confinterval == 'off':
confinterval = False
r = LSDRun(
run_date = timezone.now(),
run_root_date = rootdate,
run_tips_date = tipsdate,
run_constraints = request.POST.get('constraints', False),
run_with_conf_int = confinterval,
run_nb_samples = nb_samples,
run_variance = request.POST.get('variancesornot', False),
run_seq_length = seqlength,
run_param_variance = varianceparam,
run_rooting_method = "no" if outgroup else request.POST['estimateroot'],
run_rate_lower_bound = lowboundrate)
r.save()
with transaction.atomic():
r.runtrees_set.create(
tree_newick = tree,
tree_index = 0,
tree_subst_rate = substrate
)
with transaction.atomic():
try:
index = 0
num = 0
for line in dates.splitlines():
if index == 0 :
num = int(line)
else:
date = re.split('\s',line)
r.runtaxondates_set.create(
taxon_name = date[0],
taxon_date = date[1])
index+=1
except :
raise RunParserException("Parsing error","Cannot parse date file")
with transaction.atomic():
for taxon in outgroups.splitlines():
r.runoutgroups_set.create(taxon_name=taxon)
r.save()
return(r)
| fredericlemoine/lsd-web | lsd_web/lsd/controlers/LSDRunParser.py | Python | gpl-2.0 | 4,054 |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""HTTP utility code shared by clients and servers.
This module also defines the `HTTPServerRequest` class which is exposed
via `tornado.web.RequestHandler.request`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import calendar
import collections
import copy
import datetime
import email.utils
import numbers
import re
import time
from tornado.escape import native_str, parse_qs_bytes, utf8
from tornado.log import gen_log
from tornado.util import ObjectDict
try:
import Cookie # py2
except ImportError:
import http.cookies as Cookie # py3
try:
from httplib import responses # py2
except ImportError:
from http.client import responses # py3
# responses is unused in this file, but we re-export it to other files.
# Reference it so pyflakes doesn't complain.
responses
try:
from urllib import urlencode # py2
except ImportError:
from urllib.parse import urlencode # py3
try:
from ssl import SSLError
except ImportError:
# ssl is unavailable on app engine.
class SSLError(Exception):
pass
# RFC 7230 section 3.5: a recipient MAY recognize a single LF as a line
# terminator and ignore any preceding CR.
_CRLF_RE = re.compile(r'\r?\n')
class _NormalizedHeaderCache(dict):
"""Dynamic cached mapping of header names to Http-Header-Case.
Implemented as a dict subclass so that cache hits are as fast as a
normal dict lookup, without the overhead of a python function
call.
>>> normalized_headers = _NormalizedHeaderCache(10)
>>> normalized_headers["coNtent-TYPE"]
'Content-Type'
"""
def __init__(self, size):
super(_NormalizedHeaderCache, self).__init__()
self.size = size
self.queue = collections.deque()
def __missing__(self, key):
normalized = "-".join([w.capitalize() for w in key.split("-")])
self[key] = normalized
self.queue.append(key)
if len(self.queue) > self.size:
# Limit the size of the cache. LRU would be better, but this
# simpler approach should be fine. In Python 2.7+ we could
# use OrderedDict (or in 3.2+, @functools.lru_cache).
old_key = self.queue.popleft()
del self[old_key]
return normalized
_normalized_headers = _NormalizedHeaderCache(1000)
class HTTPHeaders(dict):
"""A dictionary that maintains ``Http-Header-Case`` for all keys.
Supports multiple values per key via a pair of new methods,
`add()` and `get_list()`. The regular dictionary interface
returns a single value per key, with multiple values joined by a
comma.
>>> h = HTTPHeaders({"content-type": "text/html"})
>>> list(h.keys())
['Content-Type']
>>> h["Content-Type"]
'text/html'
>>> h.add("Set-Cookie", "A=B")
>>> h.add("Set-Cookie", "C=D")
>>> h["set-cookie"]
'A=B,C=D'
>>> h.get_list("set-cookie")
['A=B', 'C=D']
>>> for (k,v) in sorted(h.get_all()):
... print('%s: %s' % (k,v))
...
Content-Type: text/html
Set-Cookie: A=B
Set-Cookie: C=D
"""
def __init__(self, *args, **kwargs):
# Don't pass args or kwargs to dict.__init__, as it will bypass
# our __setitem__
dict.__init__(self)
self._as_list = {}
self._last_key = None
if (len(args) == 1 and len(kwargs) == 0 and
isinstance(args[0], HTTPHeaders)):
# Copy constructor
for k, v in args[0].get_all():
self.add(k, v)
else:
# Dict-style initialization
self.update(*args, **kwargs)
# new public methods
def add(self, name, value):
"""Adds a new value for the given key."""
norm_name = _normalized_headers[name]
self._last_key = norm_name
if norm_name in self:
# bypass our override of __setitem__ since it modifies _as_list
dict.__setitem__(self, norm_name,
native_str(self[norm_name]) + ',' +
native_str(value))
self._as_list[norm_name].append(value)
else:
self[norm_name] = value
def get_list(self, name):
"""Returns all values for the given header as a list."""
norm_name = _normalized_headers[name]
return self._as_list.get(norm_name, [])
def get_all(self):
"""Returns an iterable of all (name, value) pairs.
If a header has multiple values, multiple pairs will be
returned with the same name.
"""
for name, values in self._as_list.items():
for value in values:
yield (name, value)
def parse_line(self, line):
"""Updates the dictionary with a single header line.
>>> h = HTTPHeaders()
>>> h.parse_line("Content-Type: text/html")
>>> h.get('content-type')
'text/html'
"""
if line[0].isspace():
# continuation of a multi-line header
new_part = ' ' + line.lstrip()
self._as_list[self._last_key][-1] += new_part
dict.__setitem__(self, self._last_key,
self[self._last_key] + new_part)
else:
name, value = line.split(":", 1)
self.add(name, value.strip())
@classmethod
def parse(cls, headers):
"""Returns a dictionary from HTTP header text.
>>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
>>> sorted(h.items())
[('Content-Length', '42'), ('Content-Type', 'text/html')]
"""
h = cls()
for line in _CRLF_RE.split(headers):
if line:
h.parse_line(line)
return h
# dict implementation overrides
def __setitem__(self, name, value):
norm_name = _normalized_headers[name]
dict.__setitem__(self, norm_name, value)
self._as_list[norm_name] = [value]
def __getitem__(self, name):
return dict.__getitem__(self, _normalized_headers[name])
def __delitem__(self, name):
norm_name = _normalized_headers[name]
dict.__delitem__(self, norm_name)
del self._as_list[norm_name]
def __contains__(self, name):
norm_name = _normalized_headers[name]
return dict.__contains__(self, norm_name)
def get(self, name, default=None):
return dict.get(self, _normalized_headers[name], default)
def update(self, *args, **kwargs):
# dict.update bypasses our __setitem__
for k, v in dict(*args, **kwargs).items():
self[k] = v
def copy(self):
# default implementation returns dict(self), not the subclass
return HTTPHeaders(self)
# Use our overridden copy method for the copy.copy module.
__copy__ = copy
def __deepcopy__(self, memo_dict):
# Our values are immutable strings, so our standard copy is
# effectively a deep copy.
return self.copy()
class HTTPServerRequest(object):
"""A single HTTP request.
All attributes are type `str` unless otherwise noted.
.. attribute:: method
HTTP request method, e.g. "GET" or "POST"
.. attribute:: uri
The requested uri.
.. attribute:: path
The path portion of `uri`
.. attribute:: query
The query portion of `uri`
.. attribute:: version
HTTP version specified in request, e.g. "HTTP/1.1"
.. attribute:: headers
`.HTTPHeaders` dictionary-like object for request headers. Acts like
a case-insensitive dictionary with additional methods for repeated
headers.
.. attribute:: body
Request body, if present, as a byte string.
.. attribute:: remote_ip
Client's IP address as a string. If ``HTTPServer.xheaders`` is set,
will pass along the real IP address provided by a load balancer
in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.
.. versionchanged:: 3.1
The list format of ``X-Forwarded-For`` is now supported.
.. attribute:: protocol
The protocol used, either "http" or "https". If ``HTTPServer.xheaders``
is set, will pass along the protocol used by a load balancer if
reported via an ``X-Scheme`` header.
.. attribute:: host
The requested hostname, usually taken from the ``Host`` header.
.. attribute:: arguments
GET/POST arguments are available in the arguments property, which
maps arguments names to lists of values (to support multiple values
for individual names). Names are of type `str`, while arguments
are byte strings. Note that this is different from
`.RequestHandler.get_argument`, which returns argument values as
unicode strings.
.. attribute:: query_arguments
Same format as ``arguments``, but contains only arguments extracted
from the query string.
.. versionadded:: 3.2
.. attribute:: body_arguments
Same format as ``arguments``, but contains only arguments extracted
from the request body.
.. versionadded:: 3.2
.. attribute:: files
File uploads are available in the files property, which maps file
names to lists of `.HTTPFile`.
.. attribute:: connection
An HTTP request is attached to a single HTTP connection, which can
be accessed through the "connection" attribute. Since connections
are typically kept open in HTTP/1.1, multiple requests can be handled
sequentially on a single connection.
.. versionchanged:: 4.0
Moved from ``tornado.httpserver.HTTPRequest``.
"""
def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None,
body=None, host=None, files=None, connection=None,
start_line=None):
if start_line is not None:
method, uri, version = start_line
self.method = method
self.uri = uri
self.version = version
self.headers = headers or HTTPHeaders()
self.body = body or b""
# set remote IP and protocol
context = getattr(connection, 'context', None)
self.remote_ip = getattr(context, 'remote_ip', None)
self.protocol = getattr(context, 'protocol', "http")
self.host = host or self.headers.get("Host") or "127.0.0.1"
self.files = files or {}
self.connection = connection
self._start_time = time.time()
self._finish_time = None
self.path, sep, self.query = uri.partition('?')
self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
self.query_arguments = copy.deepcopy(self.arguments)
self.body_arguments = {}
def supports_http_1_1(self):
"""Returns True if this request supports HTTP/1.1 semantics.
.. deprecated:: 4.0
Applications are less likely to need this information with the
introduction of `.HTTPConnection`. If you still need it, access
the ``version`` attribute directly.
"""
return self.version == "HTTP/1.1"
@property
def cookies(self):
"""A dictionary of Cookie.Morsel objects."""
if not hasattr(self, "_cookies"):
self._cookies = Cookie.SimpleCookie()
if "Cookie" in self.headers:
try:
self._cookies.load(
native_str(self.headers["Cookie"]))
except Exception:
self._cookies = {}
return self._cookies
def write(self, chunk, callback=None):
"""Writes the given chunk to the response stream.
.. deprecated:: 4.0
Use ``request.connection`` and the `.HTTPConnection` methods
to write the response.
"""
assert isinstance(chunk, bytes)
assert self.version.startswith("HTTP/1."), \
"deprecated interface ony supported in HTTP/1.x"
self.connection.write(chunk, callback=callback)
def finish(self):
"""Finishes this HTTP request on the open connection.
.. deprecated:: 4.0
Use ``request.connection`` and the `.HTTPConnection` methods
to write the response.
"""
self.connection.finish()
self._finish_time = time.time()
def full_url(self):
"""Reconstructs the full URL for this request."""
return self.protocol + "://" + self.host + self.uri
def request_time(self):
"""Returns the amount of time it took for this request to execute."""
if self._finish_time is None:
return time.time() - self._start_time
else:
return self._finish_time - self._start_time
def get_ssl_certificate(self, binary_form=False):
"""Returns the client's SSL certificate, if any.
To use client certificates, the HTTPServer's
`ssl.SSLContext.verify_mode` field must be set, e.g.::
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain("foo.crt", "foo.key")
ssl_ctx.load_verify_locations("cacerts.pem")
ssl_ctx.verify_mode = ssl.CERT_REQUIRED
server = HTTPServer(app, ssl_options=ssl_ctx)
By default, the return value is a dictionary (or None, if no
client certificate is present). If ``binary_form`` is true, a
DER-encoded form of the certificate is returned instead. See
SSLSocket.getpeercert() in the standard library for more
details.
http://docs.python.org/library/ssl.html#sslsocket-objects
"""
try:
return self.connection.stream.socket.getpeercert(
binary_form=binary_form)
except SSLError:
return None
def _parse_body(self):
parse_body_arguments(
self.headers.get("Content-Type", ""), self.body,
self.body_arguments, self.files,
self.headers)
for k, v in self.body_arguments.items():
self.arguments.setdefault(k, []).extend(v)
def __repr__(self):
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s, headers=%s)" % (
self.__class__.__name__, args, dict(self.headers))
class HTTPInputError(Exception):
"""Exception class for malformed HTTP requests or responses
from remote sources.
.. versionadded:: 4.0
"""
pass
class HTTPOutputError(Exception):
"""Exception class for errors in HTTP output.
.. versionadded:: 4.0
"""
pass
class HTTPServerConnectionDelegate(object):
"""Implement this interface to handle requests from `.HTTPServer`.
.. versionadded:: 4.0
"""
def start_request(self, server_conn, request_conn):
"""This method is called by the server when a new request has started.
:arg server_conn: is an opaque object representing the long-lived
(e.g. tcp-level) connection.
:arg request_conn: is a `.HTTPConnection` object for a single
request/response exchange.
This method should return a `.HTTPMessageDelegate`.
"""
raise NotImplementedError()
def on_close(self, server_conn):
"""This method is called when a connection has been closed.
:arg server_conn: is a server connection that has previously been
passed to ``start_request``.
"""
pass
class HTTPMessageDelegate(object):
"""Implement this interface to handle an HTTP request or response.
.. versionadded:: 4.0
"""
def headers_received(self, start_line, headers):
"""Called when the HTTP headers have been received and parsed.
:arg start_line: a `.RequestStartLine` or `.ResponseStartLine`
depending on whether this is a client or server message.
:arg headers: a `.HTTPHeaders` instance.
Some `.HTTPConnection` methods can only be called during
``headers_received``.
May return a `.Future`; if it does the body will not be read
until it is done.
"""
pass
def data_received(self, chunk):
"""Called when a chunk of data has been received.
May return a `.Future` for flow control.
"""
pass
def finish(self):
"""Called after the last chunk of data has been received."""
pass
def on_connection_close(self):
"""Called if the connection is closed without finishing the request.
If ``headers_received`` is called, either ``finish`` or
``on_connection_close`` will be called, but not both.
"""
pass
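# A minimal sketch (illustrative, not part of this module) of a server-side
# delegate that buffers the request body; the class and attribute names are
# hypothetical:
#
#     class BufferingDelegate(HTTPMessageDelegate):
#         def __init__(self):
#             self.chunks = []
#         def headers_received(self, start_line, headers):
#             self.method = start_line.method
#         def data_received(self, chunk):
#             self.chunks.append(chunk)
#         def finish(self):
#             self.body = b"".join(self.chunks)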
class HTTPConnection(object):
"""Applications use this interface to write their responses.
.. versionadded:: 4.0
"""
def write_headers(self, start_line, headers, chunk=None, callback=None):
"""Write an HTTP header block.
:arg start_line: a `.RequestStartLine` or `.ResponseStartLine`.
:arg headers: a `.HTTPHeaders` instance.
:arg chunk: the first (optional) chunk of data. This is an optimization
so that small responses can be written in the same call as their
headers.
:arg callback: a callback to be run when the write is complete.
The ``version`` field of ``start_line`` is ignored.
Returns a `.Future` if no callback is given.
"""
raise NotImplementedError()
def write(self, chunk, callback=None):
"""Writes a chunk of body data.
The callback will be run when the write is complete. If no callback
is given, returns a Future.
"""
raise NotImplementedError()
def finish(self):
"""Indicates that the last body data has been written.
"""
raise NotImplementedError()
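# A minimal sketch of answering a request through this interface
# (illustrative; a real ``conn`` is supplied by the server and the header
# values here are made up):
#
#     start_line = ResponseStartLine('HTTP/1.1', 200, 'OK')
#     headers = HTTPHeaders({'Content-Length': '2'})
#     conn.write_headers(start_line, headers, chunk=b'ok')
#     conn.finish()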
def url_concat(url, args):
"""Concatenate url and arguments regardless of whether
url has existing query parameters.
``args`` may be either a dictionary or a list of key-value pairs
    (the latter allows for multiple values with the same key).
>>> url_concat("http://example.com/foo", dict(c="d"))
'http://example.com/foo?c=d'
>>> url_concat("http://example.com/foo?a=b", dict(c="d"))
'http://example.com/foo?a=b&c=d'
>>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")])
'http://example.com/foo?a=b&c=d&c=d2'
"""
if not args:
return url
if url[-1] not in ('?', '&'):
url += '&' if ('?' in url) else '?'
return url + urlencode(args)
class HTTPFile(ObjectDict):
"""Represents a file uploaded via a form.
For backwards compatibility, its instance attributes are also
accessible as dictionary keys.
* ``filename``
* ``body``
* ``content_type``
"""
pass
def _parse_request_range(range_header):
"""Parses a Range header.
Returns either ``None`` or tuple ``(start, end)``.
Note that while the HTTP headers use inclusive byte positions,
this method returns indexes suitable for use in slices.
>>> start, end = _parse_request_range("bytes=1-2")
>>> start, end
(1, 3)
>>> [0, 1, 2, 3, 4][start:end]
[1, 2]
>>> _parse_request_range("bytes=6-")
(6, None)
>>> _parse_request_range("bytes=-6")
(-6, None)
>>> _parse_request_range("bytes=-0")
(None, 0)
>>> _parse_request_range("bytes=")
(None, None)
>>> _parse_request_range("foo=42")
>>> _parse_request_range("bytes=1-2,6-10")
    Note: only one range is supported (e.g., ``bytes=1-2,6-10`` is not allowed).
See [0] for the details of the range header.
[0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
"""
unit, _, value = range_header.partition("=")
unit, value = unit.strip(), value.strip()
if unit != "bytes":
return None
start_b, _, end_b = value.partition("-")
try:
start = _int_or_none(start_b)
end = _int_or_none(end_b)
except ValueError:
return None
if end is not None:
if start is None:
if end != 0:
start = -end
end = None
else:
end += 1
return (start, end)
def _get_content_range(start, end, total):
"""Returns a suitable Content-Range header:
>>> print(_get_content_range(None, 1, 4))
bytes 0-0/4
>>> print(_get_content_range(1, 3, 4))
bytes 1-2/4
>>> print(_get_content_range(None, None, 4))
bytes 0-3/4
"""
start = start or 0
end = (end or total) - 1
return "bytes %s-%s/%s" % (start, end, total)
def _int_or_none(val):
val = val.strip()
if val == "":
return None
return int(val)
def parse_body_arguments(content_type, body, arguments, files, headers=None):
"""Parses a form request body.
Supports ``application/x-www-form-urlencoded`` and
``multipart/form-data``. The ``content_type`` parameter should be
a string and ``body`` should be a byte string. The ``arguments``
and ``files`` parameters are dictionaries that will be updated
with the parsed contents.
"""
if headers and 'Content-Encoding' in headers:
gen_log.warning("Unsupported Content-Encoding: %s",
headers['Content-Encoding'])
return
if content_type.startswith("application/x-www-form-urlencoded"):
try:
uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
except Exception as e:
gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
uri_arguments = {}
for name, values in uri_arguments.items():
if values:
arguments.setdefault(name, []).extend(values)
elif content_type.startswith("multipart/form-data"):
try:
fields = content_type.split(";")
for field in fields:
k, sep, v = field.strip().partition("=")
if k == "boundary" and v:
parse_multipart_form_data(utf8(v), body, arguments, files)
break
else:
raise ValueError("multipart boundary not found")
except Exception as e:
gen_log.warning("Invalid multipart/form-data: %s", e)
def parse_multipart_form_data(boundary, data, arguments, files):
"""Parses a ``multipart/form-data`` body.
The ``boundary`` and ``data`` parameters are both byte strings.
The dictionaries given in the arguments and files parameters
will be updated with the contents of the body.
"""
# The standard allows for the boundary to be quoted in the header,
# although it's rare (it happens at least for google app engine
# xmpp). I think we're also supposed to handle backslash-escapes
# here but I'll save that until we see a client that uses them
# in the wild.
if boundary.startswith(b'"') and boundary.endswith(b'"'):
boundary = boundary[1:-1]
final_boundary_index = data.rfind(b"--" + boundary + b"--")
if final_boundary_index == -1:
gen_log.warning("Invalid multipart/form-data: no final boundary")
return
parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
for part in parts:
if not part:
continue
eoh = part.find(b"\r\n\r\n")
if eoh == -1:
gen_log.warning("multipart/form-data missing headers")
continue
headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
disp_header = headers.get("Content-Disposition", "")
disposition, disp_params = _parse_header(disp_header)
if disposition != "form-data" or not part.endswith(b"\r\n"):
gen_log.warning("Invalid multipart/form-data")
continue
value = part[eoh + 4:-2]
if not disp_params.get("name"):
gen_log.warning("multipart/form-data value missing name")
continue
name = disp_params["name"]
if disp_params.get("filename"):
ctype = headers.get("Content-Type", "application/unknown")
files.setdefault(name, []).append(HTTPFile(
filename=disp_params["filename"], body=value,
content_type=ctype))
else:
arguments.setdefault(name, []).append(value)
def format_timestamp(ts):
"""Formats a timestamp in the format used by HTTP.
The argument may be a numeric timestamp as returned by `time.time`,
a time tuple as returned by `time.gmtime`, or a `datetime.datetime`
object.
>>> format_timestamp(1359312200)
'Sun, 27 Jan 2013 18:43:20 GMT'
"""
if isinstance(ts, numbers.Real):
pass
elif isinstance(ts, (tuple, time.struct_time)):
ts = calendar.timegm(ts)
elif isinstance(ts, datetime.datetime):
ts = calendar.timegm(ts.utctimetuple())
else:
raise TypeError("unknown timestamp type: %r" % ts)
return email.utils.formatdate(ts, usegmt=True)
RequestStartLine = collections.namedtuple(
'RequestStartLine', ['method', 'path', 'version'])
def parse_request_start_line(line):
"""Returns a (method, path, version) tuple for an HTTP 1.x request line.
    The return value is a `collections.namedtuple`.
>>> parse_request_start_line("GET /foo HTTP/1.1")
RequestStartLine(method='GET', path='/foo', version='HTTP/1.1')
"""
try:
method, path, version = line.split(" ")
except ValueError:
raise HTTPInputError("Malformed HTTP request line")
if not re.match(r"^HTTP/1\.[0-9]$", version):
raise HTTPInputError(
"Malformed HTTP version in HTTP Request-Line: %r" % version)
return RequestStartLine(method, path, version)
ResponseStartLine = collections.namedtuple(
'ResponseStartLine', ['version', 'code', 'reason'])
def parse_response_start_line(line):
"""Returns a (version, code, reason) tuple for an HTTP 1.x response line.
    The return value is a `collections.namedtuple`.
>>> parse_response_start_line("HTTP/1.1 200 OK")
ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
"""
line = native_str(line)
    match = re.match(r"(HTTP/1\.[0-9]) ([0-9]+) ([^\r]*)", line)
if not match:
raise HTTPInputError("Error parsing response start line")
return ResponseStartLine(match.group(1), int(match.group(2)),
match.group(3))
# _parseparam and _parse_header are copied and modified from python2.7's cgi.py
# The original 2.7 version of this code did not correctly support some
# combinations of semicolons and double quotes.
# It has also been modified to support valueless parameters as seen in
# websocket extension negotiations.
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def _parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
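    For example:
    >>> _parse_header('form-data; name="fieldA"')
    ('form-data', {'name': 'fieldA'})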
"""
parts = _parseparam(';' + line)
key = next(parts)
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
else:
pdict[p] = None
return key, pdict
def _encode_header(key, pdict):
"""Inverse of _parse_header.
>>> _encode_header('permessage-deflate',
... {'client_max_window_bits': 15, 'client_no_context_takeover': None})
'permessage-deflate; client_max_window_bits=15; client_no_context_takeover'
"""
if not pdict:
return key
out = [key]
# Sort the parameters just to make it easy to test.
for k, v in sorted(pdict.items()):
if v is None:
out.append(k)
else:
# TODO: quote if necessary.
out.append('%s=%s' % (k, v))
return '; '.join(out)
def doctests():
import doctest
return doctest.DocTestSuite()
def split_host_and_port(netloc):
"""Returns ``(host, port)`` tuple from ``netloc``.
Returned ``port`` will be ``None`` if not present.
.. versionadded:: 4.1
"""
match = re.match(r'^(.+):(\d+)$', netloc)
if match:
host = match.group(1)
port = int(match.group(2))
else:
host = netloc
port = None
return (host, port)
|
kayzhou/tornado
|
tornado/httputil.py
|
Python
|
apache-2.0
| 29,436
|
"""
Integration with the Rachio Iro sprinkler system controller.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.rachio/
"""
from abc import abstractmethod
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.rachio import (DOMAIN as DOMAIN_RACHIO,
KEY_DEVICE_ID,
KEY_STATUS,
KEY_SUBTYPE,
SIGNAL_RACHIO_CONTROLLER_UPDATE,
STATUS_OFFLINE,
STATUS_ONLINE,
SUBTYPE_OFFLINE,
SUBTYPE_ONLINE,)
from homeassistant.helpers.dispatcher import dispatcher_connect
DEPENDENCIES = ['rachio']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Rachio binary sensors."""
devices = []
for controller in hass.data[DOMAIN_RACHIO].controllers:
devices.append(RachioControllerOnlineBinarySensor(hass, controller))
add_entities(devices)
_LOGGER.info("%d Rachio binary sensor(s) added", len(devices))
class RachioControllerBinarySensor(BinarySensorDevice):
"""Represent a binary sensor that reflects a Rachio state."""
def __init__(self, hass, controller, poll=True):
"""Set up a new Rachio controller binary sensor."""
self._controller = controller
if poll:
self._state = self._poll_update()
else:
self._state = None
dispatcher_connect(hass, SIGNAL_RACHIO_CONTROLLER_UPDATE,
self._handle_any_update)
@property
def should_poll(self) -> bool:
"""Declare that this entity pushes its state to HA."""
return False
@property
def is_on(self) -> bool:
"""Return whether the sensor has a 'true' value."""
return self._state
def _handle_any_update(self, *args, **kwargs) -> None:
"""Determine whether an update event applies to this device."""
if args[0][KEY_DEVICE_ID] != self._controller.controller_id:
# For another device
return
# For this device
self._handle_update()
@abstractmethod
def _poll_update(self, data=None) -> bool:
"""Request the state from the API."""
pass
@abstractmethod
def _handle_update(self, *args, **kwargs) -> None:
"""Handle an update to the state of this sensor."""
pass
class RachioControllerOnlineBinarySensor(RachioControllerBinarySensor):
"""Represent a binary sensor that reflects if the controller is online."""
def __init__(self, hass, controller):
"""Set up a new Rachio controller online binary sensor."""
super().__init__(hass, controller, poll=False)
self._state = self._poll_update(controller.init_data)
@property
def name(self) -> str:
"""Return the name of this sensor including the controller name."""
return "{} online".format(self._controller.name)
@property
def device_class(self) -> str:
"""Return the class of this device, from component DEVICE_CLASSES."""
return 'connectivity'
@property
def icon(self) -> str:
"""Return the name of an icon for this sensor."""
return 'mdi:wifi-strength-4' if self.is_on\
else 'mdi:wifi-strength-off-outline'
def _poll_update(self, data=None) -> bool:
"""Request the state from the API."""
if data is None:
data = self._controller.rachio.device.get(
self._controller.controller_id)[1]
if data[KEY_STATUS] == STATUS_ONLINE:
return True
if data[KEY_STATUS] == STATUS_OFFLINE:
return False
_LOGGER.warning('"%s" reported in unknown state "%s"', self.name,
data[KEY_STATUS])
def _handle_update(self, *args, **kwargs) -> None:
"""Handle an update to the state of this sensor."""
if args[0][KEY_SUBTYPE] == SUBTYPE_ONLINE:
self._state = True
elif args[0][KEY_SUBTYPE] == SUBTYPE_OFFLINE:
self._state = False
self.schedule_update_ha_state()
|
persandstrom/home-assistant
|
homeassistant/components/binary_sensor/rachio.py
|
Python
|
apache-2.0
| 4,460
|
import argparse
import configparser
import sys
from ..api import _v1
class ConfigHandler:
def __init__(
self,
args: argparse.Namespace,
config: configparser.ConfigParser,
default_config: _v1._private.DefaultConfig,
config_filename: _v1._private.ConfigFilename,
):
self._args = args
self._config = config
self._default_config = default_config
self._config_filename = config_filename
def __call__(self):
if self._args.filename:
print(self._config_filename)
return
if self._args.default:
self._default_config().write(sys.stdout)
return
self._config.write(sys.stdout)
def add_args(parser: argparse.ArgumentParser):
parser.add_argument("--default", action="store_true", default=False)
parser.add_argument("--filename", action="store_true", default=False)
config_command = _v1.Command("config", "Show config", ConfigHandler, add_args)
_v1.register_command(config_command)
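# Illustrative behavior of the registered command (assuming the standard
# ``utt`` command-line entry point):
#   utt config             -> prints the effective configuration
#   utt config --default   -> prints the built-in default configuration
#   utt config --filename  -> prints the path of the config file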
|
larose/utt
|
utt/plugins/0_config.py
|
Python
|
gpl-3.0
| 1,041
|
# Copyright (c) 2010 Hunter Blanks http://artifex.org/~hblanks/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Initial, and very limited, unit tests for CloudWatchConnection.
"""
import datetime
from boto.ec2.cloudwatch import CloudWatchConnection
from tests.compat import unittest, OrderedDict
# HTTP response body for CloudWatchConnection.describe_alarms
DESCRIBE_ALARMS_BODY = """<DescribeAlarmsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<DescribeAlarmsResult>
<NextToken>mynexttoken</NextToken>
<MetricAlarms>
<member>
<StateUpdatedTimestamp>2011-11-18T23:43:59.111Z</StateUpdatedTimestamp>
<InsufficientDataActions/>
<StateReasonData>{"version":"1.0","queryDate":"2011-11-18T23:43:59.089+0000","startDate":"2011-11-18T23:30:00.000+0000","statistic":"Maximum","period":60,"recentDatapoints":[1.0,null,null,null,null,null,null,null,null,null,1.0],"threshold":1.0}</StateReasonData>
<AlarmArn>arn:aws:cloudwatch:us-east-1:1234:alarm:FancyAlarm</AlarmArn>
<AlarmConfigurationUpdatedTimestamp>2011-11-18T23:43:58.489Z</AlarmConfigurationUpdatedTimestamp>
<AlarmName>FancyAlarm</AlarmName>
<StateValue>OK</StateValue>
<Period>60</Period>
<OKActions/>
<ActionsEnabled>true</ActionsEnabled>
<Namespace>AcmeCo/Cronjobs</Namespace>
<EvaluationPeriods>15</EvaluationPeriods>
<Threshold>1.0</Threshold>
<Statistic>Maximum</Statistic>
<AlarmActions>
<member>arn:aws:sns:us-east-1:1234:Alerts</member>
</AlarmActions>
<StateReason>Threshold Crossed: 2 datapoints were not less than the threshold (1.0). The most recent datapoints: [1.0, 1.0].</StateReason>
<Dimensions>
<member>
<Name>Job</Name>
<Value>ANiceCronJob</Value>
</member>
</Dimensions>
<ComparisonOperator>LessThanThreshold</ComparisonOperator>
<MetricName>Success</MetricName>
</member>
<member>
<StateUpdatedTimestamp>2011-11-19T08:09:20.655Z</StateUpdatedTimestamp>
<InsufficientDataActions/>
<StateReasonData>{"version":"1.0","queryDate":"2011-11-19T08:09:20.633+0000","startDate":"2011-11-19T08:07:00.000+0000","statistic":"Maximum","period":60,"recentDatapoints":[1.0],"threshold":1.0}</StateReasonData>
<AlarmArn>arn:aws:cloudwatch:us-east-1:1234:alarm:SuprtFancyAlarm</AlarmArn>
<AlarmConfigurationUpdatedTimestamp>2011-11-19T16:20:19.687Z</AlarmConfigurationUpdatedTimestamp>
<AlarmName>SuperFancyAlarm</AlarmName>
<StateValue>OK</StateValue>
<Period>60</Period>
<OKActions/>
<ActionsEnabled>true</ActionsEnabled>
<Namespace>AcmeCo/CronJobs</Namespace>
<EvaluationPeriods>60</EvaluationPeriods>
<Threshold>1.0</Threshold>
<Statistic>Maximum</Statistic>
<AlarmActions>
<member>arn:aws:sns:us-east-1:1234:alerts</member>
</AlarmActions>
<StateReason>Threshold Crossed: 1 datapoint (1.0) was not less than the threshold (1.0).</StateReason>
<Dimensions>
<member>
<Name>Job</Name>
<Value>ABadCronJob</Value>
</member>
</Dimensions>
<ComparisonOperator>GreaterThanThreshold</ComparisonOperator>
<MetricName>Success</MetricName>
</member>
</MetricAlarms>
</DescribeAlarmsResult>
<ResponseMetadata>
<RequestId>f621311-1463-11e1-95c3-312389123</RequestId>
</ResponseMetadata>
</DescribeAlarmsResponse>"""
class CloudWatchConnectionTest(unittest.TestCase):
ec2 = True
def test_build_list_params(self):
c = CloudWatchConnection()
params = {}
c.build_list_params(
params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
expected_params = {
'ThingName1': 'thing1',
'ThingName2': 'thing2',
'ThingName3': 'thing3'
}
self.assertEqual(params, expected_params)
def test_build_put_params_one(self):
c = CloudWatchConnection()
params = {}
c.build_put_params(params, name="N", value=1, dimensions={"D": "V"})
expected_params = {
'MetricData.member.1.MetricName': 'N',
'MetricData.member.1.Value': 1,
'MetricData.member.1.Dimensions.member.1.Name': 'D',
'MetricData.member.1.Dimensions.member.1.Value': 'V',
}
self.assertEqual(params, expected_params)
def test_build_put_params_multiple_metrics(self):
c = CloudWatchConnection()
params = {}
c.build_put_params(params, name=["N", "M"], value=[1, 2], dimensions={"D": "V"})
expected_params = {
'MetricData.member.1.MetricName': 'N',
'MetricData.member.1.Value': 1,
'MetricData.member.1.Dimensions.member.1.Name': 'D',
'MetricData.member.1.Dimensions.member.1.Value': 'V',
'MetricData.member.2.MetricName': 'M',
'MetricData.member.2.Value': 2,
'MetricData.member.2.Dimensions.member.1.Name': 'D',
'MetricData.member.2.Dimensions.member.1.Value': 'V',
}
self.assertEqual(params, expected_params)
def test_build_put_params_multiple_dimensions(self):
c = CloudWatchConnection()
params = {}
c.build_put_params(params, name="N", value=[1, 2], dimensions=[{"D": "V"}, {"D": "W"}])
expected_params = {
'MetricData.member.1.MetricName': 'N',
'MetricData.member.1.Value': 1,
'MetricData.member.1.Dimensions.member.1.Name': 'D',
'MetricData.member.1.Dimensions.member.1.Value': 'V',
'MetricData.member.2.MetricName': 'N',
'MetricData.member.2.Value': 2,
'MetricData.member.2.Dimensions.member.1.Name': 'D',
'MetricData.member.2.Dimensions.member.1.Value': 'W',
}
self.assertEqual(params, expected_params)
def test_build_put_params_multiple_parameter_dimension(self):
self.maxDiff = None
c = CloudWatchConnection()
params = {}
dimensions = [OrderedDict((("D1", "V"), ("D2", "W")))]
c.build_put_params(params,
name="N",
value=[1],
dimensions=dimensions)
expected_params = {
'MetricData.member.1.MetricName': 'N',
'MetricData.member.1.Value': 1,
'MetricData.member.1.Dimensions.member.1.Name': 'D1',
'MetricData.member.1.Dimensions.member.1.Value': 'V',
'MetricData.member.1.Dimensions.member.2.Name': 'D2',
'MetricData.member.1.Dimensions.member.2.Value': 'W',
}
self.assertEqual(params, expected_params)
def test_build_get_params_multiple_parameter_dimension1(self):
self.maxDiff = None
c = CloudWatchConnection()
params = {}
dimensions = OrderedDict((("D1", "V"), ("D2", "W")))
c.build_dimension_param(dimensions, params)
expected_params = {
'Dimensions.member.1.Name': 'D1',
'Dimensions.member.1.Value': 'V',
'Dimensions.member.2.Name': 'D2',
'Dimensions.member.2.Value': 'W',
}
self.assertEqual(params, expected_params)
def test_build_get_params_multiple_parameter_dimension2(self):
self.maxDiff = None
c = CloudWatchConnection()
params = {}
dimensions = OrderedDict((("D1", ["V1", "V2"]), ("D2", "W"), ("D3", None)))
c.build_dimension_param(dimensions, params)
expected_params = {
'Dimensions.member.1.Name': 'D1',
'Dimensions.member.1.Value': 'V1',
'Dimensions.member.2.Name': 'D1',
'Dimensions.member.2.Value': 'V2',
'Dimensions.member.3.Name': 'D2',
'Dimensions.member.3.Value': 'W',
'Dimensions.member.4.Name': 'D3',
}
self.assertEqual(params, expected_params)
def test_build_put_params_invalid(self):
c = CloudWatchConnection()
params = {}
try:
c.build_put_params(params, name=["N", "M"], value=[1, 2, 3])
        except Exception:
pass
else:
self.fail("Should not accept lists of different lengths.")
def test_get_metric_statistics(self):
c = CloudWatchConnection()
m = c.list_metrics()[0]
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(hours=24 * 14)
c.get_metric_statistics(
3600 * 24, start, end, m.name, m.namespace, ['Average', 'Sum'])
def test_put_metric_data(self):
c = CloudWatchConnection()
now = datetime.datetime.utcnow()
name, namespace = 'unit-test-metric', 'boto-unit-test'
c.put_metric_data(namespace, name, 5, now, 'Bytes')
# Uncomment the following lines for a slower but more thorough
# test. (Hurrah for eventual consistency...)
#
# metric = Metric(connection=c)
# metric.name = name
# metric.namespace = namespace
# time.sleep(60)
# l = metric.query(
# now - datetime.timedelta(seconds=60),
# datetime.datetime.utcnow(),
# 'Average')
# assert l
# for row in l:
# self.assertEqual(row['Unit'], 'Bytes')
# self.assertEqual(row['Average'], 5.0)
def test_describe_alarms(self):
c = CloudWatchConnection()
def make_request(*args, **kwargs):
class Body(object):
def __init__(self):
self.status = 200
def read(self):
return DESCRIBE_ALARMS_BODY
return Body()
c.make_request = make_request
alarms = c.describe_alarms()
        self.assertEqual(alarms.next_token, 'mynexttoken')
        self.assertEqual(alarms[0].name, 'FancyAlarm')
        self.assertEqual(alarms[0].comparison, '<')
        self.assertEqual(alarms[0].dimensions, {u'Job': [u'ANiceCronJob']})
        self.assertEqual(alarms[1].name, 'SuperFancyAlarm')
        self.assertEqual(alarms[1].comparison, '>')
        self.assertEqual(alarms[1].dimensions, {u'Job': [u'ABadCronJob']})
if __name__ == '__main__':
unittest.main()
|
CapOM/ChromiumGStreamerBackend
|
tools/telemetry/third_party/gsutilz/third_party/boto/tests/integration/ec2/cloudwatch/test_connection.py
|
Python
|
bsd-3-clause
| 11,614
|
__version__ = '2.0.18'
|
gleitz/howdoi
|
howdoi/__init__.py
|
Python
|
mit
| 23
|
from conans import ConanFile, CMake, tools
class LLAConan(ConanFile):
name = "LogicalAccess"
version = "2.3.0"
license = "https://github.com/islog/liblogicalaccess/blob/develop/LICENSE"
url = "https://github.com/islog/liblogicalaccess"
description = "ISLOG RFID library"
settings = "os", "compiler", "build_type", "arch"
requires = 'boost/1.68.0@conan/stable', 'openssl/1.0.2t', 'jsonformoderncpp/3.6.1@vthiery/stable', 'zlib/1.2.11'
generators = "cmake"
options = {'LLA_BUILD_IKS': [True, False],
'LLA_BUILD_PKCS': [True, False],
'LLA_BUILD_UNITTEST': [True, False],
'LLA_BUILD_RFIDEAS': [True, False]}
revision_mode = "scm"
exports_sources = "plugins*", "src*", "include*", "CMakeLists.txt", "cmake*", "liblogicalaccess.config", "tests*", "samples*"
if tools.os_info.is_windows:
default_options = '''
openssl:shared=True
boost:shared=False
gtest:shared=True
LLA_BUILD_IKS=False
LLA_BUILD_PKCS=False
LLA_BUILD_RFIDEAS=False
LLA_BUILD_UNITTEST=False'''
else:
default_options = '''
openssl:shared=True
boost:shared=True
gtest:shared=True
LLA_BUILD_IKS=False
LLA_BUILD_PKCS=False
LLA_BUILD_UNITTEST=False'''
def configure(self):
if self.settings.os != 'Windows':
            # This option is not used on Linux
del self.options.LLA_BUILD_RFIDEAS
def requirements(self):
if self.settings.os == 'Windows' and self.options.LLA_BUILD_RFIDEAS:
self.requires('rfideas/7.1.5@islog/stable')
if self.options.LLA_BUILD_IKS:
self.requires('grpc/1.25.0@inexorgame/stable')
if self.options.LLA_BUILD_UNITTEST:
self.requires('gtest/1.8.1@bincrafters/stable')
if self.options.LLA_BUILD_PKCS:
self.requires('cppkcs11/1.1@islog/master')
def imports(self):
if tools.os_info.is_windows:
self.copy("*.dll", "bin", "bin")
def configure_cmake(self):
cmake = CMake(self, build_type=self.settings.build_type)
if self.settings.os == 'Android':
# Workaround to avoid conan passing -stdlib=libc++
# to compiler. See https://github.com/conan-io/conan/issues/2856
cmake.definitions['CONAN_LIBCXX'] = ''
cmake.definitions['LLA_BOOST_ASIO_HAS_STD_STRING_VIEW'] = 1
if self.options.LLA_BUILD_IKS:
cmake.definitions['LLA_BUILD_IKS'] = True
else:
cmake.definitions['LLA_BUILD_IKS'] = False
if self.options.LLA_BUILD_PKCS:
cmake.definitions['LLA_BUILD_PKCS'] = True
else:
cmake.definitions['LLA_BUILD_PKCS'] = False
if self.options.LLA_BUILD_UNITTEST:
cmake.definitions['LLA_BUILD_UNITTEST'] = True
else:
cmake.definitions['LLA_BUILD_UNITTEST'] = False
if 'LLA_BUILD_RFIDEAS' in self.options and self.options.LLA_BUILD_RFIDEAS:
cmake.definitions['LLA_BUILD_RFIDEAS'] = True
else:
cmake.definitions['LLA_BUILD_RFIDEAS'] = False
cmake.definitions['LIBLOGICALACCESS_VERSION_STRING'] = self.version
cmake.definitions['LIBLOGICALACCESS_WINDOWS_VERSION'] = self.version.replace('.', ',') + ',0'
cmake.definitions['TARGET_ARCH'] = self.settings.arch
if tools.os_info.is_windows:
# For MSVC we need to restrict configuration type to avoid issues.
cmake.definitions['CMAKE_CONFIGURATION_TYPES'] = self.settings.build_type
cmake.configure()
return cmake
def build(self):
cmake = self.configure_cmake()
cmake.build()
def package(self):
cmake = self.configure_cmake()
cmake.install()
def package_info(self):
if self.settings.os == 'Android':
            # For Android we only package a subset of the libraries.
self.cpp_info.libs.append('logicalaccess')
self.cpp_info.libs.append('llacommon')
self.cpp_info.libs.append('logicalaccess-cryptolib')
self.cpp_info.libs.append('iso7816readers')
self.cpp_info.libs.append('desfirecards')
self.cpp_info.libs.append('mifarecards')
self.cpp_info.libs.append('iso7816cards')
self.cpp_info.libs.append('samav2cards')
self.cpp_info.libs.append('epasscards')
self.cpp_info.libs.append('seoscards')
return
if self.settings.os == 'Windows':
            # Windows-specific reader and keyboard-hook libraries.
if self.options.LLA_BUILD_RFIDEAS:
self.cpp_info.libs.append('rfideasreaders')
self.cpp_info.libs.append('keyboardreaders')
if self.settings.arch == 'x86_64':
self.cpp_info.libs.append('islogkbdhooklib64')
else:
self.cpp_info.libs.append('islogkbdhooklib32')
# Linux / Windows common plugins.
self.cpp_info.libs.append('llacommon')
self.cpp_info.libs.append('logicalaccess-cryptolib')
self.cpp_info.libs.append('a3mlgm5600readers')
self.cpp_info.libs.append('admittoreaders')
self.cpp_info.libs.append('axesstmc13readers')
self.cpp_info.libs.append('axesstmclegicreaders')
self.cpp_info.libs.append('cps3cards')
self.cpp_info.libs.append('deisterreaders')
self.cpp_info.libs.append('desfirecards')
self.cpp_info.libs.append('elatecreaders')
self.cpp_info.libs.append('em4102cards')
self.cpp_info.libs.append('em4135cards')
self.cpp_info.libs.append('felicacards')
self.cpp_info.libs.append('generictagcards')
self.cpp_info.libs.append('gigatmsreaders')
self.cpp_info.libs.append('gunneboreaders')
self.cpp_info.libs.append('icode1cards')
self.cpp_info.libs.append('icode2cards')
self.cpp_info.libs.append('idondemandreaders')
self.cpp_info.libs.append('indalacards')
self.cpp_info.libs.append('infineonmydcards')
self.cpp_info.libs.append('iso15693cards')
self.cpp_info.libs.append('iso7816cards')
self.cpp_info.libs.append('iso7816readers')
self.cpp_info.libs.append('legicprimecards')
self.cpp_info.libs.append('logicalaccess')
self.cpp_info.libs.append('mifarecards')
self.cpp_info.libs.append('mifarepluscards')
self.cpp_info.libs.append('mifareultralightcards')
self.cpp_info.libs.append('ok5553readers')
self.cpp_info.libs.append('osdpreaders')
self.cpp_info.libs.append('pcscreaders')
self.cpp_info.libs.append('promagreaders')
self.cpp_info.libs.append('proxcards')
self.cpp_info.libs.append('proxlitecards')
self.cpp_info.libs.append('rplethreaders')
self.cpp_info.libs.append('samav2cards')
self.cpp_info.libs.append('scielreaders')
self.cpp_info.libs.append('seoscards')
self.cpp_info.libs.append('smartframecards')
self.cpp_info.libs.append('smartidreaders')
self.cpp_info.libs.append('stidprgreaders')
self.cpp_info.libs.append('stidstrreaders')
self.cpp_info.libs.append('stmlri512cards')
self.cpp_info.libs.append('tagitcards')
self.cpp_info.libs.append('topazcards')
self.cpp_info.libs.append('twiccards')
self.cpp_info.libs.append('epasscards')
def package_id(self):
self.info.requires["boost"].full_package_mode()
self.info.requires["openssl"].full_package_mode()
|
islog/liblogicalaccess
|
conanfile.py
|
Python
|
lgpl-3.0
| 7,672
|
# Copyright 2014 IBM Corp.
from setuptools import setup, find_packages
setup(
name="hostmaintenance-client",
version="0.1",
packages=find_packages(exclude=['*.tests', 'tests',
'tests.*', '*.tests.*']),
entry_points={
'novaclient.extension': [
'host_maintenance = v1_1.host_maintenance',
],
}
)
|
openstack/powervc-driver
|
nova-powervc/hostmaintenanceclient/setup.py
|
Python
|
apache-2.0
| 380
|
class Card:
"""
Static class that handles cards. We represent cards as 32-bit integers, so
there is no object instantiation - they are just ints. Most of the bits are
used, and have a specific meaning. See below:
Card:
bitrank suit rank prime
+--------+--------+--------+--------+
|xxxbbbbb|bbbbbbbb|cdhsrrrr|xxpppppp|
+--------+--------+--------+--------+
1) p = prime number of rank (deuce=2,trey=3,four=5,...,ace=41)
2) r = rank of card (deuce=0,trey=1,four=2,five=3,...,ace=12)
3) cdhs = suit of card (bit turned on based on suit of card)
4) b = bit turned on depending on rank of card
5) x = unused
This representation will allow us to do very important things like:
    - Make a unique prime product for each hand
- Detect flushes
- Detect straights
and is also quite performant.
"""
# the basics
STR_RANKS = '23456789TJQKA'
INT_RANKS = list(range(13))
PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    # conversion from string => int
CHAR_RANK_TO_INT_RANK = dict(zip(list(STR_RANKS), INT_RANKS))
CHAR_SUIT_TO_INT_SUIT = {
's': 1, # spades
'h': 2, # hearts
'd': 4, # diamonds
'c': 8, # clubs
}
INT_SUIT_TO_CHAR_SUIT = 'xshxdxxxc'
# for pretty printing
PRETTY_SUITS = {
1: u"\u2660", # spades
2: u"\u2764", # hearts
4: u"\u2666", # diamonds
8: u"\u2663" # clubs
}
@staticmethod
def new(string):
"""
        Converts a card string to the binary integer representation of the card, inspired by:
http://www.suffecool.net/poker/evaluator.html
"""
rank_char = string[0]
suit_char = string[1]
rank_int = Card.CHAR_RANK_TO_INT_RANK[rank_char]
suit_int = Card.CHAR_SUIT_TO_INT_SUIT[suit_char]
rank_prime = Card.PRIMES[rank_int]
bitrank = 1 << rank_int << 16
suit = suit_int << 12
rank = rank_int << 8
return bitrank | suit | rank | rank_prime
@staticmethod
def int_to_str(card_int):
rank_int = Card.get_rank_int(card_int)
suit_int = Card.get_suit_int(card_int)
return Card.STR_RANKS[rank_int] + Card.INT_SUIT_TO_CHAR_SUIT[suit_int]
@staticmethod
def get_rank_int(card_int):
return (card_int >> 8) & 0xF
@staticmethod
def get_suit_int(card_int):
return (card_int >> 12) & 0xF
@staticmethod
def get_bitrank_int(card_int):
return (card_int >> 16) & 0x1FFF
@staticmethod
def get_prime(card_int):
return card_int & 0x3F
@staticmethod
def hand_to_binary(card_strs):
"""
Expects a list of cards as strings and returns a list
of integers of same length corresponding to those strings.
"""
bhand = []
for c in card_strs:
bhand.append(Card.new(c))
return bhand
@staticmethod
def prime_product_from_hand(card_ints):
"""
Expects a list of cards in integer form.
"""
product = 1
for c in card_ints:
product *= (c & 0xFF)
return product
@staticmethod
def prime_product_from_rankbits(rankbits):
"""
Returns the prime product using the bitrank (b)
bits of the hand. Each 1 in the sequence is converted
to the correct prime and multiplied in.
Params:
rankbits = a single 32-bit (only 13-bits set) integer representing
the ranks of 5 _different_ ranked cards
(5 of 13 bits are set)
        Primarily used for evaluating flushes and straights,
two occasions where we know the ranks are *ALL* different.
Assumes that the input is in form (set bits):
rankbits
+--------+--------+
|xxxbbbbb|bbbbbbbb|
+--------+--------+
"""
product = 1
for i in Card.INT_RANKS:
# if the ith bit is set
if rankbits & (1 << i):
product *= Card.PRIMES[i]
return product
@staticmethod
def int_to_binary(card_int):
"""
For debugging purposes. Displays the binary number as a
human readable string in groups of four digits.
"""
bstr = bin(card_int)[2:][::-1] # chop off the 0b and THEN reverse string
output = list("".join(["0000" + "\t"] * 7) + "0000")
for i in range(len(bstr)):
output[i + int(i//4)] = bstr[i]
# output the string to console
output.reverse()
return "".join(output)
@staticmethod
def int_to_pretty_str(card_int):
"""
Returns a single card string
"""
# suit and rank
suit_int = Card.get_suit_int(card_int)
rank_int = Card.get_rank_int(card_int)
suit = Card.PRETTY_SUITS[suit_int]
rank = Card.STR_RANKS[rank_int]
return str(rank) + " of " + str(suit)
|
RIP95/kurisu-bot
|
addons/deuces/card.py
|
Python
|
mit
| 5,195
|
"""The tests for the manual Alarm Control Panel component."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.setup import setup_component
from homeassistant.const import (
STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_NIGHT, STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED)
from homeassistant.components import alarm_control_panel
import homeassistant.util.dt as dt_util
from tests.common import fire_time_changed, get_test_home_assistant
CODE = 'HELLO_CODE'
class TestAlarmControlPanelManual(unittest.TestCase):
"""Test the manual alarm module."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_arm_home_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_home_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_home_with_invalid_code(self):
"""Attempt to arm home without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_arm_away_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_away_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_away_with_invalid_code(self):
"""Attempt to arm away without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_arm_night_no_pending(self):
"""Test arm night method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
def test_arm_night_with_pending(self):
"""Test arm night method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
def test_arm_night_with_invalid_code(self):
"""Attempt to night home without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_no_pending(self):
"""Test triggering when no pending submitted method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=60)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
def test_trigger_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 2,
'trigger_time': 3,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_with_disarm_after_trigger(self):
"""Test disarm after trigger."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 5,
'pending_time': 0,
'disarm_after_trigger': True
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_disarm_while_pending_trigger(self):
"""Test disarming while pending state."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 5,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_disarm_during_trigger_with_invalid_code(self):
"""Test disarming while code is invalid."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 5,
'code': CODE + '2',
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
|
MungoRae/home-assistant
|
tests/components/alarm_control_panel/test_manual.py
|
Python
|
apache-2.0
| 16,012
|
import os
from util.util import *
from util.csproj import *
from collections import deque
class EnvironmentItem:
def __init__(self, name, joinchar, values):
self.name = name
self.joinchar = joinchar
self.values = values
def __str__(self):
return os.path.expandvars(self.joinchar.join(self.values))
class Environment:
def __init__(self, profile):
self._profile = profile
def set(self, *argv):
args = deque(argv)
name = args.popleft()
joinchar = args.popleft()
if len(args) == 0:
values = list(self.iter_flatten(joinchar))
joinchar = ''
else:
values = list(self.iter_flatten(list(args)))
self.__dict__[name] = EnvironmentItem(name, joinchar, values)
return self.__dict__[name]
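    # Illustrative calls (hypothetical values):
    #   env.set('PATH', ':', '/opt/bin', '$PATH')  -> PATH="/opt/bin:$PATH"
    #   env.set('CC', 'gcc')                       -> CC="gcc" (two args: joined with '')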
def compile(self):
expand_macros(self, self._profile)
def write_source_script(self, filename):
        trace(filename)
envscript = '#!/bin/sh\n'
for k in self.get_names():
envscript = envscript + 'export %s="%s"\n' % (k, self.__dict__[k])
with open(filename, 'w') as f:
f.write(envscript)
trace(envscript)
os.chmod(filename, 0o755)
def serialize(self):
names = sorted(self.get_names())
for k in names:
yield '%s = "%s"' % (k, self.__dict__[k])
    def dump_csproj(self):
        for k in self.get_names():
            print('<Variable name="%s" value="%s" />' % (k, self.__dict__[k]))
def write_csproj(self, file):
writer = csproj_writer(file, self)
writer.write()
def export(self):
for k in self.get_names():
os.environ[k] = str(self.__dict__[k])
def get_names(self):
for k in self.__dict__.keys():
if not k.startswith('_'):
yield k
def iter_flatten(self, iterable):
if not isinstance(iterable, (list, tuple)):
yield iterable
return
it = iter(iterable)
for e in it:
if isinstance(e, (list, tuple)):
for f in self.iter_flatten(e):
yield f
else:
yield e
|
mono/bockbuild
|
bockbuild/environment.py
|
Python
|
mit
| 2,224
|
import ctypes
import os
libc = ctypes.CDLL(None, use_errno=True)
def errcheck(result, func, args):
    if result < 0:
        e = ctypes.get_errno()
        raise OSError(e, os.strerror(e))
return result
def lookup(restype, name, argtypes):
f = libc[name]
    f.restype = restype
f.argtypes = argtypes
f.errcheck = errcheck
return f
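# Illustrative use (hypothetical): wrap libc's close(2) so that a failing
# call raises OSError instead of returning -1.
#
#     close = lookup(ctypes.c_int, 'close', [ctypes.c_int])
#     close(fd)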
class SelfClosing(object):
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
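# Illustrative use (hypothetical subclass): any object with a close() method
# gains 'with' support by mixing in SelfClosing:
#
#     class Device(SelfClosing):
#         def close(self):
#             pass  # release resources here
#
#     with Device() as dev:
#         pass  # dev.close() runs on exit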
|
FuelCellUAV/FC_datalogger
|
quick2wire/quick2wire/syscall.py
|
Python
|
cc0-1.0
| 503
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.unbatch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class UnbatchTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testUnbatchWithUnknownRankInput(self):
dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3]).unbatch()
self.assertDatasetProduces(dataset, range(4))
@combinations.generate(test_base.default_test_combinations())
def testUnbatchScalarDataset(self):
data = tuple([math_ops.range(10) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = (dtypes.int32,) * 3
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, [(i,) * 3 for i in range(10)])
@combinations.generate(test_base.default_test_combinations())
def testUnbatchNestedDataset(self):
data = dataset_ops.Dataset.from_tensors(
[dataset_ops.Dataset.range(10) for _ in range(10)])
data = data.unbatch().flat_map(lambda x: x)
self.assertDatasetProduces(data, list(range(10)) * 10)
@combinations.generate(test_base.default_test_combinations())
def testUnbatchDatasetWithStrings(self):
data = tuple([math_ops.range(10) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(
data, [(i, compat.as_bytes(str(i)), i) for i in range(10)])
@combinations.generate(test_base.default_test_combinations())
def testUnbatchDatasetWithSparseTensor(self):
st = sparse_tensor.SparseTensorValue(
indices=[[i, i] for i in range(10)],
values=list(range(10)),
dense_shape=[10, 10])
data = dataset_ops.Dataset.from_tensors(st)
data = data.unbatch()
data = data.batch(5)
data = data.unbatch()
expected_output = [
sparse_tensor.SparseTensorValue([[i]], [i], [10]) for i in range(10)
]
self.assertDatasetProduces(data, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testUnbatchDatasetWithDenseSparseAndRaggedTensor(self):
st = sparse_tensor.SparseTensorValue(
indices=[[i, i] for i in range(10)],
values=list(range(10)),
dense_shape=[10, 10])
rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
[[5]], [[6]], [[7]], [[8]], [[9]]])
data = dataset_ops.Dataset.from_tensors((list(range(10)), st, rt))
data = data.unbatch()
data = data.batch(5)
data = data.unbatch()
expected_output = [(i, sparse_tensor.SparseTensorValue([[i]], [i], [10]),
ragged_factory_ops.constant_value([[i]]))
for i in range(10)]
self.assertDatasetProduces(
data, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testUnbatchDatasetWithRaggedTensor(self):
rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
[[5]], [[6]], [[7]], [[8]], [[9]]])
data = dataset_ops.Dataset.from_tensors(rt)
data = data.unbatch()
data = data.batch(5)
data = data.batch(2)
data = data.unbatch()
expected_output = [
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]]]),
ragged_factory_ops.constant_value([[[5]], [[6]], [[7]], [[8]], [[9]]]),
]
self.assertDatasetProduces(
data, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testUnbatchSingleElementTupleDataset(self):
data = tuple([(math_ops.range(10),) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = ((dtypes.int32,),) * 3
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, [((i,),) * 3 for i in range(10)])
@combinations.generate(test_base.default_test_combinations())
def testUnbatchMultiElementTupleDataset(self):
data = tuple([(math_ops.range(10 * i, 10 * i + 10),
array_ops.fill([10], "hi")) for i in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = ((dtypes.int32, dtypes.string),) * 3
data = data.batch(2)
self.assertAllEqual(expected_types,
dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertAllEqual(expected_types,
dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(
data,
[((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")) for i in range(10)])
@combinations.generate(test_base.default_test_combinations())
def testUnbatchEmpty(self):
data = dataset_ops.Dataset.from_tensors(
(constant_op.constant([]), constant_op.constant([], shape=[0, 4]),
constant_op.constant([], shape=[0, 4, 0])))
data = data.unbatch()
self.assertDatasetProduces(data, [])
@combinations.generate(test_base.default_test_combinations())
def testUnbatchStaticShapeMismatch(self):
data = dataset_ops.Dataset.from_tensors((np.arange(7), np.arange(8),
np.arange(9)))
with self.assertRaises(ValueError):
data.unbatch()
@combinations.generate(test_base.graph_only_combinations())
def testUnbatchDynamicShapeMismatch(self):
ph1 = array_ops.placeholder(dtypes.int32, shape=[None])
ph2 = array_ops.placeholder(dtypes.int32, shape=None)
data = dataset_ops.Dataset.from_tensors((ph1, ph2))
data = data.unbatch()
iterator = dataset_ops.make_initializable_iterator(data)
next_element = iterator.get_next()
with self.cached_session() as sess:
# Mismatch in the 0th dimension.
sess.run(
iterator.initializer,
feed_dict={
ph1: np.arange(7).astype(np.int32),
ph2: np.arange(8).astype(np.int32)
})
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element)
# No 0th dimension (i.e. scalar value) for one component.
sess.run(
iterator.initializer,
feed_dict={
ph1: np.arange(7).astype(np.int32),
ph2: 7
})
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element)
@combinations.generate(test_base.default_test_combinations())
def testUnbatchDatasetWithUintDtypes(self):
components = (
np.tile(np.array([[0], [1], [2], [3]], dtype=np.uint8), 2),
np.tile(np.array([[1], [2], [3], [256]], dtype=np.uint16), 2),
np.tile(np.array([[2], [3], [4], [65536]], dtype=np.uint32), 2),
np.tile(np.array([[3], [4], [5], [4294967296]], dtype=np.uint64), 2),
)
expected_types = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
expected_output = [tuple([c[i] for c in components]) for i in range(4)]
data = dataset_ops.Dataset.from_tensor_slices(components)
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testNoneComponent(self):
dataset = dataset_ops.Dataset.from_tensors(
(list(range(10)), None)).unbatch().map(lambda x, y: x)
self.assertDatasetProduces(dataset, expected_output=range(10))
if __name__ == "__main__":
test.main()
|
annarev/tensorflow
|
tensorflow/python/data/kernel_tests/unbatch_test.py
|
Python
|
apache-2.0
| 9,713
|
# $Id: simplecall.py 2171 2008-07-24 09:01:33Z bennylp $
#
# SIP account and registration sample. In this sample, the program
# will block to wait until registration is complete
#
# Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
import pjsua as pj
# Logging callback
def log_cb(level, str, len):
print str,
# Callback to receive events from Call
class MyCallCallback(pj.CallCallback):
def __init__(self, call=None):
pj.CallCallback.__init__(self, call)
# Notification when call state has changed
def on_state(self):
print "Call is ", self.call.info().state_text,
print "last code =", self.call.info().last_code,
print "(" + self.call.info().last_reason + ")"
# Notification when call's media state has changed.
def on_media_state(self):
global lib
if self.call.info().media_state == pj.MediaState.ACTIVE:
# Connect the call to sound device
call_slot = self.call.info().conf_slot
lib.conf_connect(call_slot, 0)
lib.conf_connect(0, call_slot)
print "Hello world, I can talk!"
# Check command line argument
if len(sys.argv) != 2:
print "Usage: simplecall.py <dst-URI>"
sys.exit(1)
try:
# Create library instance
lib = pj.Lib()
# Init library with default config
lib.init(log_cfg = pj.LogConfig(level=3, callback=log_cb))
# Create UDP transport which listens to any available port
transport = lib.create_transport(pj.TransportType.UDP)
# Start the library
lib.start()
# Create local/user-less account
acc = lib.create_account_for_transport(transport)
# Make call
call = acc.make_call(sys.argv[1], MyCallCallback())
# Wait for ENTER before quitting
print "Press <ENTER> to quit"
input = sys.stdin.readline().rstrip("\r\n")
# We're done, shutdown the library
lib.destroy()
lib = None
except pj.Error, e:
print "Exception: " + str(e)
lib.destroy()
lib = None
sys.exit(1)
|
ryanrdetzel/pjsip
|
pjsip-apps/src/python/samples/simplecall.py
|
Python
|
gpl-2.0
| 2,836
|
#!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/formatters.py
__all__=('Formatter','DecimalFormatter')
__version__=''' $Id: formatters.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""
These help format numbers and dates in a user friendly way.
Used by the graphics framework.
"""
import string, sys, os, re
class Formatter:
"Base formatter - simply applies python format strings"
def __init__(self, pattern):
self.pattern = pattern
def format(self, obj):
return self.pattern % obj
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self.pattern)
def __call__(self, x):
return self.format(x)
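# Illustrative usage sketch (not from the original module): the base Formatter
# simply applies a printf-style pattern to whatever it is called with, so a
# hypothetical caller might do:
#   percent = Formatter('%0.1f%%')
#   percent(42.1234)   # -> '42.1%'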
_ld_re=re.compile(r'^\d*\.')
_tz_re=re.compile('0+$')
class DecimalFormatter(Formatter):
"""lets you specify how to build a decimal.
A future NumberFormatter class will take Microsoft-style patterns
instead - "$#,##0.00" is WAY easier than this."""
def __init__(self, places=2, decimalSep='.', thousandSep=None, prefix=None, suffix=None):
if places=='auto':
self.calcPlaces = self._calcPlaces
else:
self.places = places
self.dot = decimalSep
self.comma = thousandSep
self.prefix = prefix
self.suffix = suffix
def _calcPlaces(self,V):
'''called with the full set of values to be formatted so we can calculate places'''
self.places = max([len(_tz_re.sub('',_ld_re.sub('',str(v)))) for v in V])
def format(self, num):
# positivize the numbers
sign=num<0
if sign:
num = -num
places, sep = self.places, self.dot
strip = places<=0
if places and strip: places = -places
strInt = ('%.' + str(places) + 'f') % num
if places:
strInt, strFrac = strInt.split('.')
strFrac = sep + strFrac
if strip:
while strFrac and strFrac[-1] in ['0',sep]: strFrac = strFrac[:-1]
else:
strFrac = ''
if self.comma is not None:
strNew = ''
while strInt:
left, right = strInt[0:-3], strInt[-3:]
if left == '':
#strNew = self.comma + right + strNew
strNew = right + strNew
else:
strNew = self.comma + right + strNew
strInt = left
strInt = strNew
strBody = strInt + strFrac
if sign: strBody = '-' + strBody
if self.prefix:
strBody = self.prefix + strBody
if self.suffix:
strBody = strBody + self.suffix
return strBody
def __repr__(self):
return "%s(places=%d, decimalSep=%s, thousandSep=%s, prefix=%s, suffix=%s)" % (
self.__class__.__name__,
self.places,
repr(self.dot),
repr(self.comma),
repr(self.prefix),
repr(self.suffix)
)
if __name__=='__main__':
def t(n, s, places=2, decimalSep='.', thousandSep=None, prefix=None, suffix=None):
f=DecimalFormatter(places,decimalSep,thousandSep,prefix,suffix)
r = f(n)
print("places=%2d dot=%-4s comma=%-4s prefix=%-4s suffix=%-4s result=%10s %s" %(f.places, f.dot, f.comma, f.prefix, f.suffix,r, r==s and 'OK' or 'BAD'))
t(1000.9,'1,000.9',1,thousandSep=',')
t(1000.95,'1,001.0',1,thousandSep=',')
t(1000.95,'1,001',-1,thousandSep=',')
t(1000.9,'1,001',0,thousandSep=',')
t(1000.9,'1000.9',1)
t(1000.95,'1001.0',1)
t(1000.95,'1001',-1)
t(1000.9,'1001',0)
t(1000.1,'1000.1',1)
t(1000.55,'1000.6',1)
t(1000.449,'1000.4',-1)
t(1000.45,'1000',0)
|
nakagami/reportlab
|
src/reportlab/lib/formatters.py
|
Python
|
bsd-3-clause
| 3,887
|
import unittest
from datetime import datetime, timedelta
from fs.memoryfs import MemoryFS
from mock import Mock, patch
from xblock.runtime import KvsFieldData, DictKeyValueStore
import xmodule.course_module
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from django.utils.timezone import UTC
ORG = 'test_org'
COURSE = 'test_course'
NOW = datetime.strptime('2013-01-01T01:00:00', '%Y-%m-%dT%H:%M:00').replace(tzinfo=UTC())
class CourseFieldsTestCase(unittest.TestCase):
def test_default_start_date(self):
self.assertEqual(
xmodule.course_module.CourseFields.start.default,
datetime(2030, 1, 1, tzinfo=UTC())
)
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", course_dirs=[],
load_error_modules=load_error_modules)
course_id = SlashSeparatedCourseKey(ORG, COURSE, 'test_run')
course_dir = "test_dir"
error_tracker = Mock()
parent_tracker = Mock()
super(DummySystem, self).__init__(
xmlstore=xmlstore,
course_id=course_id,
course_dir=course_dir,
error_tracker=error_tracker,
parent_tracker=parent_tracker,
load_error_modules=load_error_modules,
field_data=KvsFieldData(DictKeyValueStore()),
)
def get_dummy_course(start, announcement=None, is_new=None, advertised_start=None, end=None, certs=False):
"""Get a dummy course"""
system = DummySystem(load_error_modules=True)
def to_attrb(n, v):
return '' if v is None else '{0}="{1}"'.format(n, v).lower()
is_new = to_attrb('is_new', is_new)
announcement = to_attrb('announcement', announcement)
advertised_start = to_attrb('advertised_start', advertised_start)
end = to_attrb('end', end)
start_xml = '''
<course org="{org}" course="{course}" display_organization="{org}_display" display_coursenumber="{course}_display"
graceperiod="1 day" url_name="test"
start="{start}"
{announcement}
{is_new}
{advertised_start}
{end}
certificates_show_before_end="{certs}">
<chapter url="hi" url_name="ch" display_name="CH">
<html url_name="h" display_name="H">Two houses, ...</html>
</chapter>
</course>
'''.format(org=ORG, course=COURSE, start=start, is_new=is_new,
announcement=announcement, advertised_start=advertised_start, end=end,
certs=certs)
return system.process_xml(start_xml)
class HasEndedMayCertifyTestCase(unittest.TestCase):
"""Double check the semantics around when to finalize courses."""
def setUp(self):
system = DummySystem(load_error_modules=True)
#sample_xml = """
# <course org="{org}" course="{course}" display_organization="{org}_display" display_coursenumber="{course}_display"
# graceperiod="1 day" url_name="test"
# start="2012-01-01T12:00"
# {end}
# certificates_show_before_end={cert}>
# <chapter url="hi" url_name="ch" display_name="CH">
# <html url_name="h" display_name="H">Two houses, ...</html>
# </chapter>
# </course>
#""".format(org=ORG, course=COURSE)
past_end = (datetime.now() - timedelta(days=12)).strftime("%Y-%m-%dT%H:%M:00")
future_end = (datetime.now() + timedelta(days=12)).strftime("%Y-%m-%dT%H:%M:00")
self.past_show_certs = get_dummy_course("2012-01-01T12:00", end=past_end, certs=True)
self.past_noshow_certs = get_dummy_course("2012-01-01T12:00", end=past_end, certs=False)
self.future_show_certs = get_dummy_course("2012-01-01T12:00", end=future_end, certs=True)
self.future_noshow_certs = get_dummy_course("2012-01-01T12:00", end=future_end, certs=False)
#self.past_show_certs = system.process_xml(sample_xml.format(end=past_end, cert=True))
#self.past_noshow_certs = system.process_xml(sample_xml.format(end=past_end, cert=False))
#self.future_show_certs = system.process_xml(sample_xml.format(end=future_end, cert=True))
#self.future_noshow_certs = system.process_xml(sample_xml.format(end=future_end, cert=False))
def test_has_ended(self):
"""Check that has_ended correctly tells us when a course is over."""
self.assertTrue(self.past_show_certs.has_ended())
self.assertTrue(self.past_noshow_certs.has_ended())
self.assertFalse(self.future_show_certs.has_ended())
self.assertFalse(self.future_noshow_certs.has_ended())
def test_may_certify(self):
"""Check that may_certify correctly tells us when a course may wrap."""
self.assertTrue(self.past_show_certs.may_certify())
self.assertTrue(self.past_noshow_certs.may_certify())
self.assertTrue(self.future_show_certs.may_certify())
self.assertFalse(self.future_noshow_certs.may_certify())
class IsNewCourseTestCase(unittest.TestCase):
"""Make sure the property is_new works on courses"""
def setUp(self):
# Needed for test_is_newish
datetime_patcher = patch.object(
xmodule.course_module, 'datetime',
Mock(wraps=datetime)
)
mocked_datetime = datetime_patcher.start()
mocked_datetime.now.return_value = NOW
self.addCleanup(datetime_patcher.stop)
@patch('xmodule.course_module.datetime.now')
def test_sorting_score(self, gmtime_mock):
gmtime_mock.return_value = NOW
day1 = '2012-01-01T12:00'
day2 = '2012-01-02T12:00'
dates = [
# Announce date takes priority over actual start
# and courses announced on a later date are newer
# than courses announced for an earlier date
((day1, day2, None), (day1, day1, None), self.assertLess),
((day1, day1, None), (day2, day1, None), self.assertEqual),
# Announce dates take priority over advertised starts
((day1, day2, day1), (day1, day1, day1), self.assertLess),
((day1, day1, day2), (day2, day1, day2), self.assertEqual),
# Later start == newer course
((day2, None, None), (day1, None, None), self.assertLess),
((day1, None, None), (day1, None, None), self.assertEqual),
# Non-parseable advertised starts are ignored in preference to actual starts
((day2, None, "Spring"), (day1, None, "Fall"), self.assertLess),
((day1, None, "Spring"), (day1, None, "Fall"), self.assertEqual),
# Partially parsable advertised starts should take priority over start dates
((day2, None, "October 2013"), (day2, None, "October 2012"), self.assertLess),
((day2, None, "October 2013"), (day1, None, "October 2013"), self.assertEqual),
# Parseable advertised starts take priority over start dates
((day1, None, day2), (day1, None, day1), self.assertLess),
((day2, None, day2), (day1, None, day2), self.assertEqual),
]
for a, b, assertion in dates:
a_score = get_dummy_course(start=a[0], announcement=a[1], advertised_start=a[2]).sorting_score
b_score = get_dummy_course(start=b[0], announcement=b[1], advertised_start=b[2]).sorting_score
print "Comparing %s to %s" % (a, b)
assertion(a_score, b_score)
start_advertised_settings = [
# start, advertised, result, is_still_default
('2012-12-02T12:00', None, 'Dec 02, 2012', False),
('2012-12-02T12:00', '2011-11-01T12:00', 'Nov 01, 2011', False),
('2012-12-02T12:00', 'Spring 2012', 'Spring 2012', False),
('2012-12-02T12:00', 'November, 2011', 'November, 2011', False),
(xmodule.course_module.CourseFields.start.default, None, 'TBD', True),
(xmodule.course_module.CourseFields.start.default, 'January 2014', 'January 2014', False),
]
@patch('xmodule.course_module.datetime.now')
def test_start_date_text(self, gmtime_mock):
gmtime_mock.return_value = NOW
for s in self.start_advertised_settings:
d = get_dummy_course(start=s[0], advertised_start=s[1])
print "Checking start=%s advertised=%s" % (s[0], s[1])
self.assertEqual(d.start_date_text, s[2])
def test_start_date_is_default(self):
for s in self.start_advertised_settings:
d = get_dummy_course(start=s[0], advertised_start=s[1])
self.assertEqual(d.start_date_is_still_default, s[3])
def test_display_organization(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
self.assertNotEqual(descriptor.location.org, descriptor.display_org_with_default)
self.assertEqual(descriptor.display_org_with_default, "{0}_display".format(ORG))
def test_display_coursenumber(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
self.assertNotEqual(descriptor.location.course, descriptor.display_number_with_default)
self.assertEqual(descriptor.display_number_with_default, "{0}_display".format(COURSE))
def test_is_newish(self):
descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True)
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2013-02-02T12:00', is_new=False)
assert(descriptor.is_newish is False)
descriptor = get_dummy_course(start='2013-02-02T12:00', is_new=True)
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2013-01-15T12:00')
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2013-03-01T12:00')
assert(descriptor.is_newish is True)
descriptor = get_dummy_course(start='2012-10-15T12:00')
assert(descriptor.is_newish is False)
descriptor = get_dummy_course(start='2012-12-31T12:00')
assert(descriptor.is_newish is True)
def test_end_date_text(self):
# No end date set, returns empty string.
d = get_dummy_course('2012-12-02T12:00')
self.assertEqual('', d.end_date_text)
d = get_dummy_course('2012-12-02T12:00', end='2014-9-04T12:00')
self.assertEqual('Sep 04, 2014', d.end_date_text)
class DiscussionTopicsTestCase(unittest.TestCase):
def test_default_discussion_topics(self):
d = get_dummy_course('2012-12-02T12:00')
self.assertEqual({'General': {'id': 'i4x-test_org-test_course-course-test'}}, d.discussion_topics)
|
geekaia/edx-platform
|
common/lib/xmodule/xmodule/tests/test_course_module.py
|
Python
|
agpl-3.0
| 10,863
|
from dashie_sampler import DashieSampler
import random
import requests
import collections
import re
import datetime
class ConfluenceCutFromDevelopAndRelease(DashieSampler):
def __init__(self, *args, **kwargs):
DashieSampler.__init__(self, *args, **kwargs)
def name(self):
return 'confluencecutdatesandrelease'
#name is the link to the main.html
def sample(self):
wikiHome = requests.get("https://nhss-confluence.bjss.co.uk/display/SPINE/Web+Home%3A+NHS+Spine+II+Wiki", auth=('emma.holmes', 'Welcome123'), verify=False)
#wikihome is the url where the information is
currentLiveReleasePattern = "\<pre\sid\='currentLiveRelease'\>(.*?)</pre>"
nextLiveRelease = "\<pre\sid\='nextLiveRelease'\>(.*?)</pre>"
currentReleaseVersion = "\<pre\sid\='version0'\>(.*?)</pre>"
currentReleaseDate = "\<pre\sid\='cutDate0'\>(.*?)</pre>"
#Using the corresponding tags for the information you want
matchescurrent = re.search (currentLiveReleasePattern, wikiHome.text)
matchesnext = re.search (nextLiveRelease, wikiHome.text)
matchesversion = re.search (currentReleaseVersion, wikiHome.text)
matchesdate = re.search (currentReleaseDate, wikiHome.text)
#re.search searches the wikihome and the tags above to find the data
releaseCurrent = matchescurrent.group(1)
releaseNext = matchesnext.group(1)
releaseVersion = matchesversion.group(1)
releaseDate = matchesdate.group(1)
        #.group(1) returns the first bit of info related to the tag (if you get an error about no groups in the terminal it might mean no matches were found (check tags))
return {'text': releaseCurrent,
'value': releaseNext,
'label': "Cut From Develop",
'writing': releaseVersion,
'data': releaseDate,
}
        #if you're returning something new always make sure to include the new class ('text','value' etc) into the html or css
|
edhiley/pydashie
|
pydashie/confluence_cut_from_develop_and_release.py
|
Python
|
mit
| 2,020
|
# -*- coding: utf-8 -*-
from . import renderers
|
chrisjsewell/PyGauss
|
pygauss/chemlab_patch/graphics/__init__.py
|
Python
|
gpl-3.0
| 48
|
import numpy as np
import tensorflow as tf
from autoencoder.io import read_text, preprocess
from autoencoder.api import autoencode
import keras.backend as K
# for full reproducibility
np.random.seed(1)
tf.set_random_seed(1)
sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1))
K.set_session(sess)
x = read_text('biochemists.tsv', header='infer')
print(x.shape)
# test API
result = autoencode(x, 'test-ae', type='zinb-conddisp', hidden_size=(1,), epochs=3)
|
theislab/dca
|
data/test-biochemists-zinb-ae.py
|
Python
|
apache-2.0
| 551
|
# Generated by Django 2.0.1 on 2018-02-14 16:14
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0003_auto_20180131_1525'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='valid_from',
field=models.DateTimeField(blank=True, default=datetime.datetime.now, null=True),
),
migrations.AlterField(
model_name='customeraccountcontact',
name='valid_from',
field=models.DateTimeField(blank=True, default=datetime.datetime.now, null=True),
),
]
|
Semprini/cbe
|
cbe/cbe/customer/migrations/0004_auto_20180214_1614.py
|
Python
|
apache-2.0
| 692
|
"""Tests for OAuth2Reddit class."""
from __future__ import print_function, unicode_literals
from praw import Reddit, errors, decorators
from praw.objects import Submission
from six import text_type
from .helper import (PRAWTest, NewOAuthPRAWTest, USER_AGENT, betamax,
betamax_custom_header, mock_sys_stream)
class OAuth2RedditTest(PRAWTest):
def setUp(self):
self.configure()
self.r = Reddit(USER_AGENT, site_name='reddit_oauth_test',
disable_update_check=True)
def test_authorize_url(self):
self.r.set_oauth_app_info(None, None, None)
self.assertRaises(errors.OAuthAppRequired, self.r.get_authorize_url,
'dummy_state')
self.r.set_oauth_app_info(self.r.config.client_id,
self.r.config.client_secret,
self.r.config.redirect_uri)
url, params = self.r.get_authorize_url('...').split('?', 1)
self.assertTrue('api/v1/authorize/' in url)
params = dict(x.split('=', 1) for x in params.split('&'))
expected = {'client_id': self.r.config.client_id,
'duration': 'temporary',
'redirect_uri': ('https%3A%2F%2F127.0.0.1%3A65010%2F'
'authorize_callback'),
'response_type': 'code', 'scope': 'identity',
'state': '...'}
self.assertEqual(expected, params)
@betamax()
@mock_sys_stream("stdin")
def test_empty_captcha_file(self):
# Use the alternate account because it has low karma,
# so we can test the captcha.
self.r.refresh_access_information(self.other_refresh_token['submit'])
self.assertRaises(errors.InvalidCaptcha, self.r.submit,
self.sr, 'captcha test will fail', 'body')
@betamax()
def test_get_access_information(self):
        # If this test fails, the following URL will need to be visited in order
# to obtain a new code to pass to `get_access_information`:
# self.r.get_authorize_url('...')
token = self.r.get_access_information('MQALrr1di8GzcnT8szbTWhLcBUQ')
expected = {'access_token': self.r.access_token,
'refresh_token': None,
'scope': set(('identity',))}
self.assertEqual(expected, token)
self.assertEqual('PyAPITestUser2', text_type(self.r.user))
@betamax()
def test_get_access_information_with_invalid_code(self):
self.assertRaises(errors.OAuthInvalidGrant,
self.r.get_access_information, 'invalid_code')
@betamax()
@mock_sys_stream("stdin")
def test_inject_captcha_into_kwargs_and_raise(self):
# Use the alternate account because it has low karma,
# so we can test the captcha.
self.r.refresh_access_information(self.other_refresh_token['submit'])
        # praw doesn't currently add the captcha into kwargs so let's
# write a function in which it would and alias it to Reddit.submit
@decorators.restrict_access(scope='submit')
@decorators.require_captcha
def submit_alias(r, sr, title, text, **kw):
return self.r.submit.__wrapped__.__wrapped__(
r, sr, title, text, captcha=kw.get('captcha')
)
self.assertRaises(errors.InvalidCaptcha, submit_alias, self.r,
self.sr, 'captcha test will fail', 'body')
def test_invalid_app_access_token(self):
self.r.clear_authentication()
self.r.set_oauth_app_info(None, None, None)
self.assertRaises(errors.OAuthAppRequired,
self.r.get_access_information, 'dummy_code')
def test_invalid_app_authorize_url(self):
self.r.clear_authentication()
self.r.set_oauth_app_info(None, None, None)
self.assertRaises(errors.OAuthAppRequired,
self.r.get_authorize_url, 'dummy_state')
@betamax()
def test_invalid_set_access_credentials(self):
self.assertRaises(errors.OAuthInvalidToken,
self.r.set_access_credentials,
set(('identity',)), 'dummy_access_token')
def test_oauth_scope_required(self):
self.r.set_oauth_app_info('dummy_client', 'dummy_secret', 'dummy_url')
self.r.set_access_credentials(set('dummy_scope',), 'dummy_token')
self.assertRaises(errors.OAuthScopeRequired, self.r.get_me)
def test_raise_client_exception(self):
def raise_client_exception(*args):
raise errors.ClientException(*args)
self.assertRaises(errors.ClientException, raise_client_exception)
self.assertRaises(errors.ClientException, raise_client_exception,
'test')
ce_message = errors.ClientException('Test')
ce_no_message = errors.ClientException()
self.assertEqual(ce_message.message, str(ce_message))
self.assertEqual(ce_no_message.message, str(ce_no_message))
def test_raise_http_exception(self):
def raise_http_exception():
raise errors.HTTPException('fakeraw')
self.assertRaises(errors.HTTPException, raise_http_exception)
http_exception = errors.HTTPException('fakeraw')
self.assertEqual(http_exception.message, str(http_exception))
def test_raise_oauth_exception(self):
oerrormessage = "fakemessage"
oerrorurl = "http://oauth.reddit.com/"
def raise_oauth_exception():
raise errors.OAuthException(oerrormessage, oerrorurl)
self.assertRaises(errors.OAuthException, raise_oauth_exception)
oauth_exception = errors.OAuthException(oerrormessage, oerrorurl)
self.assertEqual(oauth_exception.message +
" on url {0}".format(oauth_exception.url),
str(oauth_exception))
def test_raise_redirect_exception(self):
apiurl = "http://api.reddit.com/"
oauthurl = "http://oauth.reddit.com/"
def raise_redirect_exception():
raise errors.RedirectException(apiurl, oauthurl)
self.assertRaises(errors.RedirectException, raise_redirect_exception)
redirect_exception = errors.RedirectException(apiurl, oauthurl)
self.assertEqual(redirect_exception.message, str(redirect_exception))
@betamax()
def test_scope_history(self):
self.r.refresh_access_information(self.refresh_token['history'])
self.assertTrue(list(self.r.get_redditor(self.un).get_upvoted()))
@betamax()
def test_scope_identity(self):
self.r.refresh_access_information(self.refresh_token['identity'])
self.assertEqual(self.un, self.r.get_me().name)
@betamax()
def test_scope_mysubreddits(self):
self.r.refresh_access_information(self.refresh_token['mysubreddits'])
self.assertTrue(list(self.r.get_my_moderation()))
@betamax()
def test_scope_creddits(self):
# Assume there are insufficient creddits.
self.r.refresh_access_information(
self.refresh_token['creddits'])
redditor = self.r.get_redditor('bboe')
sub = self.r.get_submission(url=self.comment_url)
# Test error conditions
self.assertRaises(TypeError, sub.gild, months=1)
for value in (False, 0, -1, '0', '-1', 37, '37'):
self.assertRaises(TypeError, redditor.gild, value)
# Test object gilding
self.assertRaises(errors.InsufficientCreddits, redditor.gild)
self.assertRaises(errors.InsufficientCreddits, sub.gild)
self.assertRaises(errors.InsufficientCreddits, sub.comments[0].gild)
@betamax()
def test_scope_privatemessages(self):
self.r.refresh_access_information(
self.refresh_token['privatemessages'])
self.assertTrue(list(self.r.get_inbox()))
@betamax()
def test_scope_read(self):
self.r.refresh_access_information(self.refresh_token['read'])
self.assertTrue(self.r.get_subreddit(self.priv_sr).subscribers > 0)
fullname = '{0}_{1}'.format(self.r.config.by_object[Submission],
self.priv_submission_id)
method1 = self.r.get_info(thing_id=fullname)
method2 = self.r.get_submission(submission_id=self.priv_submission_id)
self.assertEqual(method1, method2)
@betamax()
def test_scope_read_get_front_page(self):
self.r.refresh_access_information(self.refresh_token['mysubreddits'])
subscribed = list(self.r.get_my_subreddits(limit=None))
self.r.refresh_access_information(self.refresh_token['read'])
for post in self.r.get_front_page():
self.assertTrue(post.subreddit in subscribed)
@betamax()
def test_set_access_credentials(self):
self.assertTrue(self.r.user is None)
result = self.r.refresh_access_information(
self.refresh_token['identity'], update_session=False)
self.assertTrue(self.r.user is None)
self.r.set_access_credentials(**result)
self.assertFalse(self.r.user is None)
@betamax()
def test_set_access_credentials_with_list(self):
self.assertTrue(self.r.user is None)
result = self.r.refresh_access_information(
self.refresh_token['identity'], update_session=False)
self.assertTrue(self.r.user is None)
result['scope'] = list(result['scope'])
self.r.set_access_credentials(**result)
self.assertFalse(self.r.user is None)
@betamax()
def test_set_access_credentials_with_string(self):
self.assertTrue(self.r.user is None)
result = self.r.refresh_access_information(
self.refresh_token['identity'], update_session=False)
self.assertTrue(self.r.user is None)
result['scope'] = ' '.join(result['scope'])
self.r.set_access_credentials(**result)
self.assertFalse(self.r.user is None)
@betamax()
@mock_sys_stream("stdin", "ljgtoo")
def test_solve_captcha(self):
# Use the alternate account because it has low karma,
# so we can test the captcha.
self.r.refresh_access_information(self.other_refresh_token['submit'])
self.r.submit(self.sr, 'captcha test', 'body')
@betamax()
@mock_sys_stream("stdin", "DFIRSW")
def test_solve_captcha_on_bound_subreddit(self):
# Use the alternate account because it has low karma,
# so we can test the captcha.
self.r.refresh_access_information(self.other_refresh_token['submit'])
subreddit = self.r.get_subreddit(self.sr)
# praw doesn't currently have a function in which require_captcha
        # gets a reddit instance from a subreddit and uses it, so let's
# write a function in which it would and alias it to Reddit.submit
@decorators.restrict_access(scope='submit')
@decorators.require_captcha
def submit_alias(sr, title, text, **kw):
return self.r.submit.__wrapped__.__wrapped__(
self.r, sr, title, text, captcha=kw.get('captcha')
)
submit_alias(subreddit, 'captcha test on bound subreddit', 'body')
@betamax()
def test_oauth_without_identy_doesnt_set_user(self):
self.assertTrue(self.r.user is None)
self.r.refresh_access_information(self.refresh_token['edit'])
self.assertTrue(self.r.user is None)
class AutoRefreshTest(NewOAuthPRAWTest):
@betamax_custom_header()
def test_auto_refresh_token(self):
# this test wasn't cached before the new test was made
# so the new app info needs to be set to avoid 401s
# also, the redirect uri doesn't need to be set on refreshes,
# but praw does this anyway. Changing this now would break
# all prior tests. The redirect uri should be removed and
# all tests rerecorded later.
with self.set_custom_header_match('test_auto_refresh_token__initial'):
self.r.refresh_access_information(
self.refresh_token['auto_refresh'])
old_token = self.r.access_token
self.r.access_token += 'x' # break the token
with self.set_custom_header_match('test_auto_refresh_token__refresh'):
# TODO: refreshing r.user wasn't actually updating the token
# because of special oauth handling in _get_json_dict of
# reddit content objects. Leaving this as a note until I
# fix it in the future
list(self.r.get_new(limit=5))
current_token = self.r.access_token
self.assertNotEqual(old_token, current_token)
with self.set_custom_header_match('test_auto_refresh_token__after'):
list(self.r.get_new(limit=5))
self.assertEqual(current_token, self.r.access_token)
|
michael-lazar/praw3
|
tests/test_oauth2_reddit.py
|
Python
|
gpl-3.0
| 12,877
|
#!/usr/bin/env python
#
# Copyright 2015, Viktor Stanchev and contributors
#
# This file is part of pirate-get.
#
# pirate-get is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pirate-get is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with pirate-get. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import re
import string
import gzip
import configparser
import argparse
import builtins
import subprocess
import webbrowser
import urllib.request as request
import urllib.parse as parse
from html.parser import HTMLParser
from urllib.error import URLError, HTTPError
from socket import timeout
from io import BytesIO
from os.path import expanduser, expandvars
colored_output = True
default_timeout = 10
default_headers = {'User-Agent': 'pirate get'}
categories = {
'All': 0,
'Applications': 300,
'Applications/Android': 306,
'Applications/Handheld': 304,
'Applications/IOS (iPad/iPhone)': 305,
'Applications/Mac': 302,
'Applications/Other OS': 399,
'Applications/UNIX': 303,
'Applications/Windows': 301,
'Audio': 100,
'Audio/Audio books': 102,
'Audio/FLAC': 104,
'Audio/Music': 101,
'Audio/Other': 199,
'Audio/Sound clips': 103,
'Games': 400,
'Games/Android': 408,
'Games/Handheld': 406,
'Games/IOS (iPad/iPhone)': 407,
'Games/Mac': 402,
'Games/Other': 499,
'Games/PC': 401,
'Games/PSx': 403,
'Games/Wii': 405,
'Games/XBOX360': 404,
'Other': 600,
'Other/Comics': 602,
'Other/Covers': 604,
'Other/E-books': 601,
'Other/Other': 699,
'Other/Physibles': 605,
'Other/Pictures': 603,
'Porn': 500,
'Porn/Games': 504,
'Porn/HD - Movies': 505,
'Porn/Movie clips': 506,
'Porn/Movies': 501,
'Porn/Movies DVDR': 502,
'Porn/Other': 599,
'Porn/Pictures': 503,
'Video': 200,
'Video/3D': 209,
'Video/HD - Movies': 207,
'Video/HD - TV shows': 208,
'Video/Handheld': 206,
'Video/Movie clips': 204,
'Video/Movies': 201,
'Video/Movies DVDR': 202,
'Video/Music videos': 203,
'Video/Other': 299,
'Video/TV shows': 205}
sorts = {
'TitleDsc': 1, 'TitleAsc': 2,
'DateDsc': 3, 'DateAsc': 4,
'SizeDsc': 5, 'SizeAsc': 6,
'SeedersDsc': 7, 'SeedersAsc': 8,
'LeechersDsc': 9, 'LeechersAsc': 10,
'CategoryDsc': 13, 'CategoryAsc': 14,
'Default': 99}
# create a subclass and override the handler methods
class BayParser(HTMLParser):
title = ''
q = ''
state = 'looking'
results = []
def __init__(self, q):
HTMLParser.__init__(self)
self.q = q.lower()
def handle_starttag(self, tag, attrs):
if tag == 'title':
self.state = 'title'
if tag == 'magnet' and self.state == 'matched':
self.state = 'magnet'
def handle_data(self, data):
if self.state == 'title':
if data.lower().find(self.q) != -1:
self.title = data
self.state = 'matched'
else:
self.state = 'looking'
if self.state == 'magnet':
self.results.append([
'magnet:?xt=urn:btih:' +
parse.quote(data) +
'&dn=' +
parse.quote(self.title), '?', '?'])
self.state = 'looking'
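# Illustrative sketch (assumed from the tag names handled above, not part of
# the original script): the local database file is expected to contain entries
# of roughly this shape, which BayParser turns into magnet links:
#   <title>Some Torrent Name</title>
#   <magnet>0123456789abcdef0123456789abcdef01234567</magnet>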
def print(*args, **kwargs):
if kwargs.get('color', False) and colored_output:
try:
import colorama
except (ImportError):
pass
else:
colorama.init()
color_dict = {
'default': '',
'header': colorama.Back.BLACK + colorama.Fore.WHITE,
'alt': colorama.Fore.YELLOW,
'zebra_0': '',
'zebra_1': colorama.Fore.BLUE,
'WARN': colorama.Fore.MAGENTA,
'ERROR': colorama.Fore.RED}
c = color_dict[kwargs.pop('color')]
args = (c + args[0],) + args[1:] + (colorama.Style.RESET_ALL,)
kwargs.pop('color', None)
return builtins.print(*args, **kwargs)
else:
kwargs.pop('color', None)
return builtins.print(*args, **kwargs)
def parse_cmd(cmd, url):
cmd_args_regex = r'''(('[^']*'|"[^"]*"|(\\\s|[^\s])+)+ *)'''
ret = re.findall(cmd_args_regex, cmd)
ret = [i[0].strip().replace('%s', url) for i in ret]
ret_no_quotes = []
for item in ret:
if (item[0] == "'" and item[-1] == "'") or (item[0] == '"' and item[-1] == '"'):
ret_no_quotes.append(item[1:-1])
else:
ret_no_quotes.append(item)
return ret_no_quotes
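# Illustrative sketch (not part of the original script): parse_cmd splits a
# shell-style command template and substitutes %s with the URL, e.g.
#   parse_cmd('mpv --really-quiet "%s"', 'magnet:?xt=urn:btih:...')
#   # -> ['mpv', '--really-quiet', 'magnet:?xt=urn:btih:...']
# (mpv here is only a hypothetical player command.)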
#todo: redo this with html parser instead of regex
def remote(args, mirror):
res_l = []
pages = int(args.pages)
if pages < 1:
raise ValueError('Please provide an integer greater than 0 '
'for the number of pages to fetch.')
if str(args.category) in categories.values():
category = args.category
elif args.category in categories.keys():
category = categories[args.category]
else:
category = '0'
print('Invalid category ignored', color='WARN')
if str(args.sort) in sorts.values():
sort = args.sort
elif args.sort in sorts.keys():
sort = sorts[args.sort]
else:
sort = '99'
print('Invalid sort ignored', color='WARN')
# Catch the Ctrl-C exception and exit cleanly
try:
sizes = []
uploaded = []
identifiers = []
for page in range(pages):
if args.browse:
path = '/browse/'
if(category == 0):
category = 100
path = '/browse/' + '/'.join(str(i) for i in (
category, page, sort))
elif len(args.search) == 0:
path = '/top/48h' if args.recent else '/top/'
if(category == 0):
path += 'all'
else:
path += str(category)
else:
path = '/search/' + '/'.join(str(i) for i in (
'+'.join(args.search),
page, sort,
category))
req = request.Request(mirror + path, headers=default_headers)
req.add_header('Accept-encoding', 'gzip')
f = request.urlopen(req, timeout=default_timeout)
if f.info().get('Content-Encoding') == 'gzip':
f = gzip.GzipFile(fileobj=BytesIO(f.read()))
res = f.read().decode('utf-8')
found = re.findall(r'"(magnet\:\?xt=[^"]*)|<td align="right">'
r'([^<]+)</td>', res)
# check for a blocked mirror
no_results = re.search(r'No hits\. Try adding an asterisk in '
r'you search phrase\.', res)
if found == [] and no_results is None:
# Contradiction - we found no results,
# but the page didn't say there were no results.
# The page is probably not actually the pirate bay,
# so let's try another mirror
raise IOError('Blocked mirror detected.')
# get sizes as well and substitute the character
sizes.extend([match.replace(' ', ' ').split()
for match in re.findall(r'(?<=Size )[0-9.]'
r'+\ \;[KMGT]*[i ]*B', res)])
uploaded.extend([match.replace(' ', ' ')
for match in re.findall(r'(?<=Uploaded )'
r'.+(?=\, Size)',res)])
identifiers.extend([match.replace(' ', ' ')
for match in re.findall('(?<=/torrent/)'
'[0-9]+(?=/)',res)])
state = 'seeds'
curr = ['', 0, 0] #magnet, seeds, leeches
for f in found:
if f[1] == '':
curr[0] = f[0]
else:
if state == 'seeds':
curr[1] = f[1]
state = 'leeches'
else:
curr[2] = f[1]
state = 'seeds'
res_l.append(curr)
curr = ['', 0, 0]
except KeyboardInterrupt :
print('\nCancelled.')
sys.exit(0)
    # return the sizes in a separate list
return res_l, sizes, uploaded, identifiers
def local(db, search):
xml = open(db).readlines()
parser = BayParser(' '.join(search))
parser.feed(''.join(xml))
return parser.results
def load_config():
config = configparser.ConfigParser()
# default options
config.add_section('Save')
config.set('Save', 'magnets', 'false')
config.set('Save', 'torrents', 'false')
config.set('Save', 'directory', os.getcwd())
config.add_section('LocalDB')
config.set('LocalDB', 'enabled', 'false')
config.set('LocalDB', 'path', expanduser('~/downloads/pirate-get/db'))
config.add_section('Misc')
config.set('Misc', 'openCommand', '')
config.set('Misc', 'transmission', 'false')
config.set('Misc', 'colors', 'true')
# user-defined config files
main = expandvars('$XDG_CONFIG_HOME/pirate-get')
alt = expanduser('~/.config/pirate-get')
# read config file
config.read([main] if os.path.isfile(main) else [alt])
# expand env variables
directory = expanduser(expandvars(config.get('Save', 'Directory')))
path = expanduser(expandvars(config.get('LocalDB', 'path')))
config.set('Save', 'Directory', directory)
config.set('LocalDB', 'path', path)
return config
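# Illustrative sketch of a user config file that load_config would pick up
# (e.g. ~/.config/pirate-get). The section and option names mirror the
# defaults set above; the values are only an assumed example:
#   [Save]
#   magnets = true
#   directory = ~/Downloads
#   [Misc]
#   transmission = true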
def get_torrent(info_hash):
url = 'http://torcache.net/torrent/{:X}.torrent'
req = request.Request(url.format(info_hash), headers=default_headers)
req.add_header('Accept-encoding', 'gzip')
torrent = request.urlopen(req, timeout=default_timeout)
if torrent.info().get('Content-Encoding') == 'gzip':
torrent = gzip.GzipFile(fileobj=BytesIO(torrent.read()))
return torrent.read()
def print_search_results(mags, sizes, uploaded, local):
columns = int(os.popen('stty size', 'r').read().split()[1])
cur_color = 'zebra_0'
if local:
print('{:>4} {:{length}}'.format(
'LINK', 'NAME', length=columns - 8),
color='header')
else:
print('{:>4} {:>5} {:>5} {:>5} {:9} {:11} {:{length}}'.format(
'LINK', 'SEED', 'LEECH', 'RATIO',
'SIZE', 'UPLOAD', 'NAME', length=columns - 52),
color='header')
for m, magnet in enumerate(mags):
# Alternate between colors
cur_color = 'zebra_0' if cur_color == 'zebra_1' else 'zebra_1'
name = re.search(r'dn=([^\&]*)', magnet[0])
torrent_name = parse.unquote(name.group(1)).replace('+', ' ')
if local:
line = '{:5} {:{length}}'
content = [m, torrent_name[:columns]]
else:
no_seeders, no_leechers = map(int, magnet[1:])
size, unit = (float(sizes[m][0]), sizes[m][1]) if sizes else (0, '???')
date = uploaded[m]
# compute the S/L ratio (Higher is better)
try:
ratio = no_seeders / no_leechers
except ZeroDivisionError:
ratio = float('inf')
line = ('{:4} {:5} {:5} {:5.1f} {:5.1f}'
' {:3} {:<11} {:{length}}')
content = [m, no_seeders, no_leechers, ratio,
size, unit, date, torrent_name[:columns - 52]]
# enhanced print output with justified columns
print(line.format(*content, length=columns - 52), color=cur_color)
def print_descriptions(chosen_links, mags, site, identifiers):
for link in chosen_links:
link = int(link)
path = '/torrent/%s/' % identifiers[link]
req = request.Request(site + path, headers=default_headers)
req.add_header('Accept-encoding', 'gzip')
f = request.urlopen(req, timeout=default_timeout)
if f.info().get('Content-Encoding') == 'gzip':
f = gzip.GzipFile(fileobj=BytesIO(f.read()))
res = f.read().decode('utf-8')
name = re.search(r'dn=([^\&]*)', mags[link][0])
torrent_name = parse.unquote(name.group(1)).replace('+', ' ')
desc = re.search(r'<div class="nfo">\s*<pre>(.+?)(?=</pre>)',
res, re.DOTALL).group(1)
# Replace HTML links with markdown style versions
desc = re.sub(r'<a href="\s*([^"]+?)\s*"[^>]*>(\s*)([^<]+?)(\s*'
r')</a>', r'\2[\3](\1)\4', desc)
print('Description for "%s":' % torrent_name, color='zebra_1')
print(desc, color='zebra_0')
def print_file_lists(chosen_links, mags, site, identifiers):
for link in chosen_links:
path = '/ajax_details_filelist.php'
query = '?id=' + identifiers[int(link)]
req = request.Request(site + path + query, headers=default_headers)
req.add_header('Accept-encoding', 'gzip')
f = request.urlopen(req, timeout=default_timeout)
if f.info().get('Content-Encoding') == 'gzip':
f = gzip.GzipFile(fileobj=BytesIO(f.read()))
res = f.read().decode('utf-8').replace(' ', ' ')
files = re.findall(r'<td align="left">\s*([^<]+?)\s*</td><td ali'
r'gn="right">\s*([^<]+?)\s*</tr>', res)
name = re.search(r'dn=([^\&]*)', mags[int(link)][0])
torrent_name = parse.unquote(name.group(1)).replace('+', ' ')
print('Files in "%s":' % torrent_name, color='zebra_1')
cur_color = 'zebra_0'
for f in files:
print('{0[0]:>11} {0[1]}'.format(f), color=cur_color)
cur_color = 'zebra_0' if (cur_color == 'zebra_1') else 'zebra_1'
def save_torrents(chosen_links, mags, folder):
for link in chosen_links:
magnet = mags[int(link)][0]
name = re.search(r'dn=([^\&]*)', magnet)
torrent_name = parse.unquote(name.group(1)).replace('+', ' ')
info_hash = int(re.search(r'btih:([a-f0-9]{40})', magnet).group(1), 16)
file = os.path.join(folder, torrent_name + '.torrent')
try:
torrent = get_torrent(info_hash)
except HTTPError:
print('There is no cached file for this torrent :(', color='ERROR')
else:
open(file,'wb').write(torrent)
print('Saved {:X} in {}'.format(info_hash, file))
def save_magnets(chosen_links, mags, folder):
for link in chosen_links:
magnet = mags[int(link)][0]
name = re.search(r'dn=([^\&]*)', magnet)
torrent_name = parse.unquote(name.group(1)).replace('+', ' ')
info_hash = int(re.search(r'btih:([a-f0-9]{40})', magnet).group(1), 16)
file = os.path.join(folder, torrent_name + '.magnet')
print('Saved {:X} in {}'.format(info_hash, file))
with open(file, 'w') as f:
f.write(magnet + '\n')
def main():
config = load_config()
parser = argparse.ArgumentParser(
description='finds and downloads torrents from the Pirate Bay')
parser.add_argument('-b', dest='browse',
action='store_true',
help='display in Browse mode')
parser.add_argument('search', metavar='search',
nargs='*', help='term to search for')
parser.add_argument('-c', dest='category', metavar='category',
help='specify a category to search', default='All')
parser.add_argument('-s', dest='sort', metavar='sort',
help='specify a sort option', default='SeedersDsc')
parser.add_argument('-R', dest='recent', action='store_true',
help='torrents uploaded in the last 48hours.'
'*ignored in searches*')
parser.add_argument('-l', dest='list_categories',
action='store_true',
help='list categories')
parser.add_argument('--list_sorts', dest='list_sorts',
action='store_true',
help='list Sortable Types')
parser.add_argument('-L', '--local', dest='database',
help='an xml file containing the Pirate Bay database')
parser.add_argument('-p', dest='pages', default=1,
help='the number of pages to fetch '
"(doesn't work with --local)")
parser.add_argument('-0', dest='first',
action='store_true',
help='choose the top result')
parser.add_argument('-a', '--download-all',
action='store_true',
help='download all results')
parser.add_argument('-t', '--transmission',
action='store_true',
help='open magnets with transmission-remote')
parser.add_argument('-P', '--port', dest='port',
help='transmission-remote rpc port. default is 9091')
parser.add_argument('-C', '--custom', dest='command',
help='open magnets with a custom command'
' (%%s will be replaced with the url)')
parser.add_argument('-M', '--save-magnets',
action='store_true',
help='save magnets links as files')
parser.add_argument('-T', '--save-torrents',
action='store_true',
help='save torrent files')
parser.add_argument('-S', '--save-directory',
type=str, metavar='DIRECTORY',
help='directory where to save downloaded files'
' (if none is given $PWD will be used)')
parser.add_argument('--disable-colors', dest='color',
action='store_false',
help='disable colored output')
args = parser.parse_args()
if (config.getboolean('Misc', 'colors') and not args.color
or not config.getboolean('Misc', 'colors')):
global colored_output
colored_output = False
if args.save_directory:
config.set('Save', 'directory', args.save_directory)
transmission_command = ['transmission-remote']
if args.port:
transmission_command.append(args.port)
if args.transmission or config.getboolean('Misc', 'transmission'):
ret = subprocess.call(transmission_command + ['-l'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if ret != 0:
print('Transmission is not running.')
return
if args.list_categories:
cur_color = 'zebra_0'
for key, value in sorted(categories.items()) :
cur_color = 'zebra_0' if cur_color == 'zebra_1' else 'zebra_1'
print(str(value), '\t', key, sep='', color=cur_color)
return
if args.list_sorts:
cur_color = 'zebra_0'
for key, value in sorted(sorts.items()):
cur_color = 'zebra_0' if cur_color == 'zebra_1' else 'zebra_1'
print(str(value), '\t', key, sep='', color=cur_color)
return
if args.database or config.getboolean('LocalDB', 'enabled'):
if args.database:
path = args.database
else:
path = config.get('LocalDB', 'path')
mags = local(path, args.search)
sizes, uploaded = [], []
else:
mags, mirrors = [], {'https://thepiratebay.se'}
try:
req = request.Request('https://proxybay.co/list.txt',
headers=default_headers)
f = request.urlopen(req, timeout=default_timeout)
except IOError:
print('Could not fetch additional mirrors', color='WARN')
else:
if f.getcode() != 200:
raise IOError('The proxy bay responded with an error.')
mirrors = mirrors.union([i.decode('utf-8').strip()
for i in f.readlines()][3:])
for mirror in mirrors:
try:
print('Trying', mirror, end='... ')
mags, sizes, uploaded, identifiers = remote(args, mirror)
except (URLError, IOError, ValueError, timeout):
print('Failed', color='WARN')
else:
site = mirror
print('Ok', color='alt')
break
else:
print('No available mirrors :(', color='WARN')
return
if not mags:
print('No results')
return
print_search_results(mags, sizes, uploaded, local=args.database)
if args.first:
print('Choosing first result')
choices = [0]
elif args.download_all:
print('Downloading all results')
choices = range(len(mags))
else:
# New input loop to support different link options
while True:
print("\nSelect links (Type 'h' for more options"
", 'q' to quit)", end='\b', color='alt')
try:
l=input(': ')
except KeyboardInterrupt :
print('\nCancelled.')
return
try:
# Very permissive handling
                # Check for any occurrences of h, d, f, p, m, t, or q
cmd_code_match = re.search(r'([hdfpmtq])', l,
flags=re.IGNORECASE)
if cmd_code_match:
code = cmd_code_match.group(0).lower()
else:
code = None
# Clean up command codes
                # Substitute multiple consecutive spaces/commas with a single
                # comma, remove anything that isn't an integer or comma,
                # then turn the result into a list.
l = re.sub(r'^[hdfp, ]*|[hdfp, ]*$', '', l)
l = re.sub('[ ,]+', ',', l)
l = re.sub('[^0-9,]', '', l)
choices = l.split(',')
# Act on option, if supplied
print('')
if code == 'h':
print('Options:',
'<links>: Download selected torrents',
'[m<links>]: Save magnets as files',
'[t<links>]: Save .torrent files',
'[d<links>]: Get descriptions',
'[f<links>]: Get files',
'[p] Print search results',
'[q] Quit', sep='\n')
elif code == 'q':
print('Bye.', color='alt')
return
elif code == 'd':
print_descriptions(choices, mags, site, identifiers)
elif code == 'f':
print_file_lists(choices, mags, site, identifiers)
elif code == 'p':
print_search_results(mags, sizes, uploaded)
elif code == 'm':
save_magnets(choices, mags,
config.get('Save', 'directory'))
elif code == 't':
save_torrents(choices, mags,
config.get('Save', 'directory'))
elif not l:
print('No links entered!', color='WARN')
else:
break
except Exception as e:
print('Exception:', e, color='ERROR')
choices = ()
save_to_file = False
if args.save_magnets or config.getboolean('Save', 'magnets'):
print('Saving selected magnets...')
save_magnets(choices, mags, config.get('Save', 'directory'))
save_to_file = True
if args.save_torrents or config.getboolean('Save', 'torrents'):
print('Saving selected torrents...')
save_torrents(choices, mags, config.get('Save', 'directory'))
save_to_file = True
if save_to_file:
return
for choice in choices:
url = mags[int(choice)][0]
if args.transmission or config.getboolean('Misc', 'transmission'):
subprocess.call(transmission_command + ['-l', '--add', url], shell=False)
subprocess.call(transmission_command + ['-l'])
elif args.command or config.get('Misc', 'openCommand'):
command = config.get('Misc', 'openCommand')
if args.command:
command = args.command
subprocess.call(parse_cmd(command, url), shell=False)
else:
webbrowser.open(url)
if __name__ == '__main__':
main()
|
yanguojun/pirate-get
|
pirate-get.py
|
Python
|
agpl-3.0
| 25,402
|
#
# Copyright (C) 2014
# Sean Poyser (seanpoyser@gmail.com)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import utils
import xbmc
import xbmcgui
import os
utils.safeCall(utils.VerifyZipFiles)
utils.safeCall(utils.VerifyKeymaps)
utils.safeCall(utils.verifyPlugins)
utils.safeCall(utils.verifyLocation)
utils.safeCall(utils.verifyRunning)
HOME = 10000
if utils.ADDON.getSetting('AUTOSTART') == 'true':
utils.LaunchSF()
#def checkDisabled():
# try:
# if xbmc.getCondVisibility('System.HasAddon(%s)' % utils.ADDONID) == 0:
# utils.DeleteKeymap(utils.KEYMAP_HOT)
# utils.DeleteKeymap(utils.KEYMAP_MENU)
# return True
# except:
# return False
class MyMonitor(xbmc.Monitor):
def __init__(self):
xbmc.Monitor.__init__(self)
self.hotkey = utils.ADDON.getSetting('HOTKEY')
self.context = utils.ADDON.getSetting('CONTEXT') == 'true'
self.updateStdContextMenuItem()
def onSettingsChanged(self):
hotkey = utils.ADDON.getSetting('HOTKEY')
context = utils.ADDON.getSetting('CONTEXT') == 'true'
self.updateStdContextMenuItem()
utils.VerifyKeymaps()
if self.hotkey == hotkey and self.context == context:
return
self.hotkey = hotkey
self.context = context
utils.UpdateKeymaps()
def updateStdContextMenuItem(self):
self.std_context = utils.ADDON.getSetting('CONTEXT_STD') == 'true'
self.std_addtofaves = utils.ADDON.getSetting('ADDTOFAVES_ON_STD') == 'true'
self.std_download = utils.ADDON.getSetting('DOWNLOAD_ON_STD') == 'true'
        #usage in addon.xml : <visible>!IsEmpty(Window(Home).Property(SF_STD_CONTEXTMENU_ENABLED))</visible>
#---------- SF on standard context menu ------------------------------------------------
if self.std_context:
xbmcgui.Window(HOME).setProperty('SF_STD_CONTEXTMENU_ENABLED', 'True')
else:
xbmcgui.Window(HOME).clearProperty('SF_STD_CONTEXTMENU_ENABLED')
#---------- Add to Faves on standard context menu --------------------------------------
if self.std_addtofaves:
xbmcgui.Window(HOME).setProperty('SF_STD_ADDTOFAVES_ENABLED', 'True')
else:
xbmcgui.Window(HOME).clearProperty('SF_STD_ADDTOFAVES_ENABLED')
#---------- Download on standard context menu ------------------------------------------
if self.std_download:
xbmcgui.Window(HOME).setProperty('SF_STD_DOWNLOAD_ENABLED', 'True')
else:
xbmcgui.Window(HOME).clearProperty('SF_STD_DOWNLOAD_ENABLED')
monitor = MyMonitor()
while (not xbmc.abortRequested):
xbmc.sleep(1000)
del monitor
|
ronniehd/repository.ronniehd
|
plugin.program.super.favourites/service.py
|
Python
|
gpl-3.0
| 3,483
|
"""
Nose test running.
This module implements ``test()`` and ``bench()`` functions for NumPy modules.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import warnings
from numpy.compat import basestring
import numpy as np
from .utils import import_nose, suppress_warnings
__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
'_numpy_tester', 'get_package_name', 'import_nose',
'suppress_warnings']
def get_package_name(filepath):
"""
Given a path where a package is installed, determine its name.
Parameters
----------
filepath : str
Path to a file. If the determination fails, "numpy" is returned.
Examples
--------
>>> np.testing.nosetester.get_package_name('nonsense')
'numpy'
"""
fullpath = filepath[:]
pkg_name = []
while 'site-packages' in filepath or 'dist-packages' in filepath:
filepath, p2 = os.path.split(filepath)
if p2 in ('site-packages', 'dist-packages'):
break
pkg_name.append(p2)
# if package name determination failed, just default to numpy/scipy
if not pkg_name:
if 'scipy' in fullpath:
return 'scipy'
else:
return 'numpy'
# otherwise, reverse to get correct order and return
pkg_name.reverse()
# don't include the outer egg directory
if pkg_name[0].endswith('.egg'):
pkg_name.pop(0)
return '.'.join(pkg_name)
def run_module_suite(file_to_run=None, argv=None):
"""
Run a test module.
Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
the command line
Parameters
----------
file_to_run : str, optional
Path to test module, or None.
By default, run the module from which this function is called.
argv : list of strings
Arguments to be passed to the nose test runner. ``argv[0]`` is
ignored. All command line arguments accepted by ``nosetests``
will work. If it is the default value None, sys.argv is used.
.. versionadded:: 1.9.0
Examples
--------
Adding the following::
if __name__ == "__main__" :
run_module_suite(argv=sys.argv)
at the end of a test module will run the tests when that module is
called in the python interpreter.
Alternatively, calling::
>>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
from an interpreter will run all the test routine in 'test_matlib.py'.
"""
if file_to_run is None:
f = sys._getframe(1)
file_to_run = f.f_locals.get('__file__', None)
if file_to_run is None:
raise AssertionError
if argv is None:
argv = sys.argv + [file_to_run]
else:
argv = argv + [file_to_run]
nose = import_nose()
from .noseclasses import KnownFailurePlugin
nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
class NoseTester(object):
"""
Nose test runner.
This class is made available as numpy.testing.Tester, and a test function
is typically added to a package's __init__.py like so::
from numpy.testing import Tester
test = Tester().test
Calling this test function finds and runs all tests associated with the
package and all its sub-packages.
Attributes
----------
package_path : str
Full path to the package to test.
package_name : str
Name of the package to test.
Parameters
----------
package : module, str or None, optional
The package to test. If a string, this should be the full path to
the package. If None (default), `package` is set to the module from
which `NoseTester` is initialized.
raise_warnings : None, str or sequence of warnings, optional
This specifies which warnings to configure as 'raise' instead
of being shown once during the test execution. Valid strings are:
- "develop" : equals ``(Warning,)``
- "release" : equals ``()``, don't raise on any warnings.
Default is "release".
depth : int, optional
If `package` is None, then this can be used to initialize from the
module of the caller of (the caller of (...)) the code that
initializes `NoseTester`. Default of 0 means the module of the
immediate caller; higher values are useful for utility routines that
want to initialize `NoseTester` objects on behalf of other code.
"""
def __init__(self, package=None, raise_warnings="release", depth=0,
check_fpu_mode=False):
# Back-compat: 'None' used to mean either "release" or "develop"
# depending on whether this was a release or develop version of
# numpy. Those semantics were fine for testing numpy, but not so
# helpful for downstream projects like scipy that use
# numpy.testing. (They want to set this based on whether *they* are a
# release or develop version, not whether numpy is.) So we continue to
# accept 'None' for back-compat, but it's now just an alias for the
# default "release".
if raise_warnings is None:
raise_warnings = "release"
package_name = None
if package is None:
f = sys._getframe(1 + depth)
package_path = f.f_locals.get('__file__', None)
if package_path is None:
raise AssertionError
package_path = os.path.dirname(package_path)
package_name = f.f_locals.get('__name__', None)
elif isinstance(package, type(os)):
package_path = os.path.dirname(package.__file__)
package_name = getattr(package, '__name__', None)
else:
package_path = str(package)
self.package_path = package_path
# Find the package name under test; this name is used to limit coverage
# reporting (if enabled).
if package_name is None:
package_name = get_package_name(package_path)
self.package_name = package_name
# Set to "release" in constructor in maintenance branches.
self.raise_warnings = raise_warnings
# Whether to check for FPU mode changes
self.check_fpu_mode = check_fpu_mode
def _test_argv(self, label, verbose, extra_argv):
''' Generate argv for nosetest command
Parameters
----------
label : {'fast', 'full', '', attribute identifier}, optional
see ``test`` docstring
verbose : int, optional
Verbosity value for test outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to nosetests.
Returns
-------
argv : list
command line arguments that will be passed to nose
'''
argv = [__file__, self.package_path, '-s']
if label and label != 'full':
if not isinstance(label, basestring):
raise TypeError('Selection label should be a string')
if label == 'fast':
label = 'not slow'
argv += ['-A', label]
argv += ['--verbosity', str(verbose)]
# When installing with setuptools, and also in some other cases, the
# test_*.py files end up marked +x executable. Nose, by default, does
# not run files marked with +x as they might be scripts. However, in
# our case nose only looks for test_*.py files under the package
# directory, which should be safe.
argv += ['--exe']
if extra_argv:
argv += extra_argv
return argv
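    # Illustrative values (editor's sketch; paths vary): the default call
    # _test_argv('fast', 1, None) returns, up to absolute paths,
    #   [__file__, <package_path>, '-s', '-A', 'not slow',
    #    '--verbosity', '1', '--exe']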
def _show_system_info(self):
nose = import_nose()
import numpy
print("NumPy version %s" % numpy.__version__)
relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
print("NumPy relaxed strides checking option:", relaxed_strides)
npdir = os.path.dirname(numpy.__file__)
print("NumPy is installed in %s" % npdir)
if 'scipy' in self.package_name:
import scipy
print("SciPy version %s" % scipy.__version__)
spdir = os.path.dirname(scipy.__file__)
print("SciPy is installed in %s" % spdir)
pyversion = sys.version.replace('\n', '')
print("Python version %s" % pyversion)
print("nose version %d.%d.%d" % nose.__versioninfo__)
def _get_custom_doctester(self):
""" Return instantiated plugin for doctests
Allows subclassing of this class to override doctester
A return value of None means use the nose builtin doctest plugin
"""
from .noseclasses import NumpyDoctest
return NumpyDoctest()
def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
doctests=False, coverage=False, timer=False):
"""
Run tests for module using nose.
This method does the heavy lifting for the `test` method. It takes all
the same arguments, for details see `test`.
See Also
--------
test
"""
# fail with nice error message if nose is not present
import_nose()
# compile argv
argv = self._test_argv(label, verbose, extra_argv)
# our way of doing coverage
if coverage:
argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
'--cover-tests', '--cover-erase']
if timer:
if timer is True:
argv += ['--with-timer']
elif isinstance(timer, int):
argv += ['--with-timer', '--timer-top-n', str(timer)]
# construct list of plugins
import nose.plugins.builtin
from nose.plugins import EntryPointPluginManager
from .noseclasses import (KnownFailurePlugin, Unplugger,
FPUModeCheckPlugin)
plugins = [KnownFailurePlugin()]
plugins += [p() for p in nose.plugins.builtin.plugins]
if self.check_fpu_mode:
plugins += [FPUModeCheckPlugin()]
argv += ["--with-fpumodecheckplugin"]
try:
# External plugins (like nose-timer)
entrypoint_manager = EntryPointPluginManager()
entrypoint_manager.loadPlugins()
plugins += [p for p in entrypoint_manager.plugins]
except ImportError:
# Relies on pkg_resources, not a hard dependency
pass
# add doctesting if required
doctest_argv = '--with-doctest' in argv
if doctests == False and doctest_argv:
doctests = True
plug = self._get_custom_doctester()
if plug is None:
# use standard doctesting
if doctests and not doctest_argv:
argv += ['--with-doctest']
else: # custom doctesting
if doctest_argv: # in fact the unplugger would take care of this
argv.remove('--with-doctest')
plugins += [Unplugger('doctest'), plug]
if doctests:
argv += ['--with-' + plug.name]
return argv, plugins
def test(self, label='fast', verbose=1, extra_argv=None,
doctests=False, coverage=False, raise_warnings=None,
timer=False):
"""
Run tests for module using nose.
Parameters
----------
label : {'fast', 'full', '', attribute identifier}, optional
Identifies the tests to run. This can be a string to pass to
the nosetests executable with the '-A' option, or one of several
special values. Special values are:
* 'fast' - the default - which corresponds to the ``nosetests -A``
option of 'not slow'.
* 'full' - fast (as above) and slow tests as in the
'no -A' option to nosetests - this is the same as ''.
* None or '' - run all tests.
attribute_identifier - string passed directly to nosetests as '-A'.
verbose : int, optional
Verbosity value for test outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to nosetests.
doctests : bool, optional
If True, run doctests in module. Default is False.
coverage : bool, optional
If True, report coverage of NumPy code. Default is False.
(This requires the `coverage module:
<http://nedbatchelder.com/code/modules/coverage.html>`_).
raise_warnings : None, str or sequence of warnings, optional
This specifies which warnings to configure as 'raise' instead
of being shown once during the test execution. Valid strings are:
- "develop" : equals ``(Warning,)``
- "release" : equals ``()``, don't raise on any warnings.
The default is to use the class initialization value.
timer : bool or int, optional
Timing of individual tests with ``nose-timer`` (which needs to be
installed). If True, time tests and report on all of them.
If an integer (say ``N``), report timing results for ``N`` slowest
tests.
Returns
-------
result : object
Returns the result of running the tests as a
``nose.result.TextTestResult`` object.
Notes
-----
Each NumPy module exposes `test` in its namespace to run all tests for it.
For example, to run all tests for numpy.lib:
>>> np.lib.test() #doctest: +SKIP
Examples
--------
>>> result = np.lib.test() #doctest: +SKIP
Running unit tests for numpy.lib
...
Ran 976 tests in 3.933s
OK
>>> result.errors #doctest: +SKIP
[]
>>> result.knownfail #doctest: +SKIP
[]
"""
# cap verbosity at 3 because nose becomes *very* verbose beyond that
verbose = min(verbose, 3)
from . import utils
utils.verbose = verbose
argv, plugins = self.prepare_test_args(
label, verbose, extra_argv, doctests, coverage, timer)
if doctests:
print("Running unit tests and doctests for %s" % self.package_name)
else:
print("Running unit tests for %s" % self.package_name)
self._show_system_info()
# reset doctest state on every run
import doctest
doctest.master = None
if raise_warnings is None:
raise_warnings = self.raise_warnings
_warn_opts = dict(develop=(Warning,),
release=())
if isinstance(raise_warnings, basestring):
raise_warnings = _warn_opts[raise_warnings]
with suppress_warnings("location") as sup:
# Reset the warning filters to the default state,
# so that running the tests is more repeatable.
warnings.resetwarnings()
# Set all warnings to 'warn', this is because the default 'once'
# has the bad property of possibly shadowing later warnings.
warnings.filterwarnings('always')
# Force the requested warnings to raise
for warningtype in raise_warnings:
warnings.filterwarnings('error', category=warningtype)
# Filter out annoying import messages.
sup.filter(message='Not importing directory')
sup.filter(message="numpy.dtype size changed")
sup.filter(message="numpy.ufunc size changed")
sup.filter(category=np.ModuleDeprecationWarning)
# Filter out boolean '-' deprecation messages. This allows
# older versions of scipy to test without a flood of messages.
sup.filter(message=".*boolean negative.*")
sup.filter(message=".*boolean subtract.*")
# Filter out distutils cpu warnings (could be localized to
# distutils tests). ASV has problems with top level import,
# so fetch module for suppression here.
with warnings.catch_warnings():
warnings.simplefilter("always")
from ...distutils import cpuinfo
sup.filter(category=UserWarning, module=cpuinfo)
# See #7949: Filter out deprecation warnings due to the -3 flag to
# python 2
if sys.version_info.major == 2 and sys.py3kwarning:
# This is very specific, so using the fragile module filter
# is fine
import threading
sup.filter(DeprecationWarning,
r"sys\.exc_clear\(\) not supported in 3\.x",
module=threading)
sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
# Filter out some deprecation warnings inside nose 1.3.7 when run
# on python 3.5b2. See
# https://github.com/nose-devs/nose/issues/929
# Note: it is hard to filter based on module for sup (lineno could
# be implemented).
warnings.filterwarnings("ignore", message=".*getargspec.*",
category=DeprecationWarning,
module=r"nose\.")
from .noseclasses import NumpyTestProgram
t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
return t.result
def bench(self, label='fast', verbose=1, extra_argv=None):
"""
Run benchmarks for module using nose.
Parameters
----------
label : {'fast', 'full', '', attribute identifier}, optional
Identifies the benchmarks to run. This can be a string to pass to
the nosetests executable with the '-A' option, or one of several
special values. Special values are:
* 'fast' - the default - which corresponds to the ``nosetests -A``
option of 'not slow'.
* 'full' - fast (as above) and slow benchmarks as in the
'no -A' option to nosetests - this is the same as ''.
* None or '' - run all tests.
attribute_identifier - string passed directly to nosetests as '-A'.
verbose : int, optional
Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to nosetests.
Returns
-------
success : bool
Returns True if running the benchmarks works, False if an error
occurred.
Notes
-----
Benchmarks are like tests, but have names starting with "bench" instead
of "test", and can be found under the "benchmarks" sub-directory of the
module.
Each NumPy module exposes `bench` in its namespace to run all benchmarks
for it.
Examples
--------
>>> success = np.lib.bench() #doctest: +SKIP
Running benchmarks for numpy.lib
...
using 562341 items:
unique:
0.11
unique1d:
0.11
ratio: 1.0
nUnique: 56230 == 56230
...
OK
>>> success #doctest: +SKIP
True
"""
print("Running benchmarks for %s" % self.package_name)
self._show_system_info()
argv = self._test_argv(label, verbose, extra_argv)
argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
# import nose or make informative error
nose = import_nose()
# get plugin to disable doctests
from .noseclasses import Unplugger
add_plugins = [Unplugger('doctest')]
return nose.run(argv=argv, addplugins=add_plugins)
def _numpy_tester():
if hasattr(np, "__version__") and ".dev0" in np.__version__:
mode = "develop"
else:
mode = "release"
return NoseTester(raise_warnings=mode, depth=1,
check_fpu_mode=True)
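# --- Editor's usage sketch (not part of the original module) ---
# Minimal direct invocation; assumes nose is installed, otherwise
# import_nose() raises an informative ImportError.
if __name__ == '__main__':
    tester = _numpy_tester()
    # Equivalent to what numpy exposes as np.test(): run the fast suite.
    tester.test(label='fast', verbose=1)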
|
b-carter/numpy
|
numpy/testing/nose_tools/nosetester.py
|
Python
|
bsd-3-clause
| 20,562
|
#!/usr/bin/env python
from __future__ import print_function, absolute_import
import os
import subprocess
from . import database
import pybedtools as pbt
def get_window_data(conn, analysis_type, temp_file):
"""
Create a temp file of the requested statistic for each variant.
Execute a query against the variants table
that extracts the requested column for each variant.
save the results to '.temp.pid', which will be loaded
into a pybedtools BedTool for use with the bedtools map
function. This will compute the requested statistic
for each variant in the variants table
"""
    if analysis_type == "hwe":
        column = 'hwe'
    elif analysis_type == "nucl_div":
        column = 'pi'
    else:
        # Fail fast instead of hitting a NameError on 'column' below.
        raise ValueError("unrecognized analysis type: %s" % analysis_type)
t = open(temp_file, 'w')
query = "SELECT chrom,start,end," + \
column + \
" FROM variants ORDER BY chrom,start"
for row in conn.execute(query):
if row[column] is not None:
t.write('%s\t%d\t%d\t%f\n' % (str(row['chrom']),
int(row['start']),
int(row['end']),
float(row[column])))
t.close()
    # Tell bedtools map that the statistic is in the fourth column.
    # Parameterized for future mods.
return 4
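# Editor's note: each line written above is a BED interval plus the
# requested statistic (values illustrative):
#   chr1\t10000\t10001\t0.012300
# The statistic sits in the fourth column, hence the return value of 4
# that is later passed to bedtools map as its '-c' argument.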
def make_windows(conn, args, temp_file):
"""
Compute the requested statistic for the user-defined windows.
"""
# create our windows with pybedtools
window = pbt.BedTool()
if args.step_size == 0:
args.step_size = args.window_size
windows = window.window_maker(genome='hg19',
w=args.window_size,
s=args.step_size)
# create a temp file ('.temp.pid') storing the requested stat
# for each variant. Load this into a pybedtools BedTool
op_col = get_window_data(conn, args.analysis_type, temp_file)
window_data = pbt.BedTool(temp_file)
# Use bedtools map to summarize and report
# the requested statistic for each window
windowed_analysis = windows.map(window_data, o=args.op_type, c=op_col)
for window in windowed_analysis:
each = str(window).strip().split("\t")
        if args.op_type == "collapse" or each[3] == ".":
print("\t".join(each[0:]))
else:
print("\t".join(each[0:3])+"\t"+str(round(float(each[3]),4)))
# cleanup
os.remove(temp_file)
def windower(parser, args):
check_dependencies("windower", [["bedtools", "--version"]])
conn, metadata = database.get_session_metadata(args.db)
pid = os.getpid()
temp_file = ".".join(['.temp', str(pid)])
make_windows(conn, args, temp_file)
def check_dependencies(tool, deps):
"""Ensure required tools for installation are present.
"""
for cmd in deps:
try:
retcode = subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except OSError:
retcode = 127
if retcode == 127:
raise OSError("gemini %s requires %s. Please install and add to your PATH." % (tool, cmd[0]))
|
bgruening/gemini
|
gemini/gemini_windower.py
|
Python
|
mit
| 3,125
|
# Test SIR with fixed recovery time under different dynamics
#
# Copyright (C) 2017--2020 Simon Dobson
#
# This file is part of epydemic, epidemic network simulations in Python.
#
# epydemic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# epydemic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with epydemic. If not, see <http://www.gnu.org/licenses/gpl.html>.
from epydemic import *
from test.compartmenteddynamics import CompartmentedDynamicsTest
from test.test_sir import SIRTest
import epyc
import unittest
import networkx
class SIRFixedRecoveryTest(SIRTest):
def setUp( self ):
'''Set up the experimental parameters and experiment.'''
super().setUp()
self._params[SIR_FixedRecovery.T_INFECTED] = 1.0
self._lab[SIR_FixedRecovery.T_INFECTED] = [ 1.0, 2.0 ]
self._model = SIR_FixedRecovery()
if __name__ == '__main__':
unittest.main()
|
simoninireland/epydemic
|
test/test_sir_fixedrecovery.py
|
Python
|
gpl-3.0
| 1,340
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import spack.installer as inst
import spack.repo
import spack.spec
def test_build_request_errors(install_mockery):
with pytest.raises(ValueError, match='must be a package'):
inst.BuildRequest('abc', {})
pkg = spack.repo.get('trivial-install-test-package')
with pytest.raises(ValueError, match='must have a concrete spec'):
inst.BuildRequest(pkg, {})
def test_build_request_basics(install_mockery):
spec = spack.spec.Spec('dependent-install')
spec.concretize()
assert spec.concrete
# Ensure key properties match expectations
request = inst.BuildRequest(spec.package, {})
assert not request.pkg.stop_before_phase
assert not request.pkg.last_phase
assert request.spec == spec.package.spec
# Ensure key default install arguments are set
assert 'install_package' in request.install_args
assert 'install_deps' in request.install_args
def test_build_request_strings(install_mockery):
"""Tests of BuildRequest repr and str for coverage purposes."""
# Using a package with one dependency
spec = spack.spec.Spec('dependent-install')
spec.concretize()
assert spec.concrete
# Ensure key properties match expectations
request = inst.BuildRequest(spec.package, {})
# Cover __repr__
irep = request.__repr__()
assert irep.startswith(request.__class__.__name__)
# Cover __str__
istr = str(request)
assert "package=dependent-install" in istr
assert "install_args=" in istr
|
iulian787/spack
|
lib/spack/spack/test/buildrequest.py
|
Python
|
lgpl-2.1
| 1,708
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import itertools
import random
import math
import json
import functools
import time
import logging
import copy
import os
import sys
import tempfile
import shutil
import signal
import StringIO
import threading
import traceback
from collections import deque
from nupic.frameworks.opf import opfhelpers
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.opfbasicenvironment import BasicPredictionLogger
from nupic.frameworks.opf.opftaskdriver import OPFTaskDriver
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement,
matchPatterns)
from nupic.frameworks.opf.periodic import (PeriodicActivityMgr,
PeriodicActivityRequest)
from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager
from nupic.support.serializationutils import sortedJSONDumpS
from nupic.support.configuration import Configuration
from nupic.support.errorcodes import ErrorCodes
from nupic.database.ClientJobsDAO import ClientJobsDAO
from nupic.swarming import regression
from nupic.swarming import utils
################################################################################
class OPFModelRunner(object):
"""This class runs an a given Model"""
# The minimum number of records that need to have been read for this model
# to be a candidate for 'best model'
_MIN_RECORDS_TO_BE_BEST = None
# The number of points we look at when trying to figure out whether or not a
# model has matured
_MATURITY_NUM_POINTS = None
# The maximum rate of change in the model's metric for it to be considered 'mature'
_MATURITY_MAX_CHANGE = None
def __init__(self,
modelID,
jobID,
predictedField,
experimentDir,
reportKeyPatterns,
optimizeKeyPattern,
jobsDAO,
modelCheckpointGUID,
logLevel=None,
predictionCacheMaxRecords=None):
"""
Parameters:
-------------------------------------------------------------------------
modelID: ID for this model in the models table
jobID: ID for this hypersearch job in the jobs table
predictedField: Name of the input field for which this model is being
optimized
experimentDir: Directory path containing the experiment's
description.py script
reportKeyPatterns: list of items from the results dict to include in
the report. These can be regular expressions.
optimizeKeyPattern: Which report item, if any, we will be optimizing for.
This can also be a regular expression, but is an error
if it matches more than one key from the experiment's
results.
jobsDAO: Jobs data access object - the interface to the
jobs database which has the model's table.
modelCheckpointGUID:
A persistent, globally-unique identifier for
constructing the model checkpoint key. If None, then
don't bother creating a model checkpoint.
logLevel: override logging level to this value, if not None
predictionCacheMaxRecords:
Maximum number of records for the prediction output cache.
Pass None for default value.
"""
# -----------------------------------------------------------------------
# Initialize class constants
# -----------------------------------------------------------------------
self._MIN_RECORDS_TO_BE_BEST = int(Configuration.get('nupic.hypersearch.bestModelMinRecords'))
self._MATURITY_MAX_CHANGE = float(Configuration.get('nupic.hypersearch.maturityPctChange'))
self._MATURITY_NUM_POINTS = int(Configuration.get('nupic.hypersearch.maturityNumPoints'))
# -----------------------------------------------------------------------
# Initialize instance variables
# -----------------------------------------------------------------------
self._modelID = modelID
self._jobID = jobID
self._predictedField = predictedField
self._experimentDir = experimentDir
self._reportKeyPatterns = reportKeyPatterns
self._optimizeKeyPattern = optimizeKeyPattern
self._jobsDAO = jobsDAO
self._modelCheckpointGUID = modelCheckpointGUID
self._predictionCacheMaxRecords = predictionCacheMaxRecords
self._isMaturityEnabled = bool(int(Configuration.get('nupic.hypersearch.enableModelMaturity')))
self._logger = logging.getLogger(".".join( ['com.numenta',
self.__class__.__module__, self.__class__.__name__]))
self._optimizedMetricLabel = None
self._reportMetricLabels = []
# Our default completion reason
self._cmpReason = ClientJobsDAO.CMPL_REASON_EOF
if logLevel is not None:
self._logger.setLevel(logLevel)
# The manager object to compute the metrics for this model
self.__metricMgr = None
# Will be set to a new instance of OPFTaskDriver by __runTask()
#self.__taskDriver = None
# Current task control parameters. Will be set by __runTask()
self.__task = None
# Will be set to a new instance of PeriodicActivityManager by __runTask()
self._periodic = None
# Will be set to streamDef string by _runTask()
self._streamDef = None
# Will be set to new OpfExperiment instance by run()
self._model = None
# Will be set to new InputSource by __runTask()
self._inputSource = None
# 0-based index of the record being processed;
# Initialized and updated by __runTask()
self._currentRecordIndex = None
# Interface to write predictions to a persistent storage
self._predictionLogger = None
# In-memory cache for predictions. Predictions are written here for speed
# when they don't need to be written to a persistent store
self.__predictionCache = deque()
# Flag to see if this is the best model in the job (as determined by the
# model chooser logic). This is essentially a cache of the value in the
# ClientJobsDB
self._isBestModel = False
# Flag to see if there is a best model (not necessarily this one)
# stored in the DB
self._isBestModelStored = False
# -----------------------------------------------------------------------
# Flags for model cancelation/checkpointing
# -----------------------------------------------------------------------
    # Flag to see if the job that this model is part of has been canceled
self._isCanceled = False
    # Flag to see if model was killed, either by the model terminator or by the
    # hypersearch implementation (e.g. when a swarm is killed/matured)
self._isKilled = False
# Flag to see if the model is matured. In most cases, this means that we
    # should stop running the model. The only exception is if this model is the
# best model for the job, in which case it should continue running.
self._isMature = False
# Event to see if interrupt signal has been sent
self._isInterrupted = threading.Event()
# -----------------------------------------------------------------------
# Facilities for measuring model maturity
# -----------------------------------------------------------------------
# List of tuples, (iteration, metric), used to see if the model has 'matured'
self._metricRegression = regression.AveragePctChange(windowSize=self._MATURITY_NUM_POINTS)
self.__loggedMetricPatterns = []
def run(self):
""" Runs the OPF Model
Parameters:
-------------------------------------------------------------------------
retval: (completionReason, completionMsg)
where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX
equates.
"""
# -----------------------------------------------------------------------
# Load the experiment's description.py module
descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(
self._experimentDir)
expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
descriptionPyModule)
modelDescription = expIface.getModelDescription()
self._modelControl = expIface.getModelControl()
# -----------------------------------------------------------------------
# Create the input data stream for this task
streamDef = self._modelControl['dataset']
from nupic.data.stream_reader import StreamReader
readTimeout = 0
self._inputSource = StreamReader(streamDef, isBlocking=False,
maxTimeout=readTimeout)
# -----------------------------------------------------------------------
#Get field statistics from the input source
fieldStats = self._getFieldStats()
# -----------------------------------------------------------------------
# Construct the model instance
self._model = ModelFactory.create(modelDescription)
self._model.setFieldStatistics(fieldStats)
self._model.enableLearning()
self._model.enableInference(self._modelControl.get("inferenceArgs", None))
# -----------------------------------------------------------------------
# Instantiate the metrics
self.__metricMgr = MetricsManager(self._modelControl.get('metrics',None),
self._model.getFieldInfo(),
self._model.getInferenceType())
self.__loggedMetricPatterns = self._modelControl.get("loggedMetrics", [])
self._optimizedMetricLabel = self.__getOptimizedMetricLabel()
self._reportMetricLabels = matchPatterns(self._reportKeyPatterns,
self._getMetricLabels())
# -----------------------------------------------------------------------
# Initialize periodic activities (e.g., for model result updates)
self._periodic = self._initPeriodicActivities()
# -----------------------------------------------------------------------
# Create our top-level loop-control iterator
numIters = self._modelControl.get('iterationCount', -1)
# Are we asked to turn off learning for a certain # of iterations near the
# end?
learningOffAt = None
iterationCountInferOnly = self._modelControl.get('iterationCountInferOnly', 0)
if iterationCountInferOnly == -1:
self._model.disableLearning()
elif iterationCountInferOnly > 0:
assert numIters > iterationCountInferOnly, "when iterationCountInferOnly " \
"is specified, iterationCount must be greater than " \
"iterationCountInferOnly."
learningOffAt = numIters - iterationCountInferOnly
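      # Worked example (editor's note): numIters=1000 and
      # iterationCountInferOnly=100 give learningOffAt=900, so learning
      # is disabled once _currentRecordIndex reaches 900 and the
      # remaining records run inference-only.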
self.__runTaskMainLoop(numIters, learningOffAt=learningOffAt)
# -----------------------------------------------------------------------
# Perform final operations for model
self._finalize()
return (self._cmpReason, None)
################################################################################
def __runTaskMainLoop(self, numIters, learningOffAt=None):
""" Main loop of the OPF Model Runner.
Parameters:
-----------------------------------------------------------------------
recordIterator: Iterator for counting number of records (see _runTask)
learningOffAt: If not None, learning is turned off when we reach this
iteration number
"""
## Reset sequence states in the model, so it starts looking for a new
## sequence
self._model.resetSequenceStates()
self._currentRecordIndex = -1
while True:
# If killed by a terminator, stop running
if self._isKilled:
break
# If job stops or hypersearch ends, stop running
if self._isCanceled:
break
# If the process is about to be killed, set as orphaned
if self._isInterrupted.isSet():
self.__setAsOrphaned()
break
# If model is mature, stop running ONLY IF we are not the best model
# for the job. Otherwise, keep running so we can keep returning
# predictions to the user
if self._isMature:
if not self._isBestModel:
self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
break
else:
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
# Turn off learning?
if learningOffAt is not None \
and self._currentRecordIndex == learningOffAt:
self._model.disableLearning()
# Read input record. Note that any failure here is a critical JOB failure
# and results in the job being immediately canceled and marked as
      # failed. The runModelXXX code in hypersearch.utils, if it sees an
# exception of type utils.JobFailException, will cancel the job and
# copy the error message into the job record.
try:
inputRecord = self._inputSource.getNextRecordDict()
if self._currentRecordIndex < 0:
self._inputSource.setTimeout(10)
except Exception, e:
raise utils.JobFailException(ErrorCodes.streamReading, str(e.args),
traceback.format_exc())
if inputRecord is None:
# EOF
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
break
if inputRecord:
# Process input record
self._currentRecordIndex += 1
result = self._model.run(inputRecord=inputRecord)
# Compute metrics.
result.metrics = self.__metricMgr.update(result)
        # If metrics are empty, use defaults; see MetricsManager.getMetrics()
# TODO remove this when JAVA API server is gone
if not result.metrics:
result.metrics = self.__metricMgr.getMetrics()
# Write the result to the output cache. Don't write encodings, if they
# were computed
if InferenceElement.encodings in result.inferences:
result.inferences.pop(InferenceElement.encodings)
result.sensorInput.dataEncodings = None
self._writePrediction(result)
# Run periodic activities
self._periodic.tick()
if numIters >= 0 and self._currentRecordIndex >= numIters-1:
break
else:
# Input source returned an empty record.
#
# NOTE: This is okay with Stream-based Source (when it times out
# waiting for next record), but not okay with FileSource, which should
# always return either with a valid record or None for EOF.
raise ValueError("Got an empty record from FileSource: %r" %
inputRecord)
def _finalize(self):
"""Run final activities after a model has run. These include recording and
logging the final score"""
self._logger.info(
"Finished: modelID=%r; %r records processed. Performing final activities",
self._modelID, self._currentRecordIndex + 1)
# =========================================================================
# Dump the experiment metrics at the end of the task
# =========================================================================
self._updateModelDBResults()
# =========================================================================
# Check if the current model is the best. Create a milestone if necessary
# If the model has been killed, it is not a candidate for "best model",
# and its output cache should be destroyed
# =========================================================================
if not self._isKilled:
self.__updateJobResults()
else:
self.__deleteOutputCache(self._modelID)
# =========================================================================
# Close output stream, if necessary
# =========================================================================
if self._predictionLogger:
self._predictionLogger.close()
################################################################################
def __createModelCheckpoint(self):
""" Create a checkpoint from the current model, and store it in a dir named
after checkpoint GUID, and finally store the GUID in the Models DB """
if self._model is None or self._modelCheckpointGUID is None:
return
# Create an output store, if one doesn't exist already
if self._predictionLogger is None:
self._createPredictionLogger()
predictions = StringIO.StringIO()
self._predictionLogger.checkpoint(
checkpointSink=predictions,
maxRows=int(Configuration.get('nupic.model.checkpoint.maxPredictionRows')))
self._model.save(os.path.join(self._experimentDir, str(self._modelCheckpointGUID)))
    self._jobsDAO.modelSetFields(self._modelID,
        {'modelCheckpointId':str(self._modelCheckpointGUID)},
        ignoreUnchanged=True)
    self._logger.info("Checkpointed Hypersearch Model: modelID: %r, "
                      "checkpointID: %r", self._modelID,
                      self._modelCheckpointGUID)
return
############################################################################
def __deleteModelCheckpoint(self, modelID):
"""
Delete the stored checkpoint for the specified modelID. This function is
called if the current model is now the best model, making the old model's
checkpoint obsolete
Parameters:
-----------------------------------------------------------------------
modelID: The modelID for the checkpoint to delete. This is NOT the
unique checkpointID
"""
checkpointID = \
self._jobsDAO.modelsGetFields(modelID, ['modelCheckpointId'])[0]
if checkpointID is None:
return
try:
      # Remove the checkpoint belonging to 'modelID' (which may be a
      # previous best model, not necessarily this one).
      shutil.rmtree(os.path.join(self._experimentDir, str(checkpointID)))
    except Exception:
self._logger.warn("Failed to delete model checkpoint %s. "\
"Assuming that another worker has already deleted it",
checkpointID)
return
self._jobsDAO.modelSetFields(modelID,
{'modelCheckpointId':None},
ignoreUnchanged=True)
return
################################################################################
def _createPredictionLogger(self):
"""
Creates the model's PredictionLogger object, which is an interface to write
model results to a permanent storage location
"""
# Write results to a file
self._predictionLogger = BasicPredictionLogger(
fields=self._model.getFieldInfo(),
experimentDir=self._experimentDir,
label = "hypersearch-worker",
inferenceType=self._model.getInferenceType())
if self.__loggedMetricPatterns:
metricLabels = self.__metricMgr.getMetricLabels()
loggedMetrics = matchPatterns(self.__loggedMetricPatterns, metricLabels)
self._predictionLogger.setLoggedMetrics(loggedMetrics)
################################################################################
def __getOptimizedMetricLabel(self):
""" Get the label for the metric being optimized. This function also caches
the label in the instance variable self._optimizedMetricLabel
Parameters:
-----------------------------------------------------------------------
metricLabels: A sequence of all the labels being computed for this model
    Returns: The label for the metric being optimized over
"""
matchingKeys = matchPatterns([self._optimizeKeyPattern],
self._getMetricLabels())
if len(matchingKeys) == 0:
raise Exception("None of the generated metrics match the specified "
"optimization pattern: %s. Available metrics are %s" % \
(self._optimizeKeyPattern, self._getMetricLabels()))
elif len(matchingKeys) > 1:
raise Exception("The specified optimization pattern '%s' matches more "
"than one metric: %s" % (self._optimizeKeyPattern, matchingKeys))
return matchingKeys[0]
################################################################################
def _getMetricLabels(self):
"""
Returns: A list of labels that correspond to metrics being computed
"""
return self.__metricMgr.getMetricLabels()
################################################################################
def _getFieldStats(self):
"""
Method which returns a dictionary of field statistics received from the
input source.
Returns:
fieldStats: dict of dicts where the first level is the field name and
the second level is the statistic. ie. fieldStats['pounds']['min']
"""
fieldStats = dict()
fieldNames = self._inputSource.getFieldNames()
for field in fieldNames:
curStats = dict()
curStats['min'] = self._inputSource.getFieldMin(field)
curStats['max'] = self._inputSource.getFieldMax(field)
fieldStats[field] = curStats
return fieldStats
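    # Illustrative return value (field name and numbers hypothetical):
    #   {'consumption': {'min': 0.0, 'max': 100.3}}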
################################################################################
def _getMetrics(self):
""" Protected function that can be overriden by subclasses. Its main purpose
is to allow the the OPFDummyModelRunner to override this with deterministic
values
Returns: All the metrics being computed for this model
"""
return self.__metricMgr.getMetrics()
################################################################################
def _updateModelDBResults(self):
""" Retrieves the current results and updates the model's record in
the Model database.
"""
# -----------------------------------------------------------------------
# Get metrics
metrics = self._getMetrics()
# -----------------------------------------------------------------------
# Extract report metrics that match the requested report REs
reportDict = dict([(k,metrics[k]) for k in self._reportMetricLabels])
# -----------------------------------------------------------------------
# Extract the report item that matches the optimize key RE
    # TODO cache optimizedMetricLabel sooner
optimizeDict = dict()
if self._optimizeKeyPattern is not None:
optimizeDict[self._optimizedMetricLabel] = \
metrics[self._optimizedMetricLabel]
# -----------------------------------------------------------------------
# Update model results
results = json.dumps((metrics , optimizeDict))
self._jobsDAO.modelUpdateResults(self._modelID, results=results,
metricValue=optimizeDict.values()[0],
numRecords=(self._currentRecordIndex + 1))
self._logger.debug(
"Model Results: modelID=%s; numRecords=%s; results=%s" % \
(self._modelID, self._currentRecordIndex + 1, results))
return
################################################################################
def __updateJobResultsPeriodic(self):
"""
Periodic check to see if this is the best model. This should only have an
effect if this is the *first* model to report its progress
"""
if self._isBestModelStored and not self._isBestModel:
return
while True:
jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is None:
jobResults = {}
else:
self._isBestModelStored = True
if not self._isBestModel:
return
jobResults = json.loads(jobResultsStr)
bestModel = jobResults.get('bestModel', None)
bestMetric = jobResults.get('bestValue', None)
isSaved = jobResults.get('saved', False)
# If there is a best model, and it is not the same as the current model
# we should wait till we have processed all of our records to see if
      # we are the best
if (bestModel is not None) and (self._modelID != bestModel):
self._isBestModel = False
return
# Make sure prediction output stream is ready before we present our model
# as "bestModel"; sometimes this takes a long time, so update the model's
# timestamp to help avoid getting orphaned
self.__flushPredictionCache()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
metrics = self._getMetrics()
jobResults['bestModel'] = self._modelID
jobResults['bestValue'] = metrics[self._optimizedMetricLabel]
jobResults['metrics'] = metrics
jobResults['saved'] = False
newResults = json.dumps(jobResults)
isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,
fieldName='results',
curValue=jobResultsStr,
newValue=newResults)
if isUpdated or (not isUpdated and newResults==jobResultsStr):
self._isBestModel = True
break
############################################################################
def __checkIfBestCompletedModel(self):
"""
Reads the current "best model" for the job and returns whether or not the
current model is better than the "best model" stored for the job
Returns: (isBetter, storedBest, origResultsStr)
isBetter:
True if the current model is better than the stored "best model"
storedResults:
A dict of the currently stored results in the jobs table record
origResultsStr:
The json-encoded string that currently resides in the "results" field
of the jobs record (used to create atomicity)
"""
jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is None:
jobResults = {}
else:
jobResults = json.loads(jobResultsStr)
isSaved = jobResults.get('saved', False)
bestMetric = jobResults.get('bestValue', None)
currentMetric = self._getMetrics()[self._optimizedMetricLabel]
self._isBestModel = (not isSaved) \
or (currentMetric < bestMetric)
return self._isBestModel, jobResults, jobResultsStr
############################################################################
def __updateJobResults(self):
""""
Check if this is the best model
If so:
1) Write it's checkpoint
2) Record this model as the best
3) Delete the previous best's output cache
Otherwise:
1) Delete our output cache
"""
isSaved = False
while True:
self._isBestModel, jobResults, jobResultsStr = \
self.__checkIfBestCompletedModel()
# -----------------------------------------------------------------------
# If the current model is the best:
# 1) Save the model's predictions
# 2) Checkpoint the model state
# 3) Update the results for the job
if self._isBestModel:
# Save the current model and its results
if not isSaved:
self.__flushPredictionCache()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__createModelCheckpoint()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
isSaved = True
# Now record the model as the best for the job
prevBest = jobResults.get('bestModel', None)
prevWasSaved = jobResults.get('saved', False)
# If the current model is the best, it shouldn't already be checkpointed
if prevBest == self._modelID:
assert not prevWasSaved
metrics = self._getMetrics()
jobResults['bestModel'] = self._modelID
jobResults['bestValue'] = metrics[self._optimizedMetricLabel]
jobResults['metrics'] = metrics
jobResults['saved'] = True
isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,
fieldName='results',
curValue=jobResultsStr,
newValue=json.dumps(jobResults))
if isUpdated:
if prevWasSaved:
self.__deleteOutputCache(prevBest)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__deleteModelCheckpoint(prevBest)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self._logger.info("Model %d chosen as best model", self._modelID)
break
# -----------------------------------------------------------------------
# If the current model is not the best, delete its outputs
else:
# NOTE: we update model timestamp around these occasionally-lengthy
# operations to help prevent the model from becoming orphaned
self.__deleteOutputCache(self._modelID)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
self.__deleteModelCheckpoint(self._modelID)
self._jobsDAO.modelUpdateTimestamp(self._modelID)
break
############################################################################
def _writePrediction(self, result):
"""
Writes the results of one iteration of a model. The results are written to
this ModelRunner's in-memory cache unless this model is the "best model" for
the job. If this model is the "best model", the predictions are written out
to a permanent store via a prediction output stream instance
Parameters:
-----------------------------------------------------------------------
result: A opfutils.ModelResult object, which contains the input and
output for this iteration
"""
self.__predictionCache.append(result)
if self._isBestModel:
self.__flushPredictionCache()
############################################################################
def __writeRecordsCallback(self):
""" This callback is called by self.__predictionLogger.writeRecords()
between each batch of records it writes. It gives us a chance to say that
the model is 'still alive' during long write operations.
"""
# This updates the engLastUpdateTime of the model record so that other
    # workers don't think that this model is orphaned.
self._jobsDAO.modelUpdateResults(self._modelID)
############################################################################
def __flushPredictionCache(self):
"""
Writes the contents of this model's in-memory prediction cache to a permanent
store via the prediction output stream instance
"""
if not self.__predictionCache:
return
# Create an output store, if one doesn't exist already
if self._predictionLogger is None:
self._createPredictionLogger()
startTime = time.time()
self._predictionLogger.writeRecords(self.__predictionCache,
progressCB=self.__writeRecordsCallback)
self._logger.info("Flushed prediction cache; numrows=%s; elapsed=%s sec.",
len(self.__predictionCache), time.time() - startTime)
self.__predictionCache.clear()
############################################################################
def __deleteOutputCache(self, modelID):
"""
    Deletes the output cache associated with the given modelID. This actually
    releases the resources associated with the cache, rather than deleting all
    the records in the cache
Parameters:
-----------------------------------------------------------------------
modelID: The id of the model whose output cache is being deleted
"""
# If this is our output, we should close the connection
if modelID == self._modelID and self._predictionLogger is not None:
self._predictionLogger.close()
del self.__predictionCache
self._predictionLogger = None
self.__predictionCache = None
def _initPeriodicActivities(self):
""" Creates and returns a PeriodicActivityMgr instance initialized with
our periodic activities
Parameters:
-------------------------------------------------------------------------
retval: a PeriodicActivityMgr instance
"""
# Activity to update the metrics for this model
# in the models table
updateModelDBResults = PeriodicActivityRequest(repeating=True,
period=100,
cb=self._updateModelDBResults)
updateJobResults = PeriodicActivityRequest(repeating=True,
period=100,
cb=self.__updateJobResultsPeriodic)
checkCancelation = PeriodicActivityRequest(repeating=True,
period=50,
cb=self.__checkCancelation)
checkMaturity = PeriodicActivityRequest(repeating=True,
period=10,
cb=self.__checkMaturity)
# Do an initial update of the job record after 2 iterations to make
# sure that it is populated with something without having to wait too long
updateJobResultsFirst = PeriodicActivityRequest(repeating=False,
period=2,
cb=self.__updateJobResultsPeriodic)
periodicActivities = [updateModelDBResults,
updateJobResultsFirst,
updateJobResults,
checkCancelation]
if self._isMaturityEnabled:
periodicActivities.append(checkMaturity)
return PeriodicActivityMgr(requestedActivities=periodicActivities)
############################################################################
def __checkCancelation(self):
""" Check if the cancelation flag has been set for this model
in the Model DB"""
# Update a hadoop job counter at least once every 600 seconds so it doesn't
# think our map task is dead
print >>sys.stderr, "reporter:counter:HypersearchWorker,numRecords,50"
# See if the job got cancelled
jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
self._isCanceled = True
self._logger.info("Model %s canceled because Job %s was stopped.",
self._modelID, self._jobID)
else:
stopReason = self._jobsDAO.modelsGetFields(self._modelID, ['engStop'])[0]
if stopReason is None:
pass
elif stopReason == ClientJobsDAO.STOP_REASON_KILLED:
self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
self._isKilled = True
self._logger.info("Model %s canceled because it was killed by hypersearch",
self._modelID)
elif stopReason == ClientJobsDAO.STOP_REASON_STOPPED:
self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED
self._isCanceled = True
self._logger.info("Model %s stopped because hypersearch ended", self._modelID)
else:
raise RuntimeError ("Unexpected stop reason encountered: %s" % (stopReason))
#########################################################################
def __checkMaturity(self):
""" Save the current metric value and see if the model's performance has
    'leveled off.' We do this by looking at the last several recorded
    metric values """
if self._currentRecordIndex+1 < self._MIN_RECORDS_TO_BE_BEST:
return
# If we are already mature, don't need to check anything
if self._isMature:
return
metric = self._getMetrics()[self._optimizedMetricLabel]
self._metricRegression.addPoint(x=self._currentRecordIndex, y=metric)
# Perform a linear regression to see if the error is leveled off
#pctChange = self._metricRegression.getPctChange()
#if pctChange is not None and abs(pctChange ) <= self._MATURITY_MAX_CHANGE:
pctChange, absPctChange = self._metricRegression.getPctChanges()
if pctChange is not None and absPctChange <= self._MATURITY_MAX_CHANGE:
self._jobsDAO.modelSetFields(self._modelID,
{'engMatured':True})
# TODO: Don't stop if we are currently the best model. Also, if we
# are still running after maturity, we have to periodically check to
# see if we are still the best model. As soon we lose to some other
# model, then we should stop at that point.
self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED
self._isMature = True
self._logger.info("Model %d has matured (pctChange=%s, n=%d). \n"\
"Scores = %s\n"\
"Stopping execution",self._modelID, pctChange,
self._MATURITY_NUM_POINTS,
self._metricRegression._window)
############################################################################
def handleWarningSignal(self, signum, frame):
"""
Handles a "warning signal" from the scheduler. This is received when the
    scheduler is about to kill the current process so that the worker can be
allocated to another job.
Right now, this function just sets the current model to the "Orphaned" state
in the models table so that another worker can eventually re-run this model
Parameters:
-----------------------------------------------------------------------
"""
self._isInterrupted.set()
############################################################################
def __setAsOrphaned(self):
"""
Sets the current model as orphaned. This is called when the scheduler is
about to kill the process to reallocate the worker to a different process.
"""
cmplReason = ClientJobsDAO.CMPL_REASON_ORPHAN
cmplMessage = "Killed by Scheduler"
self._jobsDAO.modelSetCompleted(self._modelID, cmplReason, cmplMessage)
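# --- Editor's usage sketch (not part of the original module) ---
# How a hypersearch worker might drive this runner; every concrete value
# (IDs, paths, patterns) below is hypothetical, and ClientJobsDAO.get()
# needs a configured jobs database to connect to:
#
#   import signal
#   runner = OPFModelRunner(modelID=1, jobID=1,
#                           predictedField='consumption',
#                           experimentDir='/tmp/experiment',
#                           reportKeyPatterns=['.*nupicScore.*'],
#                           optimizeKeyPattern='.*altMAPE.*',
#                           jobsDAO=ClientJobsDAO.get(),
#                           modelCheckpointGUID=None)
#   signal.signal(signal.SIGINT, runner.handleWarningSignal)
#   completionReason, completionMsg = runner.run()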
|
0x0all/nupic
|
py/nupic/swarming/ModelRunner.py
|
Python
|
gpl-3.0
| 39,154
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from tensorforce import Agent, Environment, Runner
from test.unittest_base import UnittestBase
class TestDocumentation(UnittestBase, unittest.TestCase):
def test_environment(self):
self.start_tests(name='getting-started-environment')
environment = Environment.create(
environment='gym', level='CartPole', max_episode_timesteps=50
)
self.finished_test()
environment = Environment.create(environment='gym', level='CartPole-v1')
self.finished_test()
environment = Environment.create(
environment='test/data/environment.json', max_episode_timesteps=50
)
self.finished_test()
environment = Environment.create(
environment='test.data.custom_env', max_episode_timesteps=10
)
self.finished_test()
from test.data.custom_env import CustomEnvironment
environment = Environment.create(
environment=CustomEnvironment, max_episode_timesteps=10
)
self.finished_test()
def test_agent(self):
self.start_tests(name='getting-started-agent')
environment = Environment.create(
environment='gym', level='CartPole', max_episode_timesteps=50
)
self.finished_test()
agent = Agent.create(
agent='tensorforce', environment=environment, update=64,
optimizer=dict(optimizer='adam', learning_rate=1e-3),
objective='policy_gradient', reward_estimation=dict(horizon=20)
)
self.finished_test()
agent = Agent.create(
agent='ppo', environment=environment, batch_size=10, learning_rate=1e-3
)
self.finished_test()
agent = Agent.create(agent='test/data/agent.json', environment=environment)
self.finished_test()
def test_execution(self):
self.start_tests(name='getting-started-execution')
runner = Runner(
agent='test/data/agent.json', environment=dict(environment='gym', level='CartPole'),
max_episode_timesteps=10
)
runner.run(num_episodes=10)
runner.run(num_episodes=5, evaluation=True)
runner.close()
self.finished_test()
runner = Runner(
agent='test/data/agent.json', environment=dict(environment='gym', level='CartPole'),
max_episode_timesteps=50, num_parallel=5, remote='multiprocessing'
)
runner.run(num_episodes=10)
runner.close()
self.finished_test()
# Create agent and environment
environment = Environment.create(
environment='test/data/environment.json', max_episode_timesteps=10
)
agent = Agent.create(agent='test/data/agent.json', environment=environment)
        # Train for 10 episodes
for _ in range(10):
states = environment.reset()
terminal = False
while not terminal:
actions = agent.act(states=states)
states, terminal, reward = environment.execute(actions=actions)
agent.observe(terminal=terminal, reward=reward)
        # Train for 10 episodes via the act-experience-update interface
for _ in range(10):
episode_states = list()
episode_internals = list()
episode_actions = list()
episode_terminal = list()
episode_reward = list()
states = environment.reset()
internals = agent.initial_internals()
terminal = False
while not terminal:
episode_states.append(states)
episode_internals.append(internals)
actions, internals = agent.act(states=states, internals=internals, independent=True)
episode_actions.append(actions)
states, terminal, reward = environment.execute(actions=actions)
episode_terminal.append(terminal)
episode_reward.append(reward)
agent.experience(
states=episode_states, internals=episode_internals, actions=episode_actions,
terminal=episode_terminal, reward=episode_reward
)
agent.update()
        # Evaluate for 10 episodes
sum_rewards = 0.0
for _ in range(10):
states = environment.reset()
internals = agent.initial_internals()
terminal = False
while not terminal:
actions, internals = agent.act(
states=states, internals=internals,
deterministic=True, independent=True
)
states, terminal, reward = environment.execute(actions=actions)
sum_rewards += reward
        print('Mean episode reward:', sum_rewards / 10)
# Close agent and environment
agent.close()
environment.close()
self.finished_test()
def test_readme(self):
self.start_tests(name='readme')
# ====================
from tensorforce import Agent, Environment
# Pre-defined or custom environment
environment = Environment.create(
environment='gym', level='CartPole', max_episode_timesteps=500
)
# Instantiate a Tensorforce agent
agent = Agent.create(
agent='tensorforce',
environment=environment, # alternatively: states, actions, (max_episode_timesteps)
memory=1000,
update=dict(unit='timesteps', batch_size=64),
optimizer=dict(type='adam', learning_rate=3e-4),
policy=dict(network='auto'),
objective='policy_gradient',
reward_estimation=dict(horizon=20)
)
        # Train for one episode
for _ in range(1):
# Initialize episode
states = environment.reset()
terminal = False
while not terminal:
# Episode timestep
actions = agent.act(states=states)
states, terminal, reward = environment.execute(actions=actions)
agent.observe(terminal=terminal, reward=reward)
agent.close()
environment.close()
# ====================
self.finished_test()
def test_modules(self):
self.start_tests(name='modules')
# distributions
self.unittest(
policy=dict(distributions=dict(
float=dict(type='gaussian', stddev_mode='global'),
bounded_action=dict(type='beta')
))
)
# layers
import tensorflow as tf
self.unittest(
states=dict(type='float', shape=(2,), min_value=-1.0, max_value=2.0),
policy=dict(network=[
(lambda x: tf.clip_by_value(x, -1.0, 1.0)),
dict(type='dense', size=8, activation='tanh')
])
)
# memories
self.unittest(
memory=100
)
# networks
self.unittest(
states=dict(type='float', shape=(2,), min_value=1.0, max_value=2.0),
policy=dict(network=[
dict(type='dense', size=8, activation='tanh'),
dict(type='dense', size=8, activation='tanh')
])
)
self.unittest(
states=dict(
observation=dict(type='float', shape=(4, 4, 3), min_value=-1.0, max_value=1.0),
attributes=dict(type='int', shape=(4, 2), num_values=5)
),
policy=[
[
dict(type='retrieve', tensors=['observation']),
dict(type='conv2d', size=8),
dict(type='flatten'),
dict(type='register', tensor='obs-embedding')
],
[
dict(type='retrieve', tensors=['attributes']),
dict(type='embedding', size=8),
dict(type='flatten'),
dict(type='register', tensor='attr-embedding')
],
[
dict(
type='retrieve', tensors=['obs-embedding', 'attr-embedding'],
aggregation='concat'
),
dict(type='dense', size=16)
]
]
)
# optimizers
self.unittest(
optimizer=dict(
optimizer='adam', learning_rate=1e-3, clipping_threshold=1e-2,
multi_step=3, subsampling_fraction=8, linesearch_iterations=3,
doublecheck_update=True
)
)
# parameters
self.unittest(
exploration=0.1
)
self.unittest(
optimizer=dict(optimizer='adam', learning_rate=dict(
type='exponential', unit='timesteps', num_steps=2,
initial_value=0.01, decay_rate=0.5
))
)
self.unittest(
reward_estimation=dict(horizon=dict(
type='linear', unit='episodes', num_steps=2,
initial_value=2, final_value=6
))
)
# preprocessing
self.unittest(
states=dict(type='float', shape=(8, 8, 3), min_value=-1.0, max_value=2.0),
state_preprocessing=[
dict(type='image', height=4, width=4, grayscale=True),
dict(type='exponential_normalization', decay=0.999)
],
reward_preprocessing=dict(type='clipping', lower=-1.0, upper=1.0)
)
# policy
self.unittest(
states=dict(type='float', shape=(2,), min_value=-1.0, max_value=2.0),
policy=[
dict(type='dense', size=8, activation='tanh'),
dict(type='dense', size=8, activation='tanh')
]
)
self.unittest(
states=dict(type='float', shape=(2,), min_value=-1.0, max_value=2.0),
policy=dict(network='auto')
)
self.unittest(
states=dict(type='float', shape=(2,), min_value=-1.0, max_value=2.0),
policy=dict(
type='parametrized_distributions',
network=[
dict(type='dense', size=8, activation='tanh'),
dict(type='dense', size=8, activation='tanh')
],
distributions=dict(
float=dict(type='gaussian', stddev_mode='global'),
bounded_action=dict(type='beta')
),
temperature=dict(
type='decaying', decay='exponential', unit='episodes',
num_steps=2, initial_value=0.01, decay_rate=0.5
)
)
)
self.unittest(
states=dict(type='float', shape=(2,), min_value=-1.0, max_value=2.0),
actions=dict(
action1=dict(type='int', shape=(), num_values=5),
action2=dict(type='float', shape=(), min_value=-1.0, max_value=1.0)
),
policy=dict(
type='parametrized_distributions',
network=[
dict(type='dense', size=64),
dict(type='register', tensor='action1-embedding'),
dict(type='dense', size=64)
# Final output implicitly used for remaining actions
],
single_output=False
)
)
def test_masking(self):
self.start_tests(name='masking')
agent, environment = self.prepare(
states=dict(type='float', shape=(10,), min_value=-1.0, max_value=2.0),
actions=dict(type='int', shape=(), num_values=3)
)
states = environment.reset()
assert 'state' in states and 'action_mask' in states
states['action_mask'] = [True, False, True]
action = agent.act(states=states)
assert action != 1
agent.close()
environment.close()
self.finished_test()
|
reinforceio/tensorforce
|
test/test_documentation.py
|
Python
|
apache-2.0
| 12,771
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import mimetypes
import time
import urllib2
from webkitpy.common.net.networktransaction import NetworkTransaction, NetworkTimeout
def get_mime_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# FIXME: Rather than taking tuples, this function should take more structured data.
def _encode_multipart_form_data(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://code.google.com/p/rietveld/source/browse/trunk/upload.py
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for key, value in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
for key, filename, value in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
lines.append('Content-Type: %s' % get_mime_type(filename))
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
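# Illustrative example (not part of the original module): encoding one regular
# form field and one uploaded file. The field and file values are made up.
#
#   content_type, body = _encode_multipart_form_data(
#       fields=[('builder', 'mac-release')],
#       files=[('file', 'results.json', '{"tests": []}')])
#   # content_type == 'multipart/form-data; boundary=-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
#   # body holds one CRLF-joined form-data section per field and per file.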
class FileUploader(object):
def __init__(self, url, timeout_seconds):
self._url = url
self._timeout_seconds = timeout_seconds
def upload_single_text_file(self, filesystem, content_type, filename):
return self._upload_data(content_type, filesystem.read_text_file(filename))
def upload_as_multipart_form_data(self, filesystem, files, attrs):
file_objs = []
for filename, path in files:
file_objs.append(('file', filename, filesystem.read_binary_file(path)))
# FIXME: We should use the same variable names for the formal and actual parameters.
content_type, data = _encode_multipart_form_data(attrs, file_objs)
return self._upload_data(content_type, data)
def _upload_data(self, content_type, data):
def callback():
# FIXME: Setting a timeout, either globally using socket.setdefaulttimeout()
# or in urlopen(), doesn't appear to work on Mac 10.5 with Python 2.7.
# For now we will ignore the timeout value and hope for the best.
request = urllib2.Request(self._url, data, {"Content-Type": content_type})
return urllib2.urlopen(request)
return NetworkTransaction(timeout_seconds=self._timeout_seconds).run(callback)
|
was4444/chromium.src
|
third_party/WebKit/Tools/Scripts/webkitpy/common/net/file_uploader.py
|
Python
|
bsd-3-clause
| 4,518
|
#!/usr/bin/env python
import glob
import os
infiles=glob.glob('*_R.fits')
for file in infiles:
agcnumber=file.split('_R.fits')[0]
print agcnumber
os.system('uat_mask.py '+str(agcnumber))
os.system('LCSrunellipseHa.py '+str(agcnumber))
|
rfinn/LCS
|
paper1code/LCSrunellipseHa_all.py
|
Python
|
gpl-3.0
| 265
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import socket
import sys
import uuid
from oslo_service import loopingcall
from oslo_utils import timeutils
import oslo_versionedobjects
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
def get_test_admin_context():
return context.get_admin_context()
def create_volume(ctxt,
host='test_host',
display_name='test_volume',
display_description='this is a test volume',
status='available',
migration_status=None,
size=1,
availability_zone='fake_az',
volume_type_id=None,
replication_status='disabled',
replication_extended_status=None,
replication_driver_data=None,
consistencygroup_id=None,
previous_status=None,
testcase_instance=None,
**kwargs):
"""Create a volume object in the DB."""
vol = {}
vol['size'] = size
vol['host'] = host
vol['user_id'] = ctxt.user_id
vol['project_id'] = ctxt.project_id
vol['status'] = status
if migration_status:
vol['migration_status'] = migration_status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
vol['availability_zone'] = availability_zone
if consistencygroup_id:
vol['consistencygroup_id'] = consistencygroup_id
if volume_type_id:
vol['volume_type_id'] = volume_type_id
for key in kwargs:
vol[key] = kwargs[key]
vol['replication_status'] = replication_status
if replication_extended_status:
vol['replication_extended_status'] = replication_extended_status
if replication_driver_data:
vol['replication_driver_data'] = replication_driver_data
if previous_status:
vol['previous_status'] = previous_status
volume = objects.Volume(ctxt, **vol)
volume.create()
# If we get a TestCase instance we add cleanup
if testcase_instance:
testcase_instance.addCleanup(volume.destroy)
return volume
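# Illustrative usage (hypothetical values; 'self' stands for a TestCase): a
# 2 GB volume whose destruction is registered as test cleanup.
#
#   ctxt = get_test_admin_context()
#   vol = create_volume(ctxt, size=2, status='in-use', testcase_instance=self)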
def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
mountpoint, mode='rw'):
now = timeutils.utcnow()
values = {}
values['volume_id'] = volume_id
values['attached_host'] = attached_host
values['mountpoint'] = mountpoint
values['attach_time'] = now
attachment = db.volume_attach(ctxt, values)
return db.volume_attached(ctxt, attachment['id'], instance_uuid,
attached_host, mountpoint, mode)
def create_snapshot(ctxt,
volume_id,
display_name='test_snapshot',
display_description='this is a test snapshot',
cgsnapshot_id = None,
status=fields.SnapshotStatus.CREATING,
testcase_instance=None,
**kwargs):
vol = db.volume_get(ctxt, volume_id)
snap = objects.Snapshot(ctxt)
snap.volume_id = volume_id
snap.user_id = ctxt.user_id or fake.USER_ID
snap.project_id = ctxt.project_id or fake.PROJECT_ID
snap.status = status
snap.volume_size = vol['size']
snap.display_name = display_name
snap.display_description = display_description
snap.cgsnapshot_id = cgsnapshot_id
snap.create()
# We do the update after creating the snapshot in case we want to set
# deleted field
snap.update(kwargs)
snap.save()
# If we get a TestCase instance we add cleanup
if testcase_instance:
testcase_instance.addCleanup(snap.destroy)
return snap
def create_consistencygroup(ctxt,
host='test_host@fakedrv#fakepool',
name='test_cg',
description='this is a test cg',
status=fields.ConsistencyGroupStatus.AVAILABLE,
availability_zone='fake_az',
volume_type_id=None,
cgsnapshot_id=None,
source_cgid=None,
**kwargs):
"""Create a consistencygroup object in the DB."""
cg = objects.ConsistencyGroup(ctxt)
cg.host = host
cg.user_id = ctxt.user_id or fake.USER_ID
cg.project_id = ctxt.project_id or fake.PROJECT_ID
cg.status = status
cg.name = name
cg.description = description
cg.availability_zone = availability_zone
if volume_type_id:
cg.volume_type_id = volume_type_id
cg.cgsnapshot_id = cgsnapshot_id
cg.source_cgid = source_cgid
new_id = kwargs.pop('id', None)
cg.update(kwargs)
cg.create()
if new_id and new_id != cg.id:
db.consistencygroup_update(ctxt, cg.id, {'id': new_id})
cg = objects.ConsistencyGroup.get_by_id(ctxt, new_id)
return cg
def create_cgsnapshot(ctxt,
consistencygroup_id,
name='test_cgsnapshot',
description='this is a test cgsnapshot',
status='creating',
recursive_create_if_needed=True,
return_vo=True,
**kwargs):
"""Create a cgsnapshot object in the DB."""
values = {
'user_id': ctxt.user_id or fake.USER_ID,
'project_id': ctxt.project_id or fake.PROJECT_ID,
'status': status,
'name': name,
'description': description,
'consistencygroup_id': consistencygroup_id}
values.update(kwargs)
if recursive_create_if_needed and consistencygroup_id:
create_cg = False
try:
objects.ConsistencyGroup.get_by_id(ctxt,
consistencygroup_id)
create_vol = not db.volume_get_all_by_group(
ctxt, consistencygroup_id)
except exception.ConsistencyGroupNotFound:
create_cg = True
create_vol = True
if create_cg:
create_consistencygroup(ctxt, id=consistencygroup_id)
if create_vol:
create_volume(ctxt, consistencygroup_id=consistencygroup_id)
cgsnap = db.cgsnapshot_create(ctxt, values)
if not return_vo:
return cgsnap
return objects.CGSnapshot.get_by_id(ctxt, cgsnap.id)
def create_backup(ctxt,
volume_id,
display_name='test_backup',
display_description='This is a test backup',
status=fields.BackupStatus.CREATING,
parent_id=None,
temp_volume_id=None,
temp_snapshot_id=None,
snapshot_id=None,
data_timestamp=None):
backup = {}
backup['volume_id'] = volume_id
backup['user_id'] = ctxt.user_id
backup['project_id'] = ctxt.project_id
backup['host'] = socket.gethostname()
backup['availability_zone'] = '1'
backup['display_name'] = display_name
backup['display_description'] = display_description
backup['container'] = 'fake'
backup['status'] = status
backup['fail_reason'] = ''
backup['service'] = 'fake'
backup['parent_id'] = parent_id
backup['size'] = 5 * 1024 * 1024
backup['object_count'] = 22
backup['temp_volume_id'] = temp_volume_id
backup['temp_snapshot_id'] = temp_snapshot_id
backup['snapshot_id'] = snapshot_id
backup['data_timestamp'] = data_timestamp
return db.backup_create(ctxt, backup)
def create_message(ctxt,
project_id='fake_project',
request_id='test_backup',
resource_type='This is a test backup',
resource_uuid='3asf434-3s433df43-434adf3-343df443',
event_id=None,
message_level='Error'):
"""Create a message in the DB."""
expires_at = (timeutils.utcnow() + datetime.timedelta(
seconds=30))
message_record = {'project_id': project_id,
'request_id': request_id,
'resource_type': resource_type,
'resource_uuid': resource_uuid,
'event_id': event_id,
'message_level': message_level,
'expires_at': expires_at}
return db.message_create(ctxt, message_record)
def create_volume_type(ctxt, testcase_instance=None, **kwargs):
vol_type = db.volume_type_create(ctxt, kwargs)
# If we get a TestCase instance we add cleanup
if testcase_instance:
testcase_instance.addCleanup(db.volume_type_destroy, ctxt, vol_type.id)
return vol_type
def create_encryption(ctxt, vol_type_id, testcase_instance=None, **kwargs):
encrypt = db.volume_type_encryption_create(ctxt, vol_type_id, kwargs)
# If we get a TestCase instance we add cleanup
if testcase_instance:
testcase_instance.addCleanup(db.volume_type_encryption_delete, ctxt,
vol_type_id)
return encrypt
def create_qos(ctxt, testcase_instance=None, **kwargs):
qos = db.qos_specs_create(ctxt, kwargs)
if testcase_instance:
testcase_instance.addCleanup(db.qos_specs_delete, ctxt, qos['id'])
return qos
class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall):
def start(self, interval, **kwargs):
kwargs['initial_delay'] = 0
return super(ZeroIntervalLoopingCall, self).start(0, **kwargs)
def replace_obj_loader(testcase, obj):
def fake_obj_load_attr(self, name):
# This will raise KeyError for non existing fields as expected
field = self.fields[name]
if field.default != oslo_versionedobjects.fields.UnspecifiedDefault:
value = field.default
elif field.nullable:
value = None
elif isinstance(field, oslo_versionedobjects.fields.StringField):
value = ''
elif isinstance(field, oslo_versionedobjects.fields.IntegerField):
value = 1
elif isinstance(field, oslo_versionedobjects.fields.UUIDField):
value = uuid.uuid4()
setattr(self, name, value)
testcase.addCleanup(setattr, obj, 'obj_load_attr', obj.obj_load_attr)
obj.obj_load_attr = fake_obj_load_attr
file_spec = None
def get_file_spec():
"""Return a Python 2 and 3 compatible version of a 'file' spec.
This is to be used anywhere that you need to do something such as
mock.MagicMock(spec=file) to mock out something with the file attributes.
Due to the 'file' built-in method being removed in Python 3 we need to do
some special handling for it.
"""
global file_spec
# set on first use
if file_spec is None:
if sys.version_info[0] == 3:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(
set(dir(_io.BytesIO))))
else:
            file_spec = file
    return file_spec
def generate_timeout_series(timeout):
"""Generate a series of times that exceeds the given timeout.
Yields a series of fake time.time() floating point numbers
such that the difference between each pair in the series just
exceeds the timeout value that is passed in. Useful for
mocking time.time() in methods that otherwise wait for timeout
seconds.
"""
iteration = 0
while True:
iteration += 1
yield (iteration * timeout) + iteration
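# Illustrative demo (not part of the original module): the first few fake
# timestamps for a 5 second timeout; each successive value exceeds the timeout.
if __name__ == '__main__':
    _demo_times = generate_timeout_series(5)
    print([next(_demo_times) for _ in range(3)])  # [6, 12, 18]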
|
bswartz/cinder
|
cinder/tests/unit/utils.py
|
Python
|
apache-2.0
| 12,217
|
from sys import version_info
from django.conf import settings
from django.http.response import HttpResponseBadRequest
from morango.models import InstanceIDModel
from rest_framework import mixins
from rest_framework import status
from rest_framework import views
from rest_framework import viewsets
from rest_framework.response import Response
import kolibri
from .models import DevicePermissions
from .models import DeviceSettings
from .permissions import NotProvisionedCanPost
from .permissions import UserHasAnyDevicePermissions
from .serializers import DevicePermissionsSerializer
from .serializers import DeviceProvisionSerializer
from .serializers import DeviceSettingsSerializer
from kolibri.core.auth.api import KolibriAuthPermissions
from kolibri.core.auth.api import KolibriAuthPermissionsFilter
from kolibri.core.content.permissions import CanManageContent
from kolibri.utils.conf import OPTIONS
from kolibri.utils.server import get_urls
from kolibri.utils.server import installation_type
from kolibri.utils.system import get_free_space
from kolibri.utils.time_utils import local_now
class DevicePermissionsViewSet(viewsets.ModelViewSet):
queryset = DevicePermissions.objects.all()
serializer_class = DevicePermissionsSerializer
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter,)
class DeviceProvisionView(viewsets.GenericViewSet):
permission_classes = (NotProvisionedCanPost,)
serializer_class = DeviceProvisionSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.save()
output_serializer = self.get_serializer(data)
return Response(output_serializer.data, status=status.HTTP_201_CREATED)
class FreeSpaceView(mixins.ListModelMixin, viewsets.GenericViewSet):
permission_classes = (CanManageContent,)
def list(self, request):
path = request.query_params.get("path")
if path is None:
free = get_free_space()
elif path == "Content":
free = get_free_space(OPTIONS["Paths"]["CONTENT_DIR"])
else:
free = get_free_space(path)
return Response({"freespace": free})
class DeviceInfoView(views.APIView):
permission_classes = (UserHasAnyDevicePermissions,)
def get(self, request, format=None):
info = {}
info["version"] = kolibri.__version__
status, urls = get_urls()
if not urls:
# Will not return anything when running the debug server, so at least return the current URL
urls = [
request.build_absolute_uri(OPTIONS["Deployment"]["URL_PATH_PREFIX"])
]
filtered_urls = [
url for url in urls if "127.0.0.1" not in url and "localhost" not in url
]
if filtered_urls:
urls = filtered_urls
info["urls"] = urls
if settings.DATABASES["default"]["ENGINE"].endswith("sqlite3"):
# If any other database backend, will not be file backed, so no database path to return
info["database_path"] = settings.DATABASES["default"]["NAME"]
instance_model = InstanceIDModel.get_or_create_current_instance()[0]
info["device_id"] = instance_model.id
info["os"] = instance_model.platform
info["content_storage_free_space"] = get_free_space(
OPTIONS["Paths"]["CONTENT_DIR"]
)
# This returns the localized time for the server
info["server_time"] = local_now()
# Returns the named timezone for the server (the time above only includes the offset)
info["server_timezone"] = settings.TIME_ZONE
info["installer"] = installation_type()
info["python_version"] = "{major}.{minor}.{micro}".format(
major=version_info.major, minor=version_info.minor, micro=version_info.micro
)
return Response(info)
class DeviceSettingsView(views.APIView):
permission_classes = (UserHasAnyDevicePermissions,)
def get(self, request):
settings = DeviceSettings.objects.get()
return Response(DeviceSettingsSerializer(settings).data)
def patch(self, request):
settings = DeviceSettings.objects.get()
serializer = DeviceSettingsSerializer(settings, data=request.data)
if not serializer.is_valid():
return HttpResponseBadRequest(serializer.errors)
serializer.save()
return Response(serializer.data)
class DeviceNameView(views.APIView):
permission_classes = (UserHasAnyDevicePermissions,)
def get(self, request):
settings = DeviceSettings.objects.get()
return Response({"name": settings.name})
def patch(self, request):
settings = DeviceSettings.objects.get()
settings.name = request.data["name"]
settings.save()
return Response({"name": settings.name})
|
mrpau/kolibri
|
kolibri/core/device/api.py
|
Python
|
mit
| 4,991
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#######################################################################
# Pinytodo - A Pinyto synced ToDo-List for Gtk+
# Copyright (C) 2015 Johannes Merkert <jonny@pinyto.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#######################################################################
# This is your preferences dialog.
#
# Define your preferences in
# data/glib-2.0/schemas/net.launchpad.pinyto-desktop-todo.gschema.xml
# See http://developer.gnome.org/gio/stable/GSettings.html for more info.
from gi.repository import Gio # pylint: disable=E0611
from locale import gettext as _
import logging
logger = logging.getLogger('pinyto_desktop_todo')
from pinyto_desktop_todo_lib.PreferencesDialog import PreferencesDialog
class PreferencesPinytoDesktopTodoDialog(PreferencesDialog):
__gtype_name__ = "PreferencesPinytoDesktopTodoDialog"
def finish_initializing(self, builder): # pylint: disable=E1002
"""Set up the preferences dialog"""
super(PreferencesPinytoDesktopTodoDialog, self).finish_initializing(builder)
# Bind each preference widget to gsettings
settings = Gio.Settings("net.launchpad.pinyto-desktop-todo")
widget = self.builder.get_object('example_entry')
settings.bind("example", widget, "text", Gio.SettingsBindFlags.DEFAULT)
# Code for other initialization actions should be added here.
|
Pinyto/pinytodo
|
pinyto_desktop_todo/PreferencesPinytoDesktopTodoDialog.py
|
Python
|
gpl-3.0
| 2,046
|
"""
Based on http://www.djangosnippets.org/snippets/595/
by sopelkin
"""
from django import forms
from django.forms import widgets
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class CommaSeparatedUserInput(widgets.Input):
input_type = 'text'
def render(self, name, value, attrs=None):
if value is None:
value = ''
elif isinstance(value, (list, tuple)):
value = (', '.join([user.username for user in value]))
return super(CommaSeparatedUserInput, self).render(name, value, attrs)
class CommaSeparatedUserField(forms.Field):
widget = CommaSeparatedUserInput
def __init__(self, *args, **kwargs):
recipient_filter = kwargs.pop('recipient_filter', None)
self._recipient_filter = recipient_filter
super(CommaSeparatedUserField, self).__init__(*args, **kwargs)
def clean(self, value):
super(CommaSeparatedUserField, self).clean(value)
if not value:
return ''
if isinstance(value, (list, tuple)):
return value
names = set(value.split(','))
names_set = set([name.strip() for name in names if name.strip()])
users = list(User.objects.filter(username__in=names_set))
unknown_names = names_set ^ set([user.username for user in users])
recipient_filter = self._recipient_filter
invalid_users = []
if recipient_filter is not None:
for r in users:
if recipient_filter(r) is False:
users.remove(r)
invalid_users.append(r.username)
if unknown_names or invalid_users:
raise forms.ValidationError(_(u"The following usernames are incorrect: %(users)s") % {'users': ', '.join(list(unknown_names)+invalid_users)})
return users
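# Minimal usage sketch (an assumption, not part of the original module): the
# field resolves a comma-separated username string into User objects.
#
#   class ComposeForm(forms.Form):
#       recipients = CommaSeparatedUserField(label=_(u"Recipients"))
#
#   form = ComposeForm(data={'recipients': 'alice, bob'})
#   # form.is_valid() is True only if both usernames exist (and pass the
#   # optional recipient_filter); cleaned_data['recipients'] is a list of Users.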
|
HiddenClever/django-messages
|
django_messages/fields.py
|
Python
|
bsd-3-clause
| 1,923
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import logging
import os
import re
from telemetry import value as value_module
from telemetry.core import util
from telemetry.results import chart_json_output_formatter
from telemetry.results import output_formatter
from telemetry.util import cloud_storage
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'util')
import lastchange # pylint: disable=F0401
_TEMPLATE_HTML_PATH = os.path.join(
util.GetTelemetryDir(), 'support', 'html_output', 'results-template.html')
_PLUGINS = [('third_party', 'flot', 'jquery.flot.min.js'),
('third_party', 'WebKit', 'PerformanceTests', 'resources',
'jquery.tablesorter.min.js'),
('third_party', 'WebKit', 'PerformanceTests', 'resources',
'statistics.js')]
_UNIT_JSON = ('tools', 'perf', 'unit-info.json')
# TODO(eakuefner): rewrite template to use Telemetry JSON directly
class HtmlOutputFormatter(output_formatter.OutputFormatter):
def __init__(self, output_stream, metadata, reset_results, upload_results,
browser_type, results_label=None):
super(HtmlOutputFormatter, self).__init__(output_stream)
self._metadata = metadata
self._reset_results = reset_results
self._upload_results = upload_results
self._existing_results = self._ReadExistingResults(output_stream)
self._result = {
'buildTime': self._GetBuildTime(),
'revision': self._GetRevision(),
'label': results_label,
'platform': browser_type,
'tests': {}
}
def _GetBuildTime(self):
def _DatetimeInEs5CompatibleFormat(dt):
return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')
return _DatetimeInEs5CompatibleFormat(datetime.datetime.utcnow())
def _GetRevision(self):
return lastchange.FetchVersionInfo(None).revision
def _GetHtmlTemplate(self):
with open(_TEMPLATE_HTML_PATH) as f:
return f.read()
def _GetPlugins(self):
plugins = ''
for p in _PLUGINS:
with open(os.path.join(util.GetChromiumSrcDir(), *p)) as f:
plugins += f.read()
return plugins
def _GetUnitJson(self):
with open(os.path.join(util.GetChromiumSrcDir(), *_UNIT_JSON)) as f:
return f.read()
def _ReadExistingResults(self, output_stream):
results_html = output_stream.read()
if self._reset_results or not results_html:
return []
m = re.search(
'^<script id="results-json" type="application/json">(.*?)</script>$',
results_html, re.MULTILINE | re.DOTALL)
if not m:
logging.warn('Failed to extract previous results from HTML output')
return []
return json.loads(m.group(1))[:512]
def _SaveResults(self, results):
self._output_stream.seek(0)
self._output_stream.write(results)
self._output_stream.truncate()
def _PrintPerfResult(self, measurement, trace, values, units,
result_type='default'):
metric_name = measurement
if trace != measurement:
metric_name += '.' + trace
self._result['tests'].setdefault(self._test_name, {})
self._result['tests'][self._test_name].setdefault('metrics', {})
self._result['tests'][self._test_name]['metrics'][metric_name] = {
'current': values,
'units': units,
'important': result_type == 'default'
}
def _TranslateChartJson(self, chart_json_dict):
dummy_dict = dict()
for chart_name, traces in chart_json_dict['charts'].iteritems():
for trace_name, value_dict in traces.iteritems():
# TODO(eakuefner): refactor summarization so we don't have to jump
# through hoops like this.
if 'page_id' in value_dict:
del value_dict['page_id']
result_type = 'nondefault'
else:
result_type = 'default'
# Note: we explicitly ignore TraceValues because Buildbot did.
if value_dict['type'] == 'trace':
continue
value = value_module.Value.FromDict(value_dict, dummy_dict)
perf_value = value.GetBuildbotValue()
if trace_name == 'summary':
trace_name = chart_name
self._PrintPerfResult(chart_name, trace_name, perf_value,
value.units, result_type)
@property
def _test_name(self):
return self._metadata.name
def GetResults(self):
return self._result
def GetCombinedResults(self):
all_results = list(self._existing_results)
all_results.append(self.GetResults())
return all_results
def Format(self, page_test_results):
chart_json_dict = chart_json_output_formatter.ResultsAsChartDict(
self._metadata, page_test_results.all_page_specific_values,
page_test_results.all_summary_values)
self._TranslateChartJson(chart_json_dict)
self._PrintPerfResult('telemetry_page_measurement_results', 'num_failed',
[len(page_test_results.failures)], 'count',
'unimportant')
html = self._GetHtmlTemplate()
html = html.replace('%json_results%', json.dumps(self.GetCombinedResults()))
html = html.replace('%json_units%', self._GetUnitJson())
html = html.replace('%plugins%', self._GetPlugins())
self._SaveResults(html)
if self._upload_results:
file_path = os.path.abspath(self._output_stream.name)
file_name = 'html-results/results-%s' % datetime.datetime.now().strftime(
'%Y-%m-%d_%H-%M-%S')
try:
cloud_storage.Insert(cloud_storage.PUBLIC_BUCKET, file_name, file_path)
print
print ('View online at '
'http://storage.googleapis.com/chromium-telemetry/%s'
% file_name)
except cloud_storage.PermissionError as e:
logging.error('Cannot upload profiling files to cloud storage due to '
' permission error: %s' % e.message)
print
print 'View result at file://%s' % os.path.abspath(
self._output_stream.name)
|
guorendong/iridium-browser-ubuntu
|
tools/telemetry/telemetry/results/html_output_formatter.py
|
Python
|
bsd-3-clause
| 6,072
|
#!/usr/bin/env python
# coding=utf-8
"""TrainingPreparator engine action.
Use this module to add the project main code.
"""
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.engine_base import EngineBaseDataHandler
__all__ = ['TrainingPreparator']
logger = get_logger('training_preparator')
class TrainingPreparator(EngineBaseDataHandler):
def __init__(self, **kwargs):
super(TrainingPreparator, self).__init__(**kwargs)
def execute(self, params, **kwargs):
"""
Setup the dataset with the transformed data that is compatible with the algorithm used to build the model in the next action.
Use the self.initial_dataset prepared in the last action as source of data.
Eg.
self.marvin_dataset = {...}
"""
self.marvin_dataset = {}
|
marvin-ai/marvin-python-toolbox
|
marvin_python_toolbox/management/templates/python-engine/project_package/data_handler/training_preparator.py
|
Python
|
apache-2.0
| 859
|
from JumpScale import j
from Telegram import Telegram
from handlers.loggerHandler import LoggerHandler
from handlers.DemoHandler import DemoHandler
from handlers.InteractiveHandler import InteractiveHandler
import gevent
class TelegramBot:
"""
"""
def __init__(self, telegramkey=None):
"""
        @param telegramkey eg. 112456445:AAFgQVEWPGztQc1S8NW0NXY8rqQLDPx0knM
"""
print("key:%s" % telegramkey)
self.api = Telegram("https://api.telegram.org/bot", telegramkey)
# def addLogHandler(self,path="/tmp/chat.log"):
# """
# loggerHandler = LoggerHandler("chat.log")
# self.api.add_handler(loggerHandler)
# """
# loggerHandler = LoggerHandler(path)
# self.api.add_handler(loggerHandler)
def addDemoHandler(self):
"""
"""
handler = DemoHandler()
self.api.add_handler(handler)
def addCustomHandler(self, handler):
"""
handler = OurHandler()
telegrambot.addHandler(handler)
"""
self.api.add_handler(handler)
def start(self, path="%s/telegrambot/actions" % j.dirs.varDir):
"""
will always look for actions in subdir 'actions'
each name of script corresponds to name of action
"""
# self.api.process_updates()
h = InteractiveHandler()
j.sal.fs.createDir(path)
h.actionspath = path
print("Actions path: %s" % h.actionspath)
h.maintenance()
self.api.add_handler(h)
gevent.spawn(self.api.process_updates)
while True:
gevent.sleep(1)
for handler in self.api.handlers:
if hasattr(handler, 'maintenance'):
handler.maintenance()
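# Minimal usage sketch (the token below is a placeholder, not a real key):
#
#   bot = TelegramBot(telegramkey='123456:replace-with-your-bot-token')
#   bot.addDemoHandler()
#   bot.start()  # blocks: polls for updates and runs handler maintenance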
|
Jumpscale/jumpscale_core8
|
lib/JumpScale/tools/telegram/TelegramBot.py
|
Python
|
apache-2.0
| 1,760
|
__author__ = 'smartschat'
class MultigraphDecoder:
def __init__(self, multigraph_creator):
self.coref_multigraph_creator = multigraph_creator
def decode(self, corpus):
for doc in corpus:
for mention in doc.system_mentions:
mention.attributes["set_id"] = None
# discard dummy mention
self.decode_for_one_document(doc.system_mentions[1:])
def decode_for_one_document(self, mentions):
multigraph = \
self.coref_multigraph_creator.construct_graph_from_mentions(
mentions)
for mention in mentions:
antecedent = self.compute_antecedent(mention, multigraph)
if antecedent is not None:
if antecedent.attributes["set_id"] is None:
antecedent.attributes["set_id"] = \
mentions.index(antecedent)
mention.attributes["set_id"] = antecedent.attributes["set_id"]
mention.document.antecedent_decisions[mention.span] = \
antecedent.span
@staticmethod
def compute_antecedent(mention, multigraph):
weights = []
for antecedent in multigraph.edges[mention]:
if not multigraph.edges[mention][antecedent]["negative_relations"]:
weights.append(
(multigraph.get_weight(mention, antecedent), antecedent))
# get antecedent with highest positive weight, break ties by distance
if len(weights) > 0 and sorted(weights)[-1][0] > 0:
return sorted(weights)[-1][1]
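# Worked example (illustrative; m1..m3 stand for hypothetical mentions that
# sort by document order): only edges without negative relations are kept,
# then sorted() picks the highest positive weight and breaks ties in favour
# of the later, i.e. closer, antecedent.
#
#   weights = [(0.4, m1), (0.7, m2), (0.7, m3)]
#   sorted(weights)[-1]  # -> (0.7, m3)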
|
Yegor-Budnikov/cort
|
cort/coreference/multigraph/decoders.py
|
Python
|
mit
| 1,601
|
"""Author: Konrad Zemek, Michal Zmuda
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Brings up a set of worker nodes. They can create separate clusters.
The script is parametrised by a worker-type-specific configurator.
"""
import copy
import json
import os
import subprocess
import sys
from . import common, docker, riak, couchbase, dns, cluster_manager
CLUSTER_WAIT_FOR_NAGIOS_SECONDS = 60 * 2
# mounting point for op-worker-node docker
DOCKER_BINDIR_PATH = '/root/build'
def cluster_domain(instance, uid):
"""Formats domain for a cluster."""
return common.format_hostname(instance, uid)
def worker_hostname(node_name, instance, uid):
"""Formats hostname for a docker hosting cluster_worker.
NOTE: Hostnames are also used as docker names!
"""
return common.format_hostname([node_name, instance], uid)
def worker_erl_node_name(node_name, instance, uid):
"""Formats erlang node name for a vm on cluster_worker docker.
"""
hostname = worker_hostname(node_name, instance, uid)
return common.format_erl_node_name('worker', hostname)
def _tweak_config(config, name, instance, uid, configurator):
cfg = copy.deepcopy(config)
cfg['nodes'] = {'node': cfg['nodes'][name]}
sys_config = cfg['nodes']['node']['sys.config']
sys_config['cm_nodes'] = [
cluster_manager.cm_erl_node_name(n, instance, uid) for n in
sys_config['cm_nodes']]
# Set the cluster domain (needed for nodes to start)
sys_config[configurator.domain_env_name()] = cluster_domain(instance, uid)
sys_config['persistence_driver_module'] = _db_driver_module(cfg['db_driver'])
if 'vm.args' not in cfg['nodes']['node']:
cfg['nodes']['node']['vm.args'] = {}
vm_args = cfg['nodes']['node']['vm.args']
vm_args['name'] = worker_erl_node_name(name, instance, uid)
cfg = configurator.tweak_config(cfg, uid)
return cfg, sys_config['db_nodes']
def _node_up(image, bindir, config, dns_servers, db_node_mappings, logdir, configurator):
node_name = config['nodes']['node']['vm.args']['name']
db_nodes = config['nodes']['node']['sys.config']['db_nodes']
for i in range(len(db_nodes)):
db_nodes[i] = db_node_mappings[db_nodes[i]]
(name, sep, hostname) = node_name.partition('@')
command = '''mkdir -p /root/bin/node/log/
echo 'while ((1)); do chown -R {uid}:{gid} /root/bin/node/log; sleep 1; done' > /root/bin/chown_logs.sh
bash /root/bin/chown_logs.sh &
cat <<"EOF" > /tmp/gen_dev_args.json
{gen_dev_args}
EOF
set -e
escript bamboos/gen_dev/gen_dev.escript /tmp/gen_dev_args.json
/root/bin/node/bin/{executable} console'''
command = command.format(
gen_dev_args=json.dumps({configurator.app_name(): config}),
uid=os.geteuid(),
gid=os.getegid(),
executable=configurator.app_name()
)
volumes = [(bindir, DOCKER_BINDIR_PATH, 'ro')]
volumes += configurator.extra_volumes(config)
if logdir:
logdir = os.path.join(os.path.abspath(logdir), hostname)
volumes.extend([(logdir, '/root/bin/node/log', 'rw')])
container = docker.run(
image=image,
name=hostname,
hostname=hostname,
detach=True,
interactive=True,
tty=True,
workdir=DOCKER_BINDIR_PATH,
volumes=volumes,
dns_list=dns_servers,
command=command)
# create system users and groups (if specified)
if 'os_config' in config:
common.create_users(container, config['os_config']['users'])
common.create_groups(container, config['os_config']['groups'])
return container, {
'docker_ids': [container],
configurator.nodes_list_attribute(): [node_name]
}
def _ready(container):
ip = docker.inspect(container)['NetworkSettings']['IPAddress']
return common.nagios_up(ip, port='6666', protocol='http')
def _riak_up(cluster_name, db_nodes, dns_servers, uid):
db_node_mappings = {}
for node in db_nodes:
db_node_mappings[node] = ''
i = 0
for node in iter(db_node_mappings.keys()):
db_node_mappings[node] = riak.config_entry(cluster_name, i, uid)
i += 1
if i == 0:
return db_node_mappings, {}
[dns] = dns_servers
riak_output = riak.up('onedata/riak', dns, uid, None, cluster_name, len(db_node_mappings))
return db_node_mappings, riak_output
def _couchbase_up(cluster_name, db_nodes, dns_servers, uid):
db_node_mappings = {}
for node in db_nodes:
db_node_mappings[node] = ''
for i, node in enumerate(db_node_mappings):
db_node_mappings[node] = couchbase.config_entry(cluster_name, i, uid)
if not db_node_mappings:
return db_node_mappings, {}
[dns] = dns_servers
couchbase_output = couchbase.up('couchbase/server:community-4.0.0', dns, uid, cluster_name, len(db_node_mappings))
return db_node_mappings, couchbase_output
def _db_driver(config):
return config['db_driver'] if 'db_driver' in config else 'couchbase'
def _db_driver_module(db_driver):
return db_driver + "_datastore_driver"
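# For example (derived from the two helpers above): 'db_driver': 'riak' maps to
# the 'riak_datastore_driver' persistence module, while the default
# ('couchbase') maps to 'couchbase_datastore_driver'.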
def up(image, bindir, dns_server, uid, config_path, configurator, logdir=None):
config = common.parse_json_file(config_path)
input_dir = config['dirs_config'][configurator.app_name()]['input_dir']
dns_servers, output = dns.maybe_start(dns_server, uid)
# Workers of every cluster are started together
for instance in config[configurator.domains_attribute()]:
gen_dev_cfg = {
'config': {
'input_dir': input_dir,
'target_dir': '/root/bin'
},
'nodes': config[configurator.domains_attribute()][instance][configurator.app_name()],
'db_driver': _db_driver(config[configurator.domains_attribute()][instance])
}
# If present, include os_config
if 'os_config' in config[configurator.domains_attribute()][instance]:
os_config = config[configurator.domains_attribute()][instance]['os_config']
gen_dev_cfg['os_config'] = config['os_configs'][os_config]
        # Tweak configs, retrieve list of db nodes to start
configs = []
all_db_nodes = []
for worker_node in gen_dev_cfg['nodes']:
tw_cfg, db_nodes = _tweak_config(gen_dev_cfg, worker_node, instance, uid, configurator)
configs.append(tw_cfg)
all_db_nodes.extend(db_nodes)
db_node_mappings = None
db_out = None
db_driver = _db_driver(config[configurator.domains_attribute()][instance])
# Start db nodes, obtain mappings
if db_driver == 'riak':
db_node_mappings, db_out = _riak_up(instance, all_db_nodes, dns_servers, uid)
elif db_driver == 'couchbase':
db_node_mappings, db_out = _couchbase_up(instance, all_db_nodes, dns_servers, uid)
else:
raise ValueError("Invalid db_driver: {0}".format(db_driver))
common.merge(output, db_out)
# Start the workers
workers = []
worker_ips = []
for cfg in configs:
worker, node_out = _node_up(image, bindir, cfg, dns_servers, db_node_mappings, logdir, configurator)
workers.append(worker)
worker_ips.append(common.get_docker_ip(worker))
common.merge(output, node_out)
# Wait for all workers to start
common.wait_until(_ready, workers, CLUSTER_WAIT_FOR_NAGIOS_SECONDS)
# Add the domain of current clusters
domains = {
'domains': {
cluster_domain(instance, uid): {
'ns': worker_ips,
'a': []
}
}
}
common.merge(output, domains)
configurator.configure_started_instance(bindir, instance, config, output)
# Make sure domains are added to the dns server.
dns.maybe_restart_with_configuration(dns_server, uid, output)
return output
|
onedata/cluster-example
|
bamboos/docker/environment/worker.py
|
Python
|
mit
| 7,994
|
import re
def multiple_replacer(*key_values):
replace_dict = dict(key_values)
def replacement_function(match): return replace_dict[match.group(0)]
pattern = re.compile("|".join([re.escape(k) for k, v in key_values]), re.M)
return lambda string: pattern.sub(replacement_function, string)
def multiple_replace(string, *key_values):
return multiple_replacer(*key_values)(string)
def influxdb_tag_escaper():
return multiple_replacer(('\\', '\\\\'), (' ', '\\ '), (',', '\\,'), ('=', '\\='))
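# Illustrative demo (not part of the original module): escaping a tag value
# containing spaces, commas and equals signs before building line protocol.
if __name__ == '__main__':
    _escape = influxdb_tag_escaper()
    print(_escape('host=web 1,eu'))  # prints: host\=web\ 1\,eu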
|
mre/kafka-influxdb
|
kafka_influxdb/encoder/escape_functions.py
|
Python
|
apache-2.0
| 518
|
# Copyright 2022 Nisaba Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rudimentary unit tests for the utility library."""
from absl.testing import absltest
from nisaba.scripts.brahmic import util as u
import nisaba.scripts.utils.file as uf
class UtilTest(absltest.TestCase):
def testFileExistence(self):
filename = 'dead_consonant.tsv'
self.assertTrue(uf.IsFileExist(u.SCRIPT_DIR / 'Beng' / filename))
self.assertFalse(uf.IsFileExist(u.SCRIPT_DIR / 'Taml' / filename))
if __name__ == '__main__':
absltest.main()
|
google-research/nisaba
|
nisaba/scripts/brahmic/util_test.py
|
Python
|
apache-2.0
| 1,046
|
#!/usr/bin/env python
'''
Component to handle data storage and search of all commands run
'''
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import CommandRegisterInterface
from framework.lib.general import cprint
from framework.db import models
from framework.db.target_manager import target_required
class CommandRegister(BaseComponent, CommandRegisterInterface):
COMPONENT_NAME = "command_register"
def __init__(self):
self.register_in_service_locator()
self.config = self.get_component("config")
self.db = self.get_component("db")
self.plugin_output = None
self.target = None
def init(self):
self.target = self.get_component("target")
self.plugin_output = self.get_component("plugin_output")
def AddCommand(self, Command):
self.db.session.merge(models.Command(
start_time=Command['Start'],
end_time=Command['End'],
success=Command['Success'],
target_id=Command['Target'],
plugin_key=Command['PluginKey'],
modified_command=Command['ModifiedCommand'].strip(),
original_command=Command['OriginalCommand'].strip()
))
self.db.session.commit()
def DeleteCommand(self, Command):
command_obj = self.db.session.query(models.Command).get(Command)
self.db.session.delete(command_obj)
self.db.session.commit()
@target_required
def CommandAlreadyRegistered(self, original_command, target_id=None):
register_entry = self.db.session.query(models.Command).get(original_command)
if register_entry:
# If the command was completed and the plugin output to which it
# is referring exists
if register_entry.success and self.plugin_output.PluginOutputExists(register_entry.plugin_key, register_entry.target_id):
return self.target.GetTargetURLForID(register_entry.target_id)
else: # Either command failed or plugin output doesn't exist
self.DeleteCommand(original_command)
return self.target.GetTargetURLForID(register_entry.target_id)
return None
|
sharad1126/owtf
|
framework/db/command_register.py
|
Python
|
bsd-3-clause
| 2,254
|
import logging
from collections import defaultdict
from django.core.management.base import (BaseCommand,
CommandError)
from django.db.utils import IntegrityError
from treeherder.model.derived import JobsModel
from treeherder.model.models import (FailureLine,
FailureMatch,
Matcher)
logger = logging.getLogger(__name__)
# The minimum goodness of match we need to mark a particular match as the best match
AUTOCLASSIFY_CUTOFF_RATIO = 0.7
# A goodness of match after which we will not run further detectors
AUTOCLASSIFY_GOOD_ENOUGH_RATIO = 0.9
class Command(BaseCommand):
    args = '<repository>, <job_guid>'
help = 'Mark failures on a job.'
def handle(self, *args, **options):
if not len(args) == 2:
raise CommandError('2 arguments required, %s given' % len(args))
repository, job_guid = args
with JobsModel(repository) as jm:
match_errors(repository, jm, job_guid)
def match_errors(repository, jm, job_guid):
job = jm.get_job_ids_by_guid([job_guid]).get(job_guid)
if not job:
logger.error('autoclassify: No job for '
'{0} job_guid {1}'.format(repository, job_guid))
return
job_id = job.get("id")
# Only try to autoclassify where we have a failure status; sometimes there can be
# error lines even in jobs marked as passing.
if job["result"] not in ["testfailed", "busted", "exception"]:
return
unmatched_failures = set(FailureLine.objects.unmatched_for_job(repository, job_guid))
if not unmatched_failures:
return
matches, all_matched = find_matches(unmatched_failures)
update_db(jm, job_id, matches, all_matched)
def find_matches(unmatched_failures):
all_matches = set()
for matcher in Matcher.objects.registered_matchers():
matches = matcher(unmatched_failures)
for match in matches:
logger.info("Matched failure %i with intermittent %i" %
(match.failure_line.id, match.classified_failure.id))
all_matches.add((matcher.db_object, match))
if match.score >= AUTOCLASSIFY_GOOD_ENOUGH_RATIO:
unmatched_failures.remove(match.failure_line)
if not unmatched_failures:
break
return all_matches, len(unmatched_failures) == 0
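# Shape of the matcher protocol assumed by find_matches (a hypothetical sketch,
# not a real matcher): every registered matcher is callable on the unmatched
# FailureLine set and returns match objects exposing .failure_line,
# .classified_failure and .score, while matcher.db_object is the Matcher row
# stored alongside each FailureMatch in update_db.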
def update_db(jm, job_id, matches, all_matched):
matches_by_failure_line = defaultdict(set)
for item in matches:
matches_by_failure_line[item[1].failure_line].add(item)
for failure_line, matches in matches_by_failure_line.iteritems():
for matcher, match in matches:
try:
FailureMatch.objects.create(
score=match.score,
matcher=matcher,
classified_failure=match.classified_failure,
failure_line=failure_line)
except IntegrityError:
logger.warning(
"Tried to create duplicate match for failure line %i with matcher %i and classified_failure %i" %
(failure_line.id, matcher.id, match.classified_failure.id))
best_match = failure_line.best_automatic_match(AUTOCLASSIFY_CUTOFF_RATIO)
if best_match:
failure_line.best_classification = best_match.classified_failure
failure_line.save()
if all_matched:
jm.update_after_autoclassification(job_id)
|
akhileshpillai/treeherder
|
treeherder/autoclassify/management/commands/autoclassify.py
|
Python
|
mpl-2.0
| 3,526
|
#------------------------------------------------------------------------------
'''A sample class-style, custom model using a modified laminate theory.'''
# Users can define classes for custom laminate theory models.
# Additionally, users can define custom defaults.
import math
import collections as ct
import pandas as pd
from lamana.input_ import BaseDefaults
from lamana.theories import BaseModel
from lamana.lt_exceptions import IndeterminateError
class Model(BaseModel): # in 0.4.11, can have any name
'''A modified laminate theory for circular biaxial flexure disks,
loaded with a flat piston punch on 3-ball support having two distinct
materials (polymer and ceramic).'''
    # TODO: Accept extra args and kwds here
def __init__(self):
self.Laminate = None
self.FeatureInput = None
self.LaminateModel = None
# TODO: eventually abstract into BaseModel and deprecate direct coding
# TODO: accept kwargs from Case -> handshake
def _use_model_(self, Laminate, adjusted_z=False):
        '''Return updated DataFrame and FeatureInput; return None if exceptions raised.
Parameters
----------
df : DataFrame
LaminateModel with IDs and Dimensional Variables.
        FeatureInput : dict
Geometry, laminate parameters and more. Updates Globals dict for
parameters in the dashboard output.
adjusted_z: bool; default=False
If True, uses z(m)* values instead; different assumption for internal calc.
Raises
------
ZeroDivisionError
If zero `r` or `a` in the log term are zero.
ValueError
If negative numbers are in the log term or the support radius exceeds
the sample radius.
Returns
-------
tuple
The updated calculations and parameters stored in a tuple
`(LaminateModel, FeatureInput)``.
'''
self.Laminate = Laminate
df = Laminate.LFrame.copy()
FeatureInput = Laminate.FeatureInput
# Author-defined Exception Handling
if (FeatureInput['Parameters']['r'] == 0):
raise ZeroDivisionError('r=0 is invalid for the log term in the moment eqn.')
elif (FeatureInput['Parameters']['a'] == 0):
raise ZeroDivisionError('a=0 is invalid for the log term in the moment eqn.')
elif (FeatureInput['Parameters']['r'] < 0) | (FeatureInput['Parameters']['a'] < 0):
raise ValueError('Negative numbers are invalid for the log term '
'in moment eqn.')
elif FeatureInput['Parameters']['a'] > FeatureInput['Parameters']['R']:
raise ValueError('Support radius is larger than sample radius.')
elif df['side'].str.contains('INDET').any():
print('INDET value found. Rolling back...')
raise IndeterminateError('INDET value found. Unable to accurately calculate stress.')
#raise AssertionError('Indeterminate value found. Unable to accurately calculate stress.')
# Calling functions to calculate Qs and Ds
df.loc[:, 'Q_11'] = self.calc_stiffness(df, FeatureInput['Properties']).q_11
df.loc[:, 'Q_12'] = self.calc_stiffness(df, FeatureInput['Properties']).q_12
df.loc[:, 'D_11'] = self.calc_bending(df, adj_z=adjusted_z).d_11
df.loc[:, 'D_12'] = self.calc_bending(df, adj_z=adjusted_z).d_12
# Global Variable Update
if (FeatureInput['Parameters']['p'] == 1) & (Laminate.nplies%2 == 0):
D_11T = sum(df['D_11'])
D_12T = sum(df['D_12'])
else:
D_11T = sum(df.loc[df['label'] == 'interface', 'D_11']) # total D11
D_12T = sum(df.loc[df['label'] == 'interface', 'D_12'])
#print(FeatureInput['Geometric']['p'])
D_11p = (1./((D_11T**2 - D_12T**2)) * D_11T) #
D_12n = -(1./((D_11T**2 - D_12T**2)) *D_12T) #
v_eq = D_12T/D_11T # equiv. Poisson's ratio
M_r = self.calc_moment(df, FeatureInput['Parameters'], v_eq).m_r
M_t = self.calc_moment(df, FeatureInput['Parameters'], v_eq).m_t
K_r = (D_11p*M_r) + (D_12n*M_t) # curvatures
K_t = (D_12n*M_r) + (D_11p*M_t)
# Update FeatureInput
global_params = {
'D_11T': D_11T,
'D_12T': D_12T,
'D_11p': D_11p,
'D_12n': D_12n,
'v_eq ': v_eq,
'M_r': M_r,
'M_t': M_t,
'K_r': K_r,
'K_t:': K_t,
}
FeatureInput['Globals'] = global_params
self.FeatureInput = FeatureInput # update with Globals
#print(FeatureInput)
# Calculate Strains and Stresses and Update DataFrame
df.loc[:,'strain_r'] = K_r * df.loc[:, 'Z(m)']
df.loc[:,'strain_t'] = K_t * df.loc[:, 'Z(m)']
df.loc[:, 'stress_r (Pa/N)'] = (df.loc[:, 'strain_r'] * df.loc[:, 'Q_11']
) + (df.loc[:, 'strain_t'] * df.loc[:, 'Q_12'])
df.loc[:,'stress_t (Pa/N)'] = (df.loc[:, 'strain_t'] * df.loc[:, 'Q_11']
) + (df.loc[:, 'strain_r'] * df.loc[:, 'Q_12'])
df.loc[:,'stress_f (MPa/N)'] = df.loc[:, 'stress_t (Pa/N)']/1e6
del df['Modulus']
del df['Poissons']
self.LaminateModel = df
return (df, FeatureInput)
#------------------------------------------------------------------------------
'''Prefer staticmethods here. Add formulas to doc strings.'''
def calc_stiffness(self, df, mat_props):
'''Return tuple of Series of (Q11, Q12) floats per lamina.'''
# Iterate to Apply Modulus and Poisson's to correct Material
# TODO: Prefer cleaner ways to parse materials from mat_props
df_mat_props = pd.DataFrame(mat_props) # df easier to munge
df_mat_props.index.name = 'materials'
##for material in mat_props.index:
for material in df_mat_props.index:
mat_idx = df['matl'] == material
df.loc[mat_idx, 'Modulus'] = df_mat_props.loc[material, 'Modulus']
df.loc[mat_idx, 'Poissons'] = df_mat_props.loc[material, 'Poissons']
E = df['Modulus'] # series of moduli
v = df['Poissons']
stiffness = ct.namedtuple('stiffness', ['q_11', 'q_12'])
q_11 = E / (1 - (v**2))
q_12 = (v*E) / (1 - (v**2))
return stiffness(q_11, q_12)
def calc_bending(self, df, adj_z=False):
'''Return tuple of Series of (D11, D12) floats.'''
q_11 = df['Q_11']
q_12 = df['Q_12']
h = df['h(m)']
# TODO: need to fix kwargs passing first; tabled since affects many modules.
if not adj_z:
z = df['z(m)']
else:
z = df['z(m)*']
bending = ct.namedtuple('bending', ['d_11', 'd_12'])
d_11 = ((q_11*(h**3)) / 12.) + (q_11*h*(z**2))
d_12 = ((q_12*(h**3)) / 12.) + (q_12*h*(z**2))
return bending(d_11, d_12)
def calc_moment(self, df, load_params, v_eq):
'''Return tuple of moments (radial and tangential); floats.
        See Timoshenko-Woinowsky: Eq. 91; default'''
P_a = load_params['P_a']
a = load_params['a']
r = load_params['r']
moments = ct.namedtuple('moments', ['m_r', 'm_t'])
m_r = ((P_a/(4*math.pi)) * ((1 + v_eq)*math.log10(a/r)))
m_t = ((P_a/(4*math.pi)) * (((1 + v_eq)*math.log10(a/r)) + (1 - v_eq)))
return moments(m_r, m_t)
class Defaults(BaseDefaults):
    '''Return parameters for building distribution cases. Useful for consistent
    testing.
    Dimensional defaults are inherited from utils.BaseDefaults().
    Material-specific parameters are defined here by the user.
- Default geometric parameters
- Default material properties
- Default FeatureInput
Examples
--------
>>> dft = Defaults()
>>> dft.load_params
{'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,}
>>> dft.mat_props
{'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
'Poissons': {'HA': 0.25, 'PSu': 0.33}}
>>> dft.FeatureInput
{'Geometry' : '400-[200]-800',
'Geometric' : {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,},
'Materials' : {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],},
'Custom' : None,
'Model' : Wilson_LT}
Returns
-------
class
Updated attributes inherited from the `BaseDefaults` class.
'''
def __init__(self):
BaseDefaults.__init__(self)
'''DEV: Add defaults first. Then adjust attributes.'''
# DEFAULTS ------------------------------------------------------------
# Build dicts of geometric and material parameters
self.load_params = {
'R': 12e-3, # specimen radius
'a': 7.5e-3, # support ring radius
'p': 5, # points/layer
'P_a': 1, # applied load
'r': 2e-4, # radial distance from center loading
}
self.mat_props = {
'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9},
'Poissons': {'HA': 0.25, 'PSu': 0.33}
}
# ATTRIBUTES ----------------------------------------------------------
# FeatureInput
self.FeatureInput = self.get_FeatureInput(
self.Geo_objects['standard'][0],
load_params=self.load_params,
mat_props=self.mat_props,
##custom_matls=None,
model='Wilson_LT',
global_vars=None
)
| par2/lamana | lamana/models/Wilson_LT.py | Python | bsd-3-clause | 9,880 |
# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for the class
:class:`iris.fileformats.um._fast_load_structured_fields.FieldCollation`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests
from iris._lazy_data import as_lazy_data
from netcdftime import datetime
import numpy as np
from iris.fileformats.um._fast_load_structured_fields import FieldCollation
import iris.fileformats.pp
class Test___init__(tests.IrisTest):
def test_no_fields(self):
with self.assertRaises(AssertionError):
FieldCollation([])
class Test_fields(tests.IrisTest):
def test_preserve_members(self):
fields = ('foo', 'bar', 'wibble')
collation = FieldCollation(fields)
self.assertEqual(collation.fields, fields)
def _make_field(lbyr=None, lbyrd=None, lbft=None,
blev=None, bhlev=None, data=None):
header = [0] * 64
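    # Header indices are assumed to follow the 64-word PP lookup-header layout
    # used by the assignments below: lbyr at 0 (month/day at 1-2), lbyrd at 6
    # (month/day at 7-8), lbft at 13, blev at 51, bhlev at 53.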
if lbyr is not None:
header[0] = lbyr
header[1] = 1
header[2] = 1
if lbyrd is not None:
header[6] = lbyrd
header[7] = 1
header[8] = 1
if lbft is not None:
header[13] = lbft
if blev is not None:
header[51] = blev
if bhlev is not None:
header[53] = bhlev
field = iris.fileformats.pp.PPField3(header)
if data is not None:
_data = _make_data(data)
field.data = _data
return field
def _make_data(fill_value):
shape = (10, 10)
return as_lazy_data(np.ones(shape)*fill_value)
class Test_data(tests.IrisTest):
# Test order of the data attribute when fastest-varying element is changed.
def test_t1_varies_faster(self):
collation = FieldCollation(
[_make_field(lbyr=2013, lbyrd=2000, data=0),
_make_field(lbyr=2014, lbyrd=2000, data=1),
_make_field(lbyr=2015, lbyrd=2000, data=2),
_make_field(lbyr=2013, lbyrd=2001, data=3),
_make_field(lbyr=2014, lbyrd=2001, data=4),
_make_field(lbyr=2015, lbyrd=2001, data=5)])
result = collation.data[:, :, 0, 0]
expected = [[0, 1, 2], [3, 4, 5]]
self.assertArrayEqual(result, expected)
def test_t2_varies_faster(self):
collation = FieldCollation(
[_make_field(lbyr=2013, lbyrd=2000, data=0),
_make_field(lbyr=2013, lbyrd=2001, data=1),
_make_field(lbyr=2013, lbyrd=2002, data=2),
_make_field(lbyr=2014, lbyrd=2000, data=3),
_make_field(lbyr=2014, lbyrd=2001, data=4),
_make_field(lbyr=2014, lbyrd=2002, data=5)])
result = collation.data[:, :, 0, 0]
expected = [[0, 1, 2], [3, 4, 5]]
self.assertArrayEqual(result, expected)
class Test_element_arrays_and_dims(tests.IrisTest):
def test_single_field(self):
field = _make_field(2013)
collation = FieldCollation([field])
self.assertEqual(collation.element_arrays_and_dims, {})
def test_t1(self):
collation = FieldCollation([_make_field(lbyr=2013),
_make_field(lbyr=2014)])
result = collation.element_arrays_and_dims
self.assertEqual(list(result.keys()), ['t1'])
values, dims = result['t1']
self.assertArrayEqual(values, [datetime(2013, 1, 1),
datetime(2014, 1, 1)])
self.assertEqual(dims, (0,))
def test_t1_and_t2(self):
collation = FieldCollation([_make_field(lbyr=2013, lbyrd=2000),
_make_field(lbyr=2014, lbyrd=2001),
_make_field(lbyr=2015, lbyrd=2002)])
result = collation.element_arrays_and_dims
self.assertEqual(set(result.keys()), set(['t1', 't2']))
values, dims = result['t1']
self.assertArrayEqual(values, [datetime(2013, 1, 1),
datetime(2014, 1, 1),
datetime(2015, 1, 1)])
self.assertEqual(dims, (0,))
values, dims = result['t2']
self.assertArrayEqual(values, [datetime(2000, 1, 1),
datetime(2001, 1, 1),
datetime(2002, 1, 1)])
self.assertEqual(dims, (0,))
def test_t1_and_t2_and_lbft(self):
collation = FieldCollation([_make_field(lbyr=1, lbyrd=15, lbft=6),
_make_field(lbyr=1, lbyrd=16, lbft=9),
_make_field(lbyr=11, lbyrd=25, lbft=6),
_make_field(lbyr=11, lbyrd=26, lbft=9)])
result = collation.element_arrays_and_dims
self.assertEqual(set(result.keys()), set(['t1', 't2', 'lbft']))
values, dims = result['t1']
self.assertArrayEqual(values, [datetime(1, 1, 1),
datetime(11, 1, 1)])
self.assertEqual(dims, (0,))
values, dims = result['t2']
self.assertArrayEqual(values,
[[datetime(15, 1, 1), datetime(16, 1, 1)],
[datetime(25, 1, 1), datetime(26, 1, 1)]])
self.assertEqual(dims, (0, 1))
values, dims = result['lbft']
self.assertArrayEqual(values, [6, 9])
self.assertEqual(dims, (1,))
def test_blev(self):
collation = FieldCollation([_make_field(blev=1), _make_field(blev=2)])
result = collation.element_arrays_and_dims
keys = set(['blev', 'brsvd1', 'brsvd2', 'brlev',
'bhrlev', 'lblev', 'bhlev'])
self.assertEqual(set(result.keys()), keys)
values, dims = result['blev']
self.assertArrayEqual(values, [1, 2])
self.assertEqual(dims, (0,))
def test_bhlev(self):
collation = FieldCollation([_make_field(blev=0, bhlev=1),
_make_field(blev=1, bhlev=2)])
result = collation.element_arrays_and_dims
keys = set(['blev', 'brsvd1', 'brsvd2', 'brlev',
'bhrlev', 'lblev', 'bhlev'])
self.assertEqual(set(result.keys()), keys)
values, dims = result['bhlev']
self.assertArrayEqual(values, [1, 2])
self.assertEqual(dims, (0,))
class Test__time_comparable_int(tests.IrisTest):
def test(self):
        # Define a list of date-time tuples which should all remain distinct
        # and stay in ascending order when converted...
test_date_tuples = [
# Increment each component in turn to check that all are handled.
(2004, 1, 1, 0, 0, 0),
(2004, 1, 1, 0, 0, 1),
(2004, 1, 1, 0, 1, 0),
(2004, 1, 1, 1, 0, 0),
(2004, 1, 2, 0, 0, 0),
(2004, 2, 1, 0, 0, 0),
# Go across 2004-02-29 leap-day, and on to "Feb 31 .. Mar 1".
(2004, 2, 27, 0, 0, 0),
(2004, 2, 28, 0, 0, 0),
(2004, 2, 29, 0, 0, 0),
(2004, 2, 30, 0, 0, 0),
(2004, 2, 31, 0, 0, 0),
(2004, 3, 1, 0, 0, 0),
(2005, 1, 1, 0, 0, 0)]
collation = FieldCollation(['foo', 'bar'])
test_date_ints = [collation._time_comparable_int(*test_tuple)
for test_tuple in test_date_tuples]
# Check all values are distinct.
        self.assertEqual(len(test_date_ints), len(set(test_date_ints)))
# Check all values are in order.
self.assertEqual(test_date_ints, sorted(test_date_ints))
if __name__ == "__main__":
tests.main()
| LukeC92/iris | lib/iris/tests/unit/fileformats/um/fast_load_structured_fields/test_FieldCollation.py | Python | lgpl-3.0 | 8,389 |
# coding=utf8
"""
计算各类账户的净值
"""
import datetime
import pandas as pd
import tradingtime as tt
# from .nav import Nav
class Futures(object):
"""
期货账户净值计算
"""
def __init__(self, df):
        self.df = df.sort_values("datetime")  # raw account records
if "tradeDay" not in self.df.columns:
self.df["tradeDay"] = self.df.datetime.apply(tt.futureTradeCalendar.get_tradeday)
def nav_d(self, col="balance", start=None, origin=0):
"""
日计算净值
:param col: 要统计的字段,比如 balance:净值 , margin: 保证金
:param start: 开始时间, 具体到分钟,任意格式
:param begin: 从什么时候开始显示
:param origin: 初始值,比如总资金100w,但实际只投入10w,那么就要将origin设为10w
:return:
"""
start = pd.to_datetime(start) if start else self.df.datetime.min()
df = self.df[self.df.tradeDay >= start]
        # the initial equity is the closing equity of the trading day before the NAV start date
first = self.df.loc[df.index.min(), col]
df = df.reset_index()
        # take one value per trading day (the last record of each day)
# nav_se = df.set_index("tradeDay")[col].resample(
# "1T",
# closed="left",
# label="left"
# ).last().dropna().sort_index()
nav_se = df.groupby("tradeDay").apply(lambda t: t[t.datetime == t.datetime.max()]).set_index("tradeDay")[col]
        # daily NAV calculation
delta = first - origin
nav_se -= delta
        # normalise the starting NAV to 1
col_se = nav_se.copy()
nav_se /= origin
first_day = nav_se.index.min()
first_day -= datetime.timedelta(days=1)
nav_se[first_day] = 1
col_se[first_day] = origin
        # round to a fixed number of decimals
nav_se = nav_se.apply(lambda x: round(x, 3))
col_se = col_se.apply(lambda x: round(x, 2))
return pd.DataFrame({col: col_se, "nav": nav_se}).sort_index()
def nav_m(self, col="balance", start=None, tradeDay=None, origin=0, T=1, t='a'):
"""
分钟净值,只能显示某一天的数据
:param col: 要统计的字段,比如 balance:净值 , margin: 保证金
:param start: 统计数据开始时间, 具体到分钟,任意格式
:param begin: 从什么时候开始显示
:param origin: 初始值,比如总资金100w,但实际只投入10w,那么就要将origin设为10w
:param T: 几分钟的周期, 应当能被 60 * 24 整除
:param t: {'d': 日盘, 'a': 全天}
:return:
"""
if 60 * 24 % T != 0:
raise ValueError(u"60 * 24 % T != 0")
        # start time of the statistics window
if start:
start = pd.to_datetime(start)
else:
start = self.df.datetime.min()
        # use this day as the NAV start date
df = self.df[self.df.datetime >= start]
        first = df[col].iat[0]  # initial value; must not be 0
        # offset derived from the initial value
delta = first - origin
if tradeDay:
tradeDay = pd.to_datetime(tradeDay).date()
else:
tradeDay = self.df.tradeDay.max().date()
        # slice out the segment to display
nav_df = df[df.tradeDay == tradeDay]
        if t == 'd':  # day session only
begintime = datetime.datetime.combine(tradeDay, datetime.time(8, 59, 30))
nav_df = nav_df[begintime <= nav_df.datetime]
        elif t == 'a':  # whole trading day
begindate = tt.futureTradeCalendar.get_tradeday_opentime(tradeDay)
begintime = datetime.datetime.combine(begindate, datetime.time(21))
nav_df = nav_df[begintime <= nav_df.datetime]
        # actual capital (column value minus the offset)
nav_se = nav_df.set_index("datetime")[col] - delta
        # resample to T-minute bars
nav_se = nav_se.resample(
"%sT" % T,
closed="right",
label="right",
).last().dropna()
        # normalise the starting NAV to 1
nav_se /= nav_se[0]
        # round to a fixed number of decimals
return nav_se.apply(lambda x: round(x, 3))
@classmethod
def m_create_table(cls, df, _type="markdown"):
"""
从pandas的DataFrame生成markdown格式表格
:param df:
:return:
"""
if _type == "markdown":
return cls._m_create_table_markdown(df)
elif _type == "vnpie":
return cls._m_create_table_vnpie(df)
else:
raise ValueError("unknow table type %s " % _type)
@classmethod
def _m_create_table_markdown(cls, df):
"""
从pandas的DataFrame生成markdown格式表格
:param df:
:return:
"""
if len(df) == 0:
return ''
datas = []
head = '|'.join(df.columns)
head = "|" + head + "|"
datas.append(head)
datas.append("|" + ' --: |' * len(df.columns))
for ix, row in df.iterrows():
data = '|'.join(map(lambda x: str(x), row.get_values()))
data = "|" + data + "|"
datas.append(data)
result = '\n'.join(datas)
# print result
return result
@classmethod
def _m_create_table_vnpie(cls, df):
"""
从pandas的DataFrame生成 vnpie 需要的表格格式
[table]
head|Name|Version
unit|Discuz!|X1
[/table]
:param df:
:return:
"""
if len(df) == 0:
return ''
df = df.copy()
df.columns = ["净值日", "权益", "净值", "涨幅"]
datas = []
head = '|'.join(df.columns)
# head = "|" + head + "|"
datas.append(head)
# datas.append("|" + ' --: |' * len(df.columns))
for ix, row in df.iterrows():
data = '|'.join(map(lambda x: str(x), row.get_values()))
# data = "|" + data + "|"
datas.append(data)
result = "[table]\n" + '\n'.join(datas) + "\n[/table]"
# print result
return result
# if len(df) == 0:
# return ''
# df = df[0:1]
# result = "[table]\n"
# for col in df.columns.values:
# result += col + '|' + "|".join(["%s" % i for i in df[col]]) + '\n'
#
# result += "[\\table]"
#
# return result
| lamter/slaveo | fund/futures.py | Python | gpl-3.0 | 6,315 |
# Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import datetime
import itertools
import operator
import os
import re
import sys
try:
from lxml import etree
except ImportError:
etree = None
from . import colorize, config, source, utils
ISSUE_KIND_ERROR = 'ERROR'
ISSUE_KIND_WARNING = 'WARNING'
ISSUE_KIND_INFO = 'INFO'
ISSUE_KIND_ADVICE = 'ADVICE'
# field names in rows of json reports
JSON_INDEX_DOTTY = 'dotty'
JSON_INDEX_FILENAME = 'file'
JSON_INDEX_HASH = 'hash'
JSON_INDEX_INFER_SOURCE_LOC = 'infer_source_loc'
JSON_INDEX_ISL_FILE = 'file'
JSON_INDEX_ISL_LNUM = 'lnum'
JSON_INDEX_ISL_CNUM = 'cnum'
JSON_INDEX_ISL_ENUM = 'enum'
JSON_INDEX_KIND = 'kind'
JSON_INDEX_LINE = 'line'
JSON_INDEX_PROCEDURE = 'procedure'
JSON_INDEX_PROCEDURE_ID = 'procedure_id'
JSON_INDEX_QUALIFIER = 'qualifier'
JSON_INDEX_QUALIFIER_TAGS = 'qualifier_tags'
JSON_INDEX_TYPE = 'bug_type'
JSON_INDEX_TRACE = 'bug_trace'
JSON_INDEX_TRACE_LEVEL = 'level'
JSON_INDEX_TRACE_FILENAME = 'filename'
JSON_INDEX_TRACE_LINE = 'line_number'
JSON_INDEX_TRACE_DESCRIPTION = 'description'
JSON_INDEX_VISIBILITY = 'visibility'
ISSUE_TYPES_URL = 'http://fbinfer.com/docs/infer-issue-types.html#'
def _text_of_infer_loc(loc):
return ' ({}:{}:{}-{}:)'.format(
loc[JSON_INDEX_ISL_FILE],
loc[JSON_INDEX_ISL_LNUM],
loc[JSON_INDEX_ISL_CNUM],
loc[JSON_INDEX_ISL_ENUM],
)
def text_of_report(report):
filename = report[JSON_INDEX_FILENAME]
kind = report[JSON_INDEX_KIND]
line = report[JSON_INDEX_LINE]
error_type = report[JSON_INDEX_TYPE]
msg = report[JSON_INDEX_QUALIFIER]
infer_loc = ''
if JSON_INDEX_INFER_SOURCE_LOC in report:
infer_loc = _text_of_infer_loc(report[JSON_INDEX_INFER_SOURCE_LOC])
return '%s:%d: %s: %s%s\n %s' % (
filename,
line,
kind.lower(),
error_type,
infer_loc,
msg,
)
def _text_of_report_list(project_root, reports, bugs_txt_path, limit=None,
formatter=colorize.TERMINAL_FORMATTER):
n_issues = len(reports)
if n_issues == 0:
if formatter == colorize.TERMINAL_FORMATTER:
out = colorize.color(' No issues found ',
colorize.SUCCESS, formatter)
return out + '\n'
else:
return 'No issues found'
text_errors_list = []
for report in reports[:limit]:
filename = report[JSON_INDEX_FILENAME]
line = report[JSON_INDEX_LINE]
source_context = ''
source_context = source.build_source_context(
os.path.join(project_root, filename),
formatter,
line,
)
indenter = source.Indenter() \
.indent_push() \
.add(source_context)
source_context = '\n' + unicode(indenter)
msg = text_of_report(report)
if report[JSON_INDEX_KIND] == ISSUE_KIND_ERROR:
msg = colorize.color(msg, colorize.ERROR, formatter)
elif report[JSON_INDEX_KIND] == ISSUE_KIND_WARNING:
msg = colorize.color(msg, colorize.WARNING, formatter)
elif report[JSON_INDEX_KIND] == ISSUE_KIND_ADVICE:
msg = colorize.color(msg, colorize.ADVICE, formatter)
text = '%s%s' % (msg, source_context)
text_errors_list.append(text)
error_types_count = {}
for report in reports:
t = report[JSON_INDEX_TYPE]
# assert failures are not very informative without knowing
# which assertion failed
if t == 'Assert_failure' and JSON_INDEX_INFER_SOURCE_LOC in report:
t += _text_of_infer_loc(report[JSON_INDEX_INFER_SOURCE_LOC])
if t not in error_types_count:
error_types_count[t] = 1
else:
error_types_count[t] += 1
max_type_length = max(map(len, error_types_count.keys())) + 2
sorted_error_types = error_types_count.items()
sorted_error_types.sort(key=operator.itemgetter(1), reverse=True)
types_text_list = map(lambda (t, count): '%s: %d' % (
t.rjust(max_type_length),
count,
), sorted_error_types)
text_errors = '\n\n'.join(text_errors_list)
if limit >= 0 and n_issues > limit:
text_errors += colorize.color(
('\n\n...too many issues to display (limit=%d exceeded), please ' +
'see %s or run `inferTraceBugs` for the remaining issues.')
% (limit, bugs_txt_path), colorize.HEADER, formatter)
issues_found = 'Found {n_issues}'.format(
n_issues=utils.get_plural('issue', n_issues),
)
msg = '{issues_found}\n\n{issues}\n\n{header}\n\n{summary}'.format(
issues_found=colorize.color(issues_found,
colorize.HEADER,
formatter),
issues=text_errors,
header=colorize.color('Summary of the reports',
colorize.HEADER, formatter),
summary='\n'.join(types_text_list),
)
return msg
def _is_user_visible(project_root, report):
kind = report[JSON_INDEX_KIND]
return kind in [ISSUE_KIND_ERROR, ISSUE_KIND_WARNING, ISSUE_KIND_ADVICE]
def print_and_save_errors(infer_out, project_root, json_report, bugs_out,
pmd_xml):
errors = utils.load_json_from_path(json_report)
errors = [e for e in errors if _is_user_visible(project_root, e)]
console_out = _text_of_report_list(project_root, errors, bugs_out,
limit=10)
utils.stdout('\n' + console_out)
plain_out = _text_of_report_list(project_root, errors, bugs_out,
formatter=colorize.PLAIN_FORMATTER)
with codecs.open(bugs_out, 'w',
encoding=config.CODESET, errors='replace') as file_out:
file_out.write(plain_out)
if pmd_xml:
xml_out = os.path.join(infer_out, config.PMD_XML_FILENAME)
with codecs.open(xml_out, 'w',
encoding=config.CODESET,
errors='replace') as file_out:
file_out.write(_pmd_xml_of_issues(errors))
def merge_reports_from_paths(report_paths):
json_data = []
for json_path in report_paths:
json_data.extend(utils.load_json_from_path(json_path))
return _sort_and_uniq_rows(json_data)
def _pmd_xml_of_issues(issues):
if etree is None:
print('ERROR: "etree" Python package not found.')
print('ERROR: You need to install it to use Infer with --pmd-xml')
sys.exit(1)
root = etree.Element('pmd')
root.attrib['version'] = '5.4.1'
root.attrib['date'] = datetime.datetime.now().isoformat()
for issue in issues:
        fully_qualifed_method_name = re.search(r'(.*)\(.*',
issue[JSON_INDEX_PROCEDURE_ID])
class_name = ''
package = ''
if fully_qualifed_method_name is not None:
# probably Java
info = fully_qualifed_method_name.groups()[0].split('.')
class_name = info[-2:-1][0]
method = info[-1]
package = '.'.join(info[0:-2])
else:
method = issue[JSON_INDEX_PROCEDURE]
file_node = etree.Element('file')
file_node.attrib['name'] = issue[JSON_INDEX_FILENAME]
violation = etree.Element('violation')
violation.attrib['begincolumn'] = '0'
violation.attrib['beginline'] = str(issue[JSON_INDEX_LINE])
violation.attrib['endcolumn'] = '0'
violation.attrib['endline'] = str(issue[JSON_INDEX_LINE] + 1)
violation.attrib['class'] = class_name
violation.attrib['method'] = method
violation.attrib['package'] = package
violation.attrib['priority'] = '1'
violation.attrib['rule'] = issue[JSON_INDEX_TYPE]
violation.attrib['ruleset'] = 'Infer Rules'
violation.attrib['externalinfourl'] = (
ISSUE_TYPES_URL + issue[JSON_INDEX_TYPE])
violation.text = issue[JSON_INDEX_QUALIFIER]
file_node.append(violation)
root.append(file_node)
return etree.tostring(root, pretty_print=True, encoding=config.CODESET)
def _sort_and_uniq_rows(l):
key = operator.itemgetter(JSON_INDEX_FILENAME,
JSON_INDEX_LINE,
JSON_INDEX_HASH,
JSON_INDEX_QUALIFIER)
l.sort(key=key)
groups = itertools.groupby(l, key)
# guaranteed to be at least one element in each group
return map(lambda (keys, dups): dups.next(), groups)
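# Shape of a single report row, for reference (keys follow the JSON_INDEX_*
# constants above; the values shown here are illustrative only):
#   {'file': 'Foo.java', 'line': 42, 'kind': 'ERROR',
#    'bug_type': 'NULL_DEREFERENCE', 'procedure': 'foo',
#    'qualifier': 'object x could be null ...', 'hash': '...'}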
| jsachs/infer | infer/lib/python/inferlib/issues.py | Python | bsd-3-clause | 9,001 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from activities.models import Activity
from django.utils.html import escape
import bleach
class Feed(models.Model):
user = models.ForeignKey(User)
date = models.DateTimeField(auto_now_add=True)
post = models.TextField(max_length=255)
parent = models.ForeignKey('Feed', null=True, blank=True)
likes = models.IntegerField(default=0)
comments = models.IntegerField(default=0)
class Meta:
verbose_name = _('Feed')
verbose_name_plural = _('Feeds')
ordering = ('-date',)
def __unicode__(self):
return self.post
@staticmethod
def get_feeds(from_feed=None):
if from_feed is not None:
feeds = Feed.objects.filter(parent=None, id__lte=from_feed)
else:
feeds = Feed.objects.filter(parent=None)
return feeds
@staticmethod
def get_feeds_after(feed):
feeds = Feed.objects.filter(parent=None, id__gt=feed)
return feeds
def get_comments(self):
return Feed.objects.filter(parent=self).order_by('date')
def calculate_likes(self):
likes = Activity.objects.filter(activity_type=Activity.LIKE, feed=self.pk).count()
self.likes = likes
self.save()
return self.likes
def get_likes(self):
likes = Activity.objects.filter(activity_type=Activity.LIKE, feed=self.pk)
return likes
def get_likers(self):
likes = self.get_likes()
likers = []
for like in likes:
likers.append(like.user)
return likers
def calculate_comments(self):
self.comments = Feed.objects.filter(parent=self).count()
self.save()
return self.comments
def comment(self, user, post):
feed_comment = Feed(user=user, post=post, parent=self)
feed_comment.save()
self.comments = Feed.objects.filter(parent=self).count()
self.save()
return feed_comment
def linkfy_post(self):
return bleach.linkify(escape(self.post))
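# Minimal usage sketch (assumes an existing django.contrib.auth User `user`):
#   feed = Feed.objects.create(user=user, post="Hello world")
#   reply = feed.comment(user, "First!")   # also updates feed.comments
#   feed.calculate_likes()                 # recounts Activity.LIKE rows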
| kngeno/gis_kenya | apps/feeds/models.py | Python | mit | 2,135 |
import sys
def parse_file():
    # First argument: file of index pairs (one pair per line after a header line).
    filename = sys.argv[1]
    with open(filename, "r") as pair_file:
        data = pair_file.read().splitlines()
    # Second argument: FASTA-style sequence file (header line, then sequence lines).
    seq_file = sys.argv[2]
    with open(seq_file, "r") as fasta_file:
        data2 = fasta_file.read().splitlines()
    # Concatenate every line after the header into a single sequence string.
    i = 1
    sequence = ""
    while i < len(data2):
        sequence += data2[i]
        i += 1
    # Skip the header line of the pairs file and collect both indices of each
    # pair, shifted up by one, into a flat list.
    c1 = []
    for i in range(len(data)):
        if i != 0:
            temp = data[i].split()
            c1.append(int(temp[0]) + 1)
            c1.append(int(temp[1]) + 1)
    return c1, len(sequence), sequence
def main():
c1, L, sequence = parse_file()
print(L)
print(sequence)
print(c1)
return 0
if __name__ == "__main__":
sys.exit(main())
| mayc2/PseudoKnot_research | Centroid.py | Python | mit | 702 |
# Copyright (c) 2018, Frappe and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
class QualityReview(Document):
def validate(self):
# fetch targets from goal
if not self.reviews:
for d in frappe.get_doc('Quality Goal', self.goal).objectives:
self.append('reviews', dict(
objective = d.objective,
target = d.target,
uom = d.uom
))
self.set_status()
def set_status(self):
# if any child item is failed, fail the parent
if not len(self.reviews or []) or any([d.status=='Open' for d in self.reviews]):
self.status = 'Open'
elif any([d.status=='Failed' for d in self.reviews]):
self.status = 'Failed'
else:
self.status = 'Passed'
def review():
day = frappe.utils.getdate().day
weekday = frappe.utils.getdate().strftime("%A")
month = frappe.utils.getdate().strftime("%B")
for goal in frappe.get_list("Quality Goal", fields=['name', 'frequency', 'date', 'weekday']):
if goal.frequency == 'Daily':
create_review(goal.name)
elif goal.frequency == 'Weekly' and goal.weekday == weekday:
create_review(goal.name)
elif goal.frequency == 'Monthly' and goal.date == str(day):
create_review(goal.name)
elif goal.frequency == 'Quarterly' and day==1 and get_quarter(month):
create_review(goal.name)
def create_review(goal):
goal = frappe.get_doc("Quality Goal", goal)
review = frappe.get_doc({
"doctype": "Quality Review",
"goal": goal.name,
"date": frappe.utils.getdate()
})
review.insert(ignore_permissions=True)
def get_quarter(month):
    # True only for the month that starts each quarter.
    return month in ["January", "April", "July", "October"]
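# `review` is intended to be invoked periodically by the Frappe scheduler
# (e.g. as a daily scheduled job); the exact hook wiring is assumed here and
# lives outside this module.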
| mhbu50/erpnext | erpnext/quality_management/doctype/quality_review/quality_review.py | Python | gpl-3.0 | 1,684 |
import os
INPUT = "in"
OUTPUT = "out"
LOW = 0
HIGH = 1
GPIO_PATH = "/sys/class/gpio" # The root of the GPIO directories
EXPANDER = "pcf8574a" # This is the expander that is used on CHIP for the XIOs
def get_xio_base():
'''
Determines the base of the XIOs on the system by iterating through the /sys/class/gpio
directory and looking for the expander that is used. It then looks for the
"base" file and returns its contents as an integer
'''
names = os.listdir(GPIO_PATH)
for name in names: # loop through child directories
prefix = GPIO_PATH + "/" + name + "/"
file_name = prefix + "label"
if os.path.isfile(file_name): # is there a label file in the directory?
with open(file_name) as label:
contents = label.read()
if contents.startswith(EXPANDER): # does label contain our expander?
file_name = prefix + "base"
with open(file_name) as base: # read the sibling file named base
contents = base.read()
return int(contents) # convert result to an int
def pinMode(pin,mode):
pinMapped = str(pin+get_xio_base())
os.system("sudo sh -c 'echo "+pinMapped+" > /sys/class/gpio/export' > /dev/null 2>&1")
os.system("sudo sh -c 'echo "+mode+" > /sys/class/gpio/gpio"+pinMapped+"/direction'")
# sys.stdout.write("XIO-P"+str(pin)+" set to "+str(mode)+".\n")
def pinModeNonXIO(pin, mode):
pinMapped = str(pin)
os.system("sudo sh -c 'echo "+pinMapped+" > /sys/class/gpio/export' > /dev/null 2>&1")
os.system("sudo sh -c 'echo "+mode+" > /sys/class/gpio/gpio"+pinMapped+"/direction'")
#sys.stdout.write("NONXIO"+str(pin)+" set to "+str(mode)+".\n")
def digitalWrite(pin,state):
pinMapped = str(pin+get_xio_base())
os.system("sudo sh -c 'echo "+str(state)+" > /sys/class/gpio/gpio"+pinMapped+"/value'")
def digitalWriteNonXIO(pin, state):
pinMapped = str(pin)
os.system("sudo sh -c 'echo " + str(state) + " > /sys/class/gpio/gpio" + pinMapped + "/value'")
def digitalReadNonXIO(pin):
pinMapped = str(pin)
value = os.popen("cat /sys/class/gpio/gpio" + pinMapped + "/value").read()
# todo: change to subprocess
#p = subprocess.Popen(("cat /sys/class/gpio/gpio" + pinMapped + "/value").split(), stdout=subprocess.PIPE)
#value, _ = p.communicate()
return int(value)
def digitalRead(pin):
pinMapped = str(pin+get_xio_base())
value = os.popen("cat /sys/class/gpio/gpio"+pinMapped+"/value").read()
return int(value)
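# Minimal usage sketch: configure XIO-P0 as an output and drive it high,
# then read XIO-P1 as an input.
#   pinMode(0, OUTPUT)
#   digitalWrite(0, HIGH)
#   pinMode(1, INPUT)
#   value = digitalRead(1)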
| henla464/WiRoc-Python-2 | chipGPIO/chipGPIO.py | Python | gpl-3.0 | 2,567 |
from setuptools import setup, find_packages
with open('README.rst') as f:
description = f.read()
setup(
name='knitty-gritty',
version='0.0.2',
description='A tool for managing knitting machine patterns',
long_description=description,
url='https://github.com/mhallin/knitty-gritty',
author='Magnus Hallin',
author_email='mhallin@gmail.com',
license='BSD',
packages=find_packages(),
install_requires=[
'click>=2.4,<2.5',
'Pillow>=2.5,<2.6',
'pyserial>=2.7,<2.8',
],
extras_require={
'dev': [
'flake8>=2.2,<2.3',
'mccabe>=0.2,<0.3',
'pep8>=1.5,<1.6',
'pip-tools>=0.3,<0.4',
'pyflakes>=0.8.1,<0.9',
'wheel>=0.24,<0.25',
],
},
entry_points={
'console_scripts': [
'knitty-gritty = knittygritty.main:cli'
],
},
)
| mhallin/knitty-gritty | setup.py | Python | bsd-3-clause | 918 |
# -*- coding: utf-8 -*-
# Django settings for myconf project.
from settings import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Absolute path to the root of the project.
# i.e. the folder where this file is.
ROOTDIR="/home/gdetrez/Sources/MyConf/myconf/"
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'development.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
| gdetrez/MyConf | fscons/development.py | Python | agpl-3.0 | 460 |
#
# Gramps - a GTK+/GNOME based genealogy program - Family Tree plugin
#
# Copyright (C) 2008,2009,2010,2014 Reinhard Mueller
# Copyright (C) 2010 lcc <lcc.mailaddress@gmail.com>
# Copyright (C) 2014 Gerald Kunzmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# $Id$
"""Reports/Graphical Reports/Family Tree"""
import colorsys
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
import gramps.gen.display.name
from gramps.gen.display.place import displayer as place_displayer
from gramps.gen.lib import Date, Event, EventType, FamilyRelType, Name
from gramps.gen.lib import StyledText, StyledTextTag, StyledTextTagType
import gramps.gen.plug.docgen
import gramps.gen.plug.menu
import gramps.gen.plug.report
from gramps.gen.plug.report.utils import pt2cm
import gramps.gui.plug.report
import gramps.gen.datehandler
from gramps.gen.const import GRAMPS_LOCALE as glocale
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
empty_birth = Event()
empty_birth.set_type(EventType.BIRTH)
empty_marriage = Event()
empty_marriage.set_type(EventType.MARRIAGE)
#------------------------------------------------------------------------
#
# FamilyTree report
#
#------------------------------------------------------------------------
class FamilyTree(gramps.gen.plug.report.Report):
def __init__(self, database, options, user):
gramps.gen.plug.report.Report.__init__(self, database, options, user)
menu = options.menu
family_id = menu.get_option_by_name('family_id').get_value()
self.center_family = database.get_family_from_gramps_id(family_id)
self.max_ancestor_generations = menu.get_option_by_name('max_ancestor_generations').get_value()
self.max_descendant_generations = menu.get_option_by_name('max_descendant_generations').get_value()
self.fit_on_page = menu.get_option_by_name('fit_on_page').get_value()
self.color = menu.get_option_by_name('color').get_value()
self.shuffle_colors = menu.get_option_by_name('shuffle_colors').get_value()
self.kekule_start_number = menu.get_option_by_name('kekule_start_number').get_value()
try:
self.callname = menu.get_option_by_name('callname').get_value()
except:
self.callname = FamilyTreeOptions.CALLNAME_DONTUSE
self.include_occupation = menu.get_option_by_name('include_occupation').get_value()
self.include_notes = menu.get_option_by_name('include_notes').get_value()
self.include_residence = menu.get_option_by_name('include_residence').get_value()
self.eventstyle_dead = menu.get_option_by_name('eventstyle_dead').get_value()
self.eventstyle_living = menu.get_option_by_name('eventstyle_living').get_value()
self.fallback_birth = menu.get_option_by_name('fallback_birth').get_value()
self.fallback_death = menu.get_option_by_name('fallback_death').get_value()
self.protect_private = menu.get_option_by_name('protect_private').get_value()
self.missinginfo = menu.get_option_by_name('missinginfo').get_value()
self.include_event_description = menu.get_option_by_name('include_event_description').get_value()
self.title = menu.get_option_by_name('title').get_value()
self.footer = menu.get_option_by_name('footer').get_value()
if not self.title:
name = self.__family_get_display_name(self.center_family)
self.title = StyledText(_("Family Tree for %s") % name)
style_sheet = self.doc.get_style_sheet()
self.line_width = pt2cm(style_sheet.get_draw_style("FTR-box").get_line_width())
# Size constants, all in unscaled cm:
# Size of shadow around boxes
self.shadow = style_sheet.get_draw_style("FTR-box").get_shadow_space()
# Offset from left
self.xoffset = self.line_width / 2
# Offset from top
tfont = style_sheet.get_paragraph_style("FTR-Title").get_font()
tfont_height = pt2cm(tfont.get_size()) * 1.2
self.yoffset = tfont_height * 2
# Space for footer
ffont = style_sheet.get_paragraph_style("FTR-Footer").get_font()
ffont_height = pt2cm(ffont.get_size()) * 1.2
self.ybottom = ffont_height
# Padding inside box == half size of shadow
self.box_pad = self.shadow / 2
# Gap between boxes == 2 times size of shadow
self.box_gap = 2 * self.shadow
# Width of a box (calculated in __build_*_tree)
self.box_width = 0
# Number of generations used (calculated in __build_*_tree)
self.ancestor_generations = 0
self.descendant_generations = 0
# Number of colors used so far
self.ancestor_max_color = 0
self.descendant_max_color = 0
self.descendants_tree = None
self.ancestors_tree = self.__build_ancestors_tree(self.center_family.get_handle(), 0, 0, 0, 0, self.kekule_start_number)
if self.ancestors_tree is None:
return
(self.descendants_tree, descendants_space) = self.__build_descendants_tree(self.center_family.get_child_ref_list(), 0, 0, 0)
needed_width = self.xoffset + (self.ancestor_generations + self.descendant_generations) * (self.box_width + 2 * self.box_gap) - 2 * self.box_gap + self.shadow
needed_height = self.yoffset + max(self.ancestors_tree['space'], descendants_space) + self.shadow + self.ybottom * 2
usable_width = self.doc.get_usable_width()
usable_height = self.doc.get_usable_height()
if self.fit_on_page:
self.scale = min(
usable_height / needed_height,
usable_width / needed_width)
if self.scale < 0.4:
user.warn(_('Paper too small'),
_('Some elements may not appear or be badly '
'rendered.'))
self.__scale_styles()
# Convert usable size into unscaled cm
usable_width = usable_width / self.scale
usable_height = usable_height / self.scale
else:
self.scale = 1
# Center the whole tree on the usable page area
self.xoffset += (usable_width - needed_width) / 2
self.yoffset += (usable_height - needed_height) / 2
# Since center person has an x of 0, add space needed by ancestors
self.xoffset += (self.ancestor_generations - 1) * (self.box_width + 2 * self.box_gap)
# Align ancestors part and descendants part vertically
root_a = self.ancestors_tree['top'] + self.ancestors_tree['height'] / 2
root_d = descendants_space / 2
if root_a > root_d:
self.yoffset_a = self.yoffset
self.yoffset_d = self.yoffset + root_a - root_d
else:
self.yoffset_a = self.yoffset + root_d - root_a
self.yoffset_d = self.yoffset
def write_report(self):
self.doc.start_page()
# Workaround for center_text not accepting StyledText
if isinstance(self.title, StyledText):
if not self.title.get_tags():
self.title = str(self.title)
self.doc.center_text('FTR-title',
self.title,
self.doc.get_usable_width() / 2,
0)
self.__print_ancestors_tree(self.ancestors_tree, 0)
if self.ancestors_tree:
anchor = self.yoffset_a + self.ancestors_tree['top'] + self.ancestors_tree['height'] / 2
if self.descendants_tree:
self.__print_descendants_tree(self.descendants_tree, anchor, 1)
self.doc.center_text('FTR-footer',
self.footer,
self.doc.get_usable_width() / 2,
self.doc.get_usable_height() - self.ybottom * self.scale)
self.doc.end_page()
def __build_ancestors_tree(self, family_handle, generation, color, top, center, kekule):
"""Build an in-memory data structure containing all ancestors"""
self.ancestor_generations = max(self.ancestor_generations, generation + 1)
# This is a dictionary containing all interesting data for a box that
# will be printed later:
# text: text to be printed in the box, as a list of (style, text) tuples
# top: top edge of the box in unscaled cm
# height: height of the box in unscaled cm
# space: total height that this box and all its ancestor boxes (left to
# it) need, in unscaled cm
# anchor: y position to where the line right of this box should end
# mother_node: dictionary representing the box with the mother's
# ancestors
# father_node: dictionary representing the box with the father's
# ancestors
family_node = {}
family = self.database.get_family_from_handle(family_handle)
if family.private and self.protect_private:
return None
father_handle = family.get_father_handle()
if father_handle:
father = self.database.get_person_from_handle(father_handle)
if father.private and self.protect_private:
father = None
else:
father = None
if father:
if kekule:
father_text = [('FTR-name', StyledText(str(kekule) + " ") + self.__person_get_display_name(father))] + [('FTR-data', p) for p in self.__person_get_display_data(father)]
else:
father_text = [('FTR-name', self.__person_get_display_name(father))] + [('FTR-data', p) for p in self.__person_get_display_data(father)]
father_height = self.__make_space(father_text)
father_family = father.get_main_parents_family_handle()
else:
father_text = []
father_height = 0
father_family = None
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.database.get_person_from_handle(mother_handle)
if mother.private and self.protect_private:
mother = None
else:
mother = None
if mother:
if kekule > 1:
mother_text = [('FTR-name', StyledText(str(kekule+1) + " ") + self.__person_get_display_name(mother))] + [('FTR-data', p) for p in self.__person_get_display_data(mother)]
else:
mother_text = [('FTR-name', self.__person_get_display_name(mother))] + [('FTR-data', p) for p in self.__person_get_display_data(mother)]
mother_height = self.__make_space(mother_text)
mother_family = mother.get_main_parents_family_handle()
else:
mother_text = []
mother_height = 0
mother_family = None
family_node['text'] = father_text + [('FTR-data', p) for p in self.__family_get_display_data(family)] + mother_text
family_node['color'] = color
family_node['height'] = self.__make_space(family_node['text'])
# If this box is small, align it centered, if it is too big for that,
# align it to the top.
family_node['top'] = max(top, center - family_node['height'] / 2)
father_node = None
if father_family and generation < self.max_ancestor_generations:
if (self.color == FamilyTreeOptions.COLOR_FEMALE_LINE) or \
(self.color == FamilyTreeOptions.COLOR_FIRST_GEN and generation == 0) or \
(self.color == FamilyTreeOptions.COLOR_SECOND_GEN and generation == 1) or \
(self.color == FamilyTreeOptions.COLOR_THIRD_GEN and generation == 2):
self.ancestor_max_color += 1
father_color = self.ancestor_max_color
else:
father_color = color
# Where should the father's box be placed?
father_top = top
father_center = family_node['top'] + father_height / 2
# Create father's box.
if kekule:
father_node = self.__build_ancestors_tree(father_family, generation + 1, father_color, father_top, father_center, kekule * 2)
else:
father_node = self.__build_ancestors_tree(father_family, generation + 1, father_color, father_top, father_center, 0)
if father_node:
if mother_family:
if self.database.get_family_from_handle(mother_family).private and self.protect_private:
pass
else:
# This box has father and mother: move it down so its center is
# just at the end of the father's ancestors space.
family_node['top'] = max(family_node['top'], top + father_node['space'] + self.box_gap / 2 - family_node['height'] / 2)
else:
# This box has only father: move it down to the center of the
# father's parents.
family_node['top'] = max(family_node['top'], father_node['top'] + father_node['height'] / 2 - father_height / 2)
mother_node = None
if mother_family and generation < self.max_ancestor_generations:
if (self.color == FamilyTreeOptions.COLOR_MALE_LINE) or \
(self.color == FamilyTreeOptions.COLOR_MALE_LINE_WEAK and family.get_relationship() != FamilyRelType.UNMARRIED) or \
(self.color == FamilyTreeOptions.COLOR_FIRST_GEN and generation == 0) or \
(self.color == FamilyTreeOptions.COLOR_SECOND_GEN and generation == 1) or \
(self.color == FamilyTreeOptions.COLOR_THIRD_GEN and generation == 2):
self.ancestor_max_color += 1
mother_color = self.ancestor_max_color
else:
mother_color = color
# Where should the mother's box be placed?
if father_handle:
# There is also a father: mother's box must be below the center
# of this box.
mother_top = family_node['top'] + family_node['height'] / 2 + self.box_gap / 2
else:
# There is no father: mother's box can use all the vertical
# space of this box.
mother_top = top
mother_center = family_node['top'] + family_node['height'] - mother_height / 2
# Create mother's box.
if kekule > 1:
mother_node = self.__build_ancestors_tree(mother_family, generation + 1, mother_color, mother_top, mother_center, (kekule+1)*2 )
else:
mother_node = self.__build_ancestors_tree(mother_family, generation + 1, mother_color, mother_top, mother_center, 0 )
if mother_node:
# If this family is only a mother, move her down to the center of
# her parents box.
if not father_node:
family_node['top'] = max(family_node['top'], mother_node['top'] + mother_node['height'] / 2 - (family_node['height'] - mother_height / 2))
bottom = family_node['top'] + family_node['height']
if father_node:
bottom = max(bottom, father_top + father_node['space'])
if mother_node:
bottom = max(bottom, mother_top + mother_node['space'])
family_node['space'] = bottom - top
family_node['father_node'] = father_node
family_node['mother_node'] = mother_node
if father_node:
father_node['anchor'] = family_node['top'] + father_height / 2
if mother_node:
mother_node['anchor'] = family_node['top'] + family_node['height'] - mother_height / 2
return family_node
def __build_descendants_tree(self, person_ref_list, generation, color, top):
"""Build an in-memory data structure containing all descendants"""
if generation >= self.max_descendant_generations:
return ([], 0)
self.descendant_generations = max(self.descendant_generations, generation + 1)
node_list = []
space = 0
for person_ref in person_ref_list:
if person_ref.private and self.protect_private:
continue
# This is a dictionary containing all interesting data for a box
# that contains a single person.
# text: text to be printed in the box, as a list of (style, text)
# tuples
# color: background color to be used for this box
# top: top edge of the box in unscaled cm
# height: height of the box in unscaled cm
# space: total height that this box, all the family boxes of this
# person and all its descendant boxes (right to it) need, in
# unscaled cm
# family_list: list of family_node style dictionaries containing
# families in which this person is a parent.
# If the person has at least one family in which it is parent, this
# box will actually not be printed, but all the boxes in the
# family_list.
person_node = {}
person = self.database.get_person_from_handle(person_ref.ref)
if person.private and self.protect_private:
continue
person_node['text'] = [('FTR-name', self.__person_get_display_name(person))] + [('FTR-data', p) for p in self.__person_get_display_data(person)]
if (self.color == FamilyTreeOptions.COLOR_FIRST_GEN and generation == 0) or \
(self.color == FamilyTreeOptions.COLOR_SECOND_GEN and generation == 1) or \
(self.color == FamilyTreeOptions.COLOR_THIRD_GEN and generation == 2):
self.descendant_max_color += 1
person_node['color'] = self.descendant_max_color
else:
person_node['color'] = color
person_node['top'] = top + space
person_node['height'] = self.__make_space(person_node['text'])
person_node['family_list'] = []
person_node['space'] = 0
family_top = person_node['top']
family_handles = person.get_family_handle_list()
for family_handle in family_handles:
family = self.database.get_family_from_handle(family_handle)
if family.private and self.protect_private:
continue
# This is a dictionary containing all interesting data for a
# box that contains the parents of a family.
# text: text to be printed in the box, as a list of (style,
# text) tuples
# color: background color for this box
# top: top edge of the box in unscaled cm
# height: height of the box in unscaled cm
# space: total height that this box and all the descendant
# boxes of this family (right to it) need, in unscaled cm
# child_list: list of person_node style dictionaries containing
# the children of this family.
family_node = {}
family_node['text'] = [('FTR-data', p) for p in self.__family_get_display_data(family)]
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
if person_ref.ref == father_handle:
spouse_handle = mother_handle
else:
spouse_handle = father_handle
if len(family_handles) > 1:
spouse_number = StyledText(chr(0x2160 + len(person_node['family_list'])) + ". ")
else:
spouse_number = StyledText("")
if spouse_handle is not None:
spouse = self.database.get_person_from_handle(spouse_handle)
family_node['text'] += [('FTR-name', spouse_number + self.__person_get_display_name(spouse))] + [('FTR-data', p) for p in self.__person_get_display_data(spouse)]
elif spouse_number:
family_node['text'] += [('FTR-name', spouse_number)]
# Include data of the actual person in the first family box.
if not person_node['family_list']:
family_node['text'] = person_node['text'] + family_node['text']
# Decide if a new color is needed.
if (self.color == FamilyTreeOptions.COLOR_MALE_LINE and person_ref.ref == mother_handle) or \
(self.color == FamilyTreeOptions.COLOR_MALE_LINE_WEAK and person_ref.ref == mother_handle and family.get_relationship() != FamilyRelType.UNMARRIED) or \
(self.color == FamilyTreeOptions.COLOR_FEMALE_LINE and person_ref.ref == father_handle):
self.descendant_max_color += 1
family_node['color'] = self.descendant_max_color
else:
family_node['color'] = person_node['color']
family_node['top'] = family_top
family_node['height'] = self.__make_space(family_node['text'])
(family_node['child_list'], children_space) = self.__build_descendants_tree(family.get_child_ref_list(), generation + 1, family_node['color'], family_top)
family_node['space'] = max(family_node['height'], children_space)
# Vertically center parents within the space their descendants
# use.
family_node['top'] += (family_node['space'] - family_node['height']) / 2
# This is where the next family will start
family_top += family_node['space'] + self.box_gap
person_node['family_list'].append(family_node)
if person_node['space'] > 0:
person_node['space'] += self.box_gap
person_node['space'] += family_node['space']
if person_node['space'] == 0:
person_node['space'] = person_node['height']
if person_node['family_list']:
person_node['top'] = person_node['family_list'][0]['top']
node_list.append(person_node)
space += person_node['space'] + self.box_gap
return (node_list, space - self.box_gap)
def __print_ancestors_tree(self, family_node, generation):
if family_node is None:
return
self.__draw_box(family_node['text'], family_node['color'], self.ancestor_max_color + 1, generation, self.yoffset_a + family_node['top'], family_node['height'])
for parent_node in [family_node['father_node'], family_node['mother_node']]:
if not parent_node:
continue
self.__print_ancestors_tree(parent_node, generation - 1)
y1 = self.yoffset_a + parent_node['anchor']
y2 = self.yoffset_a + parent_node['top'] + parent_node['height'] / 2
x1 = self.xoffset + generation * (self.box_width + 2 * self.box_gap)
x2 = x1 - self.box_gap
x3 = x2 - self.box_gap
self.doc.draw_line("FTR-line",
self.scale * x1,
self.scale * y1,
self.scale * x2,
self.scale * y1)
self.doc.draw_line("FTR-line",
self.scale * x2,
self.scale * y1,
self.scale * x2,
self.scale * y2)
self.doc.draw_line("FTR-line",
self.scale * x2,
self.scale * y2,
self.scale * x3,
self.scale * y2)
def __print_descendants_tree(self, person_node_list, anchor, generation):
if not person_node_list:
return
x3 = self.xoffset + (generation) * (self.box_width + 2 * self.box_gap)
x2 = x3 - self.box_gap
x1 = x2 - self.box_gap
self.doc.draw_line("FTR-line",
self.scale * x1,
self.scale * anchor,
self.scale * x2,
self.scale * anchor)
self.doc.draw_line("FTR-line",
self.scale * x2,
self.scale * min(self.yoffset_d + person_node_list[0]['top'] + person_node_list[0]['height'] / 2, anchor),
self.scale * x2,
self.scale * max(self.yoffset_d + person_node_list[-1]['top'] + person_node_list[-1]['height'] / 2, anchor))
for person_node in person_node_list:
self.doc.draw_line("FTR-line",
self.scale * x2,
self.scale * (self.yoffset_d + person_node['top'] + person_node['height'] / 2),
self.scale * x3,
self.scale * (self.yoffset_d + person_node['top'] + person_node['height'] / 2))
if person_node['family_list']:
last_bottom = 0
for family_node in person_node['family_list']:
if last_bottom > 0:
x = self.xoffset + generation * (self.box_width + 2 * self.box_gap) + self.box_width / 2
self.doc.draw_line("FTR-line",
self.scale * x,
self.scale * last_bottom,
self.scale * x,
self.scale * (self.yoffset_d + family_node['top']))
last_bottom = self.yoffset_d + family_node['top'] + family_node['height']
self.__draw_box(family_node['text'], family_node['color'], self.descendant_max_color + 1, generation, self.yoffset_d + family_node['top'], family_node['height'])
if family_node['child_list']:
self.__print_descendants_tree(
family_node['child_list'],
self.yoffset_d + family_node['top'] + family_node['height'] / 2,
generation + 1)
else:
self.__draw_box(person_node['text'], person_node['color'], self.descendant_max_color + 1, generation, self.yoffset_d + person_node['top'], person_node['height'])
# -------------------------------------------------------------------
# Scaling methods
# -------------------------------------------------------------------
def __scale_styles(self):
"""
Scale the styles for this report.
"""
style_sheet = self.doc.get_style_sheet()
self.__scale_font(style_sheet, "FTR-Title")
self.__scale_font(style_sheet, "FTR-Name")
self.__scale_font(style_sheet, "FTR-Data")
self.__scale_font(style_sheet, "FTR-Footer")
self.__scale_line_width(style_sheet, "FTR-box")
self.__scale_line_width(style_sheet, "FTR-line")
self.doc.set_style_sheet(style_sheet)
def __scale_font(self, style_sheet, style_name):
p = style_sheet.get_paragraph_style(style_name)
font = p.get_font()
font.set_size(font.get_size() * self.scale)
p.set_font(font)
style_sheet.add_paragraph_style(style_name, p)
def __scale_line_width(self, style_sheet, style_name):
g = style_sheet.get_draw_style(style_name)
g.set_shadow(g.get_shadow(), g.get_shadow_space() * self.scale)
g.set_line_width(g.get_line_width() * self.scale)
style_sheet.add_draw_style(style_name, g)
# -------------------------------------------------------------------
# Drawing methods
# -------------------------------------------------------------------
def __make_space(self, text):
h = 0
for (style_name, line) in text:
w = pt2cm(self.doc.string_width(self.__get_font(style_name), str(line)))
self.box_width = max(self.box_width, w)
h += self.__get_font_height(style_name) * 1.2
return h + 2 * self.box_pad
def __draw_box(self, text, color, color_count, generation, top, height):
if self.color == FamilyTreeOptions.COLOR_GENERATION:
col = self.descendant_generations - generation
col_count = self.ancestor_generations + self.descendant_generations
else:
col = color
col_count = color_count
if self.color != FamilyTreeOptions.COLOR_NONE:
self.__set_fill_color("FTR-box", col, col_count)
box_x = self.xoffset + generation * (self.box_width + 2 * self.box_gap)
box_y = top
self.doc.draw_box("FTR-box",
"",
self.scale * box_x,
self.scale * box_y,
self.scale * self.box_width,
self.scale * height)
x = self.scale * (box_x + self.box_pad)
y = self.scale * (box_y + self.box_pad)
for (style_name, line) in text:
# Workaround for draw_text not accepting StyledText
if isinstance(line, StyledText):
if not line.get_tags():
line = str(line)
self.doc.draw_text(style_name, line, x, y)
y += self.__get_font_height(style_name) * 1.2
def __get_font_height(self, style_name):
return pt2cm(self.__get_font(style_name).get_size())
def __get_font(self, style_name):
style_sheet = self.doc.get_style_sheet()
draw_style = style_sheet.get_draw_style(style_name)
paragraph_style_name = draw_style.get_paragraph_style()
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
return paragraph_style.get_font()
# -------------------------------------------------------------------
# Person name and data formatting methods
# -------------------------------------------------------------------
def __family_get_display_name(self, family):
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
father = self.database.get_person_from_handle(father_handle)
mother = self.database.get_person_from_handle(mother_handle)
if father:
father_name = self.__person_get_display_name(father)
else:
father_name = _("Unknown")
if mother:
mother_name = self.__person_get_display_name(mother)
else:
mother_name = _("Unknown")
return StyledText(_("%(father)s and %(mother)s") % {
'father': father_name,
'mother': mother_name})
def __person_get_display_name(self, person):
if person.get_primary_name().private and self.protect_private:
return _("Anonymous")
# Make a copy of the name object so we don't mess around with the real
# data.
n = Name(source=person.get_primary_name())
# Insert placeholders.
if self.missinginfo:
if not n.first_name:
n.first_name = "____________"
if not n.get_surname():
n.get_primary_surname().set_surname("____________")
if n.call:
if self.callname == FamilyTreeOptions.CALLNAME_REPLACE:
# Replace first name with call name.
n.first_name = n.call
elif self.callname == FamilyTreeOptions.CALLNAME_UNDERLINE_ADD:
if n.call not in n.first_name:
# Add call name to first name.
n.first_name = "\"%(call)s\" (%(first)s)" % {
'call': n.call,
'first': n.first_name}
text = gramps.gen.display.name.displayer.display_name(n)
tags = []
if n.call:
if self.callname == FamilyTreeOptions.CALLNAME_UNDERLINE_ADD:
if n.call in person.get_primary_name().first_name:
# Underline call name
callpos = text.find(n.call)
tags = [StyledTextTag(StyledTextTagType.UNDERLINE, True,
[(callpos, callpos + len(n.call))])]
return StyledText(text, tags)
def __person_get_display_data(self, person):
result = []
occupations = []
notes = []
baptism = None
residences = []
burial = None
cremation = None
for event_ref in person.get_event_ref_list():
if event_ref.private and self.protect_private:
continue
event = self.database.get_event_from_handle(event_ref.ref)
if event.private and self.protect_private:
continue
if event.get_type() == EventType.OCCUPATION:
occupations.append(event.description)
elif event.get_type() == EventType.BAPTISM:
baptism = event
elif event.get_type() == EventType.RESIDENCE:
residences.append(event)
elif event.get_type() == EventType.BURIAL:
burial = event
elif event.get_type() == EventType.CREMATION:
cremation = event
if self.include_occupation and occupations:
result.append(', '.join(occupations))
birth_ref = person.get_birth_ref()
death_ref = person.get_death_ref()
if birth_ref:
if birth_ref.private and self.protect_private:
birth = None
else:
birth = self.database.get_event_from_handle(birth_ref.ref)
elif not self.fallback_birth or baptism is None:
birth = empty_birth
else:
birth = None
if birth and birth.private and self.protect_private:
birth = None
if death_ref and not (death_ref.private and self.protect_private):
death = self.database.get_event_from_handle(death_ref.ref)
else:
death = None
if death and death.private and self.protect_private:
death = None
if death:
eventstyle = self.eventstyle_dead
else:
eventstyle = self.eventstyle_living
if eventstyle == FamilyTreeOptions.EVENTSTYLE_DATEPLACE:
if birth is not None:
result.extend(self.__event_get_display_data(birth))
elif self.fallback_birth and baptism is not None:
result.extend(self.__event_get_display_data(baptism))
if self.include_residence:
for residence in residences:
result.extend(self.__event_get_display_data(residence))
if death:
result.extend(self.__event_get_display_data(death))
elif self.fallback_death and burial is not None:
result.extend(self.__event_get_display_data(burial))
elif self.fallback_death and cremation is not None:
result.extend(self.__event_get_display_data(cremation))
elif eventstyle != FamilyTreeOptions.EVENTSTYLE_NONE:
if birth is None and self.fallback_birth:
birth = baptism
if death is None and self.fallback_death:
death = burial
if death is None and self.fallback_death:
death = cremation
if birth:
birth_text = self.__date_get_display_text(birth.get_date_object(), eventstyle)
else:
birth_text = None
if death:
death_text = self.__date_get_display_text(death.get_date_object(), eventstyle)
else:
death_text = None
if birth_text:
if death_text:
result.append("%s - %s" % (birth_text, death_text))
else:
result.append("* %s" % birth_text)
else:
if death_text:
result.append("\u271D %s" % death_text)
notelist = person.get_note_list()
note = ""
for notehandle in notelist:
noteobj = self.database.get_note_from_handle(notehandle)
note += noteobj.get()
note += ", "
# replace all new lines and carriage returns with spaces to prevent notes
# being written beyond the bottom edge of the drawn box or overwriting other text
# if they contain multiple lines
note = note.replace('\n', ' ').replace('\r', ' ')
# cut "," from end of the string and limit length of note to 50 characters
note_len = len(note)
if note_len > 50:
note = note[:48]
note += "..."
else:
note_len -= 2
note = note[:note_len]
if self.include_notes and note and note != "":
result.append(note)
return result
def __family_get_display_data(self, family):
marriage = None
divorce = None
residences = []
for event_ref in family.get_event_ref_list():
if event_ref.private and self.protect_private:
continue
event = self.database.get_event_from_handle(event_ref.ref)
if event.private and self.protect_private:
continue
if event.get_type() == EventType.MARRIAGE:
marriage = event
elif event.get_type() == EventType.RESIDENCE:
residences.append(event)
elif event.get_type() == EventType.DIVORCE:
divorce = event
if family.get_relationship() == FamilyRelType.MARRIED and not marriage:
marriage = empty_marriage
eventstyle = self.eventstyle_dead
father_handle = family.get_father_handle()
if father_handle:
father = self.database.get_person_from_handle(father_handle)
if not father.get_death_ref():
eventstyle = self.eventstyle_living
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.database.get_person_from_handle(mother_handle)
if not mother.get_death_ref():
eventstyle = self.eventstyle_living
if eventstyle == FamilyTreeOptions.EVENTSTYLE_NONE:
return []
elif eventstyle == FamilyTreeOptions.EVENTSTYLE_DATEPLACE:
result = []
if marriage:
result.extend(self.__event_get_display_data(marriage))
if self.include_residence:
for residence in residences:
result.extend(self.__event_get_display_data(residence))
if divorce:
result.extend(self.__event_get_display_data(divorce))
return result
else:
if marriage:
marriage_text = self.__date_get_display_text(marriage.get_date_object(), eventstyle)
else:
marriage_text = None
if divorce:
divorce_text = self.__date_get_display_text(divorce.get_date_object(), eventstyle)
else:
divorce_text = None
if marriage_text:
if divorce_text:
return ["\u26AD %s - %s" % (marriage_text, divorce_text)]
else:
return ["\u26AD %s" % marriage_text]
else:
if divorce_text:
return ["\u26AE %s" % divorce_text]
else:
return []
def __event_get_display_data(self, event):
if event.get_type() == EventType.BIRTH:
event_text = _("born")
elif event.get_type() == EventType.BAPTISM:
event_text = _("baptised")
elif event.get_type() == EventType.DEATH:
event_text = _("died")
elif event.get_type() == EventType.BURIAL:
event_text = _("buried")
elif event.get_type() == EventType.CREMATION:
event_text = _("cremated")
elif event.get_type() == EventType.MARRIAGE:
event_text = _("married")
elif event.get_type() == EventType.DIVORCE:
event_text = _("divorced")
elif event.get_type() == EventType.RESIDENCE:
event_text = _("resident")
date = event.get_date_object()
date_text = gramps.gen.datehandler.displayer.display(date)
if date.get_modifier() == Date.MOD_NONE and date.get_quality() == Date.QUAL_NONE:
if date.get_day_valid():
date_text = _("on %(ymd_date)s") % {'ymd_date': date_text}
elif date.get_month_valid():
date_text = _("in %(ym_date)s") % {'ym_date': date_text}
elif date.get_year_valid():
date_text = _("in %(y_date)s") % {'y_date': date_text}
if self.missinginfo:
if date.is_empty():
date_text = _("on %(placeholder)s") % {
'placeholder': "__________"}
elif not date.is_regular():
date_text = _("on %(placeholder)s (%(partial)s)") % {
'placeholder': "__________",
'partial': date_text}
place_handle = event.get_place_handle()
if place_handle:
place = self.database.get_place_from_handle(place_handle)
if place.private and self.protect_private:
place_text = ""
else:
place_text = place_displayer.display_event(self.database, event)
elif self.missinginfo:
place_text = "____________"
else:
place_text = ""
if place_text:
place_text = _("in %(place)s") % {'place': place_text}
if not date_text and not place_text:
return []
result = event_text
if date_text:
result += " " + date_text
if place_text:
result += " " + place_text
if self.include_event_description and event.description:
result += " " + _("(%(description)s)") % {
'description': event.description}
return [result]
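    # For example, the method above yields ["born on 1 Jan 1900 in Berlin
    # (baker)"] when date, place and event description are all available
    # (exact date wording depends on the configured Gramps date displayer).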
def __date_get_display_text(self, date, eventstyle):
if not date:
return None
elif eventstyle == FamilyTreeOptions.EVENTSTYLE_YEARONLY:
year = date.get_year()
if year:
return str(year)
else:
return None
else:
return gramps.gen.datehandler.displayer.display(date)
    # -------------------------------------------------------------------
    # Drawing style helpers
    # -------------------------------------------------------------------
def __set_fill_color(self, style_name, number, count):
if self.shuffle_colors:
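            # Spread consecutive numbers across well-separated hues
            # (intent inferred from the formula below).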
number = int(number * (count + 1) / int(pow(count, 0.5))) % count
(r, g, b) = colorsys.hsv_to_rgb((number + 1) / count, .20, 1.0)
(r, g, b) = int(255 * r), int(255 * g), int(255 * b)
style_sheet = self.doc.get_style_sheet()
draw_style = style_sheet.get_draw_style(style_name)
draw_style.set_fill_color((r, g, b))
style_sheet.add_draw_style(style_name, draw_style)
self.doc.set_style_sheet(style_sheet)
#------------------------------------------------------------------------
#
# FamilyTreeOptions
#
#------------------------------------------------------------------------
class FamilyTreeOptions(gramps.gen.plug.report.MenuReportOptions):
CALLNAME_DONTUSE = 0
CALLNAME_REPLACE = 1
CALLNAME_UNDERLINE_ADD = 2
EVENTSTYLE_NONE = 0
EVENTSTYLE_YEARONLY = 1
EVENTSTYLE_DATE = 2
EVENTSTYLE_DATEPLACE = 3
COLOR_NONE = 0
COLOR_GENERATION = 1
COLOR_FIRST_GEN = 2
COLOR_SECOND_GEN = 3
COLOR_THIRD_GEN = 4
COLOR_MALE_LINE = 5
COLOR_MALE_LINE_WEAK = 6
COLOR_FEMALE_LINE = 7
def __init__(self, name, dbase):
gramps.gen.plug.report.MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
        Add options to the menu for the family tree report.
"""
category_name = _("Tree Options")
family_id = gramps.gen.plug.menu.FamilyOption(_("Center Family"))
family_id.set_help(_("The center family for the tree"))
menu.add_option(category_name, "family_id", family_id)
max_ancestor_generations = gramps.gen.plug.menu.NumberOption(_("Ancestor Generations"), 5, 0, 50)
max_ancestor_generations.set_help(_("The number of ancestor generations to include in the tree"))
menu.add_option(category_name, "max_ancestor_generations", max_ancestor_generations)
max_descendant_generations = gramps.gen.plug.menu.NumberOption(_("Descendant Generations"), 10, 0, 50)
max_descendant_generations.set_help(_("The number of descendant generations to include in the tree"))
menu.add_option(category_name, "max_descendant_generations", max_descendant_generations)
kekule_start_number = gramps.gen.plug.menu.NumberOption(_("Kekule number of husband"), 0, 0, 16384)
kekule_start_number.set_help(_("The Kekule number of the husband (central family). Set 0 to not show Kekule numbers"))
menu.add_option(category_name, "kekule_start_number", kekule_start_number)
fit_on_page = gramps.gen.plug.menu.BooleanOption(_("Scale to fit on a single page"), True)
fit_on_page.set_help(_("Whether to scale to fit on a single page."))
menu.add_option(category_name, 'fit_on_page', fit_on_page)
color = gramps.gen.plug.menu.EnumeratedListOption(_("Color"), self.COLOR_NONE)
color.set_items([
(self.COLOR_NONE, _("No color")),
(self.COLOR_GENERATION, _("Generations")),
(self.COLOR_FIRST_GEN, _("First generation")),
(self.COLOR_SECOND_GEN, _("Second generation")),
(self.COLOR_THIRD_GEN, _("Third generation")),
(self.COLOR_MALE_LINE, _("Male line")),
(self.COLOR_MALE_LINE_WEAK, _("Male line and illegitimate children")),
(self.COLOR_FEMALE_LINE, _("Female line"))])
menu.add_option(category_name, "color", color)
shuffle_colors = gramps.gen.plug.menu.BooleanOption(_("Shuffle colors"), False)
shuffle_colors.set_help(_("Whether to shuffle colors or order them in rainbow fashion."))
menu.add_option(category_name, "shuffle_colors", shuffle_colors)
category_name = _("Content")
callname = gramps.gen.plug.menu.EnumeratedListOption(_("Use call name"), self.CALLNAME_DONTUSE)
callname.set_items([
(self.CALLNAME_DONTUSE, _("Don't use call name")),
(self.CALLNAME_REPLACE, _("Replace first name with call name")),
(self.CALLNAME_UNDERLINE_ADD, _("Underline call name in first name / add call name to first name"))])
# Uncomment the line below to activate callname handling, but you need
# to apply the patch in https://gramps-project.org/bugs/view.php?id=8003
# to make it work!
# menu.add_option(category_name, "callname", callname)
include_occupation = gramps.gen.plug.menu.BooleanOption(_("Include Occupation"), True)
menu.add_option(category_name, 'include_occupation', include_occupation)
include_notes = gramps.gen.plug.menu.BooleanOption(_("Include Notes"), True)
menu.add_option(category_name, 'include_notes', include_notes)
include_residence = gramps.gen.plug.menu.BooleanOption(_("Include Residence"), True)
menu.add_option(category_name, 'include_residence', include_residence)
eventstyle_dead = gramps.gen.plug.menu.EnumeratedListOption(_("Print event data (dead person)"), self.EVENTSTYLE_DATEPLACE)
eventstyle_dead.set_items([
(self.EVENTSTYLE_NONE, _("None")),
(self.EVENTSTYLE_YEARONLY, _("Year only")),
(self.EVENTSTYLE_DATE, _("Full date")),
(self.EVENTSTYLE_DATEPLACE, _("Full date and place"))])
menu.add_option(category_name, "eventstyle_dead", eventstyle_dead)
eventstyle_living = gramps.gen.plug.menu.EnumeratedListOption(_("Print event data (living person)"), self.EVENTSTYLE_DATEPLACE)
eventstyle_living.set_items([
(self.EVENTSTYLE_NONE, _("None")),
(self.EVENTSTYLE_YEARONLY, _("Year only")),
(self.EVENTSTYLE_DATE, _("Full date")),
(self.EVENTSTYLE_DATEPLACE, _("Full date and place"))])
menu.add_option(category_name, "eventstyle_living", eventstyle_living)
fallback_birth = gramps.gen.plug.menu.BooleanOption(_("Fall back to baptism if birth event missing"), True)
menu.add_option(category_name, 'fallback_birth', fallback_birth)
fallback_death = gramps.gen.plug.menu.BooleanOption(_("Fall back to burial or cremation if death event missing"), True)
menu.add_option(category_name, 'fallback_death', fallback_death)
protect_private = gramps.gen.plug.menu.BooleanOption(_("Protect private items"), True)
menu.add_option(category_name, 'protect_private', protect_private)
# Fixme: the following 2 options should only be available if "Full date
# and place" is selected above.
missinginfo = gramps.gen.plug.menu.BooleanOption(_("Print fields for missing information"), True)
missinginfo.set_help(_("Whether to include fields for missing information."))
menu.add_option(category_name, "missinginfo", missinginfo)
include_event_description = gramps.gen.plug.menu.BooleanOption(_("Include event description"), True)
menu.add_option(category_name, 'include_event_description', include_event_description)
category_name = _("Text Options")
title = gramps.gen.plug.menu.StringOption(_("Title text"), "")
menu.add_option(category_name, "title", title)
footer = gramps.gen.plug.menu.StringOption(_("Footer text"), "")
menu.add_option(category_name, "footer", footer)
    def make_default_style(self, default_style):
"""Make the default output style for the Ancestor Tree."""
## Paragraph Styles:
f = gramps.gen.plug.docgen.FontStyle()
f.set_size(13)
f.set_type_face(gramps.gen.plug.docgen.FONT_SANS_SERIF)
p = gramps.gen.plug.docgen.ParagraphStyle()
p.set_font(f)
p.set_alignment(gramps.gen.plug.docgen.PARA_ALIGN_CENTER)
p.set_bottom_margin(pt2cm(8))
p.set_description(_("The style used for the title."))
default_style.add_paragraph_style("FTR-Title", p)
f = gramps.gen.plug.docgen.FontStyle()
f.set_size(9)
f.set_type_face(gramps.gen.plug.docgen.FONT_SANS_SERIF)
p = gramps.gen.plug.docgen.ParagraphStyle()
p.set_font(f)
p.set_description(_("The style used for names."))
default_style.add_paragraph_style("FTR-Name", p)
f = gramps.gen.plug.docgen.FontStyle()
f.set_size(7)
f.set_type_face(gramps.gen.plug.docgen.FONT_SANS_SERIF)
p = gramps.gen.plug.docgen.ParagraphStyle()
p.set_font(f)
p.set_description(_("The style used for data (birth, death, marriage, divorce)."))
default_style.add_paragraph_style("FTR-Data", p)
f = gramps.gen.plug.docgen.FontStyle()
f.set_size(7)
f.set_type_face(gramps.gen.plug.docgen.FONT_SANS_SERIF)
p = gramps.gen.plug.docgen.ParagraphStyle()
p.set_font(f)
p.set_alignment(gramps.gen.plug.docgen.PARA_ALIGN_CENTER)
p.set_top_margin(pt2cm(8))
p.set_description(_("The style used for the footer."))
default_style.add_paragraph_style("FTR-Footer", p)
## Draw styles
g = gramps.gen.plug.docgen.GraphicsStyle()
g.set_paragraph_style("FTR-Title")
g.set_color((0, 0, 0))
g.set_fill_color((255, 255, 255))
g.set_line_width(0) # Workaround for a bug in ODFDoc
default_style.add_draw_style("FTR-title", g)
g = gramps.gen.plug.docgen.GraphicsStyle()
        g.set_shadow(1, 0.15)
        g.set_fill_color((255, 255, 255))
default_style.add_draw_style("FTR-box", g)
g = gramps.gen.plug.docgen.GraphicsStyle()
g.set_paragraph_style("FTR-Name")
g.set_fill_color((255, 255, 255))
g.set_line_width(0) # Workaround for a bug in ODFDoc
default_style.add_draw_style("FTR-name", g)
g = gramps.gen.plug.docgen.GraphicsStyle()
g.set_paragraph_style("FTR-Data")
g.set_fill_color((255, 255, 255))
g.set_line_width(0) # Workaround for a bug in ODFDoc
default_style.add_draw_style("FTR-data", g)
g = gramps.gen.plug.docgen.GraphicsStyle()
default_style.add_draw_style("FTR-line", g)
g = gramps.gen.plug.docgen.GraphicsStyle()
g.set_paragraph_style("FTR-Footer")
g.set_color((0, 0, 0))
g.set_fill_color((255, 255, 255))
g.set_line_width(0) # Workaround for a bug in ODFDoc
default_style.add_draw_style("FTR-footer", g)
|
gramps-project/addons-source
|
FamilyTree/FamilyTree.py
|
Python
|
gpl-2.0
| 54,078
|
#!/usr/bin/python2.7
from PySide.QtCore import *
from PySide.QtGui import *
from PySide import *
import sys
import monitor
from monitorthread import MonitorThread
from monitordata import MonitorData
class HMStandardItemModel( QStandardItemModel ):
monitor_data = None
values = None
previous_values = None
get_data = Signal()
def __init__( self, rows, columns, monitor_data ):
super( HMStandardItemModel, self ).__init__( rows, columns )
self.monitor_data = monitor_data
@QtCore.Slot()
def read_data( self ):
self.previous_values = self.values
self.values = self.monitor_data.retreive_data()
        for row in range( 0, len( self.values ) ):
            for column in range( 0, 4 ):
                # First read: populate every cell.
                if self.previous_values is None:
                    item = QStandardItem( self.values[row][column] )
                    self.setItem( row, column, item )
                # Later reads: only rewrite cells whose value changed,
                # guarding against rows the previous read didn't have yet.
                elif ( len( self.previous_values ) > row and
                       self.previous_values[row][column] != self.values[row][column] ):
                    item = QStandardItem( self.values[row][column] )
                    self.setItem( row, column, item )
class MainWindow( QMainWindow, monitor.Ui_MainWindow ):
md = None
model = None
mt = None
thread_exit_signal = Signal()
@QtCore.Slot()
def button_clicked( self ):
print "Bang!"
def stop_program( self ):
self.mt.finish_thread.emit()
self.mt.wait( 1000 )
self.close()
def __init__( self, parent=None ):
super( MainWindow, self ).__init__()
md = MonitorData()
self.model = HMStandardItemModel ( 0, 4, md )
self.mt = MonitorThread( md )
# Old style signal
# self.connect( self.mt, SIGNAL( 'read_data()' ), self.model.read_data )
# New style signal
self.mt.read_data.connect( self.model.read_data )
self.mt.start()
self.setupUi( self )
self.mt.showMessage.connect( self.statusbar.showMessage )
self.mt.garage_temperature.connect( self.garage_temperature.setText )
self.mt.sunroom_temperature.connect( self.sunroom_temperature.setText )
self.mt.door_state.connect( self.door_state.setText )
self.mt.kitchen_temperature.connect( self.kitchen_temperature.setText )
self.mt.outdoor_temperature.connect( self.outdoor_temperature.setText )
self.mt.power_controller_1_temperature.connect( self.power_controller_1_temperature.setText )
self.status_proxy_model = QSortFilterProxyModel()
self.computer_proxy_model = QSortFilterProxyModel()
self.HouseMonitor_proxy_model = QSortFilterProxyModel()
self.all_table_view.setModel( self.model )
self.status_proxy_model.setSourceModel( self.model )
self.computer_proxy_model.setSourceModel( self.model )
self.HouseMonitor_proxy_model.setSourceModel( self.model )
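        # Each proxy filters the shared model on column 0 (the device name):
        # hex addresses feed the status view, "HouseMonitor." rows the house
        # monitor view, and "OMAP" rows the computer view.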
self.status_proxy_model.setFilterRegExp( QRegExp( "^0x.*$", Qt.CaseSensitive, QRegExp.RegExp ) )
self.status_proxy_model.setFilterKeyColumn( 0 )
self.status_proxy_model.sort( 0, Qt.DescendingOrder )
self.status_proxy_model.setDynamicSortFilter( True )
self.HouseMonitor_proxy_model.setFilterRegExp( QRegExp( "^HouseMonitor\\..*$", Qt.CaseSensitive, QRegExp.RegExp ) )
self.HouseMonitor_proxy_model.setFilterKeyColumn( 0 )
self.HouseMonitor_proxy_model.sort( 0, Qt.DescendingOrder )
self.HouseMonitor_proxy_model.setDynamicSortFilter( True )
self.computer_proxy_model.setFilterRegExp( QRegExp( "^OMAP.*$", Qt.CaseSensitive, QRegExp.RegExp ) )
self.computer_proxy_model.setFilterKeyColumn( 0 )
self.computer_proxy_model.sort( 0, Qt.DescendingOrder )
self.computer_proxy_model.setDynamicSortFilter( True )
self.status_table_view.setModel( self.status_proxy_model )
self.house_monitor_table_view.setModel( self.HouseMonitor_proxy_model )
self.computer_table_view.setModel( self.computer_proxy_model )
self.all_table_view.horizontalHeader().setResizeMode( 0, QHeaderView.ResizeToContents )
self.status_table_view.horizontalHeader().setResizeMode( 0, QHeaderView.ResizeToContents )
self.house_monitor_table_view.horizontalHeader().setResizeMode( 0, QHeaderView.ResizeToContents )
self.computer_table_view.horizontalHeader().setResizeMode( 0, QHeaderView.ResizeToContents )
self.model.setHorizontalHeaderItem( 0, QStandardItem( 'Device' ) )
self.model.setHorizontalHeaderItem( 1, QStandardItem( 'Port' ) )
self.model.setHorizontalHeaderItem( 2, QStandardItem( 'Value' ) )
self.model.setHorizontalHeaderItem( 3, QStandardItem( 'Time' ) )
# Turn on and off devices
self.GarysBedLightOn.clicked.connect( self.mt.turnGarysLightOn )
self.GarysBedLightOff.clicked.connect( self.mt.turnGarysLightOff )
self.MarilynsBedLightOn.clicked.connect( self.mt.turnMarilynsLightOn )
self.MarilynsBedLightOff.clicked.connect( self.mt.turnMarilynsLightOff )
self.actionExit.triggered.connect( self.stop_program )
self.mt.finish_thread.connect( self.mt.finish_up )
app = QApplication( sys.argv )
form = MainWindow()
form.show()
app.exec_()
|
gary-pickens/HouseMonitor
|
housemonitor/qt/hm.py
|
Python
|
mit
| 5,321
|
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansOldItalic-Regular'
native_name = ''
def glyphs(self):
chars = []
chars.append(0x0000) #uniFEFF ????
chars.append(0x0020) #uni00A0 SPACE
chars.append(0x00A0) #uni00A0 NO-BREAK SPACE
chars.append(0x000D) #uni000D ????
chars.append(0xFEFF) #uniFEFF ZERO WIDTH NO-BREAK SPACE
chars.append(0x0000) #uniFEFF ????
chars.append(0x10301) #glyph00005 OLD ITALIC LETTER BE
chars.append(0x10302) #glyph00006 OLD ITALIC LETTER KE
chars.append(0x10303) #glyph00007 OLD ITALIC LETTER DE
chars.append(0x10304) #glyph00008 OLD ITALIC LETTER E
chars.append(0x10305) #glyph00009 OLD ITALIC LETTER VE
chars.append(0x10306) #glyph00010 OLD ITALIC LETTER ZE
chars.append(0x10307) #glyph00011 OLD ITALIC LETTER HE
chars.append(0x10308) #glyph00012 OLD ITALIC LETTER THE
chars.append(0x10309) #glyph00013 OLD ITALIC LETTER I
chars.append(0x1030A) #glyph00014 OLD ITALIC LETTER KA
chars.append(0x1030B) #glyph00015 OLD ITALIC LETTER EL
chars.append(0x1030C) #glyph00016 OLD ITALIC LETTER EM
chars.append(0x000D) #uni000D ????
chars.append(0x1030E) #glyph00018 OLD ITALIC LETTER ESH
chars.append(0x1030F) #glyph00019 OLD ITALIC LETTER O
chars.append(0x10310) #glyph00020 OLD ITALIC LETTER PE
chars.append(0x10311) #glyph00021 OLD ITALIC LETTER SHE
chars.append(0x10312) #glyph00022 OLD ITALIC LETTER KU
chars.append(0x10313) #glyph00023 OLD ITALIC LETTER ER
chars.append(0x10314) #glyph00024 OLD ITALIC LETTER ES
chars.append(0x10315) #glyph00025 OLD ITALIC LETTER TE
chars.append(0x10316) #glyph00026 OLD ITALIC LETTER U
chars.append(0x10317) #glyph00027 OLD ITALIC LETTER EKS
chars.append(0x10318) #glyph00028 OLD ITALIC LETTER PHE
chars.append(0x10319) #glyph00029 OLD ITALIC LETTER KHE
chars.append(0x1031A) #glyph00030 OLD ITALIC LETTER EF
chars.append(0x1031B) #glyph00031 OLD ITALIC LETTER ERS
chars.append(0x1031C) #glyph00032 OLD ITALIC LETTER CHE
chars.append(0x1031D) #glyph00033 OLD ITALIC LETTER II
chars.append(0x10300) #glyph00004 OLD ITALIC LETTER A
chars.append(0x0020) #uni00A0 SPACE
chars.append(0x10321) #glyph00036 OLD ITALIC NUMERAL FIVE
chars.append(0x10322) #glyph00037 OLD ITALIC NUMERAL TEN
chars.append(0x10323) #glyph00038 OLD ITALIC NUMERAL FIFTY
chars.append(0x1031E) #glyph00034 OLD ITALIC LETTER UU
chars.append(0x00A0) #uni00A0 NO-BREAK SPACE
chars.append(0x1030D) #glyph00017 OLD ITALIC LETTER EN
chars.append(0x10320) #glyph00035 OLD ITALIC NUMERAL ONE
chars.append(0xFEFF) #uniFEFF ZERO WIDTH NO-BREAK SPACE
return chars
|
davelab6/pyfontaine
|
fontaine/charsets/noto_chars/notosansolditalic_regular.py
|
Python
|
gpl-3.0
| 2,939
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 by Simmo Saan <simmo.saan@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# History:
#
# 2019-10-20, Simmo Saan <simmo.saan@gmail.com>
# version 0.2: improve script description
# 2019-10-18, Simmo Saan <simmo.saan@gmail.com>
# version 0.1: initial script
#
"""
Open buffers by full name, reopen recently closed buffers, open layout buffers
"""
# Adding handler for other full names in this script (only if necessary) or other scripts (preferred):
#
# def buffer_open_full_name_cb(data, signal, hashtable):
# full_name = hashtable["full_name"]
# noswitch = bool(int(hashtable.get("noswitch", "0")))
#
# if full_name == "my.buffer.full.name":
# # open my.buffer.full.name, considering noswitch if possible
# return weechat.WEECHAT_RC_OK_EAT # prevent other callbacks from handling this full name
#
# return weechat.WEECHAT_RC_OK # let other callbacks handle this full name
#
# weechat.hook_hsignal("buffer_open_full_name", "buffer_open_full_name_cb", "")
from __future__ import print_function
SCRIPT_NAME = "buffer_open"
SCRIPT_AUTHOR = "Simmo Saan <simmo.saan@gmail.com>"
SCRIPT_VERSION = "0.2"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Open buffers by full name, reopen recently closed buffers, open layout buffers"
SCRIPT_REPO = "https://github.com/sim642/buffer_open"
SCRIPT_COMMAND = SCRIPT_NAME
IMPORT_OK = True
try:
import weechat
except ImportError:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: https://weechat.org/")
IMPORT_OK = False
import re
import collections
SETTINGS = {
"layout_apply": (
"off",
"open closed layout buffers on /layout apply"
),
"max_closed": (
"10",
"maximum number of closed buffers to remember"
)
}
def log(string):
weechat.prnt("", "{}: {}".format(SCRIPT_NAME, string))
def error(string):
weechat.prnt("", "{}{}: {}".format(weechat.prefix("error"), SCRIPT_NAME, string))
def command_plugin(plugin, command):
weechat.command("", "/command {} {}".format(plugin, command))
def buffer_open_full_name_opened_cb(data, signal, hashtable):
full_name = hashtable["full_name"]
buffer = weechat.buffer_search("==", full_name)
if buffer:
# already open, do nothing
return weechat.WEECHAT_RC_OK_EAT
return weechat.WEECHAT_RC_OK
def buffer_open_full_name_unhandled_cb(data, signal, hashtable):
full_name = hashtable["full_name"]
error("no handler for opening buffer {}".format(full_name))
return weechat.WEECHAT_RC_OK
TABLE = {
# "full_name": ("plugin", "command"),
"core.secured_data": ("core", "/secure"),
"core.color": ("core", "/color"),
"core.weechat": ("core", ""), # do nothing because always open
"fset.fset": ("fset", "/fset"),
"irc.irc_raw": ("irc", "/server raw"),
"relay.relay.list": ("relay", "/relay"),
"relay.relay_raw": ("relay", "/relay raw"),
"script.scripts": ("script", "/script"),
"trigger.monitor": ("trigger", "/trigger monitor"),
"xfer.xfer.list": ("xfer", "/xfer"), # TODO: xfer DCC chat buffer
}
def buffer_open_full_name_table_cb(data, signal, hashtable):
full_name = hashtable["full_name"]
if full_name in TABLE:
plugin, command = TABLE[full_name]
command_plugin(plugin, command)
return weechat.WEECHAT_RC_OK_EAT
return weechat.WEECHAT_RC_OK
IRC_SERVER_RE = re.compile(r"^irc\.server\.(.+)$")
IRC_BUFFER_RE = re.compile(r"^irc\.([^.]+)\.(.+)$")
irc_server_connected_opens = collections.defaultdict(set)
def irc_server_open(server, noswitch):
command_plugin("irc", "/connect {}".format(server)) # /connect doesn't have -noswitch
def irc_buffer_open(server, name, noswitch):
hdata_irc_server = weechat.hdata_get("irc_server")
irc_servers = weechat.hdata_get_list(hdata_irc_server, "irc_servers")
irc_server = weechat.hdata_search(hdata_irc_server, irc_servers, "${irc_server.name} == " + server, 1)
chantypes = weechat.hdata_string(hdata_irc_server, irc_server, "chantypes")
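    # Channel names begin with one of the server's chantypes characters
    # (typically "#" or "&"); anything else is treated as a nick to query.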
is_channel = name[0] in chantypes
noswitch_flag = "-noswitch " if noswitch else ""
if is_channel:
command_plugin("irc", "/join {}-server {} {}".format(noswitch_flag, server, name))
else:
command_plugin("irc", "/query {}-server {} {}".format(noswitch_flag, server, name))
def irc_server_connected_cb(data, signal, server):
for name, noswitch in irc_server_connected_opens[server]:
irc_buffer_open(server, name, noswitch)
irc_server_connected_opens[server] = set()
return weechat.WEECHAT_RC_OK
def buffer_open_full_name_irc_cb(data, signal, hashtable):
full_name = hashtable["full_name"]
noswitch = bool(int(hashtable.get("noswitch", "0")))
m = IRC_SERVER_RE.match(full_name)
if m:
server = m.group(1)
irc_server_open(server, noswitch)
return weechat.WEECHAT_RC_OK_EAT
m = IRC_BUFFER_RE.match(full_name)
if m:
server = m.group(1)
name = m.group(2)
hdata_irc_server = weechat.hdata_get("irc_server")
irc_servers = weechat.hdata_get_list(hdata_irc_server, "irc_servers")
irc_server = weechat.hdata_search(hdata_irc_server, irc_servers, "${irc_server.name} == " + server, 1)
if irc_server:
is_connected = bool(weechat.hdata_integer(hdata_irc_server, irc_server, "is_connected"))
is_connecting = bool(weechat.hdata_pointer(hdata_irc_server, irc_server, "hook_connect"))
if is_connected:
irc_buffer_open(server, name, noswitch)
else:
irc_server_connected_opens[server].add((name, noswitch))
if not is_connecting:
irc_server_open(server, noswitch)
else:
error("unknown server {}".format(server))
return weechat.WEECHAT_RC_OK_EAT
return weechat.WEECHAT_RC_OK
def buffer_open_full_name(full_name, noswitch=None):
hashtable = {
"full_name": full_name
}
if noswitch is not None:
hashtable["noswitch"] = str(int(noswitch)) # must be str for API
weechat.hook_hsignal_send("buffer_open_full_name", hashtable)
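# Illustrative, hedged usage: another script could reopen a buffer without
# switching to it, e.g.
#   buffer_open_full_name("irc.server.libera", noswitch=True)
# (the server name is illustrative, not something this script defines)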
def command_cb(data, buffer, args):
args = args.split()
if len(args) >= 1 and args[0] == "closed":
if len(args) >= 2 and args[1] == "-list":
if buffer_closed_stack:
weechat.prnt("", "closed buffers (latest first):")
for full_name in reversed(buffer_closed_stack):
weechat.prnt("", " {}".format(full_name))
else:
weechat.prnt("", "no known closed buffers")
else:
if buffer_closed_stack:
noswitch = len(args) >= 2 and args[1] == "-noswitch"
full_name = buffer_closed_stack.pop()
buffer_open_full_name(full_name, noswitch=noswitch)
else:
error("no known closed buffers")
return weechat.WEECHAT_RC_ERROR
elif len(args) >= 1:
noswitch = args[0] == "-noswitch"
if noswitch:
if len(args) >= 2:
full_name = args[1]
else:
error("missing full name")
return weechat.WEECHAT_RC_ERROR
else:
full_name = args[0]
buffer_open_full_name(full_name, noswitch=noswitch)
else:
error("unknown subcommand")
return weechat.WEECHAT_RC_ERROR
return weechat.WEECHAT_RC_OK
buffer_closed_stack = []
def buffer_closing_cb(data, signal, buffer):
global buffer_closed_stack
full_name = weechat.buffer_get_string(buffer, "full_name")
buffer_closed_stack.append(full_name)
max_closed = int(weechat.config_get_plugin("max_closed"))
buffer_closed_stack = buffer_closed_stack[max(0, len(buffer_closed_stack) - max_closed):]
return weechat.WEECHAT_RC_OK
LAYOUT_APPLY_RE = re.compile(r"^/layout apply(?:\s+(\S+)(?:\s+buffers)?)?$")
def layout_apply_cb(data, buffer, command):
if weechat.config_string_to_boolean(weechat.config_get_plugin("layout_apply")):
m = LAYOUT_APPLY_RE.match(command)
if m:
layout_name = m.group(1) or "default"
hdata_layout = weechat.hdata_get("layout")
layouts = weechat.hdata_get_list(hdata_layout, "gui_layouts")
layout = weechat.hdata_search(hdata_layout, layouts, "${layout.name} == " + layout_name, 1)
if layout:
hdata_layout_buffer = weechat.hdata_get("layout_buffer")
layout_buffer = weechat.hdata_pointer(hdata_layout, layout, "layout_buffers")
while layout_buffer:
plugin_name = weechat.hdata_string(hdata_layout_buffer, layout_buffer, "plugin_name")
buffer_name = weechat.hdata_string(hdata_layout_buffer, layout_buffer, "buffer_name")
full_name = "{}.{}".format(plugin_name, buffer_name)
buffer = weechat.buffer_search("==", full_name)
if not buffer:
buffer_open_full_name(full_name, noswitch=True)
layout_buffer = weechat.hdata_move(hdata_layout_buffer, layout_buffer, 1)
return weechat.WEECHAT_RC_OK
if __name__ == "__main__" and IMPORT_OK:
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, "", ""):
weechat.hook_hsignal("10000|buffer_open_full_name", "buffer_open_full_name_opened_cb", "")
weechat.hook_hsignal("0|buffer_open_full_name", "buffer_open_full_name_unhandled_cb", "")
weechat.hook_hsignal("500|buffer_open_full_name", "buffer_open_full_name_table_cb", "")
weechat.hook_hsignal("500|buffer_open_full_name", "buffer_open_full_name_irc_cb", "")
weechat.hook_signal("irc_server_connected", "irc_server_connected_cb", "")
weechat.hook_command(SCRIPT_COMMAND, SCRIPT_DESC,
"""closed [-noswitch|-list]
|| [-noswitch] <full name>""",
""" closed: open most recently closed buffer
closed -list: list most recently closed buffers
-noswitch: try not to switch to new buffer
Without subcommand, this command opens a buffer with given full name.
Option "{prefix}.max_closed" specifies the number of most recently closed buffers that are remembered.
If option "{prefix}.layout_apply" is on and "/layout apply" is executed, closed buffers in the layout are opened.""".format(prefix="plugins.var.python.{}".format(SCRIPT_NAME)),
"""closed -noswitch|-list %-
|| -noswitch""".replace("\n", ""),
"command_cb", "")
weechat.hook_signal("buffer_closing", "buffer_closing_cb", "")
weechat.hook_command_run("/layout apply*", "layout_apply_cb", "")
for option, value in SETTINGS.items():
if not weechat.config_is_set_plugin(option):
weechat.config_set_plugin(option, value[0])
weechat.config_set_desc_plugin(option, "{} (default: \"{}\")".format(value[1], value[0]))
|
qguv/config
|
weechat/plugins/python/buffer_open.py
|
Python
|
gpl-3.0
| 11,668
|
from flask import Blueprint, request, jsonify
from geoip import geolite2
from prosoar.userconfig import get_uid_from_cookie, get_user_config_as_json
bp = Blueprint('settings', __name__)
@bp.route('/initial.js')
def initial_js():
return load(type='js')
@bp.route('/load')
def load(type='json'):
uid = get_uid_from_cookie()
type = request.values.get('as', type)
if type == 'js':
match = geolite2.lookup(request.remote_addr)
settings = \
'var initialSettings = ' + get_user_config_as_json(uid) + ';'
if match and match.location:
location = 'var initialLocation = {lon: ' + \
str(match.location[1]) + ', lat: ' + \
str(match.location[0]) + '};'
else:
location = 'var initialLocation = {lon: 10, lat: 50};'
return settings + location
elif type == 'json':
return jsonify(get_user_config_as_json(uid), encoded=False)
|
TobiasLohner/proSoar
|
prosoar/views/settings.py
|
Python
|
gpl-2.0
| 959
|
from celery.task import task
from celery.log import get_default_logger
from celery import group, chain, chord
from celeryconfig import config
import traceback
from helpers.parsers import parseDocument
log = get_default_logger()
def parseEnvelope(envelope, config, parsedDoc):
try:
parsedDoc.update(parseDocument(envelope))
except Exception as ex:
traceback.print_exc()
|
navnorth/LR-Data
|
src/tasks/parse.py
|
Python
|
apache-2.0
| 399
|
"""
Elfin data processing utilities module
"""
import inspect
import os
import sys
import code
import traceback as traceback_module
import json
import csv
import re
import numpy as np
RADII_TYPES = ['average_all', 'max_ca_dist', 'max_heavy_dist']
INF = float('inf')
TERM_TYPES = {'n', 'c'}
MOD_TYPES = {'single', 'hub'}
def check_mod_type(mod_type):
assert(mod_type.lower() in MOD_TYPES)
def check_term_type(term):
assert(term.lower() in TERM_TYPES)
def opposite_term(term):
return {'n':'c', 'c':'n'}.get(term.lower(), None)
def dict_diff(A, B):
    """Returns True if A and B differ, recursing into lists and dicts."""
    if type(A) == list:
        return len(A) != len(B) or \
            any(dict_diff(a, b) for a, b in zip(A, B))
    elif type(A) == dict:
        # Only keys present in both dicts are compared.
        return any(dict_diff(A[x], B[x]) for x in A if x in B)
    else:
        return A != B
# https://stackoverflow.com/questions/1036409/recursively-convert-python-object-graph-to-dictionary
def to_dict(obj, classkey=None):
if isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = to_dict(v, classkey)
return data
elif hasattr(obj, "_ast"):
return to_dict(obj._ast())
elif hasattr(obj, "__iter__") and not isinstance(obj, str):
return [to_dict(v, classkey) for v in obj]
elif hasattr(obj, "__dict__"):
data = dict([(key, to_dict(value, classkey))
for key, value in obj.__dict__.items()
if not callable(value) and not key.startswith('_')])
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
else:
return obj
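# Note: to_dict() above recursively flattens objects into plain dicts/lists,
# e.g. so the result can be passed straight to json.dumps().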
def get_rotation(angle_x=0, angle_y=0, angle_z=0):
"""https://en.wikipedia.org/wiki/Rotation_matrix
"""
radian_x = np.radians(angle_x)
radian_y = np.radians(angle_y)
radian_z = np.radians(angle_z)
rot_x = np.array([
[1, 0, 0],
[0, np.cos(radian_x), -np.sin(radian_x)],
[0, np.sin(radian_x), np.cos(radian_x)]
])
rot_y = np.array([
[np.cos(radian_y), 0, np.sin(radian_y)],
[0, 1, 0],
[-np.sin(radian_y), 0, np.cos(radian_y)]
])
rot_z = np.array([
[np.cos(radian_z), -np.sin(radian_z), 0],
[np.sin(radian_z), np.cos(radian_z), 0],
[0, 0, 1]
])
return np.matmul(a=np.matmul(a=rot_x, b=rot_y), b=rot_z)
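# A minimal sketch (not part of the original module): rotating the x unit
# vector 90 degrees about z should give approximately the y unit vector.
def _demo_get_rotation():
    v = np.array([1.0, 0.0, 0.0])
    rotated = np.matmul(get_rotation(angle_z=90), v)
    assert abs(rotated[0]) < 1e-9 and abs(rotated[1] - 1.0) < 1e-9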
def gen_pymol_txm(rot, tran):
"""Converts BioPython-style rotation and translation into pymol's
transformation matrix string.
Args:
- rot - Bio.PDB.Superimposer().rotran[0]
- tran - Bio.PDB.Superimposer().rotran[1]
Returns:
- _ - string of pymol's transformation matrix.
"""
rot_tp = np.transpose(rot)
rot_tp_tran = np.append(rot_tp, np.transpose([tran]), axis=1)
pymol_rot_mat = np.append(rot_tp_tran, [[0, 0, 0, 1]], axis=0)
return '[' + ', '.join(map(str, pymol_rot_mat.ravel())) + ']'
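# Hedged note: the string above is a row-major 4x4 matrix in the form PyMOL
# commands such as cmd.transform_selection() accept; the exact PyMOL call is
# an assumption, not something this module verifies.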
def int_ceil(float_num):
"""Ceil a float then turn it into an int."""
return int(np.ceil(float_num))
def int_floor(float_num):
"""Floor a float then turn it into an int."""
return int(np.floor(float_num))
def upsample(spec, pts):
"""Upsamples points to be the same number of points in specification. This
is code translated from Elfin core's C++ code.
"""
n_spec_points = len(spec)
more_points, fewer_points = (np.copy(spec), np.copy(pts))
# Compute longer shape total length
mp_total_length = 0.0
for i in range(1, n_spec_points):
mp_total_length += np.linalg.norm(more_points[i] - more_points[i - 1])
if mp_total_length == INF:
raise ValueError('Something fishy... mp_total_length is inf!')
fp_total_length = 0.0
for i in range(1, len(fewer_points)):
fp_total_length += np.linalg.norm(fewer_points[i] - fewer_points[i - 1])
    if fp_total_length == INF:
        raise ValueError('Something fishy... fp_total_length is inf!')
# Upsample fewer_points
upsampled = np.zeros([0, 3])
# First and last points are the same
upsampled = np.append(upsampled, [fewer_points[0]], axis=0)
mp_proportion = 0.0
fp_proportion = 0.0
mpi = 1
for i in range(1, len(fewer_points)):
base_fp_point = fewer_points[i - 1]
next_fp_point = fewer_points[i]
basefp_proportion = fp_proportion
fp_segment = np.linalg.norm(next_fp_point - base_fp_point) / fp_total_length
vec = next_fp_point - base_fp_point
fp_proportion += fp_segment
while mp_proportion <= fp_proportion and mpi < n_spec_points:
mp_segment = \
np.linalg.norm(more_points[mpi] - more_points[mpi - 1]) \
/ mp_total_length
if (mp_proportion + mp_segment) > fp_proportion:
break
mp_proportion += mp_segment
scale = (mp_proportion - basefp_proportion) / fp_segment
upsampled = np.append(upsampled, [base_fp_point + (vec * scale)], axis=0)
mpi += 1
# Sometimes the last node is automatically added
if len(upsampled) < n_spec_points:
upsampled = np.append(upsampled, [fewer_points[-1]], axis=0)
return upsampled
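# A minimal sketch (not part of the original module): a straight 2-point
# segment upsampled against a 3-point spec keeps its endpoints and gains the
# proportional midpoint.
def _demo_upsample():
    spec = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
    pts = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
    up = upsample(spec, pts)
    assert len(up) == len(spec)
    assert np.allclose(up[1], [1.0, 0.0, 0.0])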
def float_approximates(float_a, float_b, error=1e-6):
"""Returns whether float a is approximately b within error tolerance"""
return abs(float_a-float_b) < error
def check_collision(**kwargs):
"""Tests whether a to-be-added node is too close to any node in partially or
completely formed shape.
Args:
- xDB - a dict containing the xDB data. Should have originated from
read_json().
- collision_measure - one of RADII_TYPES
- nodes - string list of module names
- new_node - string name the node to be tested
- shape - Nx(3x1 numpy array) list of node centre-of-masses
Returns:
- bool - whether or not the new node, when added to the shape, causes
collision.
"""
xdb = kwargs.pop('xdb')
collision_measure = kwargs.pop('collision_measure')
nodes = kwargs.pop('nodes')
new_node = kwargs.pop('new_node')
shape = kwargs.pop('shape')
new_com = xdb['double_data'][nodes[-1]][new_node]['com_b']
# previous node PAIR (not just single node!) is inherently non-colliding
for i in range(0, len(nodes) - 2):
com_dist = np.linalg.norm(shape[i] - new_com)
collision_dist = \
xdb['single_data'] \
[new_node]['radii'][collision_measure] + \
xdb['single_data'] \
[nodes[i]]['radii'][collision_measure]
if com_dist < collision_dist:
return True
return False
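# Illustrative, hedged usage (module names and xdb layout are assumptions):
#   collides = check_collision(xdb=xdb,
#                              collision_measure='max_ca_dist',
#                              nodes=['D4', 'D14'],
#                              new_node='D54',
#                              shape=node_coms)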
def com_dist_info(xdb):
"""Computes centre-of-mass distance information.
Args:
- xdb - a dict containing the xdb data. Should have originated from
read_json().
Returns:
- (_, _, _) - tuple containing average, min and max values for centre-of-mass
distances.
"""
all_tx = xdb['n_to_c_tx']
dists = [np.linalg.norm(tx['tran']) for tx in all_tx]
return np.average(dists), min(dists), max(dists)
def read_csv_points(csv_file):
"""A wrapper of read_csv() but returns as list of numpy array points."""
pts = []
with open(csv_file, 'r') as file:
pts = np.asarray(
[[float(n) for n in re.split(', *| *', l.strip())] \
for l in file.read().split('\n') if len(l) > 0])
return pts
def read_csv(read_path, delim=','):
"""Reads a generic CSV file.
Args:
- read_path - string path to read from.
- delim - delimiter to use for the CSV format.
Returns:
- rows - list of rows where each row is a string list of cell values.
"""
rows = []
with open(read_path) as csv_file:
sreader = csv.reader(csv_file, delimiter=delim)
for row in sreader:
rows.append([c.strip() for c in row])
return rows
def save_points_as_csv(**kwargs):
"""Saves a list of points into a CSV file.
Args:
- points - Nx(3x1 numpy array) list to be saved.
- save_path - string path to save to.
- delim - delimiter to use for the CSV format.
"""
points = kwargs.pop('points')
save_path = kwargs.pop('save_path')
delim = kwargs.pop('delim', ' ')
with open(save_path, 'wb') as file:
writer = csv.writer(file, delimiter=delim)
for row in points:
writer.writerow(row)
def read_json(read_path):
    """Reads a JSON file and returns a dict."""
with open(read_path, 'r') as file:
return json.load(file)
def make_dir(directory):
"""Creates directory if does not exist."""
if not os.path.exists(directory):
os.makedirs(directory)
def pause_code(frame=None):
"""Pause execution and drop into interactive mode for debugging. This is
intended to be manually inserted into area of code where debugging is
needed.
Args:
- frame - specify frame in which the globals and locals are to be debugged.
"""
print('\n------------------pause_code()------------------')
if frame is None:
# Use current frame (one above the exception wrapper)
frame = inspect.currentframe().f_back
fi = inspect.getframeinfo(frame)
print('Where: {loc}:{line}'.format(loc=fi.filename, line=fi.lineno))
print('What: \n{code}'.format(code=fi.code_context[0]))
name_space = dict(frame.f_globals)
name_space.update(frame.f_locals)
code.interact(local=name_space)
def safe_exec(func, *args, **kwargs):
"""Execute func and drops into interactive mode for debugging if an exception
is raised.
Args:
- func - the function handle to be called.
- *args - args to be expanded for func.
"""
try:
func(*args, **kwargs)
except Exception as ex:
print('\n------------------safe_exec() caught exception------------------')
print(ex)
# Find last (failed) inner frame
_, _, traceback = sys.exc_info()
last_frame = \
traceback.tb_next.tb_next.tb_next \
if traceback and traceback.tb_next and traceback.tb_next.tb_next \
else traceback
if last_frame:
frame = last_frame.tb_frame
traceback_module.print_exc()
pause_code(frame)
else:
print('No frame to pause at...')
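# Illustrative, hedged usage (names are assumptions): wrap an entry point so
# any uncaught exception drops into an interactive prompt at the failing frame:
#   safe_exec(run_pipeline, spec_file, dry_run=True)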
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ == '__main__':
main()
|
joy13975/elfin
|
elfinpy/utilities.py
|
Python
|
mit
| 10,406
|
from time import time
import matplotlib.pyplot as plt
import numpy as np
plt.close("all")
start = time()
# Load the data
with open('Week2/pizza-train.json', 'r') as file:
lines = [line.split() for line in file.readlines()]
# Get request_text's
lines = [line[1:] for line in lines if line and line[0] == '"request_text":']
n_lines = len(lines)
# Find all distinct words in vocabulary
words = set()
for line in lines:
    words.update(line)
# Make a word-list and mapping to indices
words = sorted(list(words))
word2idx = {word: idx for idx, word in enumerate(words)}
n_words = len(words)
# Bag-of-words list of lists
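# e.g. with words == ["free", "pizza"], the request "pizza pizza" becomes the
# binary row [0, 1]: presence only, not counts.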
bow = [[0] * n_words for _ in range(n_lines)]
for line_nr, line in enumerate(lines):
    bow_row = bow[line_nr]
    for word in line:
        bow_row[word2idx[word]] = 1
end = time()
# Sanity check that we did it right
im = np.array(bow)
plt.imshow(im)
plt.show()
# Most popular word
s = sum(im)
max_ix = np.where(s == max(s))
print('Most popular word: ')
print(words[max_ix[0][0]])
print("Script time: {:.2f}s".format(end - start))
|
North-Guard/BigToolsComplicatedData
|
Week2/exercise_2_optimized.py
|
Python
|
mit
| 1,113
|
# Lint as: python3
# Copyright 2019 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Utilities for tracing tf.function inputs and outputs."""
# This file uses the following abbreviations:
# ref: reference – for the reference CompiledModule
# tar: target - for one of the target CompiledModules
# TODO(#4131) python>=3.7: Use postponed type annotations.
import copy
import glob
import inspect
import os
import pickle
import sys
import textwrap
from typing import Any, Callable, Dict, Sequence, Tuple, Union
from absl import logging
from iree.tf.support import module_utils
from iree.tf.support import tf_utils
import numpy as np
import tensorflow.compat.v2 as tf
NUMPY_LINEWIDTH = 120
INDENT = " " * 2
def _zfill_width(length: int) -> Union[int, None]:
return int(np.ceil(np.log10(length))) if length else None
def get_trace_dir(artifacts_dir: str, trace: "Trace") -> str:
trace_dir = os.path.join(artifacts_dir, trace.backend_id, "traces",
trace.function_name)
os.makedirs(trace_dir, exist_ok=True)
return trace_dir
class ModuleCall:
def __init__(self,
method: str,
inputs: Tuple[Any],
outputs: Tuple[Any],
serialized_inputs: Tuple[str],
serialized_outputs: Tuple[str],
rtol: float = 1e-6,
atol: float = 1e-6):
"""Records the details of a call to a CompiledModule."""
self.method = method
    # Deepcopy to safeguard against mutation.
self.inputs = copy.deepcopy(inputs)
if outputs is not None:
outputs = copy.deepcopy(outputs)
else:
outputs = tuple()
self.outputs = outputs if isinstance(outputs, tuple) else (outputs,)
self.serialized_inputs = serialized_inputs
self.serialized_outputs = serialized_outputs
self.rtol = rtol
self.atol = atol
def get_tolerances(self) -> Tuple[float, float]:
"""Gets the floating point tolerances associated with this call."""
return self.rtol, self.atol
def _get_shape_and_dtype(self, value: Any) -> str:
if isinstance(value, np.ndarray):
return tf_utils.get_shape_and_dtype(value, allow_non_mlir_dtype=True)
else:
return str(type(value))
def __str__(self):
prior_printoptions = np.get_printoptions()
np.set_printoptions(linewidth=NUMPY_LINEWIDTH)
header = f"Method: {self.method}"
inputs = "\n".join(
[textwrap.indent(str(value), INDENT) for value in self.inputs])
input_shapes = ", ".join(
self._get_shape_and_dtype(value) for value in self.inputs)
outputs = "\n".join(
[textwrap.indent(str(value), INDENT) for value in self.outputs])
output_shapes = ", ".join(
self._get_shape_and_dtype(value) for value in self.outputs)
tolerances = textwrap.indent(f"rtol={self.rtol}, atol={self.atol}", INDENT)
body = (f"Inputs: {input_shapes}\n{inputs}\n"
f"Outputs: {output_shapes}\n{outputs}"
f"\nTolerances:\n{tolerances}")
result = f"{header}\n{textwrap.indent(body, INDENT)}"
np.set_printoptions(**prior_printoptions)
return result
def serialize(self, call_dir: str) -> None:
"""Stores a serialized copy of this call.
Can be loaded via ModuleCall.load(call_dir)
Args:
call_dir: str, the path to the directory to serialize this call to.
"""
os.makedirs(call_dir, exist_ok=True)
metadata = {
"method": self.method,
"serialized_inputs": self.serialized_inputs,
"serialized_outputs": self.serialized_outputs,
"rtol": self.rtol,
"atol": self.atol
}
with open(os.path.join(call_dir, "metadata.pkl"), "wb") as f:
pickle.dump(metadata, f)
width = _zfill_width(len(self.inputs))
for i, value in enumerate(self.inputs):
path = os.path.join(call_dir, f"input_{str(i).zfill(width)}.pkl")
with open(path, "wb") as f:
pickle.dump(value, f)
width = _zfill_width(len(self.outputs))
for i, value in enumerate(self.outputs):
path = os.path.join(call_dir, f"output_{str(i).zfill(width)}.pkl")
with open(path, "wb") as f:
pickle.dump(value, f)
@staticmethod
def load(call_dir: str) -> "ModuleCall":
"""Loads and returns a trace serialized with ModuleCall.serialize."""
with open(os.path.join(call_dir, "metadata.pkl"), "rb") as f:
kwargs = pickle.load(f)
for result_type in ["input", "output"]:
key = f"{result_type}s" # inputs or outputs
kwargs[key] = []
files = glob.glob(os.path.join(call_dir, f"{result_type}_*.pkl"))
for filename in sorted(files):
with open(filename, "rb") as f:
kwargs[key].append(pickle.load(f))
# Convert to tuple to match python's return type for multiple results.
kwargs[key] = tuple(kwargs[key])
return ModuleCall(**kwargs)
class Trace:
"""Stores the inputs and outputs of a series of calls to a module."""
def __init__(self,
module: Union[module_utils.CompiledModule, None],
function: Union[Callable[["TracedModule"], None], None],
_load_dict: Dict[str, Any] = None):
"""Extracts metadata from module and function and initializes.
Example usage:
def forward_pass(...):
...
module = IreeCompiledModule(...)
trace = Trace(module, forward_pass)
forward_pass(TracedModule(module, trace))
Args:
module: the module who's outputs this trace will record.
function: the function that module will be traced on.
_load_dict: used internally
"""
if _load_dict is None:
# Extract metadata from module and function.
self.module_name = module.module_name
self.compiled_paths = module.compiled_paths
self.backend_name = module.backend_info.backend_name
self.backend_id = module.backend_info.backend_id
self.backend_driver = module.backend_info.driver
self.iree_serializable = module.iree_serializable()
self.tflite_serializable = module.tflite_serializable()
self.function_name = function.__name__
self.function_sourcefile = inspect.getsourcefile(function)
source, start_line = inspect.getsourcelines(function)
self.function_line_numbers = (start_line, start_line + len(source))
self.function_source = "".join(source)
self.calls = []
else:
self.module_name = _load_dict["module_name"]
self.compiled_paths = _load_dict["compiled_paths"]
self.backend_name = _load_dict["backend_name"]
self.backend_id = _load_dict["backend_id"]
self.backend_driver = _load_dict["backend_driver"]
self.iree_serializable = _load_dict["iree_serializable"]
self.tflite_serializable = _load_dict["tflite_serializable"]
self.function_name = _load_dict["function_name"]
self.function_sourcefile = _load_dict["function_sourcefile"]
self.function_line_numbers = _load_dict["function_line_numbers"]
self.function_source = _load_dict["function_source"]
self.calls = _load_dict["calls"]
def __str__(self):
header = (f"Trace of {self.module_name} compiled to '{self.backend_id}' "
f"on function '{self.function_name}':")
# Give each call a number so it's easier to compare between multiple traces.
calls = [f"{i + 1}. {str(call)}" for i, call in enumerate(self.calls)]
calls = textwrap.indent("\n".join(calls), prefix=INDENT)
return f"{header}\n{calls}"
def __iter__(self):
for call in self.calls:
yield call
def save_plaintext(self, trace_dir: str, summarize: bool = True) -> None:
"""Saves a human-readable string representation of this trace to disk.
Args:
trace_dir: str, path to the directory to save the trace in.
summarize: a bool controlling whether numpy should summarize the inputs
and outputs if they're large. Setting this to False is very slow for
large outputs.
"""
prior_printoptions = np.get_printoptions()
np.set_printoptions(
linewidth=NUMPY_LINEWIDTH,
threshold=None if summarize else sys.maxsize,
edgeitems=10) # Can show more items since they won't clutter the logs.
path = os.path.join(trace_dir, "log.txt")
with open(path, "w") as f:
f.write(str(self))
f.write("\n")
np.set_printoptions(**prior_printoptions)
def serialize(self, trace_dir: str) -> None:
"""Stores a serialized copy of this trace in trace_dir.
It can be loaded via `Trace.load(trace_dir)`.
Args:
trace_dir: str, path to the directory to serialize the trace to.
"""
compiled_paths = None
if self.compiled_paths is not None:
# Convert to a dict to avoid the issues with serializing defaultdicts.
compiled_paths = dict(self.compiled_paths)
# Python serialization.
metadata = {
"module_name": self.module_name,
"compiled_paths": compiled_paths,
"backend_name": self.backend_name,
"backend_id": self.backend_id,
"backend_driver": self.backend_driver,
"iree_serializable": self.iree_serializable,
"tflite_serializable": self.tflite_serializable,
"function_name": self.function_name,
"function_sourcefile": self.function_sourcefile,
"function_line_numbers": self.function_line_numbers,
"function_source": self.function_source
}
with open(os.path.join(trace_dir, "metadata.pkl"), "wb") as f:
pickle.dump(metadata, f)
width = _zfill_width(len(self.calls))
for i, call in enumerate(self.calls):
call_dir = os.path.join(trace_dir, f"call_{str(i).zfill(width)}")
call.serialize(call_dir)
# C++ benchmark serialization.
if self.iree_serializable or self.tflite_serializable:
entry_function = self.calls[0].method
compiled_path = self.compiled_paths[entry_function]
if self.iree_serializable:
serialized_inputs = self.calls[0].serialized_inputs
flagfile = [
f"--module_file={compiled_path}",
f"--driver={self.backend_driver}",
f"--entry_function={entry_function}",
] + [f"--function_input={input}" for input in serialized_inputs]
with open(os.path.join(trace_dir, "flagfile"), "w") as f:
f.writelines(line + "\n" for line in flagfile)
else:
with open(os.path.join(trace_dir, "graph_path"), "w") as f:
f.writelines(compiled_path + "\n")
@staticmethod
def load(trace_dir: str) -> "Trace":
"""Loads and returns a trace serialized with Trace.serialize.
Args:
trace_dir: str, path to the directory of the serialized trace.
Returns:
A Trace deserialized from trace_dir.
"""
with open(os.path.join(trace_dir, "metadata.pkl"), "rb") as f:
load_dict = pickle.load(f)
call_dirs = sorted(glob.glob(os.path.join(trace_dir, "call_*")))
calls = [ModuleCall.load(call_dir) for call_dir in call_dirs]
load_dict["calls"] = calls
return Trace(module=None, function=None, _load_dict=load_dict)
class TracedModule:
def __init__(self, module: module_utils.CompiledModule, trace: Trace):
"""Wraps a CompiledModule so that all inputs and outputs are traced.
The TracedModule returned will have an API almost identical to that of the
passed CompiledModule. The only changes is that if the keywords `rtol` or
`atol` are passed to one of the CompiledModule's methods, then they will be
used to set the tolerance for comparing that call to the same call in
another trace. So for example, calling `traced_module.add(a, b rtol=1e-8)`
would be the same as calling `module.add(a, b)`.
Args:
module: the CompiledModule to trace.
trace: the Trace to record calls to this module with.
"""
self._module = module
self._trace = trace
def _trace_call(self, method: module_utils._FunctionWrapper,
method_name: str):
"""Decorates a CompiledModule method to capture its inputs and outputs."""
def call(*args, **kwargs):
# Pop manually specified tolerances from the kwargs (if any).
tolerances = {}
tolerances["rtol"] = kwargs.pop("rtol", None)
tolerances["atol"] = kwargs.pop("atol", None)
# Only pass these to ModuleCall if they were specified by the user.
tolerances = {k: v for k, v in tolerances.items() if v is not None}
# Ensure the inputs are numpy inputs.
args = tf_utils.convert_to_numpy(args)
kwargs = tf_utils.convert_to_numpy(kwargs)
# Run the method and record the details of the call.
outputs = method(*args, **kwargs)
serialized_inputs, serialized_outputs = method.get_serialized_values()
self._trace.calls.append(
ModuleCall(method_name, args, outputs, serialized_inputs,
serialized_outputs, **tolerances))
return outputs
return call
def __getattr__(self, attr):
# Try to resolve it as an attr on self._module.
if not hasattr(self._module, attr):
raise AttributeError(f"The compiled module does not have attr '{attr}'")
module_attr = getattr(self._module, attr)
if not hasattr(module_attr, "__call__"):
# e.g. traced_module.backend
return module_attr
else:
# e.g. traced_module.simple_mul(a, b)
return self._trace_call(module_attr, method_name=attr)
def compare_traces(ref_trace: Trace,
tar_trace: Trace) -> Tuple[bool, Sequence[str]]:
traces_match = True
error_messages = []
# Check that all method invocations match.
ref_methods = [(call.method, call.rtol, call.atol) for call in ref_trace]
tar_methods = [(call.method, call.rtol, call.atol) for call in tar_trace]
if ref_methods != tar_methods:
# Raise a ValueError instead of returning False since this is an
# unexpected error.
raise ValueError(
"The reference and target traces have different call structures:\n"
f"Reference: {ref_methods}\nTarget: {tar_methods}")
for ref_call, tar_call in zip(ref_trace, tar_trace):
logging.info("Comparing calls to '%s'", ref_call.method)
rtol, atol = ref_call.get_tolerances()
inputs_match, error_message = tf_utils.check_same(ref_call.inputs,
tar_call.inputs, rtol,
atol)
if not inputs_match:
error_messages.append(error_message)
logging.error("Inputs did not match.")
outputs_match, error_message = tf_utils.check_same(ref_call.outputs,
tar_call.outputs, rtol,
atol)
if not outputs_match:
error_messages.append(error_message)
logging.error("Outputs did not match.")
calls_match = inputs_match and outputs_match
if not calls_match:
      logging.error("Comparison between '%s' and '%s' failed on method '%s'",
ref_trace.backend_id, tar_trace.backend_id, ref_call.method)
logging.error("Reference call '%s':\n%s", ref_trace.backend_id, ref_call)
logging.error("Target call '%s':\n%s", tar_trace.backend_id, tar_call)
traces_match = traces_match and calls_match
return traces_match, error_messages
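# Illustrative, hedged usage (variable names are assumptions):
#   traces_match, error_messages = compare_traces(ref_trace, tar_trace)
#   if not traces_match:
#       logging.error("\n".join(error_messages))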
|
google/iree
|
integrations/tensorflow/python_projects/iree_tf/iree/tf/support/trace_utils.py
|
Python
|
apache-2.0
| 15,437
|
import numpy as np
from numpy.testing import (assert_equal, assert_array_almost_equal,
assert_raises)
from skimage.transform._geometric import _stackcopy
from skimage.transform._geometric import GeometricTransform
from skimage.transform import (estimate_transform, matrix_transform,
SimilarityTransform, AffineTransform,
ProjectiveTransform, PolynomialTransform,
PiecewiseAffineTransform)
SRC = np.array([
[-12.3705, -10.5075],
[-10.7865, 15.4305],
[8.6985, 10.8675],
[11.4975, -9.5715],
[7.8435, 7.4835],
[-5.3325, 6.5025],
[6.7905, -6.3765],
[-6.1695, -0.8235],
])
DST = np.array([
[0, 0],
[0, 5800],
[4900, 5800],
[4900, 0],
[4479, 4580],
[1176, 3660],
[3754, 790],
[1024, 1931],
])
def test_stackcopy():
layers = 4
x = np.empty((3, 3, layers))
y = np.eye(3, 3)
_stackcopy(x, y)
for i in range(layers):
assert_array_almost_equal(x[..., i], y)
def test_estimate_transform():
for tform in ('similarity', 'affine', 'projective', 'polynomial'):
estimate_transform(tform, SRC[:2, :], DST[:2, :])
assert_raises(ValueError, estimate_transform, 'foobar',
SRC[:2, :], DST[:2, :])
def test_matrix_transform():
tform = AffineTransform(scale=(0.1, 0.5), rotation=2)
assert_equal(tform(SRC), matrix_transform(SRC, tform._matrix))
def test_similarity_estimation():
# exact solution
tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
assert_array_almost_equal(tform(SRC[:2, :]), DST[:2, :])
assert_equal(tform._matrix[0, 0], tform._matrix[1, 1])
assert_equal(tform._matrix[0, 1], - tform._matrix[1, 0])
# over-determined
tform2 = estimate_transform('similarity', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
assert_equal(tform2._matrix[0, 0], tform2._matrix[1, 1])
assert_equal(tform2._matrix[0, 1], - tform2._matrix[1, 0])
# via estimate method
tform3 = SimilarityTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_similarity_init():
# init with implicit parameters
scale = 0.1
rotation = 1
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = SimilarityTransform(tform._matrix)
assert_array_almost_equal(tform2.scale, scale)
assert_array_almost_equal(tform2.rotation, rotation)
assert_array_almost_equal(tform2.translation, translation)
# test special case for scale if rotation=0
scale = 0.1
rotation = 0
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.translation, translation)
def test_affine_estimation():
# exact solution
tform = estimate_transform('affine', SRC[:3, :], DST[:3, :])
assert_array_almost_equal(tform(SRC[:3, :]), DST[:3, :])
# over-determined
tform2 = estimate_transform('affine', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = AffineTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_affine_init():
# init with implicit parameters
scale = (0.1, 0.13)
rotation = 1
shear = 0.1
translation = (1, 1)
tform = AffineTransform(scale=scale, rotation=rotation, shear=shear,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.shear, shear)
assert_array_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = AffineTransform(tform._matrix)
assert_array_almost_equal(tform2.scale, scale)
assert_array_almost_equal(tform2.rotation, rotation)
assert_array_almost_equal(tform2.shear, shear)
assert_array_almost_equal(tform2.translation, translation)
def test_piecewise_affine():
tform = PiecewiseAffineTransform()
tform.estimate(SRC, DST)
# make sure each single affine transform is exactly estimated
assert_array_almost_equal(tform(SRC), DST)
assert_array_almost_equal(tform.inverse(DST), SRC)
def test_projective_estimation():
# exact solution
tform = estimate_transform('projective', SRC[:4, :], DST[:4, :])
assert_array_almost_equal(tform(SRC[:4, :]), DST[:4, :])
# over-determined
tform2 = estimate_transform('projective', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = ProjectiveTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_projective_init():
tform = estimate_transform('projective', SRC, DST)
# init with transformation matrix
tform2 = ProjectiveTransform(tform._matrix)
assert_array_almost_equal(tform2._matrix, tform._matrix)
def test_polynomial_estimation():
# over-determined
tform = estimate_transform('polynomial', SRC, DST, order=10)
assert_array_almost_equal(tform(SRC), DST, 6)
# via estimate method
tform2 = PolynomialTransform()
tform2.estimate(SRC, DST, order=10)
assert_array_almost_equal(tform2._params, tform._params)
def test_polynomial_init():
tform = estimate_transform('polynomial', SRC, DST, order=10)
# init with transformation parameters
tform2 = PolynomialTransform(tform._params)
assert_array_almost_equal(tform2._params, tform._params)
def test_polynomial_default_order():
tform = estimate_transform('polynomial', SRC, DST)
tform2 = estimate_transform('polynomial', SRC, DST, order=2)
assert_array_almost_equal(tform2._params, tform._params)
def test_polynomial_inverse():
assert_raises(Exception, PolynomialTransform().inverse, 0)
def test_union():
tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_array_almost_equal(tform._matrix, tform3._matrix)
tform1 = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_array_almost_equal(tform._matrix, tform3._matrix)
assert tform.__class__ == ProjectiveTransform
def test_geometric_tform():
tform = GeometricTransform()
assert_raises(NotImplementedError, tform, 0)
assert_raises(NotImplementedError, tform.inverse, 0)
assert_raises(NotImplementedError, tform.__add__, 0)
def test_invalid_input():
assert_raises(ValueError, ProjectiveTransform, np.zeros((2, 3)))
assert_raises(ValueError, AffineTransform, np.zeros((2, 3)))
assert_raises(ValueError, SimilarityTransform, np.zeros((2, 3)))
assert_raises(ValueError, AffineTransform,
matrix=np.zeros((2, 3)), scale=1)
assert_raises(ValueError, SimilarityTransform,
matrix=np.zeros((2, 3)), scale=1)
assert_raises(ValueError, PolynomialTransform, np.zeros((3, 3)))
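
# ---------------------------------------------------------------------------
# Illustrative sketch: the structural assertions in the similarity tests
# (matrix[0, 0] == matrix[1, 1] and matrix[0, 1] == -matrix[1, 0]) follow
# from the parameterisation below. A numpy-only check, assuming the usual
# homogeneous [s*R | t; 0 0 1] form (`_similarity_matrix` is hypothetical):
def _similarity_matrix(scale, rotation, translation):
    c, s = np.cos(rotation), np.sin(rotation)
    return np.array([[scale * c, -scale * s, translation[0]],
                     [scale * s, scale * c, translation[1]],
                     [0, 0, 1]])

_m = _similarity_matrix(0.1, 1.0, (1, 1))
assert _m[0, 0] == _m[1, 1] and _m[0, 1] == -_m[1, 0]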
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
almarklein/scikit-image
|
skimage/transform/tests/test_geometric.py
|
Python
|
bsd-3-clause
| 7,870
|
from ..lib.transformer import Transformer
from ..lib.instruction import Instruction
import csv, os
from LatLon import LatLon
# Tube station data from http://commons.wikimedia.org/wiki/London_Underground_geographic_maps/CSV
class Tube(Transformer):
__punctuation_characters = ['.', ',', '?', '!', ';', ':', '-']
def __init__(self):
self.__import_stations()
def can_handle_character(self, character):
return character in Tube.__punctuation_characters
def can_handle_contact(self, contact, clock):
return contact.has('tubestation')
def num_required_contacts(self):
return 1
def transform(self, character, contacts, clock):
contact = contacts[0]
station = contact.get('tubestation')
if contact.has_state('lasttube'):
station = contact.get_state('lasttube')
stations = self.__nearest_stations(station, len(Tube.__punctuation_characters))
index = Tube.__punctuation_characters.index(character)
destination = stations[index]
contact.set_state('lasttube', destination)
contact.set_busy_func('tube', lambda clk: clock.jump_forward(12) > clk)
return Instruction('tube', character, clock, contact, {'from_station': station, 'to_station': destination})
def __nearest_stations(self, station, qty):
point1 = self._stations[station]
distances = {}
for name in self._stations:
if name == station:
continue
point2 = self._stations[name]
distance = point1.distance(point2)
distances[name] = distance
        nearest_stations = sorted(distances, key=lambda name: distances[name])
return nearest_stations[:qty]
def __import_stations(self):
self._stations = {}
with open(os.path.join('resources', 'tube.csv'), 'r') as csvfile:
rows = csv.reader(csvfile)
next(rows)
for row in rows:
name = row[3]
lat = float(row[1])
lng = float(row[2])
self._stations[name] = LatLon(lat, lng)
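
# ---------------------------------------------------------------------------
# Illustrative sketch: __nearest_stations is a key-based sort over a distance
# dict. A pure-Python version with made-up planar coordinates, where
# math.hypot stands in for LatLon.distance:
if __name__ == "__main__":
    import math
    stations = {'A': (0, 0), 'B': (1, 1), 'C': (5, 5)}
    origin = stations['A']
    distances = {name: math.hypot(p[0] - origin[0], p[1] - origin[1])
                 for name, p in stations.items() if name != 'A'}
    assert sorted(distances, key=lambda n: distances[n])[:1] == ['B']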
|
tomwadley/sexting-xkeyscore
|
sexting/transformers/tube.py
|
Python
|
isc
| 2,150
|
# Copyright (c) Sebastian Scholz
# See LICENSE for details.
""" All the grant types that we support """
from enum import Enum
class GrantTypes(Enum):
""" The different grant types to request a token defined by the OAuth2 spec. """
RefreshToken = 'refresh_token'
AuthorizationCode = 'authorization_code'
ClientCredentials = 'client_credentials'
Password = 'password'
Implicit = 'implicit'
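
# Illustrative usage sketch: Enum members can be looked up by their spec
# value, e.g. when parsing the grant_type field of a token request.
if __name__ == "__main__":
    assert GrantTypes('refresh_token') is GrantTypes.RefreshToken
    assert GrantTypes.Password.value == 'password'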
|
Abestanis/TwistedOAuth2
|
txoauth2/granttypes.py
|
Python
|
mit
| 415
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jul 25, 2014
@author: noe
'''
import unittest
from pyemma.util import statistics
import numpy as np
class TestStatistics(unittest.TestCase):
def assertConfidence(self, sample, alpha, precision):
conf = statistics.confidence_interval(sample, alpha)
n_in = 0.0
for i in range(len(sample)):
if sample[i] > conf[0] and sample[i] < conf[1]:
n_in += 1.0
assert(alpha - (n_in/len(sample)) < precision)
def test_confidence_interval(self):
# exponential distribution
self.assertConfidence(np.random.exponential(size=10000), 0.5, 0.01)
self.assertConfidence(np.random.exponential(size=10000), 0.8, 0.01)
self.assertConfidence(np.random.exponential(size=10000), 0.95, 0.01)
# Gaussian distribution
self.assertConfidence(np.random.normal(size=10000), 0.5, 0.01)
self.assertConfidence(np.random.normal(size=10000), 0.8, 0.01)
self.assertConfidence(np.random.normal(size=10000), 0.95, 0.01)
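
# ---------------------------------------------------------------------------
# Illustrative sketch: the coverage check above can be reproduced with a
# plain percentile interval. This only illustrates the empirical-coverage
# idea; the internals of statistics.confidence_interval are assumed.
def _empirical_coverage(sample, alpha):
    lo, hi = np.percentile(sample, [50 * (1 - alpha), 50 * (1 + alpha)])
    return np.mean((sample > lo) & (sample < hi))
# e.g. abs(_empirical_coverage(np.random.normal(size=10000), 0.8) - 0.8)
# stays well below the 0.01 precision used in the tests above.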
if __name__ == "__main__":
unittest.main()
|
markovmodel/PyEMMA
|
pyemma/util/tests/statistics_test.py
|
Python
|
lgpl-3.0
| 1,879
|
"""
Utility functions for logging messages. Exports:
get_logger: Return a logger with a specified severity threshold.
"""
import logging
import sys
LEVELS = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL
}
LOG_LEVEL = "log-level"
def get_logger(stream, level):
"""
Return a Logger instance with the specified severity threshold.
Return a Logger instance with the specified severity threshold, where the
threshold level should be a key of the 'LEVELS' dictionary. Log messages
will contain the current time and message severity level.
stream: Output stream to which the logger will write messages.
level: Severity threshold level, which should be a key of the 'LEVELS'
dictionary.
"""
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.StreamHandler(stream)
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.setLevel(LEVELS[level])
logger.addHandler(handler)
return logger
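
# Illustrative usage sketch:
if __name__ == "__main__":
    logger = get_logger(sys.stderr, "info")
    logger.info("piquant logger configured at 'info' level")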
|
COMBINE-lab/piquant
|
piquant/log.py
|
Python
|
mit
| 1,181
|
# -*- coding: utf-8 -*-
# copyright 2016 Camptocamp
# license agpl-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import json
from odoo.tests import common
from odoo.addons.queue_job.fields import JobEncoder, JobDecoder
class TestJson(common.TransactionCase):
def test_encoder(self):
value = ['a', 1, self.env.ref('base.user_root')]
value_json = json.dumps(value, cls=JobEncoder)
expected = ('["a", 1, {"_type": "odoo_recordset", '
'"model": "res.users", "ids": [1]}]')
self.assertEqual(value_json, expected)
def test_decoder(self):
value_json = ('["a", 1, {"_type": "odoo_recordset",'
'"model": "res.users", "ids": [1]}]')
expected = ['a', 1, self.env.ref('base.user_root')]
value = json.loads(value_json, cls=JobDecoder, env=self.env)
self.assertEqual(value, expected)
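
# ---------------------------------------------------------------------------
# Illustrative sketch: the round-trip above relies on tagging values that are
# not JSON-native with a "_type" marker. A stdlib-only version of that
# pattern (this is not the queue_job implementation):
class _TaggedEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, set):
            return {"_type": "set", "values": sorted(obj)}
        return super(_TaggedEncoder, self).default(obj)

assert json.dumps({1, 2}, cls=_TaggedEncoder) == \
    '{"_type": "set", "values": [1, 2]}'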
|
leorochael/queue
|
queue_job/tests/test_json_field.py
|
Python
|
agpl-3.0
| 897
|
from app.schema_validation.definitions import uuid, datetime
post_create_user_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST schema for creating user",
"type": "object",
"properties": {
'email': {"type": "string"},
'name': {"type": "string"},
'active': {"type": "boolean"},
'access_area': {"type": "string"},
},
"required": ['email']
}
post_update_user_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST schema for updating user",
"type": "object",
"properties": {
'name': {"type": "string"},
'active': {"type": "boolean"},
'access_area': {"type": "string"},
'last_login': {"type": "date-time"},
'session_id': {"type": "string"},
'ip': {"type": "string"},
},
"required": []
}
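
# Illustrative usage sketch, assuming the `jsonschema` package is available
# (an assumption; it is not imported by this module):
#
#     from jsonschema import validate
#     validate({'email': 'a@example.org'}, post_create_user_schema)  # passes
#     validate({'name': 'no email'}, post_create_user_schema)        # raises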
|
NewAcropolis/api
|
app/routes/users/schemas.py
|
Python
|
mit
| 881
|
# -*- coding: utf-8 -*-
"""Simple functions for dealing with posts, replies, votes and subscriptions
within Redis and MongoDB
:license: AGPL v3, see LICENSE for more details
:copyright: 2014-2021 Joe Doherty
"""
# 3rd party imports
from flask import current_app as app, url_for
from jinja2.filters import do_capitalize
# Pjuu imports
from pjuu import mongo as m, redis as r, celery, storage
from pjuu.lib import keys as k, timestamp, get_uuid
from pjuu.lib.alerts import BaseAlert, AlertManager
from pjuu.lib.pagination import Pagination
from pjuu.lib.parser import parse_post
from pjuu.lib.uploads import process_upload
# Allow changing the maximum length of a post
MAX_POST_LENGTH = 500
class CantVoteOnOwn(Exception):
"""Raised when a user tries to vote on a post they authored
"""
pass
class AlreadyVoted(Exception):
"""Raised when a user tries to vote on a post they have already voted on
"""
pass
class CantFlagOwn(Exception):
"""Can't flag your own post."""
pass
class AlreadyFlagged(Exception):
"""You can't flag a post twice."""
pass
class SubscriptionReasons(object):
"""Constants describing subscriptions to a post
"""
# You are the original poster
POSTER = 1
# You commented on the post
COMMENTER = 2
# You have been tagged in the post
TAGEE = 3
class PostingAlert(BaseAlert):
"""Base form for all alerts used within the posts package.
"""
def __init__(self, user_id, post_id):
# Call the BaseAlert __init__ method
super(PostingAlert, self).__init__(user_id)
self.post_id = post_id
def url(self):
"""Get the user object or the original author for the post.
Eg. Bob may have tagged you in the post but Brian posted the original
post. This is needed to generate the URL.
"""
        # Get the post author's username so that we can build the URL
author = m.db.posts.find_one({'_id': self.post_id},
{'username': True, '_id': False})
# Return the username or None
return url_for('posts.view_post', username=author.get('username'),
post_id=self.post_id)
def verify(self):
"""Overwrites the verify() of BaseAlert to check the post exists
"""
return m.db.users.find_one({'_id': self.user_id}, {}) and \
m.db.posts.find_one({'_id': self.post_id}, {})
class TaggingAlert(PostingAlert):
"""Form of all tagging alert messages
"""
def prettify(self, for_uid=None):
return '<a href="{0}">{1}</a> tagged you in a <a href="{2}">post</a>' \
.format(url_for('users.profile',
username=self.user.get('username')),
do_capitalize(self.user.get('username')), self.url())
class CommentingAlert(PostingAlert):
"""Form of all commenting alert messages
"""
def prettify(self, for_uid=None):
# Let's try and work out why this user is being notified of a comment
reason = subscription_reason(for_uid, self.post_id)
if reason == SubscriptionReasons.POSTER:
sr = 'posted'
elif reason == SubscriptionReasons.COMMENTER:
sr = 'commented on'
elif reason == SubscriptionReasons.TAGEE:
sr = 'were tagged in'
else:
# This should never really happen but let's play ball eh?
sr = 'are subscribed to'
return '<a href="{0}">{1}</a> ' \
'commented on a <a href="{2}">post</a> you {3}' \
.format(url_for('users.profile',
username=self.user.get('username')),
do_capitalize(self.user.get('username')), self.url(),
sr)
def create_post(user_id, username, body, reply_to=None, upload=None,
permission=k.PERM_PUBLIC):
"""Creates a new post
    This handles both posts and what used to be called comments. If the
    reply_to field is not None then the post will be treated as a comment.
You will need to make sure the reply_to post exists.
:param user_id: The user id of the user posting the post
:type user_id: str
:param username: The user name of the user posting (saves a lookup)
:type username: str
:param body: The content of the post
:type body: str
:param reply_to: The post id of the post this is a reply to if any
:type reply_to: str
    :param upload: An optional file upload to attach to the post
    :param permission: Who can see/interact with the post you are posting
    :type permission: int
    :returns: The post id of the new post
    :rtype: str or None
"""
# Get a new UUID for the post_id ("_id" in MongoDB)
post_id = get_uuid()
# Get the timestamp, we will use this to populate users feeds
post_time = timestamp()
post = {
'_id': post_id, # Newly created post id
'user_id': user_id, # User id of the poster
'username': username, # Username of the poster
'body': body, # Body of the post
'created': post_time, # Unix timestamp for this moment in time
'score': 0, # Atomic score counter
}
if reply_to is not None:
        # If this is a reply it must have this property
post['reply_to'] = reply_to
else:
        # Only top-level posts track a comment count; replies don't
post['comment_count'] = 0
# Set the permission a user needs to view
post['permission'] = permission
if upload:
# If there is an upload along with this post it needs to go for
# processing.
# process_upload() can throw an Exception of UploadError. We will let
# it fall through as a 500 is okay I think.
# TODO: Turn this in to a Queue task at some point
filename, animated_filename = process_upload(upload)
if filename is not None:
# If the upload process was okay attach the filename to the doc
post['upload'] = filename
if animated_filename:
post['upload_animated'] = animated_filename
else:
# Stop the image upload process here if something went wrong.
return None
    # Process everything that's needed in a post
links, mentions, hashtags = parse_post(body)
# Only add the fields if we need too.
if links:
post['links'] = links
if mentions:
post['mentions'] = mentions
if hashtags:
post['hashtags'] = hashtags
# Add the post to the database
# If the post isn't stored, result will be None
result = m.db.posts.insert(post)
# Only carry out the rest of the actions if the insert was successful
if result:
if reply_to is None:
# Add post to authors feed
r.zadd(k.USER_FEED.format(user_id), {str(post_id): post_time})
            # Ensure the feed does not grow too large
            r.zremrangebyrank(k.USER_FEED.format(user_id), 0, -1000)
            # Subscribe the poster to their post
subscribe(user_id, post_id, SubscriptionReasons.POSTER)
# Alert everyone tagged in the post
alert_tagees(mentions, user_id, post_id)
# Append to all followers feeds or approved followers based
# on the posts permission
if permission < k.PERM_APPROVED:
populate_followers_feeds.delay(user_id, post_id, post_time)
else:
populate_approved_followers_feeds.delay(
user_id, post_id, post_time
)
else:
# To reduce database look ups on the read path we will increment
# the reply_to's comment count.
m.db.posts.update({'_id': reply_to},
{'$inc': {'comment_count': 1}})
# Alert all subscribers to the post that a new comment has been
# added. We do this before subscribing anyone new
alert = CommentingAlert(user_id, reply_to)
subscribers = []
# Iterate through subscribers and let them know about the comment
for subscriber_id in get_subscribers(reply_to):
# Ensure we don't get alerted for our own comments
if subscriber_id != user_id:
subscribers.append(subscriber_id)
# Push the comment alert out to all subscribers
AlertManager().alert(alert, subscribers)
# Subscribe the user to the post, will not change anything if they
# are already subscribed
subscribe(user_id, reply_to, SubscriptionReasons.COMMENTER)
# Alert everyone tagged in the post
alert_tagees(mentions, user_id, reply_to)
return post_id
# If there was a problem putting the post in to Mongo we will return None
return None # pragma: no cover
@celery.task()
def populate_followers_feeds(user_id, post_id, timestamp):
"""Fan out a post_id to all the users followers.
This can be run on a worker to speed the process up.
"""
# Get a list of ALL users who are following a user
followers = r.zrange(k.USER_FOLLOWERS.format(user_id), 0, -1)
# This is not transactional as to not hold Redis up.
for follower_id in followers:
# Add the pid to the list
r.zadd(k.USER_FEED.format(follower_id), {str(post_id): timestamp})
        # Stop followers' feeds from growing too large, doesn't matter if it
# doesn't exist
r.zremrangebyrank(k.USER_FEED.format(follower_id), 0, -1000)
@celery.task()
def populate_approved_followers_feeds(user_id, post_id, timestamp):
"""Fan out a post_id to all the users approved followers."""
# Get a list of ALL users who are following a user
followers = r.zrange(k.USER_APPROVED.format(user_id), 0, -1)
# This is not transactional as to not hold Redis up.
for follower_id in followers:
# Add the pid to the list
r.zadd(k.USER_FEED.format(follower_id), {str(post_id): timestamp})
        # Stop followers' feeds from growing too large, doesn't matter if it
# doesn't exist
r.zremrangebyrank(k.USER_FEED.format(follower_id), 0, -1000)
def alert_tagees(tagees, user_id, post_id):
"""Creates a new tagging alert from `user_id` and `post_id` and alerts all
in the `tagees` list.
This will take the tagees processed as `mentions`, it will ensure no
duplication and that the poster is not alerted if they tag themselves.
:type tagees: list
:type user_id: str
:type post_id: str
"""
alert = TaggingAlert(user_id, post_id)
seen_user_ids = []
for tagee in tagees:
tagged_user_id = tagee.get('user_id')
# Don't alert users more than once
if tagged_user_id in seen_user_ids:
continue
# Don't alert posting user to tag
if tagged_user_id == user_id:
continue
# Subscribe the tagee to the post won't change anything if they are
# already subscribed
subscribe(tagged_user_id, post_id, SubscriptionReasons.TAGEE)
seen_user_ids.append(tagged_user_id)
# Get an alert manager to notify all tagees
AlertManager().alert(alert, seen_user_ids)
def back_feed(who_id, whom_id):
"""Takes 5 lastest posts from user with ``who_id`` places them in user
with ``whom_id`` feed.
The reason behind this is that new users may follow someone but still have
and empty feed, which makes them sad :( so we'll give them some. If the
posts are to old for a non user they will be removed when the feed is
trimmed, but they may make it in to the feed but not at the top.
:param who_id: user who just followed ``who_id``
:type who_id: str
:param whom_id: user who was just followed by ``whom_id``
:type whom_id: str
:returns: None
"""
    # Get followee's last 5 un-approved posts (doesn't matter if there aren't any)
# We only need the IDs and the created time
posts = m.db.posts.find(
{'user_id': whom_id, 'reply_to': None,
'permission': {'$lte': k.PERM_PJUU}},
{'_id': True, 'created': True},
).sort('created', -1).limit(5)
# Iterate the cursor and append the posts to the users feed
for post in posts:
timestamp = post.get('created')
post_id = post.get('_id')
# Place on the feed
r.zadd(k.USER_FEED.format(who_id), {str(post_id): timestamp})
# Trim the feed to the 1000 max
r.zremrangebyrank(k.USER_FEED.format(who_id), 0, -1000)
def check_post(user_id, post_id, reply_id=None):
"""Ensure reply_id is a reply_to post_id and that post_id was created by
user_id.
.. note:: Think before testing. user_id is the person wrote post_id,
reply_id if assigned has to have been a reply to post_id.
This for checking the urls not for checking who wrote reply_id
"""
# Check if cid is a comment of post pid
if reply_id:
# Get the reply_to field of the reply object and check it matches
reply = m.db.posts.find_one({'_id': reply_id}, {'reply_to': True})
if reply:
if reply.get('reply_to') != post_id:
return False
else:
return False
# Get the user_id for post with post_id to verify
post = m.db.posts.find_one({'_id': post_id}, {'user_id': True})
if post is not None and post.get('user_id') == user_id:
return True
return False
def get_post(post_id):
"""Returns a post. Simple helper function
"""
post = m.db.posts.find_one({'_id': post_id})
    # Attach the author's avatar and donated flag to the post
if post is not None:
user = m.db.users.find_one({'_id': post.get('user_id')},
{'avatar': True, 'donated': True})
if user is not None:
post['user_avatar'] = user.get('avatar')
post['user_donated'] = user.get('donated', False)
return post
def get_global_feed(page=1, per_page=None, perm=0):
if per_page is None: # pragma: no cover
per_page = app.config.get('FEED_ITEMS_PER_PAGE')
lookup_dict = {
'reply_to': {'$exists': False},
'permission': {'$lte': perm}
}
total = m.db.posts.find(lookup_dict).count()
cursor = m.db.posts.find(lookup_dict).sort(
'created', -1).skip((page - 1) * per_page).limit(per_page)
posts = []
for post in cursor:
posts.append(post)
# Get a list of unique `user_id`s from all the post.
user_ids = list(set([post.get('user_id') for post in posts]))
cursor = m.db.users.find({'_id': {'$in': user_ids}},
{'avatar': True, 'donated': True})
    # Create a lookup dict `{user_id: {'avatar': ..., 'donated': ...}}`
users = \
dict((user.get('_id'), {
'avatar': user.get('avatar'),
'donated': user.get('donated', False)
}) for user in cursor)
    # Attach the avatar/donated details to the posts
processed_posts = []
for post in posts:
post['user_avatar'] = users.get(post.get('user_id')).get('avatar')
post['user_donated'] = users.get(post.get('user_id')).get('donated')
processed_posts.append(post)
    return Pagination(processed_posts, total, page, per_page)
def get_posts(user_id, page=1, per_page=None, perm=0):
"""Returns a users posts as a pagination object."""
if per_page is None:
per_page = app.config.get('FEED_ITEMS_PER_PAGE')
# Get the user object we need the email for Gravatar.
user = m.db.users.find_one({'_id': user_id},
{'avatar': True, 'donated': True})
lookup_dict = {
'user_id': user_id,
'reply_to': {'$exists': False}
}
lookup_dict['permission'] = {'$lte': perm}
total = m.db.posts.find(lookup_dict).count()
cursor = m.db.posts.find(lookup_dict).sort(
'created', -1).skip((page - 1) * per_page).limit(per_page)
posts = []
for post in cursor:
post['user_avatar'] = user.get('avatar')
post['user_donated'] = user.get('donated', False)
posts.append(post)
return Pagination(posts, total, page, per_page)
def get_replies(post_id, page=1, per_page=None, sort_order=-1):
"""Returns all a posts replies as a pagination object."""
if per_page is None:
per_page = app.config.get('REPLIES_ITEMS_PER_PAGE')
total = m.db.posts.find_one({'_id': post_id}).get('comment_count')
cursor = m.db.posts.find(
{'reply_to': post_id}
).sort(
[('created', sort_order)]
).skip((page - 1) * per_page).limit(per_page)
replies = []
for reply in cursor:
# We have to get the users email for each post for the gravatar
user = m.db.users.find_one(
{'_id': reply.get('user_id')},
{'avatar': True, 'donated': True})
if user is not None: # pragma: no branch
reply['user_avatar'] = user.get('avatar')
reply['user_donated'] = user.get('donated', False)
replies.append(reply)
return Pagination(replies, total, page, per_page)
def get_hashtagged_posts(hashtag, page=1, per_page=None):
"""Returns all posts with `hashtag` in date order."""
if per_page is None:
per_page = app.config.get('FEED_ITEMS_PER_PAGE')
total = m.db.posts.find({
'hashtags.hashtag': hashtag,
'reply_to': {'$exists': False}}).count()
cursor = m.db.posts.find({
'hashtags.hashtag': hashtag,
'reply_to': {'$exists': False}
}).sort('created', -1).skip((page - 1) * per_page).limit(per_page)
posts = []
for post in cursor:
user = m.db.users.find_one(
{'_id': post.get('user_id')},
{'avatar': True})
        if user is not None:  # pragma: no branch
post['user_avatar'] = user.get('avatar')
posts.append(post)
return Pagination(posts, total, page, per_page)
def has_voted(user_id, post_id):
"""Check if a user has voted on a post or a comment, if so return the vote.
"""
return r.zscore(k.POST_VOTES.format(post_id), user_id)
def vote_post(user_id, post_id, amount=1, ts=None):
"""Handles voting on posts
:param user_id: User who is voting
:type user_id: str
:param post_id: ID of the post the user is voting on
:type post_id: int
:param amount: The way to vote (-1 or 1)
:type amount: int
:param ts: Timestamp to use for vote (ONLY FOR TESTING)
:type ts: int
:returns: -1 if downvote, 0 if reverse vote and +1 if upvote
"""
if ts is None:
ts = timestamp()
# Get the comment so we can check who the author is
author_uid = get_post(post_id).get('user_id')
# Votes can ONLY ever be -1 or 1 and nothing else
    # we use the sign to pack the vote direction and the time into one zset score
amount = 1 if amount >= 0 else -1
voted = has_voted(user_id, post_id)
if not voted:
if author_uid != user_id:
# Store the timestamp of the vote with the sign of the vote
r.zadd(k.POST_VOTES.format(post_id), {
str(user_id): amount * timestamp()
})
# Update post score
m.db.posts.update({'_id': post_id},
{'$inc': {'score': amount}})
# Update user score
m.db.users.update({'_id': author_uid},
{'$inc': {'score': amount}})
return amount
else:
raise CantVoteOnOwn
elif voted and abs(voted) + k.VOTE_TIMEOUT > ts:
# No need to check if user is current user because it can't
# happen in the first place
# Remove the vote from Redis
r.zrem(k.POST_VOTES.format(post_id), user_id)
previous_vote = -1 if voted < 0 else 1
# Calculate how much to increment/decrement the scores by
# Saves multiple trips to Mongo
if amount == previous_vote:
if previous_vote < 0:
amount = 1
result = 0
else:
amount = -1
result = 0
else:
# We will only register the new vote if it is NOT a vote reversal.
r.zadd(k.POST_VOTES.format(post_id), {
str(user_id): amount * timestamp()
})
if previous_vote < 0:
amount = 2
result = 1
else:
amount = -2
result = -1
# Update post score
m.db.posts.update({'_id': post_id},
{'$inc': {'score': amount}})
# Update user score
m.db.users.update({'_id': author_uid},
{'$inc': {'score': amount}})
return result
else:
raise AlreadyVoted
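
# ---------------------------------------------------------------------------
# Illustrative sketch: the re-vote arithmetic above reduces to a small truth
# table. `_vote_delta` is a hypothetical pure helper mirroring that logic:
def _vote_delta(previous_vote, amount):
    """Return (score_delta, result) for a vote on an already-voted post."""
    if amount == previous_vote:
        # Reversal: undo the previous vote.
        return (1, 0) if previous_vote < 0 else (-1, 0)
    # Switch: undo the previous vote and apply the new one.
    return (2, 1) if previous_vote < 0 else (-2, -1)

assert _vote_delta(-1, -1) == (1, 0)   # remove a downvote
assert _vote_delta(1, 1) == (-1, 0)    # remove an upvote
assert _vote_delta(-1, 1) == (2, 1)    # downvote -> upvote
assert _vote_delta(1, -1) == (-2, -1)  # upvote -> downvote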
def delete_post(post_id):
"""Deletes a post
"""
post = get_post(post_id)
# In some situations a post may be in a cursor (deleting account) but have
# already been deleted by this function in a previous run.
if post is not None:
# Delete votes and subscribers from Redis
r.delete(k.POST_VOTES.format(post.get('_id')))
# Delete the post from MongoDB
m.db.posts.remove({'_id': post_id})
if 'upload' in post:
# If there is an upload, delete it!
storage.delete(post['upload'])
if 'reply_to' in post:
m.db.posts.update({'_id': post['reply_to']},
{'$inc': {'comment_count': -1}})
else:
            # Trigger deletion of all the post's comments if this post isn't a reply
r.delete(k.POST_SUBSCRIBERS.format(post.get('_id')))
delete_post_replies(post_id)
def delete_post_replies(post_id):
"""Delete ALL comments on post with pid.
This can't be done in one single call to Mongo because we need to remove
the votes from Redis!
"""
# Get a cursor for all the posts comments
cur = m.db.posts.find({'reply_to': post_id})
# Iterate over the cursor and delete each one
for reply in cur:
reply_id = reply.get('_id')
# Delete the comment itself from MongoDB
m.db.posts.remove({'_id': reply_id})
# Remove any uploaded files
if 'upload' in reply:
storage.delete(reply['upload'])
# Delete votes from Redis
r.delete(k.POST_VOTES.format(reply_id))
def subscribe(user_id, post_id, reason):
"""Subscribes a user (uid) to post (pid) for reason.
"""
    # Check that pid exists; if not, do nothing
if not m.db.posts.find_one({'_id': post_id}, {}):
return False
# Only subscribe the user if the user is not already subscribed
# this will mean the original reason is kept
return r.zadd(k.POST_SUBSCRIBERS.format(post_id), {
str(user_id): reason
}, nx=True)
def unsubscribe(user_id, post_id):
"""Unsubscribe a user from a post.
"""
# Actually remove the uid from the subscribers list
return bool(r.zrem(k.POST_SUBSCRIBERS.format(post_id), user_id))
def flag_post(user_id, post_id):
"""Flags a post for moderator review.
    :raises AlreadyFlagged: if the user has already flagged this post.
    :raises CantFlagOwn: if the user tries to flag their own post.
"""
# Get the comment so we can check who the author is
post = get_post(post_id)
if post.get('user_id') != user_id:
if not has_flagged(user_id, post_id):
# Increment the flag count by one and store the user name
r.zadd(k.POST_FLAGS.format(post_id), {
str(user_id): timestamp()
})
m.db.posts.update({'_id': post_id},
{'$inc': {'flags': 1}})
else:
raise AlreadyFlagged
else:
raise CantFlagOwn
def unflag_post(post_id):
"""Resets the flag count on a post to 0.
    .. note:: This is an OP user only action from the dashboard.
"""
return m.db.posts.update({'_id': post_id}, {'$set': {'flags': 0}})
def get_subscribers(post_id):
"""Return a list of subscribers 'user_id's for a given post
"""
return r.zrange(k.POST_SUBSCRIBERS.format(post_id), 0, -1)
def is_subscribed(user_id, post_id):
"""Returns a boolean to denote if a user is subscribed or not
"""
return r.zrank(k.POST_SUBSCRIBERS.format(post_id), user_id) is not None
def has_flagged(user_id, post_id):
""""""
return r.zrank(k.POST_FLAGS.format(post_id), user_id) is not None
def subscription_reason(user_id, post_id):
"""Returns the reason a user is subscribed to a post.
"""
return r.zscore(k.POST_SUBSCRIBERS.format(post_id), user_id)
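
# ---------------------------------------------------------------------------
# Illustrative sketch: vote_post packs the vote direction and the timestamp
# into a single sorted-set score (sign = direction, magnitude = time).
# `_decode_vote_score` is a hypothetical helper showing the unpacking:
def _decode_vote_score(score):
    return (-1 if score < 0 else 1), abs(score)

assert _decode_vote_score(-1616161616.0) == (-1, 1616161616.0)
assert _decode_vote_score(1616161616.0) == (1, 1616161616.0)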
|
pjuu/pjuu
|
pjuu/posts/backend.py
|
Python
|
agpl-3.0
| 24,772
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP68 implementation."""
import time
from test_framework.blocktools import (
NORMAL_GBT_REQUEST_PARAMS,
add_witness_commitment,
create_block,
)
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
tx_from_hex,
)
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
satoshi_round,
softfork_active,
)
from test_framework.script_util import DUMMY_P2WPKH_SCRIPT
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "non-BIP68-final"
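
# Illustrative sketch: decoding an nSequence value with the constants above.
# `decode_sequence` is a local helper for this sketch, not framework API.
def decode_sequence(sequence_value):
    return {
        "disabled": bool(sequence_value & SEQUENCE_LOCKTIME_DISABLE_FLAG),
        "time_based": bool(sequence_value & SEQUENCE_LOCKTIME_TYPE_FLAG),
        # Masked value: blocks, or 512-second units when time_based is set.
        "value": sequence_value & SEQUENCE_LOCKTIME_MASK,
    }

assert decode_sequence(SEQUENCE_LOCKTIME_DISABLE_FLAG | 1)["disabled"]
assert decode_sequence(SEQUENCE_LOCKTIME_TYPE_FLAG | 5) == \
    {"disabled": False, "time_based": True, "value": 5}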
class BIP68Test(FujicoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
[
"-acceptnonstdtxn=1",
"-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise
],
["-acceptnonstdtxn=0"],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Generate some coins
self.nodes[0].generate(110)
self.log.info("Running test disable flag")
self.test_disable_flag()
self.log.info("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
self.log.info("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
self.log.info("Running test BIP68 not consensus before activation")
self.test_bip68_not_consensus()
self.log.info("Activating BIP68 (and 112/113)")
self.activateCSV()
self.log.info("Verifying nVersion=2 transactions are standard.")
self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).")
self.test_version2_relay()
self.log.info("Passed")
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 FJC
utxos = self.nodes[0].listunspent(0, 0)
assert len(utxos) > 0
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, DUMMY_P2WPKH_SCRIPT)]
tx1_signed = self.nodes[0].signrawtransactionwithwallet(tx1.serialize().hex())["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value - self.relayfee * COIN), DUMMY_P2WPKH_SCRIPT)]
tx2.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, tx2.serialize().hex())
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(tx2.serialize().hex())
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
while len(self.nodes[0].listunspent()) < 200:
import random
random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for _ in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(tx.serialize().hex())//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), DUMMY_P2WPKH_SCRIPT))
rawtx = self.nodes[0].signrawtransactionwithwallet(tx.serialize().hex())["hex"]
if (using_sequence_locks and not should_pass):
# This transaction should be rejected
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
else:
# This raw transaction should be accepted
self.nodes[0].sendrawtransaction(rawtx)
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = tx_from_hex(self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), DUMMY_P2WPKH_SCRIPT)]
tx2_raw = self.nodes[0].signrawtransactionwithwallet(tx2.serialize().hex())["hex"]
tx2 = tx_from_hex(tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee * COIN), DUMMY_P2WPKH_SCRIPT)]
tx.rehash()
if (orig_tx.hash in node.getrawmempool()):
# sendrawtransaction should fail if the tx is in the mempool
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, tx.serialize().hex())
else:
# sendrawtransaction should succeed if the tx is not in the mempool
node.sendrawtransaction(tx.serialize().hex())
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN))
cur_time = int(time.time())
for _ in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert tx2.hash in self.nodes[0].getrawmempool()
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
# Save block template now to use for the reorg later
tmpl = self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
self.nodes[0].generate(1)
assert tx2.hash not in self.nodes[0].getrawmempool()
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert tx3.hash in self.nodes[0].getrawmempool()
self.nodes[0].generate(1)
assert tx3.hash not in self.nodes[0].getrawmempool()
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert tx4.hash in self.nodes[0].getrawmempool()
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert tx5.hash not in self.nodes[0].getrawmempool()
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransactionwithwallet(tx5.serialize().hex())["hex"]
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert tx4.hash not in self.nodes[0].getrawmempool()
assert tx3.hash in self.nodes[0].getrawmempool()
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
for i in range(2):
block = create_block(tmpl=tmpl, ntime=cur_time)
block.rehash()
block.solve()
tip = block.sha256
assert_equal(None if i == 1 else 'inconclusive', self.nodes[0].submitblock(block.serialize().hex()))
tmpl = self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
tmpl['previousblockhash'] = '%x' % tip
tmpl['transactions'] = []
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert tx3.hash not in mempool
assert tx2.hash in mempool
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(0)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks prior to
# activation height. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert not softfork_active(self.nodes[0], 'csv')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = tx_from_hex(self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), DUMMY_P2WPKH_SCRIPT)]
# sign tx2
tx2_raw = self.nodes[0].signrawtransactionwithwallet(tx2.serialize().hex())["hex"]
tx2 = tx_from_hex(tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2.serialize().hex())
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN), DUMMY_P2WPKH_SCRIPT)]
tx3.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, tx3.serialize().hex())
# make a block that violates bip68; ensure that the tip updates
block = create_block(tmpl=self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS))
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
add_witness_commitment(block)
block.solve()
assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
# getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block.
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert_greater_than(min_activation_height - height, 2)
self.nodes[0].generate(min_activation_height - height - 2)
assert not softfork_active(self.nodes[0], 'csv')
self.nodes[0].generate(1)
assert softfork_active(self.nodes[0], 'csv')
self.sync_blocks()
# Use self.nodes[1] to test that version 2 transactions are standard.
def test_version2_relay(self):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = tx_from_hex(rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransactionwithwallet(tx.serialize().hex())["hex"]
self.nodes[1].sendrawtransaction(tx_signed)
if __name__ == '__main__':
BIP68Test().main()
|
fujicoin/fujicoin
|
test/functional/feature_bip68_sequence.py
|
Python
|
mit
| 18,674
|
# -*- coding: utf-8 -*-
# Copyright 2018 Sebastian Semper, Christoph Wagner
# https://www.tu-ilmenau.de/it-ems/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Setup script for installation of fastmat package
Usecases:
- install fastmat package system-wide on your machine (needs su privileges)
EXAMPLE: 'python setup.py install'
- install fastmat package for your local user only (no privileges needed)
EXAMPLE: 'python setup.py install --user'
- compile all cython source files locally
EXAMPLE: 'python setup.py build_ext --inplace'
'''
# import modules
import platform
import sys
import os
import re
import subprocess
from distutils import sysconfig
def WARNING(string):
print("\033[91mWARNING:\033[0m %s" % (string))
def ERROR(string, e):
print("\033[91mERROR:\033[0m %s" % (string))
if isinstance(e, int):
sys.exit(e)
else:
raise e
def INFO(string):
print("\033[96mINFO:\033[0m %s" % (string))
# load setup and extensions from setuptools. If that fails, try distutils
try:
from setuptools import setup, Extension
except ImportError:
WARNING("Could not import setuptools.")
raise
# global package constants
packageName = 'fastmat'
packageVersion = '<INVALID>'
strVersionFile = "%s/version.py" %(packageName)
VERSION_PY = """
# -*- coding: utf-8 -*-
# This file carries the module's version information which will be updated
# during execution of the installation script, setup.py. Distribution tarballs
# contain a pre-generated copy of this file.
__version__ = '%s'
"""
##############################################################################
### function and class declaration section. DO NOT PUT SCRIPT CODE IN BETWEEN
##############################################################################
# Enable flexible dependency handling by installing missing base components
class lazyCythonize(list):
'''
Override list type to allow lazy cythonization.
Cythonize and compile only after install_requires are actually installed.
'''
def __init__(self, callback):
self._list, self.callback = None, callback
def c_list(self):
if self._list is None:
self._list = self.callback()
return self._list
def __iter__(self):
for e in self.c_list():
yield e
def __getitem__(self, ii):
return self.c_list()[ii]
def __len__(self):
return len(self.c_list())
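
# Illustrative usage sketch: the callback only runs once the list is first
# inspected, i.e. after setup_requires have been installed.
def _lazy_demo():
    exts = lazyCythonize(lambda: ['ext_a', 'ext_b'])  # nothing built yet
    return len(exts)  # callback fires on first access -> 2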
def extensions():
'''
Handle generation of extensions (a.k.a "managing cython compilery").
'''
try:
from Cython.Build import cythonize
except ImportError:
def cythonize(*args, **kwargs):
print("Hint: Wrapping import of cythonize in extensions()")
from Cython.Build import cythonize
return cythonize(*args, **kwargs)
try:
import numpy
lstIncludes = [numpy.get_include()]
except ImportError:
lstIncludes = []
extensionArguments = {
'include_dirs':
lstIncludes + ['fastmat/core', 'fastmat/inspect'],
'extra_compile_args': compilerArguments,
'extra_link_args': linkerArguments,
'define_macros': defineMacros
}
    # we make damn sure that distutils does not mess with our
# build process
global useGccOverride
if useGccOverride:
INFO('Overriding compiler setup for `gcc -shared`')
sysconfig.get_config_vars()['CFLAGS'] = ''
sysconfig.get_config_vars()['OPT'] = ''
sysconfig.get_config_vars()['PY_CFLAGS'] = ''
sysconfig.get_config_vars()['PY_CORE_CFLAGS'] = ''
sysconfig.get_config_vars()['CC'] = 'gcc'
sysconfig.get_config_vars()['CXX'] = 'g++'
sysconfig.get_config_vars()['BASECFLAGS'] = ''
sysconfig.get_config_vars()['CCSHARED'] = '-fPIC'
sysconfig.get_config_vars()['LDSHARED'] = 'gcc -shared'
sysconfig.get_config_vars()['CPP'] = ''
sysconfig.get_config_vars()['CPPFLAGS'] = ''
sysconfig.get_config_vars()['BLDSHARED'] = ''
sysconfig.get_config_vars()['CONFIGURE_LDFLAGS'] = ''
sysconfig.get_config_vars()['LDFLAGS'] = ''
sysconfig.get_config_vars()['PY_LDFLAGS'] = ''
return cythonize(
[Extension("*", ["fastmat/*.pyx"], **extensionArguments),
Extension("*", ["fastmat/algorithms/*.pyx"], **extensionArguments),
Extension("*", ["fastmat/core/*.pyx"], **extensionArguments)],
compiler_directives=cythonDirectives,
nthreads=4
)
# determine requirements for install and setup
def checkRequirement(lstRequirements, importName, requirementName):
'''
Don't add packages unconditionally as this involves the risk of updating an
already installed package. Sometimes this may break during install or mix
up dependencies after install. Consider an update only if the requested
package is not installed at all or if we are building an installation
wheel.
'''
try:
__import__(importName)
except ImportError:
lstRequirements.append(requirementName)
else:
if 'bdist_wheel' in sys.argv[1:]:
lstRequirements.append(requirementName)
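
# Illustrative usage sketch:
#
#     reqs = []
#     checkRequirement(reqs, 'numpy', 'numpy>=1.16.3')
#     # 'numpy>=1.16.3' lands in reqs only if numpy is missing or a wheel
#     # build ('bdist_wheel') was requested.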
def doc_opts():
'''
Introduce a command-line setup target to generate the sphinx doc.
'''
try:
from sphinx.setup_command import BuildDoc
class OwnDoc(BuildDoc, object):
def __init__(self, *args, **kwargs):
# check if we have the necessary sphinx add-ons installed
import pip
global sphinxRequires
failed = []
for requirement in sphinxRequires:
try:
__import__(requirement)
except ImportError:
failed.append(requirement)
if len(failed) > 0:
ERROR(
"Following pypi packages are missing: %s" %(failed, ),
1
)
super(OwnDoc, self).__init__(*args, **kwargs)
return OwnDoc
except ImportError:
WARNING(
"Unable to import Sphinx. Building docs is currently unavailable."
)
return None
##############################################################################
### The actual script. KEEP THE `import filter` ALIVE AT ALL TIMES
##############################################################################
if __name__ == '__main__':
# get version from git and update fastmat/__init__.py accordingly
try:
with open(".version", "r") as f:
lines = [str(s) for s in [ln.strip() for ln in f] if len(s)]
packageVersion = lines[0]
    except IOError as e:
        ERROR("Setting package version", e)
    except IndexError as e:
        ERROR("Version file is empty", e)
# make sure there exists a version.py file in the project
with open(strVersionFile, "w") as f:
f.write(VERSION_PY % (packageVersion))
print("Set %s to '%s'" % (strVersionFile, packageVersion))
# get the long description from the README file.
    # CAUTION: Python2/3 utf encoding calls need some adjustments
fileName = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'README.md'
)
f = (open(fileName, 'r') if sys.version_info < (3, 0)
else open(fileName, 'r', encoding='utf-8'))
longDescription = f.read()
f.close()
pypiName = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'pypi.md'
)
    f = (open(pypiName, 'r') if sys.version_info < (3, 0)
         else open(pypiName, 'r', encoding='utf-8'))
pypiDescription = f.read()
f.close()
    # Build for generic (legacy) architectures when the environment variable
# (FASTMAT_GENERIC) is defined
if 'FASTMAT_GENERIC' in os.environ:
marchFlag = '-march=x86-64'
mtuneFlag = '-mtune=core2'
WARNING("Building package for generic architectures")
else:
marchFlag = '-march=native'
mtuneFlag = '-mtune=native'
# define different compiler arguments for each platform
strPlatform = platform.system()
compilerArguments = []
linkerArguments = []
useGccOverride = False
if strPlatform == 'Windows':
# Microsoft Visual C++ Compiler 9.0
compilerArguments += ['/O2', '/fp:precise', marchFlag]
elif strPlatform == 'Linux':
# assuming Linux and gcc
compilerArguments += ['-Ofast', marchFlag, mtuneFlag]
useGccOverride = True
elif strPlatform == 'Darwin':
# assuming Darwin
compilerArguments += ['-Ofast', marchFlag, mtuneFlag]
else:
WARNING("Your platform is currently not supported by %s: %s" % (
packageName, strPlatform))
# define default cython directives, these may get extended along the script
cythonDirectives = {'language_level': '3str'}
defineMacros = []
CMD_COVERAGE = '--enable-cython-tracing'
if CMD_COVERAGE in sys.argv:
sys.argv.remove(CMD_COVERAGE)
cythonDirectives['linetrace'] = True
cythonDirectives['binding'] = True
defineMacros += [('CYTHON_TRACE_NOGIL', '1'),
('CYTHON_TRACE', '1')]
print("Enabling cython line tracing allowing code coverage analysis")
print("Building %s v%s for %s." % (
packageName,
packageVersion,
strPlatform)
)
# check if all requirements are met prior to actually calling setup()
setupRequires = []
installRequires = []
sphinxRequires = ['sphinx', 'sphinx_rtd_theme', 'numpydoc', 'matplotlib']
checkRequirement(setupRequires, 'setuptools', 'setuptools>=18.0')
checkRequirement(setupRequires, 'Cython', 'cython>=0.29')
if sys.version_info < (3, 5):
checkRequirement(setupRequires, 'numpy', 'numpy<1.17')
else:
checkRequirement(setupRequires, 'numpy', 'numpy>=1.16.3')
checkRequirement(installRequires, 'six', 'six')
checkRequirement(installRequires, 'scipy', 'scipy>=1.0')
print("Requirements for setup: %s" % (setupRequires))
print("Requirements for install: %s" % (installRequires))
# everything's set. Fire in the hole.
setup(
name=packageName,
version=packageVersion,
description='fast linear transforms in Python',
long_description=pypiDescription,
long_description_content_type='text/markdown',
author='Christoph Wagner, Sebastian Semper, EMS group TU Ilmenau',
author_email='christoph.wagner@tu-ilmenau.de',
url='https://ems-tu-ilmenau.github.io/fastmat/',
license='Apache Software License',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: IPython',
'Framework :: Jupyter',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: Other',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries'
],
keywords='linear transforms efficient algorithms mathematics',
setup_requires=setupRequires,
install_requires=installRequires,
packages=[
'fastmat',
'fastmat/algorithms',
'fastmat/core',
'fastmat/inspect'
],
cmdclass={'build_doc': doc_opts()},
command_options={
'build_doc': {
'project': ('setup.py', packageName),
'version': ('setup.py', packageVersion),
'release': ('setup.py', packageVersion),
'copyright': ('setup.py', '2017, ' + packageName)
}},
ext_modules=lazyCythonize(extensions)
)
|
EMS-TU-Ilmenau/fastmat
|
setup.py
|
Python
|
apache-2.0
| 13,182
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_utils import uuidutils
from nova import context
from nova import objects
from nova import quota
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.db import test_instance_mapping
@ddt.ddt
class QuotaTestCase(test.NoDBTestCase):
USES_DB_SELF = True
def setUp(self):
super(QuotaTestCase, self).setUp()
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
self.useFixture(nova_fixtures.Database(database='api'))
fix = nova_fixtures.CellDatabases()
fix.add_cell_database('cell1')
fix.add_cell_database('cell2')
self.useFixture(fix)
@ddt.data(True, False)
@mock.patch('nova.quota.LOG.warning')
@mock.patch('nova.quota._user_id_queued_for_delete_populated')
def test_server_group_members_count_by_user(self, uid_qfd_populated,
mock_uid_qfd_populated,
mock_warn_log):
mock_uid_qfd_populated.return_value = uid_qfd_populated
ctxt = context.RequestContext('fake-user', 'fake-project')
mapping1 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='cell1',
transport_url='none:///')
mapping2 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='cell2',
transport_url='none:///')
mapping1.create()
mapping2.create()
# Create a server group the instances will use.
group = objects.InstanceGroup(context=ctxt)
group.project_id = ctxt.project_id
group.user_id = ctxt.user_id
group.create()
instance_uuids = []
# Create an instance in cell1
with context.target_cell(ctxt, mapping1) as cctxt:
instance = objects.Instance(context=cctxt,
project_id='fake-project',
user_id='fake-user')
instance.create()
instance_uuids.append(instance.uuid)
im = objects.InstanceMapping(context=ctxt,
instance_uuid=instance.uuid,
project_id='fake-project',
user_id='fake-user',
cell_id=mapping1.id)
im.create()
# Create an instance in cell2
with context.target_cell(ctxt, mapping2) as cctxt:
instance = objects.Instance(context=cctxt,
project_id='fake-project',
user_id='fake-user')
instance.create()
instance_uuids.append(instance.uuid)
im = objects.InstanceMapping(context=ctxt,
instance_uuid=instance.uuid,
project_id='fake-project',
user_id='fake-user',
cell_id=mapping2.id)
im.create()
# Create an instance that is queued for delete in cell2. It should not
# be counted
with context.target_cell(ctxt, mapping2) as cctxt:
instance = objects.Instance(context=cctxt,
project_id='fake-project',
user_id='fake-user')
instance.create()
instance.destroy()
instance_uuids.append(instance.uuid)
im = objects.InstanceMapping(context=ctxt,
instance_uuid=instance.uuid,
project_id='fake-project',
user_id='fake-user',
cell_id=mapping2.id,
queued_for_delete=True)
im.create()
# Add the uuids to the group
objects.InstanceGroup.add_members(ctxt, group.uuid, instance_uuids)
# add_members() doesn't add the members to the object field
group.members.extend(instance_uuids)
# Count server group members from instance mappings or cell databases,
# depending on whether the user_id/queued_for_delete data migration has
# been completed.
count = quota._server_group_count_members_by_user(ctxt, group,
'fake-user')
self.assertEqual(2, count['user']['server_group_members'])
if uid_qfd_populated:
# Did not log a warning about falling back to legacy count.
mock_warn_log.assert_not_called()
else:
# Logged a warning about falling back to legacy count.
mock_warn_log.assert_called_once()
# Create a duplicate of the cell1 instance in cell2 except hidden.
with context.target_cell(ctxt, mapping2) as cctxt:
instance = objects.Instance(context=cctxt,
project_id='fake-project',
user_id='fake-user',
uuid=instance_uuids[0],
hidden=True)
instance.create()
# The duplicate hidden instance should not be counted.
count = quota._server_group_count_members_by_user(
ctxt, group, instance.user_id)
self.assertEqual(2, count['user']['server_group_members'])
def test_instances_cores_ram_count(self):
ctxt = context.RequestContext('fake-user', 'fake-project')
mapping1 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='cell1',
transport_url='none:///')
mapping2 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='cell2',
transport_url='none:///')
mapping1.create()
mapping2.create()
# Create an instance in cell1
with context.target_cell(ctxt, mapping1) as cctxt:
instance = objects.Instance(context=cctxt,
project_id='fake-project',
user_id='fake-user',
vcpus=2, memory_mb=512)
instance.create()
# create mapping for the instance since we query only those cells
# in which the project has instances based on the instance_mappings
im = objects.InstanceMapping(context=ctxt,
instance_uuid=instance.uuid,
cell_mapping=mapping1,
project_id='fake-project')
im.create()
# Create an instance in cell2
with context.target_cell(ctxt, mapping2) as cctxt:
instance = objects.Instance(context=cctxt,
project_id='fake-project',
user_id='fake-user',
vcpus=4, memory_mb=1024)
instance.create()
# create mapping for the instance since we query only those cells
# in which the project has instances based on the instance_mappings
im = objects.InstanceMapping(context=ctxt,
instance_uuid=instance.uuid,
cell_mapping=mapping2,
project_id='fake-project')
im.create()
# Create an instance in cell2 for a different user
with context.target_cell(ctxt, mapping2) as cctxt:
instance = objects.Instance(context=cctxt,
project_id='fake-project',
user_id='other-fake-user',
vcpus=4, memory_mb=1024)
instance.create()
# create mapping for the instance since we query only those cells
# in which the project has instances based on the instance_mappings
im = objects.InstanceMapping(context=ctxt,
instance_uuid=instance.uuid,
cell_mapping=mapping2,
project_id='fake-project')
im.create()
# Count instances, cores, and ram across cells
count = quota._instances_cores_ram_count(ctxt, 'fake-project',
user_id='fake-user')
self.assertEqual(3, count['project']['instances'])
self.assertEqual(10, count['project']['cores'])
self.assertEqual(2560, count['project']['ram'])
self.assertEqual(2, count['user']['instances'])
self.assertEqual(6, count['user']['cores'])
self.assertEqual(1536, count['user']['ram'])
def test_user_id_queued_for_delete_populated(self):
ctxt = context.RequestContext(
test_instance_mapping.sample_mapping['user_id'],
test_instance_mapping.sample_mapping['project_id'])
        # One deleted or SOFT_DELETED instance with user_id=None should not be
        # considered by the check.
test_instance_mapping.create_mapping(user_id=None,
queued_for_delete=True)
# Should be True because deleted instances are not considered.
self.assertTrue(quota._user_id_queued_for_delete_populated(ctxt))
        # A non-deleted instance with user_id=None should be considered in the
        # check.
test_instance_mapping.create_mapping(user_id=None,
queued_for_delete=False)
# Should be False because it's not deleted and user_id is unmigrated.
self.assertFalse(quota._user_id_queued_for_delete_populated(ctxt))
# A non-deleted instance in a different project, should be considered
# in the check (if project_id is not passed).
test_instance_mapping.create_mapping(queued_for_delete=False,
project_id='other-project')
# Should be False since only instance 3 has user_id set and we're not
# filtering on project.
self.assertFalse(quota._user_id_queued_for_delete_populated(ctxt))
# Should be True because only instance 3 will be considered when we
# filter on project.
self.assertTrue(
quota._user_id_queued_for_delete_populated(
ctxt, project_id='other-project'))
# Add a mapping for an instance that has not yet migrated
# queued_for_delete.
test_instance_mapping.create_mapping(queued_for_delete=None)
# Should be False because an unmigrated queued_for_delete was found.
self.assertFalse(
quota._user_id_queued_for_delete_populated(ctxt))
# Check again filtering on project. Should be True because the
# unmigrated queued_for_delete record is part of a different project.
self.assertTrue(
quota._user_id_queued_for_delete_populated(
ctxt, project_id='other-project'))
|
rahulunair/nova
|
nova/tests/functional/db/test_quota.py
|
Python
|
apache-2.0
| 12,306
|
#!/usr/local/bin/python
# We are going to use escape sequences such as quotation marks
mi_variable = "Este texto lleva comillas \" "
|
toblerone554/TutorialPyhton
|
Tema1/comillas.py
|
Python
|
gpl-3.0
| 125
|
# accounts/urls.py
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^my/$', views.my_view, name='accounts.my'),
url(r'^login/$', views.login_users_view, name='accounts.login'),
url(r'^signup/$', views.signup_users_view, name='accounts.signup'),
url(r'^logout/$', views.logout_users_view, name='accounts.logout'),
]
|
tic-ull/gedea
|
src/miprimeraplicacion_django/accounts/urls.py
|
Python
|
gpl-3.0
| 360
|
import unittest
import pytest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import MoveTargetOutOfBoundsException
class ClickScrollingTest(unittest.TestCase):
def testClickingOnAnchorScrollsPage(self):
scrollScript = "var pageY;\
if (typeof(window.pageYOffset) == 'number') {\
pageY = window.pageYOffset;\
} else {\
pageY = document.documentElement.scrollTop;\
}\
return pageY;"
self._loadPage("macbeth")
self.driver.find_element(By.PARTIAL_LINK_TEXT,"last speech").click()
yOffset = self.driver.execute_script(scrollScript)
        # Focusing on the link in order to click it (without actually
        # following it) scrolls it into view, which leaves the page
        # offset a few pixels beyond 0
self.assertTrue(yOffset > 300)
def testShouldScrollToClickOnAnElementHiddenByOverflow(self):
self._loadPage("click_out_of_bounds_overflow")
link = self.driver.find_element(By.ID, "link")
try:
link.click()
except MoveTargetOutOfBoundsException:
self.fail("Should not be out of bounds")
@pytest.mark.ignore_chrome
def testShouldBeAbleToClickOnAnElementHiddenByOverflow(self):
self._loadPage("scroll")
link = self.driver.find_element(By.ID, "line8")
link.click()
self.assertEqual("line8", self.driver.find_element(By.ID, "clicked").text)
@pytest.mark.ignore_chrome
@pytest.mark.ignore_opera
def testShouldNotScrollOverflowElementsWhichAreVisible(self):
self._loadPage("scroll2")
list = self.driver.find_element(By.TAG_NAME, "ul")
item = list.find_element(By.ID, "desired")
item.click()
yOffset = self.driver.execute_script("return arguments[0].scrollTop;", list)
self.assertEqual(0, yOffset)
@pytest.mark.ignore_chrome
@pytest.mark.ignore_safari
def testShouldNotScrollIfAlreadyScrolledAndElementIsInView(self):
self._loadPage("scroll3")
self.driver.find_element(By.ID, "button1").click()
scrollTop = self.driver.execute_script("return document.body.scrollTop;")
self.driver.find_element(By.ID, "button2").click()
self.assertEqual(scrollTop, self.driver.execute_script("return document.body.scrollTop;"))
def testShouldBeAbleToClickRadioButtonScrolledIntoView(self):
self._loadPage("scroll4")
self.driver.find_element(By.ID, "radio").click()
        # If we don't throw, we are good
@pytest.mark.ignore_ie
def testShouldScrollOverflowElementsIfClickPointIsOutOfViewButElementIsInView(self):
self._loadPage("scroll5")
self.driver.find_element(By.ID, "inner").click()
self.assertEqual("clicked", self.driver.find_element(By.ID, "clicked").text)
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/test/selenium/webdriver/common/click_scrolling_tests.py
|
Python
|
mit
| 3,069
|
###############################################################################
#
# $Id: NsoundGimpUtils.py 494 2010-06-22 03:44:58Z weegreenblobbie $
#
# Nsound is a C++ library and Python module for audio synthesis featuring
# dynamic digital filters. Nsound lets you easily shape waveforms and write
# to disk or plot them. Nsound aims to be as powerful as Csound but easy to
# use.
#
# Copyright (c) 2004, 2005 Nick Hilton
#
# weegreenblobbie_at_yahoo_com
#
###############################################################################
###############################################################################
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
###############################################################################
import math
import struct
import sys
import gimp
import gimpfu
from Nsound import Buffer
###############################################################################
def getRow(layer, row, direction = "X"):
from Nsound import Buffer
# Preallocate memory
red = Buffer(layer.width)
green = Buffer(layer.width)
blue = Buffer(layer.width)
pixel_region = None
raw_data = None
n_pixels = None
if direction == "X":
n_pixels = layer.width
pixel_region = layer.get_pixel_rgn(0,row, n_pixels, 1)
raw_data = pixel_region[:,row]
else:
n_pixels = layer.height
pixel_region = layer.get_pixel_rgn(row, 0, 1, n_pixels)
raw_data = pixel_region[row,:]
bpp = layer.bpp
for i in range(0, bpp*n_pixels, bpp):
r = 0.0
g = 0.0
b = 0.0
a = 0.0
if bpp == 4:
(r,g,b,a) = struct.unpack('BBBB', raw_data[i:i+4])
elif bpp == 3:
(r,g,b) = struct.unpack('BBB', raw_data[i:i+3])
elif bpp == 2:
(r,a) = struct.unpack('BB', raw_data[i:i+2])
elif bpp == 1:
r = struct.unpack('B', raw_data[i])[0]
red << r
green << g
blue << b
if bpp >= 3:
return [red, green, blue]
elif bpp >= 1:
return [red]
###############################################################################
def setRow(layer, row, pixels, direction = "X"):
# Filter out possible NaNs
for i in range(len(pixels)):
buf = pixels[i]
for j in range(buf.getLength()):
if math.isnan(buf[j]):
buf[j] = 0.0
pixels[i] = buf
n_pixels = None
if direction == "X":
n_pixels = layer.width
pixel_region = layer.get_pixel_rgn(0,row, n_pixels, 1)
else:
n_pixels = layer.height
pixel_region = layer.get_pixel_rgn(row, 0, 1, n_pixels)
bpp = layer.bpp
raw_string = ""
    if bpp >= 3:
        (red,green,blue) = (pixels[0], pixels[1], pixels[2])
        # limit all three channels to the valid byte range
        red(red > 255).set(255)
        red(red < 0).set(0)
        green(green > 255).set(255)
        green(green < 0).set(0)
        blue(blue > 255).set(255)
        blue(blue < 0).set(0)
elif bpp >= 1:
red = pixels[0]
# limit
red(red > 255).set(255)
red(red < 0).set(0)
k = 0
for i in range(0, bpp*n_pixels, bpp):
if bpp == 4:
raw_string += struct.pack("BBBB",
int(red[k]),
int(green[k]),
int(blue[k]),
255)
        elif bpp == 3:
            raw_string += struct.pack("BBB",
                                      int(red[k]),
                                      int(green[k]),
                                      int(blue[k]))
        elif bpp == 2:
            raw_string += struct.pack("BB", int(red[k]), 255)
        elif bpp == 1:
            raw_string += struct.pack("B", int(red[k]))
k += 1
if direction == "X":
pixel_region[0:layer.width, row] = raw_string
else:
pixel_region[row, 0:layer.height] = raw_string
|
weegreenblobbie/nsound
|
src/plugins/gimp/NsoundGimpUtils.py
|
Python
|
gpl-2.0
| 4,455
|
from django.utils.translation import ugettext_lazy as _
label = _('expenses')
VERSION = '0.3.0'
|
alviandk/django-expense
|
expense/__init__.py
|
Python
|
bsd-3-clause
| 97
|
"""Code to wrap some GLOO API calls."""
import numpy
import asyncio
try:
import pygloo
except ImportError:
raise ImportError("Can not import pygloo."
"Please run 'pip install pygloo' to install pygloo.")
import ray
from ray.util.collective.types import ReduceOp, torch_available
from ray.util.queue import _QueueActor
GLOO_REDUCE_OP_MAP = {
ReduceOp.SUM: pygloo.ReduceOp.SUM,
ReduceOp.PRODUCT: pygloo.ReduceOp.PRODUCT,
ReduceOp.MIN: pygloo.ReduceOp.MIN,
ReduceOp.MAX: pygloo.ReduceOp.MAX,
}
NUMPY_GLOO_DTYPE_MAP = {
# INT types
numpy.int: pygloo.glooDataType_t.glooInt64,
numpy.uint8: pygloo.glooDataType_t.glooUint8,
numpy.uint32: pygloo.glooDataType_t.glooUint32,
numpy.uint64: pygloo.glooDataType_t.glooUint64,
numpy.int8: pygloo.glooDataType_t.glooInt8,
numpy.int32: pygloo.glooDataType_t.glooInt32,
numpy.int64: pygloo.glooDataType_t.glooInt64,
# FLOAT types
numpy.half: pygloo.glooDataType_t.glooFloat16,
numpy.float: pygloo.glooDataType_t.glooFloat64,
numpy.float16: pygloo.glooDataType_t.glooFloat16,
numpy.float32: pygloo.glooDataType_t.glooFloat32,
numpy.float64: pygloo.glooDataType_t.glooFloat64,
numpy.double: pygloo.glooDataType_t.glooFloat64,
}
if torch_available():
import torch
TORCH_GLOO_DTYPE_MAP = {
torch.int: pygloo.glooDataType_t.glooInt32,
torch.uint8: pygloo.glooDataType_t.glooUint8,
torch.int8: pygloo.glooDataType_t.glooInt8,
torch.int32: pygloo.glooDataType_t.glooInt32,
torch.int64: pygloo.glooDataType_t.glooInt64,
torch.long: pygloo.glooDataType_t.glooInt64,
# FLOAT types
torch.half: pygloo.glooDataType_t.glooFloat16,
torch.float: pygloo.glooDataType_t.glooFloat32,
torch.float16: pygloo.glooDataType_t.glooFloat16,
torch.float32: pygloo.glooDataType_t.glooFloat32,
torch.float64: pygloo.glooDataType_t.glooFloat64,
torch.double: pygloo.glooDataType_t.glooFloat64,
}
TORCH_NUMPY_DTYPE_MAP = {
# INT types
torch.int: numpy.int32,
torch.uint8: numpy.uint8,
torch.int8: numpy.int8,
torch.int32: numpy.int32,
torch.int64: numpy.int64,
torch.long: numpy.int64,
# FLOAT types
torch.half: numpy.half,
torch.float: numpy.float32,
torch.float16: numpy.float16,
torch.float32: numpy.float32,
torch.float64: numpy.float64,
}
def create_gloo_context(rank, world_size):
"""Create a GLOO context using GLOO APIs.
Args:
rank (int): the rank of this process.
world_size (int): the number of processes of this collective group.
Returns:
context (pygloo.Context): a GLOO context.
"""
context = pygloo.rendezvous.Context(rank, world_size)
return context
def get_gloo_reduce_op(reduce_op):
"""Map the reduce op to GLOO reduce op type.
Args:
reduce_op (ReduceOp): ReduceOp Enum (SUM/PRODUCT/MIN/MAX).
Returns:
(pygloo.ReduceOp): the mapped GLOO reduce op.
"""
if reduce_op not in GLOO_REDUCE_OP_MAP:
raise RuntimeError(
"Gloo does not support reduce op: '{}'.".format(reduce_op))
return GLOO_REDUCE_OP_MAP[reduce_op]
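# Hedged usage sketch (not part of the original module): for a supported op,
#     get_gloo_reduce_op(ReduceOp.SUM)   # -> pygloo.ReduceOp.SUM
# while any op missing from GLOO_REDUCE_OP_MAP raises RuntimeError.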
def get_gloo_tensor_dtype(tensor):
"""Return the corresponded GLOO dtype given a tensor."""
if isinstance(tensor, numpy.ndarray):
return NUMPY_GLOO_DTYPE_MAP[tensor.dtype.type]
if torch_available():
if isinstance(tensor, torch.Tensor):
if not tensor.is_cuda:
return TORCH_GLOO_DTYPE_MAP[tensor.dtype]
else:
raise ValueError("Expect torch CPU tensor. "
"Got {}.".format(tensor.device))
raise ValueError("Unsupported tensor type. "
"Got: {}.".format(type(tensor)))
def get_numpy_tensor_dtype(tensor):
"""Return the corresponded Cupy dtype given a tensor."""
if isinstance(tensor, numpy.ndarray):
return tensor.dtype.type
if torch_available():
if isinstance(tensor, torch.Tensor):
return TORCH_NUMPY_DTYPE_MAP[tensor.dtype]
raise ValueError("Unsupported tensor type. Got: {}. Supported "
"CPU tensor types are: torch.Tensor, "
"numpy.ndarray.".format(type(tensor)))
def get_tensor_ptr(tensor):
"""Return the pointer to the underlying memory storage of a tensor."""
if isinstance(tensor, numpy.ndarray):
return tensor.ctypes.data
if torch_available():
if isinstance(tensor, torch.Tensor):
if tensor.is_cuda:
raise RuntimeError("Torch tensor must be on CPU "
"when using GLOO collectives.")
return tensor.data_ptr()
raise ValueError("Unsupported tensor type. Got: {}. Supported "
"CPU tensor types are: torch.Tensor, "
"numpy.ndarray.".format(type(tensor)))
def get_tensor_n_elements(tensor):
"""Return the number of elements in a tensor."""
if isinstance(tensor, numpy.ndarray):
return tensor.size
if torch_available():
if isinstance(tensor, torch.Tensor):
return torch.numel(tensor)
raise ValueError("Unsupported tensor type. "
"Got: {}.".format(type(tensor)))
def get_gloo_store_path(store_name):
from ray._private.utils import get_ray_temp_dir
store_path = f"{get_ray_temp_dir()}_collective/gloo/{store_name}"
return store_path
def get_tensor_device(tensor):
if isinstance(tensor, numpy.ndarray):
return "cpu"
elif torch_available() and isinstance(tensor, torch.Tensor):
if not tensor.is_cuda:
return "cpu"
else:
return "cuda"
else:
raise RuntimeError("Unrecognized tensor type: "
"'{}'.".format(type(tensor)))
def get_tensor_shape(tensor):
"""Return the shape of the tensor as a list."""
if isinstance(tensor, numpy.ndarray):
return list(tensor.shape)
if torch_available():
if isinstance(tensor, torch.Tensor):
return list(tensor.size())
raise ValueError("Unsupported tensor type. Got: {}. Supported "
"CPU tensor types are: torch.Tensor, "
"numpy.ndarray.".format(type(tensor)))
def copy_tensor(dst_tensor, src_tensor):
"""Copy the content from src_tensor to dst_tensor.
Args:
        dst_tensor: the destination tensor to copy into.
        src_tensor: the source tensor to copy from.
Returns:
None
"""
copied = True
if isinstance(dst_tensor, numpy.ndarray) \
and isinstance(src_tensor, numpy.ndarray):
numpy.copyto(dst_tensor, src_tensor)
elif torch_available():
if isinstance(dst_tensor, torch.Tensor) and isinstance(
src_tensor, torch.Tensor):
dst_tensor.copy_(src_tensor)
elif isinstance(dst_tensor, torch.Tensor) and isinstance(
src_tensor, numpy.ndarray):
t = torch.Tensor(src_tensor)
dst_tensor.copy_(t)
elif isinstance(dst_tensor, numpy.ndarray) and isinstance(
src_tensor, torch.Tensor):
t = src_tensor.numpy()
numpy.copyto(dst_tensor, t)
else:
copied = False
else:
copied = False
if not copied:
raise ValueError("Unsupported tensor type. Got: {} and {}. Supported "
"CPU tensor types are: torch.Tensor, numpy.ndarray."
.format(type(dst_tensor), type(src_tensor)))
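# Hedged usage sketch (not part of the original module): given
#     dst = numpy.zeros(3)
#     src = numpy.ones(3)
# copy_tensor(dst, src) overwrites dst in place, so dst becomes [1., 1., 1.];
# mixed numpy/torch pairs are converted before copying, and any other
# combination of types raises ValueError.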
# Note(Hao): this requires Ray >= 1.2.0,
# otherwise _QueueActor is an actor class.
class glooQueue(_QueueActor):
def index(self, group_name):
try:
return self.queue._queue.index(group_name)
except ValueError:
return -1
@ray.remote(num_cpus=0)
class SignalActor:
def __init__(self, world_size):
self.ready_events = [asyncio.Event() for _ in range(world_size)]
self.world_size = world_size
def send(self, rank, clear=False):
self.ready_events[rank].set()
if clear:
self.ready_events[rank].clear()
async def wait(self, should_wait=True):
if should_wait:
for i in range(self.world_size):
await self.ready_events[i].wait()
|
pcmoritz/ray-1
|
python/ray/util/collective/collective_group/gloo_util.py
|
Python
|
apache-2.0
| 8,436
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import h2o
from h2o.exceptions import H2OValueError
from tests import pyunit_utils
def rbind_check():
"""Test H2OFrame.rbind() function."""
frame1 = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars.csv"))
nrows1 = frame1.nrow
frame2 = frame1.rbind(frame1)
nrows2 = frame2.nrow
assert nrows2 == 2 * nrows1
frame3 = frame2.rbind(frame2)
nrows3 = frame3.nrow
assert nrows3 == 4 * nrows1
frame4 = h2o.H2OFrame({"a": [1, 2, 3, 4, 5]})
frame5 = frame4.rbind([frame4] * 9)
assert frame5.nrow == frame4.nrow * 10
try:
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
frame1.rbind(iris)
assert False, "Expected the rbind of cars and iris to fail, but it didn't"
except H2OValueError:
pass
frame6 = h2o.H2OFrame({"a": [1.1, 1.2, 1.3]})
frameNew = frame4.rbind(frame6)
assert frameNew.nrow==(frame6.nrow+frame4.nrow), "Expected number of row: {0}, Actual number of row: " \
"{1}".format((frame6.nrow+frame4.nrow), frameNew.nrow)
try:
frame7 = h2o.H2OFrame({"b": [1, 2, 3, 4, 5]})
frame4.rbind(frame7)
assert False, "Expected the rbind of vecs with different names to fail"
except H2OValueError:
pass
frame8 = h2o.H2OFrame({"a": [-1, -2, -3]})
frame9 = frame4.rbind(frame8)
frameA = frame8.rbind(frame4)
assert frame9.nrow == frameA.nrow == frame4.nrow + frame8.nrow
if __name__ == "__main__":
pyunit_utils.standalone_test(rbind_check)
else:
rbind_check()
|
h2oai/h2o-3
|
h2o-py/tests/testdir_munging/pyunit_rbind.py
|
Python
|
apache-2.0
| 1,643
|
from . import registration
from . import user
from . import test
from . import hype
from . import organize
from . import mentor
from . import judge
from .index import IndexPage
from .help import HelpPage
from .links.index import LinksPage
from .hacks.index import HacksPage
from django.shortcuts import render
def handler404(request):
response = render(request, 'error/404/index.html')
response.status_code = 404
return response
def handler500(request):
response = render(request, 'error/500/index.html')
response.status_code = 500
return response
|
andrewsosa/hackfsu_com
|
api/webapp/views/__init__.py
|
Python
|
apache-2.0
| 576
|
#
# IIT Kharagpur - Hall Management System
# System to manage Halls of residences, Warden grant requests, student complaints
# hall worker attendances and salary payments
#
# MIT License
#
"""
@ authors: Madhav Datt, Avikalp Srivastava
"""
import password_validation as pv
import re
import db_rebuild as dbr
from ..workers import clerk, mess_manager
def is_valid(password):
"""
Check if passed plain-text string is a valid password
Valid passwords - minimum criteria:
8 characters
1 capital letter
1 numerical value
no spaces
"""
present_capital = re.search(r'[A-Z]', password, re.M)
present_num = re.search(r'\d', password, re.M)
if (len(password) >= 8) and (" " not in password) and present_capital and present_num:
return True
return False
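# Hedged usage sketch (not part of the original module), following the checks
# above:
#     is_valid("Secret123")    # True  (>= 8 chars, capital, digit, no space)
#     is_valid("secret123")    # False (no capital letter)
#     is_valid("Secret 123")   # False (contains a space)
#     is_valid("Sh0rt")        # False (fewer than 8 characters)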
def authenticate(table, user_ID, password):
"""
Authenticate login with entered user_ID and password
Check table to match and return True if correct
"""
if table == "clerk":
table_data = dbr.rebuild("worker")
if user_ID not in table_data:
return False
if isinstance(table_data[user_ID], clerk.Clerk):
if pv.check_password(password, table_data[user_ID].password):
return True
elif table == "mess_manager":
table_data = dbr.rebuild("worker")
if user_ID not in table_data:
return False
if isinstance(table_data[user_ID], mess_manager.MessManager):
if pv.check_password(password, table_data[user_ID].password):
return True
elif table == "student":
table_data = dbr.rebuild(table)
if user_ID not in table_data:
return False
if pv.check_password(password, table_data[user_ID].password):
return True
elif table == "warden":
table_data = dbr.rebuild(table)
if user_ID not in table_data:
return False
if pv.check_password(password, table_data[user_ID].password):
return True
elif table == "hmc":
table_data = dbr.rebuild(table)
for key in table_data:
if pv.check_password(password, table_data[key].password):
return True
return False
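# Hedged usage sketch (not part of the original module; user ID 42 is purely
# illustrative):
#     authenticate("student", 42, "Secret123")
# returns True only when the user exists in the rebuilt "student" table and
# pv.check_password() matches the stored password; unknown user IDs and
# unrecognised table names fall through to False.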
|
madhav-datt/kgp-hms
|
src/database/login.py
|
Python
|
mit
| 2,263
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
from webob import exc
from nova.api.openstack.compute.contrib import flavor_access
from nova.api.openstack.compute import flavors
from nova.compute import instance_types
from nova import context
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
def generate_instance_type(flavorid, ispublic):
return {
'id': flavorid,
'flavorid': str(flavorid),
'root_gb': 1,
'ephemeral_gb': 1,
'name': u'test',
'deleted': False,
'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
'updated_at': None,
'memory_mb': 512,
'vcpus': 1,
'swap': 512,
'rxtx_factor': 1.0,
'extra_specs': {},
'deleted_at': None,
'vcpu_weight': None,
'is_public': bool(ispublic)
}
INSTANCE_TYPES = {
'0': generate_instance_type(0, True),
'1': generate_instance_type(1, True),
'2': generate_instance_type(2, False),
'3': generate_instance_type(3, False)}
ACCESS_LIST = [{'flavor_id': '2', 'project_id': 'proj2'},
{'flavor_id': '2', 'project_id': 'proj3'},
{'flavor_id': '3', 'project_id': 'proj3'}]
def fake_get_instance_type_access_by_flavor_id(flavorid):
res = []
for access in ACCESS_LIST:
if access['flavor_id'] == flavorid:
res.append(access)
return res
def fake_get_instance_type_by_flavor_id(flavorid):
return INSTANCE_TYPES[flavorid]
def _has_flavor_access(flavorid, projectid):
for access in ACCESS_LIST:
if access['flavor_id'] == flavorid and \
access['project_id'] == projectid:
return True
return False
def fake_get_all_types(context, inactive=0, filters=None):
if filters == None or filters['is_public'] == None:
return INSTANCE_TYPES
res = {}
for k, v in INSTANCE_TYPES.iteritems():
if filters['is_public'] and _has_flavor_access(k, context.project_id):
res.update({k: v})
continue
if v['is_public'] == filters['is_public']:
res.update({k: v})
return res
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
class FlavorAccessTest(test.TestCase):
def setUp(self):
super(FlavorAccessTest, self).setUp()
self.flavor_controller = flavors.Controller()
self.flavor_access_controller = flavor_access.FlavorAccessController()
self.flavor_action_controller = flavor_access.FlavorActionController()
self.req = FakeRequest()
self.context = self.req.environ['nova.context']
self.stubs.Set(instance_types, 'get_instance_type_by_flavor_id',
fake_get_instance_type_by_flavor_id)
self.stubs.Set(instance_types, 'get_all_types', fake_get_all_types)
self.stubs.Set(instance_types, 'get_instance_type_access_by_flavor_id',
fake_get_instance_type_access_by_flavor_id)
def _verify_flavor_list(self, result, expected):
# result already sorted by flavor_id
self.assertEqual(len(result), len(expected))
for d1, d2 in zip(result, expected):
self.assertEqual(d1['id'], d2['id'])
def test_list_flavor_access_public(self):
# query os-flavor-access on public flavor should return 404
req = fakes.HTTPRequest.blank('/v2/fake/flavors/os-flavor-access',
use_admin_context=True)
self.assertRaises(exc.HTTPNotFound,
self.flavor_access_controller.index,
self.req, '1')
def test_list_flavor_access_private(self):
expected = {'flavor_access': [
{'flavor_id': '2', 'tenant_id': 'proj2'},
{'flavor_id': '2', 'tenant_id': 'proj3'}]}
result = self.flavor_access_controller.index(self.req, '2')
self.assertEqual(result, expected)
def test_list_flavor_with_admin_default_proj1(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/flavors',
use_admin_context=True)
req.environ['nova.context'].project_id = 'proj1'
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_default_proj2(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'}]}
req = fakes.HTTPRequest.blank('/v2/fake/flavors',
use_admin_context=True)
req.environ['nova.context'].project_id = 'proj2'
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_true(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/flavors?is_public=true',
use_admin_context=True)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_false(self):
expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
req = fakes.HTTPRequest.blank('/v2/fake/flavors?is_public=false',
use_admin_context=True)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_false_proj2(self):
expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
req = fakes.HTTPRequest.blank('/v2/fake/flavors?is_public=false',
use_admin_context=True)
req.environ['nova.context'].project_id = 'proj2'
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_admin_ispublic_none(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'},
{'id': '3'}]}
req = fakes.HTTPRequest.blank('/v2/fake/flavors?is_public=none',
use_admin_context=True)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_default(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/flavors',
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_ispublic_true(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/flavors?is_public=true',
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_ispublic_false(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/flavors?is_public=false',
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_list_flavor_with_no_admin_ispublic_none(self):
expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
req = fakes.HTTPRequest.blank('/v2/fake/flavors?is_public=none',
use_admin_context=False)
result = self.flavor_controller.index(req)
self._verify_flavor_list(result['flavors'], expected['flavors'])
def test_add_tenant_access(self):
def stub_add_instance_type_access(flavorid, projectid, ctxt=None):
self.assertEqual('3', flavorid, "flavorid")
self.assertEqual("proj2", projectid, "projectid")
self.stubs.Set(instance_types, 'add_instance_type_access',
stub_add_instance_type_access)
expected = {'flavor_access':
[{'flavor_id': '3', 'tenant_id': 'proj3'}]}
body = {'addTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank('/v2/fake/flavors/2/action',
use_admin_context=True)
result = self.flavor_action_controller.\
_addTenantAccess(req, '3', body)
self.assertEqual(result, expected)
def test_add_tenant_access_with_already_added_access(self):
def stub_add_instance_type_access(flavorid, projectid, ctxt=None):
raise exception.FlavorAccessExists(flavor_id=flavorid,
project_id=projectid)
self.stubs.Set(instance_types, 'add_instance_type_access',
stub_add_instance_type_access)
body = {'addTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank('/v2/fake/flavors/2/action',
use_admin_context=True)
self.assertRaises(exc.HTTPConflict,
self.flavor_action_controller._addTenantAccess,
self.req, '3', body)
def test_remove_tenant_access_with_bad_access(self):
def stub_remove_instance_type_access(flavorid, projectid, ctxt=None):
raise exception.FlavorAccessNotFound(flavor_id=flavorid,
project_id=projectid)
self.stubs.Set(instance_types, 'remove_instance_type_access',
stub_remove_instance_type_access)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
req = fakes.HTTPRequest.blank('/v2/fake/flavors/2/action',
use_admin_context=True)
self.assertRaises(exc.HTTPNotFound,
self.flavor_action_controller._removeTenantAccess,
self.req, '3', body)
class FlavorAccessSerializerTest(test.TestCase):
def test_serializer_empty(self):
serializer = flavor_access.FlavorAccessTemplate()
text = serializer.serialize(dict(flavor_access=[]))
tree = etree.fromstring(text)
self.assertEqual(len(tree), 0)
def test_serializer(self):
expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<flavor_access>'
'<access tenant_id="proj2" flavor_id="2"/>'
'<access tenant_id="proj3" flavor_id="2"/>'
'</flavor_access>')
access_list = [{'flavor_id': '2', 'tenant_id': 'proj2'},
{'flavor_id': '2', 'tenant_id': 'proj3'}]
serializer = flavor_access.FlavorAccessTemplate()
text = serializer.serialize(dict(flavor_access=access_list))
self.assertEqual(text, expected)
|
maheshp/novatest
|
nova/tests/api/openstack/compute/contrib/test_flavor_access.py
|
Python
|
apache-2.0
| 11,762
|
### This is a simple implementation of the Metropolis algorithm without an external magnetic field; at most 1000 Monte Carlo steps are performed.
from random import uniform
from math import *
import commands
J=-5.0
m=30
n=30
t=0.2
beta=1/t
d1={}
S=0
# build the neighbour lookup table with periodic boundary conditions
d2={}
for i in range(1,m-1):
for j in range(1,n-1):
d2[(i,j)]=[(i-1,j),(i+1,j),(i,j-1),(i,j+1)]
for i in range(1,m-1):
d2[i,0]=[(i-1,0),(i+1,0),(i,1),(i,n-1)]
d2[i,n-1]=[(i-1,n-1),(i+1,n-1),(i,n-2),(i,0)]
for j in range(1,n-1):
d2[0,j]=[(1,j),(m-1,j),(0,j-1),(0,j+1)]
d2[m-1,j]=[(m-2,j),(0,j),(m-1,j-1),(m-1,j+1)]
d2[0,0]=[(0,1),(1,0),(m-1,0),(0,n-1)]
d2[0,n-1]=[(0,0),(0,n-2),(1,n-1),(m-1,n-1)]
d2[m-1,0]=[(m-2,0),(0,0),(m-1,1),(m-1,n-1)]
d2[m-1,n-1]=[(m-2,n-1),(0,n-1),(m-1,0),(m-1,n-2)]
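# Sanity-check sketch (added, not in the original script): with periodic
# boundaries the corner site (0, 0) wraps around in both directions, so for
# m = n = 30 the table built above gives
#     d2[(0, 0)] == [(0, 1), (1, 0), (29, 0), (0, 29)]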
ribbon_no=0
for count in range(100):
#assignment of random spin
for i in range(m):
for j in range(n):
r1=uniform(0.0,1.0)
if r1<=0.5:
d1[(i,j)]=-1
S-=1
else :
d1[(i,j)]=1
S+=1
    # perform the Monte Carlo sweeps
    # q counts the Monte Carlo steps; at most 1000 are performed
from random import uniform
slist=[]
q=0
cond=0
while q<=1000:
for w in range(m*n):
i=int(uniform(0.0,m*1.0))
j=int(uniform(0.0,n*1.0))
dh=-2*J*d1[(i,j)]*(d1[d2[(i,j)][0]]+d1[d2[(i,j)][1]]+d1[d2[(i,j)][2]]+d1[d2[(i,j)][3]])
if dh<=0:
S+=-2*d1[(i,j)]
d1[(i,j)]=-d1[(i,j)]
elif exp(-1*beta*dh)>=uniform(0.0,1.0):
d1[(i,j)]=-d1[(i,j)]
S+=2*d1[(i,j)]
q+=1
slist.append(S)
if q>100:
avin=0
for p in range(1,100):
avin+=abs(slist[-1*p]-slist[-1])
if avin==0:
cond=1
break
if cond==1:
ribbon_no+=1
print ribbon_no
break
|
debsankha/bedtime-programming
|
monte_carlo/ribbon.py
|
Python
|
gpl-3.0
| 1,694
|
import ast
import os
import urlparse
# Import global settings to make it easier to extend settings.
from django.conf.global_settings import * # pylint: disable=W0614,W0401
import dj_database_url
#==============================================================================
# Generic Django project settings
#==============================================================================
DEBUG = ast.literal_eval(os.environ.get('DEBUG', 'True'))
SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'UTC'
USE_TZ = True
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ['WEB_SECRET_KEY']
AUTH_USER_MODEL = 'accounts.User'
INSTALLED_APPS = (
'botbot.apps.accounts',
'botbot.apps.bots',
'botbot.apps.logs',
'botbot.apps.plugins',
'botbot.apps.kudos',
'botbot.core',
'launchpad',
'pipeline',
'django_statsd',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.sitemaps',
'bootstrap_toolkit',
)
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
#==============================================================================
# Calculation of directories relative to the project module location
#==============================================================================
import os
import sys
import botbot as project_module
PROJECT_DIR = os.path.dirname(os.path.realpath(project_module.__file__))
PYTHON_BIN = os.path.dirname(sys.executable)
ve_path = os.path.dirname(os.path.dirname(os.path.dirname(PROJECT_DIR)))
if "VAR_ROOT" in os.environ:
VAR_ROOT = os.environ.get("VAR_ROOT")
# Assume that the presence of 'activate_this.py' in the python bin/
# directory means that we're running in a virtual environment.
elif os.path.exists(os.path.join(PYTHON_BIN, 'activate_this.py')):
# We're running with a virtualenv python executable.
VAR_ROOT = os.path.join(os.path.dirname(PYTHON_BIN), 'var')
elif ve_path and os.path.exists(os.path.join(ve_path, 'bin',
'activate_this.py')):
# We're running in [virtualenv_root]/src/[project_name].
VAR_ROOT = os.path.join(ve_path, 'var')
else:
# Set the variable root to the local configuration location (which is
# ignored by the repository).
VAR_ROOT = os.path.join(PROJECT_DIR, 'conf', 'local')
if not os.path.exists(VAR_ROOT):
os.mkdir(VAR_ROOT)
#==============================================================================
# Project URLS and media settings
#==============================================================================
ROOT_URLCONF = 'botbot.urls'
INCLUDE_DJANGO_ADMIN = ast.literal_eval(os.environ.get(
'INCLUDE_DJANGO_ADMIN', 'True'))
STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(VAR_ROOT, 'static'))
MEDIA_ROOT = os.environ.get('MEDIA_ROOT', os.path.join(VAR_ROOT, 'uploads'))
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'static'),
)
# Defines PIPELINE settings and bundles
from ._asset_pipeline import *
DATABASES = {'default': dj_database_url.config(env='STORAGE_URL')}
# Reuse database connections
DATABASES['default'].update({
'CONN_MAX_AGE': None,
'ATOMIC_REQUESTS': True,
'OPTIONS': {"application_name": "django"},
})
GEOIP_CITY_DB_PATH = os.environ.get('GEOIP_CITY_DB_PATH',
os.path.join(VAR_ROOT, 'GeoLite2-City.mmdb'))
#==============================================================================
# Templates
#==============================================================================
import pipeline
TEMPLATES = [
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
os.path.join(os.path.dirname(pipeline.__file__), 'templates'),
],
'OPTIONS': {
'environment': 'botbot.jinja2.environment',
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.request",
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
),
'debug': DEBUG,
},
},
]
#==============================================================================
# Middleware
#==============================================================================
MIDDLEWARE_CLASSES = (
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
) + MIDDLEWARE_CLASSES + (
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'botbot.core.middleware.TimezoneMiddleware',
)
#==============================================================================
# Auth / security
#============================================================================
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split(',')
AUTHENTICATION_BACKENDS += (
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
#==============================================================================
# Logger project settings
#==============================================================================
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'filters': []
}
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'botbot': {
'handlers': ['console'],
'level': 'INFO',
}
}
}
#=============================================================================
# Cache
#=============================================================================
if 'MEMCACHE_URL' in os.environ:
DEFAULT_CACHE = {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': os.environ['MEMCACHE_URL'],
}
else:
DEFAULT_CACHE = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'botbot',
}
CACHES = {
'default': DEFAULT_CACHE
}
CACHE_MIDDLEWARE_SECONDS = 600 # Unit is second
#=============================================================================
# Email
#=============================================================================
ADMINS = (
('LL', 'info@lincolnloop.com'),
)
EMAIL_SUBJECT_PREFIX = "[BBME] "
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#==============================================================================
# Miscellaneous project settings
#==============================================================================
# Above this many users is considered a big channel, display is different
BIG_CHANNEL = 25
# Nicks requested to be excluded from logging
EXCLUDE_NICKS = os.environ.get('EXCLUDE_NICKS', '').split(',')
if EXCLUDE_NICKS == ['']:
EXCLUDE_NICKS = []
REDIS_PLUGIN_QUEUE_URL = os.environ.get('REDIS_PLUGIN_QUEUE_URL')
REDIS_PLUGIN_STORAGE_URL = os.environ.get('REDIS_PLUGIN_STORAGE_URL')
COMMAND_PREFIX = os.environ.get('COMMAND_PREFIX')
PUSH_STREAM_URL = os.environ.get('PUSH_STREAM_URL', None)
# ==============================================================================
# Third party app settings
# ==============================================================================
# SOUTH_DATABASE_ADAPTERS = {'default': 'south.db.postgresql_psycopg2'}
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
SOCIAL_AUTH_DEFAULT_USERNAME = 'user'
SOCIAL_AUTH_ASSOCIATE_BY_MAIL = True
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/accounts/manage/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/accounts/login/?error'
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
#'social.pipeline.user.get_username',
#'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_by_email',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
# Allauth
ACCOUNT_LOGOUT_ON_GET = (True)
# Statsd
STATSD_CLIENT = 'django_statsd.clients.request_aggregate'
STATSD_PATCHES = [
'django_statsd.patches.db',
'django_statsd.patches.cache',
]
STATSD_PREFIX = os.environ.get('STATSD_PREFIX', 'bbme')
DJANGO_HSTORE_ADAPTER_REGISTRATION = 'connection'
|
metabrainz/botbot-web
|
botbot/settings/base.py
|
Python
|
mit
| 10,645
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Domain query failed, please try again later.
FAILEDOPERATION_CHECKDOMAINFAILED = 'FailedOperation.CheckDomainFailed'
# Failed to create the template.
FAILEDOPERATION_CREATETEMPLATEFAILED = 'FailedOperation.CreateTemplateFailed'
# Failed to delete the template, please try again later.
FAILEDOPERATION_DELETETEMPLATEFAILED = 'FailedOperation.DeleteTemplateFailed'
# Failed to get domain information, please try again later.
FAILEDOPERATION_DESCRIBEDOMAINFAILED = 'FailedOperation.DescribeDomainFailed'
# Failed to get domain information, please try again later.
FAILEDOPERATION_DESCRIBEDOMAINLISTFAILED = 'FailedOperation.DescribeDomainListFailed'
# Failed to query the template.
FAILEDOPERATION_DESCRIBETEMPLATEFAILED = 'FailedOperation.DescribeTemplateFailed'
# Failed to get the domain price list.
FAILEDOPERATION_DOMAINPRICELISTFAILED = 'FailedOperation.DomainPriceListFailed'
# The same phone number/email already exists under the current account; no need to add it again.
FAILEDOPERATION_DUPLICATEPHONEEMAIL = 'FailedOperation.DuplicatePhoneEmail'
# Domain ownership transfer failed.
FAILEDOPERATION_MODIFYDOMAINOWNERFAILED = 'FailedOperation.ModifyDomainOwnerFailed'
# Domain registration failed, please try again later.
FAILEDOPERATION_REGISTERDOMAIN = 'FailedOperation.RegisterDomain'
# Domain registration failed, please try again later.
FAILEDOPERATION_REGISTERDOMAINFAILED = 'FailedOperation.RegisterDomainFailed'
# The current account is a CloudBase (TCB) account and cannot use the verification feature; please switch to a Mini Program/Official Account login and try again.
FAILEDOPERATION_SENDTCBPHONEEMAILCODEFAILED = 'FailedOperation.SendTcbPhoneEmailCodeFailed'
# Verification codes are being sent too frequently, please try again later.
FAILEDOPERATION_SENDVERIFYCODEISLIMITED = 'FailedOperation.SendVerifyCodeIsLimited'
# Failed to modify DNS; please enter a valid DNS server address.
FAILEDOPERATION_SETDOMAINDNSFAILED = 'FailedOperation.SetDomainDnsFailed'
# The number of information templates exceeds the upper limit; delete an existing template and try again.
FAILEDOPERATION_TEMPLATEMAXNUMFAILED = 'FailedOperation.TemplateMaxNumFailed'
# Failed to upload the image.
FAILEDOPERATION_UPLOADIMAGEFAILED = 'FailedOperation.UploadImageFailed'
# Internal error.
INTERNALERROR = 'InternalError'
# Network error, please try again later.
INTERNALERROR_DOMAININTERNALERROR = 'InternalError.DomainInternalError'
# Invalid parameter.
INVALIDPARAMETER = 'InvalidParameter'
# The qualification information entered is incorrect.
INVALIDPARAMETER_CERTIFICATECODEISINVALID = 'InvalidParameter.CertificateCodeIsInvalid'
# The qualification photo entered is incorrect.
INVALIDPARAMETER_CERTIFICATEIMAGEISINVALID = 'InvalidParameter.CertificateImageIsInvalid'
# The type can only be phone or email.
INVALIDPARAMETER_CODETYPEISINVALID = 'InvalidParameter.CodeTypeIsInvalid'
# No permission to use custom DNS.
INVALIDPARAMETER_CUSTOMDNSNOTALLOWED = 'InvalidParameter.CustomDnsNotAllowed'
# The domain name is empty or invalid.
INVALIDPARAMETER_DOMAINNAMEISINVALID = 'InvalidParameter.DomainNameIsInvalid'
# Duplicate domain names exist; please check and resubmit.
INVALIDPARAMETER_DUPLICATEDOMAINEXISTS = 'InvalidParameter.DuplicateDomainExists'
# The email is empty or invalid.
INVALIDPARAMETER_EMAILISINVALID = 'InvalidParameter.EmailIsInvalid'
# Only verified email addresses are supported; please create one in the console first.
INVALIDPARAMETER_EMAILISUNVERIFIED = 'InvalidParameter.EmailIsUnverified'
# This file format is not supported; please upload a JPG or JPEG image (a third-party image format conversion tool can be used).
INVALIDPARAMETER_IMAGEEXTINVALID = 'InvalidParameter.ImageExtInvalid'
# The uploaded photo parameter is empty or invalid.
INVALIDPARAMETER_IMAGEFILEISINVALID = 'InvalidParameter.ImageFileIsInvalid'
# The image is not a standard JPG/JPEG; please convert the format with a tool and upload again (a third-party image format conversion tool can be used).
INVALIDPARAMETER_IMAGEFORMATISINVALID = 'InvalidParameter.ImageFormatIsInvalid'
# The image is below the minimum size limit (55 KB); please upload again.
INVALIDPARAMETER_IMAGESIZEBELOW = 'InvalidParameter.ImageSizeBelow'
# The image is too large; please reduce its size and try again.
INVALIDPARAMETER_IMAGESIZEEXCEED = 'InvalidParameter.ImageSizeExceed'
# The image exceeds the size limit (1 MB); please upload again.
INVALIDPARAMETER_IMAGESIZELIMIT = 'InvalidParameter.ImageSizeLimit'
# The contact is empty or invalid.
INVALIDPARAMETER_NAMEISINVALID = 'InvalidParameter.NameIsInvalid'
# The contact is filled in incorrectly or cannot be used for other reasons; please use a different contact.
INVALIDPARAMETER_NAMEISKEYWORD = 'InvalidParameter.NameIsKeyword'
# The registrant is empty or invalid.
INVALIDPARAMETER_ORGISINVALID = 'InvalidParameter.OrgIsInvalid'
# The domain owner is filled in incorrectly or cannot be used for other reasons; please use a different domain owner.
INVALIDPARAMETER_ORGISKEYWORD = 'InvalidParameter.OrgIsKeyword'
# The special-offer package ID is invalid.
INVALIDPARAMETER_PACKAGERESOURCEIDINVALID = 'InvalidParameter.PackageResourceIdInvalid'
# Invalid request type.
INVALIDPARAMETER_REPTYPEISINVALID = 'InvalidParameter.RepTypeIsInvalid'
# The address is incorrect; please pass in a valid address.
INVALIDPARAMETER_STREETISINVALID = 'InvalidParameter.StreetIsInvalid'
# The telephone number is empty or invalid.
INVALIDPARAMETER_TELEPHONEISINVALID = 'InvalidParameter.TelephoneIsInvalid'
# Only verified phone numbers are supported; please create one in the console first.
INVALIDPARAMETER_TELEPHONEISUNVERIFIED = 'InvalidParameter.TelephoneIsUnverified'
# The number of domains cannot exceed 4000.
INVALIDPARAMETER_UPTO4000 = 'InvalidParameter.UpTo4000'
# The user type is empty or invalid.
INVALIDPARAMETER_USERTYPEISINVALID = 'InvalidParameter.UserTypeIsInvalid'
# The verification code is incorrect; please re-enter it.
INVALIDPARAMETER_VERIFYCODEISINVALID = 'InvalidParameter.VerifyCodeIsInvalid'
# The postal code is empty or invalid.
INVALIDPARAMETER_ZIPCODEISINVALID = 'InvalidParameter.ZipCodeIsInvalid'
# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'
# The domain name cannot be empty.
MISSINGPARAMETER_DOMAINISEMPTY = 'MissingParameter.DomainIsEmpty'
# The request data cannot be empty.
MISSINGPARAMETER_REPDATAISNONE = 'MissingParameter.RepDataIsNone'
# The template ID is empty or invalid.
MISSINGPARAMETER_TEMPLATEIDISEMPTY = 'MissingParameter.TemplateIdIsEmpty'
# The template already exists.
MISSINGPARAMETER_TEMPLATEIDISEXIST = 'MissingParameter.TemplateIdIsExist'
# The number of requests exceeds the rate limit.
REQUESTLIMITEXCEEDED = 'RequestLimitExceeded'
# Too many tasks are currently running; please submit new tasks later.
RESOURCEINSUFFICIENT_OVERWORK = 'ResourceInsufficient.Overwork'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The template has not passed real-name verification.
RESOURCENOTFOUND_APPROVEDTEMPLATENOTFOUND = 'ResourceNotFound.ApprovedTemplateNotFound'
# The domain address is incorrect; please enter a valid domain address.
RESOURCENOTFOUND_DOMAINNOTFOUND = 'ResourceNotFound.DomainNotFound'
# The template information is incorrect; please enter valid information.
RESOURCENOTFOUND_TEMPLATENOTFOUND = 'ResourceNotFound.TemplateNotFound'
# An operation of the same type is already in progress for this domain; this operation cannot be performed.
RESOURCEUNAVAILABLE_DOMAINISMODIFYINGDNS = 'ResourceUnavailable.DomainIsModifyingDNS'
# The account has not passed real-name verification.
UNSUPPORTEDOPERATION_ACCOUNTREALNAME = 'UnsupportedOperation.AccountRealName'
# The current domain status does not support modification.
UNSUPPORTEDOPERATION_MODIFYDOMAININFOUNSUPPORTED = 'UnsupportedOperation.ModifyDomainInfoUnsupported'
# The current domain status does not support modification.
UNSUPPORTEDOPERATION_MODIFYDOMAINUNSUPPORTED = 'UnsupportedOperation.ModifyDomainUnsupported'
|
tzpBingo/github-trending
|
codespace/python/tencentcloud/domain/v20180808/errorcodes.py
|
Python
|
mit
| 8,078
|
#!/usr/bin/env python
# coding: utf-8
# References:
# man curl
# https://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
# https://curl.haxx.se/libcurl/c/easy_getinfo_options.html
# http://blog.kenweiner.com/2014/11/http-request-timings-with-curl.html
from __future__ import print_function
import os
import json
import sys
import logging
import tempfile
import subprocess
__version__ = '1.2.1'
PY3 = sys.version_info >= (3,)
if PY3:
xrange = range
# Env class is copied from https://github.com/reorx/getenv/blob/master/getenv.py
class Env(object):
prefix = 'HTTPSTAT'
_instances = []
def __init__(self, key):
self.key = key.format(prefix=self.prefix)
Env._instances.append(self)
def get(self, default=None):
return os.environ.get(self.key, default)
ENV_SHOW_BODY = Env('{prefix}_SHOW_BODY')
ENV_SHOW_IP = Env('{prefix}_SHOW_IP')
ENV_SHOW_SPEED = Env('{prefix}_SHOW_SPEED')
ENV_SAVE_BODY = Env('{prefix}_SAVE_BODY')
ENV_CURL_BIN = Env('{prefix}_CURL_BIN')
ENV_DEBUG = Env('{prefix}_DEBUG')
curl_format = """{
"time_namelookup": %{time_namelookup},
"time_connect": %{time_connect},
"time_appconnect": %{time_appconnect},
"time_pretransfer": %{time_pretransfer},
"time_redirect": %{time_redirect},
"time_starttransfer": %{time_starttransfer},
"time_total": %{time_total},
"speed_download": %{speed_download},
"speed_upload": %{speed_upload},
"remote_ip": "%{remote_ip}",
"remote_port": "%{remote_port}",
"local_ip": "%{local_ip}",
"local_port": "%{local_port}"
}"""
https_template = """
total:{b0004}
starttransfer:{b0003} |
pretransfer:{b0002} | |
connect:{b0001} | | |
namelookup:{b0000} | | | |
| | | | |
[ {a0000} | {a0001} | {a0002} | {a0003} | {a0004} ]
DNS Lookup TCP Connection TLS Handshake Server Processing Content Transfer
"""[1:]
http_template = """
total:{b0004}
starttransfer:{b0003} |
connect:{b0001} | |
namelookup:{b0000} | | |
| | | |
[ {a0000} | {a0001} | {a0003} | {a0004} ]
DNS Lookup TCP Connection Server Processing Content Transfer
"""[1:]
# Color code is copied from https://github.com/reorx/python-terminal-color/blob/master/color_simple.py
ISATTY = sys.stdout.isatty()
def make_color(code):
def color_func(s):
if not ISATTY:
return s
tpl = '\x1b[{}m{}\x1b[0m'
return tpl.format(code, s)
return color_func
red = make_color(31)
green = make_color(32)
yellow = make_color(33)
blue = make_color(34)
magenta = make_color(35)
cyan = make_color(36)
bold = make_color(1)
underline = make_color(4)
grayscale = {(i - 232): make_color('38;5;' + str(i)) for i in xrange(232, 256)}
def quit(s, code=0):
if s is not None:
print(s)
sys.exit(code)
def print_help():
help = """
Usage: httpstat URL [CURL_OPTIONS]
httpstat -h | --help
httpstat --version
Arguments:
URL url to request, could be with or without `http(s)://` prefix
Options:
CURL_OPTIONS any curl supported options, except for -w -D -o -S -s,
which are already used internally.
-h --help show this screen.
--version show version.
Environments:
HTTPSTAT_SHOW_BODY Set to `true` to show response body in the output,
note that body length is limited to 1023 bytes, will be
truncated if exceeds. Default is `false`.
HTTPSTAT_SHOW_IP By default httpstat shows remote and local IP/port address.
Set to `false` to disable this feature. Default is `true`.
HTTPSTAT_SHOW_SPEED Set to `true` to show download and upload speed.
Default is `false`.
HTTPSTAT_SAVE_BODY By default httpstat stores body in a tmp file,
set to `false` to disable this feature. Default is `true`
HTTPSTAT_CURL_BIN Indicate the curl bin path to use. Default is `curl`
from current shell $PATH.
HTTPSTAT_DEBUG Set to `true` to see debugging logs. Default is `false`
"""[1:-1]
print(help)
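# Example invocation (hypothetical host), combining an environment toggle with
# extra options that are passed straight through to curl:
#   HTTPSTAT_SHOW_SPEED=true httpstat example.com -H 'Accept: application/json'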
def main():
args = sys.argv[1:]
if not args:
print_help()
quit(None, 0)
# get envs
show_body = 'true' in ENV_SHOW_BODY.get('false').lower()
show_ip = 'true' in ENV_SHOW_IP.get('true').lower()
show_speed = 'true' in ENV_SHOW_SPEED.get('false').lower()
save_body = 'true' in ENV_SAVE_BODY.get('true').lower()
curl_bin = ENV_CURL_BIN.get('curl')
is_debug = 'true' in ENV_DEBUG.get('false').lower()
# configure logging
if is_debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(level=log_level)
lg = logging.getLogger('httpstat')
# log envs
lg.debug('Envs:\n%s', '\n'.join(' {}={}'.format(i.key, i.get('')) for i in Env._instances))
lg.debug('Flags: %s', dict(
show_body=show_body,
show_ip=show_ip,
show_speed=show_speed,
save_body=save_body,
curl_bin=curl_bin,
is_debug=is_debug,
))
# get url
url = args[0]
if url in ['-h', '--help']:
print_help()
quit(None, 0)
elif url == '--version':
print('httpstat {}'.format(__version__))
quit(None, 0)
curl_args = args[1:]
# check curl args
exclude_options = [
'-w', '--write-out',
'-D', '--dump-header',
'-o', '--output',
'-s', '--silent',
]
for i in exclude_options:
if i in curl_args:
quit(yellow('Error: {} is not allowed in extra curl args'.format(i)), 1)
# tempfile for output
bodyf = tempfile.NamedTemporaryFile(delete=False)
bodyf.close()
headerf = tempfile.NamedTemporaryFile(delete=False)
headerf.close()
# run cmd
cmd_env = os.environ.copy()
cmd_env.update(
LC_ALL='C',
)
cmd_core = [curl_bin, '-w', curl_format, '-D', headerf.name, '-o', bodyf.name, '-s', '-S']
cmd = cmd_core + curl_args + [url]
lg.debug('cmd: %s', cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=cmd_env)
out, err = p.communicate()
if PY3:
out, err = out.decode(), err.decode()
lg.debug('out: %s', out)
# print stderr
if p.returncode == 0:
if err:
print(grayscale[16](err))
else:
_cmd = list(cmd)
_cmd[2] = '<output-format>'
_cmd[4] = '<tempfile>'
_cmd[6] = '<tempfile>'
print('> {}'.format(' '.join(_cmd)))
quit(yellow('curl error: {}'.format(err)), p.returncode)
# parse output
try:
d = json.loads(out)
except ValueError as e:
print(yellow('Could not decode json: {}'.format(e)))
print('curl result:', p.returncode, grayscale[16](out), grayscale[16](err))
quit(None, 1)
for k in d:
if k.startswith('time_'):
d[k] = int(d[k] * 1000)
# calculate ranges
d.update(
range_dns=d['time_namelookup'],
range_connection=d['time_connect'] - d['time_namelookup'],
range_ssl=d['time_pretransfer'] - d['time_connect'],
range_server=d['time_starttransfer'] - d['time_pretransfer'],
range_transfer=d['time_total'] - d['time_starttransfer'],
)
# print stat
if url.startswith('https://'):
template = https_template
else:
template = http_template
# print header & body summary
with open(headerf.name, 'r') as f:
headers = f.read().strip()
# remove header file
lg.debug('rm header file %s', headerf.name)
os.remove(headerf.name)
for loop, line in enumerate(headers.split('\n')):
if loop == 0:
p1, p2 = tuple(line.split('/'))
print(green(p1) + grayscale[14]('/') + cyan(p2))
else:
pos = line.find(':')
print(grayscale[14](line[:pos + 1]) + cyan(line[pos + 1:]))
print()
# ip
if show_ip:
s = 'Connected to {}:{} from {}:{}'.format(
cyan(d['remote_ip']), cyan(d['remote_port']),
d['local_ip'], d['local_port'],
)
print(s)
print()
# body
if show_body:
body_limit = 2048
with open(bodyf.name, 'r') as f:
body = f.read().strip()
body_len = len(body)
if body_len > body_limit:
print(body[:body_limit] + cyan('...'))
print()
s = '{} is truncated ({} out of {})'.format(green('Body'), body_limit, body_len)
if save_body:
s += ', stored in: {}'.format(bodyf.name)
print(s)
else:
print(body)
else:
if save_body:
print('{} stored in: {}'.format(green('Body'), bodyf.name))
# remove body file
if not save_body:
lg.debug('rm body file %s', bodyf.name)
os.remove(bodyf.name)
# colorize template first line
tpl_parts = template.split('\n')
tpl_parts[0] = grayscale[16](tpl_parts[0])
template = '\n'.join(tpl_parts)
def fmta(s):
return cyan('{:^7}'.format(str(s) + 'ms'))
def fmtb(s):
return cyan('{:<7}'.format(str(s) + 'ms'))
stat = template.format(
# a
a0000=fmta(d['range_dns']),
a0001=fmta(d['range_connection']),
a0002=fmta(d['range_ssl']),
a0003=fmta(d['range_server']),
a0004=fmta(d['range_transfer']),
# b
b0000=fmtb(d['time_namelookup']),
b0001=fmtb(d['time_connect']),
b0002=fmtb(d['time_pretransfer']),
b0003=fmtb(d['time_starttransfer']),
b0004=fmtb(d['time_total']),
)
print()
print(stat)
# speed, originally bytes per second
if show_speed:
print('speed_download: {:.1f} KiB/s, speed_upload: {:.1f} KiB/s'.format(
d['speed_download'] / 1024, d['speed_upload'] / 1024))
if __name__ == '__main__':
main()
|
adityachechani/ECE601-Homework-1-
|
httpstat.py
|
Python
|
mit
| 10,683
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import mkeventd
try:
mkeventd_enabled = config.mkeventd_enabled
except:
mkeventd_enabled = False
# Declare datasource only if the event console is activated. We do
# not want to irritate users that do not know anything about the EC.
if mkeventd_enabled:
# .--Infos---------------------------------------------------------------.
# | ___ __ |
# | |_ _|_ __ / _| ___ ___ |
# | | || '_ \| |_ / _ \/ __| |
# | | || | | | _| (_) \__ \ |
# | |___|_| |_|_| \___/|___/ |
# | |
# +----------------------------------------------------------------------+
# | |
# '----------------------------------------------------------------------'
infos['event'] = {
'title' : _('Event Console Event'),
'title_plural': _('Event Console Events'),
'single_spec' : [
('event_id', Integer(
title = _('Event ID'),
)),
]
}
infos['history'] = {
'title' : _('Historic Event Console Event'),
'title_plural': _('Historic Event Console Events'),
'single_spec' : [
('event_id', Integer(
title = _('Event ID'),
)),
('history_line', Integer(
title = _('History Line Number'),
)),
]
}
#.
# .--Filters-------------------------------------------------------------.
# | _____ _ _ _ |
# | | ___(_) | |_ ___ _ __ ___ |
# | | |_ | | | __/ _ \ '__/ __| |
# | | _| | | | || __/ | \__ \ |
# | |_| |_|_|\__\___|_| |___/ |
# | |
# '----------------------------------------------------------------------'
declare_filter(200, FilterText("event_id", _("Event ID"), "event", "event_id", "event_id", "="))
declare_filter(200, FilterText("event_rule_id", _("ID of rule"), "event", "event_rule_id", "event_rule_id", "="))
declare_filter(201, FilterText("event_text", _("Message/Text of event"), "event", "event_text", "event_text", "~~"))
declare_filter(201, FilterText("event_application",_("Application / Syslog-Tag"), "event", "event_application", "event_application", "~~"))
declare_filter(201, FilterText("event_contact", _("Contact Person"), "event", "event_contact", "event_contact", "~~"))
declare_filter(201, FilterText("event_comment", _("Comment to the event"), "event", "event_comment", "event_comment", "~~"))
declare_filter(201, FilterText("event_host_regex", _("Hostname of original event"), "event", "event_host", "event_host", "~~"))
declare_filter(201, FilterText("event_host", _("Hostname of event, exact match"), "event", "event_host", "event_host", "="))
declare_filter(201, FilterText("event_ipaddress", _("Original IP Address of event"), "event", "event_ipaddress", "event_ipaddress", "~~"))
declare_filter(201, FilterText("event_owner", _("Owner of event"), "event", "event_owner", "event_owner", "~~"))
declare_filter(221, FilterText("history_who", _("User that performed action"), "history", "history_who", "history_who", "~~"))
declare_filter(222, FilterText("history_line", _("Line number in history logfile"), "history", "history_line", "history_line", "="))
class EventFilterCount(Filter):
def __init__(self, name, title):
Filter.__init__(self, name, title, "event", [name + "_from", name + "_to"], [name])
self._name = name
def display(self):
html.write("from: ")
html.number_input(self._name + "_from", "")
html.write(" to: ")
html.number_input(self._name + "_to", "")
def filter(self, infoname):
f = ""
if html.var(self._name + "_from"):
f += "Filter: event_count >= %d\n" % int(html.var(self._name + "_from"))
if html.var(self._name + "_to"):
f += "Filter: event_count <= %d\n" % int(html.var(self._name + "_to"))
return f
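# EventFilterCount.filter() emits plain Livestatus filter headers; e.g. with
# from=5 and to=10 the query gains the lines
#   Filter: event_count >= 5
#   Filter: event_count <= 10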
declare_filter(205, EventFilterCount("event_count", _("Message count")))
class EventFilterState(Filter):
def __init__(self, table, name, title, choices):
varnames = [ name + "_" + str(c[0]) for c in choices ]
Filter.__init__(self, name, title, table, varnames, [name])
self._name = name
self._choices = choices
def double_height(self):
return len(self._choices) >= 5
def display(self):
html.begin_checkbox_group()
chars = 0
for name, title in self._choices:
chars += len(title) + 2
html.checkbox(self._name + "_" + str(name), True, label=title)
if (title[0].isupper() and chars > 24) or \
(title[0].islower() and chars > 36):
html.write("<br>")
chars = 0
html.end_checkbox_group()
def filter(self, infoname):
selected = []
for name, title in self._choices:
if html.get_checkbox(self._name + "_" + str(name)):
selected.append(str(name))
if not selected:
return ""
filters = []
for sel in selected:
filters.append("Filter: %s = %s" % (self._name, sel))
f = "\n".join(filters)
if len(filters) > 1:
f += "\nOr: %d" % len(filters)
return f + "\n"
declare_filter(206, EventFilterState("event", "event_state", _("State classification"), [ (0, _("OK")), (1, _("WARN")), (2, _("CRIT")), (3,_("UNKNOWN")) ]))
declare_filter(207, EventFilterState("event", "event_phase", _("Phase"), mkeventd.phase_names.items()))
declare_filter(209, EventFilterState("event", "event_priority", _("Syslog Priority"), mkeventd.syslog_priorities))
declare_filter(225, EventFilterState("history", "history_what", _("History action type"), [(k,k) for k in mkeventd.action_whats.keys()]))
declare_filter(220, FilterTime("event", "event_first", _("First occurrance of event"), "event_first", ))
declare_filter(221, FilterTime("event", "event_last", _("Last occurrance of event"), "event_last", ))
declare_filter(222, FilterTime("history", "history_time", _("Time of entry in event history"), "history_time",))
class EventFilterDropdown(Filter):
def __init__(self, name, title, choices, operator = '=', column=None):
if column == None:
column = name
self._varname = "event_" + name
Filter.__init__(self, "event_" + name, title, "event", [ self._varname ], [ "event_" + column ])
self._choices = choices
self._column = column
self._operator = operator
def display(self):
if type(self._choices) == list:
choices = self._choices
else:
choices = self._choices()
html.select(self._varname, [ ("", "") ] + [(str(n),t) for (n,t) in choices])
def filter(self, infoname):
val = html.var(self._varname)
if val:
return "Filter: event_%s %s %s\n" % (self._column, self._operator, val)
else:
return ""
declare_filter(210, EventFilterDropdown("facility", _("Syslog Facility"), mkeventd.syslog_facilities))
declare_filter(211, EventFilterDropdown("sl", _("Service Level at least"), mkeventd.service_levels, operator='>='))
declare_filter(211, EventFilterDropdown("sl_max", _("Service Level at most"), mkeventd.service_levels, operator='<=', column="sl"))
|
ypid-bot/check_mk
|
web/plugins/visuals/mkeventd.py
|
Python
|
gpl-2.0
| 10,172
|
from socket import inet_aton, inet_ntoa
from struct import calcsize, pack, unpack, unpack_from
from .routing import Node
from ..messaging.payload import Payload
def encode_values(values):
return b''.join([pack('!H', len(value)) + value for value in values])
def decode_values(values_str):
values = []
index = 0
while index < len(values_str):
length = unpack_from('!H', values_str, offset=index)[0]
index += calcsize('!H')
values.append(values_str[index:index + length])
index += length
return values
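# A round-trip sketch of the wire format used above: each value is prefixed
# with its length as a big-endian unsigned short, so
#   encode_values([b'foo', b'quux']) == b'\x00\x03foo\x00\x04quux'
# and decode_values() recovers the original list.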
def encode_nodes(nodes):
nodes_str = b''
for node in nodes:
key = node.public_key.key_to_bin()
nodes_str += inet_aton(node.address[0]) + pack("!H", node.address[1])
nodes_str += pack('!H', len(key)) + key
return nodes_str
def decode_nodes(nodes_str):
nodes = []
index = 0
while index < len(nodes_str):
ip, port, key_length = unpack('!4sHH', nodes_str[index:index + 8])
index += 8
address = (inet_ntoa(ip), port)
key = nodes_str[index:index + key_length]
index += key_length
nodes.append(Node(key, address=address))
return nodes
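# Node entries follow a similar scheme: 4 raw bytes of IPv4 address, a 2-byte
# big-endian port, then a 2-byte length prefix followed by the node's public
# key, as packed by encode_nodes() and unpacked by decode_nodes() above.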
class BasePayload(Payload):
format_list = ['I']
def __init__(self, identifier):
super(BasePayload, self).__init__()
self.identifier = identifier
def to_pack_list(self):
return [('I', self.identifier)]
@classmethod
def from_unpack_list(cls, identifier):
return BasePayload(identifier)
class PingRequestPayload(BasePayload):
pass
class PingResponsePayload(BasePayload):
pass
class StoreRequestPayload(BasePayload):
format_list = BasePayload.format_list + ['20s', '20s', 'varlenH']
def __init__(self, identifier, token, target, values):
super(StoreRequestPayload, self).__init__(identifier)
self.token = token
self.target = target
self.values = values
def to_pack_list(self):
data = super(StoreRequestPayload, self).to_pack_list()
data.append(('20s', self.token))
data.append(('20s', self.target))
data.append(('varlenH', encode_values(self.values)))
return data
@classmethod
def from_unpack_list(cls, identifier, token, target, values_str):
values = decode_values(values_str)
return StoreRequestPayload(identifier, token, target, values)
class StoreResponsePayload(BasePayload):
pass
class FindRequestPayload(BasePayload):
format_list = BasePayload.format_list + ['varlenI', '20s', 'I', '?']
def __init__(self, identifier, lan_address, target, start_idx, force_nodes):
super(FindRequestPayload, self).__init__(identifier)
self.lan_address = lan_address
self.target = target
self.start_idx = start_idx
self.force_nodes = force_nodes
def to_pack_list(self):
data = super(FindRequestPayload, self).to_pack_list()
data.append(('varlenI', inet_aton(self.lan_address[0]) + pack("!H", self.lan_address[1])))
data.append(('20s', self.target))
data.append(('I', self.start_idx))
data.append(('?', self.force_nodes))
return data
@classmethod
def from_unpack_list(cls, identifier, lan_address, target, start_idx, force_nodes):
return FindRequestPayload(identifier,
(inet_ntoa(lan_address[:4]), unpack('!H', lan_address[4:6])[0]),
target,
start_idx,
force_nodes)
class FindResponsePayload(BasePayload):
format_list = BasePayload.format_list + ['20s', 'varlenH', 'varlenH']
def __init__(self, identifier, token, values, nodes):
super(FindResponsePayload, self).__init__(identifier)
self.token = token
self.values = values
self.nodes = nodes
def to_pack_list(self):
data = super(FindResponsePayload, self).to_pack_list()
data.append(('20s', self.token))
data.append(('varlenH', encode_values(self.values)))
data.append(('varlenH', encode_nodes(self.nodes)))
return data
@classmethod
def from_unpack_list(cls, identifier, token, values_str, nodes_str):
return FindResponsePayload(identifier, token, decode_values(values_str), decode_nodes(nodes_str))
class StrPayload(Payload):
format_list = ['raw']
def __init__(self, data):
super(StrPayload, self).__init__()
self.data = data
def to_pack_list(self):
return [('raw', self.data)]
@classmethod
def from_unpack_list(cls, data):
return StrPayload(data)
class SignedStrPayload(Payload):
format_list = ['varlenH', 'I', 'varlenH']
def __init__(self, data, version, public_key):
super(SignedStrPayload, self).__init__()
self.data = data
self.version = version
self.public_key = public_key
def to_pack_list(self):
return [('varlenH', self.data),
('I', self.version),
('varlenH', self.public_key)]
@classmethod
def from_unpack_list(cls, data, version, public_key):
return SignedStrPayload(data, version, public_key)
class StorePeerRequestPayload(BasePayload):
format_list = BasePayload.format_list + ['20s', '20s']
def __init__(self, identifier, token, target):
super(StorePeerRequestPayload, self).__init__(identifier)
self.token = token
self.target = target
def to_pack_list(self):
data = super(StorePeerRequestPayload, self).to_pack_list()
data.append(('20s', self.token))
data.append(('20s', self.target))
return data
@classmethod
def from_unpack_list(cls, identifier, token, target):
return StorePeerRequestPayload(identifier, token, target)
class StorePeerResponsePayload(BasePayload):
pass
class ConnectPeerRequestPayload(BasePayload):
format_list = BasePayload.format_list + ['varlenI', '20s']
def __init__(self, identifier, lan_address, target):
super(ConnectPeerRequestPayload, self).__init__(identifier)
self.lan_address = lan_address
self.target = target
def to_pack_list(self):
data = super(ConnectPeerRequestPayload, self).to_pack_list()
data.append(('varlenI', inet_aton(self.lan_address[0]) + pack("!H", self.lan_address[1])))
data.append(('20s', self.target))
return data
@classmethod
def from_unpack_list(cls, identifier, lan_address, target):
return ConnectPeerRequestPayload(identifier,
(inet_ntoa(lan_address[:4]), unpack('!H', lan_address[4:6])[0]),
target)
class ConnectPeerResponsePayload(BasePayload):
format_list = BasePayload.format_list + ['varlenH']
def __init__(self, identifier, nodes):
super(ConnectPeerResponsePayload, self).__init__(identifier)
self.nodes = nodes
def to_pack_list(self):
data = super(ConnectPeerResponsePayload, self).to_pack_list()
data.append(('varlenH', encode_nodes(self.nodes)))
return data
@classmethod
def from_unpack_list(cls, identifier, nodes):
return ConnectPeerResponsePayload(identifier, decode_nodes(nodes))
|
qstokkink/py-ipv8
|
ipv8/dht/payload.py
|
Python
|
lgpl-3.0
| 7,346
|