import time
import serial
import numpy as np
from pytweening import easeInOutQuint, easeOutSine
from scipy.misc import derivative
from scipy.interpolate import interp1d
from raspberryturk.embedded.motion.arm_movement_engine import ArmMovementEngine
from .pypose.ax12 import *
from .pypose.driver import Driver
SERVO_1 = 1
SERVO_2 = 2
SERVOS = [SERVO_2, SERVO_1]
MIN_SPEED = 20
MAX_SPEED = 80
RESTING_POSITION = (512, 512)
def _register_bytes_to_value(register_bytes):
    # combine low byte and high byte (little-endian) into one 16-bit value
    return register_bytes[0] + (register_bytes[1] << 8)
def _easing_derivative(p):
d = 0.0
try:
d = derivative(easeInOutQuint, p, dx=1e-6)
except ValueError:
pass
return d
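# Note: a sketch, not part of the original module. scipy.misc.derivative was
# deprecated and then removed in recent SciPy releases; if the import above
# fails, a minimal central-difference replacement with the same call shape is:
def _derivative(func, p, dx=1e-6):
    # symmetric difference quotient approximating func'(p)
    return (func(p + dx) - func(p - dx)) / (2.0 * dx)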
def _adjusted_speed(start_position, goal_position, position):
r = np.array([start_position, goal_position])
clipped_position = np.clip(position, r.min(), r.max())
f = interp1d(r, [0,1])
adj = _easing_derivative(f(clipped_position)) / _easing_derivative(0.5)
amp = easeOutSine(abs(goal_position - start_position) / 1023.0)
    return int(MIN_SPEED + (MAX_SPEED - MIN_SPEED) * adj * amp)
class Arm(object):
def __init__(self, port="/dev/ttyUSB0"):
self.driver = Driver(port=port)
self.movement_engine = ArmMovementEngine()
def close(self):
self.driver.close()
def recenter(self):
self.move((512, 512))
def return_to_rest(self):
self.move_to_point([20, 13.5])
def move(self, goal_position):
start_position = self.current_position()
self.set_speed([MIN_SPEED, MIN_SPEED])
for i in SERVOS:
self.driver.setReg(i, P_GOAL_POSITION_L, [goal_position[i%2]%256, goal_position[i%2]>>8])
while self._is_moving():
position = self.current_position()
speed = [_adjusted_speed(start_position[i%2], goal_position[i%2], position[i%2]) for i in SERVOS]
self.set_speed(speed)
def move_to_point(self, pt):
goal_position = self.movement_engine.convert_point(pt)
self.move(goal_position)
def set_speed(self, speed):
for i in SERVOS:
self.driver.setReg(i, P_GOAL_SPEED_L, [speed[i%2]%256, speed[i%2]>>8])
def current_position(self):
return self._values_for_register(P_PRESENT_POSITION_L)
def _is_moving(self):
return any([self.driver.getReg(index, P_MOVING, 1) == 1 for index in SERVOS])
def _values_for_register(self, register):
return [_register_bytes_to_value(self.driver.getReg(index, register, 2)) for index in SERVOS]
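# A minimal usage sketch (not part of the original module): assumes an AX-12
# servo chain attached on /dev/ttyUSB0 and a working raspberryturk environment.
if __name__ == "__main__":
    arm = Arm()
    try:
        arm.recenter()        # both servos to (512, 512)
        arm.return_to_rest()  # board-coordinate resting point
    finally:
        arm.close()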
|
nilq/baby-python
|
python
|
"""
Programming exercise 1-6 1-14 Turtle: draw a triangle.
Write a program that draws a triangle on the screen.
"""
from turtle import Turtle
TURTLE = Turtle()
TURTLE.showturtle()
TURTLE.right(60)
TURTLE.forward(100)
TURTLE.right(120)
TURTLE.forward(100)
TURTLE.right(120)
TURTLE.forward(100)
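# An equivalent loop form (a sketch, not part of the original exercise): each
# 120-degree right turn is the exterior angle of an equilateral triangle.
def draw_triangle(t, side=100):
    for _ in range(3):
        t.forward(side)
        t.right(120)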
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.1 on 2020-09-18 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0005_personel'),
]
operations = [
migrations.AddField(
model_name='crew',
name='total_assigments',
field=models.CharField(default=0, max_length=6),
preserve_default=False,
),
]
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.5 on 2020-11-06 16:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('qing', '0003_mistakes'),
]
operations = [
migrations.AddField(
model_name='data',
name='data_url',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
nilq/baby-python
|
python
|
# flake8: noqa
from .some_function import some_function
from .SomeClass import SomeClass
from .SomeClass import SOME_CONSTANT
from .wrap_min import wrap_min
from .wrap_min import MinWrapper
|
nilq/baby-python
|
python
|
# Copyright (c) 2015 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib.db import api as db_api
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import session as se
from neutron._i18n import _
from neutron.conf import quota as quota_conf
from neutron.db.quota import api as quota_api
LOG = log.getLogger(__name__)
def _count_resource(context, collection_name, project_id):
count_getter_name = "get_%s_count" % collection_name
getter_name = "get_%s" % collection_name
plugins = directory.get_plugins()
for pname in sorted(plugins,
# inspect core plugin first
key=lambda n: n != constants.CORE):
# Some plugins support a count method for particular resources, using a
# DB's optimized counting features. We try to use that one if present.
# Otherwise just use regular getter to retrieve all objects and count
# in python, allowing older plugins to still be supported
try:
obj_count_getter = getattr(plugins[pname], count_getter_name)
return obj_count_getter(
context, filters={'project_id': [project_id]})
except (NotImplementedError, AttributeError):
try:
obj_getter = getattr(plugins[pname], getter_name)
obj_list = obj_getter(
context, filters={'project_id': [project_id]})
return len(obj_list) if obj_list else 0
except (NotImplementedError, AttributeError):
pass
raise NotImplementedError(
_('No plugins that support counting %s found.') % collection_name)
class BaseResource(object, metaclass=abc.ABCMeta):
"""Describe a single resource for quota checking."""
def __init__(self, name, flag, plural_name=None):
"""Initializes a resource.
:param name: The name of the resource, i.e., "instances".
:param flag: The name of the flag or configuration option
:param plural_name: Plural form of the resource name. If not
specified, it is generated automatically by
appending an 's' to the resource name, unless
it ends with a 'y'. In that case the last
letter is removed, and 'ies' is appended.
Dashes are always converted to underscores.
"""
self.name = name
# If a plural name is not supplied, default to adding an 's' to
# the resource name, unless the resource name ends in 'y', in which
# case remove the 'y' and add 'ies'. Even if the code should not fiddle
# too much with English grammar, this is a rather common and easy to
# implement rule.
if plural_name:
self.plural_name = plural_name
elif self.name[-1] == 'y':
self.plural_name = "%sies" % self.name[:-1]
else:
self.plural_name = "%ss" % self.name
# always convert dashes to underscores
self.plural_name = self.plural_name.replace('-', '_')
self.flag = flag
@property
def default(self):
"""Return the default value of the quota."""
# Any negative value will be interpreted as an infinite quota,
# and stored as -1 for compatibility with current behaviour
value = getattr(cfg.CONF.QUOTAS,
self.flag,
cfg.CONF.QUOTAS.default_quota)
return max(value, quota_api.UNLIMITED_QUOTA)
@property
@abc.abstractmethod
def dirty(self):
"""Return the current state of the Resource instance.
        :returns: True if the resource count is out of sync with actual data,
False if it is in sync, and None if the resource instance
does not track usage.
"""
@abc.abstractmethod
def count(self, context, plugin, project_id, **kwargs):
"""Return the total count of this resource"""
class CountableResource(BaseResource):
"""Describe a resource where the counts are determined by a function."""
def __init__(self, name, count, flag=None, plural_name=None):
"""Initializes a CountableResource.
Countable resources are those resources which directly
correspond to objects in the database, i.e., network, subnet,
        etc. A CountableResource must be constructed with a counting
function, which will be called to determine the current counts
of the resource.
The counting function will be passed the context, along with
the extra positional and keyword arguments that are passed to
Quota.count(). It should return an integer specifying the
count.
:param name: The name of the resource, i.e., "instances".
:param count: A callable which returns the count of the
resource. The arguments passed are as described
above.
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
:param plural_name: Plural form of the resource name. If not
specified, it is generated automatically by
appending an 's' to the resource name, unless
it ends with a 'y'. In that case the last
letter is removed, and 'ies' is appended.
Dashes are always converted to underscores.
"""
super(CountableResource, self).__init__(
name, flag=flag, plural_name=plural_name)
self._count_func = count
@property
def dirty(self):
return
def count(self, context, plugin, project_id, **kwargs):
# NOTE(ihrachys) _count_resource doesn't receive plugin
return self._count_func(context, self.plural_name, project_id)
class TrackedResource(BaseResource):
"""Resource which keeps track of its usage data."""
def __init__(self, name, model_class, flag, plural_name=None):
"""Initializes an instance for a given resource.
TrackedResource are directly mapped to data model classes.
Resource usage is tracked in the database, and the model class to
which this resource refers is monitored to ensure always "fresh"
usage data are employed when performing quota checks.
This class operates under the assumption that the model class
describing the resource has a project identifier attribute.
:param name: The name of the resource, i.e., "networks".
:param model_class: The sqlalchemy model class of the resource for
which this instance is being created
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
:param plural_name: Plural form of the resource name. If not
specified, it is generated automatically by
appending an 's' to the resource name, unless
it ends with a 'y'. In that case the last
letter is removed, and 'ies' is appended.
Dashes are always converted to underscores.
"""
super(TrackedResource, self).__init__(
name, flag=flag, plural_name=plural_name)
# Register events for addition/removal of records in the model class
# As project_id is immutable for all Neutron objects there is no need
# to register a listener for update events
self._model_class = model_class
self._dirty_projects = set()
self._out_of_sync_projects = set()
# NOTE(ralonsoh): "DbQuotaNoLockDriver" driver does not need to track
# the DB events or resync the resource quota usage.
if cfg.CONF.QUOTAS.quota_driver == quota_conf.QUOTA_DB_DRIVER:
self._track_resource_events = False
else:
self._track_resource_events = True
@property
def dirty(self):
if not self._track_resource_events:
return
return self._dirty_projects
def mark_dirty(self, context):
if not self._dirty_projects or not self._track_resource_events:
return
with db_api.CONTEXT_WRITER.using(context):
# It is not necessary to protect this operation with a lock.
# Indeed when this method is called the request has been processed
            # and therefore all resources have already been created or deleted.
# dirty_projects will contain all the projects for which the
# resource count is changed. The list might contain also projects
# for which resource count was altered in other requests, but this
# won't be harmful.
dirty_projects_snap = self._dirty_projects.copy()
for project_id in dirty_projects_snap:
quota_api.set_quota_usage_dirty(context, self.name, project_id)
self._out_of_sync_projects |= dirty_projects_snap
self._dirty_projects -= dirty_projects_snap
def _db_event_handler(self, mapper, _conn, target):
try:
project_id = target['project_id']
except AttributeError:
with excutils.save_and_reraise_exception():
LOG.error("Model class %s does not have a project_id "
"attribute", target)
self._dirty_projects.add(project_id)
# Retry the operation if a duplicate entry exception is raised. This
    # can happen if two or more workers are trying to create a resource of a
    # given kind for the same project concurrently. Retrying the operation will
# ensure that an UPDATE statement is emitted rather than an INSERT one
@db_api.retry_if_session_inactive()
def _set_quota_usage(self, context, project_id, in_use):
return quota_api.set_quota_usage(
context, self.name, project_id, in_use=in_use)
def _resync(self, context, project_id, in_use):
# Update quota usage
usage_info = self._set_quota_usage(context, project_id, in_use)
self._dirty_projects.discard(project_id)
self._out_of_sync_projects.discard(project_id)
LOG.debug(("Unset dirty status for project:%(project_id)s on "
"resource:%(resource)s"),
{'project_id': project_id, 'resource': self.name})
return usage_info
def resync(self, context, project_id):
if (project_id not in self._out_of_sync_projects or
not self._track_resource_events):
return
LOG.debug(("Synchronizing usage tracker for project:%(project_id)s on "
"resource:%(resource)s"),
{'project_id': project_id, 'resource': self.name})
in_use = context.session.query(
self._model_class.project_id).filter_by(
project_id=project_id).count()
# Update quota usage
return self._resync(context, project_id, in_use)
@db_api.CONTEXT_WRITER
def count_used(self, context, project_id, resync_usage=True):
"""Returns the current usage count for the resource.
:param context: The request context.
:param project_id: The ID of the project
:param resync_usage: Default value is set to True. Syncs
with in_use usage.
"""
# Load current usage data, setting a row-level lock on the DB
usage_info = quota_api.get_quota_usage_by_resource_and_project(
context, self.name, project_id)
# If dirty or missing, calculate actual resource usage querying
# the database and set/create usage info data
# NOTE: this routine "trusts" usage counters at service startup. This
# assumption is generally valid, but if the database is tampered with,
# or if data migrations do not take care of usage counters, the
# assumption will not hold anymore
if (project_id in self._dirty_projects or
not usage_info or usage_info.dirty):
LOG.debug(("Usage tracker for resource:%(resource)s and project:"
"%(project_id)s is out of sync, need to count used "
"quota"), {'resource': self.name,
'project_id': project_id})
in_use = context.session.query(
self._model_class.project_id).filter_by(
project_id=project_id).count()
            # Update quota usage, if requested. Callers typically count just
            # before adding a record and skip the resync, since the insert
            # would mark the usage counter as dirty again.
if resync_usage:
usage_info = self._resync(context, project_id, in_use)
else:
resource = usage_info.resource if usage_info else self.name
project_id = (usage_info.project_id if usage_info else
project_id)
dirty = usage_info.dirty if usage_info else True
usage_info = quota_api.QuotaUsageInfo(
resource, project_id, in_use, dirty)
LOG.debug(("Quota usage for %(resource)s was recalculated. "
"Used quota:%(used)d."),
{'resource': self.name,
'used': usage_info.used})
return usage_info.used
def count_reserved(self, context, project_id):
"""Return the current reservation count for the resource."""
# NOTE(princenana) Current implementation of reservations
# is ephemeral and returns the default value
reservations = quota_api.get_reservations_for_resources(
context, project_id, [self.name])
reserved = reservations.get(self.name, 0)
return reserved
def count(self, context, _plugin, project_id, resync_usage=True,
count_db_registers=False):
"""Return the count of the resource.
The _plugin parameter is unused but kept for
compatibility with the signature of the count method for
CountableResource instances.
"""
if count_db_registers:
count = self._count_db_registers(context, project_id)
else:
count = self.count_used(context, project_id, resync_usage)
return count + self.count_reserved(context, project_id)
def _count_db_registers(self, context, project_id):
"""Return the existing resources (self._model_class) in a project.
The query executed must be as fast as possible. To avoid retrieving all
model backref relationship columns, only "project_id" is requested
        (this column always exists in the DB model because it is used in the
filter).
"""
# TODO(ralonsoh): declare the OVO class instead the DB model and use
# ``NeutronDbObject.count`` with the needed filters and fields to
# retrieve ("project_id").
admin_context = context.elevated()
with db_api.CONTEXT_READER.using(admin_context):
query = admin_context.session.query(self._model_class.project_id)
query = query.filter(self._model_class.project_id == project_id)
return query.count()
def _except_bulk_delete(self, delete_context):
if delete_context.mapper.class_ == self._model_class:
raise RuntimeError(_("%s may not be deleted in bulk because "
"it is tracked by the quota engine via "
"SQLAlchemy event handlers, which are not "
"compatible with bulk deletes.") %
self._model_class)
def register_events(self):
if not self._track_resource_events:
return
listen = db_api.sqla_listen
listen(self._model_class, 'after_insert', self._db_event_handler)
listen(self._model_class, 'after_delete', self._db_event_handler)
listen(se.Session, 'after_bulk_delete', self._except_bulk_delete)
def unregister_events(self):
if not self._track_resource_events:
return
try:
db_api.sqla_remove(self._model_class, 'after_insert',
self._db_event_handler)
db_api.sqla_remove(self._model_class, 'after_delete',
self._db_event_handler)
db_api.sqla_remove(se.Session, 'after_bulk_delete',
self._except_bulk_delete)
except sql_exc.InvalidRequestError:
LOG.warning("No sqlalchemy event for resource %s found",
self.name)
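# A standalone sketch (not part of the original module): mirrors the plural
# name rule documented in BaseResource.__init__ ('y' -> 'ies', otherwise
# append 's'; dashes always become underscores).
def _pluralize(name):
    plural = name[:-1] + 'ies' if name.endswith('y') else name + 's'
    return plural.replace('-', '_')

assert _pluralize('policy') == 'policies'
assert _pluralize('network') == 'networks'
assert _pluralize('security-group') == 'security_groups'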
|
nilq/baby-python
|
python
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <ilya@glas.net>
# License: http://pysnmp.sf.net/license.html
#
# PySNMP MIB module SNMP-USM-AES-MIB (http://pysnmp.sf.net)
# ASN.1 source file:///usr/share/snmp/mibs/SNMP-USM-AES-MIB.txt
# Produced by pysmi-0.0.5 at Sat Sep 19 23:11:55 2015
# On host grommit.local platform Darwin version 14.4.0 by user ilya
# Using Python version 2.7.6 (default, Sep 9 2014, 15:04:36)
#
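# NOTE: "mibBuilder" below is not imported in this file; pysnmp's MibBuilder
# is expected to inject it into the module namespace when this generated MIB
# module is loaded.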
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint")
( snmpPrivProtocols, ) = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "snmpPrivProtocols")
( NotificationGroup, ModuleCompliance, ) = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
( Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, MibIdentifier, IpAddress, TimeTicks, Counter64, Unsigned32, iso, Gauge32, snmpModules, ModuleIdentity, ObjectIdentity, Bits, Counter32, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "MibIdentifier", "IpAddress", "TimeTicks", "Counter64", "Unsigned32", "iso", "Gauge32", "snmpModules", "ModuleIdentity", "ObjectIdentity", "Bits", "Counter32")
( DisplayString, TextualConvention, ) = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
snmpUsmAesMIB = ModuleIdentity((1, 3, 6, 1, 6, 3, 20)).setRevisions(("2004-06-14 00:00",))
if mibBuilder.loadTexts: snmpUsmAesMIB.setLastUpdated('200406140000Z')
if mibBuilder.loadTexts: snmpUsmAesMIB.setOrganization('IETF')
if mibBuilder.loadTexts: snmpUsmAesMIB.setContactInfo('Uri Blumenthal\n Lucent Technologies / Bell Labs\n 67 Whippany Rd.\n 14D-318\n Whippany, NJ 07981, USA\n 973-386-2163\n uri@bell-labs.com\n\n Fabio Maino\n Andiamo Systems, Inc.\n 375 East Tasman Drive\n San Jose, CA 95134, USA\n 408-853-7530\n fmaino@andiamo.com\n\n Keith McCloghrie\n Cisco Systems, Inc.\n 170 West Tasman Drive\n San Jose, CA 95134-1706, USA\n\n 408-526-5260\n kzm@cisco.com')
if mibBuilder.loadTexts: snmpUsmAesMIB.setDescription("Definitions of Object Identities needed for\n the use of AES by SNMP's User-based Security\n Model.\n\n Copyright (C) The Internet Society (2004).\n\n This version of this MIB module is part of RFC 3826;\n see the RFC itself for full legal notices.\n Supplementary information may be available on\n http://www.ietf.org/copyrights/ianamib.html.")
usmAesCfb128Protocol = ObjectIdentity((1, 3, 6, 1, 6, 3, 10, 1, 2, 4))
if mibBuilder.loadTexts: usmAesCfb128Protocol.setDescription('The CFB128-AES-128 Privacy Protocol.')
mibBuilder.exportSymbols("SNMP-USM-AES-MIB", usmAesCfb128Protocol=usmAesCfb128Protocol, snmpUsmAesMIB=snmpUsmAesMIB, PYSNMP_MODULE_ID=snmpUsmAesMIB)
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.4 on 2018-04-17 05:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='Tags',
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(related_name='Tags', to='blog.Post'),
),
migrations.AddField(
model_name='post',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AlterField(
model_name='post',
name='Blog',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Blog'),
),
migrations.AlterField(
model_name='post',
name='Category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category'),
),
]
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.7 on 2020-07-23 07:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('disdata', '0021_auto_20200723_0649'),
]
operations = [
migrations.AlterField(
model_name='disease',
name='victim_id',
field=models.CharField(choices=[('pt', 'Poultry'), ('gt', 'Goat'), ('pg', 'Pig'), ('bf', 'Buffalo'), ('sp', 'Sheep')], max_length=2),
),
]
|
nilq/baby-python
|
python
|
import math
print(round(2.9))
print(abs(-2.9))  # absolute value
print(math.ceil(2.2)) # the ceiling of a number
print(math.floor(9.8))
print(sum([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1]))        # 0.9999999999999999 (float rounding)
print(math.fsum([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1]))  # 1.0 (compensated summation)
print(math.gcd(42, 7))
# Python code to demonstrate gcd()
# prints 2
print("The gcd of 50 and 8 is: ", end="")
print(math.gcd(50, 8))
# Produces a TypeError:
# print("\nThe gcd of a and 13 is: ", end="")
# print(math.gcd('a', 13))
|
nilq/baby-python
|
python
|
from util.orientation import Orientation
from util.vec import Vec3
class GameObject:
"""GameObjects are considered to be all objects that can move on the field.
Attributes:
location (Vec3): location vector defined by x,y,z coordinates
velocity (Vec3): velocity vector with x,y,z components
orientation (Orientation): orientation vector defined by pitch, yaw, and roll
        r_velocity (Vec3): Rotational velocity defined by pitch, yaw, and roll components as x, y, z respectively
local_location (Vec3): location of the GameObject relative to the bot
"""
def __init__(self):
"""Creates a new GameObject with zeroed data."""
self.location = Vec3(0, 0, 0)
self.velocity = Vec3(0, 0, 0)
self.orientation = Orientation()
self.r_velocity = Vec3(0, 0, 0)
self.local_location = Vec3(0, 0, 0)
class Car(GameObject):
"""Car is an Extension of the GameObject class that holds data and function specific to the behavior of other cars.
Attributes:
boost (float): The amount of boost remaining in the car
"""
def __init__(self):
"""Creates a new Car object with zero boost."""
super().__init__()
self.boost = 0.0
self.team = -1
class Ball(GameObject):
"""Ball is an extension of the gameObject class that holds data and functions specific to the ball
"""
def __init__(self):
"""Creates a new Ball object."""
super().__init__()
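# A minimal usage sketch (not part of the original module): populate a Car and
# read its state. Assumes util.vec.Vec3 accepts positional x, y, z arguments.
if __name__ == "__main__":
    car = Car()
    car.location = Vec3(100, 200, 17)
    car.boost = 33.0
    print(car.location, car.boost, car.team)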
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
""""""
# =============================================================================
# IMPORTS
# =============================================================================
import math
import numpy as np
from .core import Extractor
# =============================================================================
# CONSTANTS
# =============================================================================
COMMON_DOC = r"""
In order to characterize the sorted magnitudes distribution we use percentiles.
If :math:`F_{5, 95}` is the difference between 95% and 5% magnitude values,
we calculate the following:
- flux_percentile_ratio_mid20: ratio :math:`F_{40, 60}/F_{5, 95}`
- flux_percentile_ratio_mid35: ratio :math:`F_{32.5, 67.5}/F_{5, 95}`
- flux_percentile_ratio_mid50: ratio :math:`F_{25, 75}/F_{5, 95}`
- flux_percentile_ratio_mid65: ratio :math:`F_{17.5, 82.5}/F_{5, 95}`
- flux_percentile_ratio_mid80: ratio :math:`F_{10, 90}/F_{5, 95}`
For the first feature, for example, in the case of a normal distribution this
is equivalent to calculating:
.. math::
\frac{erf^{-1}(2 \cdot 0.6-1)-erf^{-1}(2 \cdot 0.4-1)}
{erf^{-1}(2 \cdot 0.95-1)-erf^{-1}(2 \cdot 0.05-1)}
So, the expected values for each of the flux percentile features are:
- flux_percentile_ratio_mid20 = 0.154
- flux_percentile_ratio_mid35 = 0.275
- flux_percentile_ratio_mid50 = 0.410
- flux_percentile_ratio_mid65 = 0.568
- flux_percentile_ratio_mid80 = 0.779
References
----------
.. [richards2011machine] Richards, J. W., Starr, D. L., Butler, N. R.,
Bloom, J. S., Brewer, J. M., Crellin-Quick, A., ... &
Rischard, M. (2011). On machine-learned classification of variable stars
with sparse and noisy time-series data.
The Astrophysical Journal, 733(1), 10. Doi:10.1088/0004-637X/733/1/10.
"""
# =============================================================================
# EXTRACTOR CLASS
# =============================================================================
class FluxPercentileRatioMid20(Extractor):
__doc__ = COMMON_DOC
data = ['magnitude']
features = ["FluxPercentileRatioMid20"]
def fit(self, magnitude):
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data) - 1
F_60_index = int(math.ceil(0.60 * lc_length))
F_40_index = int(math.ceil(0.40 * lc_length))
F_5_index = int(math.ceil(0.05 * lc_length))
F_95_index = int(math.ceil(0.95 * lc_length))
F_40_60 = sorted_data[F_60_index] - sorted_data[F_40_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid20 = F_40_60 / F_5_95
return {"FluxPercentileRatioMid20": F_mid20}
class FluxPercentileRatioMid35(Extractor):
__doc__ = COMMON_DOC
data = ['magnitude']
features = ["FluxPercentileRatioMid35"]
def fit(self, magnitude):
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data) - 1
F_325_index = int(math.ceil(0.325 * lc_length))
F_675_index = int(math.ceil(0.675 * lc_length))
F_5_index = int(math.ceil(0.05 * lc_length))
F_95_index = int(math.ceil(0.95 * lc_length))
F_325_675 = sorted_data[F_675_index] - sorted_data[F_325_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid35 = F_325_675 / F_5_95
return {"FluxPercentileRatioMid35": F_mid35}
class FluxPercentileRatioMid50(Extractor):
__doc__ = COMMON_DOC
data = ['magnitude']
features = ["FluxPercentileRatioMid50"]
def fit(self, magnitude):
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data) - 1
F_25_index = int(math.ceil(0.25 * lc_length))
F_75_index = int(math.ceil(0.75 * lc_length))
F_5_index = int(math.ceil(0.05 * lc_length))
F_95_index = int(math.ceil(0.95 * lc_length))
F_25_75 = sorted_data[F_75_index] - sorted_data[F_25_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid50 = F_25_75 / F_5_95
return {"FluxPercentileRatioMid50": F_mid50}
class FluxPercentileRatioMid65(Extractor):
__doc__ = COMMON_DOC
data = ['magnitude']
features = ["FluxPercentileRatioMid65"]
def fit(self, magnitude):
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data) - 1
F_175_index = int(math.ceil(0.175 * lc_length))
F_825_index = int(math.ceil(0.825 * lc_length))
F_5_index = int(math.ceil(0.05 * lc_length))
F_95_index = int(math.ceil(0.95 * lc_length))
F_175_825 = sorted_data[F_825_index] - sorted_data[F_175_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid65 = F_175_825 / F_5_95
return {"FluxPercentileRatioMid65": F_mid65}
class FluxPercentileRatioMid80(Extractor):
__doc__ = COMMON_DOC
data = ['magnitude']
features = ["FluxPercentileRatioMid80"]
def fit(self, magnitude):
sorted_data = np.sort(magnitude)
lc_length = len(sorted_data) - 1
F_10_index = int(math.ceil(0.10 * lc_length))
F_90_index = int(math.ceil(0.90 * lc_length))
F_5_index = int(math.ceil(0.05 * lc_length))
F_95_index = int(math.ceil(0.95 * lc_length))
F_10_90 = sorted_data[F_90_index] - sorted_data[F_10_index]
F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]
F_mid80 = F_10_90 / F_5_95
return {"FluxPercentileRatioMid80": F_mid80}
|
nilq/baby-python
|
python
|
def greet(i):
    # console.log is JavaScript; Python writes to stdout with print()
    print(str(i) + " Hello World!")
for i in range(8):
greet(i)
|
nilq/baby-python
|
python
|
import unittest
from pyconductor import *
class NewUserTest(unittest.TestCase):
def setUp(self):
self.preloaded_dict = load_test_values()
def test_user_can_run_material_testcase(self):
calculate_conductance(self.preloaded_dict["air"])
def test_user_can_add_material_to_materialdict(self):
pass
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
import re
from pyingest.config import config
class UATURIConverter():
'''
    Takes a string containing a comma-separated list of strings as input,
and converts any that match UAT entities to their UAT:URI_# instead
(not including URL). Returns a string consisting of comma-separated
keywords/uris.
'''
    def convert_to_uri(self, kw_list):
        try:
            kw_list_new = [x.strip() for x in kw_list.split(',')]
            kw_list_new = list(set(kw_list_new))
            kwl = list()
            for kw in kw_list_new:
                if kw.lower() in config.UAT_ASTRO_URI_DICT.keys():
                    kout = 'UAT:' + config.UAT_ASTRO_URI_DICT[kw.lower()]
                else:
                    kout = kw
                kwl.append(kout)
            return ', '.join(kwl)
        except Exception:
            # on any failure, return the input unchanged
return kw_list
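# A minimal usage sketch (not part of the original module): assumes
# config.UAT_ASTRO_URI_DICT maps lowercased UAT keywords to URI fragments;
# unmatched keywords pass through unchanged.
if __name__ == "__main__":
    conv = UATURIConverter()
    print(conv.convert_to_uri("black holes, some unmatched keyword"))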
|
nilq/baby-python
|
python
|
from __future__ import annotations
from typing import Optional
from pydantic.fields import Field
from pydantic.types import StrictBool
from ..api import BodyParams, EndpointData
from ..types_.endpoint import BaseEndpoint
from ..types_.inputs import WorkflowCustomField
from ..types_.scalar import WorkflowId
class Workflows(BaseEndpoint):
@property
def endpoint_data(self) -> EndpointData:
return EndpointData(
method="GET",
url="/workflows",
)
class CreateWorkflow(BaseEndpoint):
name: str = Field(..., max_length=128)
@property
def endpoint_data(self) -> EndpointData:
return EndpointData(method="POST", url="/workflows", body_params=self._body_params)
@property
def _body_params(self) -> BodyParams:
return {"name": self.name}
class ModifyWorkflow(BaseEndpoint):
workflow_id: WorkflowId
    name: Optional[str] = Field(None, max_length=128)
hidden: Optional[StrictBool]
custom_status: Optional[WorkflowCustomField]
@property
def endpoint_data(self) -> EndpointData:
return EndpointData(method="PUT", url=f"/workflows/{self.workflow_id}", body_params=self._body_params)
@property
def _body_params(self) -> BodyParams:
body = {}
if self.name is not None:
body["name"] = self.name
if self.hidden is not None:
body["hidden"] = self._convert_bool(self.hidden)
if self.custom_status:
body["custom_status"] = self._convert_input(self.custom_status)
return body
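# A minimal usage sketch (not part of the original module): "IEAAAA" is a
# hypothetical workflow id; this assumes WorkflowId validates plain strings
# and EndpointData exposes its fields as attributes.
if __name__ == "__main__":
    endpoint = ModifyWorkflow(workflow_id="IEAAAA", name="QA", hidden=True)
    print(endpoint.endpoint_data.url)  # /workflows/IEAAAA
    print(endpoint._body_params)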
|
nilq/baby-python
|
python
|
from app import *
keyboard = types.InlineKeyboardMarkup(row_width=1)
a = types.InlineKeyboardButton(text=emoji.emojize(":memo: Activate Subscriber", use_aliases=True), callback_data="activate")
b = types.InlineKeyboardButton(text=emoji.emojize(":scroll: Send Advertisement", use_aliases=True), callback_data="ad")
c = types.InlineKeyboardButton(text=emoji.emojize(":memo: Deactivate Subscriber", use_aliases=True), callback_data="deactivate")
keyboard.add(a,c,b)
@bot.message_handler(commands=['admin', 'panel'])
def handle_admin(msg):
"""Admin feature to the bot management"""
if msg.from_user.id == int(ADMIN_ID):
bot.send_message(
msg.chat.id,
f"""
Welcome Back {msg.from_user.username},
<b>Dx15 Group Administrative Panel.</b>""",
reply_markup=keyboard,
parse_mode=telegram.ParseMode.HTML
)
else:
bot.reply_to(
msg,
"You are not authorized to use this command"
)
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
from imread_from_url import imread_from_url
from acvnet import ACVNet
resolutions = [(240,320),(320,480),(384,640),(480,640),(544,960),(720,1280)]
# Load images
left_img = imread_from_url("https://vision.middlebury.edu/stereo/data/scenes2003/newdata/cones/im2.png")
right_img = imread_from_url("https://vision.middlebury.edu/stereo/data/scenes2003/newdata/cones/im6.png")
num_repetitions = 10
for resolution in resolutions:
print(f"Model: acvnet_maxdisp192_sceneflow_{resolution[0]}x{resolution[1]}.onnx")
try:
# Initialize model
model_path = f'models/acvnet_maxdisp192_sceneflow_{resolution[0]}x{resolution[1]}/acvnet_maxdisp192_sceneflow_{resolution[0]}x{resolution[1]}.onnx'
depth_estimator = ACVNet(model_path)
for repetition in range(num_repetitions):
# Estimate the depth
disparity_map = depth_estimator(left_img, right_img)
del depth_estimator
    except Exception as e:
        print(f"Model could not be loaded: {e}")
|
nilq/baby-python
|
python
|
"""
Test Models
A set of trivial models for PyTests
"""
import pandas as pd
import numpy as np
import re
class SingleWordModel:
def __init__(self, name, colname, myword):
self.name = name
self.colname = colname
self.word = myword
    def predict(self, x: pd.DataFrame) -> np.ndarray:
        # 1 where the column value contains self.word, else 0
        rez = np.where(x[self.colname].str.find(self.word) >= 0, 1, 0)
        return rez
class MultiWordModel:
def __init__(self, name, colname, mywords):
self.name = name
self.colname = colname
self.words = mywords
def predict(self, x: pd.DataFrame) -> np.ndarray:
score = 0
for w in self.words:
score += np.where( x[self.colname].str.find(w)>=0,1,0)
score = score/len(self.words)
return score
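# A minimal usage sketch (not part of the original module): score a toy
# DataFrame with both trivial models.
if __name__ == "__main__":
    df = pd.DataFrame({"text": ["hello world", "goodbye"]})
    print(SingleWordModel("m1", "text", "hello").predict(df))            # [1 0]
    print(MultiWordModel("m2", "text", ["hello", "world"]).predict(df))  # [1. 0.]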
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 6 13:24:49 2021
@author: Asus
"""
import pandas as pd
import numpy as np
import string
import unicodedata
import re
from functools import reduce
def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def del_punct_wsp(text):
text = text.upper()
text = re.sub('(?:MR|SR|SRA|SRTA|SRES|MISS)\.\s*','',text)
    text = re.sub(r'\([^)]*\)', '', text)  # removes parentheses and everything inside them
text = text.replace(".","").replace('('," ").replace(")"," ")
text = text.replace("\M:"," ").replace("M:"," ")
text = re.sub(r'[!"\#\$%\'\(\)\*\+,\-\./:;<=>\?@\[\\\]\^_`\{\|\}\~]',' ',text) #borra punct y agrega espacio
text = re.sub(r'\d+\b',' ', text)
text = strip_accents(text)
return text
# =============================================================================
# glei = pd.read_csv('https://www.gleif.org/content/2-about-lei/7-code-lists/2-iso-20275-entity-legal-forms-code-list/2020-11-19_elf-code-list-v1.3.csv')
# ab = ";".join(glei[glei['Abbreviations Local language'].isna()==False]["Abbreviations Local language"].drop_duplicates().values.tolist())
# ab = np.unique(np.array(ab.split(";"))).tolist()
# abreviaturas = np.unique(np.array([x.upper() for x in ab])).tolist()+["REF"]
# indices = list(range(1,len(abreviaturas)+1))
# abrev_dict = dict()
# for k,v in zip(abreviaturas, indices):
# abrev_dict[k]=v
# =============================================================================
def special_corrections(text):
text = re.sub(r"\bCOMPA.*IA\b","COMPANIA",text)
text = re.sub(r"\bLA VICU.*A\b","LA VICUNA",text)
text = re.sub(r"\bMONTADZ.*A\b", "MONTANA", text)
text = re.sub(r"DZ˝","N",text)
text = re.sub(r"\bASIJEMIN\b", "ASOCIACION SINDICAL DEL PERSONAL JERARQUICO PROFESIONAL Y TECNICO DE LA ACTIVIDAD MINERA ARGENTINA", text)
text = re.sub(r"\bS A I C Y A\b","SAICYA", text)
text = re.sub(r"\bS A C I\b","SACI",text)
text = re.sub(r"\bSAIC Y F\b","SAICYF", text)
text = re.sub(r"\bSA IC Y F\b","SAICYF",text)
text = re.sub(r"\bPROD Y SERVICIOS\b","PRODUCTOS Y SERVICIOS",text)
text = re.sub(r"\bSA\b|\bS A\b|\bSOCIEDAD ANONIMA\b","SA", text)
text = re.sub(r"\bS R L\b|\bSOCIEDAD DE RESPONSABILIDAD LIMITADA\b","SRL", text)
return text
def acronyms(text):
    # requires abrev_dict (built in the commented-out block above)
    if '' == text:
        return ''
    else:
        text = text.upper()
        text = text.split(' ')
        # strip trailing abbreviations while at least two words remain
        while (text[-1] in abrev_dict) and (len(text) > 2):
            text = text[:-1]
        return ' '.join(text)
def remove_digits(text):
splitted = text.split(' ')
cleanned = []
for word in splitted:
evaluation = [1 if i.isdigit() else 0 for i in word]
suma = reduce(lambda x,y: x+y, evaluation,0)
if suma==0:
cleanned.append(word)
elif suma<2:
cleanned.append(word)
else:
word = ''.join([i for i in word if not i.isdigit()])
cleanned.append(word)
return " ".join(cleanned)
def strip_spaces(text):
return text.upper().lstrip().rstrip()
def remove_within_wsp(text):
return " ".join(text.split())
def special_deletions_fn(text, acronyms_list_or_dict):
    # note: named distinctly from the pre_processing() parameter "special_deletions"
    return " ".join([word for word in text.split(" ") if word not in acronyms_list_or_dict])
def pre_processing(text,
punctuation=True,
within_spaces=True,
digits=True,
strip_space=True,
acronyms_at_end=True,
special_deletions = None,
specialcorr=True):
"""1) Se borra puntuación, acentos y caracteres específicos como "\M:"
2) Se borran dígitos
3) Se remueven espacios en blanco de principio y final
4) Se borran las siglas al final del texto
5) Se remueven espacios dentro del texto"""
if punctuation:
text = del_punct_wsp(text)
#print(text)
if within_spaces:
text = remove_within_wsp(text)
#print(text)
if digits:
text = remove_digits(text)
#print(text)
if strip_space:
text = strip_spaces(text)
#print(text)
    if special_deletions:
        text = special_deletions_fn(text, special_deletions)
#print(text)
if acronyms_at_end:
text = acronyms(text)
#print(text)
if within_spaces:
text = remove_within_wsp(text)
if specialcorr:
text=special_corrections(text)
return text
def ngrams(text, n=3):
ngrams = zip(*[text[i:] for i in range(n)])
return [''.join(ngram) for ngram in ngrams]
def AxB(listA, listaB, output_folder, vectorizing_by="A", analyze_by='word', lowerbound=0.8, topn=10, idfsmooth=True, sublinear=True):
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sparse_dot_topn import awesome_cossim_topn
    #Vectorizer
    vectorizer = TfidfVectorizer(min_df=3, analyzer=analyze_by, lowercase=False, smooth_idf=idfsmooth, sublinear_tf=sublinear)
    '''
    * vectorizing_by="A": computes A x B.transpose() with the TF-IDF
      vocabulary fitted on A
    * vectorizing_by="B": computes A x B.transpose() with the TF-IDF
      vocabulary fitted on B
    '''
    if vectorizing_by=="A":
        print("TF-IDF Vectorizing...\n")
        A = vectorizer.fit_transform(listA)
        B = vectorizer.transform(listaB)
        print("Processing Matches...\n")
    if vectorizing_by=="B":
        print("TF-IDF Vectorizing...\n")
        B = vectorizer.fit_transform(listaB)
        A = vectorizer.transform(listA)
        print("Processing Matches...\n")
#Sparse Matrix dot product
import time
t1 = time.time()
matches_ngrams = awesome_cossim_topn(A,B.transpose(), topn, lowerbound)
    t = time.time()-t1
    print('Matching ran in {} seconds\n'.format(t))
#Saving Matrix
from scipy import sparse
from datetime import datetime
outputpath = output_folder+"/"+"matches_{}.npz".format(datetime.now().strftime('%Y-%m-%d %H_%M_%S'))
sparse.save_npz(outputpath, matches_ngrams)
print("Matches save into {}".format(outputpath))
return matches_ngrams
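# A minimal usage sketch (not part of the original module): normalize one
# company name; acronyms_at_end is disabled because abrev_dict is only built
# in the commented-out block above.
if __name__ == "__main__":
    print(pre_processing("Compañía Minera S.A.", acronyms_at_end=False))
    # -> COMPANIA MINERA SA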
|
nilq/baby-python
|
python
|
'''
test_fix.py: Test fix_fusion
'''
import os
import pysam
from utils import check_file
from circ.CIRCexplorer import fix_fusion
class TestFix(object):
def setup(self):
'''
Run fix_fusion
'''
print('#%s: Start testing fix_fusion' % __name__)
ref = 'data/ref.txt'
genome = pysam.FastaFile('data/chr21.fa')
input = 'data/annotated_junction.txt'
output = 'data/test_circular_RNA.txt'
fix_fusion(ref, genome, input, output, False)
def testFix(self):
'''
Check file
'''
print('#%s: Test fix_fusion' % __name__)
test_file = 'data/test_circular_RNA.txt'
result_file = 'data/circular_RNA.txt'
check_file(test_file, result_file)
def teardown(self):
'''
Delete fix file
'''
print('#%s: End testing fix_fusion' % __name__)
os.remove('data/test_circular_RNA.txt')
|
nilq/baby-python
|
python
|
import numpy as np
import rich
from rich import print, pretty
pretty.install()
#############
from price_model import SimulateGBM
from basis_fun import laguerre_polynomials
##############
def priceOption(S0, K, r, paths, sd, T, steps, Stock_Matrix,k, reduce_variance = True):
steps = int(steps)
    Stn = Stock_Matrix
dt = T/steps
cashFlow = np.zeros((paths, steps))
cashFlow[:,steps - 1] = np.maximum(K-Stn[:,steps - 1], 0)
cont_value = cashFlow
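    # NOTE: cont_value aliases cashFlow (no copy is made), so writes to either
    # array in the loop below are visible through both names.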
decision = np.zeros((paths, steps))
decision[:, steps - 1] = 1
discountFactor = np.tile(np.exp(-r*dt* np.arange(1,
steps + 1, 1)), paths).reshape((paths, steps))
for i in reversed(range(steps - 1)):
# Find in the money paths
in_the_money_n = np.where(K-Stn[:, i] > 0)[0]
out_of_money_n = np.asarray(list(set(np.arange(paths)) - set(in_the_money_n)))
X = laguerre_polynomials(Stn[in_the_money_n, i], k)
Y = cashFlow[in_the_money_n, i + 1]/np.exp(r*dt)
A = np.dot(X.T, X)
b = np.dot(X.T, Y)
Beta = np.dot(np.linalg.pinv(A), b)
cont_value[in_the_money_n,i] = np.dot(X, Beta)
try:
cont_value[out_of_money_n,i] = cont_value[out_of_money_n, i + 1]/np.exp(r*dt)
except:
pass
decision[:, i] = np.where(np.maximum(K-Stn[:, i], 0) - cont_value[:,i] >= 0, 1, 0)
cashFlow[:, i] = np.maximum(K-Stn[:, i], cont_value[:,i])
first_exercise = np.argmax(decision, axis = 1)
decision = np.zeros((len(first_exercise), steps))
decision[np.arange(len(first_exercise)), first_exercise] = 1
last = np.sum(decision*discountFactor*cashFlow, axis = 1)
option_value = np.mean(last)
var = np.sum((last-option_value)**2)/(last.shape[0]-1)
return option_value
#return option_value,var, cashFlow, decision
#######################################################
# Example of LSM Paper, First one
S0_value = 36
r_value = 0.06
sd_value = 0.2
T_value = 1
paths_value = 100000
steps_value = 50
K_value = 40
k_value = 4
Stock_Matrix_GBM = SimulateGBM(S0=S0_value, r=r_value, sd=sd_value, T=T_value,
paths=paths_value,steps=steps_value)
price_reduced = priceOption(S0=S0_value,
K=K_value, r=r_value, paths=paths_value,
sd=sd_value, T=T_value, steps=steps_value,
Stock_Matrix=Stock_Matrix_GBM,
k=k_value,
reduce_variance=True)
price_reduced
#########################################################
from scipy.stats import norm
def european_put_price(S0, K, r, sd, T) -> float:
sigma_sqrt: float = sd * np.sqrt(T)
d1: float = (np.log(S0 / K) +
(r + sd ** 2 / 2.) * T) \
/ sigma_sqrt
d2: float = d1 - sigma_sqrt
return K * np.exp(-r * T) * norm.cdf(-d2) \
- S0 * norm.cdf(-d1)
#########################################################
S0_values_table1 = np.arange(36,46, 2)
sd_values_table1 = np.array([0.2, 0.4])
T_values_table1 = np.array([1, 2])
def Table1_func(S0_values,sd_values,T_values):
print("%-10s %-10s %-10s %-20s %-20s %-20s"
%("S0","vol", "T", "Closed Form European", "Simulated American", "Early exercise"))
for S0_table1 in S0_values:
for sd_table1 in sd_values:
for T_table1 in T_values:
euoption = european_put_price(S0=S0_table1, K=K_value, r=r_value,sd=sd_table1, T=T_table1)
Stock_Matrix_GBM = SimulateGBM(S0=S0_table1, r=r_value, sd=sd_table1, T=T_table1,
paths=paths_value,steps=steps_value)
Option_price = priceOption(S0=S0_table1, K=K_value, r=r_value, paths=paths_value,
sd=sd_table1, T=T_table1, steps=steps_value,
Stock_Matrix=Stock_Matrix_GBM,
k=k_value,reduce_variance=True)
print("%d %10.2f %10d %20.3f %20.3f %20.3f"
%(S0_table1,sd_table1, T_table1, euoption, Option_price,Option_price-euoption))
Table1_func(S0_values=S0_values_table1, sd_values=sd_values_table1, T_values=T_values_table1)
|
nilq/baby-python
|
python
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from gensim.test.utils import common_texts
from gensim.models import Word2Vec
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import pandas as pd
import numpy as np
from brightics.common.repr import BrtcReprBuilder
from brightics.common.repr import strip_margin
from brightics.function.utils import _model_dict
from brightics.common.repr import dict2MD
from brightics.common.repr import plt2MD
from brightics.common.repr import pandasDF2MD
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import greater_than_or_equal_to
def hash_brtc(astring):
    # deterministic hash function for Word2Vec so training is reproducible
    # (Python's built-in hash() is randomized per process)
    return ord(astring[0])
def word2vec(table, **params):
check_required_parameters(_word2vec, params, ['table'])
params = get_default_from_parameters_if_required(params, _word2vec)
param_validation_check = [greater_than_or_equal_to(params, 1, 'size'),
greater_than_or_equal_to(params, 1, 'window'),
greater_than_or_equal_to(params, 1, 'min_count'),
greater_than_or_equal_to(params, 1, 'workers'),
greater_than_or_equal_to(params, 1, 'topn')]
validate(*param_validation_check)
return _word2vec(table, **params)
def _word2vec(table, input_col, size=100, window=5, min_count=1, seed=None, workers=4, sg=1, topn=30):
texts = table[input_col].apply(list).tolist()
w2v = Word2Vec(texts, size=size, window=window, min_count=min_count, seed=seed, workers=workers, sg=sg, hashfxn=hash_brtc)
w2v.init_sims(replace=True)
vocab = w2v.wv.vocab
    algo = 'Skip-gram'
    if sg == 0:  # sg is an int flag: 0 = CBOW, 1 = Skip-gram
        algo = 'CBOW'
params = {'Input column': input_col,
'Word vector dimensionality': size,
'Context window size': window,
'Minimum word count': min_count,
'Worker threads': workers,
'Training algorithm': algo}
# tsne visualization
length = len(vocab)
if length < topn:
topn = length
topn_words = sorted(vocab, key=vocab.get, reverse=True)[:topn]
X = w2v[topn_words]
tsne = TSNE(n_components=min(2, topn), random_state=seed)
X_tsne = tsne.fit_transform(X)
df = pd.DataFrame(X_tsne, index=topn_words, columns=['x', 'y'])
fig = plt.figure()
fig.set_size_inches(50, 40)
ax = fig.add_subplot(1, 1, 1)
ax.scatter(df['x'], df['y'], s=1000)
ax.tick_params(axis='both', which='major', labelsize=50)
for word, pos in df.iterrows():
ax.annotate(word, pos, fontsize=80)
plt.show()
fig = plt2MD(plt)
plt.clf()
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## Word2Vec Result
|
| ### Total Number of words
| {length}
|
| ### Top {topn} Words
| {topn_words}
| {fig}
|
| ### Parameters
| {params}
""".format(length=length, topn=topn, topn_words=topn_words, params=dict2MD(params), fig=fig)))
vocab = list(w2v.wv.vocab)
model = _model_dict('word2vec_model')
model['params'] = params
model['vocab'] = vocab
model['w2v'] = w2v
model['_repr_brtc_'] = rb.get()
out_table = pd.DataFrame()
out_table['words'] = w2v.wv.index2word
out_table['word_vectors'] = w2v.wv[vocab].tolist()
return {'model': model, 'out_table': out_table}
# def word2vec_update(table, model):
def _feature_vec(words, model, num_features):
feature_vector = np.zeros(num_features, dtype="float32")
word_set = set(model.wv.index2word)
num_words = 1.
for word in words:
if word in word_set:
feature_vector = np.divide(np.add(feature_vector, model[word]), num_words)
num_words = num_words + 1.
return feature_vector
def _avg_feature_vecs(docs, model, num_features):
doc_feature_vectors = np.zeros((len(docs), num_features), dtype="float32")
counter = 0.
for doc in docs:
doc_feature_vectors[int(counter)] = _feature_vec(doc, model, num_features)
counter = counter + 1.
return doc_feature_vectors
def word2vec_model(table, model, **params):
check_required_parameters(_word2vec_model, params, ['table', 'model'])
return _word2vec_model(table, model, **params)
def _word2vec_model(table, model):
doc = table[model['params']['Input column']]
word_vec_model = model['w2v']
num_features = model['params']['Word vector dimensionality']
out_table = table.copy()
out_table['feature_vectors'] = _avg_feature_vecs(doc, word_vec_model, num_features).tolist()
return {'out_table': out_table}
def word2vec_similarity(model, **params):
check_required_parameters(_word2vec_similarity, params, ['model'])
params = get_default_from_parameters_if_required(params, _word2vec_similarity)
param_validation_check = [greater_than_or_equal_to(params, 1, 'topn')]
validate(*param_validation_check)
return _word2vec_similarity(model, **params)
def _word2vec_similarity(model, positive=None, negative=None, topn=1):
if positive is None and negative is None:
length = 0
else:
result = model['w2v'].wv.most_similar(positive=positive, negative=negative, topn=topn)
length = len(result)
out_table = pd.DataFrame()
out_table['most_similar_words'] = [result[i][0] for i in range(length)]
out_table['similarity'] = [result[i][1] for i in range(length)]
return {'out_table': out_table}
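# A minimal usage sketch (not part of the original module): assumes gensim
# 3.x (matching the Word2Vec keyword arguments above) and a working Brightics
# environment; trains on gensim's bundled common_texts token lists.
if __name__ == "__main__":
    table = pd.DataFrame({'docs': common_texts})
    res = word2vec(table, input_col='docs', size=10, min_count=1, topn=5)
    sim = word2vec_similarity(res['model'], positive=['computer'], topn=3)
    print(sim['out_table'])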
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['scikit-learn', 'pandas', 'scipy', 'numpy', 'category_encoders',
'statsmodels']
setup_requirements = []
misc_requirements = [
"pip==21.1",
"bump2version==0.5.11",
"wheel==0.33.6",
"watchdog==0.9.0",
"flake8==3.7.8",
"tox==3.14.0",
"coverage==4.5.4",
"Sphinx==1.8.5",
"sphinx-rtd-theme==0.4.3",
"twine==1.14.0",
"pre-commit==2.6.0",
]
test_requirements = requirements
dev_requirements = misc_requirements + requirements
setup(
author="David Masip Bonet",
author_email='david26694@gmail.com',
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Tools to extend sklearn",
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='sktools',
name='sktools',
packages=find_packages(include=['sktools', 'sktools.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
extras_require={
"test": test_requirements,
"dev": dev_requirements
},
url='https://github.com/david26694/sktools',
version='0.1.4',
zip_safe=False,
)
|
nilq/baby-python
|
python
|
import copy
import weakref
import re
from django.core import validators
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_unicode
from django.core.exceptions import FieldError, ValidationError
from django.utils.translation import get_language
from itertools import izip
from django.utils.translation import string_concat
from django.utils.datastructures import SortedDict
from bisect import bisect
import signals as persistent_signals
from fields import FieldDoesNotExist
from .utils import get_fqclassname_forclass, to_unicode_utf8
import django_documents.managers # @UnusedImport needed for triggering connecting to signals, DO NOT REMOVE
# The values to use for "blank" in SelectFields. Will be appended to the start of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
def subclass_exception(name, parents, module):
return type(name, parents, {'__module__': module})
# Calculate the verbose_name by converting from InitialCaps to "lowercase with spaces".
get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
DEFAULT_NAMES = ('verbose_name', 'permissions',
'app_label',
'abstract', 'managed', 'proxy', 'auto_created')
class ObjectValidationError(ValidationError):
def __init__(self, messages, code=None, params=None, obj = None):
assert isinstance(messages, dict)
self.message_dict = messages
self.messages = messages
self.obj = obj
self.message = self.messages
class Meta(object):
def __init__(self, meta, app_label=None):
self.local_fields = []
self.virtual_fields = []
self.module_name, self.verbose_name = None, None
self.verbose_name_plural = None
self.object_name, self.app_label = None, app_label
self.meta = meta
self.has_auto_field, self.auto_field = False, None
self.abstract = False
self.managed = True
self.proxy = False
self.proxy_for_model = None
self.parents = SortedDict()
self.duplicate_targets = {}
self.auto_created = False
self.xml_element_name = None
self.is_root = False
self.key_space_name = None
self.column_family_name = None
self.js_widgetclass = None
self.js_widgetclass_meta = None
self.index_function = None
self.is_group = False
self.abstract_managers = []
self.concrete_managers = []
def contribute_to_class(self, cls, name):
cls._meta = self
# First, construct the default values for these options.
self.object_name = cls.__name__
self.module_name = self.object_name.lower()
#self.verbose_name = get_verbose_name(self.object_name)
self.clazz_name = get_fqclassname_forclass(cls)
self.xml_element_name = cls.__name__
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
# verbose_name_plural is a special case because it uses a 's'
# by default.
setattr(self, 'verbose_name_plural', meta_attrs.pop('verbose_name_plural', string_concat(self.verbose_name, 's')))
setattr(self, 'xml_element_name', meta_attrs.pop('xml_element_name', cls.__name__))
setattr(self, 'is_root', meta_attrs.pop('is_root', self.is_root))
setattr(self, 'column_family_name', meta_attrs.pop('column_family_name', self.column_family_name))
setattr(self, 'key_space_name', meta_attrs.pop('key_space_name', self.key_space_name))
setattr(self, 'js_widgetclass', meta_attrs.pop('js_widgetclass', None))
setattr(self, 'js_widgetclass_meta', meta_attrs.pop('js_widgetclass_meta', None))
setattr(self, 'index_function', meta_attrs.pop('index_function', None))
setattr(self, "is_group", meta_attrs.pop('is_group', None))
setattr(self, "display_order", meta_attrs.pop('display_order', None))
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
def _prepare(self, model):
pass
def add_field(self, field):
        # Insert the given field in the order in which it was created, using
        # the "creation_counter" attribute of the field.
self.local_fields.insert(bisect(self.local_fields, field), field)
if hasattr(self, '_field_cache'):
del self._field_cache
del self._field_name_cache
if hasattr(self, '_name_map'):
del self._name_map
def _fields(self):
"""
The getter for self.fields. This returns the list of field objects
available to this model (including through parent models).
Callers are not permitted to modify this list, since it's a reference
to this instance (not a copy).
"""
try:
self._field_name_cache
except AttributeError:
self._fill_fields_cache()
return self._field_name_cache
fields = property(_fields)
def _fill_fields_cache(self):
cache = []
for parent in self.parents:
for field, model in parent._meta.get_fields_with_model():
if model:
cache.append((field, model))
else:
cache.append((field, parent))
cache.extend([(f, None) for f in self.local_fields])
self._field_cache = tuple(cache)
self._field_name_cache = [x for x, _ in cache]
def get_field(self, name, many_to_many=True):
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
to_search = self.fields
for f in to_search:
if f.name == name:
return f
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))
def get_field_by_xml_element_name(self, xml_element_name):
to_search = self.fields
for f in to_search:
if f.xml_element_name == xml_element_name:
return f
raise FieldDoesNotExist('%s has no field with xml_element_name %r' % (self.object_name, xml_element_name))
def describe(self, described_classes = None, recursive = False):
if not described_classes:
described_classes = []
if self.clazz_name not in described_classes:
if recursive:
described_classes.append(self.clazz_name)
description = {}
fields_desc_list = []
for field in self.local_fields:
fields_desc_list.append(field.describe(described_classes = described_classes, recursive = recursive))
description['clazz'] = self.clazz_name
description['fields'] = fields_desc_list
description['verbose_name'] = self.verbose_name
description['is_group'] = self.is_group
if self.js_widgetclass is not None:
description['js_widgetclass'] = self.js_widgetclass
if self.js_widgetclass_meta is not None:
description['js_widgetclass_meta'] = self.js_widgetclass_meta
return description
else:
description = {"clazz": self.clazz_name, "already_described" : True}
return description
def get_verbose_name(self, locale):
if isinstance(self.verbose_name, dict):
if locale in self.verbose_name:
return to_unicode_utf8( self.verbose_name[locale])
else:
return to_unicode_utf8( self.verbose_name.itervalues().next())
else:
return to_unicode_utf8(self.verbose_name)
from register import register_model
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
# If this isn't a subclass of Model, don't do anything special.
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
kwargs = {}
new_class.add_to_class('_meta', Meta(meta, **kwargs))
# Bail out early if we have already created this class.
#m = get_model(new_class._meta.app_label, name, False)
#if m is not None:
# return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = new_class._meta.local_fields + new_class._meta.virtual_fields
field_names = set([f.name for f in new_fields])
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError('Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' %
(field.name, name, base.__name__))
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Inherited some meta functions from parents
if new_class._meta.index_function is None and base._meta.index_function is not None:
new_class._meta.index_function = base._meta.index_function
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError('Local field %r in class %r clashes '\
'with field of similar name from '\
'abstract base class %r' % \
(field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
new_class._prepare()
register_model(new_class)
# register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return new_class #get_model(new_class._meta.app_label, name, False)
def copy_managers(cls, base_managers):#@NoSelf
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers:
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
def add_to_class(cls, name, value):#@NoSelf
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):#@NoSelf
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
#if hasattr(cls, 'get_absolute_url'):
# cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
# cls.get_absolute_url)
persistent_signals.class_prepared.send(sender=cls)
class DeferredAttribute(object):
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name, model):
self.field_name = field_name
self.model_ref = weakref.ref(model)
self.loaded = False
def __get__(self, instance, owner):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
assert instance is not None
cls = self.model_ref()
data = instance.__dict__
if data.get(self.field_name, self) is self:
# self.field_name is the attname of the field, but only() takes the
# actual name, so we need to translate it here.
try:
cls._meta.get_field_by_name(self.field_name)
name = self.field_name
except FieldDoesNotExist:
name = [f.name for f in cls._meta.fields
if f.attname == self.field_name][0]
# We use only() instead of values() here because we want the
# various data coersion methods (to_python(), etc.) to be called
# here.
val = getattr(
cls._base_manager.filter(pk=instance.pk).only(name).using(
instance._state.db).get(),
self.field_name
)
data[self.field_name] = val
return data[self.field_name]
def __set__(self, instance, value):
"""
        Deferred loading attributes can be set normally (which means there
        will never be a database lookup involved).
"""
instance.__dict__[self.field_name] = value
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
class Model(object):
__metaclass__ = ModelBase
_deferred = False
def __init__(self, *args, **kwargs):
#signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
self.key = None
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
fields_iter = iter(self._meta.fields)
if not kwargs:
# The ordering of the izip calls matter - izip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in izip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
for val, field in izip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
from related import RelationMeta
# Maintain compatibility with existing calls.
if isinstance(field.rel, RelationMeta):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)):
# This field will be populated on request.
continue
if kwargs:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# ROHO todo solve this
rel_obj = None
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
                #if val: # don't attempt to set a None
setattr(self, field.attname, val)
if kwargs:
for prop in kwargs.keys():
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % kwargs.keys()[0])
#signals.post_init.send(sender=self.__class__, instance=self)
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
flat_choices_dict = dict(field.flatchoices)
display_values = flat_choices_dict.get(value, value)
if isinstance( display_values, dict):
language = get_language()
lang_code = language.split('-')[0]
display_value = display_values.get(lang_code, None)
if display_value is None:
display_value = display_values.itervalues().next()
else:
display_value = display_values
return force_unicode( display_value, strings_only=True)
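    # Illustrative example (the field definition below is assumed): for
    #   choices = ((1, {'en': 'one', 'de': 'eins'}), (2, 'two'))
    # and a stored value of 1, the display value is 'one' when the active
    # language code is 'en'; if the active language has no entry, the first
    # available translation is used instead.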
def save(self):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
"""
cls = self.__class__
meta = cls._meta
assert meta.is_root, "expecting save only on root objects"
#signals.pre_save.send(sender=origin, instance=self, raw=raw)
cls.objects.save(self)
def delete(self):
cls = self.__class__
meta = cls._meta
assert meta.is_root, "expecting delete only on root objects"
cls.objects.delete(self.id)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def _add_error(self, attname, error_messages):
obj_errors = getattr(self, '_errors', None)
if obj_errors is None:
obj_errors = {}
setattr(self, '_errors', obj_errors)
        if attname not in obj_errors:
obj_errors[attname] = []
obj_errors[attname].append(error_messages)
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing message_dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in validators.EMPTY_VALUES:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError, e:
errors[f.name] = e.messages
self._add_error(f.attname, e.messages)
if errors:
raise ObjectValidationError(errors)
def full_clean(self, exclude=None):
"""
        Calls clean_fields and clean on the model, and raises an
        ``ObjectValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
try:
self.clean_fields(exclude=exclude)
except ValidationError, e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError, e:
errors = e.update_error_dict(errors)
if errors:
raise ObjectValidationError(errors, obj = self)
def visit(self, visitor):
try:
visitor.start_handle_object(self)
for field in self._meta.local_fields:
if field.rel is None:
visitor.handle_field(field, self)
else:
# relation handle visitors themself
field.handle_visit(visitor, self)
except StopIteration:
pass
visitor.end_handle_object(self)
class DataAspect(Model):
class Meta:
abstract = True
class DynamicModel(Model):
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
self.__dynamicdict__ = {}
super(DynamicModel, self).__init__( *args, **kwargs)
def add_dynamic_attribute(self, name, value):
assert not name in self.__dict__
if not issubclass(value.__class__, DataAspect):
raise Exception()
self.__dynamicdict__[name] = value
def delete_dynamic_attribute(self, name):
assert name in self.__dynamicdict__
del self.__dynamicdict__[name]
def __getattr__(self, name):
"""
        Note that __getattr__ is only invoked when normal attribute lookup
        fails; setting an attribute via __setattr__ never triggers it.
"""
try:
return self.__dynamicdict__[name]
except KeyError:
            raise AttributeError(name)
# def __getattribute__(self, name):
# try:
# return super(DynamicModel, self).__getattribute__(name)
# except AttributeError:
# return self.__dynamicdict__[name]
#
def _get_dynamic_attributes(self):
return self.__dynamicdict__.copy()
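    # Minimal usage sketch (the subclass names below are hypothetical, not
    # part of this module); dynamic attribute values must subclass DataAspect:
    #
    #   class Shipping(DataAspect):
    #       pass
    #
    #   class Order(DynamicModel):
    #       pass
    #
    #   order = Order()
    #   order.add_dynamic_attribute('shipping', Shipping())
    #   order.shipping                            # resolved via __getattr__
    #   order.delete_dynamic_attribute('shipping')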
class ModelVisitor(object):
"""
defines the interface of a model visitor
"""
def start_handle_object(self, instance):
pass
def end_handle_object(self, instance):
pass
def handle_field(self, field, instance):
pass
def handle_one_of(self, one_of_field, related_instance):
pass
def handle_list_of(self, list_of_field, instance):
pass
def handle_map_of(self, map_of_relation, instance):
pass
def handle_dynamic_field(self, name, value):
pass
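# Example visitor (a sketch, not part of the original module): collects the
# names of all plain, non-relation fields reached through Model.visit().
class FieldNameCollector(ModelVisitor):
    def __init__(self):
        self.names = []
    def handle_field(self, field, instance):
        self.names.append(field.name)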
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from lantz import Feat, Action, Driver, Q_
from lantz.drivers.ni.daqmx import AnalogOutputTask, VoltageOutputChannel
import numpy as np
import pandas as pd
import os
import time
default_folder = os.path.dirname(__file__)
default_filename = os.path.join(default_folder, 'power_calibration.csv')
class V1000F(Driver):
def __init__(self, ch, calibration_file=default_filename, min_max=(0., 5.)):
super().__init__()
self._voltage = 0
self.ch = ch
self.min_max = min_max
self.calibration_file = calibration_file
return
@Feat(units='V', limits=(0., 5.))
def voltage(self):
return self._voltage
@voltage.setter
def voltage(self, val):
task_config = {
'data': np.ones(5)*val,
'auto_start': True,
}
self.task.write(**task_config)
self._voltage = val
@Feat(units='W', limits=(0, 100.e-3))
def power(self):
return self.voltage2power(self.voltage)
@power.setter
def power(self, val):
self.voltage = self.power2voltage(val)
def _get_cal(self):
d = pd.read_csv(self.calibration_file)
return d.voltage.values, d.power.values
def power2voltage(self, p):
cal_vs, cal_ps = self._get_cal()
if type(p) is Q_:
p = p.to('W').m
return Q_(np.interp(p, cal_ps, cal_vs, period=1000), 'V')
def voltage2power(self, v):
cal_vs, cal_ps = self._get_cal()
if type(v) is Q_:
v = v.to('V').m
return Q_(np.interp(v, cal_vs, cal_ps), 'W')
def initialize(self):
self.task = AnalogOutputTask('Analog_Out_{}'.format(self.ch.split('/')[-1]))
VoltageOutputChannel(self.ch, min_max=self.min_max, units='volts', task=self.task)
def finalize(self):
self.task.clear()
@Action()
def run_calibration(self, power_fun, npoints=500, min_pt=0, max_pt=5, delay_per_point=0.1):
voltages = np.linspace(min_pt, max_pt, npoints)
powers = np.zeros(npoints)
for i, v in enumerate(voltages):
self.voltage = Q_(v, 'V')
time.sleep(delay_per_point)
powers[i] = power_fun().to('W').m
print('{} V = {} W'.format(v, powers[i]))
data = np.transpose(np.array([voltages, powers]))
np.savetxt(self.calibration_file, data, delimiter=",", header='voltage,power', comments='')
return data
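    # Usage sketch (the channel name and the power-meter callable are
    # assumptions; adjust for the actual setup):
    #
    #   aom = V1000F('Dev1/ao0')
    #   aom.initialize()
    #   aom.power = Q_(10, 'mW')    # converted through the calibration curve
    #   # data = aom.run_calibration(power_meter.power)  # power_meter is hypothetical
    #   aom.finalize()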
|
nilq/baby-python
|
python
|
#!/usr/local/bin/python3
from SM1 import * # The SM1 library is imported here
COMPORT = '/dev/tty.usbserial-AL05TVH5' # Serial port (on Windows, it is COM1,2,...)
ser = setup_serialcom(COMPORT) # Connection w serial port established
print('Reading axes position...\n')
output1 = query_position(ser, 1) # Position device n. 1 acquired (as a string)
output2 = query_position(ser, 2) # Position device n. 2 acquired (as a string)
output3 = query_position(ser, 3) # Position device n. 3 acquired (as a string)
print('yellow axis: ' + output1) # Print the position on screen
print('green axis: ' + output2) # Print the position on screen
print('red axis: ' + output3) # Print the position on screen
print('')
print(query_status(ser, 3))
ser.close() # Connection with serial port closed
|
nilq/baby-python
|
python
|
# The code that helped me to achive this is from Just Van Rossum: https://gist.github.com/justvanrossum/b65f4305ffcf2690bc65
def drawShape(shapePhase, shapeRadius):
def variation(pt, radius, phase):
x, y = pt
dx = radius * cos(phase)
dy = radius * sin(phase)
return x + dx, y + dy
points = []
for i in range(numShapePoints):
a = 2 * pi * i / numShapePoints
x = shapeRadius * cos(a)
y = shapeRadius * sin(a)
rPhase, rSign = randomPhases[i]
points.append(variation((x, y), 0.1 * shapeRadius, rPhase + rSign * 2 * pi * shapePhase))
points.append(None)
path = BezierPath()
path.qCurveTo(*points)
path.closePath()
drawPath(path)
#Counter shape
with savedState():
cp = 20
fill(0)
stroke(1)
clipPath(path)
polygon(
(-100 + randint(-cp, cp), 200 + randint(-cp, cp)),
(-100 + randint(-cp, cp), -200 + randint(-cp, cp)),
(100 + randint(-cp, cp), -200 + randint(-cp, cp)),
(100 + randint(-cp, cp), -150 + randint(-cp, cp)),
(0 + randint(-cp, cp), -150 + randint(-cp, cp)),
(0 + randint(-cp, cp), 50 + randint(-cp, cp)),
(1000 + randint(-cp, cp), 50 + randint(-cp, cp)),
(1000 + randint(-cp, cp), 150 + randint(-cp, cp)),
(100 + randint(-cp, cp), 100 + randint(-cp, cp)),
(100 + randint(-cp, cp), 200 + randint(-cp, cp)),
(-100 + randint(-cp, cp), 200 + randint(-cp, cp)),
close=True
)
numShapePoints = 5
randomPhases = [(2 * pi * random(), randint(-100, 100)) for i in range(numShapePoints)]
canvasSize = 1080
nShapes = 60
nFrames = 48
for frame in range(nFrames):
framePhase = frame / nFrames
newPage(canvasSize, canvasSize)
frameDuration(1/24)
fill(0)
rect(0, 0, canvasSize, canvasSize)
translate(canvasSize/2, canvasSize/2)
strokeWidth(1)
stroke(1)
fill(None)
for i in range(nShapes):
shapePhase = i / nShapes
radius = 20 + i * 10
drawShape(framePhase + shapePhase * 0.5, radius)
saveImage("~/Desktop/07_36_DAYS_OF_TYPE_2020.mp4")
|
nilq/baby-python
|
python
|
## Hit-and-Run Sampling, adapted from Johannes Asplund-Samuelsson (https://github.com/Asplund-Samuelsson)
# Import libraries
import sys, os
import numpy as np
import time
import math
from scipy import stats
#######################################################################################################
## Names of input files and output files need to be changed according to which substrate is being used!
#######################################################################################################
EFM_Nr = sys.argv[1]
#########---Read in Data---#########
###-----Load Stoichiometric Matrix-----###
S_Matrix_file_name = sys.argv[2]
#S_Matrix_file_name = "/S_Matrix/S_Matrix_EFM_Nr_"+EFM_Nr+"_For.txt"
path = os.getcwd()+S_Matrix_file_name
S_Matrix_file = open(path,"r+")
S_Matrix_file_contents = S_Matrix_file.read()
S_Matrix_file_contents = S_Matrix_file_contents[:-2]
S_Matrix_file_contents = S_Matrix_file_contents.replace("\n"," ")
S_Matrix_file_contents = S_Matrix_file_contents.split(", ")
S_Matrix = []
for line in S_Matrix_file_contents:
line = line[1:-1]
line = list(line.split(" "))
line = line[1:]
line_float = [float(entry) for entry in line]
S_Matrix.append(line_float)
S_Matrix = np.array(S_Matrix)
#print(S_Matrix)
###-----Load Standard Change of Gibbs Free Energy Values-----###
dG0_file_name = sys.argv[3]
#dG0_file_name = "/dG0/dG0_EFM_Nr_"+EFM_Nr+"_For.txt"
path = os.getcwd()+dG0_file_name
dG0_file = open(path,"r+")
dG0_file_contents = dG0_file.read()
dG0_file_contents = dG0_file_contents[2:-1]
dG0_file_contents = dG0_file_contents.split(', ')
dG0_float = [float(entry) for entry in dG0_file_contents]
dG0 = np.array(dG0_float)
# RT is a constant
T=303.15
R=8.3145e-3
RT = R*T
###-----Load Metabolite Concentration ranges-----###
MetRange_file_name = sys.argv[4]
#MetRange_file_name = "/Met_Ranges/MetRange_EFM_Nr_"+EFM_Nr+"_For.txt"
path = os.getcwd()+MetRange_file_name
MetRange_file = open(path,"r+")
MetRange_file_contents = MetRange_file.read()
MetRange_file_contents = MetRange_file_contents[:-2]
MetRange_file_contents = MetRange_file_contents.replace("\n"," ")
MetRange_file_contents = MetRange_file_contents.split(", ")
MetRange = []
for line in MetRange_file_contents:
line = line[1:-1]
line = list(line.split(" "))
line_float = [float(entry)/1000 for entry in line]
MetRange.append(line_float)
#MetRange = np.log(np.array(MetRange))
MetRange = np.round(np.log(np.array(MetRange)),3)
#print(MetRange)
###-----Load MDF Value-----###
MDF_file_name = sys.argv[5]
#MDF_file_name = "/MDF/MDF_EFM_Nr_"+EFM_Nr+"_For.txt"
path = os.getcwd()+MDF_file_name
MDF_file = open(path,"r+")
MDF_file_contents = MDF_file.read()
#MDF = round(float(MDF_file_contents),2)
MDF = float(MDF_file_contents)
###-----Load Starting Concentration set-----###
Conc_Init_file_name = sys.argv[6]
#Conc_Init_file_name = "/Conc_Init/Conc_Init_EFM_Nr_"+EFM_Nr+"_For.txt"
path = os.getcwd()+Conc_Init_file_name
Conc_Init_file = open(path,"r+")
Conc_Init_file_contents = Conc_Init_file.read()
Conc_Init_file_contents = Conc_Init_file_contents[2:-1]
Conc_Init_file_contents = Conc_Init_file_contents.split(', ')
Conc_Init_float = [float(entry) for entry in Conc_Init_file_contents]
c_0 = np.round(np.log(np.array(Conc_Init_float)),3)
#c_0 = np.log(np.array(Conc_Init_float))
#print(c_0)
###-----Load Ratio Matrix-----###
R_Matrix_file_name = sys.argv[7]
#R_Matrix_file_name = "/Ratio_Matrix/Ratio_Matrix_EFM_Nr_"+EFM_Nr+"_For.txt"
path = os.getcwd()+R_Matrix_file_name
R_Matrix_file = open(path,"r+")
R_Matrix_file_contents = R_Matrix_file.read()
R_Matrix_file_contents = R_Matrix_file_contents[:-2]
R_Matrix_file_contents = R_Matrix_file_contents.replace("\n"," ")
R_Matrix_file_contents = R_Matrix_file_contents.split(", ")
R_Matrix = []
for line in R_Matrix_file_contents:
line = line[1:-1]
line = list(line.split(" "))
line = line[1:]
line_float = [float(entry) for entry in line]
R_Matrix.append(line_float)
R_Matrix = np.array(R_Matrix)
###-----Load Name References-----###
Name_References_file_name = sys.argv[8]
#R_Matrix_file_name = "/Ratio_Matrix/Ratio_Matrix_EFM_Nr_"+EFM_Nr+"_For.txt"
path = os.getcwd()+Name_References_file_name
Name_References_file = open(path,"r+")
Name_References_file_contents = Name_References_file.readlines()
max_tot_c = 0.5
nr_c_met = 0
for line in Name_References_file_contents:
#print(line)
if line[0] =="M":
if "[e]" not in line:
nr_c_met +=1
if "h2o" in line:
max_tot_c += 1
if "biomass" in line:
max_tot_c += 1
if "PHB" in line:
max_tot_c += 1
#########-----Algorithm------#########
# Constrain concentration ratios
# Use natural log
ratio_lim = np.log(np.array([
[ 0.499, 50.1 ], # 0.5 < ATP / ADP < 50
[ 0.00499, 0.501 ], # 0.005 < NADH / NAD < 0.5
[ 0.0499, 50.1 ], # 0.05 < NADPH / NADP < 50
[ 0.099, 10.1 ] # 0.1 < QH2 / Q < 10
]))
# Define function for random sampling of concentrations
def random_c(MetRange):
sample = np.array([np.random.random() for n in range(0, MetRange.shape[0])])
return sample * (MetRange[:,1] - MetRange[:,0]) + MetRange[:,0]
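# e.g. a MetRange row [ln(1e-6), ln(1e-2)] maps a uniform draw u in [0,1)
# to u*(max-min)+min, so concentrations are sampled uniformly in log space.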
# Define function for checking if set is thermodynamically feasible
def df_ok(c,MDF):
# Calculate delta G prime
df = -(dG0 + RT * np.sum(np.transpose(S_Matrix) * c, 1))
# Check if all driving forces higher than 0
#print("df is:\n")
#print(sum(df > MDF*0))
# if not sum(df >= MDF*0.9) == df.shape[0]:
# print("It's the dGs!")
return sum(df >= MDF*0.9) == df.shape[0]
# Define function for checking if set has acceptable ratios
def ratios_ok(c):
#ratios = np.sum(R_Matrix.T * c, 1).reshape([ratio_lim.shape[0], 1])
#print(ratios)
ratios = np.sum(R_Matrix.T * c, 1).reshape([ratio_lim.shape[0], 1])
min = np.sum(np.subtract(ratios, ratio_lim) >= 0, 0)[0] == ratios.shape[0]
max = np.sum(np.subtract(ratios, ratio_lim) <= 0, 0)[1] == ratios.shape[0]
# if not min or max:
# print("It's the ratios")
return min and max
# Define function for checking that sum of concentrations is not too high (0.5 M)
def sum_ok(c, max_tot_c):
#print("sum of all conc is:\n")
#print(np.sum(np.exp(c)))
## Sum only intracellular metabolites
    return np.sum(np.exp(c[-nr_c_met:])) <= max_tot_c  # use the candidate c, not the fixed starting point c_0
# Define function that checks concentrations are within limits
def limits_ok(c):
c_l = c.reshape([c.shape[0],1])
min = np.sum(np.subtract(c_l, MetRange) >= 0, 0)[0] == c.shape[0]
max = np.sum(np.subtract(c_l, MetRange) <= 0, 0)[1] == c.shape[0]
# if not min or max:
# print("It's the ranges!")
return min and max
# Define function for checking feasibility, ratios, sum, and limits in one go
def is_feasible(c,MDF,max_tot_c):
return df_ok(c,MDF) and limits_ok(c) and ratios_ok(c) and sum_ok(c[2:],max_tot_c)
print("Found feasible set!")
# Define function for checking feasibility, ratios, sum, and limits in one go
def is_feasible_final(c,MDF,max_tot_c):
if not df_ok(c,MDF):
print("It is the dG!")
# if not ratios_ok(c):
# print("It is the ratios!")
# ratios = np.sum(R_Matrix.T * c, 1).reshape([ratio_lim.shape[0], 1])
# print(np.exp(ratios))
if not limits_ok(c):
print("It is the ranges!")
return df_ok(c,MDF) and limits_ok(c) and ratios_ok(c) and sum_ok(c[2:],max_tot_c)
print("Found feasible set!")
# Modify direction in order to get unstuck from concentration limits, a.k.a. The Unsticking Function TM
def unstick_direction(c, direction, MetRange):
# Determine what metabolites are stuck at limits
stuck = c.reshape((c.size,1)) == MetRange
# Determine current signs of direction vector
dirsign = np.sign(direction)
# Pick a random sign for metabolites stuck at max
max_sign = np.random.choice([-1,1], 1)
# All directions for metabolites stuck at max must be the same sign
dirsign[stuck[:,1] * dirsign != 0] = max_sign
# All directions for metabolites stuck at min must be the opposite sign
dirsign[stuck[:,0] * dirsign != 0] = -max_sign
# Determine the directions that must change sign
change_sign = dirsign != np.sign(direction)
# Change the sign of directions that must change sign
direction[change_sign] = direction[change_sign] * -1
# Return the compatibility-modified "unstuck" direction vector
return direction
# Define function for selecting a random direction
def random_direction(c):
# Create a random vector of the same length as c
direction = np.array([np.random.random() for n in range(0, c.shape[0])])
# Subtract 0.5 to introduce negative directions
direction = direction - 0.5
# Set fixed concentration direction to zero
direction[MetRange[:,1] - MetRange[:,0] == 0] = 0
# Normalize length of direction vector
normalized_direction = direction / np.linalg.norm(direction)
return normalized_direction
# Define function to generate one feasible metabolite concentration set
def generate_feasible_c(MetRange, MDF,max_tot_c):
c = random_c(MetRange) # Initialize c
while not is_feasible(c, MDF,max_tot_c):
c = random_c(MetRange) # Generate new c until feasible
return c
# Determine minimum and maximum possible theta given concentration limits
def calculate_theta_hard_limit(c, direction, MetRange):
# Find smallest fraction of direction that hits limit if added
theta_max = np.vstack([
(MetRange[:,1] - c)[direction != 0] / direction[direction != 0],
(MetRange[:,0] - c)[direction != 0] / direction[direction != 0]
])
#print(theta_max)
theta_max = np.max(theta_max, 0)
#print(theta_max)
theta_max = min(theta_max[theta_max >= 0])
#print(theta_max)
# Find smallest fraction of direction that hits limit if subtracted
theta_min = np.vstack([
(c - MetRange[:,1])[direction != 0] / direction[direction != 0],
(c - MetRange[:,0])[direction != 0] / direction[direction != 0]
])
#print(theta_min)
theta_min = np.max(theta_min, 0)
#print(theta_min)
theta_min = -min(theta_min[theta_min >= 0])
#print(theta_min)
return (theta_min, theta_max)
# Define function for determining minimum and maximum step length (theta)
def theta_range(c, direction, max_tot_c, precision=1e-3):
# Define function for honing in on a theta limit
def hone_theta(theta_outer, max_tot_c, theta_inner=0):
if is_feasible(c + theta_outer * direction, MDF, max_tot_c):
# If the outer theta is feasible, accept that solution
theta_inner = theta_outer
else:
while abs(theta_outer - theta_inner) > precision:
# Calculate a theta between outer and inner limits
theta_cur = (theta_outer + theta_inner) / 2
if is_feasible(c + theta_cur * direction, MDF, max_tot_c):
# Move outwards, set inner limit to current theta
theta_inner = theta_cur
else:
# Move inwards, set outer limit to current theta
theta_outer = theta_cur
# Return inner theta
return theta_inner
# Get hard limits on theta from concentrations
theta_lim = calculate_theta_hard_limit(c, direction, MetRange)
# Hone in on upper theta
theta_upper = hone_theta(theta_lim[1],max_tot_c)
# Hone in on lower theta
theta_lower = hone_theta(theta_lim[0],max_tot_c)
# Return results
return [theta_lower, theta_upper]
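# e.g. with precision=1e-3 and a hard limit theta_max=2.0, hone_theta halves
# the bracket [0, 2.0] until the feasible inner bound and the infeasible
# outer bound differ by less than 1e-3 (about 11 bisection steps).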
# Define function for performing hit-and-run sampling within the solution space
def hit_and_run(S_Matrix, dG0, MetRange, ratio_lim, R_Matrix, n_samples, MDF, max_tot_c, precision=1e-3):
# Generate starting point
#c = generate_feasible_c(MetRange, MDF)
#print("--- %s seconds to find the first feasible ---" % (time.time() - start_time))
# Take starting point from Input
c=c_0
# Set up concentration storage list
fMCSs = [c]
# Perform n steps
for i in range(0, n_samples - 1):
# Generate random direction
direction = random_direction(c)
# Make sure that the algorithm doesn't get stuck at the boundaries of the solution space
direction_unstuck = unstick_direction(c, direction,MetRange)
# Determine minimum and maximum step length
theta = theta_range(c, direction_unstuck, max_tot_c, precision=precision)
# Perform a random sampling of the step length
theta = theta[0] + np.random.random() * (theta[1] - theta[0])
# Perform step
c = c + theta * direction
# Ensure feasibility
if not is_feasible_final(c,MDF,max_tot_c):
print("Warning: Infeasible point reached.")
break
# Store concentration
fMCSs.append(c)
# Return list of concentrations
return fMCSs
count = 0
final_out_Conc = ''
final_out_dG = ''
for c in hit_and_run(S_Matrix, dG0, MetRange, ratio_lim, R_Matrix, 5000, MDF, max_tot_c):
# Print CSV in mM
count+=1
final_out_Conc = final_out_Conc + "fMCS"+str(count)+"," + ",".join([str(np.round(np.exp(x)*1000,3)) for x in c]) + "\n"
df = -(dG0 + RT * np.sum(np.transpose(S_Matrix) * c, 1))
final_out_dG = final_out_dG + "fMCS"+str(count)+"," + ",".join([str(np.round(df_1,3)) for df_1 in df]) + "\n"
Sampling_file_contents = final_out_dG
Sampling_file_contents = Sampling_file_contents.split('\n')
for line in Sampling_file_contents[:-1]:
line_split = line.split(',')
if line_split[0] == 'fMCS1':
#print(line_split[0])
line_split = line_split[1:]
#print(line_split)
line_split = [float(entry) for entry in line_split]
Data_All = np.array(line_split)
#elif line_split[0] != 'fMCS1':
else:
#print(line_split[0])
line_split = line_split[1:]
line_split = [float(entry) for entry in line_split]
Data_fmc_Others = np.array(line_split)
Data_All = np.vstack((Data_All,Data_fmc_Others))
## Calculate Median and MAD
medians = np.round(np.median(Data_All, axis=0),3)
final_out_Median = medians
#print(medians)
MADs = np.round(stats.median_abs_deviation(Data_All),3)
final_out_MAD = MADs
#print(MADs)
# Median_File = open("Medians_"+EFM_Nr+".txt","w")
# np.savetxt(Median_File,medians)
# Median_File.close()
# MAD_File = open("MADs_"+EFM_Nr+".txt","w")
# np.savetxt(MAD_File,MADs)
# MAD_File.close()
# Output_File_Name_Conc = sys.argv[8]
# #Output_File = open("Sampling_Results_EFM_Nr_"+EFM_Nr+"_For_WT.txt","w")
# Output_File_Conc = open(Output_File_Name_Conc,"w")
# Output_File_Conc.write(final_out_Conc)
# Output_File_Conc.close()
# Output_File_Name_dG = sys.argv[9]
# Output_File_dG = open(Output_File_Name_dG,"w")
# Output_File_dG.write(final_out_dG)
# Output_File_dG.close()
Output_File_Name_Median = sys.argv[9]
#Output_File_Median = open(Output_File_Name_Median,"w")
path_Out_1 = os.getcwd()+Output_File_Name_Median
Output_File_Median = open(path_Out_1,"w")
np.savetxt(Output_File_Median,final_out_Median)
#Output_File_Median.write(final_out_Median)
Output_File_Median.close()
Output_File_Name_MAD = sys.argv[10]
#Output_File_MAD = open(Output_File_Name_MAD,"w")
path_Out_2 = os.getcwd()+Output_File_Name_MAD
Output_File_MAD = open(path_Out_2,"w")
np.savetxt(Output_File_MAD,final_out_MAD)
#Output_File_MAD.write(final_out_MAD)
Output_File_MAD.close()
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture as GMM
from sklearn.cluster import DBSCAN
from time import time
def color_match(im, Q = 5, verbose = False):
GMM_FEATURE_MATRIX = im.reshape(-1,3)
model = GMM(n_components=Q,covariance_type='diag')
CLOSEST_PRIMARY_COLORS = model.fit_predict(GMM_FEATURE_MATRIX)
if verbose:
c = model.means_[CLOSEST_PRIMARY_COLORS]
c = c.reshape(im.shape)
if c.max()>1:
plt.imshow(c.astype(int))
else:
plt.imshow(c)
plt.xticks([])
plt.yticks([])
        plt.title('Primary colors found with GMM')
plt.show()
return CLOSEST_PRIMARY_COLORS
def spacial_cluster(q, EPS = 5, verbose = False):
model_2 = DBSCAN(eps=EPS, n_jobs=-1)
r = model_2.fit_predict(q)
objects = []
for j in range(r.max()+1):
obj = q[np.where(r==j)]
(x0,y0),(x1,y1) = obj.min(0),obj.max(0)
cx,cy = ( (x0+x1)//2 ,(y0+y1)//2 )
w,h = ( x1-x0, y1-y0 )
objects.append([cx,cy,w,h])
if verbose:
plt.scatter(obj[:,0],obj[:,1],marker='.')
if verbose:
plt.show()
return objects
def cod(im, Q=5, eps=5, verbose = False):
CLOSEST_PRIMARY_COLORS = color_match(im, Q, verbose)
compressed_image = CLOSEST_PRIMARY_COLORS.reshape(im.shape[:2])
t0 = time()
OBJECT_BBOXES = []
for i in range(Q):
q = np.flip(np.array(np.where(compressed_image==i)).T,1)
OBJECT_BBOXES = OBJECT_BBOXES + spacial_cluster(q, eps, verbose)
print("DBSCAN took {} seconds".format(round(time()-t0,2)))
return OBJECT_BBOXES
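# Usage sketch (synthetic image; the Q and eps values here are illustrative):
if __name__ == '__main__':
    demo = np.zeros((60, 60, 3))
    demo[10:20, 10:20] = [1.0, 0.0, 0.0]    # one red square on black
    print(cod(demo, Q=2, eps=3))            # [cx, cy, w, h] per detected blob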
|
nilq/baby-python
|
python
|
from inventory.env import Staging
from inventory.project import BackEnd, FrontEnd
class DevelopHost(Staging, BackEnd, FrontEnd):
ansible_host = 'develop_hostname'
version = 'develop'
extra = {'debug': 1}
class StagingHost(Staging, BackEnd, FrontEnd):
ansible_host = 'master_hostname'
version = 'master'
extra_branches = ['foo', 'bar']
extra_objs = [
{
'prop1': 'value1',
'prop2': 'value2',
},
{
'prop3': 'value3',
'prop4': 'value4',
},
]
|
nilq/baby-python
|
python
|
"""
Tests of neo.io.micromedio
"""
import unittest
from neo.io import MicromedIO
from neo.test.iotest.common_io_test import BaseTestIO
class TestMicromedIO(BaseTestIO, unittest.TestCase, ):
ioclass = MicromedIO
entities_to_download = [
'micromed'
]
entities_to_test = [
'micromed/File_micromed_1.TRC'
]
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
# ElectrumSV - lightweight Bitcoin SV client
# Copyright (C) 2019-2020 The ElectrumSV Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from asyncio import Event, Queue, new_event_loop, run_coroutine_threadsafe, CancelledError
from concurrent.futures import CancelledError as FCancelledError
from functools import partial
import queue
import threading
from aiorpcx import instantiate_coroutine
from .logs import logs
logger = logs.get_logger("async")
class ASync(object):
'''This helper coordinates setting up an asyncio event loop thread, executing coroutines
from a different thread, and running completion callbacks in a different thread.
'''
def __init__(self):
self._queue = queue.Queue()
self.thread = threading.Thread(target=self._main, name="async")
self.loop = new_event_loop()
self.start_event = threading.Event()
self.stop_event = self.event()
self.futures = set()
def event(self):
'''Return an asyncio.Event for our event loop.'''
return Event(loop=self.loop)
def queue(self, maxsize=0):
        '''Return an asyncio.Queue for our event loop.'''
return Queue(maxsize, loop=self.loop)
def __enter__(self):
logger.info('starting async thread')
self.thread.start()
# Wait for the thread to definitively start before returning
self.start_event.wait()
logger.info('async thread started')
return self
def __exit__(self, exc_type, exc_value, traceback):
# Wait for the thread to definitively stop before returning
# stop_event must be set from the loop
logger.info('stopping async thread')
self.loop.call_soon_threadsafe(self.stop_event.set)
self.thread.join()
logger.info('async thread stopped')
async def _wait_until_stopped(self):
await self.stop_event.wait()
for future in list(self.futures):
future.cancel()
def _main(self):
self.start_event.set()
self.loop.run_until_complete(self._wait_until_stopped())
self.loop.close()
def _spawn(self, coro, args):
coro = instantiate_coroutine(coro, args)
return run_coroutine_threadsafe(coro, self.loop)
def _collect(self, on_done, future):
self.futures.remove(future)
if on_done:
self._queue.put((on_done, future))
else:
try:
future.result()
except (CancelledError, FCancelledError):
pass
except Exception:
logger.exception('async task raised an unhandled exception')
def spawn(self, coro, *args, on_done=None):
future = self._spawn(coro, args)
self.futures.add(future)
future.add_done_callback(partial(self._collect, on_done))
return future
def spawn_and_wait(self, coro, *args, timeout=None):
future = self._spawn(coro, args)
return future.result(timeout)
def run_pending_callbacks(self):
while not self._queue.empty():
on_done, future = self._queue.get()
try:
on_done(future)
except Exception:
logger.exception('unhandled exception in run_pending_callbacks')
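    # Usage sketch (the coroutine below is illustrative):
    #
    #   async def double(n):
    #       return n * 2
    #
    #   with ASync() as aio:
    #       assert aio.spawn_and_wait(double, 21) == 42
    #       aio.spawn(double, 1, on_done=lambda fut: print(fut.result()))
    #       aio.run_pending_callbacks()  # on_done callbacks run in this thread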
|
nilq/baby-python
|
python
|
import json
from ipaddress import IPv4Address
from pytest_toolbox.comparison import AnyInt, RegexStr
from .conftest import Factory
async def test_login(cli, url, factory: Factory):
user = await factory.create_user()
r = await cli.post(
url('auth:login'),
data=json.dumps({'email': user.email, 'password': user.password}),
headers={'Content-Type': 'application/json', 'Origin': 'null'},
)
obj = await r.json()
assert obj == {
'auth_token': RegexStr('.*'),
'session': {'session_id': AnyInt(), 'ts': AnyInt(), 'name': 'Tes Ting', 'email': 'testing-1@example.com'},
}
# auth_token is tested in test_auth_ui
async def test_logout(cli, url, db_conn, factory: Factory):
await factory.create_user()
assert 1 == await db_conn.fetchval('select count(*) from auth_sessions')
session_id = await db_conn.fetchval('select id from auth_sessions')
h = {'Authentication': 'testing' * 6}
data = {'session_id': session_id, 'ip': '1.2.3.4', 'user_agent': 'whatever', 'action': 'logout'}
r = await cli.post(url('auth:update-session'), json=data, headers=h)
assert r.status == 200, await r.text()
data = {'session_id': session_id, 'ip': '255.255.255.1', 'user_agent': None, 'action': 'logout'}
r = await cli.post(url('auth:finish-session'), json=data, headers=h)
assert r.status == 200, await r.text()
assert 1 == await db_conn.fetchval('select count(*) from auth_sessions')
s_id, active = await db_conn.fetchrow('select id, active from auth_sessions')
assert active is False
assert 3 == await db_conn.fetchval('select count(*) from auth_user_agents')
r = await db_conn.fetch(
"""
select ip, action, ua.value as user_agent from auth_session_events e
join auth_user_agents ua on e.user_agent = ua.id
where session=$1
order by e.id
""",
s_id,
)
events = [dict(e) for e in r]
assert events == [
{'ip': IPv4Address('127.0.0.1'), 'action': 'login-pw', 'user_agent': RegexStr('Python/.+')},
{'ip': IPv4Address('1.2.3.4'), 'action': 'update', 'user_agent': 'whatever'},
{'ip': IPv4Address('255.255.255.1'), 'action': 'logout', 'user_agent': ''},
]
async def test_logout_invalid(cli, url, db_conn):
h = {'Authentication': 'testing' * 6}
data = {'session_id': 123, 'ip': '255.255.255.1', 'user_agent': 'whatever', 'action': 'logout'}
r = await cli.post(url('auth:finish-session'), json=data, headers=h)
assert r.status == 400, await r.text()
assert await r.json() == {'message': 'wrong session id'}
assert await db_conn.fetchval('select count(*) from auth_session_events') == 0
async def test_logout_invalid_auth(cli, url, db_conn, factory: Factory):
await factory.create_user()
assert 1 == await db_conn.fetchval('select count(*) from auth_sessions')
session_id = await db_conn.fetchval('select id from auth_sessions')
h = {'Authentication': 'testing' * 5}
r = await cli.post(url('auth:finish-session'), json={'session_id': session_id, 'event': '{"foo": 4}'}, headers=h)
assert r.status == 403, await r.text()
assert await r.text() == 'invalid Authentication header'
|
nilq/baby-python
|
python
|
from datasets.base.image.manipulator import ImageDatasetManipulator
import numpy as np
import copy
from datasets.base.common.operator.manipulator import fit_objects_bounding_box_in_image_size, \
update_objects_bounding_box_validity, prepare_bounding_box_annotation_standard_conversion
from data.types.bounding_box_format import BoundingBoxFormat
from data.types.pixel_coordinate_system import PixelCoordinateSystem
from data.types.bounding_box_coordinate_system import BoundingBoxCoordinateSystem
from data.types.pixel_definition import PixelDefinition
class ImageDatasetTweakTool:
def __init__(self, dataset: dict):
self.manipulator = ImageDatasetManipulator(dataset)
def apply_index_filter(self, indices):
self.manipulator.apply_index_filter(indices)
def sort_by_image_size_ratio(self, descending=False):
image_sizes = []
for image in self.manipulator:
image_sizes.append(image.get_image_size())
image_sizes = np.array(image_sizes)
if descending:
ratio = image_sizes[:, 0] / image_sizes[:, 1]
else:
ratio = image_sizes[:, 1] / image_sizes[:, 0]
indices = ratio.argsort()
self.manipulator.apply_index_filter(indices)
def bounding_box_fit_in_image_size(self, exclude_non_validity=True):
for image in self.manipulator:
fit_objects_bounding_box_in_image_size(image, self.manipulator.context_dao, exclude_non_validity)
def bounding_box_update_validity(self, skip_if_mark_non_validity=True):
for image in self.manipulator:
update_objects_bounding_box_validity(image, self.manipulator.context_dao, skip_if_mark_non_validity)
def bounding_box_remove_non_validity_objects(self):
for image in self.manipulator:
for object_ in image:
if object_.has_bounding_box():
_, validity = object_.get_bounding_box()
if validity is False:
object_.delete()
def annotation_standard_conversion(self, bounding_box_format: BoundingBoxFormat = None,
pixel_coordinate_system: PixelCoordinateSystem = None,
bounding_box_coordinate_system: BoundingBoxCoordinateSystem = None,
pixel_definition: PixelDefinition = None):
converter = prepare_bounding_box_annotation_standard_conversion(bounding_box_format, pixel_coordinate_system,
bounding_box_coordinate_system,
pixel_definition,
self.manipulator.context_dao)
if converter is None:
return
for image in self.manipulator:
for object_ in image:
if object_.has_bounding_box():
bounding_box, bounding_box_validity = object_.get_bounding_box()
bounding_box = converter(bounding_box)
object_.set_bounding_box(bounding_box, bounding_box_validity)
def bounding_box_remove_empty_annotation_objects(self):
for image in self.manipulator:
for object_ in image:
if not object_.has_bounding_box():
object_.delete()
def remove_empty_annotation(self):
for image in self.manipulator:
if len(image) == 0:
image.delete()
def remove_invalid_image(self):
for image in self.manipulator:
w, h = image.get_image_size()
if w == 0 or h == 0:
image.delete()
def remove_category_ids(self, category_ids: list):
for image in self.manipulator:
for object_ in image:
if object_.has_category_id():
if object_.get_category_id() in category_ids:
object_.delete()
category_id_name_map: dict = copy.copy(self.manipulator.get_category_id_name_map())
for category_id in category_ids:
category_id_name_map.pop(category_id)
self.manipulator.set_category_id_name_map(category_id_name_map)
def make_category_id_sequential(self):
category_id_name_map = self.manipulator.get_category_id_name_map()
new_category_ids = list(range(len(category_id_name_map)))
old_new_category_id_map = {o: n for n, o in zip(new_category_ids, category_id_name_map.keys())}
for image in self.manipulator:
for object_ in image:
if object_.has_category_id():
if object_.get_category_id() in old_new_category_id_map:
object_.set_category_id(old_new_category_id_map[object_.get_category_id()])
new_category_id_name_map = {n: category_id_name_map[o] for n, o in
zip(new_category_ids, category_id_name_map.keys())}
self.manipulator.set_category_id_name_map(new_category_id_name_map)
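    # e.g. a category map {3: 'cat', 7: 'dog'} becomes {0: 'cat', 1: 'dog'},
    # and every object's category_id is rewritten through the same
    # old -> new mapping.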
|
nilq/baby-python
|
python
|
import numpy as np
import pydub
import librosa
import scipy
import scipy.signal
import scipy.fftpack as fft
silence_threshold = 60 # in -dB relative to max sound which is 0dB
lambdaa = 1 # amplitude of delta signal in PEFBEs
n_mels = 60 # feature dimension for each frame
segment_length = 41 # 1 segment is 41 frames
segment_hop_length = 20 # nearly 50% overlap
class Clip:
"""A single 5-sec long recording."""
RATE = 22050 # All recordings in ESC are 44.1 kHz but the paper downsampled to 22.05kHz
frame_length=550 # 25 ms windows
hop_length=275 # 50% overlap
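    # sanity check: 550 samples / 22050 Hz ~ 24.9 ms per frame, and a hop of
    # 275 samples is exactly half a frame, i.e. 50% overlap.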
class Audio:
"""The actual audio data of the clip.
Uses a context manager to load/unload the raw audio data. This way clips
can be processed sequentially with reasonable memory usage.
"""
def __init__(self, path):
self.path = path
def __enter__(self):
# Actual recordings are sometimes not frame accurate, so we trim/overlay to exactly 5 seconds
self.data = pydub.AudioSegment.silent(duration=5000)
self.data = self.data.overlay((pydub.AudioSegment.from_file(self.path)[0:5000]).set_frame_rate(Clip.RATE))
            self.raw = (np.frombuffer(self.data._data, dtype="int16") + 0.5) / (0x7FFF + 0.5) # convert to float
return(self)
def __exit__(self, exception_type, exception_value, traceback):
if exception_type is not None:
print (exception_type, exception_value, traceback)
del self.data
del self.raw
def __init__(self, audiopath,path):
self.path = path
self.target = (self.path.split(".")[0]).split("-")[-1]
self.fold = self.path.split("-")[0]
self.audio = Clip.Audio(audiopath+"/"+self.path)
self.category = None
with self.audio as audio:
self.is_silent = librosa.effects._signal_to_frame_nonsilent(audio.raw,top_db=silence_threshold,frame_length=Clip.frame_length, hop_length=Clip.hop_length)
self.non_silent = self.remove_silence(audio)
################# Unsegmented features. 60 - dimensional ###################
self.compute_PEFBEs()
self.compute_FBEs()
self.num_frames = len(self.non_silent)
del self.is_silent
del self.non_silent
######################## Segment the clip into smaller parts. 41 frames(50% overlap) in the PEFBE paper. ########################
self.mel_spectra = self.segment(self.mel_spectra.T).T
self.log_spectra = self.segment(self.log_spectra.T).T
self.log_delta = self.segment(self.log_delta.T).T
self.log_delta2 = self.segment(self.log_delta2.T).T
self.PEmel_spectra = self.segment(self.PEmel_spectra.T).T
self.PElog_spectra = self.segment(self.PElog_spectra.T).T
self.PElog_delta = self.segment(self.PElog_delta.T).T
self.PElog_delta2 = self.segment(self.PElog_delta2.T).T
def remove_silence(self,audio):
# returns a list of numpy arrays (list of frames)
newsig = []
j = 0
while j < len(self.is_silent):
silent_count = 0
#look for continuous silent frames
while(j<len(self.is_silent) and (not self.is_silent[j])):
silent_count +=1
j+=1
#skip all these frames if more than 3 continuously
if(silent_count<=3):
if(silent_count==0):
newsig.append(audio.raw[(j)*Clip.hop_length:(j+2)*Clip.hop_length])
for k in range(silent_count):
newsig.append(audio.raw[(j+k)*Clip.hop_length:(j+k+2)*Clip.hop_length])
j += silent_count
j+=1
#drop the partially filled frames
while(len(newsig[-1])!=Clip.frame_length):
del(newsig[-1])
newsig.append(audio.raw[-Clip.frame_length:])
return newsig
def compute_PEFBEs(self):
power_spectra = []
for frame in self.non_silent:
delta = lambdaa*scipy.signal.unit_impulse(Clip.frame_length)
            frame = frame + delta  # copy: in-place += would mutate frames later reused by compute_FBEs
fft_frame = fft.fft(frame)
normalised_frame = (fft_frame - np.mean(fft_frame)) / np.std(fft_frame)
power_frame = np.abs(fft_frame)**2
power_spectra.append(power_frame)
power_spectra = np.array(power_spectra)
self.PEmel_spectra = librosa.feature.melspectrogram(S=power_spectra.T,n_mels=n_mels)
self.PElog_spectra = librosa.core.power_to_db(self.PEmel_spectra)
self.PElog_delta = librosa.feature.delta(self.PElog_spectra)
self.PElog_delta2 = librosa.feature.delta(self.PElog_delta)
def compute_FBEs(self):
power_spectra = []
for frame in self.non_silent:
fft_frame = fft.fft(frame)
power_frame = np.abs(fft_frame)**2
power_spectra.append(power_frame)
power_spectra = np.array(power_spectra)
self.mel_spectra = librosa.feature.melspectrogram(S=power_spectra.T,n_mels=n_mels)
self.log_spectra = librosa.core.power_to_db(self.mel_spectra)
self.log_delta = librosa.feature.delta(self.log_spectra)
self.log_delta2 = librosa.feature.delta(self.log_delta)
def segment(self,list):
newsig = []
n = len(list)
if(n < segment_length):
#### Make a segment by duplicating frames
new_segment = []
for j in range(int(segment_length/n)):
new_segment.extend(list[:])
            new_segment.extend(list[:segment_length % n])  # pad to exactly segment_length frames
newsig.append(np.array(new_segment))
else:
for j in range(int(n/segment_hop_length)):
newsig.append(list[j*segment_hop_length:(j+2)*segment_hop_length+1])
#remove partially-filled segments from the end
while(len(newsig[-1])!=segment_length):
del(newsig[-1])
# add a segment for last few frames tht might have been left out
if(len(list)%segment_length != 0):
newsig.append(list[-segment_length:])
return np.array(newsig)
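    # e.g. each emitted segment spans (j+2)*segment_hop_length + 1 = 41 frames
    # (two hops plus one), giving roughly 50% overlap; trailing partial
    # segments are dropped and one extra segment covers the final 41 frames.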
def _print_stats(self,data):
print(data.shape,np.max(data),np.min(data),np.mean(data),np.std(data))
def print_clip_stats(self):
print("length max min mean std")
print("FBE mel ----------------------------------")
self._print_stats(self.mel_spectra)
print("FBE log ------------------------------")
self._print_stats(self.log_spectra)
print("FBE log delta ------------------------------")
self._print_stats(self.log_delta)
print("FBE log delta2 ------------------------------")
self._print_stats(self.log_delta2)
print("PEFBE mel ----------------------------------")
self._print_stats(self.PEmel_spectra)
print("PEFBE log ------------------------------")
self._print_stats(self.PElog_spectra)
print("PEFBE log delta------------------------------")
self._print_stats(self.PElog_delta)
print("PEFBE log delta2 ------------------------------")
self._print_stats(self.PElog_delta2)
        print(self.num_frames)  # self.non_silent is deleted in __init__, so use the stored count
def __repr__(self):
return '<Target:{0}|Category:{1}|Fold:{2}|Number of frames:{3}|Number of segments:{4}>\nClip name : {5}'.format(self.target,self.category,self.fold,self.num_frames,self.log_spectra.shape[2],self.path)
|
nilq/baby-python
|
python
|
"""empty message
Revision ID: 40557a55e174
Revises: 0f9ddf8fec06
Create Date: 2021-09-13 03:11:26.003799
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '40557a55e174'
down_revision = '0f9ddf8fec06'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'product_user', ['user_id', 'product_id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'product_user', type_='unique')
# ### end Alembic commands ###
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import pdb
import hsmix
import scipy as sp
#====================================================================
def test_ideal_gas_press():
TOL = .03
xHS = 1.0
# atmospheric conditions
mHS = 28.97
kT = 1.0/40 # 300 K
V0 = 39270.0
V_a = V0*np.array([0.99,1.01])
# 1bar = kT/V0*1.6e6
# V_a = V0*np.linspace( .5, 5, 1001)
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
Fgas_a = np.zeros( V_a.shape )
Sgas_a = np.zeros( V_a.shape )
for ind, V in enumerate( V_a ):
iFgas, iSgas = hsmix.ideal_gas( V, kT, xHS, mHS )
Fgas_a[ind] = iFgas
Sgas_a[ind] = iSgas
P = -np.diff(Fgas_a)/np.diff(V_a)*hsmix.GPA_EV_ANG3
assert np.abs(np.log(P*1e4/1.013)) < TOL, \
'Must recover 1 bar atmospheric pressure'
#====================================================================
def test_ideal_gas_entropy():
TOL = 1e-3
xHS = 1.0
# atmospheric conditions
mHS = 28.97
kT0 = 1.0/40 # 300 K
kT_a = kT0*np.array([.99,1.01])
V = 39270.0
# 1bar = kT/V0*1.6e6
# V_a = V0*np.linspace( .5, 5, 1001)
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
Fgas_a = np.zeros( kT_a.shape )
Sgas_a = np.zeros( kT_a.shape )
for ind, kT in enumerate( kT_a ):
iFgas, iSgas = hsmix.ideal_gas( V, kT, xHS, mHS )
Fgas_a[ind] = iFgas
Sgas_a[ind] = iSgas
S = -np.diff(Fgas_a)/np.diff(kT_a)
assert np.abs( np.log( np.mean(Sgas_a)/S ) ) < TOL
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
#====================================================================
def test_ideal_mix():
kT = 1.0
xHS = np.array([.5,.5])
Fmix, Smix = hsmix.ideal_mix( kT, xHS )
assert Smix == np.log(2), 'Smix of 50/50 mix should equal log(2)'
Fmix, Smix = hsmix.ideal_mix( kT, np.array([0.0,1.0]) )
assert Smix==0, 'Purely 1 component yields Smix=0'
#====================================================================
def test_hard_sphere_mix():
TOL = 1e-2
fpackHS_a=np.array([0.2333, 0.2692, 0.3106, 0.3583, 0.3808, 0.4393, 0.5068])
dHS = np.array([1, 3])
xHS = np.array([0.5, 0.5])
V_a = np.sum( xHS*np.pi/6*dHS**3 )/fpackHS_a
FexHS_kT = np.zeros( V_a.shape )
    debug_output = None
for ind, V in enumerate(V_a):
iFexHS_kT, idebug_output = hsmix.hard_sphere_mix( V, xHS, dHS, debug_output=True )
FexHS_kT[ind] = iFexHS_kT
if debug_output is None:
debug_output = {}
for key in idebug_output:
debug_output[key] = np.array(idebug_output[key])
else:
for key in idebug_output:
debug_output[key] = np.append(debug_output[key],
idebug_output[key])
Z_a = np.array([2.368,2.772,3.356,4.241,4.764,6.567,9.898])
Sk_a = -np.array([0.139,.205,.306,.467,.564,.898,1.495])
assert np.all(np.abs(np.log(debug_output['S_k']/Sk_a)) < TOL), \
'S_k values disagree with Mansoori 1971 Table 2.'
assert np.all(np.abs(np.log(debug_output['Z']/Z_a)) < TOL), \
'Z values disagree with Mansoori 1971 Table 2.'
assert False, 'excess S values do not match Mansoori 1971 Table 2 values'
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
#====================================================================
def test_bessel_inv_laplace_euler():
TOL = 1e-6
t=np.linspace(1e-3,15,100)
# Bessel function test (ringing oscillation)
lapfun0 = lambda s: 1.0/np.sqrt(s**2+1)
ynuminv = hsmix.inv_laplace_euler( lapfun0, t, tshft=0.0 )
yexact = sp.special.jv(0,t)
assert np.all( np.abs(ynuminv-yexact) < TOL ), \
'numerical inverse not within tolerance'
#====================================================================
def test_invsqrt_cos_inv_laplace_euler():
TOL = 1e-6
t=np.linspace(0.1,20,100)
# Bessel function test (ringing oscillation)
lapfun0 = lambda s: 1.0/np.sqrt(s)*np.exp(-1.0/s)
ynuminv = hsmix.inv_laplace_euler( lapfun0, t, tshft=0.0 )
yexact = 1.0/np.sqrt(np.pi*t)*np.cos(np.sqrt(4*t))
assert np.all( np.abs(ynuminv-yexact) < TOL ), \
'numerical inverse not within tolerance'
#====================================================================
def test_exp_cos_inv_laplace_euler():
TOL = 1e-6
t=np.linspace(0.1,20,100)
omega = 2.0
a = 1.0
# Bessel function test (ringing oscillation)
lapfun0 = lambda s, a=a, omega=omega: (s+a)/((s+a)**2+omega**2)
ynuminv = hsmix.inv_laplace_euler( lapfun0, t, tshft=0.0 )
yexact = np.exp(-a*t)*np.cos(omega*t)
assert np.all( np.abs(ynuminv-yexact) < TOL ), \
'numerical inverse not within tolerance'
#====================================================================
def test_shifted_exp_cos_inv_laplace_euler():
TOL = 1e-6
N = 1001
N = 101
omega = 4.0
a = 0.8
tshft = 1.5
delt = np.linspace(0.01,6,N)
t = delt+ tshft
    yexact = np.exp(-a*(t-tshft))*np.cos(omega*(t-tshft))+1.0
yexact[t<tshft] = 0.0
lapfun0 = lambda s, a=a, omega=omega: \
np.exp(-s*tshft)*( (s+a)/((s+a)**2+omega**2) + 1.0/s )
# ynuminv = hsmix.inv_laplace_euler( lapfun0, t, tshft=0.0 )
ynuminv = hsmix.inv_laplace_euler( lapfun0, delt, tshft=tshft )
# NOTE nan value at first value
dely = ynuminv-yexact
dely = dely[~np.isnan(dely)]
#plt.clf()
#plt.plot(t,yexact,'r-',t,ynuminv,'k-')
assert np.all( np.abs(dely) < TOL ), \
'numerical inverse not within tolerance'
#====================================================================
def test_hard_sphere_PDF():
    # NOTE: exploratory test (no assertions yet) - compares the numerically
    # inverted Laplace-transform PDF against the analytic contact value and
    # the direct PDF evaluation.
    N = 301
    dHS = 1.0
    xHS = 1.0
    V = 3.
    lapfun0 = lambda s, V=V, xHS=xHS, dHS=dHS:\
        np.squeeze( hsmix.hard_sphere_LT_PDF( s, V, np.array([xHS]),
                                              np.array([dHS]) ) )
    delt = np.linspace(0.01,6,N)
    ynuminv = hsmix.inv_laplace_euler( lapfun0, delt, tshft=dHS )
    fpack = np.pi/6*dHS**3/V
    lam0 = 2*np.pi/(1-fpack)
    lam1 = np.pi**2*(dHS**2/V)/(1-fpack)**2
    zeta = fpack*dHS**3
    # Percus-Yevick-type contact value for comparison
    gij_contact_PY = ((1-zeta) + 1.5*fpack*dHS**3)/(1.0-zeta)**2
    gij_contact = hsmix.hard_sphere_contact_PDF( V, np.array([xHS]),
                                                 np.array([dHS]) )
    r = np.linspace(dHS, 6*dHS,100)
    gij = hsmix.hard_sphere_PDF( r, V, np.array([xHS]), np.array([dHS]) )
    # contact value implied by the lam coefficients, evaluated at r = dHS
    gij_contact_lam = 1.0/(2*np.pi)*(lam0 + 0.5*lam1*dHS + 1.0/18*lam1**2/lam0*dHS**2)
    # lapfun = lambda s: np.exp(s*tshft)*lapfun0(s)
    # ynuminv = hsmix.nlinvsteh( lapfun, delt, n=10 )
    plt.plot( delt+dHS, ynuminv/(delt+dHS), 'k-')
|
nilq/baby-python
|
python
|
from simplerpcgen.rpcgen import rpcgen
|
nilq/baby-python
|
python
|
import os
import sys
from .graph import SubtaskGraph
from sge.mazemap import Mazemap
import numpy as np
from .utils import get_id_from_ind_multihot
from sge.utils import WHITE, BLACK, DARK, LIGHT, GREEN, DARK_RED
class MazeEnv(object): # single batch
def __init__(self, args, game_name, graph_param, game_len, gamma):
if game_name == 'playground':
from sge.playground import Playground
game_config = Playground()
graph_folder = os.path.join('.', 'data', 'subtask_graph_play')
filename = 'play_{param}'.format(param=graph_param)
elif game_name == 'mining':
from sge.mining import Mining
game_config = Mining()
graph_folder = os.path.join('.', 'data', 'subtask_graph_mining')
filename = 'mining_{param}'.format(param=graph_param)
self.config = game_config
self.max_task = self.config.nb_subtask_type
self.subtask_list = self.config.subtask_list
# graph & map
self.graph = SubtaskGraph(
graph_folder, filename, self.max_task) # just load all graph
self.map = Mazemap(game_name, game_config)
self.gamma = gamma
# init
self.game_length = int(np.random.uniform(
0.8, 1.2) * game_len)
self.step_reward = 0.0
def step(self, action):
if self.graph.graph_index is None:
raise RuntimeError('Error: Environment has never been reset()')
sub_id = -1
if self.game_over or self.time_over:
raise ValueError(
'Environment has already been terminated. need to be reset!')
oid = self.map.act(action)
if (action, oid) in self.config.subtask_param_to_id: # if (action, item) is one of the subtasks
sid = self.config.subtask_param_to_id[(action, oid)]
if sid in self.subtask_id_list: # if sub_id is in the subtask graph
sub_id = sid
else:
#print('Warning! Executed a non-existing subtask')
pass
#
self.reward = self._act_subtask(sub_id)
self.ret += self.reward*self.gamma
self.step_count += 1
self.time_over = self.step_count >= self.game_length
self.game_over = (self.eligibility*self.mask).sum().item() == 0
return self._get_state(), self.reward, (self.game_over or self.time_over), self._get_info()
def reset(self, graph_index=None): # after every episode
#if self.seed is not None:
# np.random.seed(self.seed)
if graph_index is None:
graph_index = np.random.permutation(self.graph.num_graph)[0]
else:
graph_index = graph_index % self.graph.num_graph
# 1. reset graph
if graph_index >= 0:
self.graph.set_graph_index(graph_index)
self.nb_subtask = len(self.graph.subtask_id_list)
self.rew_mag = self.graph.rew_mag
self.subtask_id_list = self.graph.subtask_id_list
# 2. reset subtask status
self.executed_sub_ind = -1
self.game_over = False
self.time_over = False
self.mask, self.mask_id = np.ones(
self.nb_subtask, dtype=np.uint8), np.zeros(self.max_task, dtype=np.uint8)
for ind, sub_id in self.graph.ind_to_id.items():
self.mask_id[sub_id] = 1
self.completion, self.comp_id = np.zeros(
self.nb_subtask, dtype=np.int8), np.zeros(self.max_task, dtype=np.uint8)
self._compute_elig()
self.step_count, self.ret, self.reward = 0, 0, 0
# 3. reset map
self.map.reset(self.subtask_id_list)
return self._get_state(), self._get_info()
def state_spec(self):
return [
{'dtype': self.map.get_obs().dtype, 'name': 'observation', 'shape': self.map.get_obs().shape},
{'dtype': self.mask_id.dtype, 'name': 'mask', 'shape': self.mask_id.shape},
{'dtype': self.comp_id.dtype, 'name': 'completion', 'shape': self.comp_id.shape},
{'dtype': self.elig_id.dtype, 'name': 'eligibility', 'shape': self.elig_id.shape},
{'dtype': int, 'name': 'step', 'shape': ()}
]
def get_actions(self):
return self.config.legal_actions
# internal
def _get_state(self):
step = self.game_length - self.step_count
return {
'observation': self.map.get_obs(),
            'mask': self.mask_id.astype(float),
            'completion': self.comp_id.astype(float),
            'eligibility': self.elig_id.astype(float),
'step': step
}
def _get_info(self):
return {
'graph': self.graph
}
def _act_subtask(self, sub_id):
self.executed_sub_ind = -1
reward = self.step_reward
if sub_id < 0:
return reward
sub_ind = self.graph.id_to_ind[sub_id]
if self.eligibility[sub_ind] == 1 and self.mask[sub_ind] == 1:
self.completion[sub_ind] = 1
self.comp_id[sub_id] = 1
reward += self.rew_mag[sub_ind]
self.executed_sub_ind = sub_ind
self.mask[sub_ind] = 0
self.mask_id[sub_id] = 0
self._compute_elig()
return reward
def _compute_elig(self):
self.eligibility = self.graph.get_elig(self.completion)
self.elig_id = get_id_from_ind_multihot(
self.eligibility, self.graph.ind_to_id, self.max_task)
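# A minimal usage sketch (kept as a comment because it needs the sge data
# files on disk; the graph_param value 'train_1' is a made-up example, and
# `args` is unused by __init__ above so None is passed):
#
# if __name__ == '__main__':
#     env = MazeEnv(None, 'playground', 'train_1', game_len=70, gamma=0.99)
#     state, info = env.reset()
#     done = False
#     while not done:
#         action = np.random.choice(env.get_actions())
#         state, reward, done, info = env.step(action)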
|
nilq/baby-python
|
python
|
from django.contrib.auth.backends import BaseBackend
from naloge.models import Uporabnik
from accounts.francek import *
from django.conf import settings
class FrancekBackend(BaseBackend):
    # FrancekBackend acts as a secondary way of logging in to the application.
    # In the settings it must be listed last - as in the following example.
    # AUTHENTICATION_BACKENDS = [
    #     'django.contrib.auth.backends.ModelBackend',
    #     'accounts.authentication_backend.FrancekBackend'
    # ]
    # It works by trying to log the user in to their Francek account. If the
    # login succeeds, it creates a new user account in Django's database and
    # sets its username and password.
    # On the next login Django first checks whether it already knows a user
    # with the entered credentials, and only otherwise uses this backend. If
    # the user changes their password on Francek, the first (Django) backend
    # will report that it does not know the user and trigger the Francek
    # backend.
def authenticate(self, request, username=None, password=None):
        # If the API key for communicating with Francek is not configured in
        # the settings, do not use this backend.
if not hasattr(settings, 'FRANCEK_API_KEY') or settings.FRANCEK_API_KEY is None:
return None
francek_api = FrancekApi(settings.FRANCEK_API_KEY, 'crkozmed')
try:
francek_uporabnik = francek_api.login(username, password)
            # If the user is not a teacher, they cannot log in
if francek_uporabnik.get_role() is not FrancekUserRole.teacher:
return None
except Exception:
            # If an error occurred during the Francek login, the user does not
            # exist and we return None
return None
        # Check whether a user with the entered username already exists in the
        # database. If so, it most likely means the user changed their password
        # on Francek but the data in our application has not been updated yet.
        # Update the data.
try:
uporabnik = Uporabnik.objects.get(username=username)
except Uporabnik.DoesNotExist:
uporabnik = Uporabnik(username=username)
        # Set is_staff to True so the user can log in to the Django admin
uporabnik.is_staff = True
        # Update the user's password and save the user
uporabnik.set_password(password)
uporabnik.save()
return uporabnik
def get_user(self, user_id):
try:
return Uporabnik.objects.get(pk=user_id)
except Uporabnik.DoesNotExist:
return None
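# Settings sketch (illustrative only): in addition to listing this backend in
# AUTHENTICATION_BACKENDS as shown above, authenticate() expects the Francek
# API key to be present in the Django settings, e.g.
#
#   FRANCEK_API_KEY = 'your-francek-api-key'  # hypothetical value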
|
nilq/baby-python
|
python
|
from app.programs.loader import load
list = load('app/programs/original')
|
nilq/baby-python
|
python
|
n = input("Enter the name:: ")
reverseString = []
i = len(n)
while i > 0:
reverseString += n[ i - 1 ]
i = i - 1
reverseString = ''.join(reverseString)
print("ReversedString::", reverseString)
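# For comparison only: Python's slice notation produces the same reversed
# string in a single step.
print("ReversedString (slice)::", n[::-1])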
|
nilq/baby-python
|
python
|
pa = int(input('Enter the first term of the arithmetic progression: '))
r = int(input('Enter the common difference: '))
c = 0
mais = 10
tot = 0
print('The terms are', end=" ")
while mais != 0:
    tot += mais
    while c <= tot:
        c += 1
        print('{}'.format(pa), end=' -> ')
        pa = pa + r
    print('PAUSE')
    mais = int(input('How many more terms? '))
print('A total of {} terms were shown'.format(tot))
|
nilq/baby-python
|
python
|
from io import StringIO
from cline import CannotMakeArguments, CommandLineArguments
from mock import patch
from pytest import raises
from smokestack.exceptions import SmokestackError
from smokestack.register import register
from smokestack.tasks.operate import OperateTask, OperateTaskArguments
from smokestack.types import Operation
from tests.mocks import MockStackSet
def test_invoke() -> None:
register("mock", MockStackSet)
operation = Operation(execute=False, preview=True)
args = OperateTaskArguments(
operation=operation,
stack_set="mock",
)
out = StringIO()
task = OperateTask(args, out)
with patch("tests.mocks.MockStackSet.execute") as execute:
exit_code = task.invoke()
execute.assert_called_once_with(operation)
assert exit_code == 0
def test_invoke__fail() -> None:
register("mock", MockStackSet)
operation = Operation(execute=False, preview=True)
args = OperateTaskArguments(
operation=operation,
stack_set="mock",
)
out = StringIO()
task = OperateTask(args, out)
with patch("tests.mocks.MockStackSet.execute", side_effect=SmokestackError("fire")):
exit_code = task.invoke()
expect = """
🔥 fire
"""
assert out.getvalue() == expect
assert exit_code == 1
def test_make_args__execute_and_preview() -> None:
args = CommandLineArguments(
{
"execute": True,
"preview": True,
"set": "foo",
}
)
assert OperateTask.make_args(args) == OperateTaskArguments(
log_level="CRITICAL",
operation=Operation(execute=True, preview=True),
stack_set="foo",
)
def test_make_args__no_operation() -> None:
args = CommandLineArguments(
{
"execute": False,
"preview": False,
"set": "foo",
}
)
with raises(CannotMakeArguments) as ex:
OperateTask.make_args(args)
assert str(ex.value) == "Must execute and/or preview."
|
nilq/baby-python
|
python
|
import os
import json
import argparse
import glob as gb
import utils as ut
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def main(args):
""" Execute:
-------------------------------------------------------------------
python process.py --path data/v6/forest-06 --output results/v6 && \
python process.py --path data/v6/forest-05 --output results/v6 && \
python process.py --path data/v6/forest-04 --output results/v6 && \
python process.py --path data/v6/forest-03 --output results/v6 && \
python process.py --path data/v6/forest-02 --output results/v6 && \
python process.py --path data/v6/forest-01 --output results/v6
-------------------------------------------------------------------
python process.py --path data/v7/forest-14 --output results/v7 && \
python process.py --path data/v7/forest-13 --output results/v7 && \
python process.py --path data/v7/forest-12 --output results/v7
-------------------------------------------------------------------
python process.py --path data/v8/forest-43 --output results/v8 && \
python process.py --path data/v8/forest-42 --output results/v8 && \
python process.py --path data/v8/forest-41 --output results/v8 && \
python process.py --path data/v8/forest-33 --output results/v8 && \
python process.py --path data/v8/forest-32 --output results/v8 && \
python process.py --path data/v8/forest-31 --output results/v8 && \
python process.py --path data/v8/forest-23 --output results/v8 && \
python process.py --path data/v8/forest-22 --output results/v8 && \
python process.py --path data/v8/forest-21 --output results/v8
-------------------------------------------------------------------
"""
# zip files
zip_files = os.path.join(args.path, '*.zip')
for zip_path in sorted(gb.glob(zip_files, recursive=True)):
# load data
data = ut.load_data(zip_path)
simulation = next(iter(data))
# load images
df = data[simulation]['images']
df = df[df['type'] == 'monochrome']
df = df.reset_index(drop=True)
# load parameters
parameters = data[simulation]['parameters']
parameters['images'] = df.shape[0]
print(f'process {simulation}', json.dumps(parameters, indent=4), '\n')
# output folder
output_folder = os.path.join(args.output, simulation)
os.makedirs(output_folder, exist_ok=True)
name_suffix = f'-{parameters["preset"]}-{parameters["view"]}'
# integrate ground
ground, alphas = ut.integrate_ground(df, parameters)
np.save(os.path.join(output_folder, f'ground{name_suffix}.npy'), ground)
np.save(os.path.join(output_folder, f'alpha{name_suffix}.npy'), alphas)
# plot stage image
fig, ax = plt.subplots(figsize=(16, 16))
ut.plot_image(ax, data[simulation]['stage'], 'stage')
ut.export_plot(fig, os.path.join(output_folder, f'stage{name_suffix}.png'))
# calculate ground visibility
scanned = np.count_nonzero(ground[:, :, 0])
captured = np.count_nonzero(ground[:, :, 1])
visibility = captured / scanned
# plot ground images
fig, axs = plt.subplots(1, 3, figsize=(24, 6))
ut.plot_heatmap(axs[0], ground[:, :, 0], 'scanned pixels (count)')
ut.plot_heatmap(axs[1], ground[:, :, 1], 'visible pixels (count)')
ut.plot_heatmap(axs[2], ut.normalize_image(ground[:, :, 1] > 0), f'visibility ({visibility:.2f})')
ut.export_plot(fig, os.path.join(output_folder, f'ground{name_suffix}.png'))
# export parameters
with open(os.path.join(output_folder, f'parameters{name_suffix}.json'), 'w') as f:
json.dump(parameters, f, indent=4)
if __name__ == '__main__':
# arguments
argp = argparse.ArgumentParser(description='AOS-Evaluation')
argp.add_argument('--path', default=os.path.join('data'), type=str, help='folder path of simulation zip files [PATH]')
argp.add_argument('--output', default=os.path.join('results'), type=str, help='folder path of evaluation export files [PATH]')
args = argp.parse_args()
# main
main(args)
|
nilq/baby-python
|
python
|
# python 3 headers, required if submitting to Ansible
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.utils.display import Display
display = Display()
class FilterModule(object):
"""
ansible filter
"""
def filters(self):
return {
'compare_list': self.compare_list,
'validate_attachment_hash': self.validate_attachment_hash,
}
    def compare_list(self, data_list, compare_to_list):
        """
        Return the items of data_list that also appear in compare_to_list,
        preserving the order of data_list.
        """
display.v("compare_list({}, {})".format(data_list, compare_to_list))
result = []
for i in data_list:
if i in compare_to_list:
result.append(i)
# randomized result :(
# result = list(
# set(
# data_list).intersection(sorted(compare_to_list)
# )
# )
display.v("return : {}".format(result))
return result
    def validate_attachment_hash(self, data, compare_to_list):
        """
        Check whether data matches compare_to_list: if data contains ':', each
        list entry (minus its trailing character) is tested as a substring of
        data; otherwise data must be an exact member of compare_to_list.
        """
display.v("validate_attachment_hash({}, {})".format(data, compare_to_list))
if ':' in data:
for i in compare_to_list:
if i[:-1] in data:
return True
else:
if data in compare_to_list:
return True
return False
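# Quick local sanity check of the filters above (run the file directly,
# outside of Ansible); the sample values are made up for illustration.
if __name__ == '__main__':
    fm = FilterModule()
    print(fm.compare_list(['a', 'b', 'c'], ['b', 'c', 'd']))           # ['b', 'c']
    print(fm.validate_attachment_hash('sha256:abcdef', ['sha256:*']))  # True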
|
nilq/baby-python
|
python
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config for training implicit models."""
import ml_collections
def get_config():
"""Default configs for the experiments"""
config = ml_collections.ConfigDict()
#Dataset Configs
config.dataset = get_dataset_config()
#Model Configs
config.model = get_model_config()
# LF configs
config.lightfield = get_lf_config()
#Training Configs
config.train = get_train_config()
#Evaluation Configs
config.eval = get_eval_config()
config.seed = 33
config.dev_run = False
config.trial = 0 # Dummy for repeated runs.
config.lock()
return config
def get_dataset_config():
"""Configs for the dataset"""
dataset_config = ml_collections.ConfigDict()
dataset_config.name = "ff_epipolar"
dataset_config.data_dir = ""
dataset_config.base_dir = ""
dataset_config.scene = ""
dataset_config.batch_size = 16384
dataset_config.batching = "single_image"
# The downsampling factor of images, 0 for no downsample
dataset_config.factor = 4
# Render generated images if set to True
dataset_config.render_path = False
dataset_config.spherify = False
# will take every 1/N images as LLFF test set.
dataset_config.llffhold = 8
# If True, generate rays through the center of each pixel.
# Note: While this is the correct way to handle rays, it
# is not the way rays are handled in the original NeRF paper.
dataset_config.use_pixel_centers = False
# to store height and width
dataset_config.image_height = -1
dataset_config.image_width = -1
# To store number of train views
dataset_config.num_train_views = -1
dataset_config.num_interpolation_views = 10
return dataset_config
def get_model_config():
"""Configurations for the model"""
model_config = ml_collections.ConfigDict()
model_config.name = "lfnr"
model_config.near = 0.
model_config.far = 1.
model_config.net_depth = 8
model_config.net_width = 256
# Depth of the second part of MLP after conditioning
# on view direction
model_config.net_depth_condition = 1
model_config.net_width_condition = 128
# add a skip connection to the output vector of every
# skip_layer layers.
model_config.skip_layer = 4
model_config.num_rgb_channels = 3
model_config.num_sigma_channels = 1
model_config.randomized = False
# Position encoding config
model_config.mapping_type = "positional_encoding"
#Min and max degree for positional encoding for points
model_config.min_deg_point = 0
model_config.max_deg_point = 10
#Degree of positional encoding for view directions
model_config.deg_view = 4
model_config.num_coarse_samples = 64
model_config.num_fine_samples = 128
model_config.use_viewdirs = True
# std dev of noise added to regularize sigma output.
# For LLFF dataset(in Nerf)
model_config.noise_std = 1.
# sampling linearly in disparity rather than depth.
model_config.lindisp = False
model_config.net_activation = "relu"
model_config.rgb_activation = "sigmoid"
model_config.sigma_activation = "relu"
model_config.white_bkgd = False
#------------------------------------
# For Transformer
model_config.transformer_layers = 8
model_config.transformer_heads = 1
model_config.qkv_dim = 256
model_config.transformer_mlp_dim = 256
#------------------------------------
# Epipolar conv features
model_config.use_conv_features = True
model_config.conv_feature_dim = (32,)
model_config.ksize1 = 3
model_config.ksize2 = 5
#--------------------------------------
# For epipolar projection
model_config.num_projections = 128
model_config.interpolation_type = "rounding"
model_config.use_learned_embedding = True
model_config.learned_embedding_mode = "concat"
model_config.mask_invalid_projection = False
model_config.return_attn = False
model_config.init_final_precision = "DEFAULT"
return model_config
def get_lf_config():
"""Configurations relationg to lf representation"""
lf_config = ml_collections.ConfigDict()
lf_config.name = "lightslab"
lf_config.st_plane = .5
lf_config.uv_plane = 1.
lf_config.sphere_radius = 3.0
lf_config.sphere_center = [0., 0., 0.]
lf_config.encoding_name = "positional_encoding"
# Min and max degree for positional encoding for points
lf_config.min_deg_point = 0
lf_config.max_deg_point = 4
return lf_config
def get_train_config():
"""Configurations relating to training"""
train_config = ml_collections.ConfigDict()
train_config.lr_init = 2.0e-3
train_config.warmup_epochs = 2
train_config.weight_decay = 0.
train_config.warmup_steps = 2500
train_config.lr_final = 2.0e-5
# train_config.lr_delay_steps = 2500
# A multiplier on the learning rate when the step
# is < lr_delay_steps
train_config.lr_delay_mult = 0.1
# The gradient clipping magnitude (disabled if == 0).
train_config.grad_max_norm = 0
train_config.grad_max_val = 0
train_config.max_steps = 250000
train_config.num_epochs = 180
train_config.checkpoint_every_steps = 1000
train_config.log_loss_every_steps = 500
train_config.render_every_steps = 5000
train_config.gc_every_steps = 10000
return train_config
def get_eval_config():
"""Configuration relation to model evaluation"""
eval_config = ml_collections.ConfigDict()
eval_config.eval_once = False
eval_config.save_output = True
# the size of chunks for evaluation inferences,
# set to the value that fits your GPU/TPU memory.
eval_config.chunk = 8192
eval_config.inference = False
return eval_config
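# Minimal usage sketch: build the default config and read a few of the fields
# defined above.
if __name__ == "__main__":
  config = get_config()
  print(config.model.name, config.dataset.batch_size, config.train.lr_init)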
|
nilq/baby-python
|
python
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.tfexample_decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class TFExampleDecoderTest(test.TestCase):
def _EncodedFloatFeature(self, ndarray):
return feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=ndarray.flatten().tolist()))
def _EncodedInt64Feature(self, ndarray):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=ndarray.flatten().tolist()))
def _EncodedBytesFeature(self, tf_encoded):
with self.test_session():
encoded = tf_encoded.eval()
def BytesList(value):
return feature_pb2.BytesList(value=[value])
return feature_pb2.Feature(bytes_list=BytesList(encoded))
def _BytesFeature(self, ndarray):
values = ndarray.flatten().tolist()
for i in range(len(values)):
values[i] = values[i].encode('utf-8')
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=values))
def _StringFeature(self, value):
value = value.encode('utf-8')
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=[value]))
def _Encoder(self, image, image_format):
assert image_format in ['jpeg', 'JPEG', 'png', 'PNG', 'raw', 'RAW']
if image_format in ['jpeg', 'JPEG']:
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_jpeg(tf_image)
if image_format in ['png', 'PNG']:
tf_image = constant_op.constant(image, dtype=dtypes.uint8)
return image_ops.encode_png(tf_image)
if image_format in ['raw', 'RAW']:
return constant_op.constant(image.tostring(), dtype=dtypes.string)
def GenerateImage(self, image_format, image_shape):
"""Generates an image and an example containing the encoded image.
Args:
image_format: the encoding format of the image.
image_shape: the shape of the image to generate.
Returns:
image: the generated image.
example: a TF-example with a feature key 'image/encoded' set to the
serialized image and a feature key 'image/format' set to the image
encoding format ['jpeg', 'JPEG', 'png', 'PNG', 'raw'].
"""
num_pixels = image_shape[0] * image_shape[1] * image_shape[2]
image = np.linspace(
0, num_pixels - 1, num=num_pixels).reshape(image_shape).astype(np.uint8)
tf_encoded = self._Encoder(image, image_format)
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image/encoded': self._EncodedBytesFeature(tf_encoded),
'image/format': self._StringFeature(image_format)
}))
return image, example.SerializeToString()
def DecodeExample(self, serialized_example, item_handler, image_format):
"""Decodes the given serialized example with the specified item handler.
Args:
serialized_example: a serialized TF example string.
item_handler: the item handler used to decode the image.
image_format: the image format being decoded.
Returns:
the decoded image found in the serialized Example.
"""
serialized_example = array_ops.reshape(serialized_example, shape=[])
decoder = tfexample_decoder.TFExampleDecoder(
keys_to_features={
'image/encoded':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=image_format),
},
items_to_handlers={'image': item_handler})
[tf_image] = decoder.decode(serialized_example, ['image'])
return tf_image
def RunDecodeExample(self, serialized_example, item_handler, image_format):
tf_image = self.DecodeExample(serialized_example, item_handler,
image_format)
with self.test_session():
decoded_image = tf_image.eval()
# We need to recast them here to avoid some issues with uint8.
return decoded_image.astype(np.float32)
def testDecodeExampleWithJpegEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example, tfexample_decoder.Image(), image_format='jpeg')
# Need to use a tolerance of 1 because of noise in the jpeg encode/decode
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithJPEGEncoding(self):
test_image_channels = [1, 3]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='JPEG', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='JPEG')
# Need to use a tolerance of 1 because of noise in the jpeg encode/decode
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithNoShapeInfo(self):
test_image_channels = [1, 3]
for channels in test_image_channels:
image_shape = (2, 3, channels)
_, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
tf_decoded_image = self.DecodeExample(
serialized_example,
tfexample_decoder.Image(
shape=None, channels=channels),
image_format='jpeg')
self.assertEqual(tf_decoded_image.get_shape().ndims, 3)
def testDecodeExampleWithPngEncoding(self):
test_image_channels = [1, 3, 4]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='png', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='png')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithPNGEncoding(self):
test_image_channels = [1, 3, 4]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='PNG', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(channels=channels),
image_format='PNG')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRawEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='raw', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(shape=image_shape),
image_format='raw')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRAWEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='RAW', image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(shape=image_shape),
image_format='RAW')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithJpegEncodingAt16BitCausesError(self):
image_shape = (2, 3, 3)
unused_image, serialized_example = self.GenerateImage(
image_format='jpeg', image_shape=image_shape)
# decode_raw support uint16 now so ValueError will be thrown instead.
with self.assertRaisesRegexp(
ValueError,
'true_fn and false_fn must have the same type: uint16, uint8'):
unused_decoded_image = self.RunDecodeExample(
serialized_example,
tfexample_decoder.Image(dtype=dtypes.uint16),
image_format='jpeg')
def testDecodeExampleWithStringTensor(self):
tensor_shape = (2, 3, 1)
np_array = np.array([[['ab'], ['cd'], ['ef']],
[['ghi'], ['jkl'], ['mnop']]])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'labels': self._BytesFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels':
parsing_ops.FixedLenFeature(
tensor_shape,
dtypes.string,
default_value=constant_op.constant(
'', shape=tensor_shape, dtype=dtypes.string))
}
items_to_handlers = {'labels': tfexample_decoder.Tensor('labels'),}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
labels = labels.astype(np_array.dtype)
self.assertTrue(np.array_equal(np_array, labels))
def testDecodeExampleWithFloatTensor(self):
np_array = np.random.rand(2, 3, 1).astype('f')
example = example_pb2.Example(features=feature_pb2.Features(feature={
'array': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'array': parsing_ops.FixedLenFeature(np_array.shape, dtypes.float32)
}
items_to_handlers = {'array': tfexample_decoder.Tensor('array'),}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_array] = decoder.decode(serialized_example, ['array'])
self.assertAllEqual(tf_array.eval(), np_array)
def testDecodeExampleWithInt64Tensor(self):
np_array = np.random.randint(1, 10, size=(2, 3, 1))
example = example_pb2.Example(features=feature_pb2.Features(feature={
'array': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'array': parsing_ops.FixedLenFeature(np_array.shape, dtypes.int64)
}
items_to_handlers = {'array': tfexample_decoder.Tensor('array'),}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_array] = decoder.decode(serialized_example, ['array'])
self.assertAllEqual(tf_array.eval(), np_array)
def testDecodeExampleWithVarLenTensor(self):
np_array = np.array([[[1], [2], [3]], [[4], [5], [6]]])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {'labels': tfexample_decoder.Tensor('labels'),}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array.flatten())
def testDecodeExampleWithFixLenTensorWithShape(self):
np_array = np.array([[1, 2, 3], [4, 5, 6]])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels':
parsing_ops.FixedLenFeature(
np_array.shape, dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.Tensor(
'labels', shape=np_array.shape),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array)
def testDecodeExampleWithVarLenTensorToDense(self):
np_array = np.array([[1, 2, 3], [4, 5, 6]])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.Tensor(
'labels', shape=np_array.shape),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array)
def testDecodeExampleShapeKeyTensor(self):
np_image = np.random.rand(2, 3, 1).astype('f')
np_labels = np.array([[[1], [2], [3]], [[4], [5], [6]]])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image': self._EncodedFloatFeature(np_image),
'image/shape': self._EncodedInt64Feature(np.array(np_image.shape)),
'labels': self._EncodedInt64Feature(np_labels),
'labels/shape': self._EncodedInt64Feature(np.array(np_labels.shape)),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image': parsing_ops.VarLenFeature(dtype=dtypes.float32),
'image/shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'image':
tfexample_decoder.Tensor(
'image', shape_keys='image/shape'),
'labels':
tfexample_decoder.Tensor(
'labels', shape_keys='labels/shape'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_image, tf_labels] = decoder.decode(serialized_example,
['image', 'labels'])
self.assertAllEqual(tf_image.eval(), np_image)
self.assertAllEqual(tf_labels.eval(), np_labels)
def testDecodeExampleMultiShapeKeyTensor(self):
np_image = np.random.rand(2, 3, 1).astype('f')
np_labels = np.array([[[1], [2], [3]], [[4], [5], [6]]])
height, width, depth = np_labels.shape
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image': self._EncodedFloatFeature(np_image),
'image/shape': self._EncodedInt64Feature(np.array(np_image.shape)),
'labels': self._EncodedInt64Feature(np_labels),
'labels/height': self._EncodedInt64Feature(np.array([height])),
'labels/width': self._EncodedInt64Feature(np.array([width])),
'labels/depth': self._EncodedInt64Feature(np.array([depth])),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image': parsing_ops.VarLenFeature(dtype=dtypes.float32),
'image/shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/height': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/width': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'labels/depth': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'image':
tfexample_decoder.Tensor(
'image', shape_keys='image/shape'),
'labels':
tfexample_decoder.Tensor(
'labels',
shape_keys=['labels/height', 'labels/width', 'labels/depth']),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_image, tf_labels] = decoder.decode(serialized_example,
['image', 'labels'])
self.assertAllEqual(tf_image.eval(), np_image)
self.assertAllEqual(tf_labels.eval(), np_labels)
def testDecodeExampleWithSparseTensor(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
example = example_pb2.Example(features=feature_pb2.Features(feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
}
items_to_handlers = {'labels': tfexample_decoder.SparseTensor(),}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_values.shape)
def testDecodeExampleWithSparseTensorWithKeyShape(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
'shape': self._EncodedInt64Feature(np_shape),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
'shape': parsing_ops.VarLenFeature(dtype=dtypes.int64),
}
items_to_handlers = {
'labels': tfexample_decoder.SparseTensor(shape_key='shape'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_shape)
def testDecodeExampleWithSparseTensorWithGivenShape(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
}
items_to_handlers = {
'labels': tfexample_decoder.SparseTensor(shape=np_shape),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_shape)
def testDecodeExampleWithSparseTensorToDense(self):
np_indices = np.array([1, 2, 5])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
np_dense = np.array([0.0, 0.1, 0.2, 0.0, 0.0, 0.6]).astype('f')
example = example_pb2.Example(features=feature_pb2.Features(feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
}
items_to_handlers = {
'labels':
tfexample_decoder.SparseTensor(
shape=np_shape, densify=True),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllClose(labels, np_dense)
def testDecodeExampleWithTensor(self):
tensor_shape = (2, 3, 1)
np_array = np.random.rand(2, 3, 1)
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image/depth_map': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/depth_map':
parsing_ops.FixedLenFeature(
tensor_shape,
dtypes.float32,
default_value=array_ops.zeros(tensor_shape))
}
items_to_handlers = {'depth': tfexample_decoder.Tensor('image/depth_map')}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_depth] = decoder.decode(serialized_example, ['depth'])
depth = tf_depth.eval()
self.assertAllClose(np_array, depth)
def testDecodeExampleWithItemHandlerCallback(self):
np.random.seed(0)
tensor_shape = (2, 3, 1)
np_array = np.random.rand(2, 3, 1)
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image/depth_map': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/depth_map':
parsing_ops.FixedLenFeature(
tensor_shape,
dtypes.float32,
default_value=array_ops.zeros(tensor_shape))
}
def HandleDepth(keys_to_tensors):
depth = list(keys_to_tensors.values())[0]
depth += 1
return depth
items_to_handlers = {
'depth':
tfexample_decoder.ItemHandlerCallback('image/depth_map',
HandleDepth)
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_depth] = decoder.decode(serialized_example, ['depth'])
depth = tf_depth.eval()
self.assertAllClose(np_array, depth - 1)
def testDecodeImageWithItemHandlerCallback(self):
image_shape = (2, 3, 3)
for image_encoding in ['jpeg', 'png']:
image, serialized_example = self.GenerateImage(
image_format=image_encoding, image_shape=image_shape)
with self.test_session():
def ConditionalDecoding(keys_to_tensors):
"""See base class."""
image_buffer = keys_to_tensors['image/encoded']
image_format = keys_to_tensors['image/format']
def DecodePng():
return image_ops.decode_png(image_buffer, 3)
def DecodeJpg():
return image_ops.decode_jpeg(image_buffer, 3)
image = control_flow_ops.case(
{
math_ops.equal(image_format, 'png'): DecodePng,
},
default=DecodeJpg,
exclusive=True)
image = array_ops.reshape(image, image_shape)
return image
keys_to_features = {
'image/encoded':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value='jpeg')
}
items_to_handlers = {
'image':
tfexample_decoder.ItemHandlerCallback(
['image/encoded', 'image/format'], ConditionalDecoding)
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_image] = decoder.decode(serialized_example, ['image'])
decoded_image = tf_image.eval()
if image_encoding == 'jpeg':
# For jenkins:
image = image.astype(np.float32)
decoded_image = decoded_image.astype(np.float32)
self.assertAllClose(image, decoded_image, rtol=.5, atol=1.001)
else:
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithBoundingBoxSparse(self):
num_bboxes = 10
np_ymin = np.random.rand(num_bboxes, 1)
np_xmin = np.random.rand(num_bboxes, 1)
np_ymax = np.random.rand(num_bboxes, 1)
np_xmax = np.random.rand(num_bboxes, 1)
np_bboxes = np.hstack([np_ymin, np_xmin, np_ymax, np_xmax])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image/object/bbox/ymin': self._EncodedFloatFeature(np_ymin),
'image/object/bbox/xmin': self._EncodedFloatFeature(np_xmin),
'image/object/bbox/ymax': self._EncodedFloatFeature(np_ymax),
'image/object/bbox/xmax': self._EncodedFloatFeature(np_xmax),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/object/bbox/ymin': parsing_ops.VarLenFeature(dtypes.float32),
'image/object/bbox/xmin': parsing_ops.VarLenFeature(dtypes.float32),
'image/object/bbox/ymax': parsing_ops.VarLenFeature(dtypes.float32),
'image/object/bbox/xmax': parsing_ops.VarLenFeature(dtypes.float32),
}
items_to_handlers = {
'object/bbox':
tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_bboxes] = decoder.decode(serialized_example, ['object/bbox'])
bboxes = tf_bboxes.eval()
self.assertAllClose(np_bboxes, bboxes)
def testDecodeExampleWithBoundingBoxDense(self):
num_bboxes = 10
np_ymin = np.random.rand(num_bboxes, 1)
np_xmin = np.random.rand(num_bboxes, 1)
np_ymax = np.random.rand(num_bboxes, 1)
np_xmax = np.random.rand(num_bboxes, 1)
np_bboxes = np.hstack([np_ymin, np_xmin, np_ymax, np_xmax])
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image/object/bbox/ymin': self._EncodedFloatFeature(np_ymin),
'image/object/bbox/xmin': self._EncodedFloatFeature(np_xmin),
'image/object/bbox/ymax': self._EncodedFloatFeature(np_ymax),
'image/object/bbox/xmax': self._EncodedFloatFeature(np_xmax),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/object/bbox/ymin': parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
'image/object/bbox/xmin': parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
'image/object/bbox/ymax': parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
'image/object/bbox/xmax': parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True),
}
items_to_handlers = {
'object/bbox':
tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],
'image/object/bbox/'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
[tf_bboxes] = decoder.decode(serialized_example, ['object/bbox'])
bboxes = tf_bboxes.eval()
self.assertAllClose(np_bboxes, bboxes)
def testDecodeExampleWithRepeatedImages(self):
image_shape = (2, 3, 3)
image_format = 'png'
image, _ = self.GenerateImage(
image_format=image_format, image_shape=image_shape)
tf_encoded = self._Encoder(image, image_format)
with self.test_session():
tf_string = tf_encoded.eval()
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image/encoded': feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[tf_string, tf_string])),
'image/format': self._StringFeature(image_format),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = array_ops.reshape(serialized_example, shape=[])
decoder = tfexample_decoder.TFExampleDecoder(
keys_to_features={
'image/encoded':
parsing_ops.FixedLenFeature(
(2,), dtypes.string),
'image/format':
parsing_ops.FixedLenFeature(
(), dtypes.string, default_value=image_format),
},
items_to_handlers={'image': tfexample_decoder.Image(repeated=True)})
[tf_image] = decoder.decode(serialized_example, ['image'])
output_image = tf_image.eval()
self.assertEqual(output_image.shape, (2, 2, 3, 3))
self.assertAllEqual(np.squeeze(output_image[0, :, :, :]), image)
self.assertAllEqual(np.squeeze(output_image[1, :, :, :]), image)
def testDecodeExampleWithLookup(self):
example = example_pb2.Example(features=feature_pb2.Features(feature={
'image/object/class/text': self._BytesFeature(
np.array(['cat', 'dog', 'guinea pig'])),
}))
serialized_example = example.SerializeToString()
# 'dog' -> 0, 'guinea pig' -> 1, 'cat' -> 2
table = lookup_ops.index_table_from_tensor(
constant_op.constant(['dog', 'guinea pig', 'cat']))
with self.test_session() as sess:
sess.run(lookup_ops.tables_initializer())
serialized_example = array_ops.reshape(serialized_example, shape=[])
keys_to_features = {
'image/object/class/text': parsing_ops.VarLenFeature(dtypes.string),
}
items_to_handlers = {
'labels':
tfexample_decoder.LookupTensor('image/object/class/text', table),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
obtained_class_ids = decoder.decode(serialized_example)[0].eval()
self.assertAllClose([2, 0, 1], obtained_class_ids)
if __name__ == '__main__':
test.main()
|
nilq/baby-python
|
python
|
import bartender
import atexit
from flask import Flask, request, Response
from drinks import drink_list, drink_options
#import atexit
from menu import MenuItem, Menu, Back, MenuContext, MenuDelegate
atexit.register(bartender.Bartender.atExit)
pete = bartender.Bartender()
pete.buildMenu(drink_list, drink_options)
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def respond():
requestData = str(request.data)[4:].replace("'", "")
if(requestData == "clean"):
while(bartender.screenItem.name != "Configure"):
pete.menuContext.advance()
pete.menuContext.select()
while(bartender.screenItem.name != "Clean"):
pete.menuContext.advance()
pete.menuContext.select()
for i in range(0,1):
while(bartender.screenItem.name != "Back"):
pete.menuContext.advance()
pete.menuContext.select()
return Response(status=200)
i = 0
while(requestData != bartender.screenItem.name):
if(i == 2):
break
pete.menuContext.advance()
if(bartender.screenItem.name == "Configure"):
i += 1
if(requestData == bartender.screenItem.name):
pete.menuContext.select()
return Response(status=200)
if __name__=='__main__':
#atexit.register(bartender.Bartender.atExit)
app.run(host='0.0.0.0')
|
nilq/baby-python
|
python
|
import datetime
# Gets time from milliseconds
# Returns string formatted as HH:MM:SS:mmm, MM:SS:mmm or S:mmm, depending on the time.
def get_time_from_milliseconds(milli):
milliseconds = milli % 1000
seconds= (milli//1000)%60
minutes= (milli//(1000*60))%60
hours= (milli//(1000*60*60))%24
if hours == 0:
if minutes == 0:
return '%d.%03d' % (seconds, milliseconds)
return '%02d:%02d.%03d' % (minutes, seconds, milliseconds)
return '%02d:%02d:%02d.%03d' % (hours, minutes, seconds, milliseconds)
# Returns a string formatted as YYYY-MM-DD
def get_date_today():
return datetime.date.today().strftime("%Y-%m-%d")
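# Quick sanity check of the helpers above when the module is run directly.
if __name__ == '__main__':
    print(get_time_from_milliseconds(5048))      # 5.048
    print(get_time_from_milliseconds(65048))     # 01:05.048
    print(get_time_from_milliseconds(3665048))   # 01:01:05.048
    print(get_date_today())                      # today's date as YYYY-MM-DD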
|
nilq/baby-python
|
python
|
import numpy as np
from sklearn.metrics import r2_score
from metaflow_helper.models import LightGBMRegressor
from metaflow_helper.constants import RunMode
def test_lightgbm_model_regressor_handler_train():
n_examples = 10
n_repeat = 10
offset = 10
X = np.repeat(np.arange(n_examples), n_repeat)[:, None]
y = np.repeat(np.arange(n_examples).astype(float) + offset, n_repeat)
model_handler = LightGBMRegressor(
mode=RunMode.TRAIN,
max_depth=1,
min_child_samples=1,
iterations=100,
)
model_handler.fit(X, y)
y_pred = model_handler.predict(X)
np.testing.assert_allclose(y, y_pred, rtol=2)
assert r2_score(y, y_pred) > 0.9
def test_lightgbm_model_regressor_handler_test():
n_examples = 10
n_repeat = 10
offset = 10
X = np.repeat(np.arange(n_examples), n_repeat)[:, None]
y = np.repeat(np.arange(n_examples).astype(float) + offset, n_repeat)
model_handler = LightGBMRegressor(
mode=RunMode.TEST,
max_depth=1,
min_child_samples=1,
iterations=100,
)
model_handler.fit(X, y)
y_pred = model_handler.predict(X)
np.testing.assert_allclose(y, y_pred, rtol=2)
assert r2_score(y, y_pred) > 0.9
|
nilq/baby-python
|
python
|
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation
from tensorflow.keras.layers import UpSampling2D, add, concatenate, MaxPool2D, Dropout
import tensorflow.keras.backend as K
import numpy as np
def basic_Block(inputs, out_filters, strides=(1, 1), with_conv_shortcut=False):
x = Conv2D(out_filters, 3, padding='same', strides=strides, use_bias=False, kernel_initializer='he_normal')(inputs)
x = BatchNormalization(axis=3,)(x)
x = Activation('relu')(x)
x = Conv2D(out_filters, 3, padding='same', use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization(axis=3)(x)
if with_conv_shortcut:
        residual = Conv2D(out_filters, 1, strides=strides, use_bias=False, kernel_initializer='he_normal')(inputs)
residual = BatchNormalization(axis=3)(residual)
x = add([x, residual])
else:
x = add([x, inputs])
x = Activation('relu')(x)
return x
def bottleneck_Block(inputs, out_filters, strides=(1, 1), with_conv_shortcut=False):
expansion = 4
de_filters = int(out_filters / expansion)
x = Conv2D(de_filters, 1, use_bias=False, kernel_initializer='he_normal')(inputs)
x = BatchNormalization(axis=3)(x)
x = Activation('relu')(x)
x = Conv2D(de_filters, 3, strides=strides, padding='same', use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization(axis=3)(x)
x = Activation('relu')(x)
x = Conv2D(out_filters, 1, use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization(axis=3)(x)
if with_conv_shortcut:
residual = Conv2D(out_filters, 1, strides=strides, use_bias=False, kernel_initializer='he_normal')(inputs)
residual = BatchNormalization(axis=3)(residual)
x = add([x, residual])
else:
x = add([x, inputs])
x = Activation('relu')(x)
return x
# First block: two 3x3 stride-2 convolutions that downsample the input image, plus N11
def stem_net(inputs):
x = Conv2D(64, 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(inputs)
x = BatchNormalization(axis=3)(x)
# x = Activation('relu')(x)
x = Conv2D(64, 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization(axis=3)(x)
x = Activation('relu')(x)
x = bottleneck_Block(x, 256, with_conv_shortcut=True)
x = bottleneck_Block(x, 256, with_conv_shortcut=False)
x = bottleneck_Block(x, 256, with_conv_shortcut=False)
x = bottleneck_Block(x, 256, with_conv_shortcut=False)
return x
# First transition layer
def transition_layer1(x, out_chan):
x0 = Conv2D(out_chan[0], 3, padding='same', use_bias=False, kernel_initializer='he_normal')(x)
x0 = BatchNormalization(axis=3)(x0)
x0 = Activation('relu')(x0)
x1 = Conv2D(out_chan[1], 3, strides=(2, 2),
padding='same', use_bias=False, kernel_initializer='he_normal')(x)
x1 = BatchNormalization(axis=3)(x1)
x1 = Activation('relu')(x1)
return [x0, x1]
# block1_0
def make_branch1(x, out_chan):
x1_0 = basic_Block(x[0], out_chan[0], with_conv_shortcut=False)
x1_0 = basic_Block(x1_0, out_chan[0], with_conv_shortcut=False)
x1_0 = basic_Block(x1_0, out_chan[0], with_conv_shortcut=False)
x1_0 = basic_Block(x1_0, out_chan[0], with_conv_shortcut=False)
x1_1 = basic_Block(x[1], out_chan[1], with_conv_shortcut=False)
x1_1 = basic_Block(x1_1, out_chan[1], with_conv_shortcut=False)
x1_1 = basic_Block(x1_1, out_chan[1], with_conv_shortcut=False)
x1_1 = basic_Block(x1_1, out_chan[1], with_conv_shortcut=False)
return [x1_0, x1_1]
# Fusion (information exchange) between the different resolutions
def fuse_layer1(x, out_filters):
# x0_0 = x[0]
x0_1 = Conv2D(out_filters[0], 1, use_bias=False, kernel_initializer='he_normal')(x[1])
x0_1 = BatchNormalization(axis=3)(x0_1)
x0_1 = tf.compat.v1.image.resize_bilinear(x0_1, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x0 = add([x[0], x0_1])
x0 = Activation('relu')(x0)
x1_0 = Conv2D(out_filters[1], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[0])
x1_0 = BatchNormalization(axis=3)(x1_0)
# x1_1 = x[1]
x1 = add([x1_0, x[1]])
x1 = Activation('relu')(x1)
return [x0, x1]
def transition_layer2(x, out_chan):
# x0 = x[0]
# x1 = x[1]
x2 = Conv2D(out_chan[2], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[1])
x2 = BatchNormalization(axis=3)(x2)
x2 = Activation('relu')(x2)
return [x[0], x[1], x2]
def make_branch2(x, out_filters):
x2_0 = basic_Block(x[0], out_filters[0], with_conv_shortcut=False)
x2_0 = basic_Block(x2_0, out_filters[0], with_conv_shortcut=False)
x2_0 = basic_Block(x2_0, out_filters[0], with_conv_shortcut=False)
x2_0 = basic_Block(x2_0, out_filters[0], with_conv_shortcut=False)
x2_1 = basic_Block(x[1], out_filters[1], with_conv_shortcut=False)
x2_1 = basic_Block(x2_1, out_filters[1], with_conv_shortcut=False)
x2_1 = basic_Block(x2_1, out_filters[1], with_conv_shortcut=False)
x2_1 = basic_Block(x2_1, out_filters[1], with_conv_shortcut=False)
x2_2 = basic_Block(x[2], out_filters[2], with_conv_shortcut=False)
x2_2 = basic_Block(x2_2, out_filters[2], with_conv_shortcut=False)
x2_2 = basic_Block(x2_2, out_filters[2], with_conv_shortcut=False)
x2_2 = basic_Block(x2_2, out_filters[2], with_conv_shortcut=False)
return [x2_0, x2_1, x2_2]
def fuse_layer2(x, out_chan):
x0_1 = Conv2D(out_chan[0], 1, use_bias=False, kernel_initializer='he_normal')(x[1])
x0_1 = BatchNormalization(axis=3)(x0_1)
x0_2 = Conv2D(out_chan[0], 1, use_bias=False, kernel_initializer='he_normal')(x[2])
x0_2 = BatchNormalization(axis=3)(x0_2)
x0_1 = tf.compat.v1.image.resize_bilinear(x0_1, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x0_2 = tf.compat.v1.image.resize_bilinear(x0_2, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x0 = add([x[0], x0_1, x0_2])
x0 = Activation('relu')(x0)
x1_0 = Conv2D(out_chan[1], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[0])
x1_0 = BatchNormalization(axis=3)(x1_0)
x1_2 = Conv2D(out_chan[1], 1, use_bias=False, kernel_initializer='he_normal')(x[2])
x1_2 = BatchNormalization(axis=3)(x1_2)
x1_2 = tf.compat.v1.image.resize_bilinear(x1_2, [tf.shape(x[1])[1], tf.shape(x[1])[2]], align_corners=True)
x1 = add([x1_0, x[1], x1_2])
x1 = Activation('relu')(x1)
x2_0 = Conv2D(out_chan[0], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[0])
x2_0 = BatchNormalization(axis=3)(x2_0)
x2_0 = Conv2D(out_chan[2], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x2_0)
x2_0 = BatchNormalization(axis=3)(x2_0)
x2_1 = Conv2D(out_chan[2], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[1])
x2_1 = BatchNormalization(axis=3)(x2_1)
x2 = add([x2_0, x2_1, x[2]])
x2 = Activation('relu')(x2)
return [x0, x1, x2]
# Change the number of channels and add the next lower-resolution branch
def transition_layer3(x, out_chan):
# x0 = x[0]
# x1 = x[1]
# x2 = x[2]
x3 = Conv2D(out_chan[3], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[2])
x3 = BatchNormalization(axis=3)(x3)
x3 = Activation('relu')(x3)
return [x[0], x[1], x[2], x3]
def make_branch3(x, out_chan):
x3_0 = basic_Block(x[0], out_chan[0], with_conv_shortcut=False)
x3_0 = basic_Block(x3_0, out_chan[0], with_conv_shortcut=False)
x3_0 = basic_Block(x3_0, out_chan[0], with_conv_shortcut=False)
x3_0 = basic_Block(x3_0, out_chan[0], with_conv_shortcut=False)
x3_1 = basic_Block(x[1], out_chan[1], with_conv_shortcut=False)
x3_1 = basic_Block(x3_1, out_chan[1], with_conv_shortcut=False)
x3_1 = basic_Block(x3_1, out_chan[1], with_conv_shortcut=False)
x3_1 = basic_Block(x3_1, out_chan[1], with_conv_shortcut=False)
x3_2 = basic_Block(x[2], out_chan[2], with_conv_shortcut=False)
x3_2 = basic_Block(x3_2, out_chan[2], with_conv_shortcut=False)
x3_2 = basic_Block(x3_2, out_chan[2], with_conv_shortcut=False)
x3_2 = basic_Block(x3_2, out_chan[2], with_conv_shortcut=False)
x3_3 = basic_Block(x[3], out_chan[3], with_conv_shortcut=False)
x3_3 = basic_Block(x3_3, out_chan[3], with_conv_shortcut=False)
x3_3 = basic_Block(x3_3, out_chan[3], with_conv_shortcut=False)
x3_3 = basic_Block(x3_3, out_chan[3], with_conv_shortcut=False)
return [x3_0, x3_1, x3_2, x3_3]
def fuse_layer3(x, num_chan):
x0_1 = Conv2D(num_chan[0], 1, use_bias=False, kernel_initializer='he_normal')(x[1])
x0_1 = BatchNormalization(axis=3)(x0_1)
x0_2 = Conv2D(num_chan[0], 1, use_bias=False, kernel_initializer='he_normal')(x[2])
x0_2 = BatchNormalization(axis=3)(x0_2)
x0_3 = Conv2D(num_chan[0], 1, use_bias=False, kernel_initializer='he_normal')(x[3])
x0_3 = BatchNormalization(axis=3)(x0_3)
x0_1 = tf.compat.v1.image.resize_bilinear(x0_1, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x0_2 = tf.compat.v1.image.resize_bilinear(x0_2, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x0_3 = tf.compat.v1.image.resize_bilinear(x0_3, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x0 = add([x[0], x0_1, x0_2, x0_3])
x0 = Activation('relu')(x0)
x1_0 = Conv2D(num_chan[1], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[0])
x1_0 = BatchNormalization()(x1_0)
x1_2 = Conv2D(num_chan[1], 1, padding='same', use_bias=False, kernel_initializer='he_normal')(x[2])
x1_2 = BatchNormalization()(x1_2)
    x1_3 = Conv2D(num_chan[1], 1, padding='same', use_bias=False, kernel_initializer='he_normal')(x[3])
    x1_3 = BatchNormalization()(x1_3)
x1_2 = tf.compat.v1.image.resize_bilinear(x1_2, [tf.shape(x[1])[1], tf.shape(x[1])[2]], align_corners=True)
x1_3 = tf.compat.v1.image.resize_bilinear(x1_3, [tf.shape(x[1])[1], tf.shape(x[1])[2]], align_corners=True)
x1 = add([x1_0, x[1], x1_2, x1_3])
x1 = Activation('relu')(x1)
x2_0 = Conv2D(num_chan[0], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[0])
x2_0 = BatchNormalization()(x2_0)
x2_0 = Conv2D(num_chan[2], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x2_0)
x2_0 = BatchNormalization()(x2_0)
x2_1 = Conv2D(num_chan[2], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[1])
x2_1 = BatchNormalization()(x2_1)
    x2_3 = Conv2D(num_chan[2], 1, padding='same', use_bias=False, kernel_initializer='he_normal')(x[3])
    x2_3 = BatchNormalization()(x2_3)
x2_3 = tf.compat.v1.image.resize_bilinear(x2_3, [tf.shape(x[2])[1], tf.shape(x[2])[2]], align_corners=True)
x2 = add([x2_0, x2_1, x[2], x2_3])
x2 = Activation('relu')(x2)
x3_0 = Conv2D(num_chan[0], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[0])
x3_0 = BatchNormalization()(x3_0)
x3_0 = Conv2D(num_chan[0], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x3_0)
x3_0 = BatchNormalization()(x3_0)
x3_0 = Conv2D(num_chan[3], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x3_0)
x3_0 = BatchNormalization()(x3_0)
x3_1 = Conv2D(num_chan[1], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[1])
x3_1 = BatchNormalization()(x3_1)
x3_1 = Conv2D(num_chan[3], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x3_1)
x3_1 = BatchNormalization()(x3_1)
x3_2 = Conv2D(num_chan[3], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[2])
x3_2 = BatchNormalization()(x3_2)
x3 = add([x3_0, x3_1, x3_2, x[3]])
x3 = Activation('relu')(x3)
return [x0, x1, x2, x3]
# Final output layer
def final_layer(x, classes, size, activation):
x0 = x[0]
x1 = tf.compat.v1.image.resize_bilinear(x[1], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x2 = tf.compat.v1.image.resize_bilinear(x[2], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x3 = tf.compat.v1.image.resize_bilinear(x[3], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x = concatenate([x0, x1, x2, x3], axis=-1)
# x = Conv2D(x.shape[3], 3, 1, use_bias=False, padding='same', kernel_initializer='he_normal')(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
x = tf.compat.v1.image.resize_bilinear(x, size, align_corners=True)
x = Conv2D(x.shape[3], 1, 1, use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(classes, 1, kernel_initializer='he_normal')(x)
if activation in {'softmax', 'sigmoid'}:
x = Activation(activation, name=activation)(x)
return x
def seg_hrnet(batch_size,
height,
width,
channel,
classes,
activation='softmax',
hrnet_type='hrnet_w48'):
if hrnet_type == 'hrnet_w18':
size = [18, 36, 72, 144]
elif hrnet_type == 'hrnet_w32':
size = [32, 64, 128, 256]
elif hrnet_type == 'hrnet_w48':
size = [48, 96, 192, 384]
else:
raise ValueError("Unsupported hrnet type!")
inputs = Input(batch_shape=(batch_size,) + (height, width, channel))
x = stem_net(inputs)
x = transition_layer1(x, size[:2])
for i in range(1):
x = make_branch1(x, size[:2])
x = fuse_layer1(x, size[:2])
x = transition_layer2(x, size[:3])
for i in range(4):
x = make_branch2(x, size[:3])
x = fuse_layer2(x, size[:3])
x = transition_layer3(x, size)
for i in range(3):
x = make_branch3(x, size)
x = fuse_layer3(x, size)
out = final_layer(x, classes=classes, size=(tf.shape(inputs)[1], tf.shape(inputs)[2]), activation=activation)
model = Model(inputs=inputs, outputs=out)
return model
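# A minimal smoke test for the seg_hrnet builder above. The input shape and the
# hrnet_w18 choice are illustrative assumptions, not part of the original file;
# call _demo_seg_hrnet() manually to run it.
def _demo_seg_hrnet():
    model = seg_hrnet(batch_size=1, height=64, width=64, channel=3,
                      classes=2, activation='softmax', hrnet_type='hrnet_w18')
    x = np.zeros((1, 64, 64, 3), dtype=np.float32)
    y = model.predict(x)
    assert y.shape == (1, 64, 64, 2)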
def spatial_gather_module(feats, probs, scale):
batch_size, h, w, c = probs.get_shape().as_list()
probs = tf.transpose(tf.reshape(probs, (batch_size, -1, c)), [0, 2, 1])
feats = tf.reshape(feats, (batch_size, -1, feats.shape[3]))
# feats = tf.transpose(feats, [0, 2, 1]) # batch, h*w, c
probs = K.softmax(scale * probs, axis=2) # batch, k, h*w
# ocr_context = tf.expand_dims(tf.transpose(tf.matmul(probs, feats), [0, 2, 1]), axis=3)
ocr_context = tf.expand_dims(tf.matmul(probs, feats), axis=2)
return ocr_context
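# Plain-NumPy sketch of what spatial_gather_module computes, for reference only
# (shapes are illustrative assumptions): for each of the K classes, a
# soft-attention average of pixel features weighted by that class's probability
# map. The TF version above additionally expands the result to (B, K, 1, C).
def _spatial_gather_numpy(feats, probs, scale=1):
    # feats: (B, H, W, C), probs: (B, H, W, K)
    b, h, w, k = probs.shape
    p = probs.reshape(b, h * w, k).transpose(0, 2, 1)  # (B, K, H*W)
    e = np.exp(scale * p)
    p = e / e.sum(axis=2, keepdims=True)               # softmax over pixels
    f = feats.reshape(b, h * w, -1)                    # (B, H*W, C)
    return p @ f                                       # (B, K, C)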
def SpatialOCR_Module(feats, proxy_feats, key_chan, out_chan, scale=1, dropout=0.1):
batch_size, h, w, c = feats.get_shape().as_list()
if scale > 1:
        feats = MaxPool2D((scale, scale))(feats)
# f_pixel
query = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(feats)
query = BatchNormalization(axis=3)(query)
query = Activation('relu')(query)
query = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(query)
query = BatchNormalization(axis=3)(query)
query = Activation('relu')(query)
query = tf.reshape(query, [batch_size, -1, key_chan]) # batch, h*w, chan
# f_object
key = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(proxy_feats)
key = BatchNormalization(axis=3)(key)
key = Activation('relu')(key)
key = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(key)
key = BatchNormalization(axis=3)(key)
key = Activation('relu')(key)
key = tf.transpose(tf.reshape(key, [batch_size, -1, key_chan]), (0, 2, 1))
# f_down
value = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(proxy_feats)
value = BatchNormalization(axis=3)(value)
value = Activation('relu')(value)
value = tf.reshape(value, [batch_size, -1, key_chan])
sim_map = tf.matmul(query, key)
sim_map = (key_chan ** -.5) * sim_map
sim_map = K.softmax(sim_map, axis=-1)
# add bg context
context = tf.matmul(sim_map, value)
context = tf.reshape(context, [batch_size, tf.shape(feats)[1], tf.shape(feats)[2], key_chan])
# f_up
context = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(context)
context = BatchNormalization(axis=3)(context)
context = Activation('relu')(context)
if scale > 1:
context = UpSampling2D(size=(scale, scale), interpolation='bilinear')(context)
output = concatenate([context, feats], axis=-1)
output = Conv2D(out_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(output)
output = BatchNormalization(axis=3)(output)
output = Activation('relu')(output)
output = Dropout(dropout)(output)
return output
def ocr_module(x, classes=1, activation='sigmoid'):
x0 = x[0]
x1 = tf.compat.v1.image.resize_bilinear(x[1], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x2 = tf.compat.v1.image.resize_bilinear(x[2], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
x3 = tf.compat.v1.image.resize_bilinear(x[3], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True)
feats = concatenate([x0, x1, x2, x3], axis=-1)
out_aux = Conv2D(feats.shape[3], 1, 1, padding='same', use_bias=True, kernel_initializer='he_normal')(feats)
out_aux = BatchNormalization(axis=3)(out_aux)
out_aux = Activation('relu')(out_aux)
out_aux = Conv2D(classes, 1, 1, padding='same', use_bias=True, kernel_initializer='he_normal')(out_aux)
feats = Conv2D(512, 3, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(feats)
feats = BatchNormalization()(feats)
feats = Activation('relu')(feats)
context = spatial_gather_module(feats, out_aux, scale=1)
feats = SpatialOCR_Module(feats, context, key_chan=256, out_chan=512, scale=1, dropout=0.05)
out = Conv2D(classes, 1, 1, padding='same', kernel_initializer='he_normal')(feats)
if activation in {'softmax', 'sigmoid'}:
out_aux = Activation(activation)(out_aux)
out = Activation(activation)(out)
return out_aux, out
def seg_hrnet_ocr(batch_size,
height,
width,
channel,
classes,
activation='softmax',
hrnet_type='hrnet_w48'):
if hrnet_type == 'hrnet_w18':
size = [18, 36, 72, 144]
elif hrnet_type == 'hrnet_w32':
size = [32, 64, 128, 256]
elif hrnet_type == 'hrnet_w48':
size = [48, 96, 192, 384]
else:
raise ValueError("Unsupported hrnet type!")
inputs = Input(batch_shape=(batch_size,) + (height, width, channel))
x = stem_net(inputs)
x = transition_layer1(x, size[:2])
for i in range(1):
x = make_branch1(x, size[:2])
x = fuse_layer1(x, size[:2])
x = transition_layer2(x, size[:3])
for i in range(4):
x = make_branch2(x, size[:3])
x = fuse_layer2(x, size[:3])
x = transition_layer3(x, size)
for i in range(3):
x = make_branch3(x, size)
x = fuse_layer3(x, size)
out_aux, out = ocr_module(x, classes=classes, activation=activation)
model = Model(inputs=inputs, outputs=(out, out_aux))
return model
if __name__ == "__main__":
from tensorflow.keras.utils import plot_model
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/Graphviz 2.44.1/bin/'
model1 = seg_hrnet_ocr(batch_size=2, height=512, width=512, channel=3, classes=19, hrnet_type='hrnet_w48')
model1.summary()
plot_model(model1, to_file='./seg_hrnet.png', show_shapes=True)
|
nilq/baby-python
|
python
|
c = get_config()
#Export all the notebooks in the current directory to the sphinx_howto format.
c.NbConvertApp.notebooks = ['*.ipynb']
c.NbConvertApp.export_format = 'markdown'
c.NbConvertApp.output_files_dir = '../assets/posts/{notebook_name}_files'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the search dns command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestSearchRack(TestBrokerCommand):
def test_100_byrowcolumn(self):
command = ["search", "rack", "--row", "k", "--column", "3",
"--city", "ny", "--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "City ny", command)
self.matchoutput(out, "Row: k", command)
self.matchoutput(out, "Column: 3", command)
self.matchclean(out, "City ln", command)
def test_101_byrack(self):
command = ["search", "rack", "--rack", "np13"]
out = self.commandtest(command)
self.matchoutput(out, "np13", command)
def test_102_empty_byrack(self):
command = ["search", "rack", "--rack", "npxx"]
self.noouttest(command)
def test_103_bybuilding(self):
command = ["search", "rack", "--building", "np",
"--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Building np", command)
self.matchclean(out, "Building ut", command)
def test_104_bycity(self):
command = ["search", "rack", "--city", "ny",
"--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "City ny", command)
self.matchclean(out, "City ln", command)
def test_105_bycountry(self):
command = ["search", "rack", "--country", "us",
"--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Country us", command)
self.matchclean(out, "Country tk", command)
def test_106_byorganization(self):
command = ["search", "rack", "--organization", "ms",
"--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Organization ms", command)
self.matchclean(out, "Organization dw", command)
def test_107_bycontinent(self):
command = ["search", "rack", "--continent", "na",
"--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Continent na", command)
self.matchclean(out, "Continent as", command)
def test_108_byhub(self):
command = ["search", "rack", "--hub", "ny",
"--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Hub ny", command)
self.matchclean(out, "Hub ln", command)
def test_109_bycampus(self):
command = ["search", "rack", "--campus", "ny",
"--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Campus ny", command)
self.matchclean(out, "Campus tk", command)
def test_110_all(self):
command = ["search", "rack", "--all"]
out = self.commandtest(command)
self.matchoutput(out, "np13", command)
def test_111_all_row_column(self):
command = ["search", "rack", "--all", "--row", "k",
"--column", "3", "--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Rack: ut13", command)
self.matchoutput(out, "Row: k", command)
self.matchoutput(out, "Column: 3", command)
def test_112_format_raw(self):
command = ["search", "rack", "--all", "--format", "raw"]
out = self.commandtest(command)
self.matchoutput(out, "ut13", command)
def test_113_format_csv(self):
command = ["search", "rack", "--all", "--format", "csv"]
out = self.commandtest(command)
self.matchoutput(out, "ut13", command)
def test_115_search_rack(self):
command = ["update_rack", "--rack", "np3", "--fullname", "TEST FULLname", "--uri", "TEST uri"]
self.noouttest(command)
command = ["search_rack", "--fullname", "TEST FULLname", "--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "np3", command)
self.matchoutput(out, "Location URI: TEST uri", command)
def test_116_search_rack(self):
command = ["search_rack", "--fullname", "TEST"]
out = self.commandtest(command)
self.matchclean(out, "np3", command)
def test_117_search_rack(self):
command = ["search_rack", "--uri", "TEST uri"]
out = self.commandtest(command)
self.matchoutput(out, "np3", command)
def test_118_search_rack(self):
command = ["search_rack", "--uri", "TEST uri", "--fullname", "TEST FULLname"]
out = self.commandtest(command)
self.matchoutput(out, "np3", command)
def test_119_search_rack(self):
command = ["search_rack", "--uri", "TEST", "--fullname", "TEST FULLname"]
out = self.commandtest(command)
self.matchclean(out, "np3", command)
def test_120_search_rack_case_insensite(self):
command = ["search_rack", "--uri", "test uri", "--fullname", "test FULLname"]
out = self.commandtest(command)
self.matchoutput(out, "np3", command)
def test_125_update_rack_back(self):
command = ["update_rack", "--rack", "np3", "--fullname", "np3", "--uri", ""]
out = self.commandtest(command)
command = ["search_rack", "--fullname", "TEST FULLname"]
self.noouttest(command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestSearchRack)
unittest.TextTestRunner(verbosity=2).run(suite)
|
nilq/baby-python
|
python
|
import urllib.parse
from sp_api.api import ProductFees
from sp_api.base import Marketplaces
def test_get_fees_for_sku():
print(ProductFees().get_product_fees_estimate_for_sku("Foo's Club", 39.32, is_fba=False))
|
nilq/baby-python
|
python
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from typing import List
import uvicorn
from fastapi import FastAPI
from ..executor import BaseExecutor
from ..util import cli_server_register
from ..util import stats_wrapper
from paddlespeech.server.engine.engine_factory import EngineFactory
from paddlespeech.server.restful.api import setup_router
from paddlespeech.server.utils.config import get_config
__all__ = ['ServerExecutor']
app = FastAPI(
title="PaddleSpeech Serving API", description="Api", version="0.0.1")
@cli_server_register(
name='paddlespeech_server.start', description='Start the service')
class ServerExecutor(BaseExecutor):
def __init__(self):
super(ServerExecutor, self).__init__()
self.parser = argparse.ArgumentParser(
prog='paddlespeech_server.start', add_help=True)
self.parser.add_argument(
"--config_file",
action="store",
help="yaml file of the app",
default="./conf/application.yaml")
self.parser.add_argument(
"--log_file",
action="store",
help="log file",
default="./log/paddlespeech.log")
def init(self, config) -> bool:
"""system initialization
Args:
config (CfgNode): config object
Returns:
bool:
"""
# init api
api_list = list(config.engine_backend)
api_router = setup_router(api_list)
app.include_router(api_router)
# init engine
engine_pool = []
for engine in config.engine_backend:
engine_pool.append(EngineFactory.get_engine(engine_name=engine))
if not engine_pool[-1].init(
config_file=config.engine_backend[engine]):
return False
return True
def execute(self, argv: List[str]) -> bool:
args = self.parser.parse_args(argv)
config = get_config(args.config_file)
if self.init(config):
uvicorn.run(app, host=config.host, port=config.port, debug=True)
@stats_wrapper
def __call__(self,
config_file: str="./conf/application.yaml",
log_file: str="./log/paddlespeech.log"):
"""
Python API to call an executor.
"""
config = get_config(config_file)
if self.init(config):
uvicorn.run(app, host=config.host, port=config.port, debug=True)
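# Example Python-API usage of the executor above (paths are the parser
# defaults; this note is an addition to the original module):
#
#   server = ServerExecutor()
#   server(config_file="./conf/application.yaml",
#          log_file="./log/paddlespeech.log")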
|
nilq/baby-python
|
python
|
import os
import datetime
from omegaconf import OmegaConf
from . import io
from . import features
from . import models
from . import metrics
from . import kfolds
from . import permutation
conf = None
def setup(config="config.yaml"):
global conf
conf = OmegaConf.load(config)
if not os.path.exists('output'):
os.makedirs('output')
model_name = conf.get('model_name', conf.task)
if conf.get("output_directory", None) is None:
conf.output_directory = 'output/' + model_name
if not os.path.exists(conf.output_directory):
os.makedirs(conf.output_directory)
elif conf.task in ['simple', 'kFolds', 'kFoldsEnsemble']:
print("Error: Model already exists with name: " + model_name)
exit()
image_directory = conf.output_directory + '/figures'
if not os.path.exists(image_directory):
os.makedirs(image_directory)
image_directory = image_directory + '/'
conf.image_directory = image_directory
if conf.data.directory is None:
print("Error: No data directory set")
exit()
elif conf.data.directory[-1] != "/":
conf.data.directory += "/"
conf.target_names = [t.name for t in conf.targets]
conf.pretty_feature_names = [f.name for f in conf.pretty_features]
|
nilq/baby-python
|
python
|
"Livestreamer main class"
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import re
import sys
# Python 2/3 compatibility
try:
from urllib.parse import urlsplit
except ImportError:
from urlparse import urlsplit
try:
from configparser import SafeConfigParser
except ImportError:
from ConfigParser import SafeConfigParser
import requests
from livestreamer import Livestreamer, StreamError, PluginError, NoPluginError
from livedumper import common
# This is just a guess, don't know if it's optimal.
KB = 1024
READ_BUFFER = 512 * KB # 512kB
# http://livestreamer.readthedocs.org/en/latest/api.html
AVAILABLE_OPTIONS = {'hds-live-edge': 'float',
'hds-segment-attempts': 'int',
'hds-segment-threads': 'int',
'hds-segment-timeout': 'float',
'hds-timeout': 'float',
'hls-live-edge': 'int',
'hls-segment-attempts': 'int',
'hls-segment-threads': 'int',
'hls-segment-timeout': 'float',
'hls-timeout': 'float',
'http-proxy': 'str',
'https-proxy': 'str',
'http-cookies': 'str',
'http-headers': 'str',
'http-query-params': 'str',
'http-trust-env': 'bool',
'http-ssl-verify': 'bool',
'http-ssl-cert': 'str',
'http-timeout': 'float',
'http-stream-timeout': 'float',
'subprocess-errorlog': 'bool',
'ringbuffer-size': 'int',
'rtmp-proxy': 'str',
'rtmp-rtmpdump': 'str',
'rtmp-timeout': 'float',
'stream-segment-attempts': 'int',
'stream-segment-threads': 'int',
'stream-segment-timeout': 'float',
'stream-timeout': 'float'}
VIDEO_EXTENSIONS = {'AkamaiHDStream': '.flv', # http://bit.ly/1Bfa6Qc
'HDSStream': '.f4f', # http://bit.ly/1p7Ednb
'HLSStream': '.ts', # http://bit.ly/1t0oVBn
'HTTPStream': '.mp4', # Can be WebM too?
'RTMPStream': '.flv'} # http://bit.ly/1nQwWUd
# Compiling regex before using it may give a slightly better performance,
# specially if user downloads various videos simultaneously.
_RE_PAGE_TITLE = re.compile(r'<title>(.+?)</title>')
# Matches any character which is not a Unicode word character.
# I don't care if your system doesn't support unicode in filenames
# this is f****** 2014!
_RE_INVALID_CHARS = re.compile(r'\W', re.UNICODE)
class LivestreamerDumper(object):
"Main class for dumping streams"
def __init__(self, config_path):
"""LivestreamerDumper constructor
Parameters:
config_path: path to user config directory
"""
self.fd = None
self.config_path = config_path
def open(self, url, quality):
"""Attempt to open stream from *url*.
Exits with '-1' (using self.exit()) in case of error, including
an error msg.
"""
self.original_url = url
try:
self.livestreamer = Livestreamer()
self._load_config()
streams = self.livestreamer.streams(url)
except NoPluginError:
self.exit("Livestreamer is unable to handle the URL '{}'".
format(url))
except PluginError as err:
self.exit("Plugin error: {}".format(err))
if quality not in streams:
print("Unable to find '{}' stream on URL '{}'"
.format(quality, url), file=sys.stderr)
self.exit("List of available streams: {}".
format(sorted(streams.keys())))
self.stream = streams[quality]
try:
self.fd = self.stream.open()
except StreamError as err:
self.exit("Failed to open stream: {}".format(err))
def _load_config(self):
"Load and parse config file, pass options to livestreamer"
config = SafeConfigParser()
config_file = os.path.join(self.config_path, 'settings.ini')
config.read(config_file)
for option, type in list(AVAILABLE_OPTIONS.items()):
if config.has_option('DEFAULT', option):
if type == 'int':
value = config.getint('DEFAULT', option)
if type == 'float':
value = config.getfloat('DEFAULT', option)
if type == 'bool':
value = config.getboolean('DEFAULT', option)
if type == 'str':
value = config.get('DEFAULT', option)
self.livestreamer.set_option(option, value)
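    # Example settings.ini accepted by _load_config above. The option names
    # come from AVAILABLE_OPTIONS; the values are illustrative:
    #
    #   [DEFAULT]
    #   hls-segment-threads = 2
    #   http-timeout = 60.0
    #   ringbuffer-size = 16777216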
def get_title(self):
"""Returns the filename from URL (including extension), that
may be:
https://www.youtube.com/watch?v=ZEtEH-GIAJE ->
'[Hatsune Miku] After Rain Sweet*Drops [English Sub] -
YouTube.mp4'
https://www.youtube.com/watch?v=ZEtEH-GIAJE ->
'watch_v=ZEtEH-GIAJE.mp4'
        The former case occurs when the URL is a web page with <title>
        tags. The latter occurs on pages with malformed HTML or when
        you pass a non-HTML URL as a parameter (for example, a direct
        link to an HTML5 video).
        The extension is detected according to the stream type, for
        example RTMPStream will always be '.flv'. The only format
        that may return a wrong extension is HTTPStream, since there
        is no standard container in that case. We assume (for now) that
        every HTTPStream is '.mp4'.
"""
stream_type = self.stream.__class__.__name__
try:
extension = VIDEO_EXTENSIONS[stream_type]
except KeyError:
print('No extension found...', file=sys.stderr)
extension = ''
r = requests.get(self.original_url)
regex_result = _RE_PAGE_TITLE.search(r.text)
if regex_result is not None:
filename = regex_result.group(1)
# Badly formatted HTML (e.g. no '<title>')
else:
# 'http://www.example.com/path1/path2?q=V1' ->
# 'http', 'www.example.com', '/path1/path2', 'q=V1'
split_url = urlsplit(self.original_url)
# '/path1/path2' -> 'path2'
filename = split_url.path.split('/')[-1]
# 'path2' -> 'path2_q=V1'
if split_url.query:
filename = filename + '_' + split_url.query
# Substitute invalid chars for '_'
filename = _RE_INVALID_CHARS.sub('_', filename)
        # Since Windows (Explorer?) enforces a 255-character limit on the
        # filename, including the path, we need to limit the filename to a
        # sane size. In this case I am using 80 chars.
return filename[:80] + extension
def stop(self):
"If stream is opened, close it"
if self.fd:
self.fd.close()
self.fd = None
def exit(self, msg=0):
"Close an opened stream and call sys.exit(msg)."
self.stop()
sys.exit(msg)
def dump(self, filepath):
"Attempt to dump an opened stream to path *filepath*."
common.ask_overwrite(filepath)
filename = os.path.basename(filepath)
file_size = 0
with open(filepath, 'ab') as f:
try:
while True:
buf = self.fd.read(READ_BUFFER)
if not buf:
break
f.write(buf)
file_size = file_size + (READ_BUFFER / KB)
print("Downloaded {} KB of file '{}'".
format(file_size, filename), end='\r')
except KeyboardInterrupt:
self.exit("\nPartial download of file '{}'".format(filepath))
print("\nComplete download of file '{}'".format(filepath))
|
nilq/baby-python
|
python
|
"""
Tests ``from __future__ import absolute_import`` (only important for
Python 2.X)
"""
import jedi
from .. import helpers
@helpers.cwd_at("test/test_evaluate/absolute_import")
def test_can_complete_when_shadowing():
script = jedi.Script(path="unittest.py")
assert script.completions()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import math
import os
import random
import unittest
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.distributed as dist
from torchrec.distributed.embedding_sharding import bucketize_kjt_before_all2all
from torchrec.distributed.embeddingbag import (
EmbeddingBagCollectionSharder,
)
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.distributed.tests.test_model import TestSparseNN
from torchrec.distributed.utils import get_unsharded_module_names
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.sparse.tests.tests_utils import keyed_jagged_tensor_equals
from torchrec.tests.utils import get_free_port
def _compute_translated_lengths(
row_indices: List[int],
indices_offsets: List[int],
lengths_size: int,
trainers_size: int,
block_sizes: List[int],
) -> List[int]:
translated_lengths = [0] * trainers_size * lengths_size
batch_size = int(lengths_size / len(block_sizes))
iteration = feature_offset = batch_iteration = 0
for start_offset, end_offset in zip(indices_offsets, indices_offsets[1:]):
# iterate all rows that belong to current feature and batch iteration
for row_idx in row_indices[start_offset:end_offset]:
# compute the owner of this row
trainer_offset = int(row_idx / block_sizes[feature_offset])
# we do not have enough trainers to handle this row
if trainer_offset >= trainers_size:
continue
trainer_lengths_offset = trainer_offset * lengths_size
# compute the offset in lengths that is local in each trainer
local_lengths_offset = feature_offset * batch_size + batch_iteration
# increment the corresponding length in the trainer
translated_lengths[trainer_lengths_offset + local_lengths_offset] += 1
# bookkeeping
iteration += 1
feature_offset = int(iteration / batch_size)
batch_iteration = (batch_iteration + 1) % batch_size
return translated_lengths
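# A hand-worked example for _compute_translated_lengths above (this check is an
# addition, not part of the original tests): one feature, batch_size=2,
# lengths [1, 2] give indices_offsets [0, 1, 3]; with block size 5 and two
# trainers, row 0 stays on trainer 0 while rows 5 and 9 land on trainer 1.
def _demo_compute_translated_lengths():
    lengths = _compute_translated_lengths(
        row_indices=[0, 5, 9],
        indices_offsets=[0, 1, 3],
        lengths_size=2,
        trainers_size=2,
        block_sizes=[5],
    )
    assert lengths == [1, 0, 0, 2]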
def _compute_translated_indices_with_weights(
translated_lengths: List[int],
row_indices: List[int],
indices_offsets: List[int],
lengths_size: int,
weights: Optional[List[int]],
trainers_size: int,
block_sizes: List[int],
) -> List[Tuple[int, int]]:
translated_indices_with_weights = [(0, 0)] * len(row_indices)
translated_indices_offsets = np.cumsum([0] + translated_lengths)
batch_size = int(lengths_size / len(block_sizes))
iteration = feature_offset = batch_iteration = 0
for start_offset, end_offset in zip(indices_offsets, indices_offsets[1:]):
# iterate all rows that belong to current feature and batch iteration
# and assign the translated row index to the corresponding offset in output
for current_offset in range(start_offset, end_offset):
row_idx = row_indices[current_offset]
feature_block_size = block_sizes[feature_offset]
# compute the owner of this row
trainer_offset = int(row_idx / feature_block_size)
if trainer_offset >= trainers_size:
continue
trainer_lengths_offset = trainer_offset * lengths_size
# compute the offset in lengths that is local in each trainer
local_lengths_offset = feature_offset * batch_size + batch_iteration
# since we know the number of rows belonging to each trainer,
# we can figure out the corresponding offset in the translated indices list
# for the current translated index
translated_indices_offset = translated_indices_offsets[
trainer_lengths_offset + local_lengths_offset
]
translated_indices_with_weights[translated_indices_offset] = (
row_idx % feature_block_size,
weights[current_offset] if weights else 0,
)
# the next row that goes to this trainer for this feature and batch
# combination goes to the next offset
translated_indices_offsets[
trainer_lengths_offset + local_lengths_offset
] += 1
# bookkeeping
iteration += 1
feature_offset = int(iteration / batch_size)
batch_iteration = (batch_iteration + 1) % batch_size
return translated_indices_with_weights
def block_bucketize_ref(
keyed_jagged_tensor: KeyedJaggedTensor,
trainers_size: int,
block_sizes: torch.Tensor,
) -> KeyedJaggedTensor:
lengths_list = keyed_jagged_tensor.lengths().view(-1).tolist()
indices_list = keyed_jagged_tensor.values().view(-1).tolist()
weights_list = (
keyed_jagged_tensor.weights().view(-1).tolist()
if keyed_jagged_tensor.weights() is not None
else None
)
block_sizes_list = block_sizes.view(-1).tolist()
lengths_size = len(lengths_list)
"""
each element in indices_offsets signifies both the starting offset, in indices_list,
that corresponds to all rows in a particular feature and batch iteration,
and the ending offset of the previous feature/batch iteration
For example:
given that features_size = 2 and batch_size = 2, an indices_offsets of
[0,1,4,6,6] signifies that:
elements in indices_list[0:1] belongs to feature 0 batch 0
elements in indices_list[1:4] belongs to feature 0 batch 1
elements in indices_list[4:6] belongs to feature 1 batch 0
elements in indices_list[6:6] belongs to feature 1 batch 1
"""
indices_offsets = np.cumsum([0] + lengths_list)
translated_lengths = _compute_translated_lengths(
row_indices=indices_list,
indices_offsets=indices_offsets,
lengths_size=lengths_size,
trainers_size=trainers_size,
block_sizes=block_sizes_list,
)
translated_indices_with_weights = _compute_translated_indices_with_weights(
translated_lengths=translated_lengths,
row_indices=indices_list,
indices_offsets=indices_offsets,
lengths_size=lengths_size,
weights=weights_list,
trainers_size=trainers_size,
block_sizes=block_sizes_list,
)
translated_indices = [
translated_index for translated_index, _ in translated_indices_with_weights
]
translated_weights = [
translated_weight for _, translated_weight in translated_indices_with_weights
]
expected_keys = [
f"{key}@bucket_{index}"
for index in range(trainers_size)
for key in keyed_jagged_tensor.keys()
]
return KeyedJaggedTensor(
keys=expected_keys,
lengths=torch.tensor(
translated_lengths, dtype=keyed_jagged_tensor.lengths().dtype
)
.view(-1)
.cuda(),
values=torch.tensor(
translated_indices, dtype=keyed_jagged_tensor.values().dtype
).cuda(),
weights=torch.tensor(translated_weights).float().cuda()
if weights_list
else None,
)
class UtilsTest(unittest.TestCase):
def test_get_unsharded_module_names(self) -> None:
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
os.environ["LOCAL_WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = str("localhost")
os.environ["MASTER_PORT"] = str(get_free_port())
os.environ["GLOO_DEVICE_TRANSPORT"] = "TCP"
device = torch.device("cpu")
backend = "gloo"
if not dist.is_initialized():
dist.init_process_group(backend=backend)
tables = [
EmbeddingBagConfig(
num_embeddings=10,
embedding_dim=4,
name="table_" + str(i),
feature_names=["feature_" + str(i)],
)
for i in range(2)
]
weighted_tables = [
EmbeddingBagConfig(
num_embeddings=10,
embedding_dim=4,
name="weighted_table_" + str(i),
feature_names=["weighted_feature_" + str(i)],
)
for i in range(2)
]
m = TestSparseNN(
tables=tables,
weighted_tables=weighted_tables,
dense_device=device,
sparse_device=device,
)
dmp = DistributedModelParallel(
module=m,
init_data_parallel=False,
device=device,
sharders=[
EmbeddingBagCollectionSharder(),
],
)
np.testing.assert_array_equal(
sorted(get_unsharded_module_names(dmp)),
sorted(["module.over", "module.dense"]),
)
# pyre-ignore[56]
@unittest.skipIf(
torch.cuda.device_count() <= 0,
"CUDA is not available",
)
def test_kjt_bucketize_before_all2all(self) -> None:
index_type = random.choice([torch.int, torch.long])
offset_type = random.choice([torch.int, torch.long])
world_size = random.randint(1, 129)
MAX_NUM_FEATURES = 15
MAX_BATCH_SIZE = 15
MAX_LENGTH = 10
# max number of rows needed for a given feature to have unique row index
MAX_ROW_COUNT = MAX_LENGTH * MAX_BATCH_SIZE
num_features = random.randint(2, MAX_NUM_FEATURES)
batch_size = random.randint(2, MAX_BATCH_SIZE)
lengths_list = [
random.randrange(MAX_LENGTH + 1) for _ in range(num_features * batch_size)
]
keys_list = [f"feature_{i}" for i in range(num_features)]
# for each feature, generate unrepeated row indices
indices_lists = [
random.sample(
range(MAX_ROW_COUNT),
# number of indices needed is the length sum of all batches for a feature
sum(
lengths_list[
feature_offset * batch_size : (feature_offset + 1) * batch_size
]
),
)
for feature_offset in range(num_features)
]
indices_list = list(itertools.chain(*indices_lists))
weights_list = [random.randint(1, 100) for _ in range(len(indices_list))]
# for each feature, calculate the minimum block size needed to
# distribute all rows to the available trainers
block_sizes_list = [
math.ceil((max(feature_indices_list) + 1) / world_size)
for feature_indices_list in indices_lists
]
kjt = KeyedJaggedTensor(
keys=keys_list,
lengths=torch.tensor(lengths_list, dtype=offset_type)
.view(num_features * batch_size)
.cuda(),
values=torch.tensor(indices_list, dtype=index_type).cuda(),
weights=torch.tensor(weights_list, dtype=torch.float).cuda(),
)
"""
each entry in block_sizes identifies how many hashes for each feature goes
to every rank; we have three featues in `self.features`
"""
block_sizes = torch.tensor(block_sizes_list, dtype=index_type).cuda()
block_bucketized_kjt, _ = bucketize_kjt_before_all2all(
kjt, world_size, block_sizes, False, False
)
expected_block_bucketized_kjt = block_bucketize_ref(
kjt,
world_size,
block_sizes,
)
print(f"block_sizes: {block_sizes}")
print(f"num_features: {num_features}")
print(f"batch_size: {batch_size}")
print(f"world_size: {world_size}")
print(f"KeyedJaggedTensor: {kjt}")
print(f"block_bucketized KeyedJaggedTensor: {block_bucketized_kjt}")
print(
f"expected_block_bucketized KeyedJaggedTensor: {expected_block_bucketized_kjt}"
)
self.assertTrue(
keyed_jagged_tensor_equals(
block_bucketized_kjt, expected_block_bucketized_kjt
)
)
|
nilq/baby-python
|
python
|
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from .....exabel.api.analytics.v1 import prediction_model_messages_pb2 as exabel_dot_api_dot_analytics_dot_v1_dot_prediction__model__messages__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
DESCRIPTOR = _descriptor.FileDescriptor(name='exabel/api/analytics/v1/prediction_model_service.proto', package='exabel.api.analytics.v1', syntax='proto3', serialized_options=b'\n\x1bcom.exabel.api.analytics.v1B\x1bPredictionModelServiceProtoP\x01Z\x1bexabel.com/api/analytics/v1', create_key=_descriptor._internal_create_key, serialized_pb=b'\n6exabel/api/analytics/v1/prediction_model_service.proto\x12\x17exabel.api.analytics.v1\x1a7exabel/api/analytics/v1/prediction_model_messages.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/api/field_behavior.proto"u\n\x1fCreatePredictionModelRunRequest\x12\x13\n\x06parent\x18\x01 \x01(\tB\x03\xe0A\x02\x12=\n\x03run\x18\x02 \x01(\x0b2+.exabel.api.analytics.v1.PredictionModelRunB\x03\xe0A\x022\xcf\x01\n\x16PredictionModelService\x12\xb4\x01\n\x18CreatePredictionModelRun\x128.exabel.api.analytics.v1.CreatePredictionModelRunRequest\x1a+.exabel.api.analytics.v1.PredictionModelRun"1\x82\xd3\xe4\x93\x02+"$/v1/{parent=predictionModels/*}/runs:\x03runBY\n\x1bcom.exabel.api.analytics.v1B\x1bPredictionModelServiceProtoP\x01Z\x1bexabel.com/api/analytics/v1b\x06proto3', dependencies=[exabel_dot_api_dot_analytics_dot_v1_dot_prediction__model__messages__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_field__behavior__pb2.DESCRIPTOR])
_CREATEPREDICTIONMODELRUNREQUEST = _descriptor.Descriptor(name='CreatePredictionModelRunRequest', full_name='exabel.api.analytics.v1.CreatePredictionModelRunRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='parent', full_name='exabel.api.analytics.v1.CreatePredictionModelRunRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\xe0A\x02', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='run', full_name='exabel.api.analytics.v1.CreatePredictionModelRunRequest.run', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\xe0A\x02', file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=203, serialized_end=320)
_CREATEPREDICTIONMODELRUNREQUEST.fields_by_name['run'].message_type = exabel_dot_api_dot_analytics_dot_v1_dot_prediction__model__messages__pb2._PREDICTIONMODELRUN
DESCRIPTOR.message_types_by_name['CreatePredictionModelRunRequest'] = _CREATEPREDICTIONMODELRUNREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreatePredictionModelRunRequest = _reflection.GeneratedProtocolMessageType('CreatePredictionModelRunRequest', (_message.Message,), {'DESCRIPTOR': _CREATEPREDICTIONMODELRUNREQUEST, '__module__': 'exabel.api.analytics.v1.prediction_model_service_pb2'})
_sym_db.RegisterMessage(CreatePredictionModelRunRequest)
DESCRIPTOR._options = None
_CREATEPREDICTIONMODELRUNREQUEST.fields_by_name['parent']._options = None
_CREATEPREDICTIONMODELRUNREQUEST.fields_by_name['run']._options = None
_PREDICTIONMODELSERVICE = _descriptor.ServiceDescriptor(name='PredictionModelService', full_name='exabel.api.analytics.v1.PredictionModelService', file=DESCRIPTOR, index=0, serialized_options=None, create_key=_descriptor._internal_create_key, serialized_start=323, serialized_end=530, methods=[_descriptor.MethodDescriptor(name='CreatePredictionModelRun', full_name='exabel.api.analytics.v1.PredictionModelService.CreatePredictionModelRun', index=0, containing_service=None, input_type=_CREATEPREDICTIONMODELRUNREQUEST, output_type=exabel_dot_api_dot_analytics_dot_v1_dot_prediction__model__messages__pb2._PREDICTIONMODELRUN, serialized_options=b'\x82\xd3\xe4\x93\x02+"$/v1/{parent=predictionModels/*}/runs:\x03run', create_key=_descriptor._internal_create_key)])
_sym_db.RegisterServiceDescriptor(_PREDICTIONMODELSERVICE)
DESCRIPTOR.services_by_name['PredictionModelService'] = _PREDICTIONMODELSERVICE
|
nilq/baby-python
|
python
|
"""Metadata read/write support for bup."""
# Copyright (C) 2010 Rob Browning
#
# This code is covered under the terms of the GNU Library General
# Public License as described in the bup LICENSE file.
import errno, os, sys, stat, pwd, grp, struct, re
from cStringIO import StringIO
from bup import vint, xstat
from bup.drecurse import recursive_dirlist
from bup.helpers import add_error, mkdirp, log, is_superuser
from bup.xstat import utime, lutime, lstat
import bup._helpers as _helpers
try:
import xattr
except ImportError:
log('Warning: Linux xattr support missing; install python-pyxattr.\n')
xattr = None
if xattr:
try:
xattr.get_all
except AttributeError:
log('Warning: python-xattr module is too old; '
'install python-pyxattr instead.\n')
xattr = None
try:
import posix1e
except ImportError:
log('Warning: POSIX ACL support missing; install python-pylibacl.\n')
posix1e = None
try:
from bup._helpers import get_linux_file_attr, set_linux_file_attr
except ImportError:
# No need for a warning here; the only reason they won't exist is that we're
# not on Linux, in which case files don't have any linux attrs anyway, so
# lacking the functions isn't a problem.
get_linux_file_attr = set_linux_file_attr = None
# WARNING: the metadata encoding is *not* stable yet. Caveat emptor!
# Q: Consider hardlink support?
# Q: Is it OK to store raw linux attr (chattr) flags?
# Q: Can anything other than S_ISREG(x) or S_ISDIR(x) support posix1e ACLs?
# Q: Is the application of posix1e has_extended() correct?
# Q: Is one global --numeric-ids argument sufficient?
# Q: Do nfsv4 acls trump posix1e acls? (seems likely)
# Q: Add support for crtime -- ntfs, and (only internally?) ext*?
# FIXME: Fix relative/abs path detection/stripping wrt other platforms.
# FIXME: Add nfsv4 acl handling - see nfs4-acl-tools.
# FIXME: Consider other entries mentioned in stat(2) (S_IFDOOR, etc.).
# FIXME: Consider pack('vvvvsss', ...) optimization.
# FIXME: Consider caching users/groups.
## FS notes:
#
# osx (varies between hfs and hfs+):
# type - regular dir char block fifo socket ...
# perms - rwxrwxrwxsgt
# times - ctime atime mtime
# uid
# gid
# hard-link-info (hfs+ only)
# link-target
# device-major/minor
# attributes-osx see chflags
# content-type
# content-creator
# forks
#
# ntfs
# type - regular dir ...
# times - creation, modification, posix change, access
# hard-link-info
# link-target
# attributes - see attrib
# ACLs
# forks (alternate data streams)
# crtime?
#
# fat
# type - regular dir ...
# perms - rwxrwxrwx (maybe - see wikipedia)
# times - creation, modification, access
# attributes - see attrib
verbose = 0
_have_lchmod = hasattr(os, 'lchmod')
def _clean_up_path_for_archive(p):
# Not the most efficient approach.
result = p
# Take everything after any '/../'.
pos = result.rfind('/../')
if pos != -1:
result = result[result.rfind('/../') + 4:]
# Take everything after any remaining '../'.
if result.startswith("../"):
result = result[3:]
# Remove any '/./' sequences.
pos = result.find('/./')
while pos != -1:
result = result[0:pos] + '/' + result[pos + 3:]
pos = result.find('/./')
# Remove any leading '/'s.
result = result.lstrip('/')
# Replace '//' with '/' everywhere.
pos = result.find('//')
while pos != -1:
result = result[0:pos] + '/' + result[pos + 2:]
pos = result.find('//')
# Take everything after any remaining './'.
if result.startswith('./'):
result = result[2:]
# Take everything before any remaining '/.'.
if result.endswith('/.'):
result = result[:-2]
if result == '' or result.endswith('/..'):
result = '.'
return result
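# Illustrative checks for the normalization above, derived by tracing the code
# (these asserts are an addition, not part of the original module):
def _check_clean_up_path_for_archive():
    assert _clean_up_path_for_archive('/foo//bar/./baz') == 'foo/bar/baz'
    assert _clean_up_path_for_archive('a/../b') == 'b'
    assert _clean_up_path_for_archive('a/..') == '.'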
def _risky_path(p):
if p.startswith('/'):
return True
if p.find('/../') != -1:
return True
if p.startswith('../'):
return True
if p.endswith('/..'):
return True
return False
def _clean_up_extract_path(p):
result = p.lstrip('/')
if result == '':
return '.'
elif _risky_path(result):
return None
else:
return result
# These tags are currently conceptually private to Metadata, and they
# must be unique, and must *never* be changed.
_rec_tag_end = 0
_rec_tag_path = 1
_rec_tag_common = 2 # times, owner, group, type, perms, etc.
_rec_tag_symlink_target = 3
_rec_tag_posix1e_acl = 4 # getfacl(1), setfacl(1), etc.
_rec_tag_nfsv4_acl = 5 # intended to supplant posix1e acls?
_rec_tag_linux_attr = 6 # lsattr(1) chattr(1)
_rec_tag_linux_xattr = 7 # getfattr(1) setfattr(1)
class ApplyError(Exception):
# Thrown when unable to apply any given bit of metadata to a path.
pass
class Metadata:
# Metadata is stored as a sequence of tagged binary records. Each
# record will have some subset of add, encode, load, create, and
# apply methods, i.e. _add_foo...
## Common records
# Timestamps are (sec, ns), relative to 1970-01-01 00:00:00, ns
# must be non-negative and < 10**9.
def _add_common(self, path, st):
self.mode = st.st_mode
self.uid = st.st_uid
self.gid = st.st_gid
self.rdev = st.st_rdev
self.atime = st.st_atime
self.mtime = st.st_mtime
self.ctime = st.st_ctime
self.owner = self.group = ''
try:
self.owner = pwd.getpwuid(st.st_uid)[0]
except KeyError, e:
add_error("no user name for id %s '%s'" % (st.st_gid, path))
try:
self.group = grp.getgrgid(st.st_gid)[0]
except KeyError, e:
add_error("no group name for id %s '%s'" % (st.st_gid, path))
def _encode_common(self):
atime = xstat.nsecs_to_timespec(self.atime)
mtime = xstat.nsecs_to_timespec(self.mtime)
ctime = xstat.nsecs_to_timespec(self.ctime)
result = vint.pack('VVsVsVvVvVvV',
self.mode,
self.uid,
self.owner,
self.gid,
self.group,
self.rdev,
atime[0],
atime[1],
mtime[0],
mtime[1],
ctime[0],
ctime[1])
return result
def _load_common_rec(self, port):
data = vint.read_bvec(port)
(self.mode,
self.uid,
self.owner,
self.gid,
self.group,
self.rdev,
self.atime,
atime_ns,
self.mtime,
mtime_ns,
self.ctime,
ctime_ns) = vint.unpack('VVsVsVvVvVvV', data)
self.atime = xstat.timespec_to_nsecs((self.atime, atime_ns))
self.mtime = xstat.timespec_to_nsecs((self.mtime, mtime_ns))
self.ctime = xstat.timespec_to_nsecs((self.ctime, ctime_ns))
def _recognized_file_type(self):
return stat.S_ISREG(self.mode) \
or stat.S_ISDIR(self.mode) \
or stat.S_ISCHR(self.mode) \
or stat.S_ISBLK(self.mode) \
or stat.S_ISFIFO(self.mode) \
or stat.S_ISSOCK(self.mode) \
or stat.S_ISLNK(self.mode)
def _create_via_common_rec(self, path, create_symlinks=True):
# If the path already exists and is a dir, try rmdir.
# If the path already exists and is anything else, try unlink.
st = None
try:
st = xstat.lstat(path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if st:
if stat.S_ISDIR(st.st_mode):
try:
os.rmdir(path)
except OSError, e:
if e.errno == errno.ENOTEMPTY:
                        msg = 'refusing to overwrite non-empty dir ' + path
raise Exception(msg)
raise
else:
os.unlink(path)
if stat.S_ISREG(self.mode):
assert(self._recognized_file_type())
fd = os.open(path, os.O_CREAT|os.O_WRONLY|os.O_EXCL, 0600)
os.close(fd)
elif stat.S_ISDIR(self.mode):
assert(self._recognized_file_type())
os.mkdir(path, 0700)
elif stat.S_ISCHR(self.mode):
assert(self._recognized_file_type())
os.mknod(path, 0600 | stat.S_IFCHR, self.rdev)
elif stat.S_ISBLK(self.mode):
assert(self._recognized_file_type())
os.mknod(path, 0600 | stat.S_IFBLK, self.rdev)
elif stat.S_ISFIFO(self.mode):
assert(self._recognized_file_type())
os.mknod(path, 0600 | stat.S_IFIFO)
elif stat.S_ISSOCK(self.mode):
os.mknod(path, 0600 | stat.S_IFSOCK)
elif stat.S_ISLNK(self.mode):
assert(self._recognized_file_type())
if self.symlink_target and create_symlinks:
# on MacOS, symlink() permissions depend on umask, and there's
# no way to chown a symlink after creating it, so we have to
# be careful here!
oldumask = os.umask((self.mode & 0777) ^ 0777)
try:
os.symlink(self.symlink_target, path)
finally:
os.umask(oldumask)
# FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
else:
assert(not self._recognized_file_type())
add_error('not creating "%s" with unrecognized mode "0x%x"\n'
% (path, self.mode))
def _apply_common_rec(self, path, restore_numeric_ids=False):
# FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
# EACCES errors at this stage are fatal for the current path.
if lutime and stat.S_ISLNK(self.mode):
try:
lutime(path, (self.atime, self.mtime))
except OSError, e:
if e.errno == errno.EACCES:
raise ApplyError('lutime: %s' % e)
else:
raise
else:
try:
utime(path, (self.atime, self.mtime))
except OSError, e:
if e.errno == errno.EACCES:
raise ApplyError('utime: %s' % e)
else:
raise
# Don't try to restore owner unless we're root, and even
# if asked, don't try to restore the owner or group if
# it doesn't exist in the system db.
uid = self.uid
gid = self.gid
if not restore_numeric_ids:
if not self.owner:
uid = -1
add_error('ignoring missing owner for "%s"\n' % path)
else:
if not is_superuser():
uid = -1 # Not root; assume we can't change owner.
else:
try:
uid = pwd.getpwnam(self.owner)[2]
except KeyError:
uid = -1
fmt = 'ignoring unknown owner %s for "%s"\n'
add_error(fmt % (self.owner, path))
if not self.group:
gid = -1
add_error('ignoring missing group for "%s"\n' % path)
else:
try:
gid = grp.getgrnam(self.group)[2]
except KeyError:
gid = -1
add_error('ignoring unknown group %s for "%s"\n'
% (self.group, path))
try:
os.lchown(path, uid, gid)
except OSError, e:
if e.errno == errno.EPERM:
add_error('lchown: %s' % e)
else:
raise
if _have_lchmod:
os.lchmod(path, stat.S_IMODE(self.mode))
elif not stat.S_ISLNK(self.mode):
os.chmod(path, stat.S_IMODE(self.mode))
## Path records
def _encode_path(self):
if self.path:
return vint.pack('s', self.path)
else:
return None
def _load_path_rec(self, port):
self.path = vint.unpack('s', vint.read_bvec(port))[0]
## Symlink targets
def _add_symlink_target(self, path, st):
try:
if stat.S_ISLNK(st.st_mode):
self.symlink_target = os.readlink(path)
except OSError, e:
            add_error('readlink: %s' % e)
def _encode_symlink_target(self):
return self.symlink_target
def _load_symlink_target_rec(self, port):
self.symlink_target = vint.read_bvec(port)
## POSIX1e ACL records
# Recorded as a list:
# [txt_id_acl, num_id_acl]
# or, if a directory:
# [txt_id_acl, num_id_acl, txt_id_default_acl, num_id_default_acl]
# The numeric/text distinction only matters when reading/restoring
# a stored record.
def _add_posix1e_acl(self, path, st):
if not posix1e: return
if not stat.S_ISLNK(st.st_mode):
try:
if posix1e.has_extended(path):
acl = posix1e.ACL(file=path)
self.posix1e_acl = [acl, acl] # txt and num are the same
if stat.S_ISDIR(st.st_mode):
acl = posix1e.ACL(filedef=path)
self.posix1e_acl.extend([acl, acl])
except EnvironmentError, e:
if e.errno != errno.EOPNOTSUPP:
raise
def _encode_posix1e_acl(self):
# Encode as two strings (w/default ACL string possibly empty).
if self.posix1e_acl:
acls = self.posix1e_acl
txt_flags = posix1e.TEXT_ABBREVIATE
num_flags = posix1e.TEXT_ABBREVIATE | posix1e.TEXT_NUMERIC_IDS
acl_reps = [acls[0].to_any_text('', '\n', txt_flags),
acls[1].to_any_text('', '\n', num_flags)]
if len(acls) < 3:
acl_reps += ['', '']
else:
acl_reps.append(acls[2].to_any_text('', '\n', txt_flags))
acl_reps.append(acls[3].to_any_text('', '\n', num_flags))
return vint.pack('ssss',
acl_reps[0], acl_reps[1], acl_reps[2], acl_reps[3])
else:
return None
def _load_posix1e_acl_rec(self, port):
data = vint.read_bvec(port)
acl_reps = vint.unpack('ssss', data)
if acl_reps[2] == '':
acl_reps = acl_reps[:2]
self.posix1e_acl = [posix1e.ACL(text=x) for x in acl_reps]
def _apply_posix1e_acl_rec(self, path, restore_numeric_ids=False):
if not posix1e:
if self.posix1e_acl:
add_error("%s: can't restore ACLs; posix1e support missing.\n"
% path)
return
if self.posix1e_acl:
acls = self.posix1e_acl
if len(acls) > 2:
if restore_numeric_ids:
acls[3].applyto(path, posix1e.ACL_TYPE_DEFAULT)
else:
acls[2].applyto(path, posix1e.ACL_TYPE_DEFAULT)
if restore_numeric_ids:
acls[1].applyto(path, posix1e.ACL_TYPE_ACCESS)
else:
acls[0].applyto(path, posix1e.ACL_TYPE_ACCESS)
## Linux attributes (lsattr(1), chattr(1))
def _add_linux_attr(self, path, st):
if not get_linux_file_attr: return
if stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode):
try:
attr = get_linux_file_attr(path)
if attr != 0:
self.linux_attr = attr
except OSError, e:
if e.errno == errno.EACCES:
add_error('read Linux attr: %s' % e)
elif e.errno == errno.ENOTTY: # Inappropriate ioctl for device.
add_error('read Linux attr: %s' % e)
else:
raise
def _encode_linux_attr(self):
if self.linux_attr:
return vint.pack('V', self.linux_attr)
else:
return None
def _load_linux_attr_rec(self, port):
data = vint.read_bvec(port)
self.linux_attr = vint.unpack('V', data)[0]
def _apply_linux_attr_rec(self, path, restore_numeric_ids=False):
if self.linux_attr:
if not set_linux_file_attr:
add_error("%s: can't restore linuxattrs: "
"linuxattr support missing.\n" % path)
return
set_linux_file_attr(path, self.linux_attr)
## Linux extended attributes (getfattr(1), setfattr(1))
def _add_linux_xattr(self, path, st):
if not xattr: return
try:
self.linux_xattr = xattr.get_all(path, nofollow=True)
except EnvironmentError, e:
if e.errno != errno.EOPNOTSUPP:
raise
def _encode_linux_xattr(self):
if self.linux_xattr:
result = vint.pack('V', len(self.linux_xattr))
for name, value in self.linux_xattr:
result += vint.pack('ss', name, value)
return result
else:
return None
def _load_linux_xattr_rec(self, file):
data = vint.read_bvec(file)
memfile = StringIO(data)
result = []
for i in range(vint.read_vuint(memfile)):
key = vint.read_bvec(memfile)
value = vint.read_bvec(memfile)
result.append((key, value))
self.linux_xattr = result
def _apply_linux_xattr_rec(self, path, restore_numeric_ids=False):
if not xattr:
if self.linux_xattr:
add_error("%s: can't restore xattr; xattr support missing.\n"
% path)
return
existing_xattrs = set(xattr.list(path, nofollow=True))
if self.linux_xattr:
for k, v in self.linux_xattr:
if k not in existing_xattrs \
or v != xattr.get(path, k, nofollow=True):
try:
xattr.set(path, k, v, nofollow=True)
except IOError, e:
if e.errno == errno.EPERM:
raise ApplyError('xattr.set: %s' % e)
else:
raise
existing_xattrs -= frozenset([k])
for k in existing_xattrs:
try:
xattr.remove(path, k, nofollow=True)
except IOError, e:
if e.errno == errno.EPERM:
raise ApplyError('xattr.remove: %s' % e)
else:
raise
def __init__(self):
# optional members
self.path = None
self.symlink_target = None
self.linux_attr = None
self.linux_xattr = None
self.posix1e_acl = None
self.posix1e_acl_default = None
def write(self, port, include_path=True):
records = include_path and [(_rec_tag_path, self._encode_path())] or []
records.extend([(_rec_tag_common, self._encode_common()),
(_rec_tag_symlink_target, self._encode_symlink_target()),
(_rec_tag_posix1e_acl, self._encode_posix1e_acl()),
(_rec_tag_linux_attr, self._encode_linux_attr()),
(_rec_tag_linux_xattr, self._encode_linux_xattr())])
for tag, data in records:
if data:
vint.write_vuint(port, tag)
vint.write_bvec(port, data)
vint.write_vuint(port, _rec_tag_end)
@staticmethod
def read(port):
# This method should either: return a valid Metadata object;
# throw EOFError if there was nothing at all to read; throw an
# Exception if a valid object could not be read completely.
tag = vint.read_vuint(port)
try: # From here on, EOF is an error.
result = Metadata()
while True: # only exit is error (exception) or _rec_tag_end
if tag == _rec_tag_path:
result._load_path_rec(port)
elif tag == _rec_tag_common:
result._load_common_rec(port)
elif tag == _rec_tag_symlink_target:
result._load_symlink_target_rec(port)
elif tag == _rec_tag_posix1e_acl:
result._load_posix1e_acl_rec(port)
                elif tag == _rec_tag_nfsv4_acl:
result._load_nfsv4_acl_rec(port)
elif tag == _rec_tag_linux_attr:
result._load_linux_attr_rec(port)
elif tag == _rec_tag_linux_xattr:
result._load_linux_xattr_rec(port)
elif tag == _rec_tag_end:
return result
else: # unknown record
vint.skip_bvec(port)
tag = vint.read_vuint(port)
except EOFError:
raise Exception("EOF while reading Metadata")
def isdir(self):
return stat.S_ISDIR(self.mode)
def create_path(self, path, create_symlinks=True):
self._create_via_common_rec(path, create_symlinks=create_symlinks)
def apply_to_path(self, path=None, restore_numeric_ids=False):
# apply metadata to path -- file must exist
if not path:
path = self.path
if not path:
            raise Exception('Metadata.apply_to_path() called with no path')
if not self._recognized_file_type():
add_error('not applying metadata to "%s"' % path
+ ' with unrecognized mode "0x%x"\n' % self.mode)
return
num_ids = restore_numeric_ids
try:
self._apply_common_rec(path, restore_numeric_ids=num_ids)
self._apply_posix1e_acl_rec(path, restore_numeric_ids=num_ids)
self._apply_linux_attr_rec(path, restore_numeric_ids=num_ids)
self._apply_linux_xattr_rec(path, restore_numeric_ids=num_ids)
except ApplyError, e:
add_error(e)
def from_path(path, statinfo=None, archive_path=None, save_symlinks=True):
result = Metadata()
result.path = archive_path
st = statinfo or xstat.lstat(path)
result._add_common(path, st)
if save_symlinks:
result._add_symlink_target(path, st)
result._add_posix1e_acl(path, st)
result._add_linux_attr(path, st)
result._add_linux_xattr(path, st)
return result
def save_tree(output_file, paths,
recurse=False,
write_paths=True,
save_symlinks=True,
xdev=False):
# Issue top-level rewrite warnings.
for path in paths:
safe_path = _clean_up_path_for_archive(path)
if safe_path != path:
log('archiving "%s" as "%s"\n' % (path, safe_path))
start_dir = os.getcwd()
try:
for (p, st) in recursive_dirlist(paths, xdev=xdev):
dirlist_dir = os.getcwd()
os.chdir(start_dir)
safe_path = _clean_up_path_for_archive(p)
m = from_path(p, statinfo=st, archive_path=safe_path,
save_symlinks=save_symlinks)
if verbose:
print >> sys.stderr, m.path
m.write(output_file, include_path=write_paths)
os.chdir(dirlist_dir)
finally:
os.chdir(start_dir)
def _set_up_path(meta, create_symlinks=True):
# Allow directories to exist as a special case -- might have
# been created by an earlier longer path.
if meta.isdir():
mkdirp(meta.path)
else:
parent = os.path.dirname(meta.path)
if parent:
mkdirp(parent)
meta.create_path(meta.path, create_symlinks=create_symlinks)
class _ArchiveIterator:
def next(self):
try:
return Metadata.read(self._file)
except EOFError:
raise StopIteration()
def __iter__(self):
return self
def __init__(self, file):
self._file = file
def display_archive(file):
for meta in _ArchiveIterator(file):
if verbose:
print meta.path # FIXME
else:
print meta.path
def start_extract(file, create_symlinks=True):
for meta in _ArchiveIterator(file):
if verbose:
print >> sys.stderr, meta.path
xpath = _clean_up_extract_path(meta.path)
if not xpath:
add_error(Exception('skipping risky path "%s"' % meta.path))
else:
meta.path = xpath
_set_up_path(meta, create_symlinks=create_symlinks)
def finish_extract(file, restore_numeric_ids=False):
all_dirs = []
for meta in _ArchiveIterator(file):
xpath = _clean_up_extract_path(meta.path)
if not xpath:
            add_error(Exception('skipping risky path "%s"' % meta.path))
else:
if os.path.isdir(meta.path):
all_dirs.append(meta)
else:
if verbose:
print >> sys.stderr, meta.path
meta.apply_to_path(path=xpath,
restore_numeric_ids=restore_numeric_ids)
    all_dirs.sort(key=lambda x: len(x.path), reverse=True)
for dir in all_dirs:
# Don't need to check xpath -- won't be in all_dirs if not OK.
xpath = _clean_up_extract_path(dir.path)
if verbose:
print >> sys.stderr, dir.path
dir.apply_to_path(path=xpath, restore_numeric_ids=restore_numeric_ids)
def extract(file, restore_numeric_ids=False, create_symlinks=True):
# For now, just store all the directories and handle them last,
# longest first.
all_dirs = []
for meta in _ArchiveIterator(file):
xpath = _clean_up_extract_path(meta.path)
if not xpath:
add_error(Exception('skipping risky path "%s"' % meta.path))
else:
meta.path = xpath
if verbose:
print >> sys.stderr, '+', meta.path
_set_up_path(meta, create_symlinks=create_symlinks)
if os.path.isdir(meta.path):
all_dirs.append(meta)
else:
if verbose:
print >> sys.stderr, '=', meta.path
meta.apply_to_path(restore_numeric_ids=restore_numeric_ids)
    all_dirs.sort(key=lambda x: len(x.path), reverse=True)
for dir in all_dirs:
# Don't need to check xpath -- won't be in all_dirs if not OK.
xpath = _clean_up_extract_path(dir.path)
if verbose:
print >> sys.stderr, '=', xpath
# Shouldn't have to check for risky paths here (omitted above).
dir.apply_to_path(path=dir.path,
restore_numeric_ids=restore_numeric_ids)
|
nilq/baby-python
|
python
|
import functools
import hashlib
import os
from typing import BinaryIO, Final, List, Optional, final, Iterable
@final
class Team:
team_id: Final[str]
name: Final[str]
def __init__(self, team_id: str, name: str):
self.team_id = team_id
self.name = name
@final
class Replay:
PLAYER_TAG_PREFIX: Final = "player:"
OPP_TAG_PREFIX: Final = "opponent:"
GAME_TAG_PREFIX: Final = "game:"
path: Final[str]
replay_hash: Final[str]
tags: Final[List[str]]
notes: str
teams: Final[List[Team]]
timestamp: Optional[int]
player_team: Optional[int]
opponent_team: Optional[int]
@staticmethod
def hash_replay_data(replay_data: BinaryIO) -> str:
hash_calculator = hashlib.sha256()
for buf in iter(functools.partial(replay_data.read, 4096), b""):
hash_calculator.update(buf)
return hash_calculator.hexdigest()
@staticmethod
def hash_replay_from_path(replay_path: str) -> str:
with open(replay_path, "rb") as replay_file:
return Replay.hash_replay_data(replay_file)
@staticmethod
def create_player_tag(tag_name: str):
return Replay.PLAYER_TAG_PREFIX + tag_name
@staticmethod
def create_opponent_tag(tag_name: str):
return Replay.OPP_TAG_PREFIX + tag_name
@staticmethod
def create_game_tag(tag_name: str):
return Replay.GAME_TAG_PREFIX + tag_name
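    # Illustrative only (not part of the original class): these helpers just
    # prepend the class-level prefixes, e.g.
    #   Replay.create_player_tag("bio")     -> "player:bio"
    #   Replay.create_opponent_tag("mech")  -> "opponent:mech"
    #   Replay.create_game_tag("ladder")    -> "game:ladder"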
def __init__(
self,
path: str,
replay_hash: str = "",
tags: Optional[List[str]] = None,
notes: Optional[str] = None,
teams: Optional[List[Team]] = None,
timestamp: Optional[int] = None,
player_team: Optional[int] = None,
opponent_team: Optional[int] = None,
):
if not replay_hash:
replay_hash = Replay.hash_replay_from_path(path)
if tags is None:
tags = []
if notes is None:
notes = ""
if teams is None:
teams = []
self.path = os.path.normpath(path)
self.replay_hash = replay_hash
self.tags = list(dict.fromkeys(tags))
self.notes = notes
self.teams = teams
self.timestamp = timestamp
self.player_team = player_team
self.opponent_team = opponent_team
def set_tags(self, tags: Iterable[str]):
self.tags.clear()
self.tags.extend(dict.fromkeys(tags))
def append_tag(self, tag: str):
if tag not in set(self.tags):
self.tags.append(tag)
def prepend_tag(self, tag: str):
if tag not in set(self.tags):
new_tags = [tag] + self.tags
self.set_tags(new_tags)
def remove_tag(self, tag: str):
if tag in set(self.tags):
self.tags.remove(tag)
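# A minimal usage sketch (hypothetical file path and hash, not part of the
# original module). Passing an explicit replay_hash skips hashing the file:
#   replay = Replay("games/game1.SC2Replay", replay_hash="deadbeef",
#                   tags=["player:bio", "game:ladder"])
#   replay.append_tag(Replay.create_opponent_tag("mech"))
#   replay.remove_tag("game:ladder")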
|
nilq/baby-python
|
python
|
from .extension import setup
__version__ = "0.1.0"
__all__ = ["setup"]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import csv
import logging
import subprocess
import os
import sys
from github import Github
from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from pr_info import PRInfo, get_event
from build_download_helper import download_all_deb_packages
from upload_result_helper import upload_results
from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
from tee_popen import TeePopen
def get_run_command(build_path, result_folder, server_log_folder, image):
cmd = "docker run --cap-add=SYS_PTRACE -e S3_URL='https://clickhouse-datasets.s3.amazonaws.com' " + \
f"--volume={build_path}:/package_folder " \
f"--volume={result_folder}:/test_output " \
f"--volume={server_log_folder}:/var/log/clickhouse-server {image}"
return cmd
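# Illustration only (hypothetical paths): with build_path=/tmp/packages,
# result_folder=/tmp/result, server_log_folder=/tmp/server_log and
# image=clickhouse/stress-test, get_run_command() produces a single-line
# command like the following (wrapped here for readability):
#   docker run --cap-add=SYS_PTRACE -e S3_URL='https://clickhouse-datasets.s3.amazonaws.com' \
#       --volume=/tmp/packages:/package_folder --volume=/tmp/result:/test_output \
#       --volume=/tmp/server_log:/var/log/clickhouse-server clickhouse/stress-test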
def process_results(result_folder, server_log_path, run_log_path):
test_results = []
additional_files = []
# Just upload all files from result_folder.
# If task provides processed results, then it's responsible for content of result_folder.
if os.path.exists(result_folder):
test_files = [f for f in os.listdir(result_folder) if os.path.isfile(os.path.join(result_folder, f))]
additional_files = [os.path.join(result_folder, f) for f in test_files]
if os.path.exists(server_log_path):
server_log_files = [f for f in os.listdir(server_log_path) if os.path.isfile(os.path.join(server_log_path, f))]
additional_files = additional_files + [os.path.join(server_log_path, f) for f in server_log_files]
additional_files.append(run_log_path)
status_path = os.path.join(result_folder, "check_status.tsv")
if not os.path.exists(status_path):
return "failure", "check_status.tsv doesn't exists", test_results, additional_files
logging.info("Found check_status.tsv")
with open(status_path, 'r', encoding='utf-8') as status_file:
status = list(csv.reader(status_file, delimiter='\t'))
if len(status) != 1 or len(status[0]) != 2:
return "error", "Invalid check_status.tsv", test_results, additional_files
state, description = status[0][0], status[0][1]
results_path = os.path.join(result_folder, "test_results.tsv")
with open(results_path, 'r', encoding='utf-8') as results_file:
test_results = list(csv.reader(results_file, delimiter='\t'))
if len(test_results) == 0:
raise Exception("Empty results")
return state, description, test_results, additional_files
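# Expected result_folder layout, inferred from the parsing above (values are
# illustrative):
#   check_status.tsv  - a single tab-separated row: "<state>\t<description>"
#   test_results.tsv  - one tab-separated row per test case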
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
stopwatch = Stopwatch()
temp_path = os.getenv("TEMP_PATH", os.path.abspath("."))
repo_path = os.getenv("REPO_COPY", os.path.abspath("../../"))
reports_path = os.getenv("REPORTS_PATH", "./reports")
check_name = sys.argv[1]
if not os.path.exists(temp_path):
os.makedirs(temp_path)
pr_info = PRInfo(get_event())
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, check_name)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = get_image_with_version(reports_path, 'clickhouse/stress-test')
packages_path = os.path.join(temp_path, "packages")
if not os.path.exists(packages_path):
os.makedirs(packages_path)
download_all_deb_packages(check_name, reports_path, packages_path)
server_log_path = os.path.join(temp_path, "server_log")
if not os.path.exists(server_log_path):
os.makedirs(server_log_path)
result_path = os.path.join(temp_path, "result_path")
if not os.path.exists(result_path):
os.makedirs(result_path)
run_log_path = os.path.join(temp_path, "runlog.log")
run_command = get_run_command(packages_path, result_path, server_log_path, docker_image)
logging.info("Going to run func tests: %s", run_command)
with TeePopen(run_command, run_log_path) as process:
retcode = process.wait()
if retcode == 0:
logging.info("Run successfully")
else:
logging.info("Run failed")
subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True)
s3_helper = S3Helper('https://s3.amazonaws.com')
state, description, test_results, additional_logs = process_results(result_path, server_log_path, run_log_path)
ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, check_name, test_results)
report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [run_log_path] + additional_logs, check_name)
print(f"::notice ::Report url: {report_url}")
post_commit_status(gh, pr_info.sha, check_name, description, state, report_url)
prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, check_name)
ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
|
nilq/baby-python
|
python
|
"""
Top-level namespace for meta-analyses.
"""
from . import cbma
from . import ibma
from . import esma
__all__ = ['cbma', 'ibma', 'esma']
|
nilq/baby-python
|
python
|
import pandas as pd
## Basic test set
def make_test_set():
test_df = pd.read_csv("sample_submission.csv", usecols=["order_id"])
    # find the user_id for each order_id and merge
orders_df = pd.read_csv("orders.csv", usecols=["order_id","user_id", "order_dow", "order_hour_of_day"])
test_df = pd.merge(test_df, orders_df, how="inner", on="order_id")
del orders_df
    # merge with prior
    # product list matching each user and order_id
test_df = pd.merge(test_df, latest_order(), how="inner", on="user_id")
products_df = pd.read_csv("products.csv", usecols = ["product_id", "aisle_id","department_id"])
test_df = pd.merge(test_df, products_df, how="inner", on="product_id")
del products_df
    #### Note: the part below was originally meant to be removed!
test_df = test_df.drop(["reordered_count","reordered_sum","reordered_latest"], axis = 1)
return test_df
## Attach the engineered features: more to be added
def test_result():
test_x = make_test_set()
test_x = pd.merge(test_x, dep_prob(), how="left", on=["user_id","department_id"])
test_x = pd.merge(test_x, aisle_prob(), how="left", on=["user_id","aisle_id"])
test_x = pd.merge(test_x, dow_prob(), how="left", on = ["user_id", "order_dow"])
test_x = pd.merge(test_x, hour_prob(), how="left", on=["user_id","order_hour_of_day"])
test_x = pd.merge(test_x, organic_prob(), how="left", on=["user_id","product_id"])
return test_x
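## Hypothetical usage, assuming the feature helpers above are defined and the
## CSV files are present in the working directory:
##   test_x = test_result()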
|
nilq/baby-python
|
python
|
import orodja
import re
import unicodedata
import os
from pathlib import Path
leta = ["/pyeongchang-2018", "/sochi-2014", "/vancouver-2010", "/turin-2006", "/salt-lake-city-2002", "/nagano-1998",
"/lillehammer-1994", "/albertville-1992", "/calgary-1988", "/sarajevo-1984", "/lake-placid-1980", "/innsbruck-1976",
"/sapporo-1972", "/grenoble-1968", "/innsbruck-1964", "/squaw-valley-1960", "/cortina-d-ampezzo-1956", "/oslo-1952",
"/st-moritz-1948", "/garmisch-partenkirchen-1936", "/lake-placid-1932", "/st-moritz-1928", "/chamonix-1924"]
disciplina1 = "/alpine-skiing"
poddiscipline1_1 = ["/mens-alpine-combined", "/mens-downhill", "/mens-giant-slalom", "/mens-slalom", "/mens-super-g",
"/ladies-alpine-combined", "/ladies-downhill", "/ladies-giant-slalom", "/ladies-slalom", "/ladies-super-g"]
poddiscipline1_2 = ["/alpine-combined-men", "/downhill-men", "/giant-slalom-men", "/slalom-men", "/super-g-men",
"/alpine-combined-women", "/downhill-women", "/giant-slalom-women", "/slalom-women", "/super-g-women"]
disciplina2 = "/biathlon"
poddiscipline2_1 = ["/mens-10km-sprint", "/mens-12-5km-pursuit", "/mens-15km-mass-start", "/mens-20km-individual",
"/womens-10km-pursuit", "/womens-12-5km-mass-start", "/womens-15km-individual", "/womens-7-5km-sprint"]
poddiscipline2_2 = ["/10km-men", "/12-5km-pursuit-men", "/15km-mass-start-men", "/20km-men",
"/10km-pursuit-women", "/12-5km-mass-start-women", "/15km-women", "/7-5km-women"]
disciplina3 = "/cross-country-skiing"
poddiscipline3_1 = ["/mens-15km-free", "/mens-15km-15km-skiathlon", "/mens-50km-mass-start-classic", "/mens-sprint-classic",
"/ladies-10km-free", "/ladies-30km-mass-start-classic", "/ladies-7-5km-7-5km-skiathlon", "/ladies-sprint-classic"]
poddiscipline3_2 = ["/15km-men", "/skiathlon-15km-15km-men", "/50km-men", "/sprint-15km-men",
"/10km-women", "/30km-women", "/skiathlon-7-5km-7-5km-women", "/sprint-15km-women"]
disciplina4 = "/figure-skating"
poddiscipline4_1 = ["/mens-single-skating",
"/ladies-single-skating"]
poddiscipline4_2 = ["/individual-men",
"/individual-women"]
disciplina5 = "/freestyle-skiing"
poddiscipline5_1 = ["/mens-aerials", "/mens-moguls", "/mens-ski-cross", "/mens-ski-halfpipe", "/mens-ski-slopestyle",
"/ladies-aerials", "/ladies-moguls", "/ladies-ski-cross", "/ladies-ski-halfpipe", "/ladies-ski-slopestyle"]
poddiscipline5_2 = ["/aerials-men", "/moguls-women", "/ski-cross-men", "/ski-halfpipe-men", "/ski-slopestyle-men",
"/aerials-women", "/moguls-women", "/ski-cross-women", "/ski-halfpipe-women", "/ski-slopestyle-women"]
disciplina6 = "/luge"
poddiscipline6_1 = ["/mens-singles",
"/womens-singles"]
poddiscipline6_2 = ["/singles-men",
"/singles-women"]
disciplina7 = "/nordic-combined"
poddiscipline7_1 = ["/mens-individual-gundersen-lh-10km", "/mens-individual-gundersen-nh-10km"]
poddiscipline7_2 = ["/individual-lh-men", "/individual-men"]
disciplina8_1 = "/short-track"
poddiscipline8_1 = ["/mens-1000m", "/mens-1500m", "/mens-500m",
"/ladies-1000m", "/ladies-1500m", "/ladies-500m"]
disciplina8_2 = "/short-track-speed-skating"
poddiscipline8_2 = ["/1000m-men", "/1500m-men", "/500m-men",
"/1000m-women", "/1500m-women", "/500m-women"]
disciplina9 = "/skeleton"
poddiscipline9_1 = ["/men",
"/women"]
poddiscipline9_2 = ["/individual-men",
"/individual-women"]
disciplina10 = "/ski-jumping"
poddiscipline10_1 = ["/mens-large-hill-individual", "/mens-normal-hill-individual",
"/ladies-normal-hill-individual"]
poddiscipline10_2 = ["/large-hill-individual-men", "/normal-hill-individual-men",
"/normal-hill-individualwomen"]
disciplina11 = "/snowboard"
poddiscipline11_1 = ["/mens-big-air", "/mens-halfpipe", "/mens-parallel-giant-slalom", "/mens-slopestyle", "/mens-snowboard-cross",
"/ladies-big-air", "/ladies-halfpipe", "/ladies-parallel-giant-slalom", "/ladies-slopestyle", "/ladies-snowboard-cross"]
poddiscipline11_2 = ["/parallel-slalom-men", "/half-pipe-men", "/giant-parallel-slalom-men", "/slopestyle-men", "/snowboard-cross-men",
"/parallel-slalom-women", "/half-pipe-women", "/giant-parallel-slalom-women", "/slopestyle-women", "/snowboard-cross-women"]
disciplina12 = "/speed-skating"
poddiscipline12_1 = ["/mens-10000m", "/mens-1000m", "/mens-1500m", "/mens-5000m", "/mens-500m", "/mens-mass-start",
"/ladies-1000m", "/ladies-1500m", "/ladies-3000m", "/ladies-5000m", "/ladies-500m", "/ladies-mass-start"]
poddiscipline12_2 = ["/10000m-men", "/1000m-men", "/1500m-men", "/5000m-men", "/2x500m-men",
"/1000m-women", "/1500m-women", "/3000m-women", "/5000m-women", "/2x500m-women"]
osnovni_naslov = "https://www.olympic.org"
def podatki_posameznik(datoteka, olimpijske, disciplina, poddisciplina):
'''
    The function takes a file name, an Olympic games edition, a discipline and
    a sub-discipline, and builds a list of dictionaries holding the
    competitors' results.
'''
print(datoteka)
with open(str(datoteka), encoding='utf-8') as f:
vsebina = f.read()
stevec = 0
for tekmovalec in re.finditer(
r'<tr>.+?<td class="col1">(?P<mesto>.*?)</td>.+?<td class="col2">'
r'.+?<a href="/(?P<ime>.+?)">.+?<span class="picture">'
r'.+?<span>(?P<drzava>\D{3})</span>'
r'.+?<td class="col3">(?P<rezultat>.*?)</td>.+?</tr>'
,vsebina, flags=re.DOTALL):
mesto = tekmovalec.group('mesto')
x = re.search(r'\d+', mesto)
if x:
mesto = x.group()
else:
if re.search('G', mesto):
mesto = '1'
elif re.search('S', mesto):
mesto = '2'
elif re.search('B', mesto):
mesto = '3'
else:
mesto = ''
stevec += 1
if str(stevec) != mesto or mesto == '':
continue
ime = tekmovalec.group('ime')
if ime not in tekmovalci:
tekmovalci.add(ime)
ime = ime.replace("-", " ")
ime = ime.title()
drzava = tekmovalec.group('drzava')
rezultat = tekmovalec.group('rezultat')
rezultat = rezultat.strip()
rezultat = rezultat.replace("\n", "")
igre = olimpijske[1:]
igre = igre.replace("-", " ")
igre = igre.capitalize()
        # create a dictionary for every participant
nastop = {}
nastop['igre'] = igre
nastop['disciplina'] = disciplina
nastop['poddisciplina'] = poddisciplina
nastop['mesto'] = mesto
nastop['ime'] = ime
nastop['drzava'] = drzava
nastop['rezultat'] = rezultat
rezultati.append(nastop)
sez.add(tekmovalec.group('ime'))
def posameznik_rojstni_dan(datoteka, sportnik):
'''
    The function takes a file name and a competitor's name and builds two
    lists. One holds dictionaries with the competitor's name and birth date;
    the other holds dictionaries with each country's code and full name.
'''
print(datoteka)
with open(str(datoteka), encoding='utf-8') as f:
vsebina = f.read()
for tekmovalec in re.finditer(
r'<div class="flag-image">'
r'.+?<span>(?P<kratica>\D\D\D)</span>'
r'.+?<div class="frame">'
r'.+?<strong class="title">Country </strong>.+?'
r'<a (itemprop="url" )?href="/(?P<drzava>.+?)">.+?</a>'
r'.+?<strong class="title">(Born|Lived)</strong>(?P<datum>.+?)</div>'
, vsebina, flags=re.DOTALL):
ime = sportnik
ime = ime.replace("-", " ")
ime = ime.title()
datum = tekmovalec.group('datum')
datum = datum.replace("\n", "")
meseci = {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05',
'Jun':'06', 'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10',
'Nov':'11', 'Dec':'12'}
kratica = tekmovalec.group('kratica')
nastopajoci = {}
nastopajoci['ime'] = ime
nastopajoci['drzava'] = kratica
if '01 Jan 0001' == datum[:11]:
nastopajoci['datum'] = ''
else:
            datum = datum[:11] # some entries also list a date of death
st = meseci[datum[3:6]]
nastopajoci['datum'] = datum[:2] + '.' + st + '.' + datum[-4:]
roj_dan_tekmovalcev.append(nastopajoci)
drzava = tekmovalec.group('drzava')
drzava = drzava.replace("-", " ")
drzava = drzava.title()
if kratica not in drz:
drz.add(kratica)
drzave_s_kratico = {}
drzave_s_kratico['kratica'] = kratica
drzave_s_kratico['drzava'] = drzava
drzave.append(drzave_s_kratico)
# (discipline, sub-disciplines) pairs: the first list is for Pyeongchang 2018,
# the second for all earlier games.
pari_2018 = [(disciplina1, poddiscipline1_1), (disciplina2, poddiscipline2_1),
             (disciplina3, poddiscipline3_1), (disciplina4, poddiscipline4_1),
             (disciplina5, poddiscipline5_1), (disciplina6, poddiscipline6_1),
             (disciplina7, poddiscipline7_1), (disciplina8_1, poddiscipline8_1),
             (disciplina9, poddiscipline9_1), (disciplina10, poddiscipline10_1),
             (disciplina11, poddiscipline11_1), (disciplina12, poddiscipline12_1)]
pari_ostale = [(disciplina1, poddiscipline1_2), (disciplina2, poddiscipline2_2),
               (disciplina3, poddiscipline3_2), (disciplina4, poddiscipline4_2),
               (disciplina5, poddiscipline5_2), (disciplina6, poddiscipline6_2),
               (disciplina7, poddiscipline7_2), (disciplina8_2, poddiscipline8_2),
               (disciplina9, poddiscipline9_2), (disciplina10, poddiscipline10_2),
               (disciplina11, poddiscipline11_2), (disciplina12, poddiscipline12_2)]
def prenesi_html():
    '''
    Saves the HTML results page for every event. Relies on the shrani
    function from the orodja module.
    '''
    for disciplina, poddiscipline in pari_2018:
        for poddisciplina in poddiscipline:
            naslov = osnovni_naslov + leta[0] + disciplina + poddisciplina
            datoteka = "rezultati_{}_{}_{}.html".format(leta[0], disciplina[1:], poddisciplina[1:])
            orodja.shrani(naslov, datoteka)
    for olimpijske in leta[1:]:
        for disciplina, poddiscipline in pari_ostale:
            for poddisciplina in poddiscipline:
                naslov = osnovni_naslov + olimpijske + disciplina + poddisciplina
                datoteka = "rezultati_{}_{}_{}.html".format(olimpijske, disciplina[1:], poddisciplina[1:])
                orodja.shrani(naslov, datoteka)
def prenesi_html_tekmovalca():
'''
    Saves the HTML page for every competitor. Relies on the shrani
    function from the orodja module.
'''
for tekmovalec in tekmovalci:
tekmovalec.replace('\n', '')
naslov = osnovni_naslov + "/" + tekmovalec
datoteka = "{}.html".format(tekmovalec)
pot = os.path.join("tekmovalci", datoteka)
orodja.shrani(naslov, pot)
def preberi_podatke():
    '''
    Stores the competitors' results in a list with the help of the
    podatki_posameznik function above.
    '''
    for disciplina, poddiscipline in pari_2018:
        disc = disciplina.replace("/", "")
        for poddisc in poddiscipline:
            poddisc = poddisc.replace("/", "")
            dat = Path("rezultati_{}_{}_{}.html".format(leta[0], disc, poddisc))
            podatki_posameznik(dat, leta[0], disc.replace('-', ' '),
                               poddisc.replace('-', ' '))
    for olimpijske in leta[1:]:
        for disciplina, poddiscipline in pari_ostale:
            disc = disciplina.replace("/", "")
            for poddisc in poddiscipline:
                poddisc = poddisc.replace("/", "")
                dat = Path("rezultati_{}_{}_{}.html".format(olimpijske, disc, poddisc))
                podatki_posameznik(dat, olimpijske, disc.replace('-', ' '),
                                   poddisc.replace('-', ' '))
def preberi_podatke_tekmovalcev():
'''
    Stores the competitors' birth dates, and the country codes with their
    full names, in lists with the help of the posameznik_rojstni_dan
    function above.
'''
tekm = set()
    f = open('tekmovalci.txt', 'r', encoding='utf-8')
for line in f:
tekm.add(line)
f.close()
mnozica_tekmovalcev = [tekmovalec[:-1] for tekmovalec in tekm]
for tekmovalec in mnozica_tekmovalcev:
dat = Path("tekmovalci")
pot = dat / "{}.html".format(tekmovalec)
posameznik_rojstni_dan(pot, tekmovalec)
def zapisi_tekmovalce(tekmovalci):
'''
    Writes all competitor names from the given collection to tekmovalci.txt.
'''
f = open("tekmovalci.txt", "w+", encoding='utf-8')
for tekmovalec in tekmovalci:
f.write(tekmovalec + "\n")
f.close()
rezultati = []
tekmovalci = set()
roj_dan_tekmovalcev = []
sez = set()
drz = set()
drzave = []
#prenesi_html()
preberi_podatke()
#prenesi_html_tekmovalca()
zapisi_tekmovalce(tekmovalci)
preberi_podatke_tekmovalcev()
#orodja.zapisi_tabelo(rezultati, ['igre', 'disciplina', 'poddisciplina', 'mesto', 'ime', 'drzava', 'rezultat'], 'rezultati.csv')
#orodja.zapisi_tabelo(roj_dan_tekmovalcev, ['ime', 'datum'], 'roj_dan_tekmovalcev.csv')
#orodja.zapisi_tabelo(drzave, ['kratica', 'drzava'], 'seznam_drzav.csv')
orodja.zapisi_json(rezultati, 'rezultati.json')
orodja.zapisi_json(roj_dan_tekmovalcev, 'roj_dan_tekmovalcev.json')
orodja.zapisi_json(drzave, 'drzave.json')
|
nilq/baby-python
|
python
|
#
# MythBox for XBMC
#
# Copyright (C) 2011 analogue@yahoo.com
# http://mythbox.googlecode.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
__scriptname__ = "MythBox for XBMC"
__author__ = "analogue@yahoo.com"
__url__ = "http://mythbox.googlecode.com"
__git_url__ = "http://github.com/analogue/mythbox"
__credits__ = "bunch of ppl"
if __name__ == '__main__':
print __scriptname__
# WinPDB debugger
#import rpdb2
#rpdb2.start_embedded_debugger('xxx')
import os, sys, xbmcaddon
scriptDir = xbmcaddon.Addon('script.mythbox').getAddonInfo('path')
sys.path.insert(0, os.path.join(scriptDir, 'resources', 'src'))
import xbmcgui
import xbmc
splash = xbmcgui.WindowXML('mythbox_splash.xml', scriptDir)
splash.show()
from mythbox.bootstrapper import BootStrapper
BootStrapper(splash).run()
|
nilq/baby-python
|
python
|
import io
import json
import os
import click
from demisto_sdk.commands.common.constants import (PACK_METADATA_SUPPORT,
PACKS_DIR,
PACKS_PACK_META_FILE_NAME,
FileType)
from demisto_sdk.commands.common.errors import (ERROR_CODE,
FOUND_FILES_AND_ERRORS,
FOUND_FILES_AND_IGNORED_ERRORS,
PRESET_ERROR_TO_CHECK,
PRESET_ERROR_TO_IGNORE)
from demisto_sdk.commands.common.tools import (find_type, get_pack_name,
get_yaml)
class BaseValidator:
def __init__(self, ignored_errors=None, print_as_warnings=False, suppress_print: bool = False):
self.ignored_errors = ignored_errors if ignored_errors else {}
self.print_as_warnings = print_as_warnings
self.checked_files = set() # type: ignore
self.suppress_print = suppress_print
@staticmethod
def should_ignore_error(error_code, ignored_errors):
"""Return True is code should be ignored and False otherwise"""
if ignored_errors is None:
return False
# check if specific codes are ignored
if error_code in ignored_errors:
return True
# in case a whole section of codes are selected
code_type = error_code[:2]
if code_type in ignored_errors:
return True
return False
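    # Illustrative examples (hypothetical error codes): with
    # ignored_errors == ['BA101', 'ST'], the exact code 'BA101' and any code
    # in the 'ST' section (e.g. 'ST109') would both be ignored.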
def handle_error(self, error_message, error_code, file_path, should_print=True, suggested_fix=None, warning=False,
drop_line=False):
"""Handle an error that occurred during validation
Args:
drop_line (bool): Whether to drop a line at the beginning of the error message
warning (bool): Print the error as a warning
suggested_fix(str): A suggested fix
error_message(str): The error message
file_path(str): The file from which the error occurred
error_code(str): The error code
should_print(bool): whether the command should be printed
Returns:
            str. Will return the formatted error message if it is not ignored, or None if it is ignored
"""
formatted_error = f"{file_path}: [{error_code}] - {error_message}".rstrip("\n") + "\n"
if drop_line:
formatted_error = "\n" + formatted_error
if file_path:
if not isinstance(file_path, str):
file_path = str(file_path)
file_name = os.path.basename(file_path)
self.check_file_flags(file_name, file_path)
else:
file_name = 'No-Name'
if self.should_ignore_error(error_code, self.ignored_errors.get(file_name)) or warning:
if self.print_as_warnings or warning:
click.secho(formatted_error, fg="yellow")
self.add_to_report_error_list(error_code, file_path, FOUND_FILES_AND_IGNORED_ERRORS)
return None
if should_print and not self.suppress_print:
if suggested_fix:
click.secho(formatted_error[:-1], fg="bright_red")
if error_code == 'ST109':
click.secho("Please add to the root of the yml a description.\n", fg="bright_red")
else:
click.secho(suggested_fix + "\n", fg="bright_red")
else:
click.secho(formatted_error, fg="bright_red")
self.add_to_report_error_list(error_code, file_path, FOUND_FILES_AND_ERRORS)
return formatted_error
def check_file_flags(self, file_name, file_path):
if file_name not in self.checked_files:
self.check_deprecated(file_path)
self.update_checked_flags_by_support_level(file_path)
self.checked_files.add(file_name)
def check_deprecated(self, file_path):
if file_path.endswith('.yml'):
yml_dict = get_yaml(file_path)
if ('deprecated' in yml_dict and yml_dict['deprecated'] is True) or \
(find_type(file_path) == FileType.PLAYBOOK and 'hidden' in yml_dict and
yml_dict['hidden'] is True):
self.add_flag_to_ignore_list(file_path, 'deprecated')
@staticmethod
def get_metadata_file_content(meta_file_path):
with io.open(meta_file_path, mode="r", encoding="utf-8") as file:
metadata_file_content = file.read()
return json.loads(metadata_file_content)
def update_checked_flags_by_support_level(self, file_path):
pack_name = get_pack_name(file_path)
if pack_name:
metadata_path = os.path.join(PACKS_DIR, pack_name, PACKS_PACK_META_FILE_NAME)
metadata_json = self.get_metadata_file_content(metadata_path)
support = metadata_json.get(PACK_METADATA_SUPPORT)
if support in ('partner', 'community'):
self.add_flag_to_ignore_list(file_path, support)
@staticmethod
def create_reverse_ignored_errors_list(errors_to_check):
ignored_error_list = []
all_errors = ERROR_CODE.values()
for error_code in all_errors:
error_type = error_code[:2]
if error_code not in errors_to_check and error_type not in errors_to_check:
ignored_error_list.append(error_code)
return ignored_error_list
def add_flag_to_ignore_list(self, file_path, flag):
additional_ignored_errors = []
if flag in PRESET_ERROR_TO_IGNORE:
additional_ignored_errors = PRESET_ERROR_TO_IGNORE[flag]
elif flag in PRESET_ERROR_TO_CHECK:
additional_ignored_errors = self.create_reverse_ignored_errors_list(PRESET_ERROR_TO_CHECK[flag])
file_name = os.path.basename(file_path)
if file_name in self.ignored_errors:
self.ignored_errors[file_name].extend(additional_ignored_errors)
else:
self.ignored_errors[file_name] = additional_ignored_errors
@staticmethod
def add_to_report_error_list(error_code, file_path, error_list):
formatted_file_and_error = f'{file_path} - [{error_code}]'
if formatted_file_and_error not in error_list:
error_list.append(formatted_file_and_error)
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from deeploader.dataset.dataset_base import ArrayDataset
import util
from dataset.data_util import get_img
def rotate(angle, x, y):
"""
    Rotate the point (x, y) about the origin by `angle` radians.
    :param angle: rotation angle in radians
    :param x: x coordinate
    :param y: y coordinate
    :return: the rotated (x, y) pair
"""
rotatex = math.cos(angle) * x - math.sin(angle) * y
rotatey = math.cos(angle) * y + math.sin(angle) * x
return rotatex, rotatey
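# Quick sanity check (illustrative): rotating the point (1, 0) by pi/2 radians
# about the origin yields approximately (0, 1):
#   rotate(math.pi / 2, 1, 0)  ->  (~0.0, ~1.0)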
def xy_rotate(theta, x, y, centerx, centery):
"""
    Rotate the point (x, y) around the center (centerx, centery).
:param theta:
:param x:
:param y:
:param centerx:
:param centery:
:return:
"""
r_x, r_y = rotate(theta, x - centerx, y - centery)
return centerx + r_x, centery + r_y
def rbox2quad(x, y, width, height, theta):
"""
    Given a rectangle's x, y, width, height and rotation angle (radians),
    convert it to QUAD format (four corner points).
:param x:
:param y:
:param width:
:param height:
:param theta:
:return:
"""
centerx = x + width / 2
centery = y + height / 2
    x1, y1 = xy_rotate(theta, x, y, centerx, centery)
    x2, y2 = xy_rotate(theta, x + width, y, centerx, centery)
    x3, y3 = xy_rotate(theta, x + width, y + height, centerx, centery)
    x4, y4 = xy_rotate(theta, x, y + height, centerx, centery)
return [x1, y1, x2, y2, x3, y3, x4, y4]
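# Illustrative example: with theta == 0 the quad is simply the axis-aligned
# rectangle's corners:
#   rbox2quad(0, 0, 2, 1, 0.0)  ->  [0, 0, 2, 0, 2, 1, 0, 1]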
def get_bboxes(img, gt_path):
lines = util.io.read_lines(gt_path)
bboxes = []
tags = []
for line in lines:
line = util.str.remove_all(line, '\xef\xbb\xbf')
gt = util.str.split(line, ' ')
        diff = int(gt[1])
        x, y, w, h = int(gt[2]), int(gt[3]), int(gt[4]), int(gt[5])
        angle = float(gt[-1])
bbox = rbox2quad(x, y, w, h, angle)
bbox = np.array(bbox).reshape((4, 2)).tolist()
bboxes.append(bbox)
if diff == 1:
tags.append(False)
else:
tags.append(True)
return bboxes, tags
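# Each ground-truth line is assumed to look like (illustrative values):
#   "<index> <difficult> <x> <y> <w> <h> <angle>", e.g. "0 1 100 200 300 150 0.05"
# where gt[1] is the difficulty flag and the last field is the angle in radians.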
class MSRATD500Dataset(ArrayDataset):
def __init__(self, ctw_root='.', split='train', **kargs):
ArrayDataset.__init__(self, **kargs)
ctw_root_dir = ctw_root + '/MSRA-TD500/'
ctw_train_data_dir = ctw_root_dir + 'train/'
ctw_train_gt_dir = ctw_root_dir + 'train/'
ctw_test_data_dir = ctw_root_dir + 'test/'
ctw_test_gt_dir = ctw_root_dir + 'test/'
if split == 'train':
data_dirs = [ctw_train_data_dir]
gt_dirs = [ctw_train_gt_dir]
else:
data_dirs = [ctw_test_data_dir]
gt_dirs = [ctw_test_gt_dir]
self.img_paths = []
self.gt_paths = []
for data_dir, gt_dir in zip(data_dirs, gt_dirs):
img_names = util.io.ls(data_dir, '.jpg')
img_names.sort()
img_paths = []
gt_paths = []
for idx, img_name in enumerate(img_names):
img_path = data_dir + img_name
img_paths.append(img_path)
gt_name = img_name.split('.')[0] + '.gt'
gt_path = gt_dir + gt_name
gt_paths.append(gt_path)
self.img_paths.extend(img_paths)
self.gt_paths.extend(gt_paths)
def size(self):
return len(self.img_paths)
def getData(self, index):
"""
Load MSRA-TD500 data
:param index: zero-based data index
:return: A dict like { img: RGB, bboxes: nxkx2 np array, tags: n }
"""
img_path = self.img_paths[index]
gt_path = self.gt_paths[index]
# RGB
img = get_img(img_path)
# bbox normed to 0~1
bboxes, tags = get_bboxes(img, gt_path)
item = {'img': img, 'type': 'contour', 'bboxes': bboxes, 'tags': tags,
'path': img_path}
return item
|
nilq/baby-python
|
python
|
from abc import abstractmethod
from dataclasses import dataclass
import textwrap
from typing import Any, Callable, Dict, Iterable, Iterator, List, Sequence, Tuple, Union
import clingo
from clingo import MessageCode, Symbol, SymbolicAtom
from clingo import ast
from clingo.ast import parse_string
from eclingo.prefixes import atom_user_name
from .mappings import EpistemicSymbolToTestSymbolMapping, SymbolToEpistemicLiteralMapping, SymbolToEpistemicLiteralMappingUsingProgramLiterals, SymbolToEpistemicLiteralMappingUsingShowStatements
import clingox
from clingox import program as clingox_program
from clingox.backend import SymbolicBackend
class ASTParsedObject():
pass
ASTObject = Union[ASTParsedObject, ast.AST] # pylint: disable=no-member
@dataclass(frozen=True)
class ShowStatement(ASTParsedObject):
name: str
arity: int
    positive: bool
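    # Illustrative assumption: ShowStatement("p", 2, True) would record a
    # "#show p/2." directive encountered while parsing the input program.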
class ProgramBuilder():
def __init__(self, control, show_signature: set[ShowStatement]):
self.control = control
self.show_signature = show_signature
        self.builder = clingo.ast.ProgramBuilder(self.control)
def add(self, statement: ASTObject):
if isinstance(statement, ShowStatement):
self.show_signature.add(statement)
elif isinstance(statement, ast.AST):
            return self.builder.add(statement)
else:
raise RuntimeError("Non recognised object: " + str(statement))
def __enter__(self):
        self.builder.__enter__()
return self
def __exit__(self, type_, value, traceback):
        return self.builder.__exit__(type_, value, traceback)
class InternalStateControl(object):
def __init__(self, arguments: Sequence[str] = (), logger: Callable[[MessageCode, str], None] = None, message_limit: int = 20, *, control: clingo.Control = None):
if control is None:
control = clingo.Control(arguments, logger, message_limit)
self.control = control
self.ground_program = clingox_program.Program()
self.control.register_observer(clingox_program.ProgramObserver(self.ground_program))
self.show_signature: set[ShowStatement] = set()
self.epistemic_to_test_mapping = EpistemicSymbolToTestSymbolMapping()
self.show_mapping = SymbolToEpistemicLiteralMapping()
def add_program(self, program: str) -> None:
with self.builder() as builder:
parse_string(program, builder.add)
def builder(self) -> ProgramBuilder:
return ProgramBuilder(self.control, self.show_signature)
def add_to(self, control: Union['InternalStateControl', clingo.Control]):
program = self.ground_program
with control.backend() as backend:
mapping = clingox_program.Remapping(backend, program.output_atoms, program.facts)
program.add_to_backend(backend, mapping)
return mapping
def facts(self) -> Iterable[Symbol]:
for symbolic_atom in self.control.symbolic_atoms:
if symbolic_atom.is_fact:
yield symbolic_atom.symbol
def show_symbols(self) -> Iterator[Symbol]:
for symbolic_atom in self.show_symbolic_atoms():
yield symbolic_atom.symbol
def atom_to_symbol_mapping(self) -> Dict[int, Symbol]:
mapping = dict()
for symbolic_atom in self.control.symbolic_atoms:
if not symbolic_atom.is_fact:
mapping.update({symbolic_atom.literal : symbolic_atom.symbol})
return mapping
def show_symbolic_atoms(self) -> Iterator[SymbolicAtom]:
for show_statement in self.show_signature:
symbolic_atoms = self.control.symbolic_atoms
            show_statement_user_name = atom_user_name(show_statement.name)
            yield from symbolic_atoms.by_signature(show_statement_user_name, show_statement.arity, show_statement.positive)
def ground(self, parts: Sequence[Tuple[str, Sequence[Symbol]]], context: Any = None) -> None:
self.control.ground(parts, context)
self.epistemic_to_test_mapping = EpistemicSymbolToTestSymbolMapping(self.control.symbolic_atoms)
self.show_mapping = self._generate_show_mapping()
def _generate_show_mapping(self) -> SymbolToEpistemicLiteralMapping:
if self.show_signature:
return SymbolToEpistemicLiteralMappingUsingShowStatements(self.show_symbols())
else:
return SymbolToEpistemicLiteralMappingUsingProgramLiterals(self.epistemic_to_test_mapping.epistemic_literals())
def symbolic_backend(self) -> SymbolicBackend:
return clingox.backend.SymbolicBackend(self.control.backend())
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.control, attr)
class Application(object):
@abstractmethod
def main(self, control: InternalStateControl, files: Sequence[str]) -> None:
raise NotImplementedError
class ApplicationWrapper(clingo.Application):
def __init__(self, application):
self.application = application
def main(self, control: clingo.Control, files: Sequence[str]) -> None:
internal_control = InternalStateControl(control=control)
return self.application.main(internal_control, files)
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.application, attr)
def clingo_main(application: Application, files: Sequence[str] = ()) -> int:
application_wrapper = ApplicationWrapper(application)
return clingo.clingo_main(application_wrapper, files)
|
nilq/baby-python
|
python
|
"""Testing module for priorityq."""
import pytest
@pytest.fixture
def test_q():
"""Test fixtures of priority qs."""
from src.priorityq import PriorityQ
q0 = PriorityQ()
q1 = PriorityQ()
q1.insert('sgds', 10)
q1.insert('another', 9)
q1.insert('another', 8)
q1.insert('another', 7)
q1.insert('another', 6)
return q0, q1
def test_priority_q_insert(test_q):
"""Test priorityq insert on a list of none."""
test_q[0].insert('sgds', 10)
assert test_q[0]._container.container[1] == (10, 'sgds')
def test_priority_q_insert_multiple(test_q):
"""Test priorityq insert multi on a list of none."""
assert test_q[1]._container.container[1] == (10, 'sgds')
def test_priority_q_new_highest(test_q):
"""Test priorityq changes head with new highest priority."""
test_q[1].insert('highest', 100)
assert test_q[1]._container.container[1] == (100, 'highest')
def test_priority_q_pop(test_q):
"""Test priority q pop, remove highest priority."""
assert test_q[1].pop() == 'sgds'
def test_priority_q_pop_empty(test_q):
"""Test priority q pop, raises index error on empty."""
with pytest.raises(IndexError):
test_q[0].pop()
def test_peek_returns_highest_priority(test_q):
"""Test priority q returns highest value."""
assert test_q[1].peek() == 'sgds'
def test_priority_q_peek_empty(test_q):
"""Test priority q peek, returns None."""
assert test_q[0].peek() is None
|
nilq/baby-python
|
python
|
###
#
# Lenovo Redfish examples - Get metric inventory
#
# Copyright Notice:
#
# Copyright 2019 Lenovo Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
###
import sys
import redfish
import json
import lenovo_utils as utils
def get_metric_definition_report(ip, login_account, login_password):
"""Get metric inventory
:params ip: BMC IP address
:type ip: string
:params login_account: BMC user name
:type login_account: string
:params login_password: BMC user password
:type login_password: string
:returns: returns metric inventory when succeeded or error message when failed
"""
result = {}
try:
# Connect using the BMC address, account name, and password
# Create a REDFISH object
login_host = "https://" + ip
REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account,
password=login_password, default_prefix='/redfish/v1', cafile=utils.g_CAFILE)
# Login into the server and create a session
REDFISH_OBJ.login(auth=utils.g_AUTH)
except:
result = {'ret': False, 'msg': "Please check if the username, password, IP is correct."}
return result
# Get ServiceRoot resource
response_base_url = REDFISH_OBJ.get('/redfish/v1', None)
# Get response_telemetry_service_url
if response_base_url.status == 200:
if 'TelemetryService' in response_base_url.dict:
telemetry_service_url = response_base_url.dict['TelemetryService']['@odata.id']
else:
result = {'ret': False, 'msg': "TelemetryService is not supported"}
REDFISH_OBJ.logout()
return result
else:
result = {'ret': False, 'msg': "Access url /redfish/v1 failed. Error code %s" % response_base_url.status}
REDFISH_OBJ.logout()
return result
response_telemetry_service_url = REDFISH_OBJ.get(telemetry_service_url, None)
if response_telemetry_service_url.status != 200:
result = {'ret': False, 'msg': "Access url %s failed. Error code %s" % (telemetry_service_url, response_telemetry_service_url.status)}
REDFISH_OBJ.logout()
return result
metric_inventory = {}
# Get MetricDefinition collection
metric_collection_url = response_telemetry_service_url.dict['MetricDefinitions']['@odata.id']
response_metric_collection_url = REDFISH_OBJ.get(metric_collection_url, None)
if response_metric_collection_url.status != 200:
result = {'ret': False, 'msg': "Access url %s failed. Error code %s" % (metric_collection_url, response_metric_collection_url.status)}
REDFISH_OBJ.logout()
return result
# Get each MetricDefinition
    metric_definitions = []
for metric_member in response_metric_collection_url.dict["Members"]:
metric_url = metric_member['@odata.id']
metric_list = metric_url.split("/")
response_metric_url = REDFISH_OBJ.get(metric_url, None)
if response_metric_url.status == 200:
metric_detail = {}
for property in response_metric_url.dict:
if property not in ["Description","@odata.context","@odata.id","@odata.type","@odata.etag", "Links", "Actions", "RelatedItem"]:
metric_detail[property] = response_metric_url.dict[property]
metric_entry = {metric_list[-1]: metric_detail}
            metric_definitions.append(metric_entry)
else:
result = {'ret': False,
'msg': "Access url %s failed. Error code %s" %(metric_url, response_metric_url.status)}
REDFISH_OBJ.logout()
return result
# Get MetricReports collection
metric_collection_url = response_telemetry_service_url.dict['MetricReports']['@odata.id']
response_metric_collection_url = REDFISH_OBJ.get(metric_collection_url, None)
if response_metric_collection_url.status != 200:
result = {'ret': False, 'msg': "Access url %s failed. Error code %s" % (metric_collection_url, response_metric_collection_url.status)}
REDFISH_OBJ.logout()
return result
# Get each MetricReport
metric_reports = []
for metric_member in response_metric_collection_url.dict["Members"]:
metric_url = metric_member['@odata.id']
metric_list = metric_url.split("/")
response_metric_url = REDFISH_OBJ.get(metric_url, None)
if response_metric_url.status == 200:
metric_detail = {}
for property in response_metric_url.dict:
if property not in ["Description","@odata.context","@odata.id","@odata.type","@odata.etag", "Links", "Actions", "RelatedItem"]:
metric_detail[property] = response_metric_url.dict[property]
metric_entry = {metric_list[-1]: metric_detail}
metric_reports.append(metric_entry)
else:
result = {'ret': False,
'msg': "Access url %s failed. Error code %s" %(metric_url, response_metric_url.status)}
REDFISH_OBJ.logout()
return result
# Set result
    metric_inventory['MetricDefinitions'] = metric_definitions
metric_inventory['MetricReports'] = metric_reports
result['ret'] = True
result['metric_inventory'] = metric_inventory
try:
REDFISH_OBJ.logout()
except:
pass
return result
def add_parameter():
argget = utils.create_common_parameter_list()
args = argget.parse_args()
parameter_info = utils.parse_parameter(args)
return parameter_info
if __name__ == '__main__':
# Get parameters from config.ini and/or command line
parameter_info = add_parameter()
# Get connection info from the parameters user specified
ip = parameter_info['ip']
login_account = parameter_info["user"]
login_password = parameter_info["passwd"]
# Get metric inventory and check result
result = get_metric_definition_report(ip, login_account, login_password)
if result['ret'] is True:
del result['ret']
sys.stdout.write(json.dumps(result['metric_inventory'], sort_keys=True, indent=2) + '\n')
else:
sys.stderr.write(result['msg'] + '\n')
|
nilq/baby-python
|
python
|
#!usr/bin/python
# -*- coding:utf8 -*-
# List comprehensions
# 1. Extract the odd numbers between 1 and 20
# odd_list = []
# for i in range(21):
# if i % 2 == 1:
# odd_list.append(i)
# odd_list = [i for i in range(21) if i % 2 == 1]
# print(odd_list)
# 2. More complex logic: square the value when it is odd
# List comprehensions perform better than building the list by hand
def handle_item(item):
return item * item
odd_list = [handle_item(i) for i in range(21) if i % 2 == 1]
print(odd_list)
# Generator expression
odd_gen = (i for i in range(21) if i % 2 == 1)
print(type(odd_gen))
for item in odd_gen:
print(item)
# Dict comprehension
my_dict = {"bobby1": 22, "bobby2": 23, "imooc.com": 5}
reversed_dict = {value:key for key, value in my_dict.items()}
print(reversed_dict)
# Set comprehension
my_set = set(my_dict.keys())
my_set = {key for key, value in my_dict.items()}
print(type(my_set))
|
nilq/baby-python
|
python
|
"""
Desenvolva uma lógica que leia o peso e a altura de uma pessoa,
calcule seu IMC e mostre seu status.
Rasgue as minhas cartas
E não me procure mais
Assim será melhor, meu bem
O retrato que eu te dei
Se ainda tens, não sei
Mas se tiver, devolva-me
Devolva-me - Adriana Calcanhotto ♪♫
"""
peso = float(input('Enter your weight (kg): '))
altura = float(input('Enter your height (m): '))
imc = peso / altura ** 2
print('With a BMI of {:.2f} you are '.format(imc), end='')
if imc < 18.5:
    print('underweight!')
elif imc < 25:
    print('at a healthy weight!')
elif imc < 30:
    print('overweight!')
elif imc < 40:
    print('obese!')
else:
    print('morbidly obese!')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from flask import Flask
from peewee import MySQLDatabase
from celery import Celery
from config import config
db = MySQLDatabase(None)
def create_app(config_name):
"""
创建flask应用对象
:param config_name:
:return:
"""
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
db.init(**app.config['MYSQL'])
from .models import models
db.create_tables(models, safe=True)
from .hooks import before_app_request, after_app_request
app.before_request(before_app_request)
app.teardown_request(after_app_request)
from .blueprints.cms_main import bp_cms_main
from .blueprints.cms_api import bp_cms_api
from .blueprints.open_main import bp_open_main
from .blueprints.open_api import bp_open_api
from .blueprints.sample_h5_main import bp_sample_h5_main
from .blueprints.sample_h5_api import bp_sample_h5_api
app.register_blueprint(bp_cms_main, subdomain=app.config['SUBDOMAIN'].get('cms_main'))
app.register_blueprint(bp_cms_api, subdomain=app.config['SUBDOMAIN'].get('cms_api'), url_prefix='/api')
app.register_blueprint(bp_open_main, subdomain=app.config['SUBDOMAIN'].get('open_main'))
app.register_blueprint(bp_open_api, subdomain=app.config['SUBDOMAIN'].get('open_api'), url_prefix='/api')
app.register_blueprint(bp_sample_h5_main, subdomain=app.config['SUBDOMAIN'].get('sample_h5_main'))
app.register_blueprint(bp_sample_h5_api, subdomain=app.config['SUBDOMAIN'].get('sample_h5_api'), url_prefix='/api')
return app
def create_celery_app(app=None):
"""
创建celery应用对象
:param app:
:return:
"""
import os
app = app or create_app(os.getenv('FLASK_CONFIG') or 'default')
celery = Celery(app.import_name)
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
|
nilq/baby-python
|
python
|
from __future__ import print_function, division
#
import sys,os
quspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,quspin_path)
#
from quspin.operators import hamiltonian # Hamiltonians and operators
from quspin.basis import spinful_fermion_basis_1d # Hilbert space spinful fermion basis
import numpy as np # generic math functions
#
##### define model parameters #####
L=6 # system size
J=1.0 # hopping strength
U=np.sqrt(2) # onsite interaction strength
#
##### construct basis at half-filling in the 0-total momentum and +1-spin flip sector
basis=spinful_fermion_basis_1d(L=L,Nf=(L//2,L//2),a=1,kblock=0,sblock=1)
print(basis)
#
##### define PBC site-coupling lists for operators
# define site-coupling lists
hop_right = [[-J,i,(i+1)%L] for i in range(L)] # hopping to the right PBC
hop_left = [[J,i,(i+1)%L] for i in range(L)] # hopping to the left PBC
int_list = [[U,i,i] for i in range(L)] # onsite interaction
# static and dynamic lists
static= [
["+-|", hop_left], # up hop left
["-+|", hop_right], # up hop right
["|+-", hop_left], # down hop left
["|-+", hop_right], # down hop right
["n|n", int_list], # onsite interaction
]
dynamic=[]
###### construct Hamiltonian
H=hamiltonian(static,dynamic,dtype=np.float64,basis=basis)
|
nilq/baby-python
|
python
|
"""
Sponge Knowledge Base
Action metadata Record type
"""
def createBookType(name):
return RecordType(name, [
IntegerType("id").withNullable().withLabel("Identifier"),
StringType("author").withLabel("Author"),
StringType("title").withLabel("Title")
])
BOOK = {"id":1, "author":"James Joyce", "title":"Ulysses"}
class RecordAsResultAction(Action):
def onConfigure(self):
self.withArg(IntegerType("bookId")).withResult(createBookType("book").withNullable())
def onCall(self, bookId):
global BOOK
return BOOK if bookId == BOOK["id"] else None
class RecordAsArgAction(Action):
def onConfigure(self):
self.withArg(createBookType("book")).withNoResult()
def onCall(self, book):
global BOOK
BOOK = {"id":1, "author":book["author"], "title":book["title"]}
|
nilq/baby-python
|
python
|
"""
restriction generaters representing sets of packages
"""
|
nilq/baby-python
|
python
|
# http://www.geeksforgeeks.org/design-a-stack-that-supports-getmin-in-o1-time-and-o1-extra-space/
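# Trick: when pushing a value below the current minimum, store the encoded
# value 2*val - minimum (always smaller than the new minimum) and set
# minimum = val. On pop, a stored value below the current minimum marks an
# encoded entry: the real element is the current minimum, and the previous
# minimum is recovered as 2*minimum - value.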
from sys import maxint
class MyStack:
def __init__(self):
self.minimum = -maxint-1
self.stack = []
def push(self,val):
if not self.stack:
self.minimum = val
self.stack.append(val)
else:
if val > self.minimum:
self.stack.append(val)
else:
self.stack.append(2*val - self.minimum)
self.minimum = val
def pop(self):
if self.stack:
val = self.stack.pop()
if val >= self.minimum:
return val
            else:
                # encoded entry: the popped element is the current minimum;
                # restore the previous minimum before returning it
                popped = self.minimum
                self.minimum = 2*self.minimum - val
                return popped
else:
return None
if __name__ == "__main__":
s = MyStack()
print s.push(3), s.stack,s.minimum
print s.push(5), s.stack,s.minimum
print s.push(2), s.stack,s.minimum
print s.push(1), s.stack,s.minimum
print s.push(1), s.stack,s.minimum
print s.push(-1), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
print s.pop(), s.stack,s.minimum
|
nilq/baby-python
|
python
|
import pygame
import math
pygame.font.init()
DEBUG_FONT = pygame.font.Font(None, 22)
def get_surface(obj):
""" Returns a Surface representing the parameter.
if obj is the filename of an image, a surface containing the image will be returned.
if obj is a Surface, it will be returned unchanged.
"""
if isinstance(obj, pygame.Surface):
return obj
return pygame.image.load(obj)
def get_anchor(obj, anchor):
""" Returns the point representing the anchor on the given Surface or Rect.
obj can be a Surface or Rect.
anchor should be a string of one of the point attributes (e.g. 'topleft',
'center', 'midbottom', etc.).
"""
if anchor not in ['topleft', 'bottomleft', 'topright', 'bottomright',
'midtop', 'midleft', 'midbottom', 'midright', 'center']:
raise ValueError('Invalid anchor')
try:
return getattr(obj.get_rect(), anchor)
except AttributeError:
return getattr(obj, anchor)
def blit_anchors(dest, dest_anchor, src, src_anchor):
""" Blits the source onto the destination such that their anchors align.
src_anchor and dest_anchor can be strings of one of the point attributes (topleft, center,
midbottom, etc.) or a position on their respective surfaces (e.g [100, 200]).
"""
try:
src_anchor = get_anchor(src, src_anchor)
except ValueError:
pass # Assume src_anchor is already a point. If not, it will fail in the map().
try:
dest_anchor = get_anchor(dest, dest_anchor)
except ValueError:
pass # Assume dest_anchor is already a point. If not, it will fail in the map().
topleft = list(map(lambda a,b,c: a - b + c, src.get_rect().topleft, src_anchor, dest_anchor))
dest.blit(src, topleft)
def get_color(obj):
""" Returns a Color object representing the parameter.
"""
try:
return pygame.Color(obj)
except ValueError:
if isinstance(obj, basestring): # Invalid color name
raise
elif len(obj) not in range(1, 5):
raise ValueError('Object does not represent a color')
else:
return obj
def draw_fps(surface, clock, anchor='topright', color='red'):
""" Draws an FPS counter on a surface at the given anchor.
"""
fps_surface = DEBUG_FONT.render(str(int(clock.get_fps())), True, get_color(color))
blit_anchors(surface, anchor, fps_surface, anchor)
def font_render_multiline(font, text, antialias, color, background=None, justify='left', line_spacing=0):
""" Returns a Surface containing the text in the given font.
The first five parameters are the ones used to render single line text.
justify can be 'left', 'right', or 'center'.
line_spacing is how much space to leave between lines in units of the font's height.
"""
anchors = {'left':'topleft', 'right':'topright', 'center':'center'}
lines = text.split('\n')
width = max([font.size(line)[0] for line in lines])
line_height = font.size('')[1]
height = math.ceil(line_height * (len(lines) + line_spacing * (len(lines) - 1)))
multiline = pygame.Surface((width, height))
if background is not None:
multiline.set_colorkey(background)
multiline.fill(background)
else:
multiline.convert_alpha()
multiline.fill([128, 128, 128, 0])
anchor_x = getattr(multiline.get_rect(), justify)
try:
anchor_x = anchor_x[0]
except:
pass
y = 0
while len(lines):
if background is None:
line = font.render(lines.pop(0), antialias, color)
else:
line = font.render(lines.pop(0), antialias, color, background)
dest_anchor = [anchor_x, int(y)]
blit_anchors(multiline, dest_anchor, line, anchors[justify])
y += (1 + line_spacing) * line_height
return multiline
def offset(point, offset):
""" Offsets a point by an amount.
Equivalent to adding vectors.
"""
return tuple(map(sum, zip(point, offset)))
def rect_largest_fit(inner, outer):
""" Moves and resizes a Rect to the largest it can be while still fitting in another Rect and maintaining its aspect ratio.
"""
# TODO: check behavior when inner is larger than outer in one or both dimensions
inner.topleft = outer.topleft
    w_ratio = outer.w / float(inner.w)
    h_ratio = outer.h / float(inner.h)
if w_ratio < h_ratio:
inner.w = outer.w
inner.h *= w_ratio
else:
inner.h = outer.h
inner.w *= h_ratio
class FloatRect(object):
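    """A Rect-like rectangle that keeps its coordinates as floats."""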
def __init__(self, topleft, size):
self._left, self._top = map(float, topleft)
self._width, self._height = map(float, size)
        self._half_width, self._half_height = [a / 2.0 for a in size]
        self._centerx = self._left + self._half_width
        self._centery = self._top + self._half_height
self._right = self._left + self._width
self._bottom = self._top + self._height
def left():
doc = "The left property."
def fget(self):
return self._left
def fset(self, value):
flt = float(value)
self._right += flt - self._left
self._left = flt
self._centerx = flt + self._half_width
def fdel(self):
del self._left
return locals()
left = property(**left())
def right():
doc = "The right property."
def fget(self):
return self._right
def fset(self, value):
flt = float(value)
self._left += flt - self._right
self._right = flt
self._centerx = self._left + self._half_width
def fdel(self):
del self._right
return locals()
right = property(**right())
def top():
doc = "The top property."
def fget(self):
return self._top
def fset(self, value):
flt = float(value)
self._bottom += flt - self._top
self._top = flt
self._centery = flt + self._half_height
def fdel(self):
del self._top
return locals()
top = property(**top())
def bottom():
doc = "The bottom property."
def fget(self):
return self._bottom
def fset(self, value):
flt = float(value)
self._top += flt - self._bottom
self._bottom = flt
self._centery = self._top + self._half_height
def fdel(self):
del self._bottom
return locals()
bottom = property(**bottom())
def centerx():
doc = "The centerx property."
def fget(self):
return self._centerx
def fset(self, value):
flt = float(value)
self._left = flt - self._half_width
self._right = flt + self._half_width
self._centerx = flt
def fdel(self):
del self._centerx
return locals()
centerx = property(**centerx())
def centery():
doc = "The centery property."
def fget(self):
return self._centery
def fset(self, value):
flt = float(value)
self._top = flt - self._half_height
self._bottom = flt + self._half_height
self._centery = flt
def fdel(self):
del self._centery
return locals()
centery = property(**centery())
def width():
doc = "The width property."
def fget(self):
return self._width
def fset(self, value):
flt = float(value)
self._width = flt
self._half_width = flt / 2
self.centerx = self.centerx # Set left and right
def fdel(self):
del self._width
return locals()
w = width = property(**width())
def height():
doc = "The height property."
def fget(self):
return self._height
def fset(self, value):
flt = float(value)
self._height = flt
self._half_height = flt / 2
self.centery = self.centery # Set top and bottom
def fdel(self):
del self._height
return locals()
h = height = property(**height())
def size():
doc = "The size property."
def fget(self):
return [self.width, self.height]
def fset(self, value):
self.width, self.height = value
return locals()
size = property(**size())
def topleft():
doc = "The topleft property."
def fget(self):
return [self.left, self.top]
def fset(self, value):
self.left, self.top = value
return locals()
topleft = property(**topleft())
def bottomleft():
doc = "The bottomleft property."
def fget(self):
return [self.left, self.bottom]
def fset(self, value):
self.left, self.bottom = value
return locals()
bottomleft = property(**bottomleft())
def topright():
doc = "The topright property."
def fget(self):
return [self.right, self.top]
def fset(self, value):
self.right, self.top = value
return locals()
topright = property(**topright())
def bottomright():
doc = "The bottomright property."
def fget(self):
return [self.right, self.bottom]
def fset(self, value):
self.right, self.bottom = value
return locals()
bottomright = property(**bottomright())
def midtop():
doc = "The midtop property."
def fget(self):
return [self.centerx, self.top]
def fset(self, value):
self.centerx, self.top = value
return locals()
midtop = property(**midtop())
def midleft():
doc = "The midleft property."
def fget(self):
return [self.left, self.centery]
def fset(self, value):
self.left, self.centery = value
return locals()
midleft = property(**midleft())
def midbottom():
doc = "The midbottom property."
def fget(self):
return [self.centerx, self.bottom]
def fset(self, value):
self.centerx, self.bottom = value
return locals()
midbottom = property(**midbottom())
def midright():
doc = "The midright property."
def fget(self):
return [self.right, self.centery]
def fset(self, value):
self.right, self.centery = value
return locals()
midright = property(**midright())
def __repr__(self):
return 'FloatRect(%s, %s)' % (str(self.bottomleft), str(self.size))
class RectDivider(object):
""" Given a large Rect and a small one, allow iteration through non-overlapping locations of the small Rect
"""
returned_start = False
def __init__(self, outer, inner, direction='horizontal', horizontal='right', vertical='down', zigzag=False):
"""
outer is the outer Rect.
inner is the inner Rect and the first return value.
direction is whether to move 'vertically' or 'horizontally' first.
horizontal is whether to move 'left' or 'right' when moving horizontally.
vertical is whether to move 'up' or 'down' when moving vertically.
zigzag is whether to zigzag when reaching an edge rather than reset to the other side.
"""
self.outer = outer.copy()
self.inner = inner.copy()
self.zigzag = zigzag
# Resize self.outer so inner fits without any left over.
# This makes zigzagging simpler.
self.outer.w -= self.outer.w % self.inner.w
self.outer.h -= self.outer.h % self.inner.h
dir_err = ValueError('Invalid direction')
if direction == 'vertical':
self.d = 'v'
elif direction == 'horizontal':
self.d = 'h'
else:
raise dir_err
if horizontal == 'left':
self.h = -1
elif horizontal == 'right':
self.h = 1
else:
raise dir_err
if vertical == 'up':
self.v = -1
elif vertical == 'down':
self.v = 1
else:
raise dir_err
def __iter__(self): return self
def next(self):
if not self.returned_start:
self.returned_start = True
return self.inner
if self.d == 'h':
self.inner.left += self.h * self.inner.w
clamped = self.inner.clamp(self.outer)
if clamped != self.inner:
self.inner.top += self.v * self.inner.h
if self.zigzag:
self.h *= -1
if self.h == 1:
self.inner.left = self.outer.left
else:
self.inner.right = self.outer.right
else:
self.inner.top += self.v * self.inner.h
clamped = self.inner.clamp(self.outer)
if clamped != self.inner:
self.inner.left += self.h * self.inner.w
if self.zigzag:
self.v *= -1
if self.v == 1:
self.inner.top = self.outer.top
else:
self.inner.bottom = self.outer.bottom
clamped = self.inner.clamp(self.outer)
if clamped != self.inner:
raise StopIteration
return self.inner
|
nilq/baby-python
|
python
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.experimental.abc.abc_infer import ApproximateBayesianComputation
class ApproximateBayesianComputationTest(unittest.TestCase):
def setUp(self):
torch.manual_seed(8665309)
class CoinTossModel:
def __init__(self, observation_shape):
self.observation_shape = observation_shape
@bm.random_variable
def bias(self):
return dist.Beta(0.5, 0.5)
@bm.random_variable
def coin_toss(self):
return dist.Bernoulli(self.bias().repeat(self.observation_shape))
def toss_head_count(self, toss_vals):
return torch.sum(toss_vals)
def toss_mean(self, toss_vals):
return torch.mean(toss_vals)
@bm.functional
def num_heads(self):
return self.toss_head_count(self.coin_toss())
@bm.functional
def mean_value(self):
return self.toss_mean(self.coin_toss())
def test_abc_inference(self):
model = self.CoinTossModel(observation_shape=10)
COIN_TOSS_DATA = dist.Bernoulli(0.9).sample([10])
num_heads_key = model.num_heads()
mean_value_key = model.mean_value()
abc = ApproximateBayesianComputation(
tolerance={num_heads_key: 1.0, mean_value_key: 0.1}
)
observations = {
num_heads_key: model.toss_head_count(COIN_TOSS_DATA),
mean_value_key: model.toss_mean(COIN_TOSS_DATA),
}
queries = [model.bias()]
samples = abc.infer(
queries, observations, num_samples=10, num_chains=1, verbose=None
)
mean = torch.mean(samples[model.bias()][0])
self.assertTrue(mean.item() > 0.65)
abc.reset()
def test_abc_inference_with_singleton_arguments(self):
model = self.CoinTossModel(observation_shape=10)
COIN_TOSS_DATA = dist.Bernoulli(0.95).sample([10])
abc = ApproximateBayesianComputation(
distance_function=torch.dist, tolerance=1.0
)
observations = {
model.num_heads(): model.toss_head_count(COIN_TOSS_DATA),
model.mean_value(): model.toss_mean(COIN_TOSS_DATA),
}
queries = [model.bias()]
samples = abc.infer(
queries, observations, num_samples=10, num_chains=1, verbose=None
)
mean = torch.mean(samples[model.bias()][0])
self.assertTrue(mean.item() > 0.65)
abc.reset()
def test_single_inference_step(self):
model = self.CoinTossModel(observation_shape=10)
abc = ApproximateBayesianComputation(tolerance={model.num_heads(): 1.0})
abc.observations_ = {model.num_heads(): torch.tensor(15.0)}
self.assertEqual(abc._single_inference_step(), 0.0)
abc.reset()
def test_max_attempts(self):
model = self.CoinTossModel(observation_shape=100)
COIN_TOSS_DATA = dist.Bernoulli(0.9).sample([100])
abc = ApproximateBayesianComputation(
tolerance={model.num_heads(): 0.1}, max_attempts_per_sample=2
)
observations = {model.num_heads(): model.toss_head_count(COIN_TOSS_DATA)}
queries = [model.bias()]
with self.assertRaises(RuntimeError):
abc.infer(
queries, observations, num_samples=100, num_chains=1, verbose=None
)
abc.reset()
def test_shape_mismatch(self):
model = self.CoinTossModel(observation_shape=100)
abc = ApproximateBayesianComputation(tolerance={model.num_heads(): 0.1})
observations = {model.num_heads(): torch.tensor([3, 4])}
queries = [model.bias()]
with self.assertRaises(ValueError):
abc.infer(
queries, observations, num_samples=100, num_chains=1, verbose=None
)
abc.reset()
def test_simulate_mode(self):
model = self.CoinTossModel(observation_shape=10)
COIN_TOSS_DATA = dist.Bernoulli(0.9).sample([10])
abc = ApproximateBayesianComputation(
tolerance={model.num_heads(): 1, model.mean_value(): 0.1}
)
observations = {
model.num_heads(): model.toss_head_count(COIN_TOSS_DATA),
model.mean_value(): model.toss_mean(COIN_TOSS_DATA),
}
queries = [model.bias()]
samples = abc.infer(
queries, observations, num_samples=1, num_chains=1, verbose=None
)
# simulate 10 coin tosses from accepted bias sample
sim_observations = {model.bias(): samples[model.bias()][0]}
sim_queries = [model.coin_toss()]
sim_abc = ApproximateBayesianComputation(simulate=True)
sim_samples = sim_abc.infer(
sim_queries, sim_observations, num_samples=10, num_chains=1, verbose=None
)
self.assertTrue(torch.sum(sim_samples[model.coin_toss()][0] == 1.0) > 5)
|
nilq/baby-python
|
python
|
from .base import init
|
nilq/baby-python
|
python
|
__author__ = 'zaxlct'
__date__ = '2017/4/6 12:14 PM'
import re
from django import forms
from operation.models import UserAsk
# class UserAskForm(forms.Form):
# name = forms.CharField(required=True, min_length=2, max_length=20)
# phone = forms.CharField(required=True, min_length=11, max_length=11)
# course_name = forms.CharField(required=True, min_length=5, max_length=50)
class UserAskForm(forms.ModelForm):
    # Extra fields can also be added
# price = forms.CharField(required=True, min_length=2, max_length=20)
class Meta:
model = UserAsk
fields = ['name', 'mobile', 'course_name']
# def clean_name(self):
# def clean_course_name(self):
def clean_mobile(self):
        # Mobile number validation
mobile = self.cleaned_data['mobile']
p = re.compile('^0\d{2,3}\d{7,8}$|^1[358]\d{9}$|^147\d{8}')
if p.match(mobile):
            # A foreign key could also be returned here
return mobile
        raise forms.ValidationError('Invalid mobile number format', code='mobile_inval')
|
nilq/baby-python
|
python
|
from .libs import metadata
from .libs import utils
from .libs.athena import Athena
from .libs.s3 import S3
from .libs.csv_parser import single_column_csv_to_list, csv_to_list_of_dicts
from .libs.policy_generator import PolicyGenerator
import argparse
import logging
def arguments():
parser = argparse.ArgumentParser()
parser.add_argument("metadata")
parser.add_argument("--setup", action="store_true")
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
return args
def initialize_classes(args):
""" Reading metadata, performing metadata validation, initializing required classes.
Classes / metadata stored in initc dictionary. """
initc = {}
meta = metadata.read(args.metadata)
initc['boto'] = utils.Boto(meta)
initc['meta'] = metadata.set_defaults(meta, initc['boto'])
initc['s3'] = S3(initc['meta'], initc['boto'].session)
initc['athena'] = Athena(initc['meta'], initc['boto'].session)
initc['policygen'] = PolicyGenerator()
return initc
def get_arns_from_athena_output(users_or_roles, initc):
""" Function to get list of arns of active users or roles. """
if users_or_roles == "users":
athena_output_files = initc['athena'].active_users_output_files
services_by_query = initc['athena'].services_by_user_query
elif users_or_roles == "roles":
athena_output_files = initc['athena'].active_roles_output_files
services_by_query = initc['athena'].services_by_role_query
for dictionary in athena_output_files:
obj = initc['s3'].get_object(initc['meta']["behold_bucket"], dictionary["path"])
list_of_arns = single_column_csv_to_list(obj)
initc['s3'].put_object(
bucket=initc['meta']['behold_bucket'],
key=f"behold_results/{dictionary['account']}/{users_or_roles}/active_{users_or_roles}_in"
f"_last_{initc['meta']['days_back']}_days.txt",
encoded_object="\n".join(list_of_arns).encode()
)
services_by_query(
account=dictionary["account"],
list_of_arns=list_of_arns
)
def build_behold_output_files(users_or_roles, initc):
""" Builds list of services/actions and IAM policy for each role or user. """
if users_or_roles == "users":
athena_services_by_output_files = initc['athena'].services_by_user_output_files
elif users_or_roles == "roles":
athena_services_by_output_files = initc['athena'].services_by_role_output_files
for dictionary in athena_services_by_output_files:
obj = initc['s3'].get_object(initc['meta']["behold_bucket"], dictionary["path"])
list_of_dicts = csv_to_list_of_dicts(obj)
path_to_output = f"behold_results/{dictionary['account']}/{users_or_roles}/{dictionary['name']}/{dictionary['name']}_"
supported_actions, unsupported_actions = initc['policygen'].generate_list_of_actions(list_of_dicts)
formatted_supported_actions = initc['policygen'].format_actions(supported_actions)
initc['s3'].put_object(
bucket=initc['meta']["behold_bucket"],
key=path_to_output + "iam_actions.txt",
encoded_object=formatted_supported_actions.encode()
)
policy = initc['policygen'].build_policy(supported_actions)
initc['s3'].put_object(
bucket=initc['meta']['behold_bucket'],
key=path_to_output + "iam_policy.json",
encoded_object=policy.encode()
)
if unsupported_actions:
initc['s3'].put_object(
bucket=initc['meta']['behold_bucket'],
key=path_to_output + "unsupported_actions.txt",
encoded_object="\n".join(unsupported_actions).encode()
)
def main():
args = arguments()
if args.debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(
level=log_level,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
initc = initialize_classes(args)
# If --setup flag is passed, the Athena table and partition tables are set up.
# Only needs to be done once unless metadata is updated to add more accounts, regions, or years.
if args.setup:
initc['athena'].set_up_table_and_partitions()
initc['athena'].active_resources()
get_arns_from_athena_output("users", initc)
get_arns_from_athena_output("roles", initc)
build_behold_output_files("users", initc)
build_behold_output_files("roles", initc)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import boto3
import json
from datetime import datetime
#to download, <bucket, obj name, file path to dl to>
# s3.download_file(
# "iot-fastgame-proj-ads","beard.jpg","downloads/beard.jpg"
# )
#to upload <file path to upload from, bucket, obj name>
# s3.upload_file('images/pokemon.jpg','iot-fastgame-proj-ads','pokemon.jpg')
#download_all_ads --> save img name and tags into a file, json?
#choose_ad --> check file, choose best match according to tags, display ad
#
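# A minimal sketch of the choose_ad idea described above (a hypothetical
# helper, not part of the original module): pick the downloaded ad whose tags
# overlap the given topics the most, using the tags.json file written by
# download_images below.
def choose_ad(topics):
    with open("tags.json") as f:
        ads = json.load(f)
    # score each ad by the number of shared tags; the best match wins
    scored = [(len(set(ad["tags"]) & set(topics)), ad["name"]) for ad in ads]
    best_score, best_name = max(scored, default=(0, None))
    return best_name if best_score > 0 else None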
def upload_images(viewerbucketname, imagepath, imagename):
# Declare
s3 = boto3.client("s3")
s3buckets = boto3.resource("s3")
adsbucket = s3buckets.Bucket(viewerbucketname)
name = datetime.now().strftime("%H:%M:%S") + ".png"
s3.upload_file(imagepath + imagename, viewerbucketname, name)
def download_images(adbucketname, download_path ,filter='all'):
# Declare
s3 = boto3.client("s3")
s3buckets = boto3.resource("s3")
adsbucket = s3buckets.Bucket(adbucketname)
object_summary_iterator = adsbucket.objects.all()
tosave=[]
for i in object_summary_iterator: #iterate thru all objs
print(i.key)
object = s3buckets.Object(adbucketname,i.key)
try:
objtopics = object.metadata['topics']
objtopiclist = [x.strip() for x in objtopics.split(',')]
print(objtopiclist)
#maybe can check if downloaded alr
if filter == 'all':
s3.download_file(adbucketname,i.key,download_path+i.key)
elif filter in objtopiclist:
s3.download_file(adbucketname,i.key,download_path+i.key)
tofile={"name":i.key,"tags":objtopiclist}
tosave.append(tofile)
except:
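            # objects without a 'topics' metadata entry are skipped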
pass
with open("tags.json", "w") as outfile:
json.dump(tosave, outfile)
def download_image(adbucketname, download_path, img_name):
s3 = boto3.client("s3")
s3buckets = boto3.resource("s3")
f = open("tags.json")
tosave = json.load(f)
print(tosave)
object = s3buckets.Object(adbucketname,img_name) # get the bucket :)
try:
objtopics = object.metadata['topics']
objtopiclist = [x.strip() for x in objtopics.split(',')]
tofile={"name":img_name,"tags":objtopiclist}
if tofile not in tosave:
print("Save file")
tosave.append(tofile)
s3.download_file(adbucketname,img_name,download_path+img_name)
except:
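        # objects without a 'topics' metadata entry are skipped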
pass
with open("tags.json", "w") as outfile:
json.dump(tosave, outfile)
|
nilq/baby-python
|
python
|
import turtle as t  # import the library
from random import randint
from turtle import *
screen = t.getscreen()  # the window
t.title("Черепашка")
my_turtle = t.Turtle()
my_turtle.shape("turtle") # square , triangle , classic
#my_turtle.color("green")
my_turtle.color("black","red")
my_turtle.shapesize(2,2,0)
#for i in range(0,50):
# print(randint(3,5))
#my_turtle.forward(1)
#for num in range(8):
# penup()
# forward(10)
# pendown()
# forward(10)
#my_turtle.goto(-100,-100)
#print(my_turtle.pos())
# forward: move forward
# backward: move backward
# left: turn left
# right: turn right
#my_turtle.forward(100)
#for i in range(0,180):
# my_turtle.left(1)
# my_turtle.forward(1)
#print(my_turtle.pos())
#my_turtle.circle(30)
#my_turtle.circle(40)
# (x,y) (0,0)
#my_turtle.goto(100,100)
#number = 0
#number2 = 40
#for i in range(1,20):
# my_turtle.shapesize(i,i,0)
# number2 = number2 - 1
# my_turtle.forward(5)
# my_turtle.shapesize(number2,number2,0)
|
nilq/baby-python
|
python
|
cisco_ios = "Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)"
a = cisco_ios.split()
print(a)
b = a.index('Version')
c = a[b+1]
d = c[:-1]
print(d)
# for i in a:
# if i=='Version':
# print(i)
|
nilq/baby-python
|
python
|
from math import sqrt, ceil
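# Advent of Code 2017, day 3: Manhattan distance from square `num` back to the
# center of the spiral memory, computed from the ring geometry.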
def p1(num: int):
size = ceil(sqrt(num))
center = ceil((size - 1) / 2)
return max(0, center - 1 + abs(center - num % size))
assert p1(1) == 0
assert p1(12) == 3
assert p1(23) == 2
assert p1(1024) == 31
assert p1(347991) == 480
# p2 349975
# https://oeis.org/A141481
|
nilq/baby-python
|
python
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
#
# Additional Authors:
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
from volatility import renderers
import volatility.plugins.procdump as procdump
from volatility.renderers.basic import Address
import volatility.win32.tasks as tasks
import volatility.debug as debug
import volatility.utils as utils
import volatility.cache as cache
class DLLDump(procdump.ProcDump):
"""Dump DLLs from a process address space"""
def __init__(self, config, *args, **kwargs):
procdump.ProcDump.__init__(self, config, *args, **kwargs)
config.remove_option("OFFSET")
config.add_option('REGEX', short_option = 'r',
help = 'Dump dlls matching REGEX',
action = 'store', type = 'string')
config.add_option('IGNORE-CASE', short_option = 'i',
help = 'Ignore case in pattern match',
action = 'store_true', default = False)
config.add_option('OFFSET', short_option = 'o', default = None,
help = 'Dump DLLs for Process with physical address OFFSET',
action = 'store', type = 'int')
config.add_option('BASE', short_option = 'b', default = None,
help = 'Dump DLLS at the specified BASE offset in the process address space',
action = 'store', type = 'int')
@cache.CacheDecorator(lambda self: "tests/dlldump/regex={0}/ignore_case={1}/offset={2}/base={3}".format(self._config.REGEX, self._config.IGNORE_CASE, self._config.OFFSET, self._config.BASE))
def calculate(self):
addr_space = utils.load_as(self._config)
if self._config.DUMP_DIR == None:
debug.error("Please specify a dump directory (--dump-dir)")
if not os.path.isdir(self._config.DUMP_DIR):
debug.error(self._config.DUMP_DIR + " is not a directory")
if self._config.OFFSET != None:
data = [self.virtual_process_from_physical_offset(addr_space, self._config.OFFSET)]
else:
data = self.filter_tasks(tasks.pslist(addr_space))
if self._config.REGEX:
try:
if self._config.IGNORE_CASE:
mod_re = re.compile(self._config.REGEX, re.I)
else:
mod_re = re.compile(self._config.REGEX)
except re.error, e:
debug.error('Error parsing regular expression: %s' % e)
for proc in data:
ps_ad = proc.get_process_address_space()
if ps_ad == None:
continue
mods = dict((mod.DllBase.v(), mod) for mod in proc.get_load_modules())
if self._config.BASE:
if mods.has_key(self._config.BASE):
mod_name = mods[self._config.BASE].BaseDllName
else:
mod_name = "UNKNOWN"
yield proc, ps_ad, int(self._config.BASE), mod_name
else:
for mod in mods.values():
if self._config.REGEX:
if not mod_re.search(str(mod.FullDllName or '')) and not mod_re.search(str(mod.BaseDllName or '')):
continue
yield proc, ps_ad, mod.DllBase.v(), mod.BaseDllName
def generator(self, data):
for proc, ps_ad, mod_base, mod_name in data:
if not ps_ad.is_valid_address(mod_base):
result = "Error: DllBase is unavailable (possibly due to paging)"
else:
process_offset = ps_ad.vtop(proc.obj_offset)
dump_file = "module.{0}.{1:x}.{2:x}.dll".format(proc.UniqueProcessId, process_offset, mod_base)
result = self.dump_pe(ps_ad, mod_base, dump_file)
yield (0,
[Address(proc.obj_offset),
str(proc.ImageFileName),
Address(mod_base),
str(mod_name or ''),
str(result)])
def unified_output(self, data):
return renderers.TreeGrid(
[("Process(V)", Address),
("Name", str),
("Module Base", Address),
("Module Name", str),
("Result", str)], self.generator(data))
def render_text(self, outfd, data):
if self._config.DUMP_DIR == None:
debug.error("Please specify a dump directory (--dump-dir)")
if not os.path.isdir(self._config.DUMP_DIR):
debug.error(self._config.DUMP_DIR + " is not a directory")
self.table_header(outfd,
[("Process(V)", "[addrpad]"),
("Name", "20"),
("Module Base", "[addrpad]"),
("Module Name", "20"),
("Result", "")])
for proc, ps_ad, mod_base, mod_name in data:
if not ps_ad.is_valid_address(mod_base):
result = "Error: DllBase is paged"
else:
process_offset = ps_ad.vtop(proc.obj_offset)
dump_file = "module.{0}.{1:x}.{2:x}.dll".format(proc.UniqueProcessId, process_offset, mod_base)
result = self.dump_pe(ps_ad, mod_base, dump_file)
self.table_row(outfd,
proc.obj_offset,
proc.ImageFileName,
mod_base, str(mod_name or ''), result)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
__author__ = 'abbot'
import requests
# response = requests.get("https://www.12306.cn/mormhweb/", verify = False)
# print(response.text)
response = requests.get("http://www.baidu.com")
print(response.content)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'a test module'
import sys
_author_ = 'tianmaolin'
def fun1(*a):
print(a)
def fun2(**b):
print(b)
# fun1(1, 2, 5)
# fun2(name='tianmlin', age=22)
def test():
args = sys.argv
if len(args) == 1:
print("Hello World!")
elif len(args) == 2:
print("Hello,%s!" % args[1])
else:
print("Too many arguments!")
if __name__ == '__main__':
test()
|
nilq/baby-python
|
python
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from contextlib import closing
import mox
import requests
from six import StringIO
from pants.net.http.fetcher import Fetcher
from pants.util.contextutil import temporary_file
class FetcherTest(mox.MoxTestBase):
def setUp(self):
super(FetcherTest, self).setUp()
self.requests = self.mox.CreateMockAnything()
self.response = self.mox.CreateMock(requests.Response)
self.fetcher = Fetcher(requests_api=self.requests)
self.listener = self.mox.CreateMock(Fetcher.Listener)
def expect_get(self, url, chunk_size_bytes, timeout_secs, listener=True):
self.requests.get(url, stream=True, timeout=timeout_secs).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {'content-length': '11'}
if listener:
self.listener.status(200, content_length=11)
chunks = ['0123456789', 'a']
self.response.iter_content(chunk_size=chunk_size_bytes).AndReturn(chunks)
return chunks
def test_get(self):
for chunk in self.expect_get('http://bar', chunk_size_bytes=1024, timeout_secs=60):
self.listener.recv_chunk(chunk)
self.listener.finished()
self.response.close()
self.mox.ReplayAll()
self.fetcher.fetch('http://bar',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_checksum_listener(self):
digest = self.mox.CreateMockAnything()
for chunk in self.expect_get('http://baz', chunk_size_bytes=1, timeout_secs=37):
self.listener.recv_chunk(chunk)
digest.update(chunk)
self.listener.finished()
digest.hexdigest().AndReturn('42')
self.response.close()
self.mox.ReplayAll()
checksum_listener = Fetcher.ChecksumListener(digest=digest)
self.fetcher.fetch('http://baz',
checksum_listener.wrap(self.listener),
chunk_size_bytes=1,
timeout_secs=37)
self.assertEqual('42', checksum_listener.checksum)
def test_download_listener(self):
downloaded = ''
for chunk in self.expect_get('http://foo', chunk_size_bytes=1048576, timeout_secs=3600):
self.listener.recv_chunk(chunk)
downloaded += chunk
self.listener.finished()
self.response.close()
self.mox.ReplayAll()
with closing(StringIO()) as fp:
self.fetcher.fetch('http://foo',
Fetcher.DownloadListener(fp).wrap(self.listener),
chunk_size_bytes=1024 * 1024,
timeout_secs=60 * 60)
self.assertEqual(downloaded, fp.getvalue())
def test_size_mismatch(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {'content-length': '11'}
self.listener.status(200, content_length=11)
self.response.iter_content(chunk_size=1024).AndReturn(['a', 'b'])
self.listener.recv_chunk('a')
self.listener.recv_chunk('b')
self.response.close()
self.mox.ReplayAll()
with self.assertRaises(self.fetcher.Error):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_get_error_transient(self):
self.requests.get('http://foo', stream=True, timeout=60).AndRaise(requests.ConnectionError)
self.mox.ReplayAll()
with self.assertRaises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_get_error_permanent(self):
self.requests.get('http://foo', stream=True, timeout=60).AndRaise(requests.TooManyRedirects)
self.mox.ReplayAll()
with self.assertRaises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertTrue(e.exception.response_code is None)
def test_http_error(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 404
self.listener.status(404)
self.response.close()
self.mox.ReplayAll()
with self.assertRaises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertEqual(404, e.exception.response_code)
def test_iter_content_error(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {}
self.listener.status(200, content_length=None)
self.response.iter_content(chunk_size=1024).AndRaise(requests.Timeout)
self.response.close()
self.mox.ReplayAll()
with self.assertRaises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def expect_download(self, path_or_fd=None):
downloaded = ''
for chunk in self.expect_get('http://1', chunk_size_bytes=13, timeout_secs=13, listener=False):
downloaded += chunk
self.response.close()
self.mox.ReplayAll()
path = self.fetcher.download('http://1',
path_or_fd=path_or_fd,
chunk_size_bytes=13,
timeout_secs=13)
return downloaded, path
def test_download(self):
downloaded, path = self.expect_download()
try:
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
finally:
os.unlink(path)
def test_download_fd(self):
with temporary_file() as fd:
downloaded, path = self.expect_download(path_or_fd=fd)
self.assertEqual(path, fd.name)
fd.close()
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
def test_download_path(self):
with temporary_file() as fd:
fd.close()
downloaded, path = self.expect_download(path_or_fd=fd.name)
self.assertEqual(path, fd.name)
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
|
nilq/baby-python
|
python
|
import os
from typing import Any
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
import cssutils
import requests
url_re = r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)'
def delete_duplicates(l: list) -> list:
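    """Return a copy of l with duplicates removed, preserving order."""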
new_l = []
for element in l:
if element not in new_l:
new_l.append(element)
return new_l
def parse_css(css: str) -> dict:
    dct = {}
    sheet = cssutils.parseString(css)
    for rule in sheet:
        if rule.type != rule.STYLE_RULE:
            continue  # skip comments, @media and other non-style rules
        selector = rule.selectorText
        styles = rule.style.cssText
        dct[selector] = styles
    return dct
def delete_query(uri: str, query_name: str) -> str:
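    """Return the URI with the given query parameter stripped."""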
parsed_url = urlparse(uri)
url_query = parse_qs(parsed_url.query, keep_blank_values=True)
url_query.pop(query_name, None)
cleaned = urlunparse(parsed_url._replace(query=urlencode(url_query, True)))
return cleaned
def dump_html(uri: str) -> None:
    with open('dump.html', 'w', encoding='utf-8') as f:
f.write(requests.get(uri).text)
def get_env_var(var_name: str, default: Any = None, required: bool = False) -> Any:
value = os.environ.get(var_name, default=default)
if not value and required:
raise ValueError(
f'You must specify environment variable named {var_name}. '
'In Heroku go to App settings -> Config Vars -> Reveal Config Vars -> Add. '
f'In Bash type \"export {var_name}=your_value\".'
)
return value
|
nilq/baby-python
|
python
|
# Generated by Django 2.2 on 2019-06-21 07:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0055_auto_20190620_1527'),
]
operations = [
migrations.AddField(
model_name='presentation',
name='is_keynote',
            field=models.BooleanField(default=False, help_text='Set to TRUE if this is a keynote speaker.'),
),
]
|
nilq/baby-python
|
python
|
# AUTOGENERATED FILE! PLEASE DON'T EDIT
from .callbacks import Callback, Callbacks, Cbs
import k1lib, os, torch
__all__ = ["Autosave", "DontTrainValid", "InspectLoss", "ModifyLoss", "Cpu", "Cuda",
"DType", "InspectBatch", "ModifyBatch", "InspectOutput", "ModifyOutput",
"Beep"]
@k1lib.patch(Cbs)
class Autosave(Callback):
"""Autosaves 3 versions of the network to disk"""
def __init__(self): super().__init__(); self.order = 23
def endRun(self):
os.system("mv autosave-1.pth autosave-0.pth")
os.system("mv autosave-2.pth autosave-1.pth")
self.l.save("autosave-2.pth")
@k1lib.patch(Cbs)
class DontTrainValid(Callback):
"""If is not training, then don't run m.backward() and opt.step().
The core training loop in k1lib.Learner don't specifically do this,
cause there may be some weird cases where you want to also train valid."""
def _common(self):
if not self.l.model.training: return True
def startBackward(self): return self._common()
def startStep(self): return self._common()
@k1lib.patch(Cbs)
class InspectLoss(Callback):
"""Expected `f` to take in 1 float."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 15
def endLoss(self): self.f(self.loss.detach())
@k1lib.patch(Cbs)
class ModifyLoss(Callback):
"""Expected `f` to take in 1 float and return 1 float."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 13
def endLoss(self): self.l.loss = self.f(self.loss)
@k1lib.patch(Cbs)
class Cuda(Callback):
"""Moves batch and model to the default GPU"""
def startRun(self): self.l.model.cuda()
def startBatch(self):
self.l.xb = self.l.xb.cuda()
self.l.yb = self.l.yb.cuda()
@k1lib.patch(Cbs)
class Cpu(Callback):
"""Moves batch and model to CPU"""
def startRun(self): self.l.model.cpu()
def startBatch(self):
self.l.xb = self.l.xb.cpu()
self.l.yb = self.l.yb.cpu()
@k1lib.patch(Cbs)
class DType(Callback):
"""Moves batch and model to a specified data type"""
def __init__(self, dtype): super().__init__(); self.dtype = dtype
def startRun(self): self.l.model = self.l.model.to(self.dtype)
def startBatch(self):
self.l.xb = self.l.xb.to(self.dtype)
self.l.yb = self.l.yb.to(self.dtype)
@k1lib.patch(Cbs)
class InspectBatch(Callback):
"""Expected `f` to take in 2 tensors."""
def __init__(self, f:callable): super().__init__(); self.f = f; self.order = 15
def startBatch(self): self.f(self.l.xb, self.l.yb)
@k1lib.patch(Cbs)
class ModifyBatch(Callback):
"""Modifies xb and yb on the fly. Expected `f`
to take in 2 tensors and return 2 tensors."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 13
def startBatch(self): self.l.xb, self.l.yb = self.f(self.l.xb, self.l.yb)
@k1lib.patch(Cbs)
class InspectOutput(Callback):
"""Expected `f` to take in 1 tensor."""
def __init__(self, f): super().__init__(); self.f = f; self.order = 15
def endPass(self): self.f(self.y)
@k1lib.patch(Cbs)
class ModifyOutput(Callback):
"""Modifies output on the fly. Expected `f` to take
in 1 tensor and return 1 tensor"""
def __init__(self, f): super().__init__(); self.f = f; self.order = 13
def endPass(self): self.l.y = self.f(self.y)
@k1lib.patch(Cbs)
class Beep(Callback):
"""Plays a beep sound when the run is over"""
def endRun(self): k1lib.beep()
|
nilq/baby-python
|
python
|
import cowsay
print(cowsay.get_output_string('trex', 'Hello (extinct) World'))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding=utf-8
#list:[]
bicycles = ['trek', 'cannodale', 'redline', 'speciakixrdd']
print(bicycles)
# Positive indices: 0, 1, 2, ..., n - 1; counting backwards: -1, -2, ..., -n
print(bicycles[0].title())
print(bicycles[-1])
motorcycles = ['honda', 'yamaha', 'suzyki']
print(motorcycles)
## Modify an element
motorcycles[0] = 'ducati'
print(motorcycles)
## Append to the end with append(str)
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
motorcycles.append('ducati')
print(motorcycles)
motorcycles = []
print(motorcycles)
motorcycles.append('honda')
motorcycles.append('yamaha')
motorcycles.append('suzuki')
print(motorcycles)
print("=============")
## Insert at index n with insert(n, str)
motorcycles = ['honda', 'yamaha', 'suzuhi']
print(motorcycles)
motorcycles.insert(0, 'ducati')
print(motorcycles)
print("====================")
## Delete with del
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
del motorcycles[0]
print(motorcycles)
## pop(x = n - 1) removes the last element and returns the popped value
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
pop_motorcycles = motorcycles.pop()
print(motorcycles)
print(pop_motorcycles)
motorcycles.pop(0)
print(motorcycles)
## When the index of the element is unknown, use remove(str) to delete the first occurrence of str
day = ['mon', 'tue', 'wed', 'thu', 'fri']
print(day)
day.remove('wed')
print(day)
##sort()
day = ['mon', 'tue', 'wed', 'thu', 'fri']
print(day)
day.sort()
print(day)
day.sort(reverse=True)
print(day)
## sorted(list) returns a sorted copy, but the original list keeps its order
num = [1, 4, 7, 2, 0, 5]
print(num)
num2 = sorted(num)
print(num2)
print(num)
print("\n")
## reverse() reverses the list
print(day)
day.reverse()
print(day)
# len() gives the length of the list
l = len(day)
print(l)
|
nilq/baby-python
|
python
|