Column schema (observed ranges / distinct values):

| column | dtype | length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
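Each record below follows this schema. As a minimal sketch of how such a table can be consumed (the dataset path `org/code-dataset` is a hypothetical placeholder, not given in this preview; the column names are taken from the schema above), the Hugging Face `datasets` library can stream and filter rows like these:

```python
from datasets import load_dataset  # pip install datasets

# Hypothetical repository path; substitute the real dataset name.
ds = load_dataset("org/code-dataset", split="train", streaming=True)

# Keep permissively licensed, non-vendored, non-generated Python files.
python_rows = (
    row for row in ds
    if row["language"] == "Python"
    and row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
)

for row in python_rows:
    print(row["repo_name"], row["path"], row["length_bytes"], row["detected_licenses"])
    break  # show just the first matching row
```

The rows that follow are sample records rendered in flattened form, with the `content` column holding the full source file.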
e847237ba52e9ae5708ef5eac9d85b5ac9248ea7
|
854b94d7be92582bd191a7cb63143a95e5b5c337
|
/hyfetch/distros/hyperbola.py
|
8628a0710280318c469376d49fc057aef6e37f7e
|
[
"MIT"
] |
permissive
|
hykilpikonna/hyfetch
|
673c0c999d0f3f542349824495ad6004f450ebac
|
98863df16d70b030696f4b94080d114396320f35
|
refs/heads/master
| 2023-08-17T10:41:10.289997
| 2023-08-17T03:37:23
| 2023-08-17T03:37:23
| 479,913,941
| 447
| 78
|
MIT
| 2023-09-14T14:39:18
| 2022-04-10T04:38:15
|
Shell
|
UTF-8
|
Python
| false
| false
| 693
|
py
|
hyperbola.py
|
# This file is automatically generated. Please do not modify.
from . import AsciiArt
hyperbola = AsciiArt(match=r'''"Hyperbola"*''', color='8', ascii=r"""
${c1} WW
KX W
WO0W NX0O
NOO0NW WNXK0OOKW
W0OOOOOOOOOOOOKN
N0OOOOOOO0KXW
WNXXXNW
NXK00000KN
WNK0OOOOOOOOOO0W
NK0OOOOOOOOOOOOOO0W
X0OOOOOOO00KK00OOOOOK
X0OOOO0KNWW WX0OO0W
X0OO0XNW KOOW
N00KNW KOW
NKXN W0W
WW W
""")
|
0cf185c7526cf94e9088474db5e3870f9ce93f9c
|
08cdf212eebebdff17e888522b0c6bc837fd0b3a
|
/brain/brain/models/sqlobjects.py
|
446da2db434ddab5c5ccadf1ddd9a71a9d5e4adf
|
[
"Apache-2.0"
] |
permissive
|
quarkslab/irma
|
1f5b32c17195f709d3bb9ff7f7199aad4c76dfd3
|
4e3e2c0fa82e352a1a7a7fd02381a4d84bed9f09
|
refs/heads/master
| 2023-03-03T15:37:51.480982
| 2022-10-19T19:30:27
| 2022-10-19T19:30:27
| 26,581,177
| 267
| 81
|
Apache-2.0
| 2023-03-01T23:09:39
| 2014-11-13T09:47:20
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 6,995
|
py
|
sqlobjects.py
|
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
from sqlalchemy import Column, Integer, Float, String, \
event, ForeignKey, Boolean
import config.parser as config
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from irma.common.utils.utils import UUID
from irma.common.base.exceptions import IrmaDatabaseError, \
IrmaDatabaseResultNotFound
from irma.common.base.utils import IrmaScanStatus
from irma.common.utils.compat import timestamp
# SQLite fix for ForeignKey support
# see http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html
if config.sqldb.dbms == 'sqlite':
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection,
connection_record): # pragma: no cover
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
Base = declarative_base()
tables_prefix = '{0}_'.format(config.sqldb.tables_prefix)
class Scan(Base):
__tablename__ = '{0}scan'.format(tables_prefix)
# SQLite fix for auto increment on ids
# see http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html
if config.sqldb.dbms == 'sqlite':
__table_args__ = {'sqlite_autoincrement': True}
# Fields
id = Column(
Integer,
autoincrement=True,
nullable=False,
primary_key=True,
name='id'
)
scan_id = Column(
String,
index=True,
nullable=False,
name='scan_id'
)
status = Column(
Integer,
nullable=False,
name='status'
)
timestamp = Column(
Float(precision=2),
nullable=False,
name='timestamp'
)
# Many to one Scan <-> User
user_id = Column(
Integer,
ForeignKey('{0}user.id'.format(tables_prefix)),
index=True,
nullable=False,
)
jobs = relationship("Job", backref="scan", lazy='subquery')
def __init__(self, frontend_scanid, user_id):
self.scan_id = frontend_scanid
self.status = IrmaScanStatus.empty
self.timestamp = timestamp()
self.user_id = user_id
@property
def files(self):
return set(job.filename for job in self.jobs)
@property
def nb_files(self):
return len(self.files)
@classmethod
def get_scan(cls, scan_id, user_id, session):
try:
return session.query(cls).filter(
cls.scan_id == scan_id, cls.user_id == user_id  # pass both clauses so they combine as SQL AND
).one()
except NoResultFound as e:
raise IrmaDatabaseResultNotFound(e)
except MultipleResultsFound as e:
raise IrmaDatabaseError(e)
class User(Base):
__tablename__ = '{0}user'.format(tables_prefix)
# SQLite fix for auto increment on ids
# see http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html
if config.sqldb.dbms == 'sqlite':
__table_args__ = {'sqlite_autoincrement': True}
# Fields
id = Column(
Integer,
autoincrement=True,
nullable=False,
primary_key=True,
name='id'
)
name = Column(
String,
nullable=False,
name='name'
)
rmqvhost = Column(
String,
index=True,
nullable=False,
name='rmqvhost'
)
ftpuser = Column(
String,
nullable=False,
name='ftpuser'
)
scans = relationship("Scan", backref="user")
def __init__(self, name, rmqvhost, ftpuser):
self.name = name
self.rmqvhost = rmqvhost
self.ftpuser = ftpuser
@staticmethod
def get_by_rmqvhost(session, rmqvhost=None):
# FIXME: get rmq_vhost dynamically
if rmqvhost is None:
rmqvhost = config.brain_config['broker_frontend'].vhost
try:
return session.query(User).filter(
User.rmqvhost == rmqvhost
).one()
except NoResultFound as e:
raise IrmaDatabaseResultNotFound(e)
except MultipleResultsFound as e:
raise IrmaDatabaseError(e)
class Job(Base):
__tablename__ = '{0}job'.format(tables_prefix)
# Fields
task_id = Column(
String,
name="task_id",
primary_key=True,
)
# Many to one Job <-> Scan
scan_id = Column(
Integer,
ForeignKey('{0}scan.id'.format(tables_prefix)),
index=True,
nullable=False,
)
filename = Column(
String,
nullable=False,
name='filename'
)
probename = Column(
String,
nullable=False,
name='probename'
)
def __init__(self, scanid, filename, probename):
self.task_id = UUID.generate()
self.scan_id = scanid
self.filename = filename
self.probename = probename
class Probe(Base):
__tablename__ = '{0}probe'.format(tables_prefix)
# SQLite fix for auto increment on ids
# see http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html
if config.sqldb.dbms == 'sqlite':
__table_args__ = {'sqlite_autoincrement': True}
# Fields
id = Column(
Integer,
autoincrement=True,
nullable=False,
primary_key=True,
name='id'
)
name = Column(
String,
nullable=False,
index=True,
name='name'
)
display_name = Column(
String,
nullable=False,
index=True,
name='display_name'
)
category = Column(
String,
nullable=False,
name='category'
)
mimetype_regexp = Column(
String,
name='mimetype_regexp'
)
online = Column(
Boolean,
name='online'
)
def __init__(self, name, display_name, category, mimetype_regexp, online):
self.name = name
self.display_name = display_name
self.category = category
self.mimetype_regexp = mimetype_regexp
self.online = online
@classmethod
def get_by_name(cls, name, session):
try:
return session.query(cls).filter(
Probe.name == name
).one()
except NoResultFound as e:
raise IrmaDatabaseResultNotFound(e)
except MultipleResultsFound as e:
raise IrmaDatabaseError(e)
@classmethod
def all(cls, session):
return session.query(cls).all()
|
65e08d2eedd407cd4ead0de8c5bbf5e5adbce8f8
|
499f5402baed77d000c65f243b457c69dc3d2fe4
|
/tests/in/test_measurable.py
|
ab044d102be79012b7e1df28de43a7c487c93ae6
|
[
"MIT"
] |
permissive
|
evereux/pycatia
|
416189b34f3c60effea8a76258e36ffc5ae86e22
|
5f5726d5dc66265b3eba8a01910c4aeae424365d
|
refs/heads/master
| 2023-08-21T10:03:41.660445
| 2023-08-09T16:21:10
| 2023-08-09T16:21:10
| 159,069,580
| 141
| 42
|
MIT
| 2023-08-09T11:15:27
| 2018-11-25T20:04:31
|
Python
|
UTF-8
|
Python
| false
| false
| 17,519
|
py
|
test_measurable.py
|
#! /usr/bin/python3.9
"""
This file is named test_measurable.py so these tests are run first. Otherwise the tests would fail for
test_document.py. I've no idea why at the moment.
"""
from pycatia import CATIADocHandler
from pycatia.enumeration.enumeration_types import cat_measurable_name
from pycatia.mec_mod_interfaces.hybrid_body import HybridBody
from pycatia.mec_mod_interfaces.part_document import PartDocument
from tests.create_source_parts import geom_set_arcs
from tests.create_source_parts import geom_set_cylinders
from tests.create_source_parts import geom_set_lines
from tests.create_source_parts import geom_set_planes
from tests.create_source_parts import geom_set_points
from tests.create_source_parts import geom_set_surfaces
from tests.source_files import cat_part_measurable
def round_tuple(tuple_object, decimal_places=6):
rounded_list = list()
for item in tuple_object:
if isinstance(item, int) or isinstance(item, float):
rounded = round(item, decimal_places)
rounded_list.append(rounded)
else:
rounded_list.append(item)
return tuple(rounded_list)
def test_area():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
bodies = part.bodies
body = bodies.item(1)
reference = part.create_reference_from_object(body)
measurable = spa_workbench.get_measurable(reference)
area_m = measurable.area
area = 0.04
assert area == round(area_m, 6)
def test_geometry_name():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
bodies = part.bodies
body = bodies.item(1)
reference = part.create_reference_from_object(body)
measurable = spa_workbench.get_measurable(reference)
assert measurable.geometry_name == cat_measurable_name.index("CatMeasurableVolume")
def test_length():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_lines)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
line1 = hybrid_body.hybrid_shapes.item(1)
line1_reference = part.create_reference_from_object(line1)
line1_measurable = spa_workbench.get_measurable(line1_reference)
length = 141.421356
catia_length = line1_measurable.length
assert length == round(catia_length, 6)
def test_perimeter():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_surfaces)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
surface = hybrid_body.hybrid_shapes.item(1)
surface_reference = part.create_reference_from_object(surface)
surface_measurable = spa_workbench.get_measurable(surface_reference)
perimeter = 400
catia_perimeter = surface_measurable.perimeter
assert perimeter == round(catia_perimeter, 6)
def test_radius():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_arcs)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
arc = hybrid_body.hybrid_shapes.item(1)
arc_reference = part.create_reference_from_object(arc)
arc_measurable = spa_workbench.get_measurable(arc_reference)
radius = 25.0
catia_radius = arc_measurable.radius
assert radius == round(catia_radius, 6)
def test_angle_between():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_lines)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
line1 = hybrid_body.hybrid_shapes.item(1)
line1_reference = part.create_reference_from_object(line1)
line1_measurable = spa_workbench.get_measurable(line1_reference)
line2 = hybrid_body.hybrid_shapes.item(2)
line2_reference = part.create_reference_from_object(line2)
angle = 45.0
catia_angle = line1_measurable.get_angle_between(line2_reference)
assert angle == round(catia_angle, 6)
def test_get_axis():
"""
# I've really no idea what the axis for an arc/circle/cylinder is.
# I can't reproduce these figures in CATIA.
:return:
"""
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_arcs)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
arc = hybrid_body.hybrid_shapes.item(1)
arc_reference = part.create_reference_from_object(arc)
arc_measurable = spa_workbench.get_measurable(arc_reference)
axis = (0.0, 0.0, 441.941738)
catia_axis = arc_measurable.get_axis()
assert axis == (round(catia_axis[0], 6), round(catia_axis[1], 6), round(catia_axis[2], 6))
def test_get_axis_system():
"""
:return:
"""
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
axis_systems = part.axis_systems
axis = axis_systems.item(1)
axis_reference = part.create_reference_from_object(axis)
axis_measurable = spa_workbench.get_measurable(axis_reference)
axis_system = (0.000, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.000000, 0.000000, 0.000000, 1.000000)
catia_axis = axis_measurable.get_axis_system()
assert axis_system == (
round(catia_axis[0], 6),
round(catia_axis[1], 6),
round(catia_axis[2], 6),
round(catia_axis[3], 6),
round(catia_axis[4], 6),
round(catia_axis[5], 6),
round(catia_axis[6], 6),
round(catia_axis[7], 6),
round(catia_axis[8], 6),
round(catia_axis[9], 6),
round(catia_axis[10], 6),
round(catia_axis[11], 6),
)
def test_get_direction():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_lines)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
line1 = hybrid_body.hybrid_shapes.item(1)
line1_reference = part.create_reference_from_object(line1)
line1_measurable = spa_workbench.get_measurable(line1_reference)
direction_vector = (0.707107, 0.707107, 0)
catia_direction = line1_measurable.get_direction()
assert direction_vector == (
round(catia_direction[0], 6),
round(catia_direction[1], 6),
round(catia_direction[2], 6),
)
def test_get_minimum_distance():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_lines)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
line1 = hybrid_body.hybrid_shapes.item(1)
line1_reference = part.create_reference_from_object(line1)
line1_measurable = spa_workbench.get_measurable(line1_reference)
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_points)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
point = hybrid_body.hybrid_shapes.item(2)
point_reference = part.create_reference_from_object(point)
minimum_distance = 70.710678
catia_minimum_distance = line1_measurable.get_minimum_distance(point_reference)
assert minimum_distance == round(catia_minimum_distance, 6)
def test_get_minimum_distance_points():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_points)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
point1 = hybrid_body.hybrid_shapes.item(1)
point1_reference = part.create_reference_from_object(point1)
point1_measurable = spa_workbench.get_measurable(point1_reference)
point2 = hybrid_body.hybrid_shapes.item(3)
point2_reference = part.create_reference_from_object(point2)
minimum_distance_points = (0.000000, 0.000000, 0.000000, 100.000000, 100.000000, 0.000000, None, None, None)
catia_minimum_distance_points = point1_measurable.get_minimum_distance_points(point2_reference)
assert minimum_distance_points == round_tuple(catia_minimum_distance_points, 6)
def test_get_plane():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_planes)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
plane = hybrid_body.hybrid_shapes.item(1)
plane_reference = part.create_reference_from_object(plane)
plane_measurable = spa_workbench.get_measurable(plane_reference)
plane = (0.0, 0.0, -200.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0)
catia_plane = plane_measurable.get_plane()
catia_plane = round_tuple(catia_plane, 6)
assert plane == catia_plane
def test_get_point():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_points)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
point1 = hybrid_body.hybrid_shapes.item(3)
point1_reference = part.create_reference_from_object(point1)
point1_measurable = spa_workbench.get_measurable(point1_reference)
point = (
100,
100,
0,
)
catia_point = point1_measurable.get_point()
catia_point = round_tuple(catia_point, 6)
assert point == catia_point
def test_get_points_on_axis():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_cylinders)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
cylinder = hybrid_body.hybrid_shapes.item(1)
cylinder_reference = part.create_reference_from_object(cylinder)
cylinder_measurable = spa_workbench.get_measurable(cylinder_reference)
cylinder = (
100,
100,
50,
100,
100,
100,
100,
100,
0,
)
catia_cylinder = cylinder_measurable.get_points_on_axis()
catia_cylinder = round_tuple(catia_cylinder, 6)
assert cylinder == catia_cylinder
def test_get_points_on_curve():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_lines)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
line1 = hybrid_body.hybrid_shapes.item(1)
line1_reference = part.create_reference_from_object(line1)
line1_measurable = spa_workbench.get_measurable(line1_reference)
points_on_curve = (
0.0,
0.0,
0.0,
50.0,
50.0,
0,
100.0,
100.0,
0,
)
catia_points_on_curve = line1_measurable.get_points_on_curve()
catia_points_on_curve = round_tuple(catia_points_on_curve, 6)
assert points_on_curve == catia_points_on_curve
def test_volume():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
bodies = part.bodies
body = bodies.item(1)
reference = part.create_reference_from_object(body)
measurable = spa_workbench.get_measurable(reference)
volume = 0.0005
catia_volume = measurable.volume
assert volume == round(catia_volume, 6)
def test_centre_of_gravity():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
bodies = part.bodies
body = bodies.item(1)
reference = part.create_reference_from_object(body)
measurable = spa_workbench.get_measurable(reference)
gx = 50
gy = 50
gz = 25
centre_of_gravity = measurable.get_cog()
assert (gx, gy, gz) == (
round(centre_of_gravity[0], 6),
round(centre_of_gravity[1], 6),
round(centre_of_gravity[2], 6),
)
def test_angle():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_arcs)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
arc = hybrid_body.hybrid_shapes.item(1)
arc_reference = part.create_reference_from_object(arc)
arc_measurable = spa_workbench.get_measurable(arc_reference)
angle = 360
catia_angle = arc_measurable.angle
assert angle == catia_angle
def test_center():
with CATIADocHandler(cat_part_measurable) as caa:
document = caa.document
assert document is not None
spa_workbench = document.spa_workbench()
part = PartDocument(document.com_object).part
hybrid_bodies = part.hybrid_bodies
hybrid_body_item = hybrid_bodies.get_item_by_name(geom_set_arcs)
assert hybrid_body_item is not None
hybrid_body = HybridBody(hybrid_body_item.com_object)
arc = hybrid_body.hybrid_shapes.item(1)
arc_reference = part.create_reference_from_object(arc)
arc_measurable = spa_workbench.get_measurable(arc_reference)
catia_center = arc_measurable.get_center()
center = (0, 100, 0)
assert center == catia_center
|
156755252b1610643d07bbe85e86339dcd8868ab
|
95a9386c323eaf2cd05d8f2e50c36863fb052ab7
|
/tests/filters/transformations_test.py
|
70b161ec1c26eac73009122ea81a8ac68de1fb4a
|
[
"MIT"
] |
permissive
|
googlefonts/ufo2ft
|
a431ed54a895e654c1fff1d449959aef91ac3398
|
b3895a96ca910c1764df016bfee4719448cfec4a
|
refs/heads/main
| 2023-08-21T12:52:10.348076
| 2023-08-04T15:45:55
| 2023-08-04T15:45:55
| 29,605,985
| 106
| 28
|
MIT
| 2023-09-12T08:07:28
| 2015-01-21T19:00:19
|
Python
|
UTF-8
|
Python
| false
| false
| 7,308
|
py
|
transformations_test.py
|
from math import isclose
import pytest
from ufo2ft.filters.transformations import TransformationsFilter
@pytest.fixture(
params=[
{
"capHeight": 700,
"xHeight": 500,
"glyphs": [
{"name": "space", "width": 500},
{
"name": "a",
"width": 350,
"outline": [
("moveTo", ((0, 0),)),
("lineTo", ((300, 0),)),
("lineTo", ((300, 300),)),
("lineTo", ((0, 300),)),
("closePath", ()),
],
"anchors": [(100, 200, "top"), (100, -200, "bottom")],
},
{
"name": "b",
"width": 450,
"outline": [
("addComponent", ("a", (1, 0, 0, 1, 0, 0))),
("addComponent", ("c", (1, 0, 0, 1, 0, 0))),
("addComponent", ("a", (1, 0, 0, 1, 10, -10))),
],
},
{
"name": "c",
"outline": [
("moveTo", ((0, 0),)),
("lineTo", ((300, 0),)),
("lineTo", ((150, 300),)),
("closePath", ()),
],
},
{
"name": "d",
"outline": [("addComponent", ("b", (1, 0, 0, -1, 0, 0)))],
},
],
}
]
)
def font(request, FontClass):
font = FontClass()
font.info.capHeight = request.param["capHeight"]
font.info.xHeight = request.param["xHeight"]
for param in request.param["glyphs"]:
glyph = font.newGlyph(param["name"])
glyph.width = param.get("width", 0)
pen = glyph.getPen()
for operator, operands in param.get("outline", []):
getattr(pen, operator)(*operands)
for x, y, name in param.get("anchors", []):
glyph.appendAnchor(dict(x=x, y=y, name=name))
return font
@pytest.fixture(
params=TransformationsFilter.Origin,
ids=[e.name for e in TransformationsFilter.Origin],
)
def origin(request):
return request.param
class TransformationsFilterTest:
def test_invalid_origin_value(self):
with pytest.raises(ValueError) as excinfo:
TransformationsFilter(Origin=5)
excinfo.match(r"is not a valid (TransformationsFilter\.)?Origin")
def test_empty_glyph(self, font):
filter_ = TransformationsFilter(OffsetY=51, include={"space"})
assert not filter_(font)
def test_Identity(self, font):
filter_ = TransformationsFilter()
assert not filter_(font)
def test_OffsetX(self, font):
filter_ = TransformationsFilter(OffsetX=-10)
assert filter_(font)
a = font["a"]
assert (a[0][0].x, a[0][0].y) == (-10, 0)
assert (a.anchors[1].x, a.anchors[1].y) == (90, -200)
# base glyph was already transformed, component didn't change
assert font["b"].components[0].transformation[-2:] == (0, 0)
def test_OffsetY(self, font):
filter_ = TransformationsFilter(OffsetY=51)
assert filter_(font)
a = font["a"]
assert (a[0][0].x, a[0][0].y) == (0, 51)
assert (a.anchors[1].x, a.anchors[1].y) == (100, -149)
assert font["b"].components[0].transformation[-2:] == (0, 0)
def test_OffsetXY(self, font):
filter_ = TransformationsFilter(OffsetX=-10, OffsetY=51)
assert filter_(font)
a = font["a"]
assert (a[0][0].x, a[0][0].y) == (-10, 51)
assert (a.anchors[1].x, a.anchors[1].y) == (90, -149)
assert font["b"].components[0].transformation[-2:] == (0, 0)
def test_ScaleX(self, font, origin):
# different Origin heights should not affect horizontal scale
filter_ = TransformationsFilter(ScaleX=50, Origin=origin)
assert filter_(font)
a = font["a"]
assert (a[0][0].x, a[0][0].y) == (0, 0)
assert (a[0][2].x, a[0][2].y) == (150, 300)
assert a.width == 350 * 0.50
def test_ScaleY(self, font, origin):
percent = 50
filter_ = TransformationsFilter(ScaleY=percent, Origin=origin)
assert filter_(font)
factor = percent / 100
origin_height = filter_.get_origin_height(font, origin)
bottom = origin_height * factor
top = bottom + 300 * factor
a = font["a"]
# only y coords change
assert (a[0][0].x, a[0][0].y) == (0, bottom)
assert (a[0][2].x, a[0][2].y) == (300, top)
def test_ScaleXY(self, font, origin):
percent = 50
filter_ = TransformationsFilter(ScaleX=percent, ScaleY=percent, Origin=origin)
assert filter_(font)
factor = percent / 100
origin_height = filter_.get_origin_height(font, origin)
bottom = origin_height * factor
top = bottom + 300 * factor
a = font["a"]
# both x and y change
assert (a[0][0].x, a[0][0].y) == (0, bottom)
assert (a[0][2].x, a[0][2].y) == (150, top)
assert a.width == 350 * factor
def test_Slant(self, font, origin):
filter_ = TransformationsFilter(Slant=45, Origin=origin)
assert filter_(font)
origin_height = filter_.get_origin_height(font, origin)
a = font["a"]
assert isclose(a[0][0].x, -origin_height)
assert a[0][0].y == 0
def test_composite_glyphs(self, font):
filter_ = TransformationsFilter(
OffsetX=-10, OffsetY=51, ScaleX=50, ScaleY=50, exclude={"c"}
)
assert filter_(font)
b = font["b"]
# component 'a' #1 was not transformed, because the base glyph was already
# transformed, and the component's own transformation is identity
assert b.components[0].transformation == (1, 0, 0, 1, 0, 0)
# component 'c' was transformed, because base glyph was not included
assert b.components[1].transformation == (0.5, 0, 0, 0.5, -10, 51)
# component 'a' #2 was partly transformed: the base glyph was transformed, but
# the component's original transformation was not identity; thus
# it was modified to compensate for the transformation already applied to
# the base glyph (scale stays same, offsets are scaled)
assert b.components[2].transformation == (1, 0, 0, 1, 5, -5)
d = font["d"]
# component 'b' was transformed as well as its base glyph, because
# its original transform had a scale, so it was necessary to
# compensate for the transformation applied on the base glyph
assert d.components[0].transformation == (1, 0, 0, -1, 0, 102)
def test_ScaleOffset_width(self, font, origin):
percent = 50
filter_ = TransformationsFilter(
OffsetX=-100, ScaleX=percent, ScaleY=percent, Origin=origin
)
assert filter_(font)
factor = percent / 100
a = font["a"]
# The offset value here should not change the fact that the glyph
# bounding box is scaled by 50%.
assert a.width == 350 * factor
|
7f444bdef4cb250f358bd6c1913fb4410caabf8e
|
e1fa3d6dc2b47403c610f05f70cd2799cd4ee5c5
|
/setup.py
|
a4720fa9045b20ff31f949b03ff4805f2cf2a406
|
[
"MIT"
] |
permissive
|
rcmalli/keras-vggface
|
4d9f5fc3950c93cc598077954f06b421471520f8
|
bee35376e76e35d00aeec503f2f242611a97b38a
|
refs/heads/master
| 2023-07-17T03:15:28.339212
| 2023-04-16T18:21:16
| 2023-04-16T18:21:16
| 71,151,117
| 961
| 456
|
MIT
| 2023-04-16T18:35:05
| 2016-10-17T15:07:40
|
Python
|
UTF-8
|
Python
| false
| false
| 766
|
py
|
setup.py
|
from setuptools import setup, find_packages
exec(open('keras_vggface/version.py').read())
setup(
    name='keras_vggface',
    version=__version__,
    description='VGGFace implementation with Keras framework',
    url='https://github.com/rcmalli/keras-vggface',
    author='Refik Can MALLI',
    author_email="mallir@itu.edu.tr",
    license='MIT',
    keywords=['keras', 'vggface', 'deeplearning'],
    packages=find_packages(exclude=["tools", "training", "temp", "test", "data", "visualize", "image", ".venv", ".github"]),
    zip_safe=False,
    install_requires=[
        'numpy>=1.9.1', 'scipy>=0.14', 'h5py', 'pillow', 'keras',
        'six>=1.9.0', 'pyyaml'
    ],
    extras_require={
        "tf": ["tensorflow"],
        "tf_gpu": ["tensorflow-gpu"],
    })
|
648bd77283c4f0324fa1bd98f063f2635ad4a680
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/pot/openvino/tools/pot/configs/templates/__init__.py
|
6b0e0edda5b1c6f9cf245472d40337bf7d3c1409
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 77
|
py
|
__init__.py
|
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
|
886df378a3822c9ce8c6d8049de29f10f535d64b
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/dag/vis_utils.py
|
0f83257295a10b8c3507dd3ec77a861ada7cf814
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,021
|
py
|
vis_utils.py
|
from ray.dag import DAGNode
import os
import tempfile

from ray.dag.utils import _DAGNodeNameGenerator
from ray.util.annotations import DeveloperAPI


@DeveloperAPI
def plot(dag: DAGNode, to_file=None):
    if to_file is None:
        tmp_file = tempfile.NamedTemporaryFile(suffix=".png")
        to_file = tmp_file.name
        extension = "png"
    else:
        _, extension = os.path.splitext(to_file)
        if not extension:
            extension = "png"
        else:
            extension = extension[1:]

    graph = _dag_to_dot(dag)
    graph.write(to_file, format=extension)

    # Render the image directly if running inside a Jupyter notebook
    try:
        from IPython import display

        return display.Image(filename=to_file)
    except ImportError:
        pass

    # close temp file if needed
    try:
        tmp_file.close()
    except NameError:
        pass


def _check_pydot_and_graphviz():
    """Check if pydot and graphviz are installed.

    pydot and graphviz are required for plotting. We check this
    during runtime rather than adding them to Ray dependencies.
    """
    try:
        import pydot
    except ImportError:
        raise ImportError(
            "pydot is required to plot DAG, " "install it with `pip install pydot`."
        )
    try:
        pydot.Dot.create(pydot.Dot())
    except (OSError, pydot.InvocationException):
        raise ImportError(
            "graphviz is required to plot DAG, "
            "download it from https://graphviz.gitlab.io/download/"
        )


def _get_nodes_and_edges(dag: DAGNode):
    """Get all unique nodes and edges in the DAG.

    A basic dfs with memorization to get all unique nodes
    and edges in the DAG.
    Unique nodes will be used to generate unique names,
    while edges will be used to construct the graph.
    """
    edges = []
    nodes = []

    def _dfs(node):
        nodes.append(node)
        for child_node in node._get_all_child_nodes():
            edges.append((child_node, node))
        return node

    dag.apply_recursive(_dfs)
    return nodes, edges


def _dag_to_dot(dag: DAGNode):
    """Create a Dot graph from dag.

    TODO(lchu):
    1. add more Dot configs in kwargs,
    e.g. rankdir, alignment, etc.
    2. add more contents to graph,
    e.g. args, kwargs and options of each node
    """
    # Step 0: check dependencies and init graph
    _check_pydot_and_graphviz()
    import pydot

    graph = pydot.Dot(rankdir="LR")

    # Step 1: generate unique name for each node in dag
    nodes, edges = _get_nodes_and_edges(dag)
    name_generator = _DAGNodeNameGenerator()
    node_names = {}
    for node in nodes:
        node_names[node] = name_generator.get_node_name(node)

    # Step 2: create graph with all the edges
    for edge in edges:
        graph.add_edge(pydot.Edge(node_names[edge[0]], node_names[edge[1]]))
    # if there is only one node
    if len(nodes) == 1 and len(edges) == 0:
        graph.add_node(pydot.Node(node_names[nodes[0]]))

    return graph
|
0f5d06e8fd2af8e83d88e06468fc645cec9fb6a2
|
8a85eb9b50864626cd2674f15b07df3d5dbe0b73
|
/neo/core/imagesequence.py
|
231a4a20463184ad653128f9711cf4e55e538a30
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
NeuralEnsemble/python-neo
|
287d3457a44c45f4dcbee0e9f9a2a5d83142de69
|
354c8d9d5fbc4daad3547773d2f281f8c163d208
|
refs/heads/master
| 2023-09-06T03:29:34.835053
| 2023-09-01T09:17:14
| 2023-09-01T09:17:14
| 3,949,530
| 265
| 213
|
BSD-3-Clause
| 2023-09-14T19:09:24
| 2012-04-06T12:48:48
|
Python
|
UTF-8
|
Python
| false
| false
| 10,208
|
py
|
imagesequence.py
|
"""
This module implements :class:`ImageSequence`, a 3D array.
:class:`ImageSequence` inherits from :class:`basesignal.BaseSignal` which
derives from :class:`BaseNeo`, and from :class:`quantities.Quantity` which
in turn inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Initialization of a new object from constructor happens in :meth:`__new__`.
This is where user-specified attributes are set.
* :meth:`__array_finalize__` is called for all new objects, including those
created by slicing. This is where attributes are copied over from
the old object.
"""
from neo.core.analogsignal import AnalogSignal, _get_sampling_rate
import quantities as pq
import numpy as np
from neo.core.baseneo import BaseNeo
from neo.core.basesignal import BaseSignal
from neo.core.dataobject import DataObject
class ImageSequence(BaseSignal):
"""
Representation of a sequence of images, as an array of three dimensions
organized as [frame][row][column].
Inherits from :class:`quantities.Quantity`, which in turn inherits from
:class:`numpy.ndarray`.
*usage*::
>>> from neo.core import ImageSequence
>>> import quantities as pq
>>>
>>> img_sequence_array = [[[column for column in range(20)]for row in range(20)]
... for frame in range(10)]
>>> image_sequence = ImageSequence(img_sequence_array, units='V',
... sampling_rate=1 * pq.Hz,
... spatial_scale=1 * pq.micrometer)
>>> image_sequence
ImageSequence 10 frames with width 20 px and height 20 px; units V; datatype int64
sampling rate: 1.0
spatial_scale: 1.0
>>> image_sequence.spatial_scale
array(1.) * um
*Required attributes/properties*:
:image_data: (3D NumPy array, or a list of 2D arrays)
The data itself
:units: (quantity units)
:sampling_rate: *or* **frame_duration** (quantity scalar) Number of
samples per unit time or
duration of a single image frame.
If both are specified, they are
checked for consistency.
:spatial_scale: (quantity scalar) size for a pixel.
:t_start: (quantity scalar) Time when sequence begins. Default 0.
*Recommended attributes/properties*:
:name: (str) A label for the dataset.
:description: (str) Text description.
:file_origin: (str) Filesystem path or URL of the original data file.
*Optional attributes/properties*:
:dtype: (numpy dtype or str) Override the dtype of the signal array.
:copy: (bool) True by default.
Note: Any other additional arguments are assumed to be user-specific
metadata and stored in :attr:`annotations`.
*Properties available on this object*:
:sampling_rate: (quantity scalar) Number of samples per unit time.
(1/:attr:`frame_duration`)
:frame_duration: (quantity scalar) Duration of each image frame.
(1/:attr:`sampling_rate`)
:spatial_scale: Size of a pixel
:duration: (Quantity) Sequence duration, read-only.
(size * :attr:`frame_duration`)
:t_stop: (quantity scalar) Time when sequence ends, read-only.
(:attr:`t_start` + :attr:`duration`)
"""
_parent_objects = ("Segment",)
_parent_attrs = ("segment",)
_quantity_attr = "image_data"
_necessary_attrs = (
("image_data", pq.Quantity, 3),
("sampling_rate", pq.Quantity, 0),
("spatial_scale", pq.Quantity, 0),
("t_start", pq.Quantity, 0),
)
_recommended_attrs = BaseNeo._recommended_attrs
def __new__(cls, image_data, units=None, dtype=None, copy=True, t_start=0 * pq.s,
spatial_scale=None, frame_duration=None,
sampling_rate=None, name=None, description=None, file_origin=None,
**annotations):
"""
Constructs new :class:`ImageSequence` from data.
This is called whenever a new class:`ImageSequence` is created from
the constructor, but not when slicing.
__array_finalize__ is called on the new object.
"""
if spatial_scale is None:
raise ValueError("spatial_scale is required")
image_data = np.stack(image_data)
if len(image_data.shape) != 3:
raise ValueError("list doesn't have the correct number of dimensions")
obj = pq.Quantity(image_data, units=units, dtype=dtype, copy=copy).view(cls)
obj.segment = None
# function from analogsignal.py in neo/core directory
obj.sampling_rate = _get_sampling_rate(sampling_rate, frame_duration)
obj.spatial_scale = spatial_scale
if t_start is None:
raise ValueError("t_start cannot be None")
obj._t_start = t_start
return obj
def __init__(self, image_data, units=None, dtype=None, copy=True, t_start=0 * pq.s,
spatial_scale=None, frame_duration=None,
sampling_rate=None, name=None, description=None, file_origin=None,
**annotations):
"""
Initializes a newly constructed :class:`ImageSequence` instance.
"""
DataObject.__init__(
self, name=name, file_origin=file_origin, description=description, **annotations
)
def __array_finalize__spec(self, obj):
self.sampling_rate = getattr(obj, "sampling_rate", None)
self.spatial_scale = getattr(obj, "spatial_scale", None)
self.units = getattr(obj, "units", None)
self._t_start = getattr(obj, "_t_start", 0 * pq.s)
return obj
def signal_from_region(self, *region):
"""
Method that takes 1 or multiple regionofinterest, uses the method of each region
of interest to get the list of pixels to average.
Return a list of :class:`AnalogSignal` for each regionofinterest
"""
if len(region) == 0:
raise ValueError("no regions of interest have been given")
region_pixel = []
for i, b in enumerate(region):
r = region[i].pixels_in_region()
if not r:
raise ValueError("region " + str(i) + "is empty")
else:
region_pixel.append(r)
analogsignal_list = []
for i in region_pixel:
data = []
for frame in range(len(self)):
picture_data = []
for v in i:
picture_data.append(self.view(pq.Quantity)[frame][v[0]][v[1]])
average = picture_data[0]
for b in range(1, len(picture_data)):
average += picture_data[b]
data.append((average * 1.0) / len(i))
analogsignal_list.append(
AnalogSignal(
data, units=self.units, t_start=self.t_start, sampling_rate=self.sampling_rate
)
)
return analogsignal_list
def _repr_pretty_(self, pp, cycle):
"""
Handle pretty-printing the :class:`ImageSequence`.
"""
pp.text(
"{cls} {nframe} frames with width {width} px and height {height} px; "
"units {units}; datatype {dtype} ".format(
cls=self.__class__.__name__,
nframe=self.shape[0],
height=self.shape[1],
width=self.shape[2],
units=self.units.dimensionality.string,
dtype=self.dtype,
)
)
def _pp(line):
pp.breakable()
with pp.group(indent=1):
pp.text(line)
for line in [
"sampling rate: {!s}".format(self.sampling_rate),
"spatial_scale: {!s}".format(self.spatial_scale),
]:
_pp(line)
def _check_consistency(self, other):
"""
Check if the attributes of another :class:`ImageSequence`
are compatible with this one.
"""
if isinstance(other, ImageSequence):
for attr in ("sampling_rate", "spatial_scale", "t_start"):
if getattr(self, attr) != getattr(other, attr):
raise ValueError("Inconsistent values of %s" % attr)
# t_start attribute is handled as a property so type checking can be done
@property
def t_start(self):
"""
Time when sequence begins.
"""
return self._t_start
@t_start.setter
def t_start(self, start):
"""
Setter for :attr:`t_start`
"""
if start is None:
raise ValueError("t_start cannot be None")
self._t_start = start
@property
def duration(self):
"""
Sequence duration
(:attr:`size` * :attr:`frame_duration`)
"""
return self.shape[0] / self.sampling_rate
@property
def t_stop(self):
"""
Time when Sequence ends.
(:attr:`t_start` + :attr:`duration`)
"""
return self.t_start + self.duration
@property
def times(self):
"""
The time points of each frame in the sequence
(:attr:`t_start` + arange(:attr:`shape`)/:attr:`sampling_rate`)
"""
return self.t_start + np.arange(self.shape[0]) / self.sampling_rate
@property
def frame_duration(self):
"""
Duration of a single image frame in the sequence.
(1/:attr:`sampling_rate`)
"""
return 1.0 / self.sampling_rate
@frame_duration.setter
def frame_duration(self, duration):
"""
Setter for :attr:`frame_duration`
"""
if duration is None:
raise ValueError("frame_duration cannot be None")
elif not hasattr(duration, "units"):
raise ValueError("frame_duration must have units")
self.sampling_rate = 1.0 / duration
|
f2cdd7c939c87b6daec6902938ccc8c6c893be85
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DQM/L1TMonitor/python/L1TRPCTPG_offline_cff.py
|
4a4f26a88f5f777906145d188ef7ad59d34254f1
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
L1TRPCTPG_offline_cff.py
|
import FWCore.ParameterSet.Config as cms
from DQM.L1TMonitor.L1TRPCTPG_cfi import *
from EventFilter.RPCRawToDigi.RPCSQLiteCabling_cfi import *
from EventFilter.RPCRawToDigi.rpcUnpacker_cfi import *
#l1trpctpg.rpctpgSource = cms.InputTag("rpcunpacker")
#l1trpctpg.rpctfSource = cms.InputTag("gtUnpack")
l1trpctpgpath = cms.Path(rpcunpacker*l1trpctpg)
|
a3dc4690450da0761295b30ab7e76e5b5b8aec37
|
8ca4992e5c7f009147875549cee21c0efb7c03eb
|
/mmseg/core/evaluation/__init__.py
|
20ae97e2a80b4f0109b8b4ff42e1fcb87cd7fd9d
|
[
"Apache-2.0"
] |
permissive
|
JiayuZou2020/DiffBEV
|
0ada3f505fc5106d8b0068c319f0b80ed366b673
|
527acdb82ac028061893d9d1bbe69e589efae2a0
|
refs/heads/main
| 2023-05-23T07:25:39.465813
| 2023-04-04T02:53:05
| 2023-04-04T02:53:05
| 613,895,691
| 181
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .class_names import get_classes, get_palette
from .eval_hooks import DistEvalHook, EvalHook
from .metrics import (eval_metrics, intersect_and_union, mean_dice,
                      mean_fscore, mean_iou, pre_eval_to_metrics)

__all__ = [
    'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore',
    'eval_metrics', 'get_classes', 'get_palette', 'pre_eval_to_metrics',
    'intersect_and_union'
]
|
c53508812e26d4d8a0506f55fd39a8fbe5fb5638
|
141c5ef07df60b1c9f726e4605b78a2a7c1243e9
|
/tests/test_togglex.py
|
17eed1101a16edb4ea755693fe51e537d1f574c0
|
[
"MIT"
] |
permissive
|
albertogeniola/MerossIot
|
cd8abaac236a7fb442bdf9613c7e6760123c8bd3
|
de1c22696511eee106961da3f22d3030ed9c254c
|
refs/heads/0.4.X.X
| 2023-09-01T11:11:09.793153
| 2023-04-01T15:15:50
| 2023-04-01T15:15:50
| 146,365,723
| 467
| 102
|
MIT
| 2023-09-11T06:42:13
| 2018-08-27T23:30:56
|
Python
|
UTF-8
|
Python
| false
| false
| 6,954
|
py
|
test_togglex.py
|
import os
from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from meross_iot.controller.mixins.garage import GarageOpenerMixin
from meross_iot.controller.mixins.toggle import ToggleXMixin
from meross_iot.http_api import MerossHttpClient
from meross_iot.manager import MerossManager
from meross_iot.model.enums import OnlineStatus
from tests import async_get_client
if os.name == 'nt':
import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
else:
import asyncio
class TestToggleX(AioHTTPTestCase):
async def get_application(self):
return web.Application()
async def setUpAsync(self):
# Wait some time before next test-burst
await asyncio.sleep(10)
self.meross_client, self.requires_logout = await async_get_client()
# Look for a device to be used for this test
self.meross_manager = MerossManager(http_client=self.meross_client)
await self.meross_manager.async_init()
devices = await self.meross_manager.async_device_discovery()
self.toggle_devices = self.meross_manager.find_devices(device_class=ToggleXMixin, online_status=OnlineStatus.ONLINE)
# Filter away garage openers: they are tested independently.
self.toggle_devices = list(filter(lambda x: not isinstance(x, GarageOpenerMixin), self.toggle_devices))
if len(self.toggle_devices) < 1:
self.test_device = None
else:
self.test_device = self.toggle_devices[0]
@unittest_run_loop
async def test_toggle_local_state(self):
if self.test_device is None:
self.skipTest("No ToggleX device has been found to run this test on.")
print(f"Testing device {self.test_device.name}")
# Turn off device to start from a clean state
r = await self.test_device.async_turn_off()
# Turn on the device
r = await self.test_device.async_turn_on()
self.assertTrue(self.test_device.is_on())
# Turn off the device
await asyncio.sleep(1)
r = await self.test_device.async_turn_off()
self.assertFalse(self.test_device.is_on())
@unittest_run_loop
async def test_toggle_multi_channel(self):
# Search for a device with multiple channels
multi_channel_devices = list(filter(lambda d: len(d.channels) > 1, self.toggle_devices))
if len(multi_channel_devices) < 1:
self.skipTest("Could not find any online device supporting more than 1 channel")
# Toggle non master switches
d = multi_channel_devices[0]
print(f"Testing device {d.name}")
for c in d.channels:
if c.is_master_channel:
continue
await d.async_turn_on(channel=c.index)
self.assertEqual(d.is_on(channel=c.index), True)
await asyncio.sleep(1)
await d.async_turn_off(channel=c.index)
self.assertEqual(d.is_on(channel=c.index), False)
@unittest_run_loop
async def test_toggle_master_switch(self):
# Search for a device with multiple channels
multi_channel_devices = list(filter(lambda d: len(d.channels) > 1, self.toggle_devices))
if len(multi_channel_devices) < 1:
self.skipTest("Could not find any online device supporting more than 1 channel")
# Turn on non-master switches
d = multi_channel_devices[0]
print(f"Testing device {d.name}")
master = None
for c in d.channels:
if c.is_master_channel:
master = c
continue
await d.async_turn_on(channel=c.index)
self.assertEqual(d.is_on(channel=c.index), True)
await asyncio.sleep(1)
# Turn-off master switch
self.assertIsNotNone(master)
await d.async_turn_off(channel=master.index)
# Give some time to the library to get the PUSH notification
# Then make sure that the master switch has turned off all the available switches.
await asyncio.sleep(2)
for c in d.channels:
self.assertEqual(d.is_on(channel=c.index), False)
@unittest_run_loop
async def test_usb_switches(self):
# Search for a device with usb channel
usb_dev = None
usb_channel = None
for d in self.toggle_devices:
for c in d.channels:
if c.is_usb:
usb_dev = d
usb_channel = c
break
if usb_dev is not None:
break
if usb_dev is None:
self.skipTest("Could not find any device with an usb channel")
print(f"Testing device {usb_dev.name}")
# Turn the channel off
await usb_dev.async_turn_off(channel=usb_channel.index)
self.assertFalse(usb_dev.is_on(channel=usb_channel.index))
await asyncio.sleep(1)
await usb_dev.async_turn_on(channel=usb_channel.index)
self.assertTrue(usb_dev.is_on(channel=usb_channel.index))
@unittest_run_loop
async def test_toggle_push_notification(self):
if self.test_device is None:
self.skipTest("No ToggleX device has been found to run this test on.")
print(f"Testing device {self.test_device.name}")
# Create a new manager
new_meross_client, requires_logout = await async_get_client()
m = None
try:
# Retrieve the same device with another manager
m = MerossManager(http_client=new_meross_client)
await m.async_init()
await m.async_device_discovery()
devs = m.find_devices(device_uuids=(self.test_device.uuid,))
dev = devs[0]
# Turn off device to start from a clean state
r = await self.test_device.async_turn_off()
await asyncio.sleep(2)
# Turn on the device
r = await self.test_device.async_turn_on()
# Wait a bit and make sure the other manager received the push notification
await asyncio.sleep(2)
self.assertTrue(self.test_device.is_on())
self.assertTrue(dev.is_on())
# Turn off the device
await asyncio.sleep(1)
r = await self.test_device.async_turn_off()
# Wait a bit and make sure the other manager received the push notification
await asyncio.sleep(2)
self.assertFalse(self.test_device.is_on())
self.assertFalse(dev.is_on())
finally:
if m is not None:
m.close()
if requires_logout:
await new_meross_client.async_logout()
async def tearDownAsync(self):
if self.requires_logout:
await self.meross_client.async_logout()
self.meross_manager.close()
# Give asyncio a chance to clean everything up
await asyncio.sleep(1)
|
cfa72fc8c6a8c089b91eb996016f98742651dab5
|
e7795082c0131682803a09e929a86b2deddeab74
|
/app/case/__init__.py
|
b822061bc3b529152976805e25bf778487d78217
|
[
"MIT"
] |
permissive
|
liwanlei/FXTest
|
01de3ad55849b16c49d93b58d1aae21fd0fdafa0
|
aeda58d01c14194290ca149d411c3a8596cca82d
|
refs/heads/master
| 2023-04-01T15:45:26.668688
| 2023-03-19T05:19:54
| 2023-03-19T05:19:54
| 97,098,845
| 807
| 419
|
MIT
| 2022-04-23T06:52:16
| 2017-07-13T08:27:48
|
Python
|
UTF-8
|
Python
| false
| false
| 139
|
py
|
__init__.py
|
"""
@author: lileilei
@file: __init__.py.py
@time: 2018/1/31 13:19
"""
from app.case.views import case
from app.case import views, urls
|
699c211ac11ed7ba92b9d5e402f905a638c272fb
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/KoubeiMerchantDepartmentShopModifyModel.py
|
56a237d912a1cdf2c9ce1215270d0f3256003bda
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,413
|
py
|
KoubeiMerchantDepartmentShopModifyModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.SimpleShopModel import SimpleShopModel
from alipay.aop.api.domain.SimpleShopModel import SimpleShopModel
class KoubeiMerchantDepartmentShopModifyModel(object):
def __init__(self):
self._auth_code = None
self._dept_id = None
self._dept_type = None
self._shop_list_to_add = None
self._shop_list_to_remove = None
@property
def auth_code(self):
return self._auth_code
@auth_code.setter
def auth_code(self, value):
self._auth_code = value
@property
def dept_id(self):
return self._dept_id
@dept_id.setter
def dept_id(self, value):
self._dept_id = value
@property
def dept_type(self):
return self._dept_type
@dept_type.setter
def dept_type(self, value):
self._dept_type = value
@property
def shop_list_to_add(self):
return self._shop_list_to_add
@shop_list_to_add.setter
def shop_list_to_add(self, value):
if isinstance(value, list):
self._shop_list_to_add = list()
for i in value:
if isinstance(i, SimpleShopModel):
self._shop_list_to_add.append(i)
else:
self._shop_list_to_add.append(SimpleShopModel.from_alipay_dict(i))
@property
def shop_list_to_remove(self):
return self._shop_list_to_remove
@shop_list_to_remove.setter
def shop_list_to_remove(self, value):
if isinstance(value, list):
self._shop_list_to_remove = list()
for i in value:
if isinstance(i, SimpleShopModel):
self._shop_list_to_remove.append(i)
else:
self._shop_list_to_remove.append(SimpleShopModel.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.auth_code:
if hasattr(self.auth_code, 'to_alipay_dict'):
params['auth_code'] = self.auth_code.to_alipay_dict()
else:
params['auth_code'] = self.auth_code
if self.dept_id:
if hasattr(self.dept_id, 'to_alipay_dict'):
params['dept_id'] = self.dept_id.to_alipay_dict()
else:
params['dept_id'] = self.dept_id
if self.dept_type:
if hasattr(self.dept_type, 'to_alipay_dict'):
params['dept_type'] = self.dept_type.to_alipay_dict()
else:
params['dept_type'] = self.dept_type
if self.shop_list_to_add:
if isinstance(self.shop_list_to_add, list):
for i in range(0, len(self.shop_list_to_add)):
element = self.shop_list_to_add[i]
if hasattr(element, 'to_alipay_dict'):
self.shop_list_to_add[i] = element.to_alipay_dict()
if hasattr(self.shop_list_to_add, 'to_alipay_dict'):
params['shop_list_to_add'] = self.shop_list_to_add.to_alipay_dict()
else:
params['shop_list_to_add'] = self.shop_list_to_add
if self.shop_list_to_remove:
if isinstance(self.shop_list_to_remove, list):
for i in range(0, len(self.shop_list_to_remove)):
element = self.shop_list_to_remove[i]
if hasattr(element, 'to_alipay_dict'):
self.shop_list_to_remove[i] = element.to_alipay_dict()
if hasattr(self.shop_list_to_remove, 'to_alipay_dict'):
params['shop_list_to_remove'] = self.shop_list_to_remove.to_alipay_dict()
else:
params['shop_list_to_remove'] = self.shop_list_to_remove
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiMerchantDepartmentShopModifyModel()
if 'auth_code' in d:
o.auth_code = d['auth_code']
if 'dept_id' in d:
o.dept_id = d['dept_id']
if 'dept_type' in d:
o.dept_type = d['dept_type']
if 'shop_list_to_add' in d:
o.shop_list_to_add = d['shop_list_to_add']
if 'shop_list_to_remove' in d:
o.shop_list_to_remove = d['shop_list_to_remove']
return o
|
b0c11edc74cf58c9191cc4665e9ad976224be07d
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/sphere/azure-mgmt-sphere/azure/mgmt/sphere/models/_azure_sphere_mgmt_client_enums.py
|
57c7a28044110f9ac29e75c7472fec668253247a
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,270
|
py
|
_azure_sphere_mgmt_client_enums.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class ActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum. Indicates the action type. "Internal" refers to actions that are for internal only APIs."""
INTERNAL = "Internal"
class AllowCrashDumpCollection(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Allow crash dumps values."""
ENABLED = "Enabled"
"""Crash dump collection enabled"""
DISABLED = "Disabled"
"""Crash dump collection disabled"""
class CapabilityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Capability image type."""
APPLICATION_DEVELOPMENT = "ApplicationDevelopment"
"""Application development capability"""
FIELD_SERVICING = "FieldServicing"
"""Field servicing capability"""
class CertificateStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Certificate status values."""
ACTIVE = "Active"
"""Certificate is active"""
INACTIVE = "Inactive"
"""Certificate is inactive"""
EXPIRED = "Expired"
"""Certificate has expired"""
REVOKED = "Revoked"
"""Certificate has been revoked"""
class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The type of identity that created the resource."""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class ImageType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Image type values."""
INVALID_IMAGE_TYPE = "InvalidImageType"
"""Invalid image."""
ONE_BL = "OneBl"
"""One Bl image type"""
PLUTON_RUNTIME = "PlutonRuntime"
"""Pluton image type"""
WIFI_FIRMWARE = "WifiFirmware"
"""Wifi firmware image type"""
SECURITY_MONITOR = "SecurityMonitor"
"""Security monitor image type"""
NORMAL_WORLD_LOADER = "NormalWorldLoader"
"""Normal world loader image type"""
NORMAL_WORLD_DTB = "NormalWorldDtb"
"""Normal world dtb image type"""
NORMAL_WORLD_KERNEL = "NormalWorldKernel"
"""Normal world kernel image type"""
ROOT_FS = "RootFs"
"""Root FS image type"""
SERVICES = "Services"
"""Services image type"""
APPLICATIONS = "Applications"
"""Applications image type"""
FW_CONFIG = "FwConfig"
"""FW config image type"""
BOOT_MANIFEST = "BootManifest"
"""Boot manifest image type"""
NWFS = "Nwfs"
"""Nwfs image type"""
TRUSTED_KEYSTORE = "TrustedKeystore"
"""Trusted key store image type"""
POLICY = "Policy"
"""Policy image type"""
CUSTOMER_BOARD_CONFIG = "CustomerBoardConfig"
"""Customer board config image type"""
UPDATE_CERT_STORE = "UpdateCertStore"
"""Update certificate store image type"""
BASE_SYSTEM_UPDATE_MANIFEST = "BaseSystemUpdateManifest"
"""Base system update manifest image type"""
FIRMWARE_UPDATE_MANIFEST = "FirmwareUpdateManifest"
"""Firmware update manifest image type"""
CUSTOMER_UPDATE_MANIFEST = "CustomerUpdateManifest"
"""Customer update manifest image type"""
RECOVERY_MANIFEST = "RecoveryManifest"
"""Recovery manifest image type"""
MANIFEST_SET = "ManifestSet"
"""manifest set image type"""
OTHER = "Other"
"""Other image type"""
class Origin(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The intended executor of the operation; as in Resource Based Access Control (RBAC) and audit
logs UX. Default value is "user,system".
"""
USER = "user"
SYSTEM = "system"
USER_SYSTEM = "user,system"
class OSFeedType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""OS feed type values."""
RETAIL = "Retail"
"""Retail OS feed type."""
RETAIL_EVAL = "RetailEval"
"""Retail evaluation OS feed type."""
class ProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Provisioning state of the resource."""
SUCCEEDED = "Succeeded"
"""Resource has been created."""
FAILED = "Failed"
"""Resource creation failed."""
CANCELED = "Canceled"
"""Resource creation was canceled."""
PROVISIONING = "Provisioning"
"""The resource is being provisioned"""
UPDATING = "Updating"
"""The resource is being updated"""
DELETING = "Deleting"
"""The resource is being deleted"""
ACCEPTED = "Accepted"
"""The resource create request has been accepted"""
class RegionalDataBoundary(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Regional data boundary values."""
NONE = "None"
"""No data boundary"""
EU = "EU"
"""EU data boundary"""
class UpdatePolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Update policy values."""
UPDATE_ALL = "UpdateAll"
"""Update all policy."""
NO3_RD_PARTY_APP_UPDATES = "No3rdPartyAppUpdates"
"""No update for 3rd party app policy."""
# ===== erdogant/findpeaks :: /findpeaks/filters/lee_sigma.py =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2023: Caroline Goehner: <carolinesophie.goehner@eurac.edu> <carosophie.goehner@gmail.com>
# This part of the library was written at Eurac Research in the
# framework of the project ScaleAgData (SCALING ΑGRICULTURAL SENSOR
# DATA for an improved monitoring of agri-environmental conditions.
# Duration: 01/01/2023 - 31/12/2026) funded by the Horizon Europe
# program under the grant agreement no 101086355.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import xarray as xr
from joblib import Parallel, delayed
sigma_DEFAULT = 0.9 # for general applications
win_size_DEFAULT = 7
num_looks_DEFAULT = 1
tk_DEFAULT = 5 # as in S1TBX
num_cores_DEFAULT = -1
data_measure_DEFAULT = "intensity"
def assert_parameters(sigma, win_size, num_looks, tk, data_measure):
"""
Asserts parameters in range.
Parameters:
- sigma: in [0.5, 0.6, 0.7, 0.8, 0.9]
- win_size: should be odd, at least 3
- num_looks: in [1, 2, 3, 4]
- tk: in [5, 6, 7]
- data_measure: "intensity" or "amplitude"
"""
if sigma not in [0.5, 0.6, 0.7, 0.8, 0.9]: raise Exception("Sigma parameter has to be 0.5, 0.6, 0.7, 0.8, or 0.9, submitted %s" %(sigma))
if win_size < 3: raise Exception('ERROR: win size must be at least 3')
if num_looks not in [1, 2, 3, 4]: raise Exception("num_looks parameter has to be 1, 2, 3 or 4, submitted %s" %(num_looks))
if tk not in [5, 6, 7]: print('[findpeaks] >For general applications it is recommended to use threshold tk between 5 and 7. You provided %s.' %(tk))
if data_measure not in ["intensity", "amplitude"]: raise Exception('ERROR: data_measure has to be "intensity" or "amplitude". You provided %s.' %(data_measure))
def ptTar(x, y, img, Z98, tk):
"""
    Detect whether the pixel belongs to a point target by examining its surrounding pixels
Parameters:
- x: int
X-coordinate of the pixel.
- y: int
Y-coordinate of the pixel.
- img: xarray
Input image.
- Z98: ndarray
Threshold of the 98th percentile of the img.
- tk: int
Threshold for number of K neighbouring pixels > Z98 to classify the pixel as point target, typically 5.
"""
for c in [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]:
a = x+c[0]
b = y+c[1]
win = img[a-1:a+1, b-1:b+1] # 3x3 windows for pixels surrounding the center pixel
K_win = np.count_nonzero(win >= Z98) # number of pixels outside the Z98
if K_win >= tk: # is point target
ptTarget = True
break
else:
ptTarget = False
continue
return(ptTarget)
def lee_sigma_filter(img,
sigma = sigma_DEFAULT,
win_size = win_size_DEFAULT,
num_looks = num_looks_DEFAULT,
tk = tk_DEFAULT,
num_cores = num_cores_DEFAULT,
data_measure = data_measure_DEFAULT):
"""Lee sigma filter.
Description
-----------
Improved Lee Sigma, according to Lee Sigma filter in SNAP Sentinel-1 Toolbox.
Apply the filter with a window of win_size x win_size to a numpy matrix (containing the image), before converting to dB.
Jong-Sen Lee, Jen-Hung Wen, T. L. Ainsworth, Kun-Shan Chen and A. J. Chen, "Improved Sigma Filter for Speckle Filtering of SAR Imagery",
in IEEE Transactions on Geoscience and Remote Sensing, vol. 47, no. 1, pp. 202-213, Jan. 2009, doi: 10.1109/TGRS.2008.2002881.
Parameters
----------
img : numpy.ndarray or xarray.DataArray
Input image.
sigma : float, (default: 0.9)
Speckle noise standard deviation.
win_size : int, int (default: 7)
Window size.
num_looks : int, (default: 1)
Number of looks of the SAR img.
tk: int, (default: 5)
Threshold of neighbouring pixels outside of the 98th percentile, typically between 5 and 7.
num_cores: int, (default: -1)
Number of cores to use for parallel computing, if -1 all CPUs are used, if 1 no parallel computing is used.
Returns
-------
img_filtered : numpy.ndarray or xarray.DataArray
Filtered image, type depending on input type.
Examples
--------
>>> import findpeaks
>>> import matplotlib.pyplot as plt
>>> img = findpeaks.import_example('2dpeaks_image')
>>> # Resize
>>> img = findpeaks.stats.resize(img, size=(300,300))
>>> # Make grey image
>>> img = findpeaks.stats.togray(img)
>>> # Scale between [0-255]
>>> img = findpeaks.stats.scale(img)
>>> # Filter
>>> img_filtered = findpeaks.stats.lee_sigma_filter(img.copy(), win_size=7)
>>>
>>> plt.figure()
>>> fig, axs = plt.subplots(1,2)
>>> axs[0].imshow(img, cmap='gray'); axs[0].set_title('Input')
>>> axs[1].imshow(img_filtered, cmap='gray'); axs[1].set_title('Lee sigma filter')
"""
if win_size < 3: raise Exception('[findpeaks] >ERROR: win size must be at least 3')
if len(img.shape) > 2: raise Exception('[findpeaks] >ERROR: Image should be 2D. Hint: set the parameter: togray=True')
if ((win_size % 2) == 0): print('[findpeaks] >It is highly recommended to use odd window sizes. You provided %s, an even number.' % (win_size))
assert_parameters(sigma, win_size, num_looks, tk, data_measure) # check validity of input parameters
if data_measure == "intensity":
if num_looks == 1:
if sigma == 0.5:
I1 = 0.436 # lower sigma range
I2 = 1.920 # upper sigma range
IsigmaVP = 0.4057 # speckle noise standard deviation (adjusted)
elif sigma == 0.6:
I1 = 0.343
I2 = 2.210
IsigmaVP = 0.4954
elif sigma == 0.7:
I1 = 0.254
I2 = 2.582
IsigmaVP = 0.5911
elif sigma == 0.8:
I1 = 0.168
I2 = 3.094
IsigmaVP = 0.6966
elif sigma == 0.9:
I1 = 0.084
I2 = 3.941
IsigmaVP = 0.8191
elif num_looks == 2:
if sigma == 0.5:
I1 = 0.582
I2 = 1.584
IsigmaVP = 0.2763
elif sigma == 0.6:
I1 = 0.501
I2 = 1.755
IsigmaVP = 0.3388
elif sigma == 0.7:
I1 = 0.418
I2 = 1.972
IsigmaVP = 0.4062
elif sigma == 0.8:
I1 = 0.327
I2 = 2.260
IsigmaVP = 0.4810
elif sigma == 0.9:
I1 = 0.221
I2 = 2.744
IsigmaVP = 0.5699
elif num_looks == 3:
if sigma == 0.5:
I1 = 0.652
I2 = 1.458
IsigmaVP = 0.2222
elif sigma == 0.6:
I1 = 0.580
I2 = 1.586
IsigmaVP = 0.2736
elif sigma == 0.7:
I1 = 0.505
I2 = 1.751
IsigmaVP = 0.3280
elif sigma == 0.8:
I1 = 0.419
I2 = 1.965
IsigmaVP = 0.3892
elif sigma == 0.9:
I1 = 0.313
I2 = 2.320
IsigmaVP = 0.4624
elif num_looks == 4:
if sigma == 0.5:
I1 = 0.694
I2 = 1.385
IsigmaVP = 0.1921
elif sigma == 0.6:
I1 = 0.630
I2 = 1.495
IsigmaVP = 0.2348
elif sigma == 0.7:
I1 = 0.560
I2 = 1.627
IsigmaVP = 0.2825
elif sigma == 0.8:
I1 = 0.480
I2 = 1.804
IsigmaVP = 0.3354
elif sigma == 0.9:
I1 = 0.378
I2 = 2.094
IsigmaVP = 0.3991
elif data_measure == "amplitude":
if num_looks == 1:
if sigma == 0.5:
A1 = 0.653997
A2 = 1.40002
AsigmaVP = 0.208349
elif sigma == 0.6:
A1 = 0.578998
A2 = 1.50601
AsigmaVP = 0.255358
elif sigma == 0.7:
A1 = 0.496999
A2 = 1.63201
AsigmaVP = 0.305303
elif sigma == 0.8:
A1 = 0.403999
A2 = 1.79501
AsigmaVP = 0.361078
elif sigma == 0.9:
A1 = 0.286
A2 = 2.04301
AsigmaVP = 0.426375
elif num_looks == 2:
if sigma == 0.5:
A1 = 0.76
A2 = 1.263
AsigmaVP = 0.139021
elif sigma == 0.6:
A1 = 0.705
A2 = 1.332
AsigmaVP = 0.169777
elif sigma == 0.7:
A1 = 0.643
A2 = 1.412
AsigmaVP = 0.206675
elif sigma == 0.8:
A1 = 0.568
A2 = 1.515
AsigmaVP = 0.244576
elif sigma == 0.9:
A1 = 0.467
A2 = 1.673
AsigmaVP = 0.29107
elif num_looks == 3:
if sigma == 0.5:
A1 = 0.806
A2 = 1.21
AsigmaVP = 0.109732
elif sigma == 0.6:
A1 = 0.76
A2 = 1.263
AsigmaVP = 0.138001
elif sigma == 0.7:
A1 = 0.708
A2 = 1.327
AsigmaVP = 0.163686
elif sigma == 0.8:
A1 = 0.645
A2 = 1.408
AsigmaVP = 0.19597
elif sigma == 0.9:
A1 = 0.557
A2 = 1.531
AsigmaVP = 0.234219
elif num_looks == 4:
if sigma == 0.5:
A1 = 0.832
A2 = 1.179
AsigmaVP = 0.0894192
elif sigma == 0.6:
A1 = 0.793
A2 = 1.226
AsigmaVP = 0.112018
elif sigma == 0.7:
A1 = 0.747
A2 = 1.279
AsigmaVP = 0.139243
elif sigma == 0.8:
A1 = 0.691
A2 = 1.347
AsigmaVP = 0.167771
elif sigma == 0.9:
A1 = 0.613
A2 = 1.452
AsigmaVP = 0.201036
# variables
final_img = None
if isinstance(img, xr.DataArray): # make it possible to use xarray dataarrays as well
final_img = img.copy()
img = img.values
win_size_h = int(win_size/2) # "half" window as distance from center pixel in each direction
if data_measure == "intensity":
sigmaV = 1.0 / (num_looks ** 0.5) # standard deviation of the multiplicative speckle noise, depending on number of looks
sigmaVP = IsigmaVP
sigmaRangeLow = I1
sigmaRangeHigh = I2
elif data_measure == "amplitude":
sigmaV = 0.5227 / (num_looks ** 0.5)
sigmaVP = AsigmaVP
sigmaRangeLow = A1
sigmaRangeHigh = A2
sigmaVSqr = sigmaV**2 # variance of the multiplicative speckle noise
Z98 = np.percentile(img, 98) # threshold of the 98th percentile of the SAR img
N, M = img.shape
img_filtered = np.zeros_like(img, dtype=float)
def filter_pixel(i, j):
xleft = i - win_size_h # define left x coordinate of the selected window size
xright = i + win_size_h+1 # define right x coordinate of the selected window size, add 1 for indexing ndarrays
if xleft < 0: xleft = 0 # if outside the image dimensions set to min x coordinate
if xright >= N: xright = N # if outside the image dimensions set to max x coordinate
xleft3 = i - 1 # for 3x3 window
xright3 = i + 2
if xleft3 < 0: xleft3 = 0
if xright3 >= N: xright3 = N
yup = j - win_size_h # in y dimension
ydown = j + win_size_h+1
if yup < 0: yup = 0
if ydown >= M: ydown = M
yup3 = j - 1 # for 3x3 window
ydown3 = j + 2
if yup3 < 0: yup3 = 0
if ydown3 >= M: ydown3 = M
# 1. Point target detection + preservation
z = img[i, j] # center pixel value of window
window = img[xleft:xright, yup:ydown] # window of selected size
window_3x3 = img[xleft3:xright3, yup3:ydown3] # 3x3 window
K = np.count_nonzero(window_3x3 >= Z98) # number of pixels in the 3x3 window outside the Z98
if (ptTar(i, j, img, Z98, tk) == False # not part of a (earlier) point target
and (z.item() >= Z98) == False # pixel value is within the 98th percentile of the SAR img
or ((z.item() >= Z98) == True and (K >= tk) == False) # is not in the 98th percentile, but has enough surrounding pixels that are neither -> it will be filtered
):
# 2. Pixels selection based on the sigma range
# - MMSE on 3x3 using orig_sigmaVP to compute a priori mean (priori_x)
mean_z = window_3x3.mean() # local mean in 3x3
Var_z = window_3x3.var(dtype = np.float64) # local variance in 3x3
Var_x = (Var_z - mean_z**2 * sigmaVSqr) / (1 + sigmaVSqr) # Variance of x
if Var_x < 0: Var_x = 0.0 # according to s1tbx
b = Var_x / (Var_z+1e-50) # weight function - add small values to avoid nan weights when all the values are similar in the window
priori_x = (1-b) * mean_z + b * z # MMSE filter to calculate a priori mean
# - establish sigma range using LUT for sigma in Intensity img and num_looks:
XsigmaRangeLow = sigmaRangeLow * priori_x # lower sigma range
XsigmaRangeHigh = sigmaRangeHigh * priori_x # upper sigma range
sigmaVPSqr = sigmaVP**2 # speckle noise variance
# - select pixels in window if their values fall into sigma range, compute mean_z and Var_z
window = window[np.where(np.logical_and(window >= XsigmaRangeLow, window <= XsigmaRangeHigh))]
if np.count_nonzero(window) == 0: new_pix_value = z # when window is empty, according to S1TBX
else:
mean_z = window.mean() # local mean in the sigma range
Var_z = window.var(dtype = np.float64) # local variance in the sigma range
# 3. MMSE application
# - compute MMSE filter weight b using Var_x, based on mean_z, Var_z and sigmaVPSqr
Var_x = (Var_z - mean_z**2 * sigmaVPSqr) / (1 + sigmaVPSqr) # Variance of x
if Var_x < 0: Var_x = 0.0 # according to s1tbx
b = Var_x / (Var_z+1e-50) # weight function - add small values to avoid nan weights when all the values are similar in the window
# - filter center pixel using MMSE
new_pix_value = (1-b) * mean_z + b * z # new filtered pixel value
else: # center pixel is part of a (earlier) point target or is a point target pixel -> it will NOT be filtered
new_pix_value = z
return new_pix_value
# Parallel Process
result = Parallel(n_jobs=num_cores)(
delayed(filter_pixel)(i, j) for i in range(N) for j in range(M)
)
# Unpack the results
for (index, v), value in zip(np.ndenumerate(img_filtered), result):
img_filtered[index[0], index[1]] = value
if isinstance(final_img, xr.DataArray): # in case xarray dataarray was used
final_img.values = img_filtered
return final_img
else:
return img_filtered
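# --- Minimal usage sketch (illustrative only, not part of the filter API above) ---
# Run the filter on a small synthetic speckled image; num_cores=1 keeps joblib
# sequential so the example stays lightweight.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _noisy = _rng.gamma(shape=1.0, scale=100.0, size=(32, 32))
    _despeckled = lee_sigma_filter(_noisy, sigma=0.9, win_size=7, num_looks=1, num_cores=1)
    print(_noisy.var(), _despeckled.var())  # the variance typically drops after filtering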
# ===== CiscoTestAutomation/genieparser :: /src/genie/libs/parser/iosxe/tests/ShowCryptoPkiServerRequests/cli/equal/golden_output_csr8kv_subca_expected.py =====
expected_output = {
'request': {
'subordinate_ca': {
'1': {
'state': 'granted',
'fingerprint': '744566E755B84AEE18A86DF715D8EE33',
'subject_name': 'hostname=pki-reg2.cisco.com,cn=R1'
}
}
}
}
# ===== burakbayramli/books :: /neural-networks-and-deep-learning/src/old/deep_autoencoder.py =====
"""
deep_autoencoder
~~~~~~~~~~~~~~~~
A module which implements deep autoencoders.
"""
#### Libraries
# Standard library
import random
# My libraries
from backprop2 import Network, sigmoid_vec
# Third-party libraries
import numpy as np
def plot_helper(x):
import matplotlib
import matplotlib.pyplot as plt
x = np.reshape(x, (-1, 28))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.matshow(x, cmap = matplotlib.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.show()
class DeepAutoencoder(Network):
def __init__(self, layers):
"""
The list ``layers`` specifies the sizes of the nested
autoencoders. For example, if ``layers`` is [50, 20, 10] then
the deep autoencoder will be a neural network with layers of
size [50, 20, 10, 20, 50]."""
self.layers = layers
Network.__init__(self, layers+layers[-2::-1])
def train(self, training_data, epochs, mini_batch_size, eta,
lmbda):
"""
Train the DeepAutoencoder. The ``training_data`` is a list of
training inputs, ``x``, ``mini_batch_size`` is a single
positive integer, and ``epochs``, ``eta``, ``lmbda`` are lists
of parameters, with the different list members corresponding
to the different stages of training. For example, ``eta[0]``
is the learning rate used for the first nested autoencoder,
``eta[1]`` is the learning rate for the second nested
autoencoder, and so on. ``eta[-1]`` is the learning rate used
for the final stage of fine-tuning.
"""
print "\nTraining a %s deep autoencoder" % (
"-".join([str(j) for j in self.sizes]),)
training_data = double(training_data)
cur_training_data = training_data[::]
for j in range(len(self.layers)-1):
print "\nTraining the %s-%s-%s nested autoencoder" % (
self.layers[j], self.layers[j+1], self.layers[j])
print "%s epochs, mini-batch size %s, eta = %s, lambda = %s" % (
epochs[j], mini_batch_size, eta[j], lmbda[j])
self.train_nested_autoencoder(
j, cur_training_data, epochs[j], mini_batch_size, eta[j],
lmbda[j])
            # encode the training data with the layer just trained; its weights and
            # biases were copied back into self by train_nested_autoencoder above
            # (the original referenced an undefined local ``net`` here)
            cur_training_data = [
                (sigmoid_vec(np.dot(self.weights[j], x)+self.biases[j]),)*2
                for (x, _) in cur_training_data]
print "\nFine-tuning network weights with backpropagation"
print "%s epochs, mini-batch size %s, eta = %s, lambda = %s" % (
epochs[-1], mini_batch_size, eta[-1], lmbda[-1])
self.SGD(training_data, epochs[-1], mini_batch_size, eta[-1],
lmbda[-1])
def train_nested_autoencoder(
self, j, encoded_training_data, epochs, mini_batch_size, eta, lmbda):
"""
Train the nested autoencoder that starts at layer ``j`` in the
deep autoencoder. Note that ``encoded_training_data`` is a
list with entries of the form ``(x, x)``, where the ``x`` are
encoded training inputs for layer ``j``."""
net = Network([self.layers[j], self.layers[j+1], self.layers[j]])
net.biases[0] = self.biases[j]
net.biases[1] = self.biases[-j-1]
net.weights[0] = self.weights[j]
net.weights[1] = self.weights[-j-1]
net.SGD(encoded_training_data, epochs, mini_batch_size, eta, lmbda)
self.biases[j] = net.biases[0]
self.biases[-j-1] = net.biases[1]
self.weights[j] = net.weights[0]
self.weights[-j-1] = net.weights[1]
def train_nested_autoencoder_repl(
self, j, training_data, epochs, mini_batch_size, eta, lmbda):
"""
This is a convenience method that can be used from the REPL to
train the nested autoencoder that starts at level ``j`` in the
deep autoencoder. Note that ``training_data`` is the input
data for the first layer of the network, and is a list of
entries ``x``."""
self.train_nested_autoencoder(
j,
double(
[self.feedforward(x, start=0, end=j) for x in training_data]),
epochs, mini_batch_size, eta, lmbda)
def feature(self, j, k):
"""
Return the output if neuron number ``k`` in layer ``j`` is
activated, and all others are not active. """
a = np.zeros((self.sizes[j], 1))
a[k] = 1.0
return self.feedforward(a, start=j, end=self.num_layers)
def double(l):
return [(x, x) for x in l]
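# --- Illustrative sketch (depends on the accompanying backprop2 module) ---
# A DeepAutoencoder built from layers [50, 20, 10] is simply a Network whose layer
# sizes are mirrored to [50, 20, 10, 20, 50].
if __name__ == "__main__":
    _dae = DeepAutoencoder([50, 20, 10])
    assert _dae.sizes == [50, 20, 10, 20, 50]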
# ===== openstack/horizon :: /horizon/browsers/breadcrumb.py =====
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from horizon.utils import html
class Breadcrumb(html.HTMLElement):
def __init__(self, request, template, root,
subfolder_path, url, attr=None):
super().__init__()
self.template = template
self.request = request
self.root = root
self.subfolder_path = subfolder_path
self.url = url
self._subfolders = []
def get_subfolders(self):
if self.subfolder_path and not self._subfolders:
(parent, slash, folder) = self.subfolder_path.strip('/') \
.rpartition('/')
while folder:
path = "%s%s%s/" % (parent, slash, folder)
self._subfolders.insert(0, (folder, path))
(parent, slash, folder) = parent.rpartition('/')
return self._subfolders
def render(self):
"""Renders the table using the template from the table options."""
breadcrumb_template = template.loader.get_template(self.template)
extra_context = {"breadcrumb": self}
return breadcrumb_template.render(extra_context, self.request)
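# --- Illustrative sketch (not part of Horizon) ---
# get_subfolders() expands a subfolder path into (name, cumulative_path) pairs for
# the breadcrumb trail; the other constructor arguments are unused by that method,
# so placeholder values are passed here purely for demonstration.
if __name__ == "__main__":
    _crumb = Breadcrumb(None, None, "container", "media/photos/2023/", "/browse/")
    assert _crumb.get_subfolders() == [
        ("media", "media/"),
        ("photos", "media/photos/"),
        ("2023", "media/photos/2023/"),
    ]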
# ===== public/sonora :: /sonora/aio.py =====
import asyncio
import io
import aiohttp
import grpc.experimental.aio
from sonora import protocol
import sonora.client
def insecure_web_channel(url):
return WebChannel(url)
class WebChannel:
def __init__(self, url):
if not url.startswith("http") and "://" not in url:
url = f"http://{url}"
self._url = url
self._session = aiohttp.ClientSession()
async def __aenter__(self):
return self
async def __aexit__(self, exception_type, exception_value, traceback):
await self._session.close()
def __await__(self):
yield self
def unary_unary(self, path, request_serializer, response_deserializer):
return UnaryUnaryMulticallable(
self._session, self._url, path, request_serializer, response_deserializer
)
def unary_stream(self, path, request_serializer, response_deserializer):
return UnaryStreamMulticallable(
self._session, self._url, path, request_serializer, response_deserializer
)
def stream_unary(self, path, request_serializer, response_deserializer):
return sonora.client.NotImplementedMulticallable()
def stream_stream(self, path, request_serializer, response_deserializer):
return sonora.client.NotImplementedMulticallable()
class UnaryUnaryMulticallable(sonora.client.Multicallable):
def __call__(self, request, timeout=None, metadata=None):
call_metadata = self._metadata.copy()
if metadata is not None:
call_metadata.extend(protocol.encode_headers(metadata))
return UnaryUnaryCall(
request,
timeout,
call_metadata,
self._rpc_url,
self._session,
self._serializer,
self._deserializer,
)
class UnaryStreamMulticallable(sonora.client.Multicallable):
def __call__(self, request, timeout=None, metadata=None):
call_metadata = self._metadata.copy()
if metadata is not None:
call_metadata.extend(protocol.encode_headers(metadata))
return UnaryStreamCall(
request,
timeout,
call_metadata,
self._rpc_url,
self._session,
self._serializer,
self._deserializer,
)
class Call(sonora.client.Call):
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if self._response and not self._response.closed:
self._response.close()
def __del__(self):
if self._response and not self._response.closed:
self._response.close()
async def _get_response(self):
if self._response is None:
timeout = aiohttp.ClientTimeout(total=self._timeout)
self._response = await self._session.post(
self._url,
data=protocol.wrap_message(
False, False, self._serializer(self._request)
),
headers=dict(self._metadata),
timeout=timeout,
)
protocol.raise_for_status(self._response.headers)
return self._response
async def initial_metadata(self):
response = await self._get_response()
return response.headers.items()
async def trailing_metadata(self):
return self._trailers
class UnaryUnaryCall(Call):
@Call._raise_timeout(asyncio.TimeoutError)
def __await__(self):
response = yield from self._get_response().__await__()
data = yield from response.read().__await__()
response.release()
if not data:
return
buffer = io.BytesIO(data)
messages = protocol.unwrap_message_stream(buffer)
trailers, _, message = next(messages)
if trailers:
self._trailers = protocol.unpack_trailers(message)
return
else:
result = self._deserializer(message)
try:
trailers, _, message = next(messages)
except StopIteration:
pass
else:
if trailers:
self._trailers = protocol.unpack_trailers(message)
else:
raise ValueError("UnaryUnary should only return a single message")
protocol.raise_for_status(response.headers)
return result
class UnaryStreamCall(Call):
@Call._raise_timeout(asyncio.TimeoutError)
async def read(self):
response = await self._get_response()
async for trailers, _, message in protocol.unwrap_message_stream_async(
response.content
):
if trailers:
self._trailers = protocol.unpack_trailers(message)
break
else:
return self._deserializer(message)
response.release()
protocol.raise_for_status(response.headers, self._trailers)
return grpc.experimental.aio.EOF
@Call._raise_timeout(asyncio.TimeoutError)
async def __aiter__(self):
response = await self._get_response()
async for trailers, _, message in protocol.unwrap_message_stream_async(
response.content
):
if trailers:
self._trailers = protocol.unpack_trailers(message)
break
else:
yield self._deserializer(message)
response.release()
protocol.raise_for_status(response.headers, self._trailers)
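# --- Illustrative usage sketch (documentation only) ---
# "helloworld_pb2"/"helloworld_pb2_grpc" below are hypothetical protoc-generated
# modules; any generated async stub can be pointed at a WebChannel because it
# exposes the standard unary_unary/unary_stream multicallable interface.
async def _example_greeter_call():
    import helloworld_pb2
    import helloworld_pb2_grpc
    async with insecure_web_channel("http://localhost:8080") as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        reply = await stub.SayHello(helloworld_pb2.HelloRequest(name="sonora"))
        return reply.message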
# ===== pabigot/pyxb :: /tests/trac/test-issue-0073.py =====
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="di" default="32" type="xs:int"/>
<xs:element name="fi" fixed="21" type="xs:int"/>
<xs:element name="cfi">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:int"/>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="cdi">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:int"/>
</xs:simpleContent>
</xs:complexType>
</xs:element>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
from pyxb.utils import six  # six shim bundled with PyXB; needed for six.u() below
import unittest
import sys
class TestIssue0073 (unittest.TestCase):
def testDefault (self):
xmlt = six.u('<di/>');
self.assertEqual(CreateFromDocument(xmlt), 32)
xmlt = six.u('<di>32</di>');
self.assertEqual(CreateFromDocument(xmlt), 32)
xmlt = six.u('<cdi>32</cdi>');
self.assertEqual(CreateFromDocument(xmlt).value(), 32)
def testFixed (self):
xmlt = six.u('<fi/>');
self.assertEqual(CreateFromDocument(xmlt), 21)
xmlt = six.u('<fi>21</fi>');
self.assertEqual(CreateFromDocument(xmlt), 21)
xmlt = six.u('<cfi>21</cfi>');
self.assertEqual(CreateFromDocument(xmlt).value(), 21)
if __name__ == '__main__':
unittest.main()
# ===== SegmentationBLWX/sssegmentation :: /ssseg/modules/models/backbones/__init__.py =====
'''initialize'''
from .builder import BuildBackbone
from .bricks import (
BuildDropout, BuildActivation, BuildNormalization, Scale, L2Norm, makedivisible, truncnormal,
FFN, MultiheadAttention, nchwtonlc, nlctonchw, PatchEmbed, PatchMerging, AdaptivePadding,
DynamicConv2d, AdptivePaddingConv2d, SqueezeExcitationConv2d, DepthwiseSeparableConv2d, InvertedResidual, InvertedResidualV3,
)
# ===== tensorflow/probability :: /tensorflow_probability/python/internal/test_combinations_test.py =====
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests generating test combinations."""
from collections import OrderedDict
# Dependency imports
from tensorflow_probability.python.internal import test_combinations
from tensorflow_probability.python.internal import test_util
class TestingCombinationsTest(test_util.TestCase):
def test_combine(self):
self.assertEqual([{
"a": 1,
"b": 2
}, {
"a": 1,
"b": 3
}, {
"a": 2,
"b": 2
}, {
"a": 2,
"b": 3
}], test_combinations.combine(a=[1, 2], b=[2, 3]))
def test_arguments_sorted(self):
self.assertEqual([
OrderedDict([("aa", 1), ("ab", 2)]),
OrderedDict([("aa", 1), ("ab", 3)]),
OrderedDict([("aa", 2), ("ab", 2)]),
OrderedDict([("aa", 2), ("ab", 3)])
], test_combinations.combine(ab=[2, 3], aa=[1, 2]))
def test_combine_single_parameter(self):
self.assertEqual([{
"a": 1,
"b": 2
}, {
"a": 2,
"b": 2
}], test_combinations.combine(a=[1, 2], b=2))
def test_add(self):
self.assertEqual(
[{
"a": 1
}, {
"a": 2
}, {
"b": 2
}, {
"b": 3
}],
(test_combinations.combine(a=[1, 2]) +
test_combinations.combine(b=[2, 3])))
@test_combinations.generate(
test_combinations.combine(a=[1, 0], b=[2, 3], c=[1]))
class CombineTheTestSuite(test_util.TestCase):
def test_add_things(self, a, b, c):
self.assertLessEqual(3, a + b + c)
self.assertLessEqual(a + b + c, 5)
def test_add_things_one_more(self, a, b, c):
self.assertLessEqual(3, a + b + c)
self.assertLessEqual(a + b + c, 5)
def not_a_test(self, a=0, b=0, c=0):
del a, b, c
self.fail()
def _test_but_private(self, a=0, b=0, c=0):
del a, b, c
self.fail()
# Check that nothing funny happens to a non-callable that starts with "_test".
test_member = 0
if __name__ == "__main__":
test_util.main()
# ===== stdlib-js/stdlib :: /lib/node_modules/@stdlib/math/base/tools/hermitepoly/test/fixtures/python/runner.py =====
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate fixtures."""
import os
import json
import numpy as np
from scipy.special import eval_hermite
# Get the file path:
FILE = os.path.realpath(__file__)
# Extract the directory in which this file resides:
DIR = os.path.dirname(FILE)
def gen(n, x, name):
"""Generate fixture data and write to file.
# Arguments
* `n`: degree(s)
* `x`: domain
* `name::str`: output filename
# Examples
``` python
python> n = 1
python> x = linspace(-1000, 1000, 2001)
python> gen(n, x, './data.json')
```
"""
y = eval_hermite(n, x)
if isinstance(n, np.ndarray):
data = {
"n": n.tolist(),
"x": x.tolist(),
"expected": y.tolist()
}
else:
data = {
"n": n,
"x": x.tolist(),
"expected": y.tolist()
}
# Based on the script directory, create an output filepath:
filepath = os.path.join(DIR, name)
# Write the data to the output filepath as JSON:
with open(filepath, "w", encoding="utf-8") as outfile:
json.dump(data, outfile)
def main():
"""Generate fixture data."""
# Random values across `n` and `x`:
n = np.random.randint(1, 100, 1000)
x = np.random.random(1000)*100.0
gen(n, x, "random2.json")
# Medium negative:
x = np.linspace(-709.78, -1.0, 1000)
gen(1, x, "medium_negative_1.json")
gen(2, x, "medium_negative_2.json")
gen(5, x, "medium_negative_5.json")
# Medium positive:
x = np.linspace(1.0, 709.78, 1000)
gen(1, x, "medium_positive_1.json")
gen(2, x, "medium_positive_2.json")
gen(5, x, "medium_positive_5.json")
# Small positive:
x = np.linspace(2.0**-54, 1.0, 1000)
gen(1, x, "small_positive_1.json")
gen(2, x, "small_positive_2.json")
gen(5, x, "small_positive_5.json")
# Small negative:
x = np.linspace(-1.0, -2.0**-54, 1000)
gen(1, x, "small_negative_1.json")
gen(2, x, "small_negative_2.json")
gen(5, x, "small_negative_5.json")
# Tiny values:
x = np.linspace(-2.0**-54, 2.0**-54, 1000)
gen(1, x, "tiny_1.json")
gen(2, x, "tiny_2.json")
gen(5, x, "tiny_5.json")
if __name__ == "__main__":
main()
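# Worked example (illustrative): scipy's eval_hermite evaluates the physicists'
# Hermite polynomials, so H_1(x) = 2*x and H_2(x) = 4*x**2 - 2; the fixtures written
# above therefore store e.g. eval_hermite(1, 0.5) == 1.0 and eval_hermite(2, 1.0) == 2.0
# as the "expected" values for each sampled (n, x) pair.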
# ===== DestructHub/ProjectEuler :: /Problem008/Python/solution_1.py =====
#!/usr/bin/env python
# coding=utf-8
#
# Python Script
#
# Copyleft © Manoel Vilela
#
#
from functools import reduce
"""
Largest product in a series
Problem 8
The four adjacent digits in the 1000-digit number
that have the greatest product are 9 × 9 × 8 × 9 = 5832.
Find the thirteen adjacent digits in the 1000-digit number that have the greatest product.
What is the value of this product?
"""
data = '''\
73167176531330624919225119674426574742355349194934\
96983520312774506326239578318016984801869478851843\
85861560789112949495459501737958331952853208805511\
12540698747158523863050715693290963295227443043557\
66896648950445244523161731856403098711121722383113\
62229893423380308135336276614282806444486645238749\
30358907296290491560440772390713810515859307960866\
70172427121883998797908792274921901699720888093776\
65727333001053367881220235421809751254540594752243\
52584907711670556013604839586446706324415722155397\
53697817977846174064955149290862569321978468622482\
83972241375657056057490261407972968652414535100474\
82166370484403199890008895243450658541227588666881\
16427171479924442928230863465674813919123162824586\
17866458359124566529476545682848912883142607690042\
24219022671055626321111109370544217506941658960408\
07198403850962455444362981230987879927244284909188\
84580156166097919133875499200524063689912560717606\
05886116467109405077541002256983155200055935729725\
71636269561882670428252483600823257530420752963450\
'''
#method 1
# greatest = int()
# for i in range(len(data) - 13):
# product = reduce(lambda x, y: x*y, [int(x) for x in data[i:i + 13]])
# if product > greatest:
# greatest = product
# print greatest
# method 2
def product(num): return reduce(lambda x, y: x * y, [int(digit) for digit in num])
print(max([product(data[i:i + 13]) for i in range(len(data) - 13)]))
# ===== alipay/alipay-sdk-python-all :: /alipay/aop/api/domain/AlipayEbppInvoiceDetailOutputQueryModel.py =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppInvoiceDetailOutputQueryModel(object):
def __init__(self):
self._invoice_code = None
self._invoice_no = None
self._open_id = None
self._scene = None
self._skip_expense_progress_sync = None
self._user_id = None
@property
def invoice_code(self):
return self._invoice_code
@invoice_code.setter
def invoice_code(self, value):
self._invoice_code = value
@property
def invoice_no(self):
return self._invoice_no
@invoice_no.setter
def invoice_no(self, value):
self._invoice_no = value
@property
def open_id(self):
return self._open_id
@open_id.setter
def open_id(self, value):
self._open_id = value
@property
def scene(self):
return self._scene
@scene.setter
def scene(self, value):
self._scene = value
@property
def skip_expense_progress_sync(self):
return self._skip_expense_progress_sync
@skip_expense_progress_sync.setter
def skip_expense_progress_sync(self, value):
self._skip_expense_progress_sync = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.invoice_code:
if hasattr(self.invoice_code, 'to_alipay_dict'):
params['invoice_code'] = self.invoice_code.to_alipay_dict()
else:
params['invoice_code'] = self.invoice_code
if self.invoice_no:
if hasattr(self.invoice_no, 'to_alipay_dict'):
params['invoice_no'] = self.invoice_no.to_alipay_dict()
else:
params['invoice_no'] = self.invoice_no
if self.open_id:
if hasattr(self.open_id, 'to_alipay_dict'):
params['open_id'] = self.open_id.to_alipay_dict()
else:
params['open_id'] = self.open_id
if self.scene:
if hasattr(self.scene, 'to_alipay_dict'):
params['scene'] = self.scene.to_alipay_dict()
else:
params['scene'] = self.scene
if self.skip_expense_progress_sync:
if hasattr(self.skip_expense_progress_sync, 'to_alipay_dict'):
params['skip_expense_progress_sync'] = self.skip_expense_progress_sync.to_alipay_dict()
else:
params['skip_expense_progress_sync'] = self.skip_expense_progress_sync
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEbppInvoiceDetailOutputQueryModel()
if 'invoice_code' in d:
o.invoice_code = d['invoice_code']
if 'invoice_no' in d:
o.invoice_no = d['invoice_no']
if 'open_id' in d:
o.open_id = d['open_id']
if 'scene' in d:
o.scene = d['scene']
if 'skip_expense_progress_sync' in d:
o.skip_expense_progress_sync = d['skip_expense_progress_sync']
if 'user_id' in d:
o.user_id = d['user_id']
return o
# ===== burakbayramli/books :: /Python_Programming_in_OpenGL_Blank/PyRandQuat.py =====
# pyRandQuat.py
# Dr. Blank's version
# of the Quaternion Julia Set
# with a Mandelbrot option
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from math import *
from random import *
import sys
import psyco
psyco.full()
#define some globals
global vv
global aff
global wd
global ht
global MouseX
global MouseY
# for the complex arithmetic calculations
global cr
global ci
global cj
global ck
global wk
global count
global mand
global iter
global maxpoints
global quatpoints
# initial values for complex parameters
# change these for a different set
cr = -0.20
ci = 0.80
cj = 0.0
ck = 0.0
wk = 0.0
# start out in the Julia Set... mand = 0
# mand = 1 is the Mandelbrot set
mand = 0
iter = 10
# one million random points to test
# be patient!
maxpoints = 1000000
quatpoints = 0
# variable to store the display list
global ptcloud
#define the vertex points
vv = []
#define the affine identity matrix
aff = (1.0,0.0,0.0,0.0,
0.0,1.0,0.0,0.0,
0.0,0.0,1.0,0.0,
0.0,0.0,0.0,1.0)
#initial window and mouse settings
wd = 400
ht = 400
MouseX = wd/2
MouseY = ht/2
# calculate the quaternion fractal
def calcit():
global vv
global count
global quatpoints
vv = []
count = 0
n = 0
quatpoints = 0
while count < maxpoints:
count = count + 1
x = 4*random() - 2
y = 4*random() - 2
z = 4*random() - 2
leng = calcleng(x, y, z)
# the point is constrained, plot it!
if leng < 4:
quatpoints = quatpoints + 1
vv = vv + [(x,y,z)]
dolist()
def calcleng(x, y, z):
n = 0
w = wk
if mand == 1:
kr = x
ki = y
kj = z
kk = 0
else:
kr = cr
ki = ci
kj = cj
kk = ck
while n < iter:
n = n + 1
        # quaternion squaring: for q = x + y*i + z*j + w*k,
        # q*q = (x*x - y*y - z*z - w*w) + 2x*(y*i + z*j + w*k);
        # the constant (kr, ki, kj, kk) is then added, giving the iteration q -> q*q + c
temp = x+x
x = x*x - y*y - z*z - w*w + kr
y = temp*y + ki
z = temp*z + kj
w = temp*w + kk
# a form of the distance formula
dist = x*x + y*y + z*z + w*w
# if the point escapes to infinity, don't store it!
if dist > 4:
break
return dist
def dolist():
global ptcloud
# start storing the display list in ptcloud
ptcloud = glGenLists(1)
# compile the ptcloud points
glNewList(ptcloud, GL_COMPILE)
glPointSize(2.0)
glBegin(GL_POINTS)
for n in range(quatpoints):
glColor3f(sin(n),cos(n),4*sin(n)*cos(n))
glVertex3fv(vv[n])
glEnd()
glEndList()
def display():
global vv
global count
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
glMultMatrixf(aff)
glCallList(ptcloud)
glPopMatrix()
glFlush()
glutSwapBuffers()
def keyboard(key, x, y):
global mand
# toggle between the Julia and Mandelbrot sets
if key == 'm':
mand = 1
calcit()
if key == 'j':
mand = 0
calcit()
if key == chr(27) or key == 'q':
sys.exit(0)
glutPostRedisplay()
#if we change the screen dimensions
def reshape(width, height):
global wd
global ht
glClearColor(0.0, 0.0, 0.0, 0.0)
if height == 0:
height = 1
wd = width
ht = height
glViewport(0,0,wd,ht)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
if wd<=ht:
glOrtho(-2.0,2.0,-2.0*ht/wd,2.0*ht/wd,-2.0,2.0)
else:
glOrtho(-2.0*wd/ht,2.0*wd/ht,-2.0,2.0,-2.0,2.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
#does nothing at this point
#def motion():
# return 0
def chaptrack():
global MouseX
global MouseY
global wd
global ht
global aff
dx = (MouseX-wd/2)/128.0
dy = (MouseY-ht/2)/128.0
glMatrixMode(GL_TEXTURE)
glPushMatrix()
glLoadIdentity()
glRotatef(dx,0,1.0,0.0)
glRotatef(dy,1.0,0.0,0.0)
glMultMatrixf(aff)
aff = glGetFloatv(GL_TEXTURE_MATRIX)
glPopMatrix()
def idle():
chaptrack()
glutPostRedisplay()
def mousemotion(x,y):
global MouseX
global MouseY
MouseX = x
MouseY = y
def init():
glEnable(GL_DEPTH_TEST)
glShadeModel(GL_SMOOTH)
def main() :
global wd
global ht
glutInitDisplayMode(GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE)
glutInitWindowPosition(50, 50)
glutInitWindowSize(wd, ht)
glutInit([])
glutCreateWindow("Quaternion Fractals!")
glutKeyboardFunc(keyboard)
glutReshapeFunc(reshape)
glutDisplayFunc(display)
#glutMotionFunc(motion)
#glutMouseFunc(mouse)
glutIdleFunc(idle)
glutPassiveMotionFunc(mousemotion)
init()
# calculate the fractal
calcit()
glutMainLoop()
main()
# ===== alldatacenter/alldata :: /olap/ByConity/tests/testflows/rbac/tests/privileges/alter/alter_settings.py =====
import json
from testflows.core import *
from testflows.asserts import error
from rbac.requirements import *
from rbac.helper.common import *
import rbac.helper.errors as errors
from rbac.helper.tables import table_types
aliases = {"ALTER SETTINGS", "ALTER SETTING", "ALTER MODIFY SETTING", "MODIFY SETTING", "ALL"}
def check_alter_settings_when_privilege_is_granted(table, user, node):
"""Ensures ADD SETTINGS runs as expected when the privilege is granted to the specified user
"""
with Given("I check that the modified setting is not already in the table"):
output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output)
assert "merge_with_ttl_timeout = 5" not in output['statement'], error()
with And(f"I modify settings"):
node.query(f"ALTER TABLE {table} MODIFY SETTING merge_with_ttl_timeout=5",
settings=[("user", user)])
with Then("I verify that the setting is in the table"):
output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output)
assert "SETTINGS index_granularity = 8192, merge_with_ttl_timeout = 5" in output['statement'], error()
def check_alter_settings_when_privilege_is_not_granted(table, user, node):
"""Ensures CLEAR SETTINGS runs as expected when the privilege is granted to the specified user
"""
with When("I grant the user NONE privilege"):
node.query(f"GRANT NONE TO {user}")
with And("I grant the user USAGE privilege"):
node.query(f"GRANT USAGE ON *.* TO {user}")
with Then("I try to use ALTER SETTING, has not been granted"):
exitcode, message = errors.not_enough_privileges(user)
node.query(f"ALTER TABLE {table} MODIFY SETTING merge_with_ttl_timeout=5",
settings=[("user", user)], exitcode=exitcode, message=message)
@TestScenario
def user_with_privileges(self, privilege, table_type, node=None):
"""Check that user with ALTER SETTINGS privilege is able
to alter the table
"""
if node is None:
node = self.context.node
table_name = f"merge_tree_{getuid()}"
user_name = f"user_{getuid()}"
with table(node, table_name, table_type), user(node, user_name):
with Given("I first grant the privilege"):
node.query(f"GRANT {privilege} ON {table_name} TO {user_name}")
with Then(f"I try to ALTER SETTINGS"):
check_alter_settings_when_privilege_is_granted(table_name, user_name, node)
@TestScenario
@Requirements(
RQ_SRS_006_RBAC_Privileges_AlterSettings_Revoke("1.0"),
)
def user_with_revoked_privileges(self, privilege, table_type, node=None):
"""Check that user is unable to alter settingss on table after ALTER SETTINGS privilege
on that table has been revoked from the user.
"""
if node is None:
node = self.context.node
table_name = f"merge_tree_{getuid()}"
user_name = f"user_{getuid()}"
with table(node, table_name, table_type), user(node, user_name):
with Given("I first grant the privilege"):
node.query(f"GRANT {privilege} ON {table_name} TO {user_name}")
with And("I then revoke the privileges"):
node.query(f"REVOKE {privilege} ON {table_name} FROM {user_name}")
with When(f"I try to ALTER SETTINGS"):
check_alter_settings_when_privilege_is_not_granted(table_name, user_name, node)
@TestScenario
@Requirements(
RQ_SRS_006_RBAC_Privileges_AlterSettings_Grant("1.0"),
)
def role_with_some_privileges(self, privilege, table_type, node=None):
"""Check that user can alter settings on a table after it is granted a role that
has the alter settings privilege for that table.
"""
if node is None:
node = self.context.node
table_name = f"merge_tree_{getuid()}"
user_name = f"user_{getuid()}"
role_name = f"role_{getuid()}"
with table(node, table_name, table_type), user(node, user_name), role(node, role_name):
with Given("I grant the alter settings privilege to a role"):
node.query(f"GRANT {privilege} ON {table_name} TO {role_name}")
with And("I grant role to the user"):
node.query(f"GRANT {role_name} TO {user_name}")
with Then(f"I try to ALTER SETTINGS"):
check_alter_settings_when_privilege_is_granted(table_name, user_name, node)
@TestScenario
def user_with_revoked_role(self, privilege, table_type, node=None):
"""Check that user with a role that has alter settings privilege on a table is unable to
alter settings from that table after the role with privilege has been revoked from the user.
"""
if node is None:
node = self.context.node
table_name = f"merge_tree_{getuid()}"
user_name = f"user_{getuid()}"
role_name = f"role_{getuid()}"
with table(node, table_name, table_type), user(node, user_name), role(node, role_name):
with When("I grant privileges to a role"):
node.query(f"GRANT {privilege} ON {table_name} TO {role_name}")
with And("I grant the role to a user"):
node.query(f"GRANT {role_name} TO {user_name}")
with And("I revoke the role from the user"):
node.query(f"REVOKE {role_name} FROM {user_name}")
with And("I alter settings on the table"):
check_alter_settings_when_privilege_is_not_granted(table_name, user_name, node)
@TestScenario
@Requirements(
RQ_SRS_006_RBAC_Privileges_AlterSettings_Cluster("1.0"),
)
def user_with_privileges_on_cluster(self, privilege, table_type, node=None):
"""Check that user is able to alter settings on a table with
privilege granted on a cluster.
"""
if node is None:
node = self.context.node
table_name = f"merge_tree_{getuid()}"
user_name = f"user_{getuid()}"
with When(f"granted=ALTER SETTINGS"):
with table(node, table_name, table_type):
try:
with Given("I have a user on a cluster"):
node.query(f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster")
with When("I grant alter settings privileges on a cluster"):
node.query(f"GRANT ON CLUSTER sharded_cluster ALTER SETTINGS ON {table_name} TO {user_name}")
with Then(f"I try to ALTER SETTINGS"):
check_alter_settings_when_privilege_is_granted(table_name, user_name, node)
with When("I revoke alter settings privileges on a cluster"):
node.query(f"REVOKE ON CLUSTER sharded_cluster ALTER SETTINGS ON {table_name} FROM {user_name}")
with Then(f"I try to ALTER SETTINGS"):
check_alter_settings_when_privilege_is_not_granted(table_name, user_name, node)
finally:
with Finally("I drop the user on a cluster"):
node.query(f"DROP USER {user_name} ON CLUSTER sharded_cluster")
@TestSuite
def scenario_parallelization(self, table_type, privilege):
"""Runs all scenarios in parallel for a given privilege.
"""
with Pool(4) as pool:
tasks = []
try:
for scenario in loads(current_module(), Scenario):
run_scenario(pool, tasks, Scenario(test=scenario), {"table_type": table_type, "privilege": privilege})
finally:
join(tasks)
@TestFeature
@Requirements(
RQ_SRS_006_RBAC_Privileges_AlterSettings("1.0"),
RQ_SRS_006_RBAC_Privileges_AlterSettings_TableEngines("1.0"),
RQ_SRS_006_RBAC_Privileges_All("1.0"),
RQ_SRS_006_RBAC_Privileges_None("1.0")
)
@Examples("table_type", [
(key,) for key in table_types.keys()
])
@Name("alter settings")
def feature(self, node="clickhouse1", stress=None, parallel=None):
"""Runs test suites above which check correctness over scenarios and permutations
"""
self.context.node = self.context.cluster.node(node)
if parallel is not None:
self.context.parallel = parallel
if stress is not None:
self.context.stress = stress
for example in self.examples:
table_type, = example
if table_type != "MergeTree" and not self.context.stress:
continue
with Example(str(example)):
with Pool(4) as pool:
tasks = []
try:
for alias in aliases:
run_scenario(pool, tasks, Suite(test=scenario_parallelization, name=alias,
setup=instrument_clickhouse_server_log),
{"table_type": table_type, "privilege": alias})
finally:
join(tasks)
# ===== myusuf3/delorean :: /delorean/exceptions.py =====
class DeloreanError(Exception):
"""
Base Delorean Exception class
"""
def __init__(self, msg):
self.msg = str(msg)
Exception.__init__(self, msg)
def __str__(self):
return self.msg
class DeloreanInvalidTimezone(DeloreanError):
"""
Exception that is raised when an invalid timezone is passed in.
"""
pass
class DeloreanInvalidDatetime(DeloreanError):
"""
Exception that is raised when an improper datetime object is passed
in.
"""
pass
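# A minimal usage sketch for the exception hierarchy above, assuming the delorean
# package is installed: DeloreanError keeps the message on .msg and in str(), so
# callers can catch the base class and still report the specific failure. The
# validate_timezone helper below is hypothetical, used only to illustrate raising
# DeloreanInvalidTimezone; it is not part of the delorean API.
from delorean.exceptions import DeloreanError, DeloreanInvalidTimezone

KNOWN_TIMEZONES = {"UTC", "US/Eastern", "Europe/Paris"}  # illustrative subset

def validate_timezone(name):
    if name not in KNOWN_TIMEZONES:
        raise DeloreanInvalidTimezone("unknown timezone: %s" % name)
    return name

try:
    validate_timezone("Mars/Olympus_Mons")
except DeloreanError as exc:  # the base class catches both subclasses
    print(exc.msg)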
|
2684ef6927074bf73d7e7e8eaeb3c90d1d485512
|
68b5cefc2b602aafd708a68006d3d6d33bc715c1
|
/Athos/Networks/CheXpert/Util.py
|
5a26bbb39aea07776126e371524b79b670cc49ce
|
[
"MIT"
] |
permissive
|
mpc-msri/EzPC
|
4d839d5741bf1e65df7e478492a047bde8e1f5a5
|
c6e60b1bd2125b169ed3c17ee37fbe0e56c4b8ad
|
refs/heads/master
| 2023-08-09T17:37:25.135926
| 2023-08-02T07:04:08
| 2023-08-02T07:04:08
| 170,995,719
| 327
| 111
|
MIT
| 2023-09-07T07:09:05
| 2019-02-16T11:23:32
|
C++
|
UTF-8
|
Python
| false
| false
| 8,717
|
py
|
Util.py
|
"""
Authors: Saksham Gupta.
Copyright:
Copyright (c) 2020 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
**
Part of code from https://github.com/kamenbliznashki/chexpert
Modified for our purposes.
**
"""
import os
import json
import math
import pickle
import numpy, sys
import pandas as pd
import numpy as np
from tqdm import tqdm
from PIL import Image
import torchvision.transforms as T
import torch
from torch.utils.data import Dataset
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "TFCompiler"))
import DumpTFMtData
class ChexpertSmall(Dataset):
url = "http://download.cs.stanford.edu/deep/CheXpert-v1.0-small.zip"
dir_name = os.path.splitext(os.path.basename(url))[
0
] # folder to match the filename
attr_all_names = [
"No Finding",
"Enlarged Cardiomediastinum",
"Cardiomegaly",
"Lung Opacity",
"Lung Lesion",
"Edema",
"Consolidation",
"Pneumonia",
"Atelectasis",
"Pneumothorax",
"Pleural Effusion",
"Pleural Other",
"Fracture",
"Support Devices",
]
# select only the competition labels
attr_names = [
"Atelectasis",
"Cardiomegaly",
"Consolidation",
"Edema",
"Pleural Effusion",
]
def __init__(self, root, mode="train", transform=None, data_filter=None):
self.root = root
self.transform = transform
assert mode in ["train", "valid"]
self.mode = mode
# if mode is train/valid; root is path to data folder with `train`/`valid` csv file to construct dataset.
self._maybe_process(data_filter)
data_file = os.path.join(
self.root, self.dir_name, "valid.pt" if mode in ["valid"] else "train.pt"
)
self.data = torch.load(data_file)
# store index of the selected attributes in the columns of the data for faster indexing
self.attr_idxs = [self.data.columns.tolist().index(a) for a in self.attr_names]
def __getitem__(self, idx):
# 1. select and load image
img_path = self.data.iloc[idx, 0] # 'Path' column is 0
img = Image.open(os.path.join(self.root, img_path)).convert("RGB")
if self.transform is not None:
img = self.transform(img)
# 2. select attributes as targets
attr = self.data.iloc[idx, self.attr_idxs].values.astype(np.float32)
attr = torch.from_numpy(attr)
# 3. save index for extracting the patient_id in prediction/eval results as 'CheXpert-v1.0-small/valid/patient64541/study1'
# performed using the extract_patient_ids function
idx = self.data.index[
idx
] # idx is based on len(self.data); if we are taking a subset of the data, idx will be relative to len(subset);
# self.data.index(idx) pulls the index in the original dataframe and not the subset
return img, attr, idx
def __len__(self):
return len(self.data)
def _maybe_process(self, data_filter):
# Dataset labels are: blank for unmentioned, 0 for negative, -1 for uncertain, and 1 for positive.
# Process by:
# 1. fill NAs (blanks for unmentioned) as 0 (negatives)
# 2. fill -1 as 1 (U-Ones method described in paper) # TODO -- setup options for uncertain labels
# 3. apply attr filters as a dictionary {data_attribute: value_to_keep} e.g. {'Frontal/Lateral': 'Frontal'}
# check for processed .pt files
train_file = os.path.join(self.root, self.dir_name, "train.pt")
valid_file = os.path.join(self.root, self.dir_name, "valid.pt")
if not (os.path.exists(train_file) and os.path.exists(valid_file)):
# load data and preprocess training data
valid_df = pd.read_csv(
os.path.join(self.root, self.dir_name, "valid.csv"),
keep_default_na=True,
)
train_df = self._load_and_preprocess_training_data(
os.path.join(self.root, self.dir_name, "train.csv"), data_filter
)
# save
torch.save(train_df, train_file)
torch.save(valid_df, valid_file)
def _load_and_preprocess_training_data(self, csv_path, data_filter):
train_df = pd.read_csv(csv_path, keep_default_na=True)
# 1. fill NAs (blanks for unmentioned) as 0 (negatives)
# attr columns ['No Finding', ..., 'Support Devices']; note AP/PA remains with NAs for Lateral pictures
train_df[self.attr_names] = train_df[self.attr_names].fillna(0)
# 2. fill -1 as 1 (U-Ones method described in paper) # TODO -- setup options for uncertain labels
train_df[self.attr_names] = train_df[self.attr_names].replace(-1, 1)
if data_filter is not None:
# 3. apply attr filters
# only keep data matching the attribute e.g. df['Frontal/Lateral']=='Frontal'
for k, v in data_filter.items():
train_df = train_df[train_df[k] == v]
with open(
os.path.join(
os.path.dirname(csv_path), "processed_training_data_filters.json"
),
"w",
) as f:
json.dump(data_filter, f)
return train_df
def compute_mean_and_std(dataset):
m = 0
s = 0
k = 1
for img, _, _ in tqdm(dataset):
x = img.mean().item()
new_m = m + (x - m) / k
s += (x - m) * (x - new_m)
m = new_m
k += 1
print("Number of datapoints: ", k)
return m, math.sqrt(s / (k - 1))
def save_data_as_pickle(dataset, mode, scalingFac):
preProcessedImgSaveFolder = "./Data_batch"
filename = os.path.join(
preProcessedImgSaveFolder, "preprocess_" + mode + "_batch" + ".p"
)
features = []
labels = []
ids = []
for img, attr, id in dataset:
img[...] = img * (1 << scalingFac)
print("Processed img {}".format(id))
# print(type(img))
img = img.reshape(-1, 1)
# print(img.shape)
features.append(img)
labels.append(attr)
ids.append(id)
pickle.dump((features, labels, ids), open(filename, "wb"))
def load_preprocess_validation_data(
preProcessedImgSaveFolder="./Data_batch",
):
valid_features, valid_labels, valid_ids = pickle.load(
open(
os.path.join(preProcessedImgSaveFolder, "preprocess_valid_batch.p"),
mode="rb",
)
)
return valid_features, valid_labels, valid_ids
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("scale", type=str, help="Scaling Factor.")
args = parser.parse_args()
scalingFac = int(args.scale)
mode = "valid"
ds = ChexpertSmall(
"../../HelperScripts/CheXpert",
mode,
transform=T.Compose(
[
# T.Grayscale(num_output_channels=3),
T.CenterCrop(320),
T.ToTensor(),
T.Normalize(mean=[0.5306], std=[0.0333]),
T.Lambda(lambda x: torch.flatten(x)),
]
),
)
print("length: ", len(ds))
print("attributes: ", ds.attr_names)
# m, s = compute_mean_and_std(ds)
# print("Dataset mean: {}; dataset std {}".format(m, s))
save_data_as_pickle(ds, mode, scalingFac)
print("\n" * 4)
print("*" * 20)
id = 1
print("Sample Image {} from Valid Dataset".format(id))
print("*" * 20)
features, labels, ids = load_preprocess_validation_data()
print(features[id].shape)
print(features[id])
print(labels[id])
print(ids[id])
if __name__ == "__main__":
main()
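# A toy reproduction of the label-cleaning steps in _load_and_preprocess_training_data
# above: blanks (NaN) become 0 (negative), -1 (uncertain) becomes 1 (the U-Ones rule),
# then a {column: value} filter keeps only matching rows. The frame below is an
# illustrative stand-in, not real CheXpert data, and uses only two attribute columns.
import numpy as np
import pandas as pd

attr_names = ["Atelectasis", "Cardiomegaly"]
df = pd.DataFrame({
    "Frontal/Lateral": ["Frontal", "Lateral", "Frontal"],
    "Atelectasis": [np.nan, -1.0, 1.0],
    "Cardiomegaly": [1.0, np.nan, -1.0],
})

df[attr_names] = df[attr_names].fillna(0)       # step 1: unmentioned -> negative
df[attr_names] = df[attr_names].replace(-1, 1)  # step 2: uncertain -> positive
df = df[df["Frontal/Lateral"] == "Frontal"]     # step 3: apply the data filter
print(df)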
|
9bc3619c3dbdb0792fdbe27244a3ef3bd3d4a9f7
|
18c63c04feaed9d8cd5056a58ccde7baa1563e3a
|
/tests/test_mru.py
|
d11dba460a03e46859d8b6f1b7cb7f0572ab3a7b
|
[
"MIT"
] |
permissive
|
tkem/cachetools
|
d2aaa4ca3ea22e29a5e25f434b090bec2fe66921
|
8b56caa87f2dc624f3ec453127559ab893616efa
|
refs/heads/master
| 2023-08-11T22:39:28.854454
| 2023-05-27T20:35:28
| 2023-05-27T20:35:28
| 18,006,791
| 1,779
| 175
|
MIT
| 2023-08-30T10:15:06
| 2014-03-22T10:15:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
test_mru.py
|
import unittest
from cachetools import MRUCache
from . import CacheTestMixin
class MRUCacheTest(unittest.TestCase, CacheTestMixin):
Cache = MRUCache
def test_evict__writes_only(self):
cache = MRUCache(maxsize=2)
cache[1] = 1
cache[2] = 2
cache[3] = 3 # Evicts 1 because nothing's been used yet
assert len(cache) == 2
assert 1 not in cache, "Wrong key was evicted. Should have been '1'."
assert 2 in cache
assert 3 in cache
def test_evict__with_access(self):
cache = MRUCache(maxsize=2)
cache[1] = 1
cache[2] = 2
cache[1]
cache[2]
cache[3] = 3 # Evicts 2
assert 2 not in cache, "Wrong key was evicted. Should have been '2'."
assert 1 in cache
assert 3 in cache
def test_evict__with_delete(self):
cache = MRUCache(maxsize=2)
cache[1] = 1
cache[2] = 2
del cache[2]
cache[3] = 3 # Doesn't evict anything because we just deleted 2
assert 2 not in cache
assert 1 in cache
cache[4] = 4 # Should evict 1 as we just accessed it with __contains__
assert 1 not in cache
assert 3 in cache
assert 4 in cache
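# A minimal sketch of the most-recently-used eviction the tests above assert:
# after key 2 is the last one read, inserting a third key into a maxsize=2
# MRUCache evicts 2 and keeps 1. Assumes cachetools is installed.
from cachetools import MRUCache

cache = MRUCache(maxsize=2)
cache[1] = "one"
cache[2] = "two"
_ = cache[1]
_ = cache[2]          # 2 is now the most recently used key
cache[3] = "three"    # evicts 2, the MRU entry
print(sorted(cache))  # -> [1, 3]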
|
bb2d8b65e2257f2244b17fc5f8ac1f966f5dd824
|
b7163b44b679e082fe97cf7fcd0c73b2fcdb38eb
|
/modules/dbnd/src/dbnd/tasks/doctor/system_logging.py
|
942042ecb0886b35731804defe852dcb4d0462c9
|
[
"Apache-2.0"
] |
permissive
|
databand-ai/dbnd
|
70c95d95e12bfb8ab471a6dce27691ed658cb92d
|
d59c99dcdcd280d7eec36a693dd80f8c8c831ea2
|
refs/heads/develop
| 2023-06-24T18:07:56.524526
| 2023-05-28T07:57:36
| 2023-05-28T07:57:36
| 231,361,064
| 257
| 33
|
Apache-2.0
| 2023-08-06T08:30:28
| 2020-01-02T10:42:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
system_logging.py
|
# © Copyright Databand.ai, an IBM Company 2022
from __future__ import print_function
import logging
import sys
from dbnd._core.current import try_get_databand_context
from dbnd._core.task_build.dbnd_decorator import task
from dbnd.tasks.doctor.doctor_report_builder import DoctorStatusReportBuilder
logger = logging.getLogger(__name__)
@task
def logging_status():
# type: ()->str
"""
Shows the status of the logging system
All known loggers, logging configuration and so on.
:return:
"""
report = DoctorStatusReportBuilder("Logging Status")
report.log("logging.root", logging.root)
report.log("logging.root.handlers", logging.root.handlers)
report.log("logger", logger)
report.log("logger.handlers", logger.handlers)
# airflow usually alternates stderr/stdout
report.log("sys.stderr", sys.stderr)
report.log("sys.stderr[close]", hasattr(sys.stderr, "close"))
report.log("sys.stderr", sys.__stderr__)
report.log("sys.__stderr__[close]", hasattr(sys.__stderr__, "close"))
dbnd_context = try_get_databand_context()
if dbnd_context:
from dbnd._core.task_ctrl.task_visualiser import TaskVisualiser
report.add_sub_report(
TaskVisualiser(dbnd_context.settings.log).banner("Log Config")
)
# check airflow logging
try:
from logging import Logger
airflow_task_logger = Logger.manager.loggerDict.get("airflow.task")
if airflow_task_logger:
report.log("Airflow task logger", airflow_task_logger)
report.log("Airflow task logger handlers", airflow_task_logger.handlers)
else:
report.log("Airflow task logger", "not found")
except Exception as ex:
ex_msg = "Failed to get airflow.task logger status: %s" % ex
report.log("Airflow task logger", ex_msg)
logger.exception(ex_msg)
logging_status = report.get_status_str()
logging_status = "\n{sep}\n{msg}\n{sep}s\n".format(msg=logging_status, sep="*" * 40)
logger.info(logging_status)
# if we run this check we might have a problem with logs, we don't know how we are going to see the message
print("\n\nLogging Status (via __stderr__)%s" % logging_status, file=sys.__stderr__)
logger.info("Running logging validation.. (you will see a lot of messages)")
# now we can print things, it might be that one of them will "kill the process"
# because of some weird log handlers loop
print("Message via print")
print("Message via print stderr", file=sys.stderr)
print("Message via print __stderr__", file=sys.__stderr__)
logging.info("Message via logging root")
logger.info("Message via logger")
return logging_status
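# A plain-stdlib sketch of the same inspection the report above performs: list the
# root handlers and look up a named logger (here "airflow.task") in
# Logger.manager.loggerDict, without going through dbnd's report builder. Entries in
# loggerDict can be PlaceHolder objects, hence the isinstance check.
import logging

print("root handlers:", logging.root.handlers)
airflow_task_logger = logging.Logger.manager.loggerDict.get("airflow.task")
if isinstance(airflow_task_logger, logging.Logger):
    print("airflow.task handlers:", airflow_task_logger.handlers)
else:
    print("airflow.task logger not found")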
|
bde927cf42bcfe6d450425c827971d1b399f2a25
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/mock_torch/__main__.py
|
94c74c63d10977d99d1f9d1689ad69cff2a17228
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
__main__.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from pathlib import Path
import os
parser = argparse.ArgumentParser()
parser.add_argument(
"mock",
choices=["enable", "disable"],
help="enable/disable mocking 'import torch', default is enable",
nargs="?",
default="enable",
)
parser.add_argument("--lazy", action="store_true")
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args()
torch_env = Path(__file__).parent
def main():
if args.mock == "enable":
print(
f"export ONEFLOW_MOCK_TORCH_LAZY={args.lazy}; export ONEFLOW_MOCK_TORCH_VERBOSE={args.verbose}; export PYTHONPATH={str(torch_env)}:$PYTHONPATH"
)
elif args.mock == "disable" and "PYTHONPATH" in os.environ:
paths = os.environ["PYTHONPATH"].rstrip(":").split(":")
paths = [x for x in paths if x != str(torch_env)]
path = ":".join(paths)
print(
f"export PYTHONPATH={path}; unset ONEFLOW_MOCK_TORCH_LAZY; unset ONEFLOW_MOCK_TORCH_VERBOSE"
)
if __name__ == "__main__":
main()
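# A standalone sketch of the PYTHONPATH handling above: "enable" prepends the
# mock-torch directory and "disable" filters that directory back out of the existing
# value; the printed `export` lines are presumably meant to be eval'd by the calling
# shell. The directory literal below is illustrative, not the real install path.
import os

torch_env = "/opt/oneflow/python/oneflow/mock_torch"  # illustrative path

def enabled_pythonpath(current: str) -> str:
    return f"{torch_env}:{current}" if current else torch_env

def disabled_pythonpath(current: str) -> str:
    paths = [p for p in current.rstrip(":").split(":") if p and p != torch_env]
    return ":".join(paths)

current = os.environ.get("PYTHONPATH", "")
print(enabled_pythonpath(current))
print(disabled_pythonpath(enabled_pythonpath(current)))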
|
c9e034af82ba431a5460c656b9a35e9dc2809237
|
aed152c93eb7a40780c1aa5a7a719dbef33c6aa6
|
/tests/test_simplifiers_text.py
|
a2296a6ade1572c616b04e74a610a24368e878ab
|
[
"MIT"
] |
permissive
|
alan-turing-institute/ReadabiliPy
|
d8bb16e9b8e177b092a3aa54f783dd39e668d317
|
8905cb9c22d2f813dabe86be5e9e4ea4e1f36df3
|
refs/heads/master
| 2023-08-31T05:31:03.074817
| 2023-08-18T10:03:48
| 2023-08-18T10:03:48
| 154,998,135
| 116
| 25
|
MIT
| 2023-05-15T10:44:32
| 2018-10-27T18:47:08
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,061
|
py
|
test_simplifiers_text.py
|
from pytest import mark
from checks import check_exact_html_output
from readabilipy.simplifiers import normalise_text, normalise_unicode, normalise_whitespace, strip_control_characters, strip_html_whitespace
from readabilipy.simplifiers import text
def test_unicode_normalisation():
nfd_form = "Ame\u0301lie"
nfc_form = "Amélie"
assert normalise_unicode(nfd_form) == normalise_unicode(nfc_form)
def test_all_whitespace_is_normalised_to_empty_string():
tab_space_new_line_tab_space = "\t \n\t \f \r\n"
assert normalise_whitespace(tab_space_new_line_tab_space) == ""
def test_text_normalisation():
unnormalised_string = "Ame\u0301lie Poulain"
assert normalise_text(unnormalised_string) == "Amélie Poulain"
def test_strip_html_whitespace():
formatted_string = """
<html>
<body>
<p>Some text here</p>
</body>
</html>
"""
assert strip_html_whitespace(formatted_string) == "<html><body><p>Some text here</p></body></html>"
def test_strip_control_characters_non_printing_characters():
unnormalised_string = "A string with non-printing characters in\u200Bc\u200Bluded\ufeff"
assert strip_control_characters(unnormalised_string) == "A string with non-printing characters included"
assert normalise_text(unnormalised_string) == "A string with non-printing characters included"
def test_strip_control_characters_cr():
unnormalised_string = "A string with new lines\rin\u200Bc\u200Bluded\ufeff"
assert strip_control_characters(unnormalised_string) == "A string with new lines\rincluded"
assert normalise_text(unnormalised_string) == "A string with new lines included"
def test_strip_control_characters_lf():
unnormalised_string = "A string with new lines\ninc\u200Bluded\ufeff"
assert strip_control_characters(unnormalised_string) == "A string with new lines\nincluded"
assert normalise_text(unnormalised_string) == "A string with new lines included"
def test_strip_control_characters_cr_lf():
unnormalised_string = "A string with new lines\r\nin\u200Bc\u200Bluded\ufeff"
assert strip_control_characters(unnormalised_string) == "A string with new lines\r\nincluded"
assert normalise_text(unnormalised_string) == "A string with new lines included"
def test_strip_control_characters_ff():
unnormalised_string = "A string with form feed\fin\u200Bc\u200Bluded\ufeff"
assert strip_control_characters(unnormalised_string) == "A string with form feed\fincluded"
assert normalise_text(unnormalised_string) == "A string with form feed included"
def test_strip_control_characters_tab():
unnormalised_string = "A string with tabs\tin\u200Bc\u200Bluded\ufeff"
assert strip_control_characters(unnormalised_string) == "A string with tabs\tincluded"
assert normalise_text(unnormalised_string) == "A string with tabs included"
# Test whitespace around tags
@mark.parametrize('terminal_punctuation', text.terminal_punctuation_marks)
def test_ensure_correct_punctuation_joining(terminal_punctuation):
"""Do not join with ' ' if the following character is a punctuation mark."""
input_html = f"""
<div>
<p>
Some text <a href="example.com">like this</a>{terminal_punctuation} with punctuation.
</p>
</div>"""
expected_output = f"""<div><p>Some text like this{terminal_punctuation} with punctuation.</p></div>"""
check_exact_html_output(input_html, expected_output)
@mark.parametrize('matched_pair', text.matched_punctuation_marks)
def test_ensure_correct_bracket_quote_joining(matched_pair):
"""Do not join with ' ' if we are inside matched punctuation marks."""
input_html = f"""
<div>
<p>
Some text {matched_pair[0]}<a href="example.com">like this</a>{matched_pair[1]} with punctuation.
</p>
</div>"""
expected_output = f"""<div><p>Some text {matched_pair[0]}like this{matched_pair[1]} with punctuation.</p></div>"""
check_exact_html_output(input_html, expected_output)
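# A stdlib-only sketch of the property the unicode test above relies on: the
# decomposed (NFD) and composed (NFC) spellings of "Amélie" differ as raw strings but
# compare equal once both are normalised to the same form. NFC is used here for
# illustration; it is not a claim about which form normalise_unicode applies.
import unicodedata

nfd_form = "Ame\u0301lie"   # 'e' followed by a combining acute accent
nfc_form = "Amélie"         # precomposed 'é'

assert nfd_form != nfc_form
assert unicodedata.normalize("NFC", nfd_form) == unicodedata.normalize("NFC", nfc_form)
print("equal after NFC normalisation")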
|
eb6c257eb246b5b687fc3e7bbd6ec0cc9a541fd2
|
999f6b0dfdb387a8e4dfbbcda184582b26475a45
|
/promgen/shortcuts.py
|
ef4c60555890aec97f03d2da57133083f0cfdd5c
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
line/promgen
|
86fc6f69969ed4197caca41242206716d88335ef
|
b99f2ba1c8f41d676d43ae9a5f63f1e4fff9b664
|
refs/heads/master
| 2023-08-19T11:46:12.251750
| 2023-08-18T04:50:12
| 2023-08-18T04:50:12
| 64,717,629
| 1,049
| 169
|
MIT
| 2023-08-18T04:50:13
| 2016-08-02T02:35:13
|
Python
|
UTF-8
|
Python
| false
| false
| 507
|
py
|
shortcuts.py
|
# Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
from urllib.parse import urlunsplit
from django.conf import settings
from django.shortcuts import resolve_url
from promgen import models
def resolve_domain(*args, **kwargs):
return urlunsplit(
(
settings.PROMGEN_SCHEME,
models.Site.objects.get_current().domain,
resolve_url(*args, **kwargs),
"",
"",
)
)
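# A stdlib-only sketch of how resolve_domain above assembles its URL: urlunsplit takes
# (scheme, netloc, path, query, fragment) and returns the absolute URL. The scheme,
# domain and path literals are illustrative stand-ins for settings.PROMGEN_SCHEME, the
# current Site domain and the result of resolve_url(...).
from urllib.parse import urlunsplit

url = urlunsplit(("https", "promgen.example.com", "/service/1", "", ""))
print(url)  # -> https://promgen.example.com/service/1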
|
87aa0b1831584446fa00b7d66052f416b05ef524
|
e34810541899182d3a0835e02fa68389af63a805
|
/test/test_lopf_multiinvest.py
|
e06f4a8e7187515c181b5db8d53a8025d5d670a1
|
[
"MIT"
] |
permissive
|
PyPSA/PyPSA
|
483216289643ca496d66d316a22e000afa15706c
|
38b710c73950d05164e7d6c9dd786065ee7cde44
|
refs/heads/master
| 2023-08-19T20:55:17.329666
| 2023-08-17T10:40:50
| 2023-08-17T10:40:50
| 49,414,256
| 891
| 399
|
MIT
| 2023-09-14T14:09:38
| 2016-01-11T09:04:18
|
Python
|
UTF-8
|
Python
| false
| false
| 17,293
|
py
|
test_lopf_multiinvest.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 2 10:21:16 2021.
@author: fabian
"""
import pandas as pd
import pytest
from conftest import optimize
from numpy.testing import assert_array_almost_equal as equal
from pandas import IndexSlice as idx
import pypsa
from pypsa.descriptors import get_activity_mask
MULTIINVEST_APIS = ["linopy", "native"]
kwargs = dict(multi_investment_periods=True)
@pytest.fixture
def n():
n = pypsa.Network(snapshots=range(10))
n.investment_periods = [2020, 2030, 2040, 2050]
n.add("Carrier", "gencarrier")
n.madd("Bus", [1, 2])
for i, period in enumerate(n.investment_periods):
factor = (10 + i) / 10
n.madd(
"Generator",
[f"gen1-{period}", f"gen2-{period}"],
bus=[1, 2],
lifetime=30,
build_year=period,
capital_cost=[100 / factor, 100 * factor],
marginal_cost=[i + 2, i + 1],
p_nom_extendable=True,
carrier="gencarrier",
)
for i, period in enumerate(n.investment_periods):
n.add(
"Line",
f"line-{period}",
bus0=1,
bus1=2,
length=1,
build_year=period,
lifetime=40,
capital_cost=30 + i,
x=0.0001,
s_nom_extendable=True,
)
load = range(100, 100 + len(n.snapshots))
load = pd.DataFrame({"load1": load, "load2": load}, index=n.snapshots)
n.madd(
"Load",
["load1", "load2"],
bus=[1, 2],
p_set=load,
)
return n
@pytest.fixture
def n_sus(n):
# only keep generators which are getting more expensive and raise the generator
# capital cost, so that the storage units are activated
n.mremove("Generator", n.generators.query('bus == "1"').index)
n.generators.capital_cost *= 5
for i, period in enumerate(n.investment_periods):
factor = (10 + i) / 10
n.add(
"StorageUnit",
f"sto1-{period}",
bus=1,
lifetime=30,
build_year=period,
capital_cost=10 / factor,
marginal_cost=i,
p_nom_extendable=True,
)
return n
@pytest.fixture
def n_sts(n):
# only keep generators which are getting more expensive and raise the generator
# capital cost, so that the storage units are activated
n.mremove("Generator", n.generators.query('bus == "1"').index)
n.generators.capital_cost *= 5
n.add("Bus", "1 battery")
n.add(
"Store",
"sto1-2020",
bus="1 battery",
e_nom_extendable=True,
e_initial=20,
build_year=2020,
lifetime=30,
capital_cost=0.1,
)
n.add(
"Link", "bus2 battery charger", bus0=1, bus1="1 battery", p_nom_extendable=True
)
n.add(
"Link",
"My bus2 battery discharger",
bus0="1 battery",
bus1=1,
p_nom_extendable=True,
)
return n
def test_single_to_multi_level_snapshots():
n = pypsa.Network(snapshots=range(2))
years = [2030, 2040]
n.investment_periods = years
assert isinstance(n.snapshots, pd.MultiIndex)
equal(n.snapshots.levels[0], years)
def test_investment_period_values():
sns = pd.MultiIndex.from_product([[2020, 2030, 2040], [1, 2, 3]])
n = pypsa.Network(snapshots=sns)
with pytest.raises(ValueError):
n.investment_periods = [2040, 2030, 2020]
with pytest.raises(ValueError):
n.investment_periods = ["2020", "2030", "2040"]
with pytest.raises(NotImplementedError):
n.investment_periods = [2020]
n = pypsa.Network(snapshots=range(2))
with pytest.raises(ValueError):
n.investment_periods = ["2020", "2030", "2040"]
def test_active_assets(n):
active_gens = n.get_active_assets("Generator", 2030)[lambda ds: ds].index
assert (active_gens == ["gen1-2020", "gen2-2020", "gen1-2030", "gen2-2030"]).all()
active_gens = n.get_active_assets("Generator", 2050)[lambda ds: ds].index
assert (
active_gens
== [
"gen1-2030",
"gen2-2030",
"gen1-2040",
"gen2-2040",
"gen1-2050",
"gen2-2050",
]
).all()
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_tiny_with_default(api):
n = pypsa.Network(snapshots=range(2))
n.investment_periods = [2020, 2030]
n.add("Bus", 1)
n.add("Generator", 1, bus=1, p_nom_extendable=True, capital_cost=10)
n.add("Load", 1, bus=1, p_set=100)
status, _ = optimize(n, api, **kwargs)
assert status == "ok"
assert n.generators.p_nom_opt.item() == 100
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_tiny_with_build_year(api):
n = pypsa.Network(snapshots=range(2))
n.investment_periods = [2020, 2030]
n.add("Bus", 1)
n.add(
"Generator", 1, bus=1, p_nom_extendable=True, capital_cost=10, build_year=2020
)
n.add("Load", 1, bus=1, p_set=100)
status, _ = optimize(n, api, **kwargs)
assert status == "ok"
assert n.generators.p_nom_opt.item() == 100
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_tiny_infeasible(api):
n = pypsa.Network(snapshots=range(2))
n.investment_periods = [2020, 2030]
n.add("Bus", 1)
n.add(
"Generator", 1, bus=1, p_nom_extendable=True, capital_cost=10, build_year=2030
)
n.add("Load", 1, bus=1, p_set=100)
with pytest.raises(ValueError):
status, cond = optimize(n, api, **kwargs)
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_simple_network(n, api):
status, cond = optimize(n, api, **kwargs)
assert status == "ok"
assert cond == "optimal"
assert (n.generators_t.p.loc[[2020, 2030, 2040], "gen1-2050"] == 0).all()
assert (n.generators_t.p.loc[[2050], "gen1-2020"] == 0).all()
assert (n.lines_t.p0.loc[[2020, 2030, 2040], "line-2050"] == 0).all()
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_simple_network_snapshot_subset(n, api):
status, cond = optimize(n, api, n.snapshots[:20], **kwargs)
assert status == "ok"
assert cond == "optimal"
assert (n.generators_t.p.loc[[2020, 2030, 2040], "gen1-2050"] == 0).all()
assert (n.generators_t.p.loc[[2050], "gen1-2020"] == 0).all()
assert (n.lines_t.p0.loc[[2020, 2030, 2040], "line-2050"] == 0).all()
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_simple_network_storage_noncyclic(n_sus, api):
n_sus.storage_units["state_of_charge_initial"] = 200
n_sus.storage_units["cyclic_state_of_charge"] = False
n_sus.storage_units["state_of_charge_initial_per_period"] = False
status, cond = optimize(n_sus, api, **kwargs)
assert status == "ok"
assert cond == "optimal"
soc = n_sus.storage_units_t.state_of_charge
p = n_sus.storage_units_t.p
assert round((soc + p).loc[idx[2020, 0], "sto1-2020"], 4) == 200
assert soc.loc[idx[2040, 9], "sto1-2020"] == 0
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_simple_network_storage_noncyclic_per_period(n_sus, api):
n_sus.storage_units["state_of_charge_initial"] = 200
n_sus.storage_units["cyclic_state_of_charge"] = False
n_sus.storage_units["state_of_charge_initial_per_period"] = True
status, cond = optimize(n_sus, api, **kwargs)
assert status == "ok"
assert cond == "optimal"
assert (n_sus.storage_units_t.p.loc[[2020, 2030, 2040], "sto1-2050"] == 0).all()
assert (n_sus.storage_units_t.p.loc[[2050], "sto1-2020"] == 0).all()
soc_initial = (n_sus.storage_units_t.state_of_charge + n_sus.storage_units_t.p).loc[
idx[:, 0], :
]
soc_initial = soc_initial.droplevel("timestep")
assert soc_initial.loc[2020, "sto1-2020"] == 200
assert soc_initial.loc[2030, "sto1-2020"] == 200
assert soc_initial.loc[2040, "sto1-2040"] == 200
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_simple_network_storage_cyclic(n_sus, api):
n_sus.storage_units["cyclic_state_of_charge"] = True
n_sus.storage_units["cyclic_state_of_charge_per_period"] = False
status, cond = optimize(n_sus, api, **kwargs)
assert status == "ok"
assert cond == "optimal"
soc = n_sus.storage_units_t.state_of_charge
p = n_sus.storage_units_t.p
assert (
soc.loc[idx[2040, 9], "sto1-2020"] == (soc + p).loc[idx[2020, 0], "sto1-2020"]
)
assert (
soc.loc[idx[2050, 9], "sto1-2030"] == (soc + p).loc[idx[2030, 0], "sto1-2030"]
)
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_simple_network_storage_cyclic_per_period(n_sus, api):
# Watch out: breaks with xarray version 2022.06.00!
n_sus.storage_units["cyclic_state_of_charge"] = True
n_sus.storage_units["cyclic_state_of_charge_per_period"] = True
status, cond = optimize(n_sus, api, **kwargs)
assert status == "ok"
assert cond == "optimal"
soc = n_sus.storage_units_t.state_of_charge
p = n_sus.storage_units_t.p
assert (
soc.loc[idx[2020, 9], "sto1-2020"] == (soc + p).loc[idx[2020, 0], "sto1-2020"]
)
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_simple_network_store_noncyclic(n_sts, api):
n_sts.stores["e_cyclic"] = False
n_sts.stores["e_initial_per_period"] = False
status, cond = optimize(n_sts, api, **kwargs)
assert status == "ok"
assert cond == "optimal"
assert (n_sts.stores_t.p.loc[[2050], "sto1-2020"] == 0).all()
e_initial = (n_sts.stores_t.e + n_sts.stores_t.p).loc[idx[:, 0], :]
e_initial = e_initial.droplevel("timestep")
assert e_initial.loc[2020, "sto1-2020"] == 20
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_simple_network_store_noncyclic_per_period(n_sts, api):
n_sts.stores["e_cyclic"] = False
n_sts.stores["e_initial_per_period"] = True
status, cond = optimize(n_sts, api, **kwargs)
assert status == "ok"
assert cond == "optimal"
assert (n_sts.stores_t.p.loc[[2050], "sto1-2020"] == 0).all()
e_initial = (n_sts.stores_t.e + n_sts.stores_t.p).loc[idx[:, 0], :]
e_initial = e_initial.droplevel("timestep")
assert e_initial.loc[2020, "sto1-2020"] == 20
assert e_initial.loc[2030, "sto1-2020"] == 20
# lifetime is over here
assert e_initial.loc[2050, "sto1-2020"] == 0
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_simple_network_store_cyclic(n_sts, api):
n_sts.stores["e_cyclic"] = True
n_sts.stores["e_cyclic_per_period"] = False
status, cond = optimize(n_sts, api, **kwargs)
assert status == "ok"
assert cond == "optimal"
assert (n_sts.stores_t.p.loc[[2050], "sto1-2020"] == 0).all()
e = n_sts.stores_t.e
p = n_sts.stores_t.p
assert e.loc[idx[2040, 9], "sto1-2020"] == (e + p).loc[idx[2020, 0], "sto1-2020"]
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_simple_network_store_cyclic_per_period(n_sts, api):
# Watch out: breaks with xarray version 2022.06.00!
n_sts.stores["e_cyclic"] = True
n_sts.stores["e_cyclic_per_period"] = True
status, cond = optimize(n_sts, api, **kwargs)
assert status == "ok"
assert cond == "optimal"
assert (n_sts.stores_t.p.loc[[2050], "sto1-2020"] == 0).all()
e = n_sts.stores_t.e
p = n_sts.stores_t.p
assert e.loc[idx[2020, 9], "sto1-2020"] == (e + p).loc[idx[2020, 0], "sto1-2020"]
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_global_constraint_primary_energy_storage(n_sus, api):
c = "StorageUnit"
n_sus.add("Carrier", "emitting_carrier", co2_emissions=100)
n_sus.df(c)["state_of_charge_initial"] = 200
n_sus.df(c)["cyclic_state_of_charge"] = False
n_sus.df(c)["state_of_charge_initial_per_period"] = False
n_sus.df(c)["carrier"] = "emitting_carrier"
n_sus.add("GlobalConstraint", name="co2limit", type="primary_energy", constant=3000)
status, cond = optimize(n_sus, api, **kwargs)
active = get_activity_mask(n_sus, c)
soc_end = n_sus.pnl(c).state_of_charge.where(active).ffill().iloc[-1]
soc_diff = n_sus.df(c).state_of_charge_initial - soc_end
emissions = n_sus.df(c).carrier.map(n_sus.carriers.co2_emissions)
assert round(soc_diff @ emissions, 0) == 3000
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_global_constraint_primary_energy_store(n_sts, api):
c = "Store"
n_sts.add("Carrier", "emitting_carrier", co2_emissions=100)
n_sts.df(c)["e_initial"] = 200
n_sts.df(c)["e_cyclic"] = False
n_sts.df(c)["e_initial_per_period"] = False
n_sts.buses.loc["1 battery", "carrier"] = "emitting_carrier"
n_sts.add("GlobalConstraint", name="co2limit", type="primary_energy", constant=3000)
status, cond = optimize(n_sts, api, **kwargs)
active = get_activity_mask(n_sts, c)
soc_end = n_sts.pnl(c).e.where(active).ffill().iloc[-1]
soc_diff = n_sts.df(c).e_initial - soc_end
emissions = n_sts.df(c).carrier.map(n_sts.carriers.co2_emissions)
assert round(soc_diff @ emissions, 0) == 3000
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_global_constraint_transmission_expansion_limit(n, api):
n.add(
"GlobalConstraint",
"expansion_limit",
type="transmission_volume_expansion_limit",
constant=100,
sense="==",
carrier_attribute="AC",
)
status, cond = optimize(n, api, **kwargs)
assert n.lines.s_nom_opt.sum() == 100
# when only optimizing the first 10 snapshots the constraint must hold for
# the 2020 period
status, cond = optimize(n, api, n.snapshots[:10], **kwargs)
assert n.lines.loc["line-2020", "s_nom_opt"] == 100
n.global_constraints["investment_period"] = 2030
status, cond = optimize(n, api, **kwargs)
assert n.lines.s_nom_opt[["line-2020", "line-2030"]].sum() == 100
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_global_constraint_transmission_cost_limit(n, api):
n.add(
"GlobalConstraint",
"expansion_limit",
type="transmission_expansion_cost_limit",
constant=1000,
sense="==",
carrier_attribute="AC",
)
status, cond = optimize(n, api, **kwargs)
assert round(n.lines.eval("s_nom_opt * capital_cost").sum(), 2) == 1000
# when only optimizing the first 10 snapshots the constraint must hold for
# the 2020 period
status, cond = optimize(n, api, n.snapshots[:10], **kwargs)
assert round(n.lines.eval("s_nom_opt * capital_cost")["line-2020"].sum(), 2) == 1000
n.global_constraints["investment_period"] = 2030
status, cond = optimize(n, api, **kwargs)
lines = n.lines.loc[["line-2020", "line-2030"]]
assert round(lines.eval("s_nom_opt * capital_cost").sum(), 2) == 1000
@pytest.mark.parametrize("api", ["native", "linopy"])
def test_global_constraint_bus_tech_limit(n, api):
n.add(
"GlobalConstraint",
"expansion_limit",
type="tech_capacity_expansion_limit",
constant=300,
sense="==",
carrier_attribute="gencarrier",
investment_period=2020,
)
status, cond = optimize(n, api, **kwargs)
assert round(n.generators.p_nom_opt[["gen1-2020", "gen2-2020"]], 1).sum() == 300
n.global_constraints["bus"] = 1
status, cond = optimize(n, api, **kwargs)
assert n.generators.at["gen1-2020", "p_nom_opt"] == 300
# make the constraint non-binding and check that the shadow price is zero
n.global_constraints.sense = "<="
status, cond = optimize(n, api, **kwargs)
assert n.global_constraints.at["expansion_limit", "mu"] == 0
@pytest.mark.parametrize("api", ["linopy"])
def test_nominal_constraint_bus_carrier_expansion_limit(n, api):
n.buses.at["1", "nom_max_gencarrier"] = 100
status, cond = optimize(n, api, **kwargs)
gen1s = [f"gen1-{period}" for period in n.investment_periods]
assert round(n.generators.p_nom_opt[gen1s], 0).sum() == 100
n.buses.drop(["nom_max_gencarrier"], inplace=True, axis=1)
n.buses.at["1", "nom_max_gencarrier_2020"] = 100
status, cond = optimize(n, api, **kwargs)
assert n.generators.at["gen1-2020", "p_nom_opt"] == 100
n.buses.drop(["nom_max_gencarrier_2020"], inplace=True, axis=1)
# make the constraint non-binding and check that the shadow price is zero
n.buses.at["1", "nom_min_gencarrier_2020"] = 100
status, cond = optimize(n, api, **kwargs)
assert (n.model.dual["Bus-nom_min_gencarrier_2020"]).item() == 0
@pytest.mark.parametrize("api", MULTIINVEST_APIS)
def test_max_growth_constraint(n, api):
# test generator growth limit
gen_carrier = n.generators.carrier.unique()[0]
n.carriers.at[gen_carrier, "max_growth"] = 218
status, cond = optimize(n, api, **kwargs)
assert all(n.generators.p_nom_opt.groupby(n.generators.build_year).sum() <= 218)
@pytest.mark.parametrize("api", ["linopy"])
def test_max_relative_growth_constraint(n, api):
# test generator relative growth limit
gen_carrier = n.generators.carrier.unique()[0]
n.carriers.at[gen_carrier, "max_growth"] = 218
n.carriers.at[gen_carrier, "max_relative_growth"] = 1.5
status, cond = optimize(n, api, **kwargs)
built_per_period = n.generators.p_nom_opt.groupby(n.generators.build_year).sum()
assert all(built_per_period - built_per_period.shift(fill_value=0) * 1.5 <= 218)
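# A minimal sketch of the multi-investment setup the fixtures above build: assigning
# investment_periods turns the snapshots into a (period, timestep) MultiIndex, and
# build_year/lifetime on a component decide in which periods it is active (queried
# here via get_active_assets). Assumes pypsa is installed; no solver is needed for
# this part, since nothing is optimized.
import pypsa

n = pypsa.Network(snapshots=range(2))
n.investment_periods = [2020, 2030]
n.add("Bus", "b")
n.add("Generator", "g-2030", bus="b", build_year=2030, lifetime=20,
      p_nom_extendable=True)

print(n.snapshots)                             # MultiIndex of (period, timestep)
print(n.get_active_assets("Generator", 2020))  # g-2030 inactive in 2020
print(n.get_active_assets("Generator", 2030))  # g-2030 active from 2030 onwards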
|
85265e6e916aabfac64a86e835fe34ce3668ea14
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/metric_learn/infer/sdk/main.py
|
b4fdde513b42ad4281049573892b6e464f3d98f4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,598
|
py
|
main.py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""main"""
import argparse
import os
import time
import cv2
from api.infer import SdkApi
from config import config as cfg
from StreamManagerApi import StreamManagerApi
def parser_args():
"""parser_args"""
parser = argparse.ArgumentParser(description="metric_learn inference")
parser.add_argument("--img_path",
type=str,
required=False,
default="../../data/Stanford_Online_Products",
help="image directory.")
parser.add_argument(
"--pipeline_path",
type=str,
required=False,
default="./config/metric_learn.pipeline",
help="image file path. The default is '/metric_learn/infer/sdk/config/metric_learn.pipeline'. ")
parser.add_argument(
"--model_type",
type=str,
required=False,
default="dvpp",
help=
"rgb: high-precision, dvpp: high performance. The default is 'dvpp'.")
parser.add_argument(
"--infer_mode",
type=str,
required=False,
default="infer",
help=
"infer:only infer, eval: accuracy evaluation. The default is 'infer'.")
parser.add_argument(
"--infer_result_dir",
type=str,
required=False,
default="../../data/infer_result",
help=
"cache dir of inference result. The default is '../data/infer_result'.")
arg = parser.parse_args()
return arg
def process_img(img_file):
img0 = cv2.imread(img_file)
img = resize_i(img0, height=cfg.MODEL_HEIGHT, width=cfg.MODEL_WIDTH)
return img
def resize_i(img, height=224, width=224):
"""resize img"""
percent = float(height) / min(img.shape[0], img.shape[1])
resized_width = int(round(img.shape[1] * percent))
resized_height = int(round(img.shape[0] * percent))
img = cv2.resize(img, (resized_width, resized_height), interpolation=cv2.INTER_LANCZOS4)
shape = (224, 224)
resized = cv2.resize(img, shape, interpolation=cv2.INTER_LINEAR)
return resized
def image_inference(pipeline_path, stream_name, data_dir, result_dir):
stream_manager_api = StreamManagerApi()
start_time = time.time()
sdk_api = SdkApi(pipeline_path)
if not sdk_api.init():
exit(-1)
print(stream_name)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
img_data_plugin_id = 0
print("\nBegin to inference for {}.\n".format(data_dir))
TRAIN_LIST = "../data/Stanford_Online_Products/test_half.txt"
TRAIN_LISTS = open(TRAIN_LIST, "r").readlines()
max_len = 30003
# cal_acc
for _, item in enumerate(TRAIN_LISTS):
if _ >= max_len:
break
items = item.strip().split()
path = items[0]
father = path.split("/")[0]
father_path = os.path.join(result_dir, father)
if not os.path.exists(father_path):
os.makedirs(father_path)
file_path = os.path.join(data_dir, path)
save_bin_path = os.path.join(result_dir, "{}.bin".format(path.split(".")[0]))
img_np = process_img(file_path)
img_shape = img_np.shape
# SDK
sdk_api.send_img_input(stream_name,
img_data_plugin_id, "appsrc0",
img_np.tobytes(), img_shape)
result = sdk_api.get_result(stream_name)
with open(save_bin_path, "wb") as fp:
fp.write(result)
print(
"End-2end inference, file_name:", file_path,
"\n"
)
end_time = time.time()
print("cost: ", end_time-start_time, "s")
print("fps: ", 30003.0/(end_time-start_time), "imgs/sec")
stream_manager_api.DestroyAllStreams()
if __name__ == "__main__":
args = parser_args()
image_inference(args.pipeline_path, cfg.STREAM_NAME.encode("utf-8"), args.img_path,
args.infer_result_dir)
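# A small sketch of the size arithmetic inside resize_i above: the shorter side is
# scaled to `height` (keeping the aspect ratio) before the final fixed 224x224 resize.
# Pure arithmetic, no cv2 needed; the input dimensions are illustrative.
def scaled_dims(orig_h, orig_w, height=224):
    percent = float(height) / min(orig_h, orig_w)
    return int(round(orig_h * percent)), int(round(orig_w * percent))

print(scaled_dims(480, 640))   # -> (224, 299): shorter side pinned to 224
print(scaled_dims(1000, 500))  # -> (448, 224)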
|
6fc4688ec257779777a3e5beae11a9fbd5c2cc6e
|
312a8fde11293cb142334a3860966ec1f75ac401
|
/timesketch/lib/analyzers/authentication/utils_test.py
|
f89b054b1cc4ba0423eb75bc3894baf7b14fb791
|
[
"Apache-2.0"
] |
permissive
|
google/timesketch
|
f0fd09062a8a24bac581d2d4286d095d667d2f10
|
24f471b58ca4a87cb053961b5f05c07a544ca7b8
|
refs/heads/master
| 2023-08-31T21:48:19.602686
| 2023-08-31T11:24:17
| 2023-08-31T11:24:17
| 21,009,909
| 2,263
| 647
|
Apache-2.0
| 2023-09-14T14:08:07
| 2014-06-19T17:49:45
|
Python
|
UTF-8
|
Python
| false
| false
| 23,171
|
py
|
utils_test.py
|
# Copyright 2023 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file con1672097149tains unit tests for interface"""
import hashlib
import logging
import sys
import textwrap
from typing import List
import pandas as pd
from timesketch.lib.analyzers.interface import AnalyzerOutput
from timesketch.lib.analyzers.authentication.utils import AuthSummary
from timesketch.lib.analyzers.authentication.utils import LoginRecord
from timesketch.lib.analyzers.authentication.utils import BaseAuthenticationUtils
from timesketch.lib.analyzers.authentication.utils import BruteForceUtils
from timesketch.lib.testlib import BaseTest
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
def load_test_dataframe() -> pd.DataFrame:
"""Loads SSH log file and returns dataframe.
Returns:
pd.DataFrame: A dataframe containing mock events.
"""
return pd.DataFrame(mock_authentication_events())
EXPECTED_IP_SUMMARY = {
"summary_type": "source_ip",
"source_ip": "192.168.140.67",
"domain": "",
"username": "",
"first_seen": 1672097149,
"last_seen": 1672097360,
"first_auth": {
"timestamp": 1672097359,
"session_id": "6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
"session_duration": 1,
"source_ip": "192.168.140.67",
"source_hostname": "",
"source_port": 58300,
"domain": "",
"username": "admin",
},
"summary": {},
"successful_logins": [
{
"timestamp": 1672097359,
"session_id": "6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
"session_duration": 1,
"source_ip": "192.168.140.67",
"source_hostname": "",
"source_port": 58300,
"domain": "",
"username": "admin",
}
],
"success_source_ips": ["192.168.140.67"],
"success_usernames": ["admin"],
"total_success_events": 1,
"total_failed_events": 200,
"distinct_source_ip_count": 1,
"distinct_username_count": 1,
"top_source_ips": {"192.168.140.67": 202},
"top_usernames": {"admin": 202},
}
EXPECTED_USER_SUMMARY = {
"summary_type": "username",
"source_ip": "",
"domain": "",
"username": "admin",
"first_seen": 1672097149,
"last_seen": 1672097360,
"first_auth": {
"timestamp": 1672097359,
"session_id": "6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
"session_duration": 1,
"source_ip": "192.168.140.67",
"source_hostname": "",
"source_port": 58300,
"domain": "",
"username": "admin",
},
"summary": {},
"successful_logins": [
{
"timestamp": 1672097359,
"session_id": "6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
"session_duration": 1,
"source_ip": "192.168.140.67",
"source_hostname": "",
"source_port": 58300,
"domain": "",
"username": "admin",
},
],
"success_source_ips": ["192.168.140.67"],
"success_usernames": ["admin"],
"total_success_events": 1,
"total_failed_events": 210,
"distinct_source_ip_count": 2,
"distinct_username_count": 1,
"top_source_ips": {
"172.16.151.91": 10,
"192.168.140.67": 202,
},
"top_usernames": {"admin": 212},
}
EXPECTED_AUTH_SUMMARY_3 = {
"summary_type": "source_ip",
"source_ip": "192.168.140.67",
"domain": "",
"username": "",
"first_seen": 1672097149,
"last_seen": 1672097360,
"first_auth": {
"timestamp": 1672097359,
"session_id": "6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
"session_duration": 1,
"source_ip": "192.168.140.67",
"source_hostname": "",
"source_port": 58300,
"domain": "",
"username": "admin",
},
"summary": {},
"successful_logins": [
{
"timestamp": 1672097359,
"session_id": "6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
"session_duration": 1,
"source_ip": "192.168.140.67",
"source_hostname": "",
"source_port": 58300,
"domain": "",
"username": "admin",
}
],
"success_source_ips": ["192.168.140.67"],
"success_usernames": ["admin"],
"total_success_events": 1,
"total_failed_events": 200,
"distinct_source_ip_count": 1,
"distinct_username_count": 1,
"top_source_ips": {"192.168.140.67": 202},
"top_usernames": {"admin": 202},
}
EXPECTED_AUTH_SUMMARY_4 = {
"summary_type": "username",
"source_ip": "",
"domain": "",
"username": "admin",
"first_seen": 1672097149,
"last_seen": 1672097360,
"first_auth": {
"timestamp": 1672097359,
"session_id": "6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
"session_duration": 1,
"source_ip": "192.168.140.67",
"source_hostname": "",
"source_port": 58300,
"domain": "",
"username": "admin",
},
"summary": {},
"successful_logins": [
{
"timestamp": 1672097359,
"session_id": "6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
"session_duration": 1,
"source_ip": "192.168.140.67",
"source_hostname": "",
"source_port": 58300,
"domain": "",
"username": "admin",
},
],
"success_source_ips": ["192.168.140.67"],
"success_usernames": ["admin"],
"total_success_events": 1,
"total_failed_events": 210,
"distinct_source_ip_count": 2,
"distinct_username_count": 1,
"top_source_ips": {
"172.16.151.91": 10,
"192.168.140.67": 202,
},
"top_usernames": {"admin": 212},
}
EMPTY_LOGIN_SESSION = {
"source_ip": "",
"domain": "",
"username": "",
"session_id": "",
"login_timestamp": 0,
"logout_timestamp": 0,
"session_duration": 0,
}
EXPECTED_LOGIN_SESSION = {
"timestamp": 1672097359,
"session_id": "6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d02a1991db",
"source_hostname": "",
"session_duration": 7,
"source_ip": "192.168.140.67",
"source_port": 58300,
"domain": "",
"username": "admin",
}
class TestBaseAuthenticationAnalyzer(BaseTest):
"""Class for testing BasicAuthenticationAnalyzer."""
def setUp(self) -> None:
df = load_test_dataframe()
self.analyzer = BaseAuthenticationUtils()
self.analyzer.set_dataframe(df)
def test_check_required_fields(self) -> None:
"""Tests check_required_fields method."""
# Testing missing fields
fields = [
"timestamp",
"source_ip",
"source_port",
"username",
"domain",
"authentication_method",
"authentication_result",
]
self.assertFalse(self.analyzer.check_required_fields(fields))
# Testing valid fields
fields = [
"timestamp",
"source_ip",
"source_port",
"username",
"domain",
"authentication_method",
"authentication_result",
"session_id",
]
self.assertTrue(self.analyzer.check_required_fields(fields))
def test_calculate_session_duration(self) -> None:
"""Tests calculate_session_duration."""
# Testing empty session ID
session_duration = self.analyzer.calculate_session_duration(
session_id="", timestamp=1672097359
)
self.assertEqual(-1, session_duration)
# Testing invalid session ID value
session_duration = self.analyzer.calculate_session_duration(
session_id="abcdef01234567890", timestamp=1672097359
)
self.assertEqual(-1, session_duration)
# Testing valid session ID and invalid timestamp
session_duration = self.analyzer.calculate_session_duration(
session_id="6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
timestamp=None,
)
self.assertEqual(-1, session_duration)
# Testing valid session_id and timestamp
session_duration = self.analyzer.calculate_session_duration(
session_id="6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
timestamp=1672097359,
)
self.assertEqual(1, session_duration)
def test_get_ip_summary(self) -> None:
"""Test get_ip_summary method."""
# Testing empty dataframe
authsummary = self.analyzer.get_ip_summary("100.100.100.100")
self.assertIsNone(authsummary)
# Testing non-existent IP address 100.100.100.100
authsummary = self.analyzer.get_ip_summary("100.100.100.100")
self.assertIsNone(authsummary)
# Testing valid IP 192.168.140.67 summary
authsummary = self.analyzer.get_ip_summary("192.168.140.67")
self.assertDictEqual(EXPECTED_IP_SUMMARY, authsummary.to_dict())
def test_get_user_summary(self) -> None:
"""Test get_user_summary method."""
# Testing empty dataframe
authsummary = self.analyzer.get_user_summary(
username="gametogenesis", domain=""
)
self.assertIsNone(authsummary)
# Testing non-existent username supermario
authsummary = self.analyzer.get_user_summary(username="supermario", domain="")
self.assertIsNone(authsummary)
# Testing valid username admin
authsummary = self.analyzer.get_user_summary(username="admin", domain="")
self.assertIsNotNone(authsummary)
self.assertDictEqual(EXPECTED_USER_SUMMARY, authsummary.to_dict())
def test_get_authsummary(self) -> None:
"""Test get_authsummary method."""
# Testing empty dataframe
df = pd.DataFrame()
authsummary = self.analyzer.get_authsummary(df, "source_ip", "100.100.100.100")
self.assertIsNone(authsummary)
# Testing invalid summary_type value
df = self.analyzer.df
authsummary = self.analyzer.get_authsummary(df, "source_port", 54321)
self.assertIsNone(authsummary)
# Testing valid summary_type source_ip
authsummary = self.analyzer.get_authsummary(df, "source_ip", "192.168.140.67")
self.assertDictEqual(EXPECTED_AUTH_SUMMARY_3, authsummary.to_dict())
# Testing valid source_type username
authsummary = self.analyzer.get_authsummary(df, "username", "admin")
self.assertDictEqual(EXPECTED_AUTH_SUMMARY_4, authsummary.to_dict())
def test_to_useraccount(self) -> None:
"""Test to_useraccount method."""
# Testing empty domain and username
useraccount = self.analyzer.to_useraccount(username="", domain="")
self.assertEqual(useraccount, "")
# Testing username and domain
useraccount = self.analyzer.to_useraccount(username="admin", domain="example")
self.assertEqual("example/admin", useraccount)
def test_from_useraccount(self) -> None:
"""Test from_useraccount method."""
# Testing empty useraccount
username, domain = self.analyzer.from_useraccount("")
self.assertEqual("", username)
self.assertEqual("", domain)
# Testing empty domain and username
username, domain = self.analyzer.from_useraccount("admin")
self.assertEqual("admin", username)
self.assertEqual("", domain)
username, domain = self.analyzer.from_useraccount("example/admin")
self.assertEqual("admin", username)
self.assertEqual("example", domain)
class TestBruteForceAnalyzer(BaseTest):
"""Class for testing BruteForceAnalzyer."""
def setUp(self) -> None:
"""Setups test class."""
self.analyzer = BruteForceUtils()
self.analyzer.analyzer_metadata = {
"timesketch_instance": "http://localhost",
"sketch_id": 1,
"timeline_id": 1,
}
df = load_test_dataframe()
self.analyzer.set_dataframe(df)
def _create_analyzer_output(self) -> AnalyzerOutput:
"""Creates and returns analyzer output.
Returns:
AnalyzerOutput: Returns an empty analyzer output.
"""
output = AnalyzerOutput(
analyzer_identifier="BruteForceAnalyzer",
analyzer_name="Brute Force Analyzer",
timesketch_instance="http://localhost",
sketch_id=1,
timeline_id=1,
)
return output
def _create_authsummary(self) -> AuthSummary:
"""Creates and returns authsummaries.
Returns:
AuthSummary: Returns an object of AuthSummary.
"""
# Create successful login entry
login = LoginRecord(
source_ip="192.168.140.67",
username="admin",
domain="",
session_id="6d652a46d9ddf7ebc4cade9b36a2ff1a0819180ea353c63438b5e5d0"
"2a1991db",
)
login.timestamp = 1672097359
login.source_port = 58300
login.session_duration = 1
authsummary = AuthSummary()
authsummary.summary_type = "source_ip"
authsummary.source_ip = "192.168.140.67"
authsummary.username = ""
authsummary.domain = ""
authsummary.first_seen = 1672097149
authsummary.last_seen = 1672097360
authsummary.first_auth = login
authsummary.successful_logins.append(login)
authsummary.success_source_ips = ["192.168.140.67"]
authsummary.success_usernames = ["admin"]
authsummary.total_success_events = 1
authsummary.total_failed_events = 200
authsummary.distinct_source_ip_count = 1
authsummary.distinct_username_count = 1
authsummary.top_source_ips["192.168.140.67"] = 202
authsummary.top_usernames["admin"] = 202
authsummary.summary["bruteforce"] = []
authsummary.summary["bruteforce"].append(login)
return authsummary
def _create_authsummaries(self) -> List[AuthSummary]:
"""Creates and returns a list of AuthSummary.
Returns:
List[AuthSummary]: A list of AuthSummary.
"""
authsummaries = []
authsummary = self._create_authsummary()
authsummaries.append(authsummary)
return authsummaries
def _mock_empty_analyzer_output(self) -> AnalyzerOutput:
"""Mock an empty analyzer output.
Returns:
AnalyzerOutput: An object of class AnalyzerOutput.
"""
output = self._create_analyzer_output()
output.result_priority = "NOTE"
output.result_status = "SUCCESS"
output.result_summary = "No bruteforce activity"
output.result_markdown = "\n### Brute Force Analyzer\nBrute force not detected"
return output
def _mock_analyzer_output(self) -> AnalyzerOutput:
"""Mocks a valid analyzer output.
Returns:
AnalyzerOutput: An object of class AnalyzerOutput.
"""
output = self._create_analyzer_output()
output.result_priority = "HIGH"
output.result_status = "SUCCESS"
output.result_summary = "1 brute force from 192.168.140.67"
output.result_markdown = textwrap.dedent(
"""
### Brute Force Analyzer
### Brute Force Summary for 192.168.140.67
- Successful brute force on 2022-12-26T23:29:19Z as admin
#### 192.168.140.67 Summary
- IP first seen on 2022-12-26T23:25:49Z
- IP last seen on 2022-12-26T23:29:20Z
- First successful authentication on 2022-12-26T23:29:19Z
- First successful login from 192.168.140.67
- First successful login as admin
#### Top Usernames
- admin: 202"""
)
return output
def test_generate_analyzer_output(self) -> None:
"""Tests generate_analyzer_output method."""
test_output = self._create_analyzer_output()
# Testing unset authsummaries
self.assertIsNone(
self.analyzer.generate_analyzer_output(
authsummaries=None, output=test_output
)
)
# Testing empty authsummaries
expected_output = self._mock_empty_analyzer_output()
# Generate output and set result_attributes to empty dict
# We don't want to compare it.
output = self.analyzer.generate_analyzer_output(
authsummaries=[], output=test_output
)
output.result_attributes = {}
self.assertDictEqual(expected_output.__dict__, output.__dict__)
# Testing valid authsummaries
expected_output = self._mock_analyzer_output()
authsummaries = self._create_authsummaries()
expected_output.result_attributes = {"bruteforce": authsummaries}
output = self.analyzer.generate_analyzer_output(
authsummaries=authsummaries, output=test_output
)
self.assertDictEqual(expected_output.__dict__, output.__dict__)
def test_ip_bruteforce_check(self) -> None:
"""Tests ip_bruteforce_check method."""
# Testing non-existing IP
authsummary = self.analyzer.ip_bruteforce_check("192.168.100.100")
self.assertIsNone(authsummary)
# Testing empty IP address
authsummary = self.analyzer.ip_bruteforce_check("")
self.assertIsNone(authsummary)
# Testing non-bruteforcing IP address
authsummary = self.analyzer.ip_bruteforce_check("172.30.151.91")
self.assertIsNone(authsummary)
# Testing brute forcing IP address
authsummary = self.analyzer.ip_bruteforce_check("192.168.140.67")
expected_authsummary = self._create_authsummary()
self.assertDictEqual(expected_authsummary.to_dict(), authsummary.to_dict())
def test_start_bruteforce_analysis(self) -> None:
"""Tests start_bruteforce_analysis method."""
expected_output = self._mock_analyzer_output()
# Generate analyzer output and set result_attributes to empty dict
output = self.analyzer.start_bruteforce_analysis(self._create_analyzer_output())
output.result_attributes = {}
self.assertDictEqual(expected_output.to_json(), output.to_json())
def mock_authentication_events() -> List[dict]:
"""Mock authentication events.
Returns:
List[dict]: A list of dictionary containing mock authentication events.
"""
events = []
# Creating failed authentication events from 192.168.140.67
config = {
"hostname": "debian-server",
"username": "admin",
"source_ip": "192.168.140.67",
"source_port": 58200,
"event_type": "authentication",
"authentication_method": "password",
"authentication_result": "failure",
"pid": 625,
}
events.extend(create_authentication_events(config, count=200))
# Create failed authentication from 172.16.151.91
config["source_ip"] = "172.16.151.91"
config["source_port"] = 58250
events.extend(create_authentication_events(config, count=10))
# Create successful events
config = {
"hostname": "debian-server",
"username": "admin",
"source_ip": "192.168.140.67",
"source_port": 58300,
"event_type": "authentication",
"authentication_method": "password",
"authentication_result": "success",
"pid": 700,
}
events.extend(create_authentication_events(config, count=1))
# Create disconnection events
config = {
"hostname": "debian-server",
"username": "admin",
"source_ip": "192.168.140.67",
"source_port": 58300,
"event_type": "disconnection",
"authentication_method": "",
"authentication_result": "",
"pid": 700,
}
events.extend(create_authentication_events(config, count=1))
# Generate event ID and timestamp
event_id = 0
timestamp = 1672097149681987
for i, _ in enumerate(events):
events[i]["event_id"] = event_id
events[i]["timestamp"] = int(timestamp / 1000000)
event_id += 1
timestamp += 1000000
return events
def create_authentication_events(config: dict, count: int = 200) -> List[dict]:
"""Creates authentication events.
Args:
config (dict): A dictionary containing SSH event data.
count (int): Indicates the number of authentication events to generate.
Returns:
List[dict]: A list of dictionary containing authentication events.
"""
events = []
for i in range(0, count):
event = {
"hostname": config.get("hostname", "default-ssh-server"),
"username": config.get("username", "root"),
"domain": "",
"source_ip": config.get("source_ip", "192.168.1.1"),
"source_port": int(config.get("source_port", 62000)) + i,
"pid": int(config.get("pid", 500)) + i,
"event_type": config.get("event_type", "disconnection"),
"authentication_method": config.get("authentication_method", ""),
"authentication_result": config.get("authentication_result", ""),
}
event["session_id"] = calculate_session_id(
hostname=event["hostname"],
username=event["username"],
source_ip=event["source_ip"],
source_port=event["source_port"],
)
events.append(event)
return events
def calculate_session_id(
hostname: str, username: str, source_ip: str, source_port: int
) -> str:
"""Creates pseudo session ID for SSH.
Args:
hostname (str): Hostname of the system.
username (str): Username in authentication event.
source_ip (str): IP address initiating authentication.
source_port (int): The source port used in authentication.
Returns:
str: A string containing pseudo session ID.
"""
session_id_data = f"{hostname}|{username}|{source_ip}|{source_port}"
hasher = hashlib.new("sha256")
hasher.update(str.encode(session_id_data))
return hasher.hexdigest()
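# Illustrative sketch (the values reuse the mock configuration above; the
# __main__ guard itself is illustrative only): calculate_session_id is
# deterministic, so every event emitted for the same
# (hostname, username, source_ip, source_port) tuple shares one pseudo session ID.
if __name__ == "__main__":
    example_id = calculate_session_id(
        hostname="debian-server",
        username="admin",
        source_ip="192.168.140.67",
        source_port=58300,
    )
    print(example_id)  # a 64-character SHA-256 hex digest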
|
37e8ebff2a65c15a451e4c0a07f5e340a610805c
|
9efca95a55cb4df52d895d42f1ec10331516a734
|
/tools/c7n_tencentcloud/tests/test_tc_query.py
|
e05f1563342081fb5055359b6695a24cc6afb51a
|
[
"Apache-2.0"
] |
permissive
|
cloud-custodian/cloud-custodian
|
519e602abe00c642786441b64cc40857ef5bc9de
|
27563cf4571040f923124e1acb2463f11e372225
|
refs/heads/main
| 2023-09-04T10:54:55.963703
| 2023-09-01T17:40:17
| 2023-09-01T17:40:17
| 52,837,350
| 3,327
| 1,096
|
Apache-2.0
| 2023-09-14T14:03:30
| 2016-03-01T01:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,190
|
py
|
test_tc_query.py
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import pytest
from c7n_tencentcloud.query import ResourceTypeInfo, ResourceQuery, QueryResourceManager
from c7n_tencentcloud.utils import PageMethod
class RegionInfo(ResourceTypeInfo):
"""RegionInfo"""
id = "InstanceId"
endpoint = "cvm.tencentcloudapi.com"
service = "cvm"
version = "2017-03-12"
enum_spec = ("DescribeRegions", "Response.RegionSet[]", {})
metrics_instance_id_name = "InstanceId"
resource_prefix = "instance"
taggable = True
class CVMInfo(ResourceTypeInfo):
"""CVMInfo"""
id = "InstanceId"
endpoint = "cvm.tencentcloudapi.com"
service = "cvm"
version = "2017-03-12"
enum_spec = ("DescribeInstances", "Response.InstanceSet[]", {})
metrics_instance_id_name = "InstanceId"
paging_def = {"method": PageMethod.Offset, "limit": {"key": "Limit", "value": 20}}
resource_prefix = "instance"
taggable = True
class CVMInfoNoPagination(ResourceTypeInfo):
"""CVMInfoNoPagination"""
id = "InstanceId"
endpoint = "cvm.tencentcloudapi.com"
service = "cvm"
version = "2017-03-12"
enum_spec = ("DescribeInstances", "Response.InstanceSet[]", {})
metrics_instance_id_name = "InstanceId"
resource_prefix = "instance"
taggable = True
def test_meta_str():
assert str(RegionInfo) == "<Type info service:cvm client:2017-03-12>"
assert str(CVMInfo) == "<Type info service:cvm client:2017-03-12>"
class TestResourcetQuery:
@pytest.mark.vcr
def test_filter(self, session):
resource_query = ResourceQuery(session)
res = resource_query.filter("ap-singapore", RegionInfo, {})
assert len(res) == 20
@pytest.mark.vcr
def test_paged_filter(self, session):
resource_query = ResourceQuery(session)
res = resource_query.paged_filter("ap-singapore", CVMInfo, {})
assert len(res) == 6
# (data, expected_query_params)
data_test_cases = [
({}, {}),
({"query": [{"Filters": [{"Key": "Value"}]}]}, {"Filters": [{"Key": "Value"}]})
]
@pytest.fixture(params=data_test_cases)
def data_test_case(request):
return request.param
class TestQueryResourceManager:
def test_get_permissions(self, ctx):
resource_manager = QueryResourceManager(ctx, {})
assert resource_manager.get_permissions() == []
def test_get_resource_query_params(self, ctx, data_test_case):
resource_manager = QueryResourceManager(ctx, data_test_case[0])
res = resource_manager.get_resource_query_params()
assert res == data_test_case[1]
@pytest.mark.vcr
def test_resources(self, ctx, monkeypatch):
monkeypatch.setattr(QueryResourceManager, "resource_type", CVMInfo)
resource_manager = QueryResourceManager(ctx, {})
res = resource_manager.resources()
assert len(res) == 6
@pytest.mark.vcr
def test_resources_no_pagination(self, ctx, monkeypatch):
monkeypatch.setattr(QueryResourceManager, "resource_type", CVMInfoNoPagination)
resource_manager = QueryResourceManager(ctx, {})
res = resource_manager.resources()
assert len(res) == 6
|
67b976a40c68f4db8def28ff9947aeaaa83efc48
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/framework/docstr/arange.py
|
1b5f3dc5a7a8feff77a4be986ea751d902427ed5
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 2,402
|
py
|
arange.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.arange,
"""
oneflow.arange(start: int = 0, end, step: int = 1, dtype: Optional[oneflow._oneflow_internal.dtype] = None, device: Optional[Union[oneflow._oneflow_internal.device, str]] = None, placement: Optional[oneflow._oneflow_internal.placement] = None, sbp: Optional[Union[oneflow._oneflow_internal.sbp.sbp, List[oneflow._oneflow_internal.sbp.sbp]]] = None, requires_grad: bool = False)
Returns a 1-D tensor of size :math:`\\left\\lceil \\frac{\\text{end} - \\text{start}}{\\text{step}} \\right\\rceil`
with values from the half-open interval ``[start, end)`` taken with common
difference :attr:`step` (the gap between two consecutive values in the tensor).
.. math::
\\text{out}_{i+1} = \\text{out}_i + \\text{step}.
Args:
start (int): the starting value for the set of points. Default: ``0``.
end (int): the ending value for the set of points
step (int): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
dtype(flow.dtype, optional): If `dtype` is not given, infer the `dtype` from the other input arguments. If any of start, end, or step are floating-point, the `dtype` is inferred to be the floating-point data type. Otherwise, the `dtype` is inferred to be `flow.int64`.
device(flow.device, optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor.
requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: `False`.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> y = flow.arange(0, 5)
>>> y
tensor([0, 1, 2, 3, 4], dtype=oneflow.int64)
""",
)
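# Supplementary sketch (illustrative only, not part of the upstream docstring;
# the exact tensor repr and the default floating-point dtype are assumptions):
# integer arguments yield flow.int64, while any floating-point argument yields
# a floating-point dtype, as described above.
#
#   >>> import oneflow as flow
#   >>> flow.arange(0, 5, 2)          # all-integer arguments
#   tensor([0, 2, 4], dtype=oneflow.int64)
#   >>> flow.arange(0.0, 1.0, 0.25)   # floating-point start/end/step
#   tensor([0.0000, 0.2500, 0.5000, 0.7500], dtype=oneflow.float32)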
|
fe1e4dcb93483d89ec19f9f930364cd87faa7fd1
|
cb4f118412a55c52d720bc79e4074606622920ac
|
/arcade/examples/particle_systems.py
|
1cd658086e0f77e218c7aafdbb6586fac7e7af33
|
[
"MIT"
] |
permissive
|
pythonarcade/arcade
|
3e536306f0c44f911de149b58958d8b609ffad4b
|
908664efc256697d3098a347f63d217d97841782
|
refs/heads/development
| 2023-08-29T02:53:01.599145
| 2023-08-26T16:54:34
| 2023-08-26T16:54:34
| 49,003,082
| 786
| 215
|
NOASSERTION
| 2023-09-12T18:38:54
| 2016-01-04T14:46:52
|
Python
|
UTF-8
|
Python
| false
| false
| 27,208
|
py
|
particle_systems.py
|
"""
Particle Systems
Demonstrate how to use the Emitter and Particle classes to create particle systems.
Demonstrate the different effects possible with Emitter's and Particle's by showing
a number of different emitters in sequence, with each example often varying just one
setting from the previous example.
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.particle_systems
"""
from __future__ import annotations
import arcade
import pyglet
import random
import math
from arcade.math import (
rand_in_circle,
rand_on_circle,
rand_in_rect,
rand_on_line,
rand_vec_magnitude,
rand_vec_spread_deg,
)
from arcade import particles
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Particle System Examples"
QUIET_BETWEEN_SPAWNS = 0.25 # time between spawning another particle system
EMITTER_TIMEOUT = 10 * 60
CENTER_POS = (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
BURST_PARTICLE_COUNT = 500
TEXTURE = ":resources:images/pinball/pool_cue_ball.png"
TEXTURE2 = ":resources:images/space_shooter/playerShip3_orange.png"
TEXTURE3 = ":resources:images/pinball/bumper.png"
TEXTURE4 = ":resources:images/enemies/wormGreen.png"
TEXTURE5 = ":resources:images/space_shooter/meteorGrey_med1.png"
TEXTURE6 = ":resources:images/animated_characters/female_person/femalePerson_idle.png"
TEXTURE7 = ":resources:images/tiles/boxCrate_double.png"
DEFAULT_SCALE = 0.3
DEFAULT_ALPHA = 32
DEFAULT_PARTICLE_LIFETIME = 3.0
PARTICLE_SPEED_FAST = 1.0
PARTICLE_SPEED_SLOW = 0.3
DEFAULT_EMIT_INTERVAL = 0.003
DEFAULT_EMIT_DURATION = 1.5
# Utils
def sine_wave(t, min_x, max_x, wavelength):
spread = max_x - min_x
mid = (max_x + min_x) / 2
return (spread / 2) * math.sin(2 * math.pi * t / wavelength) + mid
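# Worked example (comments only, illustrative): sine_wave oscillates about the
# midpoint of [min_x, max_x] with amplitude (max_x - min_x) / 2. With min_x=0,
# max_x=100 and wavelength=4:
#   sine_wave(0, 0, 100, 4) -> 50.0   (sin(0) = 0, only the midpoint remains)
#   sine_wave(1, 0, 100, 4) -> 100.0  (a quarter wavelength reaches the maximum)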
# Example emitters
def emitter_0():
"""Burst, emit from center, particle with lifetime"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_0.__doc__, e
def emitter_1():
"""Burst, emit from center, particle lifetime 1.0 seconds"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=1.0,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_1.__doc__, e
def emitter_2():
"""Burst, emit from center, particle lifetime random in range"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=random.uniform(DEFAULT_PARTICLE_LIFETIME - 1.0, DEFAULT_PARTICLE_LIFETIME),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_2.__doc__, e
def emitter_3():
"""Burst, emit in circle"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_SLOW),
lifetime=DEFAULT_PARTICLE_LIFETIME,
center_xy=rand_in_circle((0.0, 0.0), 100),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_3.__doc__, e
def emitter_4():
"""Burst, emit on circle"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_SLOW),
lifetime=DEFAULT_PARTICLE_LIFETIME,
center_xy=rand_on_circle((0.0, 0.0), 100),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_4.__doc__, e
def emitter_5():
"""Burst, emit in rectangle"""
width, height = 200, 100
centering_offset = (-width / 2, -height / 2)
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_SLOW),
lifetime=DEFAULT_PARTICLE_LIFETIME,
center_xy=rand_in_rect(centering_offset, width, height),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_5.__doc__, e
def emitter_6():
"""Burst, emit on line"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_SLOW),
lifetime=DEFAULT_PARTICLE_LIFETIME,
center_xy=rand_on_line((0.0, 0.0), (SCREEN_WIDTH, SCREEN_HEIGHT)),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_6.__doc__, e
def emitter_7():
"""Burst, emit from center, velocity fixed speed around 360 degrees"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT // 4),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_on_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_7.__doc__, e
def emitter_8():
"""Burst, emit from center, velocity in rectangle"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_rect((-2.0, -2.0), 4.0, 4.0),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_8.__doc__, e
def emitter_9():
"""Burst, emit from center, velocity in fixed angle and random speed"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT // 4),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_vec_magnitude(45, 1.0, 4.0),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_9.__doc__, e
def emitter_10():
"""Burst, emit from center, velocity from angle with spread"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT // 4),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_vec_spread_deg(90, 45, 2.0),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_10.__doc__, e
def emitter_11():
"""Burst, emit from center, velocity along a line"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT // 4),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_on_line((-2, 1), (2, 1)),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_11.__doc__, e
def emitter_12():
"""Infinite emitting w/ eternal particle"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitInterval(0.02),
particle_factory=lambda emitter: particles.EternalParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_12.__doc__, e
def emitter_13():
"""Interval, emit particle every 0.01 seconds for one second"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_13.__doc__, e
def emitter_14():
"""Interval, emit from center, particle lifetime 1.0 seconds"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=1.0,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_14.__doc__, e
def emitter_15():
"""Interval, emit from center, particle lifetime random in range"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=random.uniform(DEFAULT_PARTICLE_LIFETIME - 1.0, DEFAULT_PARTICLE_LIFETIME),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_15.__doc__, e
def emitter_16():
"""Interval, emit in circle"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_SLOW),
lifetime=DEFAULT_PARTICLE_LIFETIME,
center_xy=rand_in_circle((0.0, 0.0), 100),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_16.__doc__, e
def emitter_17():
"""Interval, emit on circle"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_SLOW),
lifetime=DEFAULT_PARTICLE_LIFETIME,
center_xy=rand_on_circle((0.0, 0.0), 100),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_17.__doc__, e
def emitter_18():
"""Interval, emit in rectangle"""
width, height = 200, 100
centering_offset = (-width / 2, -height / 2)
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_SLOW),
lifetime=DEFAULT_PARTICLE_LIFETIME,
center_xy=rand_in_rect(centering_offset, width, height),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_18.__doc__, e
def emitter_19():
"""Interval, emit on line"""
e = particles.Emitter(
center_xy=(0.0, 0.0),
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_SLOW),
lifetime=DEFAULT_PARTICLE_LIFETIME,
center_xy=rand_on_line((0.0, 0.0), (SCREEN_WIDTH, SCREEN_HEIGHT)),
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_19.__doc__, e
def emitter_20():
"""Interval, emit from center, velocity fixed speed around 360 degrees"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_on_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_20.__doc__, e
def emitter_21():
"""Interval, emit from center, velocity in rectangle"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_rect((-2.0, -2.0), 4.0, 4.0),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_21.__doc__, e
def emitter_22():
"""Interval, emit from center, velocity in fixed angle and speed"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(0.2, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=(1.0, 1.0),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=128
)
)
return emitter_22.__doc__, e
def emitter_23():
"""Interval, emit from center, velocity in fixed angle and random speed"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL * 8, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_vec_magnitude(45, 1.0, 4.0),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_23.__doc__, e
def emitter_24():
"""Interval, emit from center, velocity from angle with spread"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_vec_spread_deg(90, 45, 2.0),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_24.__doc__, e
def emitter_25():
"""Interval, emit from center, velocity along a line"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_on_line((-2, 1), (2, 1)),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_25.__doc__, e
def emitter_26():
"""Interval, emit particles every 0.4 seconds and stop after emitting 5"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithCount(0.4, 5),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=0.6,
alpha=128
)
)
return emitter_26.__doc__, e
def emitter_27():
"""Maintain a steady count of particles"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitMaintainCount(3),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_on_circle((0.0, 0.0), 2.0),
lifetime=random.uniform(1.0, 3.0),
)
)
return emitter_27.__doc__, e
def emitter_28():
"""random particle textures"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL * 5, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=random.choice((TEXTURE, TEXTURE2, TEXTURE3)),
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE
)
)
return emitter_28.__doc__, e
def emitter_29():
"""random particle scale"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL * 5, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=random.uniform(0.1, 0.8),
alpha=DEFAULT_ALPHA
)
)
return emitter_29.__doc__, e
def emitter_30():
"""random particle alpha"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL * 5, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=int(random.uniform(32, 128))
)
)
return emitter_30.__doc__, e
def emitter_31():
"""Constant particle angle"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL * 5, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE2,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
angle=45,
scale=DEFAULT_SCALE
)
)
return emitter_31.__doc__, e
def emitter_32():
"""animate particle angle"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL * 5, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE2,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
change_angle=2,
scale=DEFAULT_SCALE
)
)
return emitter_32.__doc__, e
def emitter_33():
"""Particles that fade over time"""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.FadeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE
)
)
return emitter_33.__doc__, e
def emitter_34():
"""Dynamically generated textures, burst emitting, fading particles"""
textures = [arcade.make_soft_circle_texture(48, p) for p in (arcade.color.GREEN, arcade.color.BLUE_GREEN)]
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitBurst(BURST_PARTICLE_COUNT),
particle_factory=lambda emitter: particles.FadeParticle(
filename_or_texture=random.choice(textures),
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE
)
)
return emitter_34.__doc__, e
def emitter_35():
"""Use most features"""
soft_circle = arcade.make_soft_circle_texture(80, (255, 64, 64))
textures = (TEXTURE, TEXTURE2, TEXTURE3, TEXTURE4, TEXTURE5, TEXTURE6, TEXTURE7, soft_circle)
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(0.01, 1.0),
particle_factory=lambda emitter: particles.FadeParticle(
filename_or_texture=random.choice(textures),
change_xy=rand_in_circle((0.0, 0.0), PARTICLE_SPEED_FAST * 2),
lifetime=random.uniform(1.0, 3.5),
angle=random.uniform(0, 360),
change_angle=random.uniform(-3, 3),
scale=random.uniform(0.1, 0.8)
)
)
return emitter_35.__doc__, e
def emitter_36():
"""Moving emitter. Particles spawn relative to emitter."""
class MovingEmitter(particles.Emitter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.elapsed = 0.0
def update(self):
super().update()
self.elapsed += 1 / 60
self.center_x = sine_wave(self.elapsed, 0, SCREEN_WIDTH, SCREEN_WIDTH / 100)
self.center_y = sine_wave(self.elapsed, 0, SCREEN_HEIGHT, SCREEN_HEIGHT / 100)
e = MovingEmitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitInterval(0.005),
particle_factory=lambda emitter: particles.FadeParticle(
filename_or_texture=TEXTURE,
change_xy=rand_in_circle((0.0, 0.0), 0.1),
lifetime=random.uniform(1.5, 5.5),
scale=random.uniform(0.05, 0.2)
)
)
return emitter_36.__doc__, e
def emitter_37():
"""Rotating emitter. Particles initial velocity is relative to emitter's angle."""
e = particles.Emitter(
center_xy=CENTER_POS,
emit_controller=particles.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: particles.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=(0.0, 2.0),
lifetime=2.0,
scale=DEFAULT_SCALE
)
)
e.change_angle = 10.0
return emitter_37.__doc__, e
def emitter_38():
"""Use simple emitter interface to create a burst emitter"""
e = particles.make_burst_emitter(
center_xy=CENTER_POS,
filenames_and_textures=(TEXTURE, TEXTURE3, TEXTURE4),
particle_count=50,
particle_speed=2.5,
particle_lifetime_min=1.0,
particle_lifetime_max=2.5,
particle_scale=0.3,
fade_particles=True
)
return emitter_38.__doc__, e
def emitter_39():
"""Use simple emitter interface to create an interval emitter"""
e = particles.make_interval_emitter(
center_xy=CENTER_POS,
filenames_and_textures=(TEXTURE, TEXTURE3, TEXTURE4),
emit_interval=0.01,
emit_duration=2.0,
particle_speed=1.5,
particle_lifetime_min=1.0,
particle_lifetime_max=3.0,
particle_scale=0.2,
fade_particles=True
)
return emitter_39.__doc__, e
class MyGame(arcade.Window):
def __init__(self):
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
self.background_color = arcade.color.BLACK
# collect particle factory functions
self.factories = [v for k, v in globals().items() if k.startswith("emitter_")]
self.emitter_factory_id = -1
self.label = None
self.emitter = None
self.emitter_timeout = 0
self.obj = arcade.Sprite(":resources:images/pinball/bumper.png", scale=0.2, center_x=0, center_y=15)
self.obj.change_x = 3
pyglet.clock.schedule_once(self.next_emitter, QUIET_BETWEEN_SPAWNS)
def next_emitter(self, _time_delta):
self.emitter_factory_id = (self.emitter_factory_id + 1) % len(self.factories)
print("Changing emitter to {}".format(self.emitter_factory_id))
self.emitter_timeout = 0
self.label, self.emitter = self.factories[self.emitter_factory_id]()
def on_update(self, delta_time):
if self.emitter:
self.emitter_timeout += 1
self.emitter.update()
if self.emitter.can_reap() or self.emitter_timeout > EMITTER_TIMEOUT:
pyglet.clock.schedule_once(self.next_emitter, QUIET_BETWEEN_SPAWNS)
self.emitter = None
self.obj.update()
if self.obj.center_x > SCREEN_WIDTH:
self.obj.center_x = 0
def on_draw(self):
self.clear()
self.obj.draw()
if self.label:
arcade.draw_text("#{} {}".format(self.emitter_factory_id, self.label),
SCREEN_WIDTH / 2, SCREEN_HEIGHT - 20,
arcade.color.PALE_GOLD, 20, width=SCREEN_WIDTH, align="center",
anchor_x="center", anchor_y="center")
if self.emitter:
self.emitter.draw()
arcade.draw_text("Particles: " + str(self.emitter.get_count()), 10, 30, arcade.color.PALE_GOLD, 12)
def on_key_press(self, key, modifiers):
if key == arcade.key.ESCAPE:
arcade.close_window()
if __name__ == "__main__":
game = MyGame()
arcade.run()
|
a786e8a0adfe3b9f94e2dc49377956d24338df74
|
8e90a7759ec7143427823547e0fbff58e0343aaa
|
/docker_sdk_api/domain/exceptions/models_exception.py
|
9e00c0cbd85688826a01ffcaf991df39755ff2df
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
BMW-InnovationLab/BMW-TensorFlow-Training-GUI
|
646a6f86f26887e94351b4c572b7fe7f0842f75c
|
06531dae14365986c86baf735fd149317f4bb67a
|
refs/heads/master
| 2023-07-20T01:48:27.299962
| 2023-07-12T15:22:22
| 2023-07-12T15:22:22
| 227,429,492
| 1,030
| 198
|
Apache-2.0
| 2023-05-22T17:40:23
| 2019-12-11T18:06:11
|
Python
|
UTF-8
|
Python
| false
| false
| 260
|
py
|
models_exception.py
|
from domain.exceptions.application_error import ApplicationError
class PathNotFound(ApplicationError):
def __init__(self, additional_message: str = '', path: str = ''):
super().__init__("Path Not Found ", additional_message + '{}'.format(path))
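# Minimal usage sketch (hypothetical message and path; assumes ApplicationError
# simply stores the two message parts passed to it):
#
#   try:
#       raise PathNotFound("model folder missing: ", path="/models/resnet")
#   except PathNotFound as err:
#       ...  # e.g. translate to an HTTP 404 response in the API layer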
|
b780a8d218e2960df43d079e0f0d1587e69d4e8a
|
5ff404b30db6a7c58016ef34198e483c8c32e46a
|
/diffeqpy/tests/test_dde.py
|
0b4b1eaef45d4b0875d816a399f01462a13dd1aa
|
[
"MIT"
] |
permissive
|
SciML/diffeqpy
|
86cb33dd65e7e719648e90c1687b05619ea78e9d
|
d3741862e4d959d7cc17144cad2e6bc117101352
|
refs/heads/master
| 2023-08-28T02:21:18.953588
| 2022-01-13T02:41:29
| 2022-01-13T02:41:29
| 130,501,860
| 332
| 28
|
MIT
| 2023-09-12T18:07:16
| 2018-04-21T18:34:34
|
Python
|
UTF-8
|
Python
| false
| false
| 540
|
py
|
test_dde.py
|
from julia import Main
from .. import de
def test():
f = Main.eval("""
function f(du, u, h, p, t)
du[1] = 1.1/(1 + sqrt(10)*(h(p, t-20)[1])^(5/4)) - 10*u[1]/(1 + 40*u[2])
du[2] = 100*u[1]/(1 + 40*u[2]) - 2.43*u[2]
end""")
u0 = [1.05767027/3, 1.030713491/3]
h = Main.eval("""
function h(p,t)
[1.05767027/3, 1.030713491/3]
end
""")
tspan = (0.0, 100.0)
constant_lags = [20.0]
prob = de.DDEProblem(f,u0,h,tspan,constant_lags=constant_lags)
sol = de.solve(prob,saveat=0.1)
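# Illustrative follow-up (not in the original test; the sol.t / sol.u attribute
# names are assumed from diffeqpy's usual solution interface):
#   assert len(sol.t) == len(sol.u)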
|
cc70e8dc53e4c174a49b33ea734355aa67767c6b
|
0f4513aa510f62808af0d2cc5b3bb76f16df8807
|
/learn/sym_train.py
|
7afd8553b5323517cab33f9f22c09592f9c3b014
|
[
"MIT"
] |
permissive
|
chrisdonahue/ddc
|
a93bc81d69dd123d31c17c79aec31d588acc8786
|
812aa57f31d77a5fb91bc2eb5d1e7d8c03416235
|
refs/heads/master
| 2022-05-04T17:52:55.072679
| 2022-03-23T14:54:30
| 2022-03-23T14:54:30
| 92,370,461
| 205
| 45
| null | 2022-01-10T01:27:33
| 2017-05-25T06:04:01
|
Python
|
UTF-8
|
Python
| false
| false
| 26,955
|
py
|
sym_train.py
|
from collections import defaultdict
import cPickle as pickle
import os
import time
import numpy as np
import tensorflow as tf
from sym_net import SymNet
from util import *
# Data
tf.app.flags.DEFINE_string('train_txt_fp', '', 'Training dataset txt file with a list of pickled song files')
tf.app.flags.DEFINE_string('valid_txt_fp', '', 'Eval dataset txt file with a list of pickled song files')
tf.app.flags.DEFINE_string('test_txt_fp', '', 'Test dataset txt file with a list of pickled song files')
tf.app.flags.DEFINE_string('sym_rnn_pretrain_model_ckpt_fp', '', 'File path to model checkpoint with only sym weights')
tf.app.flags.DEFINE_string('model_ckpt_fp', '', 'File path to model checkpoint if resuming or eval')
# Features
tf.app.flags.DEFINE_string('sym_in_type', 'onehot', 'Either \'onehot\' or \'bagofarrows\'')
tf.app.flags.DEFINE_string('sym_out_type', 'onehot', 'Either \'onehot\' or \'bagofarrows\'')
tf.app.flags.DEFINE_integer('sym_narrows', 4, 'Number of arrows in data')
tf.app.flags.DEFINE_integer('sym_narrowclasses', 4, 'Number of arrow classes in data')
tf.app.flags.DEFINE_integer('sym_embedding_size', 32, '')
tf.app.flags.DEFINE_bool('audio_z_score', False, 'If true, train and test on z-score of validation data')
tf.app.flags.DEFINE_integer('audio_deviation_max', 0, '')
tf.app.flags.DEFINE_integer('audio_context_radius', -1, 'Past and future context per training example')
tf.app.flags.DEFINE_integer('audio_nbands', 0, 'Number of bands per frame')
tf.app.flags.DEFINE_integer('audio_nchannels', 0, 'Number of channels per frame')
tf.app.flags.DEFINE_bool('feat_meas_phase', False, '')
tf.app.flags.DEFINE_bool('feat_meas_phase_cos', False, '')
tf.app.flags.DEFINE_bool('feat_meas_phase_sin', False, '')
tf.app.flags.DEFINE_bool('feat_beat_phase', False, '')
tf.app.flags.DEFINE_bool('feat_beat_phase_cos', False, '')
tf.app.flags.DEFINE_bool('feat_beat_phase_sin', False, '')
tf.app.flags.DEFINE_bool('feat_beat_diff', False, '')
tf.app.flags.DEFINE_bool('feat_beat_diff_next', False, '')
tf.app.flags.DEFINE_bool('feat_beat_abs', False, '')
tf.app.flags.DEFINE_bool('feat_time_diff', False, '')
tf.app.flags.DEFINE_bool('feat_time_diff_next', False, '')
tf.app.flags.DEFINE_bool('feat_time_abs', False, '')
tf.app.flags.DEFINE_bool('feat_prog_diff', False, '')
tf.app.flags.DEFINE_bool('feat_prog_abs', False, '')
tf.app.flags.DEFINE_bool('feat_diff_feet', False, '')
tf.app.flags.DEFINE_bool('feat_diff_aps', False, '')
tf.app.flags.DEFINE_integer('feat_beat_phase_nquant', 0, '')
tf.app.flags.DEFINE_integer('feat_beat_phase_max_nwraps', 0, '')
tf.app.flags.DEFINE_integer('feat_meas_phase_nquant', 0, '')
tf.app.flags.DEFINE_integer('feat_meas_phase_max_nwraps', 0, '')
tf.app.flags.DEFINE_string('feat_diff_feet_to_id_fp', '', '')
tf.app.flags.DEFINE_string('feat_diff_coarse_to_id_fp', '', '')
tf.app.flags.DEFINE_bool('feat_diff_dipstick', False, '')
tf.app.flags.DEFINE_string('feat_freetext_to_id_fp', '', '')
tf.app.flags.DEFINE_integer('feat_bucket_beat_diff_n', None, '')
tf.app.flags.DEFINE_float('feat_bucket_beat_diff_max', None, '')
tf.app.flags.DEFINE_integer('feat_bucket_time_diff_n', None, '')
tf.app.flags.DEFINE_float('feat_bucket_time_diff_max', None, '')
# Network params
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size for training')
tf.app.flags.DEFINE_integer('nunroll', 1, '')
tf.app.flags.DEFINE_string('cnn_filter_shapes', '', 'CSV 3-tuples of filter shapes (time, freq, n)')
tf.app.flags.DEFINE_string('cnn_pool', '', 'CSV 2-tuples of pool amounts (time, freq)')
tf.app.flags.DEFINE_integer('cnn_dim_reduction_size', -1, '')
tf.app.flags.DEFINE_float('cnn_dim_reduction_keep_prob', 1.0, '')
tf.app.flags.DEFINE_string('cnn_dim_reduction_nonlin', '', '')
tf.app.flags.DEFINE_string('rnn_cell_type', 'lstm', '')
tf.app.flags.DEFINE_integer('rnn_size', 0, '')
tf.app.flags.DEFINE_integer('rnn_nlayers', 0, '')
tf.app.flags.DEFINE_float('rnn_keep_prob', 1.0, '')
tf.app.flags.DEFINE_string('dnn_sizes', '', 'CSV sizes for dense layers')
tf.app.flags.DEFINE_float('dnn_keep_prob', 1.0, '')
# Training params
tf.app.flags.DEFINE_float('grad_clip', 0.0, 'Clip gradients to this value if greater than 0')
tf.app.flags.DEFINE_string('opt', 'sgd', 'One of \'sgd\'')
tf.app.flags.DEFINE_float('lr', 1.0, 'Learning rate')
tf.app.flags.DEFINE_float('lr_decay_rate', 1.0, 'Multiply learning rate by this value every epoch')
tf.app.flags.DEFINE_integer('lr_decay_delay', 0, '')
tf.app.flags.DEFINE_integer('nbatches_per_ckpt', 100, 'Save model weights every N batches')
tf.app.flags.DEFINE_integer('nbatches_per_eval', 10000, 'Evaluate model every N batches')
tf.app.flags.DEFINE_integer('nepochs', 0, 'Number of training epochs, negative means train continuously')
tf.app.flags.DEFINE_string('experiment_dir', '', 'Directory for temporary training files and model weights')
# Eval params
# Generate params
tf.app.flags.DEFINE_string('generate_fp', '', '')
tf.app.flags.DEFINE_string('generate_vocab_fp', '', '')
FLAGS = tf.app.flags.FLAGS
dtype = tf.float32
def main(_):
assert FLAGS.experiment_dir
do_train = FLAGS.nepochs != 0 and bool(FLAGS.train_txt_fp)
do_valid = bool(FLAGS.valid_txt_fp)
do_train_eval = do_train and do_valid
do_eval = bool(FLAGS.test_txt_fp)
do_generate = bool(FLAGS.generate_fp)
# Load data
print 'Loading data'
train_data, valid_data, test_data = open_dataset_fps(FLAGS.train_txt_fp, FLAGS.valid_txt_fp, FLAGS.test_txt_fp)
# Calculate validation metrics
if FLAGS.audio_z_score:
z_score_fp = os.path.join(FLAGS.experiment_dir, 'valid_mean_std.pkl')
if do_valid and not os.path.exists(z_score_fp):
print 'Calculating validation metrics'
mean_per_band, std_per_band = calc_mean_std_per_band(valid_data)
with open(z_score_fp, 'wb') as f:
pickle.dump((mean_per_band, std_per_band), f)
else:
print 'Loading validation metrics'
with open(z_score_fp, 'rb') as f:
mean_per_band, std_per_band = pickle.load(f)
# Sanitize data
for data in [train_data, valid_data, test_data]:
apply_z_norm(data, mean_per_band, std_per_band)
# Flatten the data into chart references for easier iteration
print 'Flattening datasets into charts'
charts_train = flatten_dataset_to_charts(train_data)
charts_valid = flatten_dataset_to_charts(valid_data)
charts_test = flatten_dataset_to_charts(test_data)
# Filter charts that are too short
charts_train_len = len(charts_train)
charts_train = filter(lambda x: x.get_nannotations() >= FLAGS.nunroll, charts_train)
if len(charts_train) != charts_train_len:
print '{} charts too small for training'.format(charts_train_len - len(charts_train))
print 'Train set: {} charts, valid set: {} charts, test set: {} charts'.format(len(charts_train), len(charts_valid), len(charts_test))
# Load ID maps
diff_feet_to_id = None
if FLAGS.feat_diff_feet_to_id_fp:
diff_feet_to_id = load_id_dict(FLAGS.feat_diff_feet_to_id_fp)
diff_coarse_to_id = None
if FLAGS.feat_diff_coarse_to_id_fp:
diff_coarse_to_id = load_id_dict(FLAGS.feat_diff_coarse_to_id_fp)
freetext_to_id = None
if FLAGS.feat_freetext_to_id_fp:
freetext_to_id = load_id_dict(FLAGS.feat_freetext_to_id_fp)
# Create feature config
feats_config = {
'meas_phase': FLAGS.feat_meas_phase,
'meas_phase_cos': FLAGS.feat_meas_phase_cos,
'meas_phase_sin': FLAGS.feat_meas_phase_sin,
'beat_phase': FLAGS.feat_beat_phase,
'beat_phase_cos': FLAGS.feat_beat_phase_cos,
'beat_phase_sin': FLAGS.feat_beat_phase_sin,
'beat_diff': FLAGS.feat_beat_diff,
'beat_diff_next': FLAGS.feat_beat_diff_next,
'beat_abs': FLAGS.feat_beat_abs,
'time_diff': FLAGS.feat_time_diff,
'time_diff_next': FLAGS.feat_time_diff_next,
'time_abs': FLAGS.feat_time_abs,
'prog_diff': FLAGS.feat_prog_diff,
'prog_abs': FLAGS.feat_prog_abs,
'diff_feet': FLAGS.feat_diff_feet,
'diff_aps': FLAGS.feat_diff_aps,
'beat_phase_nquant': FLAGS.feat_beat_phase_nquant,
'beat_phase_max_nwraps': FLAGS.feat_beat_phase_max_nwraps,
'meas_phase_nquant': FLAGS.feat_meas_phase_nquant,
'meas_phase_max_nwraps': FLAGS.feat_meas_phase_max_nwraps,
'diff_feet_to_id': diff_feet_to_id,
'diff_coarse_to_id': diff_coarse_to_id,
'freetext_to_id': freetext_to_id,
'bucket_beat_diff_n': FLAGS.feat_bucket_beat_diff_n,
'bucket_time_diff_n': FLAGS.feat_bucket_time_diff_n
}
nfeats = 0
for feat in feats_config.values():
if feat is None:
continue
if isinstance(feat, dict):
nfeats += max(feat.values()) + 1
else:
nfeats += int(feat)
nfeats += 1 if FLAGS.feat_beat_phase_max_nwraps > 0 else 0
nfeats += 1 if FLAGS.feat_meas_phase_max_nwraps > 0 else 0
nfeats += 1 if FLAGS.feat_bucket_beat_diff_n > 0 else 0
nfeats += 1 if FLAGS.feat_bucket_time_diff_n > 0 else 0
feats_config['diff_dipstick'] = FLAGS.feat_diff_dipstick
feats_config['audio_time_context_radius'] = FLAGS.audio_context_radius
feats_config['audio_deviation_max'] = FLAGS.audio_deviation_max
feats_config['bucket_beat_diff_max'] = FLAGS.feat_bucket_beat_diff_max
feats_config['bucket_time_diff_max'] = FLAGS.feat_bucket_time_diff_max
feats_config_eval = dict(feats_config)
feats_config_eval['audio_deviation_max'] = 0
print 'Feature configuration (nfeats={}): {}'.format(nfeats, feats_config)
# Create model config
rnn_proj_init = tf.constant_initializer(0.0, dtype=dtype) if FLAGS.sym_rnn_pretrain_model_ckpt_fp else tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype)
model_config = {
'nunroll': FLAGS.nunroll,
'sym_in_type': FLAGS.sym_in_type,
'sym_embedding_size': FLAGS.sym_embedding_size,
'sym_out_type': FLAGS.sym_out_type,
'sym_narrows': FLAGS.sym_narrows,
'sym_narrowclasses': FLAGS.sym_narrowclasses,
'other_nfeats': nfeats,
'audio_context_radius': FLAGS.audio_context_radius,
'audio_nbands': FLAGS.audio_nbands,
'audio_nchannels': FLAGS.audio_nchannels,
'cnn_filter_shapes': stride_csv_arg_list(FLAGS.cnn_filter_shapes, 3, int),
'cnn_init': tf.uniform_unit_scaling_initializer(factor=1.43, dtype=dtype),
'cnn_pool': stride_csv_arg_list(FLAGS.cnn_pool, 2, int),
'cnn_dim_reduction_size': FLAGS.cnn_dim_reduction_size,
'cnn_dim_reduction_init': tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype),
'cnn_dim_reduction_nonlin': FLAGS.cnn_dim_reduction_nonlin,
'cnn_dim_reduction_keep_prob': FLAGS.cnn_dim_reduction_keep_prob,
'rnn_proj_init': rnn_proj_init,
'rnn_cell_type': FLAGS.rnn_cell_type,
'rnn_size': FLAGS.rnn_size,
'rnn_nlayers': FLAGS.rnn_nlayers,
'rnn_init': tf.random_uniform_initializer(-5e-2, 5e-2, dtype=dtype),
'rnn_keep_prob': FLAGS.rnn_keep_prob,
'dnn_sizes': stride_csv_arg_list(FLAGS.dnn_sizes, 1, int),
'dnn_init': tf.uniform_unit_scaling_initializer(factor=1.15, dtype=dtype),
'dnn_keep_prob': FLAGS.dnn_keep_prob,
'grad_clip': FLAGS.grad_clip,
'opt': FLAGS.opt,
}
print 'Model configuration: {}'.format(model_config)
with tf.Graph().as_default(), tf.Session() as sess:
if do_train:
print 'Creating train model'
with tf.variable_scope('model_ss', reuse=None):
model_train = SymNet(mode='train', batch_size=FLAGS.batch_size, **model_config)
if do_train_eval or do_eval:
print 'Creating eval model'
with tf.variable_scope('model_ss', reuse=do_train):
eval_batch_size = FLAGS.batch_size
if FLAGS.rnn_size > 0 and FLAGS.rnn_nlayers > 0:
eval_batch_size = 1
model_eval = SymNet(mode='eval', batch_size=eval_batch_size, **model_config)
model_early_stop_xentropy_avg = tf.train.Saver(tf.global_variables(), max_to_keep=None)
model_early_stop_accuracy = tf.train.Saver(tf.global_variables(), max_to_keep=None)
if do_generate:
print 'Creating generation model'
with tf.variable_scope('model_ss', reuse=do_train):
eval_batch_size = FLAGS.batch_size
model_gen = SymNet(mode='gen', batch_size=1, **model_config)
# Restore or init model
model_saver = tf.train.Saver(tf.global_variables())
if FLAGS.model_ckpt_fp:
print 'Restoring model weights from {}'.format(FLAGS.model_ckpt_fp)
model_saver.restore(sess, FLAGS.model_ckpt_fp)
else:
print 'Initializing model weights from scratch'
sess.run(tf.global_variables_initializer())
# Restore or init sym weights
if FLAGS.sym_rnn_pretrain_model_ckpt_fp:
print 'Restoring pretrained weights from {}'.format(FLAGS.sym_rnn_pretrain_model_ckpt_fp)
var_list_old = filter(lambda x: 'nosym' not in x.name and 'cnn' not in x.name, tf.global_variables())
pretrain_saver = tf.train.Saver(var_list_old)
pretrain_saver.restore(sess, FLAGS.sym_rnn_pretrain_model_ckpt_fp)
# Create summaries
if do_train:
summary_writer = tf.summary.FileWriter(FLAGS.experiment_dir, sess.graph)
epoch_mean_xentropy = tf.placeholder(tf.float32, shape=[], name='epoch_mean_xentropy')
epoch_mean_time = tf.placeholder(tf.float32, shape=[], name='epoch_mean_time')
epoch_var_xentropy = tf.placeholder(tf.float32, shape=[], name='epoch_var_xentropy')
epoch_var_time = tf.placeholder(tf.float32, shape=[], name='epoch_var_time')
epoch_time_total = tf.placeholder(tf.float32, shape=[], name='epoch_time_total')
epoch_summaries = tf.summary.merge([
tf.summary.scalar('epoch_mean_xentropy', epoch_mean_xentropy),
tf.summary.scalar('epoch_mean_time', epoch_mean_time),
tf.summary.scalar('epoch_var_xentropy', epoch_var_xentropy),
tf.summary.scalar('epoch_var_time', epoch_var_time),
tf.summary.scalar('epoch_time_total', epoch_time_total)
])
eval_metric_names = ['xentropy_avg', 'accuracy']
eval_metrics = {}
eval_summaries = []
for eval_metric_name in eval_metric_names:
name_mean = 'eval_mean_{}'.format(eval_metric_name)
name_var = 'eval_var_{}'.format(eval_metric_name)
ph_mean = tf.placeholder(tf.float32, shape=[], name=name_mean)
ph_var = tf.placeholder(tf.float32, shape=[], name=name_var)
summary_mean = tf.summary.scalar(name_mean, ph_mean)
summary_var = tf.summary.scalar(name_var, ph_var)
eval_summaries.append(tf.summary.merge([summary_mean, summary_var]))
eval_metrics[eval_metric_name] = (ph_mean, ph_var)
eval_time = tf.placeholder(tf.float32, shape=[], name='eval_time')
eval_time_summary = tf.summary.scalar('eval_time', eval_time)
eval_summaries = tf.summary.merge([eval_time_summary] + eval_summaries)
# Calculate epoch stuff
train_nexamples = sum([chart.get_nannotations() for chart in charts_train])
examples_per_batch = FLAGS.batch_size
examples_per_batch *= model_train.out_nunroll
batches_per_epoch = train_nexamples // examples_per_batch
nbatches = FLAGS.nepochs * batches_per_epoch
print '{} frames in data, {} batches per epoch, {} batches total'.format(train_nexamples, batches_per_epoch, nbatches)
# Init epoch
lr_summary = model_train.assign_lr(sess, FLAGS.lr)
summary_writer.add_summary(lr_summary, 0)
epoch_xentropies = []
epoch_times = []
batch_num = 0
eval_best_xentropy_avg = float('inf')
eval_best_accuracy = float('-inf')
while FLAGS.nepochs < 0 or batch_num < nbatches:
batch_time_start = time.time()
syms, feats_other, feats_audio, targets, target_weights = model_train.prepare_train_batch(charts_train, **feats_config)
feed_dict = {
model_train.syms: syms,
model_train.feats_other: feats_other,
model_train.feats_audio: feats_audio,
model_train.targets: targets,
model_train.target_weights: target_weights
}
batch_xentropy, _ = sess.run([model_train.avg_neg_log_lhood, model_train.train_op], feed_dict=feed_dict)
epoch_xentropies.append(batch_xentropy)
epoch_times.append(time.time() - batch_time_start)
batch_num += 1
if batch_num % batches_per_epoch == 0:
epoch_num = batch_num // batches_per_epoch
print 'Completed epoch {}'.format(epoch_num)
lr_decay = FLAGS.lr_decay_rate ** max(epoch_num - FLAGS.lr_decay_delay, 0)
lr_summary = model_train.assign_lr(sess, FLAGS.lr * lr_decay)
summary_writer.add_summary(lr_summary, batch_num)
epoch_xentropy = np.mean(epoch_xentropies)
print 'Epoch mean cross-entropy (nats) {}'.format(epoch_xentropy)
epoch_summary = sess.run(epoch_summaries, feed_dict={epoch_mean_xentropy: epoch_xentropy, epoch_mean_time: np.mean(epoch_times), epoch_var_xentropy: np.var(epoch_xentropies), epoch_var_time: np.var(epoch_times), epoch_time_total: np.sum(epoch_times)})
summary_writer.add_summary(epoch_summary, batch_num)
epoch_xentropies = []
epoch_times = []
if batch_num % FLAGS.nbatches_per_ckpt == 0:
print 'Saving model weights...'
ckpt_fp = os.path.join(FLAGS.experiment_dir, 'onset_net_train')
model_saver.save(sess, ckpt_fp, global_step=tf.contrib.framework.get_or_create_global_step())
print 'Done saving!'
if do_train_eval and batch_num % FLAGS.nbatches_per_eval == 0:
print 'Evaluating...'
eval_start_time = time.time()
metrics = defaultdict(list)
for eval_chart in charts_valid:
if model_eval.do_rnn:
state = sess.run(model_eval.initial_state)
neg_log_prob_sum = 0.0
correct_predictions_sum = 0.0
weight_sum = 0.0
for syms, syms_in, feats_other, feats_audio, targets, target_weights in model_eval.eval_iter(eval_chart, **feats_config_eval):
feed_dict = {
model_eval.syms: syms_in,
model_eval.feats_other: feats_other,
model_eval.feats_audio: feats_audio,
model_eval.targets: targets,
model_eval.target_weights: target_weights
}
if model_eval.do_rnn:
feed_dict[model_eval.initial_state] = state
xentropies, correct_predictions, state = sess.run([model_eval.neg_log_lhoods, model_eval.correct_predictions, model_eval.final_state], feed_dict=feed_dict)
else:
xentropies, correct_predictions = sess.run([model_eval.neg_log_lhoods, model_eval.correct_predictions], feed_dict=feed_dict)
neg_log_prob_sum += np.sum(xentropies)
correct_predictions_sum += np.sum(correct_predictions)
weight_sum += np.sum(target_weights)
assert int(weight_sum) == eval_chart.get_nannotations()
xentropy_avg = neg_log_prob_sum / weight_sum
accuracy = correct_predictions_sum / weight_sum
metrics['xentropy_avg'].append(xentropy_avg)
metrics['accuracy'].append(accuracy)
metrics = {k: (np.mean(v), np.var(v)) for k, v in metrics.items()}
feed_dict = {}
results = []
for metric_name, (mean, var) in metrics.items():
feed_dict[eval_metrics[metric_name][0]] = mean
feed_dict[eval_metrics[metric_name][1]] = var
feed_dict[eval_time] = time.time() - eval_start_time
summary_writer.add_summary(sess.run(eval_summaries, feed_dict=feed_dict), batch_num)
xentropy_avg_mean = metrics['xentropy_avg'][0]
if xentropy_avg_mean < eval_best_xentropy_avg:
print 'Xentropy {} better than previous {}'.format(xentropy_avg_mean, eval_best_xentropy_avg)
ckpt_fp = os.path.join(FLAGS.experiment_dir, 'onset_net_early_stop_xentropy_avg')
model_early_stop_xentropy_avg.save(sess, ckpt_fp, global_step=tf.contrib.framework.get_or_create_global_step())
eval_best_xentropy_avg = xentropy_avg_mean
accuracy_mean = metrics['accuracy'][0]
if accuracy_mean > eval_best_accuracy:
print 'Accuracy {} better than previous {}'.format(accuracy_mean, eval_best_accuracy)
ckpt_fp = os.path.join(FLAGS.experiment_dir, 'onset_net_early_stop_accuracy')
model_early_stop_accuracy.save(sess, ckpt_fp, global_step=tf.contrib.framework.get_or_create_global_step())
eval_best_accuracy = accuracy_mean
print 'Done evaluating'
if do_eval:
print 'Evaluating...'
metrics = defaultdict(list)
for test_chart in charts_test:
if model_eval.do_rnn:
state = sess.run(model_eval.initial_state)
neg_log_prob_sum = 0.0
correct_predictions_sum = 0.0
weight_sum = 0.0
for syms, syms_in, feats_other, feats_audio, targets, target_weights in model_eval.eval_iter(test_chart, **feats_config_eval):
feed_dict = {
model_eval.syms: syms_in,
model_eval.feats_other: feats_other,
model_eval.feats_audio: feats_audio,
model_eval.targets: targets,
model_eval.target_weights: target_weights
}
if model_eval.do_rnn:
feed_dict[model_eval.initial_state] = state
xentropies, correct_predictions, state = sess.run([model_eval.neg_log_lhoods, model_eval.correct_predictions, model_eval.final_state], feed_dict=feed_dict)
else:
xentropies, correct_predictions = sess.run([model_eval.neg_log_lhoods, model_eval.correct_predictions], feed_dict=feed_dict)
neg_log_prob_sum += np.sum(xentropies)
correct_predictions_sum += np.sum(correct_predictions)
weight_sum += np.sum(target_weights)
assert int(weight_sum) == test_chart.get_nannotations()
xentropy_avg = neg_log_prob_sum / weight_sum
accuracy = correct_predictions_sum / weight_sum
metrics['perplexity'].append(np.exp(xentropy_avg))
metrics['xentropy_avg'].append(xentropy_avg)
metrics['accuracy'].append(accuracy)
metrics = {k: (np.mean(v), np.std(v), np.min(v), np.max(v)) for k, v in metrics.items()}
copy_pasta = []
for metric_name in ['xentropy_avg', 'perplexity', 'accuracy']:
metric_stats = metrics[metric_name]
copy_pasta += list(metric_stats)
print '{}: {}'.format(metric_name, metric_stats)
print 'COPY PASTA:'
print ','.join([str(x) for x in copy_pasta])
# TODO: This currently only works for a VERY specific model (delta-time LSTM)
if do_generate:
print 'Generating...'
with open(FLAGS.generate_fp, 'r') as f:
step_times = [float(x) for x in f.read().split(',')]
with open(FLAGS.generate_vocab_fp, 'r') as f:
idx_to_sym = {i:k for i, k in enumerate(f.read().splitlines())}
def weighted_pick(weights):
t = np.cumsum(weights)
s = np.sum(weights)
return(int(np.searchsorted(t, np.random.rand(1)*s)))
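# Illustrative note (comments only, hypothetical numbers): for weights
# [0.1, 0.3, 0.6] the cumulative sums are [0.1, 0.4, 1.0]; a uniform draw of
# 0.5 * sum(weights) = 0.5 lands in the third bucket, so index 2 is returned.
# In other words, symbols are sampled with probability proportional to score.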
state = sess.run(model_gen.initial_state)
sym_prev = '<-1>'
step_time_prev = step_times[0]
seq_scores = []
seq_sym_idxs = []
seq_syms = []
for step_time in step_times:
delta_step_time = step_time - step_time_prev
syms_in = np.array([[model_gen.arrow_to_encoding(sym_prev, 'bagofarrows')]], dtype=np.float32)
feats_other = np.array([[[delta_step_time]]], dtype=np.float32)
feats_audio = np.zeros((1, 1, 0, 0, 0), dtype=np.float32)
feed_dict = {
model_gen.syms: syms_in,
model_gen.feats_other: feats_other,
model_gen.feats_audio: feats_audio,
model_gen.initial_state: state
}
scores, state = sess.run([model_gen.scores, model_gen.final_state], feed_dict=feed_dict)
sym_idx = 0
while sym_idx <= 1:
sym_idx = weighted_pick(scores)
if sym_idx <= 1:
print 'rare'
sym_idx = sym_idx - 1 # remove special
sym = idx_to_sym[sym_idx]
seq_scores.append(scores)
seq_sym_idxs.append(sym_idx)
seq_syms.append(sym)
sym_prev = sym
step_time_prev = step_time
with open(os.path.join(FLAGS.experiment_dir, 'seq.pkl'), 'wb') as f:
pickle.dump((seq_scores, seq_sym_idxs, seq_syms), f)
if __name__ == '__main__':
tf.app.run()
|
3f42b46ce78fe0d322af45e9d169321a568f6fa0
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayEbppInvoiceMerchantEnterstatusQueryModel.py
|
b63611be5d5c86678e65fbff0cf36071d9e2eda7
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,007
|
py
|
AlipayEbppInvoiceMerchantEnterstatusQueryModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppInvoiceMerchantEnterstatusQueryModel(object):
def __init__(self):
self._m_short_name = None
self._process_id = None
self._product_code = None
@property
def m_short_name(self):
return self._m_short_name
@m_short_name.setter
def m_short_name(self, value):
self._m_short_name = value
@property
def process_id(self):
return self._process_id
@process_id.setter
def process_id(self, value):
self._process_id = value
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
def to_alipay_dict(self):
params = dict()
if self.m_short_name:
if hasattr(self.m_short_name, 'to_alipay_dict'):
params['m_short_name'] = self.m_short_name.to_alipay_dict()
else:
params['m_short_name'] = self.m_short_name
if self.process_id:
if hasattr(self.process_id, 'to_alipay_dict'):
params['process_id'] = self.process_id.to_alipay_dict()
else:
params['process_id'] = self.process_id
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEbppInvoiceMerchantEnterstatusQueryModel()
if 'm_short_name' in d:
o.m_short_name = d['m_short_name']
if 'process_id' in d:
o.process_id = d['process_id']
if 'product_code' in d:
o.product_code = d['product_code']
return o
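# Illustrative round-trip sketch (not part of the upstream SDK file); the property
# values below are made up:
def _enterstatus_query_model_demo():
    model = AlipayEbppInvoiceMerchantEnterstatusQueryModel()
    model.m_short_name = "demo-merchant"
    model.process_id = "20230101000000001"
    model.product_code = "DEMO_PRODUCT_CODE"
    params = model.to_alipay_dict()
    # from_alipay_dict rebuilds an equivalent instance from the serialized dict.
    clone = AlipayEbppInvoiceMerchantEnterstatusQueryModel.from_alipay_dict(params)
    return clone.product_code == model.product_code  # True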
|
af96ba3d5b91ba281ed3598e17fe76cf0e299d9f
|
56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a
|
/applications/HDF5Application/tests/test_line_output_process.py
|
72b85f96498572e87b73e1099326fb6105ca0c82
|
[
"BSD-3-Clause"
] |
permissive
|
KratosMultiphysics/Kratos
|
82b902a2266625b25f17239b42da958611a4b9c5
|
366949ec4e3651702edc6ac3061d2988f10dd271
|
refs/heads/master
| 2023-08-30T20:31:37.818693
| 2023-08-30T18:01:01
| 2023-08-30T18:01:01
| 81,815,495
| 994
| 285
|
NOASSERTION
| 2023-09-14T13:22:43
| 2017-02-13T10:58:24
|
C++
|
UTF-8
|
Python
| false
| false
| 4,687
|
py
|
test_line_output_process.py
|
# Core imports
import KratosMultiphysics
import KratosMultiphysics.kratos_utilities as KratosUtils
from KratosMultiphysics import KratosUnittest as UnitTest
from KratosMultiphysics.testing.utilities import ReadModelPart
# HDF5 imports
from KratosMultiphysics.HDF5Application.line_output_process import Factory as LineOutputProcessFactory
from KratosMultiphysics.HDF5Application.core.file_io import OpenHDF5File
# STD imports
import pathlib
class TestLineOutputProcess(UnitTest.TestCase):
communicator = KratosMultiphysics.Testing.GetDefaultDataCommunicator()
file_name = "test_line_output.h5"
def setUp(self):
KratosUtils.DeleteFileIfExisting(self.file_name)
self.communicator.Barrier()
def tearDown(self):
# The output file is not actually checked yet in the script,
# so if you need to validate the results, comment the line
# below.
self.communicator.Barrier()
KratosUtils.DeleteFileIfExisting(self.file_name)
def test_LineOutputProcess(self):
model, model_part = self.MakeModel()
parameters = self.parameters
number_of_steps = 10
process_parameters = KratosMultiphysics.Parameters()
process_parameters.AddValue("Parameters", parameters)
process = LineOutputProcessFactory(process_parameters, model)
process.ExecuteInitialize()
for i_step in range(number_of_steps):
# Create new step data
model_part.CloneTimeStep(2.0 * i_step)
model_part.ProcessInfo[KratosMultiphysics.STEP] = i_step
# Modify variables
for node in model_part.Nodes:
for variable, increment in zip((KratosMultiphysics.DISPLACEMENT_X, KratosMultiphysics.VELOCITY), (1.0, [0.0,0.0,1.0])):
node.SetSolutionStepValue(variable,node.GetSolutionStepValue(variable) + increment)
# Print output if requested
if process.IsOutputStep():
process.PrintOutput()
self.communicator.Barrier()
# Open output file
file_parameters = parameters["file_parameters"].Clone()
file_parameters.AddString("file_access_mode", "read_only")
with OpenHDF5File(file_parameters, model_part) as file:
# Check output file structure
root = "/test_line_output_{}".format(parameters["model_part_name"].GetString())
self.assertTrue(file.IsGroup(root))
self.assertTrue(file.IsDataSet(root + "/POSITION"))
@property
def parameters(self) -> KratosMultiphysics.Parameters:
parameters = KratosMultiphysics.Parameters("""{
"model_part_name" : "main",
"start_point" : [0.0, 0.0, 0.0],
"end_point" : [1.0, 0.0, 0.0],
"number_of_points" : 51,
"output_variables" : ["DISPLACEMENT_X", "VELOCITY"],
"output_frequency" : 3,
"coordinates_prefix" : "/test_line_output_<model_part_name>",
"variables_prefix" : "/test_line_output_<model_part_name>/test_step_<step>",
"file_parameters" : {
"file_name" : ""
}
}""")
parameters["file_parameters"]["file_name"].SetString(self.file_name)
return parameters
@staticmethod
def MakeModel():
model = KratosMultiphysics.Model()
model_part = model.CreateModelPart("main")
model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = 3
model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISPLACEMENT_X)
model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
ReadModelPart(str(TestLineOutputProcess.GetInputMDPAPath()), model_part)
for node in model_part.Nodes:
node.SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_X, node.X)
node.SetSolutionStepValue(KratosMultiphysics.VELOCITY, [node.X, node.Y, node.Z])
return model, model_part
@staticmethod
def GetInputMDPAPath() -> pathlib.Path:
script_directory = pathlib.Path(__file__).absolute().parent
kratos_root_directory = script_directory.parent.parent.parent
test_input_directory = kratos_root_directory / "kratos" / "tests" / "auxiliar_files_for_python_unittest"
test_file_stem = test_input_directory / "mdpa_files" / "test_processes"
test_file_path = pathlib.Path(str(test_file_stem) + ".mdpa")
if not test_file_path.is_file():
raise FileNotFoundError("Test file not found: {}".format(test_file_path))
return test_file_stem
if __name__ == "__main__":
UnitTest.main()
|
1b4f8ebe86887eecb924a3f46b9bfb0f1315f637
|
8cb9099bdba3ae9fbf725bc9db23e4f8f5a4d01a
|
/test/test_dma.py
|
6a20eefc2d98d6251101a5a35b2ce4c052a3e023
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
enjoy-digital/litepcie
|
83eb14b3049195376e5ebab59d4ff3f1d20cbe7c
|
170855627bde8b917637907aa0efdacba5fe907f
|
refs/heads/master
| 2023-09-04T23:38:33.936818
| 2023-08-25T09:36:26
| 2023-08-25T09:36:40
| 42,041,717
| 398
| 98
|
NOASSERTION
| 2023-07-28T17:26:31
| 2015-09-07T08:45:27
|
Verilog
|
UTF-8
|
Python
| false
| false
| 10,364
|
py
|
test_dma.py
|
#
# This file is part of LitePCIe.
#
# Copyright (c) 2015-2022 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
# In this high level test, LitePCIeEndpoint is connected to LitePCIeDMAReader and LitePCIeDMAWriter
# frontends with Reader's source connected to Writer's sink. Our Host model is used to emulate a Host
# memory, with the Reader and Writer reading/writing data from/to this memory.
#
# ┌───────────┐
# │ │
# │ HOST │
# │ (Model) │
# │ │
# └─┬───────▲─┘
# │ │
# ┌─────▼───────┴─────┐
# │ │
# │ │
# ┌──► LitePCIeEndpoint ├─┐
# │ │ │ │
# │ │ │ │
# │ └───────────────────┘ │
# │ │
# ┌────────┴──────────┐ ┌─────────▼─────────┐
# │ │ │ │
# │ LitePCIeDMAWriter │ │ LitePCIeDMAReader │
# │ │ │ │
# └────────▲──────────┘ └─────────┬─────────┘
# │ │
# │ │
# └────────────────────────┘
#
# The Host memory is initially filled with random data, that are read by the Reader, re-directed
# to the Writer and then re-written in another memory location of the Host. The test then checks
# that the initial data and re-written data are identical.
import unittest
from litepcie.common import *
from litepcie.core import LitePCIeEndpoint
from litepcie.core.msi import LitePCIeMSI
from litepcie.frontend.dma import LitePCIeDMAWriter, LitePCIeDMAReader
from test.common import seed_to_data
from test.model.host import *
# Parameters ---------------------------------------------------------------------------------------
root_id = 0x100
endpoint_id = 0x400
# DMA Driver ---------------------------------------------------------------------------------------
class DMADriver:
"""DMA Driver model
Provides methods to control/program LitePCIeDMAReader/LitePCIeDMAWriter.
"""
def __init__(self, dma, dut):
self.dma = getattr(dut, dma)
self.dut = dut
def set_prog_mode(self):
yield from self.dma.table.loop_prog_n.write(0)
def set_loop_mode(self):
yield from self.dma.table.loop_prog_n.write(1)
def flush(self):
yield from self.dma.table.reset.write(1)
def program_descriptor(self, address, length):
address_lsb = (address >> 0) & 0xffff_ffff
address_msb = (address >> 32) & 0xffff_ffff
value = address_lsb
value |= (length << 32)
yield from self.dma.table.value.write(value)
yield from self.dma.table.we.write(address_msb)
def enable(self):
yield from self.dma._enable.write(1)
def disable(self):
yield from self.dma._enable.write(0)
# MSI Handler --------------------------------------------------------------------------------------
DMA_READER_IRQ = 1
DMA_WRITER_IRQ = 2
class MSIHandler(Module):
"""MSI Handled model
Handles the MSI IRQs generated by LitePCIeDMAReader/LitePCIeDMAWriter.
"""
def __init__(self, debug=False):
self.debug = debug
self.sink = stream.Endpoint(msi_layout())
self.dma_reader_irq_count = 0
self.dma_writer_irq_count = 0
def clear_dma_reader_irq_count(self):
self.dma_reader_irq_count = 0
def clear_dma_writer_irq_count(self):
self.dma_writer_irq_count = 0
@passive
def generator(self, dut):
while True:
yield self.sink.ready.eq(1)
if (yield self.sink.valid):
# Get IRQs.
irq_vector = (yield dut.msi.vector.status)
irq_clear = 0
# Handle IRQs.
if irq_vector & DMA_READER_IRQ:
self.dma_reader_irq_count += 1
if self.debug:
print("[MSI] dma_reader_irq (n: {:d})".format(self.dma_reader_irq_count))
irq_clear |= DMA_READER_IRQ
if irq_vector & DMA_WRITER_IRQ:
self.dma_writer_irq_count += 1
if self.debug:
print("[MSI] dma_writer_irq (n: {:d})".format(self.dma_writer_irq_count))
irq_clear |= DMA_WRITER_IRQ
# Clear IRQs.
yield from dut.msi.clear.write((yield from dut.msi.clear.read()) | irq_clear)
yield
# Test DMA -----------------------------------------------------------------------------------------
class TestDMA(unittest.TestCase):
def dma_test(self, data_width, address_width, test_size=1024):
host_data = [seed_to_data(i, True) for i in range(test_size//4)]
loopback_data = []
def main_generator(dut, nreads=8, nwrites=8):
# Allocate Host's Memory.
dut.host.malloc(0x00000000, test_size*2)
# Enable Chipset
dut.host.chipset.enable()
# Fill initial Host's Memory.
dut.host.write_mem(0x00000000, host_data)
# DMA Reader/Writer control models.
dma_reader_driver = DMADriver("dma_reader", dut)
dma_writer_driver = DMADriver("dma_writer", dut)
# Program DMA Reader descriptors.
yield from dma_reader_driver.set_prog_mode()
yield from dma_reader_driver.flush()
for i in range(nreads):
yield from dma_reader_driver.program_descriptor((test_size//8)*i, test_size//8)
# Program DMA Writer descriptors.
yield from dma_writer_driver.set_prog_mode()
yield from dma_writer_driver.flush()
for i in range(nwrites):
yield from dma_writer_driver.program_descriptor(test_size + (test_size//8)*i, test_size//8)
# Enable MSI.
yield dut.msi.enable.storage.eq(DMA_READER_IRQ | DMA_WRITER_IRQ)
# Enable DMA Reader & Writer.
yield from dma_reader_driver.enable()
yield from dma_writer_driver.enable()
# Wait for all writes.
while dut.msi_handler.dma_writer_irq_count != nwrites:
yield
# Delay to ensure all the data has been written.
for i in range(1024):
yield
for data in dut.host.read_mem(test_size, test_size):
loopback_data.append(data)
class DUT(Module):
def __init__(self, data_width, address_width):
self.data_width = data_width
self.address_width = address_width
# Host -----------------------------------------------------------------------------
self.submodules.host = Host(data_width, root_id, endpoint_id,
phy_debug = False,
chipset_debug = False,
chipset_split = True,
chipset_reordering = True,
host_debug = True)
# Endpoint -------------------------------------------------------------------------
self.submodules.endpoint = LitePCIeEndpoint(self.host.phy,
address_width = address_width,
max_pending_requests = 8
)
# DMA Reader/Writer ----------------------------------------------------------------
dma_reader_port = self.endpoint.crossbar.get_master_port(read_only=True)
dma_writer_port = self.endpoint.crossbar.get_master_port(write_only=True)
self.submodules.dma_reader = LitePCIeDMAReader(self.endpoint, dma_reader_port, address_width=address_width)
self.submodules.dma_writer = LitePCIeDMAWriter(self.endpoint, dma_writer_port, address_width=address_width)
self.comb += self.dma_reader.source.connect(self.dma_writer.sink)
# MSI ------------------------------------------------------------------------------
self.submodules.msi = LitePCIeMSI(2)
self.comb += [
self.msi.irqs[log2_int(DMA_READER_IRQ)].eq(self.dma_reader.irq),
self.msi.irqs[log2_int(DMA_WRITER_IRQ)].eq(self.dma_writer.irq)
]
self.submodules.msi_handler = MSIHandler(debug=False)
self.comb += self.msi.source.connect(self.msi_handler.sink)
dut = DUT(data_width, address_width)
generators = {
"sys" : [
main_generator(dut),
dut.msi_handler.generator(dut),
dut.host.generator(),
dut.host.chipset.generator(),
dut.host.phy.phy_sink.generator(),
dut.host.phy.phy_source.generator()
]
}
clocks = {"sys": 10}
run_simulation(dut, generators, clocks, vcd_name="test_dma.vcd")
self.assertEqual(host_data, loopback_data)
def test_dma_64b_data_width_32b_address_width(self):
self.dma_test(data_width=64, address_width=32)
def test_dma_64b_data_width_64b_address_width(self):
self.dma_test(data_width=64, address_width=64)
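# Illustrative sketch (not part of the original test) of the 64-bit descriptor word
# built by DMADriver.program_descriptor above: the low 32 address bits sit in
# value[31:0], the length in value[63:32], and the high address bits are written
# separately through the table's `we` register.
def _descriptor_packing_demo(address=0x1_2345_6780, length=0x100):
    address_lsb = (address >> 0) & 0xffff_ffff
    address_msb = (address >> 32) & 0xffff_ffff
    value = address_lsb | (length << 32)
    # Unpacking the word recovers the original fields.
    assert value & 0xffff_ffff == address_lsb
    assert value >> 32 == length
    return value, address_msb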
|
7d462515d4960494c399284bed404002d5e62a56
|
56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a
|
/applications/OptimizationApplication/python_scripts/controls/shape/explicit_vertex_morphing.py
|
bfe89e4418a656f02588e65cc2448a472911a3f7
|
[
"BSD-3-Clause"
] |
permissive
|
KratosMultiphysics/Kratos
|
82b902a2266625b25f17239b42da958611a4b9c5
|
366949ec4e3651702edc6ac3061d2988f10dd271
|
refs/heads/master
| 2023-08-30T20:31:37.818693
| 2023-08-30T18:01:01
| 2023-08-30T18:01:01
| 81,815,495
| 994
| 285
|
NOASSERTION
| 2023-09-14T13:22:43
| 2017-02-13T10:58:24
|
C++
|
UTF-8
|
Python
| false
| false
| 2,711
|
py
|
explicit_vertex_morphing.py
|
# ==============================================================================
# KratosOptimizationApplication
#
# License: BSD License
# license: OptimizationApplication/license.txt
#
# Main authors: Reza Najian Asl, https://github.com/RezaNajian
#
# ==============================================================================
import KratosMultiphysics as KM
import KratosMultiphysics.ShapeOptimizationApplication as KSO
import KratosMultiphysics.OptimizationApplication as KOA
from KratosMultiphysics.ShapeOptimizationApplication import mapper_factory
from KratosMultiphysics.OptimizationApplication.controls.shape.shape_control import ShapeControl
class ExplicitVertexMorphing(ShapeControl):
def __init__(self, name, model, settings):
self.project_to_normal = False
self.smooth_surface = False
        self.plane_symmetry = False
super().__init__(name,model,settings)
self.technique_settings = self.settings["technique_settings"]
def Initialize(self):
super().Initialize()
self.ex_vm_mapper = {}
for model_part_name in self.controlling_objects:
if not self.model.HasModelPart(model_part_name):
raise RuntimeError("ExplicitVertexMorphing: Model part {} from control {} does not exist in the input model parts".format(model_part_name,self.name))
ex_mapper = mapper_factory.CreateMapper(self.model.GetModelPart(model_part_name), self.model.GetModelPart(model_part_name), self.technique_settings)
ex_mapper.Initialize()
self.ex_vm_mapper[model_part_name] = ex_mapper
def MapFirstDerivative(self,derivative_variable_name,mapped_derivative_variable_name):
for mapper in self.ex_vm_mapper.values():
mapper.InverseMap(derivative_variable_name,mapped_derivative_variable_name)
def Compute(self):
for mapper in self.ex_vm_mapper.values():
mapper.Map(KOA.D_CX,KOA.D_X)
def Update(self):
for model_part_name in self.controlling_objects:
model_part = self.model.GetModelPart(model_part_name)
for node in model_part.Nodes:
shape_update = node.GetSolutionStepValue(KOA.D_X)
node.X0 += shape_update[0]
node.X = node.X0
node.Y0 += shape_update[1]
node.Y = node.Y0
node.Z0 += shape_update[2]
node.Z = node.Z0
for mapper in self.ex_vm_mapper.values():
mapper.Update()
def GetControllingObjects(self):
return self.controlling_objects
|
d76e20c42370840b0c75af17e225504e4eab86ea
|
61ac704b86a83227e5a21666aab2d3b2c3bdddb0
|
/tests/test_lemmatizers.py
|
1f28775a41d021160d40599109589b7a568e8a30
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] |
permissive
|
markuskiller/textblob-de
|
1d96a230f0ce45cf56f0cea01b08d94a4b7a5057
|
8479bde9d66cdb16eeec70b92bd3bfa63cb89ac3
|
refs/heads/dev
| 2021-07-16T16:56:48.147739
| 2021-07-01T13:50:03
| 2021-07-01T13:50:03
| 21,608,591
| 105
| 15
|
MIT
| 2021-03-04T10:05:58
| 2014-07-08T10:24:15
|
Python
|
UTF-8
|
Python
| false
| false
| 997
|
py
|
test_lemmatizers.py
|
# -*- coding: utf-8 -*-
"""Test cases for np extractors."""
from __future__ import unicode_literals
import unittest
from nose.tools import * # PEP8 asserts
from textblob_de import PatternParserLemmatizer, PatternTokenizer, NLTKPunktTokenizer
class TestPatternParserLemmatizer(unittest.TestCase):
def setUp(self):
self.text = "Peter hat ein schönes Auto."
self.expected_lemmata = [
('Peter', 'NNP'), ('haben', 'VB'), ('ein', 'DT'), ('schön', 'JJ'), ('Auto', 'NN')]
def test_lemmatize_nltk_tok(self):
_lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer())
lemmata = _lemmatizer.lemmatize(self.text)
assert_equal(lemmata, self.expected_lemmata)
def test_lemmatize_pattern_tok(self):
_lemmatizer = PatternParserLemmatizer(tokenizer=PatternTokenizer())
lemmata = _lemmatizer.lemmatize(self.text)
assert_equal(lemmata, self.expected_lemmata)
if __name__ == '__main__':
unittest.main()
|
8f911e8fd856cfdfb82cd936fa627c6e7f59985d
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-security/ranger/ranger-tools/scripts/create_requests.py
|
a3335510f2c9cfae41b9e288531cbdbc8314c13b
|
[
"Apache-2.0",
"BSD-3-Clause",
"WTFPL",
"MIT",
"GPL-2.0-only"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 1,612
|
py
|
create_requests.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
#
import json
# Opening JSON file
f = open("test_servicetags_hive.json")
# returns JSON object as a dictionary
data = json.load(f)
final_list = []
# Iterating through the json list
for i in data['serviceResources']:
resource_id = i['id']
# dictionary with table, database, or column
resource_elements = i['resourceElements']
    temp = {
        'name': "request-" + str(resource_id),
        'request': {
            'resource': {'elements': {}},
            'accessType': "select",
            'user': "hrt_1",
            'userGroups': [],
            'requestData': "request-" + str(resource_id)
        },
        'result': {'isAudited': 'true', 'isAllowed': 'false', 'policyId': resource_id}
    }
resource_keys = resource_elements.keys()
for resource_key in resource_keys:
resource_item = resource_elements[resource_key]
resource_value = resource_item['values'][0]
temp['request']['resource']['elements'][resource_key] = resource_value
final_list.append(temp)
# Writing JSON file
with open("test_requests_hive.json", "w") as outfile:
json.dump(final_list, outfile)
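# Illustrative sketch (not part of the original script) of the transformation above;
# the database/table names are made up. Each serviceResource entry such as
#   {"id": 7, "resourceElements": {"database": {"values": ["sales_db"]},
#                                  "table": {"values": ["orders"]}}}
# becomes one access-request record whose resource elements keep only the first value:
def _build_request_demo(resource_id=7):
    elements = {"database": "sales_db", "table": "orders"}
    return {
        'name': "request-" + str(resource_id),
        'request': {
            'resource': {'elements': elements},
            'accessType': "select",
            'user': "hrt_1",
            'userGroups': [],
            'requestData': "request-" + str(resource_id)
        },
        'result': {'isAudited': 'true', 'isAllowed': 'false', 'policyId': resource_id}
    }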
|
76e4d37c47d1b34722003082926ea787f317803d
|
c364fdae67ad5298d03d14d442ef890233c45724
|
/pymel/util/__init__.pyi
|
15d371996ce90cf378b0c0fa06d987f34d09715d
|
[
"BSD-3-Clause"
] |
permissive
|
LumaPictures/pymel
|
952b376b1bf4d2cc99c3f99c6c6b4dbc35edd065
|
5fbe189fc0e0e1fdf056be2dd2ae63d26ca33ed5
|
refs/heads/master
| 2023-08-30T01:17:01.855520
| 2023-04-12T15:48:35
| 2023-04-12T15:48:35
| 404,345
| 388
| 128
|
NOASSERTION
| 2023-09-02T00:00:17
| 2009-12-07T19:53:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,804
|
pyi
|
__init__.pyi
|
from collections import *
from .common import *
from .arrays import *
from .enum import *
from .path import *
from .decoration import *
from .shell import *
from .arguments import AddedKey as AddedKey, ChangedKey as ChangedKey, RemovedKey as RemovedKey, breadth as breadth, breadthArgs as breadthArgs, breadthIterArgs as breadthIterArgs, clsname as clsname, compareCascadingDicts as compareCascadingDicts, convertListArgs as convertListArgs, deepPatch as deepPatch, deepPatchAltered as deepPatchAltered, expandArgs as expandArgs, getCascadingDictItem as getCascadingDictItem, getImportableName as getImportableName, getImportableObject as getImportableObject, isIterable as isIterable, isMapping as isMapping, isNumeric as isNumeric, isScalar as isScalar, isSequence as isSequence, iterateArgs as iterateArgs, izip_longest as izip_longest, listForNone as listForNone, mergeCascadingDicts as mergeCascadingDicts, pairIter as pairIter, postorder as postorder, postorderArgs as postorderArgs, postorderIterArgs as postorderIterArgs, preorder as preorder, preorderArgs as preorderArgs, preorderIterArgs as preorderIterArgs, reorder as reorder, sequenceToSlices as sequenceToSlices, setCascadingDictItem as setCascadingDictItem
from .utilitytypes import EquivalencePairs as EquivalencePairs, LazyDocString as LazyDocString, LazyDocStringError as LazyDocStringError, LazyLoadModule as LazyLoadModule, ModuleInterceptor as ModuleInterceptor, ProxyUnicode as ProxyUnicode, Singleton as Singleton, TwoWayDict as TwoWayDict, addLazyDocString as addLazyDocString, alias as alias, defaultdict as defaultdict, defaultlist as defaultlist, metaReadOnlyAttr as metaReadOnlyAttr, metaStatic as metaStatic, propertycache as propertycache, proxyClass as proxyClass, readonly as readonly, universalmethod as universalmethod
|
caacef2dbcad1c9c4a90c6d675b3a94b44106916
|
f61be064cd8d1a5f721c80b3d0146ce081964f45
|
/api/python/examples/nm.py
|
ed6260c021bbf2509796f92f4cd35f5aee5c6e13
|
[
"Apache-2.0"
] |
permissive
|
lief-project/LIEF
|
415fe42c22c97a06b36f0ed385f19de16b7c09d2
|
1ecfbf1fd8c22de5252f4e8b1a2e83199941faf5
|
refs/heads/master
| 2023-08-29T05:54:58.862920
| 2023-08-27T19:33:23
| 2023-08-27T19:33:23
| 85,205,851
| 3,933
| 657
|
Apache-2.0
| 2023-09-09T03:44:33
| 2017-03-16T14:34:53
|
C++
|
UTF-8
|
Python
| false
| false
| 848
|
py
|
nm.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description
# -----------
#
# This tool is a cross-format counterpart to the Linux nm utility. It prints all symbols
# present in the binary. For `PE` it will print symbols in the *symbol section*
# and for `ELF` it will print *static* symbols **AND** *dynamic* symbols.
#
# Example:
#
# >>> nm("/usr/bin/ls")
# >>> nm("C:\\Windows\\explorer.exe")
import sys
from lief import parse
def nm(filename):
""" Return symbols from *filename* binary """
binary = parse(filename) # Build an abstract binary
symbols = binary.symbols
if len(symbols) > 0:
for symbol in symbols:
print(symbol)
else:
print("No symbols found")
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: " + sys.argv[0] + " <binary>")
sys.exit(-1)
nm(sys.argv[1])
|
ec7b039dbf4d44298b828542f5c8e532caabeb9f
|
4e4b752c4dbecf0b0d9f7cb86f9f76bb0ffa5d32
|
/tests/unit/trace/test_attributes.py
|
018cdc47cff8d38097dd0423999662e2dc0f850a
|
[
"Apache-2.0"
] |
permissive
|
census-instrumentation/opencensus-python
|
ab6bcf12b16677d9ca7fc93a5f96c2946d138a0c
|
3a2d8dfe1db4e0129dc691c35901a0d12127afc1
|
refs/heads/master
| 2023-09-02T13:53:19.757971
| 2023-03-16T22:10:07
| 2023-03-16T22:10:07
| 96,581,030
| 701
| 289
|
Apache-2.0
| 2023-09-14T21:14:09
| 2017-07-07T22:28:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,082
|
py
|
test_attributes.py
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from opencensus.trace import attributes as attributes_module
class TestAttributes(unittest.TestCase):
def test_constructor_default(self):
attributes = attributes_module.Attributes()
self.assertEqual(attributes.attributes, {})
def test_constructor_explicit(self):
attr = {'key': 'value'}
attributes = attributes_module.Attributes(attr)
self.assertEqual(attributes.attributes, attr)
def test_set_attribute(self):
key = 'test key'
value = 'test value'
attributes = attributes_module.Attributes()
attributes.set_attribute(key=key, value=value)
expected_attr = {key: value}
self.assertEqual(expected_attr, attributes.attributes)
def test_delete_attribute(self):
attr = {'key1': 'value1', 'key2': 'value2'}
attributes = attributes_module.Attributes(attr)
attributes.delete_attribute('key1')
self.assertEqual(attributes.attributes, {'key2': 'value2'})
def test_get_attribute(self):
attr = {'key': 'value'}
attributes = attributes_module.Attributes(attr)
value = attributes.get_attribute('key')
self.assertEqual(value, 'value')
def test_format_attributes_json(self):
attrs = {
'key1': 'test string',
'key2': True,
'key3': 100,
'key4': 123.456,
}
attributes = attributes_module.Attributes(attrs)
attributes_json = attributes.format_attributes_json()
expected_attributes_json = {
'attributeMap': {
'key1': {
'string_value': {
'value': 'test string',
'truncated_byte_count': 0
}
},
'key2': {
'bool_value': True
},
'key3': {
'int_value': 100
},
'key4': {
'double_value': 123.456
}
}
}
self.assertEqual(expected_attributes_json, attributes_json)
def test_format_attributes_json_type_error(self):
attrs = {
'key1': mock.Mock(),
}
expected_json = {'attributeMap': {}}
attributes = attributes_module.Attributes(attrs)
attributes_json = attributes.format_attributes_json()
self.assertEqual(attributes_json, expected_json)
|
789cd4a87da4e64941ace53347c44602658dee0b
|
c6e6c564cf03427de02e78f436bdf7483e13402f
|
/tests/test_violations/test_implementation.py
|
e43b00bf80471ee1de876a8b5c30297c9f390df9
|
[
"MIT"
] |
permissive
|
wemake-services/wemake-python-styleguide
|
5a60ff468bf7877008c8ed34467da8bdbc2398f2
|
96e482514a60c12e99ee235337e678c9a4e484e3
|
refs/heads/master
| 2023-08-31T14:42:36.827760
| 2023-08-29T05:54:18
| 2023-08-29T05:54:18
| 124,593,057
| 2,427
| 572
|
MIT
| 2023-09-13T07:15:00
| 2018-03-09T21:04:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
test_implementation.py
|
import ast
import re
import pytest
from wemake_python_styleguide.violations.base import ASTViolation
class NewViolation(ASTViolation):
"""
Yells at cloud.
Yay, I'm a docstring!
"""
code = 1
error_template = '{0}'
def test_visitor_returns_location():
"""Ensures that `BaseNodeVisitor` return correct violation message."""
assert NewViolation.full_code == 'WPS001'
assert NewViolation.summary == 'Yells at cloud.'
visitor = NewViolation(node=ast.parse(''), text='violation')
assert visitor.node_items() == (0, 0, 'WPS001 violation')
def test_violation_must_have_docstring():
"""Ensures that `BaseNodeVisitor` return correct violation message."""
with pytest.raises(
TypeError,
match=re.escape(
'Please include a docstring documenting ' +
"<class 'test_implementation.test_violation_must_have_docstring." +
"<locals>.IShallNotPass'>",
),
):
class IShallNotPass(ASTViolation): # noqa: WPS431
code = 123
|
c12db7c857d67856acbe1bbe263e661caf2ad3b2
|
51ec37fc8b633e90f699d4372e1301cf30b9d960
|
/angrmanagement/data/log.py
|
bf82e65d06b6739e7e22dd29053704031ab2c10b
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr-management
|
b7deffdefd53a99336c8da2cd21bd17f1eb689d7
|
f28bfb1c34313c74f99691d0b47de1d90ebfd4ec
|
refs/heads/master
| 2023-09-02T11:53:13.869102
| 2023-08-31T23:38:12
| 2023-08-31T23:38:12
| 40,425,410
| 727
| 125
|
BSD-2-Clause
| 2023-09-11T22:09:39
| 2015-08-09T04:35:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,873
|
py
|
log.py
|
import atexit
import logging
from datetime import datetime
from logging.handlers import QueueHandler, QueueListener
from multiprocessing import Queue
from typing import Optional
from angr.utils.mp import Initializer
from angrmanagement.config import Conf
class LogTimeStamp:
"""
A Log timestamp with formatting
"""
def __init__(self, unix_timestamp: int):
"""
        :param unix_timestamp: The unix time the timestamp represents
"""
self._ts = datetime.fromtimestamp(unix_timestamp)
self._cache_key: Optional[str] = None
self._cache_str: Optional[str] = None
def __str__(self) -> str:
"""
Return the timestamp as a formatted string
"""
        if Conf.log_timestamp_format != self._cache_key:
            self._cache_key = Conf.log_timestamp_format
            self._cache_str = self._ts.strftime(Conf.log_timestamp_format)
        return self._cache_str
class LogRecord:
"""
Stores a log record.
"""
__slots__ = (
"level",
"timestamp",
"source",
"content",
)
def __init__(self, level, unix_timestamp, source, content):
self.timestamp = LogTimeStamp(unix_timestamp)
self.level = level
self.source = source
self.content = content
class LogDumpHandler(logging.Handler):
"""
Dumps log messages.
"""
def __init__(self, instance, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instance = instance
def emit(self, record: logging.LogRecord) -> None:
log_record = LogRecord(record.levelno, record.created, record.name, self.format(record))
self.instance.log.append(log_record)
self.instance.log.am_event(log_record=log_record)
class AMQueueHandler(QueueHandler):
"""
A logging QueueHandler that is of a different type than the default QueueHandler
This allows checking isinstance to ensure the handler is what we desired
"""
def install_queue_handler(queue: Queue):
"""
Install a queue handler using the given queue
This function should work for both fork and spawn modes of multiprocessing
Fork modes may already have the parent logger installed, spawn may not
"""
if not any(isinstance(i, AMQueueHandler) for i in logging.root.handlers):
logging.root.handlers.insert(0, AMQueueHandler(queue))
def initialize(*args, **kwargs) -> None:
"""
Installs a LogDumpHandler and sets up forwarding from other processes to this one
"""
queue = Queue()
# Install queue handlers to the current process and all future subprocesses
Initializer.get().register(install_queue_handler, queue)
install_queue_handler(queue)
# Install a listener which forwards log records to the LogDumpHandler
listener = QueueListener(queue, LogDumpHandler(*args, **kwargs))
atexit.register(listener.stop)
listener.start()
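# Minimal generic sketch of the stdlib QueueHandler/QueueListener pattern that
# initialize() above builds on (illustrative only; it does not use angr-management's
# Conf or Initializer):
def _queue_logging_demo():
    import queue
    q = queue.Queue()
    # Producers only enqueue records through a QueueHandler (the module above uses a
    # multiprocessing Queue so that subprocesses can do the same)...
    logging.getLogger("demo").addHandler(QueueHandler(q))
    # ...and a single listener drains the queue and forwards records to real handlers.
    listener = QueueListener(q, logging.StreamHandler())
    listener.start()
    logging.getLogger("demo").warning("hello from the queue")
    listener.stop()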
|
cef4bd8a2e3405bac722ea50fddba12b93166855
|
316a33474dbdc1dec5a0cfd663a870ff7449892d
|
/test/test_connection.py
|
bbedf550f75d1e487e4eb0e36695b6d0a74f1a3b
|
[
"MIT"
] |
permissive
|
python-hyper/wsproto
|
df9c84a6ebae10b321f43438a379e0f40c4c1ccb
|
c0a107939d6c0fccdb55028b39b3db026319b65e
|
refs/heads/main
| 2023-08-11T13:49:25.663894
| 2023-04-12T20:45:52
| 2023-04-12T20:45:52
| 65,805,167
| 228
| 46
|
MIT
| 2023-04-12T20:45:54
| 2016-08-16T09:10:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,012
|
py
|
test_connection.py
|
import pytest
from wsproto.connection import CLIENT, Connection, ConnectionState, SERVER
from wsproto.events import (
BytesMessage,
CloseConnection,
Ping,
Pong,
Request,
TextMessage,
)
from wsproto.frame_protocol import CloseReason
from wsproto.utilities import LocalProtocolError
@pytest.mark.parametrize("client_sends", [True, False])
@pytest.mark.parametrize("final", [True, False])
def test_send_message(client_sends: bool, final: bool) -> None:
client = Connection(CLIENT)
server = Connection(SERVER)
if client_sends:
local = client
remote = server
else:
local = server
remote = client
data = b"x" * 23
remote.receive_data(local.send(BytesMessage(data=data, message_finished=final)))
event = next(remote.events())
assert isinstance(event, BytesMessage)
assert event.data == data
assert event.message_finished is final
@pytest.mark.parametrize("client_sends", [True, False])
@pytest.mark.parametrize(
"code, reason",
[(CloseReason.NORMAL_CLOSURE, "bye"), (CloseReason.GOING_AWAY, "👋👋")],
)
def test_closure(client_sends: bool, code: CloseReason, reason: str) -> None:
client = Connection(CLIENT)
server = Connection(SERVER)
if client_sends:
local = client
remote = server
else:
local = server
remote = client
remote.receive_data(local.send(CloseConnection(code=code, reason=reason)))
event = next(remote.events())
assert isinstance(event, CloseConnection)
assert event.code is code
assert event.reason == reason
assert remote.state is ConnectionState.REMOTE_CLOSING
assert local.state is ConnectionState.LOCAL_CLOSING
local.receive_data(remote.send(event.response()))
event = next(local.events())
assert isinstance(event, CloseConnection)
assert event.code is code
assert event.reason == reason
assert remote.state is ConnectionState.CLOSED # type: ignore[comparison-overlap]
assert local.state is ConnectionState.CLOSED
with pytest.raises(LocalProtocolError):
local.receive_data(b"foobar")
def test_abnormal_closure() -> None:
client = Connection(CLIENT)
client.receive_data(None)
event = next(client.events())
assert isinstance(event, CloseConnection)
assert event.code is CloseReason.ABNORMAL_CLOSURE
assert client.state is ConnectionState.CLOSED
def test_close_whilst_closing() -> None:
client = Connection(CLIENT)
client.send(CloseConnection(code=CloseReason.NORMAL_CLOSURE))
with pytest.raises(LocalProtocolError):
client.send(CloseConnection(code=CloseReason.NORMAL_CLOSURE))
def test_send_after_close() -> None:
client = Connection(CLIENT)
client.send(CloseConnection(code=CloseReason.NORMAL_CLOSURE))
with pytest.raises(LocalProtocolError):
client.send(TextMessage(data="", message_finished=True))
@pytest.mark.parametrize("client_sends", [True, False])
def test_ping_pong(client_sends: bool) -> None:
client = Connection(CLIENT)
server = Connection(SERVER)
if client_sends:
local = client
remote = server
else:
local = server
remote = client
payload = b"x" * 23
remote.receive_data(local.send(Ping(payload=payload)))
event = next(remote.events())
assert isinstance(event, Ping)
assert event.payload == payload
local.receive_data(remote.send(event.response()))
event = next(local.events())
assert isinstance(event, Pong)
assert event.payload == payload
def test_unsolicited_pong() -> None:
client = Connection(CLIENT)
server = Connection(SERVER)
payload = b"x" * 23
server.receive_data(client.send(Pong(payload=payload)))
event = next(server.events())
assert isinstance(event, Pong)
assert event.payload == payload
@pytest.mark.parametrize("split_message", [True, False])
def test_data(split_message: bool) -> None:
client = Connection(CLIENT)
server = Connection(SERVER)
data = "ƒñö®∂😎"
server.receive_data(
client.send(TextMessage(data=data, message_finished=not split_message))
)
event = next(server.events())
assert isinstance(event, TextMessage)
assert event.message_finished is not split_message
def test_frame_protocol_gets_fed_garbage() -> None:
client = Connection(CLIENT)
payload = b"x" * 23
frame = b"\x09" + bytearray([len(payload)]) + payload
client.receive_data(frame)
event = next(client.events())
assert isinstance(event, CloseConnection)
assert event.code == CloseReason.PROTOCOL_ERROR
def test_send_invalid_event() -> None:
client = Connection(CLIENT)
with pytest.raises(LocalProtocolError):
client.send(Request(target="/", host="wsproto"))
def test_receive_data_when_closed() -> None:
client = Connection(CLIENT)
client._state = ConnectionState.CLOSED
with pytest.raises(LocalProtocolError):
client.receive_data(b"something")
|
23a20735f60fa77dda95f5085fdb60307946edfc
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayEcoRenthouseBillOrderDownloadResponse.py
|
0c41147f24adf12e582f86b1f744dfadc7baf93e
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 772
|
py
|
AlipayEcoRenthouseBillOrderDownloadResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEcoRenthouseBillOrderDownloadResponse(AlipayResponse):
def __init__(self):
super(AlipayEcoRenthouseBillOrderDownloadResponse, self).__init__()
self._status_value = None
@property
def status_value(self):
return self._status_value
@status_value.setter
def status_value(self, value):
self._status_value = value
def parse_response_content(self, response_content):
response = super(AlipayEcoRenthouseBillOrderDownloadResponse, self).parse_response_content(response_content)
if 'status_value' in response:
self.status_value = response['status_value']
|
c7eac23a174e7d8340197b64d800ef2db76706b0
|
a7957955624002dab86de5f081028a4eedefe816
|
/lib/tracker/oceanplus.py
|
d5dd55e5a58e502413513832da29f16489923f84
|
[
"MIT"
] |
permissive
|
researchmm/TracKit
|
2652366f0d9430cdc13598bf7997979e1a9eeb2d
|
e351e5bff8071aa14f333d5975a8f408a3e264c6
|
refs/heads/master
| 2023-09-04T03:37:53.098346
| 2023-08-07T23:50:10
| 2023-08-07T23:50:10
| 223,065,925
| 639
| 121
|
MIT
| 2021-09-02T13:07:46
| 2019-11-21T01:57:48
|
Python
|
UTF-8
|
Python
| false
| false
| 17,824
|
py
|
oceanplus.py
|
import os
import cv2
import yaml
import numpy as np
import torch
import torch.nn.functional as F
from os.path import join, exists
from utils.utils import load_yaml, im_to_torch, get_subwindow_tracking, make_scale_pyramid, python2round, get_subwindow_tracking_mask
class OceanPlus(object):
def __init__(self, info):
super(OceanPlus, self).__init__()
self.info = info # model and benchmark info
self.stride = 8
if info.dataset in ['DAVIS2016', 'DAVIS2017', 'YTBVOS']:
self.vos = True
else:
self.vos = False
def init(self, im, target_pos, target_sz, model, hp=None, online=False, mask=None, debug=False):
# in: whether input infrared image
state = dict()
# epoch test
p = AdaConfig()
self.debug = debug
state['im_h'] = im.shape[0]
state['im_w'] = im.shape[1]
self.imh = state['im_h']
self.imw = state['im_w']
# single test
# if not hp and not self.info.epoch_test:
if True:
prefix = [x for x in ['OTB', 'VOT', 'DAVIS'] if x in self.info.dataset]
if len(prefix) == 0: prefix = [self.info.dataset]
absPath = os.path.abspath(os.path.dirname(__file__))
yname='OceanPlus.yaml'
yamlPath = os.path.join(absPath, '../../experiments/test/{}/'.format(prefix[0]), yname)
cfg = load_yaml(yamlPath)
if self.info.dataset not in list(cfg.keys()):
print('[*] unsupported benchmark, use VOT2020 hyper-parameters (not optimal)')
cfg_benchmark = cfg['VOT2020']
else:
cfg_benchmark = cfg[self.info.dataset]
p.update(cfg_benchmark)
p.renew()
if ((target_sz[0] * target_sz[1]) / float(state['im_h'] * state['im_w'])) < 0.004:
p.instance_size = cfg_benchmark['big_sz']
p.renew()
else:
p.instance_size = cfg_benchmark['small_sz']
p.renew()
self.grids(p) # self.grid_to_search_x, self.grid_to_search_y
net = model
# param tune
if hp:
p.update(hp)
if 'lambda_u' in hp.keys() or 'lambda_s' in hp.keys():
net.update_lambda(hp['lambda_u'], hp['lambda_s'])
if 'iter1' in hp.keys() or 'iter2' in hp.keys():
net.update_iter(hp['iter1'], hp['iter2'])
print('======= hyper-parameters: pk: {:.3f}, wi: {:.2f}, lr: {:.2f} ======='.format(p.penalty_k, p.window_influence, p.lr))
wc_z = target_sz[0] + p.context_amount * sum(target_sz)
hc_z = target_sz[1] + p.context_amount * sum(target_sz)
s_z = round(np.sqrt(wc_z * hc_z))
avg_chans = np.mean(im, axis=(0, 1))
z_crop, _ = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z, avg_chans)
mask_crop, _ = get_subwindow_tracking_mask(mask, target_pos, p.exemplar_size, s_z, out_mode=None)
mask_crop = (mask_crop > 0.5).astype(np.uint8)
mask_crop = torch.from_numpy(mask_crop)
# vis zcrop
# vis = 0.5 * z_crop.permute(1,2,0) + 255 * mask_crop.unsqueeze(-1).float()
# cv2.imwrite('zcrop.jpg', vis.numpy())
z = z_crop.unsqueeze(0)
net.template(z.cuda(), mask_crop.unsqueeze(0).cuda())
if p.windowing == 'cosine':
window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size)) # [17,17]
elif p.windowing == 'uniform':
            window = np.ones((int(p.score_size), int(p.score_size)))
state['p'] = p
state['net'] = net
state['avg_chans'] = avg_chans
state['window'] = window
state['target_pos'] = target_pos
state['target_sz'] = target_sz
self.p = p
self.debug_on_crop = False
self.debug_on_ori = False
self.save_mask = False # save all mask results
self.mask_ratio = False
self.update_template = True
if self.debug_on_ori or self.debug_on_crop:
            print('Warning: debugging...')
print('Warning: turning off debugging mode after this process')
self.debug = True
return state
def update(self, net, x_crops, target_pos, target_sz, window, scale_z, p):
cls_score, bbox_pred, mask = net.track(x_crops)
cls_score = F.sigmoid(cls_score).squeeze().cpu().data.numpy()
# bbox to real predict
bbox_pred = bbox_pred.squeeze().cpu().data.numpy()
pred_x1 = self.grid_to_search_x - bbox_pred[0, ...]
pred_y1 = self.grid_to_search_y - bbox_pred[1, ...]
pred_x2 = self.grid_to_search_x + bbox_pred[2, ...]
pred_y2 = self.grid_to_search_y + bbox_pred[3, ...]
# size penalty
s_c = self.change(self.sz(pred_x2-pred_x1, pred_y2-pred_y1) / (self.sz_wh(target_sz))) # scale penalty
r_c = self.change((target_sz[0] / target_sz[1]) / ((pred_x2-pred_x1) / (pred_y2-pred_y1))) # ratio penalty
penalty = np.exp(-(r_c * s_c - 1) * p.penalty_k)
pscore = penalty * cls_score
# window penalty
if self.online_score is not None:
pscore_ori = pscore * (1 - p.window_influence) + window * p.window_influence
else:
pscore = pscore * (1 - p.window_influence) + window * p.window_influence
pscore_ori = pscore
if self.online_score is not None:
s_size = pscore.shape[0]
o_score = cv2.resize(self.online_score, (s_size, s_size), interpolation=cv2.INTER_CUBIC)
pscore = p.online_ratio * o_score + (1 - p.online_ratio) * pscore_ori
else:
pass
# get max
r_max, c_max = np.unravel_index(pscore.argmax(), pscore.shape)
# to real size
pred_x1 = pred_x1[r_max, c_max]
pred_y1 = pred_y1[r_max, c_max]
pred_x2 = pred_x2[r_max, c_max]
pred_y2 = pred_y2[r_max, c_max]
pred_xs = (pred_x1 + pred_x2) / 2
pred_ys = (pred_y1 + pred_y2) / 2
pred_w = pred_x2 - pred_x1
pred_h = pred_y2 - pred_y1
diff_xs = pred_xs - p.instance_size // 2
diff_ys = pred_ys - p.instance_size // 2
diff_xs, diff_ys, pred_w, pred_h = diff_xs / scale_z, diff_ys / scale_z, pred_w / scale_z, pred_h / scale_z
target_sz = target_sz / scale_z
# size learning rate
lr = penalty[r_max, c_max] * cls_score[r_max, c_max] * p.lr
        if pscore_ori[r_max, c_max] > 0.95 and self.update_template: # do not update for vos dataset
pos_in_crop = np.array([diff_xs, diff_ys]) * scale_z
sz_in_crop = target_sz * scale_z
net.update_roi_template(pos_in_crop, sz_in_crop, pscore[r_max, c_max]) # update template
# size rate
res_xs = target_pos[0] + diff_xs
res_ys = target_pos[1] + diff_ys
res_w = pred_w * lr + (1 - lr) * target_sz[0]
res_h = pred_h * lr + (1 - lr) * target_sz[1]
target_pos = np.array([res_xs, res_ys])
target_sz = target_sz * (1 - lr) + lr * np.array([res_w, res_h])
if self.debug:
bbox = [int(target_pos[0]-target_sz[0]/2), int(target_pos[1]-target_sz[1]/2), int(target_pos[0]+target_sz[0]/2), int(target_pos[1]+target_sz[1]/2)]
# ----------------------- mask --------------------
mask = mask.squeeze()
mask = F.softmax(mask, dim=0)[1]
mask = mask.squeeze().cpu().data.numpy() # [255, 255]
# print('---- in track0')
if self.debug_on_crop:
print('===========> debug on crop image <==========')
# draw on crop image
polygon = self.mask2box(mask, method='cv2poly')
im = x_crops.squeeze().permute(1, 2, 0).cpu().data.numpy()
output = self.draw_mask(mask, im, polygon=polygon, mask_ratio=0.8, draw_contour=False, object_num=1)
cv2.imwrite('mask.jpg', output)
else:
# print('===========> debug on original image <==========')
# width and height of original image patch in get_sub_window tracking
context_xmin, context_xmax, context_ymin, context_ymax = self.crop_info['crop_cords']
top_pad, left_pad, r, c = self.crop_info['pad_info']
temp_w = context_xmax - context_xmin + 1
temp_h = context_ymax - context_ymin + 1
mask_temp = cv2.resize(mask, (int(temp_h), int(temp_w)), interpolation=cv2.INTER_CUBIC)
# return mask to original image patch in get_sub_window tracking
empty_mask = self.crop_info['empty_mask']
empty_mask[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1)] = mask_temp
# remove crop padding
mask_in_im = empty_mask[top_pad:top_pad + r, left_pad:left_pad + c]
if self.debug_on_ori or self.debug:
polygon = self.mask2box(mask_in_im, method='cv2poly')
output = self.draw_mask(mask_in_im, self.im_ori, polygon=polygon, box=bbox, mask_ratio=0.8, draw_contour=False, object_num=1)
cv2.imwrite(join(self.save_dir, self.name.split('/')[-1]), output)
else:
polygon = None
# ------ test -------
results = dict()
results['target_pos'] = target_pos
results['target_sz'] = target_sz
results['cls_score'] = cls_score[r_max, c_max]
results['mask'] = (mask_in_im > self.p.seg_thr).astype(np.uint8)
results['mask_ori'] = mask_in_im
results['polygon'] = polygon
return results
def track(self, state, im, online_score=None, gt=None, name=None):
p = state['p']
net = state['net']
avg_chans = state['avg_chans']
window = state['window']
target_pos = state['target_pos']
target_sz = state['target_sz']
self.im_ori = im.copy()
self.gt = gt
if online_score is not None:
self.online_score = online_score.squeeze().cpu().data.numpy()
else:
self.online_score = None
# debug
if self.debug:
temp = name.split('/')[-2]
self.name = name
self.save_dir = join('debug', temp)
if not exists(self.save_dir):
os.makedirs(self.save_dir)
hc_z = target_sz[1] + p.context_amount * sum(target_sz)
wc_z = target_sz[0] + p.context_amount * sum(target_sz)
s_z = np.sqrt(wc_z * hc_z)
scale_z = p.exemplar_size / s_z
d_search = (p.instance_size - p.exemplar_size) / 2 # slightly different from rpn++
pad = d_search / scale_z
s_x = s_z + 2 * pad
x_crop, self.crop_info = get_subwindow_tracking(im, target_pos, p.instance_size, python2round(s_x), avg_chans)
x_crop = x_crop.unsqueeze(0)
results = self.update(net, x_crop.cuda(), target_pos, target_sz*scale_z, window, scale_z, p)
target_pos, target_sz, cls_score, mask, mask_ori, polygon = results['target_pos'], results['target_sz'], results['cls_score'], results['mask'], results['mask_ori'], results['polygon']
target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
target_sz[1] = max(10, min(state['im_h'], target_sz[1]))
state['target_pos'] = target_pos
state['target_sz'] = target_sz
state['cls_score'] = cls_score
state['mask'] = mask
state['mask_ori'] = mask_ori
state['polygon'] = polygon
state['p'] = p
return state
def mask2box(self, mask, method='cv2poly'):
"""
method: cv2poly --> opencv
opt --> vot version
"""
mask = (mask > self.p.seg_thr).astype(np.uint8)
if method == 'cv2poly':
if cv2.__version__[-5] == '4':
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
else:
_, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnt_area = [cv2.contourArea(cnt) for cnt in contours]
if len(contours) != 0 and np.max(cnt_area) > 0:
contour = contours[np.argmax(cnt_area)] # use max area polygon
polygon = contour.reshape(-1, 2)
# pbox = cv2.boundingRect(polygon) # Min Max Rectangle
# box_in_img = pbox
prbox = cv2.boxPoints(cv2.minAreaRect(polygon)) # Rotated Rectangle
pred_polygon = ((prbox[0][0], prbox[0][1]), (prbox[1][0], prbox[1][1]),
(prbox[2][0], prbox[2][1]), (prbox[3][0], prbox[3][1]))
return pred_polygon
else:
return None
elif method == 'opt':
pass
else:
raise ValueError('not supported mask2box methods')
def draw_mask(self, mask, im, polygon=None, box=None, mask_ratio=0.2, draw_contour=False, object_num=1):
# draw mask
# mask: 0, 255
mask = mask > self.p.seg_thr
mask = mask.astype('uint8')
# COLOR
COLORS = np.random.randint(128, 255, size=(object_num, 3), dtype="uint8")
COLORSIM = np.vstack([[0, 0, 0], COLORS]).astype("uint8")
mask_draw = COLORSIM[mask]
# mask = mask * 255
where_is = (mask == 0).astype(int)
where_is = np.expand_dims(where_is, axis=-1)
out_mask = where_is * im
output = ((1 - mask_ratio) * im + mask_ratio * mask_draw + mask_ratio * out_mask).astype("uint8")
# output = ((1 - mask_ratio) * im + mask_ratio * mask).astype("uint8")
if draw_contour:
mask = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY)
try:
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# remove small contours
areas = np.array([cv2.contourArea(c) for c in contours])
max_area = np.max(areas)
max_idx = np.argmax(areas)
minArea = max_area * 0.01
filteredContours = []
findhier = []
for id, i in enumerate(contours):
area = cv2.contourArea(i)
if area > minArea:
filteredContours.append(i)
findhier.append(hierarchy[:, id, :])
# findhier = np.array(findhier).transpose(1, 0, 2)
output = cv2.drawContours(output, filteredContours, -1, (255, 255, 255), 2, cv2.LINE_8)
except:
print('draw contour process fails...')
else:
pass
if polygon is not None:
# draw rotated box
polygon = np.int0(polygon) # to int
output = cv2.polylines(output, [polygon.reshape((-1, 1, 2))], True, (0, 255, 255), 3)
# output = cv2.drawContours(output, [polygon], 0, (0, 0, 255), 3)
# draw gt
try:
gt = ((self.gt[0], self.gt[1]), (self.gt[2], self.gt[3]), (self.gt[4], self.gt[5]), (self.gt[6], self.gt[7]))
gt = np.int0(gt) # to int
output = cv2.polylines(output, [gt.reshape((-1, 1, 2))], True, (0, 0, 255), 3)
except:
pass
if box is not None:
output = cv2.rectangle(output, (box[0], box[1]), (box[2], box[3]), (0, 255, 0))
return output
def grids(self, p):
"""
each element of feature map on input search image
:return: H*W*2 (position for each element)
"""
sz = p.score_size
# the real shift is -param['shifts']
sz_x = sz // 2
sz_y = sz // 2
x, y = np.meshgrid(np.arange(0, sz) - np.floor(float(sz_x)),
np.arange(0, sz) - np.floor(float(sz_y)))
self.grid_to_search_x = x * p.total_stride + p.instance_size // 2
self.grid_to_search_y = y * p.total_stride + p.instance_size // 2
def IOUgroup(self, pred_x1, pred_y1, pred_x2, pred_y2, gt_xyxy):
# overlap
x1, y1, x2, y2 = gt_xyxy
xx1 = np.maximum(pred_x1, x1) # 17*17
yy1 = np.maximum(pred_y1, y1)
xx2 = np.minimum(pred_x2, x2)
yy2 = np.minimum(pred_y2, y2)
ww = np.maximum(0, xx2 - xx1)
hh = np.maximum(0, yy2 - yy1)
area = (x2 - x1) * (y2 - y1)
target_a = (pred_x2 - pred_x1) * (pred_y2 - pred_y1)
inter = ww * hh
overlap = inter / (area + target_a - inter)
return overlap
def change(self, r):
return np.maximum(r, 1. / r)
def sz(self, w, h):
pad = (w + h) * 0.5
sz2 = (w + pad) * (h + pad)
return np.sqrt(sz2)
def sz_wh(self, wh):
pad = (wh[0] + wh[1]) * 0.5
sz2 = (wh[0] + pad) * (wh[1] + pad)
return np.sqrt(sz2)
class AdaConfig(object):
penalty_k = 0.06
window_influence = 0.484
lr = 0.644
windowing = 'cosine'
exemplar_size = 127
instance_size = 255
total_stride = 8
score_size = (instance_size - exemplar_size) // total_stride + 1 + 8 # for ++
context_amount = 0.5
ratio = 0.94
online_ratio = 0.7
#seg_thr = 0.84
seg_thr = 0.9
lambda_u = 0.1
lambda_s = 0.2
iter1 = 0.33
iter2 = 0.33
def update(self, newparam=None):
if newparam:
for key, value in newparam.items():
setattr(self, key, value)
self.renew()
def renew(self):
self.score_size = (self.instance_size - self.exemplar_size) // self.total_stride + 1 + 8 # for ++
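# Worked example of the score_size arithmetic above (illustrative comment only):
# with the defaults exemplar_size=127, instance_size=255 and total_stride=8,
#     score_size = (255 - 127) // 8 + 1 + 8 = 16 + 1 + 8 = 25,
# i.e. the 25x25 response grid that grids() in OceanPlus maps back to per-cell
# search-image coordinates.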
|
2ed0c1b79cfea0e285678f0650ac39cc509f58e2
|
12d3d7359d78a8d722daeda7e3a12625e667990d
|
/models/roadnet_model.py
|
babc41f7c0ccdb57374839acda0d2034af2591a1
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
yhlleo/DeepSegmentor
|
71fabfbc98ae2ef3b66dc9d3bf6050e32747191b
|
e23af48e7c155b04a9c40013fb5a6616e4102484
|
refs/heads/master
| 2023-04-12T13:31:57.678114
| 2023-03-17T02:43:56
| 2023-03-17T02:43:56
| 190,896,878
| 228
| 81
|
NOASSERTION
| 2021-04-06T06:50:51
| 2019-06-08T14:24:10
|
Python
|
UTF-8
|
Python
| false
| false
| 5,801
|
py
|
roadnet_model.py
|
# Author: Yahui Liu <yahui.liu@uintn.it>
import torch
import numpy as np
import itertools
from .base_model import BaseModel
import torch.nn.functional as F
from .roadnet_networks import define_roadnet
class RoadNetModel(BaseModel):
"""
This class implements the RoadNet model.
RoadNet paper: https://ieeexplore.ieee.org/document/8506600
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options."""
return parser
def __init__(self, opt):
"""Initialize the RoadNet class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['segment', 'edge', 'centerline']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['image', 'label_gt', 'label_pred']
# specify the models you want to save to the disk.
self.model_names = ['G']
# define networks
self.netG = define_roadnet(opt.input_nc,
opt.output_nc,
opt.ngf,
opt.norm,
opt.use_selu,
opt.init_type,
opt.init_gain,
self.gpu_ids)
if self.isTrain:
# define loss functions
self.weight_segment_side = [0.5, 0.75, 1.0, 0.75, 0.5, 1.0]
self.weight_others_side = [0.5, 0.75, 1.0, 0.75, 1.0]
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, eps=1e-3, weight_decay=2e-4)
#self.optimizer = torch.optim.SGD(self.netG.parameters(), lr=opt.lr, momentum=0.9, weight_decay=2e-4)
self.optimizers.append(self.optimizer)
def _get_balanced_sigmoid_cross_entropy(self,x):
count_neg = torch.sum(1. - x)
count_pos = torch.sum(x)
beta = count_neg / (count_neg + count_pos)
pos_weight = beta / (1 - beta)
cost = torch.nn.BCEWithLogitsLoss(size_average=True, reduce=True, pos_weight=pos_weight)
return cost, 1-beta
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
"""
self.image = input['image'].to(self.device)
self.segment_gt = input['segment'].to(self.device)
self.edge_gt = input['edge'].to(self.device)
self.centerline_gt = input['centerline'].to(self.device)
self.image_paths = input['A_paths']
if self.isTrain:
self.criterion_seg, self.beta_seg = self._get_balanced_sigmoid_cross_entropy(self.segment_gt)
self.criterion_edg, self.beta_edg = self._get_balanced_sigmoid_cross_entropy(self.edge_gt)
self.criterion_cnt, self.beta_cnt = self._get_balanced_sigmoid_cross_entropy(self.centerline_gt)
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.segments, self.edges, self.centerlines = self.netG(self.image)
# for visualization
segment_gt_viz = (self.segment_gt-0.5)/0.5
edge_gt_viz = (self.edge_gt-0.5)/0.5
centerline_gt_viz = (self.centerline_gt-0.5)/0.5
self.label_gt = torch.cat([centerline_gt_viz, edge_gt_viz, segment_gt_viz], dim=1)
segment_fused = (torch.sigmoid(self.segments[-1])-0.5)/0.5
edge_fused = (torch.sigmoid(self.edges[-1])-0.5)/0.5
centerlines_fused = (torch.sigmoid(self.centerlines[-1])-0.5)/0.5
self.label_pred = torch.cat([centerlines_fused, edge_fused, segment_fused], dim=1)
def backward(self):
"""Calculate the loss"""
self.loss_segment = torch.mean((torch.sigmoid(self.segments[-1])-self.segment_gt)**2) * 0.5
if self.segment_gt.sum() > 0.0: # ignore blank ones
for out, w in zip(self.segments, self.weight_segment_side):
self.loss_segment += self.criterion_seg(out, self.segment_gt) * self.beta_seg * w
self.loss_edge = torch.mean((torch.sigmoid(self.edges[-1])-self.edge_gt)**2) * 0.5
if self.edge_gt.sum() > 0.0:
for out, w in zip(self.edges, self.weight_others_side):
self.loss_edge += self.criterion_edg(out, self.edge_gt) * self.beta_edg * w
self.loss_centerline = torch.mean((torch.sigmoid(self.centerlines[-1])-self.centerline_gt)**2) * 0.5
if self.centerline_gt.sum() > 0.0:
for out, w in zip(self.centerlines, self.weight_others_side):
self.loss_centerline += self.criterion_cnt(out, self.centerline_gt) * self.beta_cnt * w
self.loss_total = self.loss_segment + self.loss_edge + self.loss_centerline
self.loss_total.backward()
def optimize_parameters(self, epoch=None):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# forward
self.forward() # compute predictions.
self.optimizer.zero_grad() # set G's gradients to zero
self.backward() # calculate gradients for G
self.optimizer.step() # update G's weights
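# Usage sketch (illustrative, not part of the upstream training script): assuming
# `opt` is a parsed options object and `data` is one batch from a dataset that
# yields 'image', 'segment', 'edge', 'centerline' and 'A_paths':
#
#   model = RoadNetModel(opt)
#   model.setup(opt)                      # BaseModel: load/print networks, build schedulers
#   model.set_input(data)
#   model.optimize_parameters()           # one forward/backward/update step
#   losses = model.get_current_losses()   # {'segment': ..., 'edge': ..., 'centerline': ...}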
|
35ababe91767e3440884b6762d812a6252a063b6
|
5320b5faacc16e29c027d7acaef0a0a9efd03b13
|
/tests/test_maps.py
|
8db6298fcd6233d04bd27bb0870552b4b6d2af8f
|
[
"Apache-2.0"
] |
permissive
|
googlemaps/google-maps-services-python
|
87cce1903120db3f35605b772f51a0777f47b598
|
645e07de5a27c4c858b2c0673f0dd6f23ca62d28
|
refs/heads/master
| 2023-09-03T08:49:32.905689
| 2023-01-27T01:01:18
| 2023-01-27T01:01:18
| 22,978,655
| 4,410
| 1,525
|
Apache-2.0
| 2023-08-27T16:27:14
| 2014-08-15T04:35:30
|
Python
|
UTF-8
|
Python
| false
| false
| 4,138
|
py
|
test_maps.py
|
#
# Copyright 2020 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""Tests for the maps module."""
from types import GeneratorType
import responses
import googlemaps
from . import TestCase
from googlemaps.maps import StaticMapMarker
from googlemaps.maps import StaticMapPath
class MapsTest(TestCase):
def setUp(self):
self.key = "AIzaasdf"
self.client = googlemaps.Client(self.key)
@responses.activate
def test_static_map_marker(self):
marker = StaticMapMarker(
locations=[{"lat": -33.867486, "lng": 151.206990}, "Sydney"],
size="small",
color="blue",
label="S",
)
self.assertEqual(
"size:small|color:blue|label:S|" "-33.867486,151.20699|Sydney", str(marker)
)
with self.assertRaises(ValueError):
StaticMapMarker(locations=["Sydney"], label="XS")
self.assertEqual("label:1|Sydney", str(StaticMapMarker(locations=["Sydney"], label="1")))
@responses.activate
def test_static_map_path(self):
path = StaticMapPath(
points=[{"lat": -33.867486, "lng": 151.206990}, "Sydney"],
weight=5,
color="red",
geodesic=True,
fillcolor="Red",
)
self.assertEqual(
"weight:5|color:red|fillcolor:Red|"
"geodesic:True|"
"-33.867486,151.20699|Sydney",
str(path),
)
@responses.activate
def test_download(self):
url = "https://maps.googleapis.com/maps/api/staticmap"
responses.add(responses.GET, url, status=200)
path = StaticMapPath(
points=[(62.107733, -145.541936), "Delta+Junction,AK"],
weight=5,
color="red",
)
m1 = StaticMapMarker(
locations=[(62.107733, -145.541936)], color="blue", label="S"
)
m2 = StaticMapMarker(
locations=["Delta+Junction,AK"], size="tiny", color="green"
)
m3 = StaticMapMarker(
locations=["Tok,AK"], size="mid", color="0xFFFF00", label="C"
)
response = self.client.static_map(
size=(400, 400),
zoom=6,
center=(63.259591, -144.667969),
maptype="hybrid",
format="png",
scale=2,
visible=["Tok,AK"],
path=path,
markers=[m1, m2, m3],
)
self.assertTrue(isinstance(response, GeneratorType))
self.assertEqual(1, len(responses.calls))
self.assertURLEqual(
"%s?center=63.259591%%2C-144.667969&format=png&maptype=hybrid&"
"markers=color%%3Ablue%%7Clabel%%3AS%%7C62.107733%%2C-145.541936&"
"markers=size%%3Atiny%%7Ccolor%%3Agreen%%7CDelta%%2BJunction%%2CAK&"
"markers=size%%3Amid%%7Ccolor%%3A0xFFFF00%%7Clabel%%3AC%%7CTok%%2CAK&"
"path=weight%%3A5%%7Ccolor%%3Ared%%7C62.107733%%2C-145.541936%%7CDelta%%2BJunction%%2CAK&"
"scale=2&size=400x400&visible=Tok%%2CAK&zoom=6&key=%s" % (url, self.key),
responses.calls[0].request.url,
)
with self.assertRaises(ValueError):
self.client.static_map(size=(400, 400))
with self.assertRaises(ValueError):
self.client.static_map(
size=(400, 400), center=(63.259591, -144.667969), zoom=6, format="test"
)
with self.assertRaises(ValueError):
self.client.static_map(
size=(400, 400), center=(63.259591, -144.667969), zoom=6, maptype="test"
)
|
f13686622beb1474093d7a1d2cd22196c80bd825
|
f4aa1885d4121e131c2a580183c6312aeefa8147
|
/ch03/myproject_virtualenv/src/django-myproject/myproject/apps/core/templatetags/utility_tags.py
|
fc2ed3057fcea5a1fe6c8a5e86d2c3dd7c58e634
|
[
"MIT"
] |
permissive
|
PacktPublishing/Django-3-Web-Development-Cookbook-Fourth-Edition
|
8f09d1ea9b13e8a66fc489fc09c9a5ee8f9968cf
|
9371e0ea6f4dc61567bf28299cf57146519e274c
|
refs/heads/master
| 2023-02-20T02:36:51.226985
| 2023-01-30T08:39:30
| 2023-01-30T08:39:30
| 201,903,680
| 189
| 117
|
MIT
| 2023-02-10T22:45:42
| 2019-08-12T09:54:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,575
|
py
|
utility_tags.py
|
from urllib.parse import urlencode
from django import template
from django.utils.encoding import force_str
from django.utils.safestring import mark_safe
register = template.Library()
def construct_query_string(context, query_params):
# empty values will be removed
query_string = context["request"].path
if len(query_params):
encoded_params = urlencode([
(key, force_str(value))
for (key, value) in query_params if value
        ]).replace("&", "&amp;")
query_string += f"?{encoded_params}"
return mark_safe(query_string)
"""TAGS"""
@register.simple_tag(takes_context=True)
def modify_query(context, *params_to_remove, **params_to_change):
"""Renders a link with modified current query parameters"""
query_params = []
for key, value_list in context["request"].GET.lists():
        if key not in params_to_remove:
# don't add key-value pairs for params_to_remove
if key in params_to_change:
# update values for keys in params_to_change
query_params.append((key, params_to_change[key]))
params_to_change.pop(key)
else:
# leave existing parameters as they were
# if not mentioned in the params_to_change
for value in value_list:
query_params.append((key, value))
# attach new params
for key, value in params_to_change.items():
query_params.append((key, value))
return construct_query_string(context, query_params)
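# Usage sketch (illustrative): in a template, load the library and pass positional
# arguments to drop parameters and keyword arguments to set or replace them, e.g.
#   {% load utility_tags %}
#   <a href="{% modify_query 'page' sort_by='name' %}">Sort by name</a>
# On /articles/?page=3&category=django this renders
# /articles/?category=django&amp;sort_by=name (ampersands escaped for HTML output).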
|
fe96b10aba94a82db507c65836855dbfec04abab
|
f0413b4a61eff16fd881632c046bb90e986b799c
|
/rsbook_code/utilities/example_graphs.py
|
c3b0717a611b9c91576e6629ab71f9c9c6014ca6
|
[
"Apache-2.0"
] |
permissive
|
krishauser/RoboticSystemsBook
|
2bd21335003a24c826ee5caf2d2f34627ff075a9
|
431b36f0c80d553e936833f0276841e3f2de6e84
|
refs/heads/master
| 2023-08-31T13:36:21.680166
| 2023-08-28T22:10:54
| 2023-08-28T22:10:54
| 146,316,598
| 220
| 44
|
Apache-2.0
| 2023-06-30T16:48:25
| 2018-08-27T15:23:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,912
|
py
|
example_graphs.py
|
from __future__ import print_function,division
from .graph import AdjListGraph
import itertools
import sys
def grid_graph(M,N,diagonals=False):
"""Makes a grid graph of size (M,N). Vertices are indices (i,j).
If diagonals=True, then diagonal edges are added.
"""
G = AdjListGraph([],[])
for i in range(M):
for j in range(N):
n = (i,j)
G.add_node(n)
for i in range(M):
for j in range(N):
n = (i,j)
if i > 0:
G.add_edge(n,(i-1,j))
if j > 0:
G.add_edge(n,(i,j-1))
if i+1 < M:
G.add_edge(n,(i+1,j))
if j+1 < N:
G.add_edge(n,(i,j+1))
if diagonals:
if i > 0 and j > 0:
G.add_edge(n,(i-1,j-1))
if i > 0 and j+1 < N:
G.add_edge(n,(i-1,j+1))
if i+1 < M and j > 0:
G.add_edge(n,(i+1,j-1))
if i+1 < M and j+1 < N:
G.add_edge(n,(i+1,j+1))
return G
def grid_node_neighbors_nd(index,diagonals=False,imin=None,imax=None,wrap=False):
"""Iterates over neighbors of a node in grid_graph_nd without actually
constructing the grid.
Args:
index (array-like): the (integer) node
diagonals (bool, optional): whether to iterate over diagonal edges. Note:
if diagonals = True, there are 3^d-1 edges per node.
imin (array-like, optional): if given, there is a lower bound on the
grid.
imax (array-like, optional): if given, there is an upper bound on the
grid (index range is (imin[i]...,imax[i]-1)).
wrap (bool or list of bools, optional): if True, the grid is allowed to
wrap in all directions. If a list of bools, the grid is allowed to
wrap in the given directions.
"""
cap = True
if imin is None and imax is None:
cap = False
wrap = False
if imin is None and wrap is not False:
        imin = [-sys.maxsize - 1]*len(index)
if imax is None and wrap is not False:
        imax = [sys.maxsize]*len(index)
in_bounds = None
enforce_bounds = None
if not cap:
pass
elif wrap is False:
in_bounds = lambda x,i: imin[i] <= x <= imax[i]-1
elif wrap is True:
enforce_bounds = lambda x,i: imax[i]-1 if x < imin[i] else (imin[i] if x >= imax[i] else x)
else:
assert hasattr(wrap,'__iter__')
assert len(wrap) == len(index),"Wrap array must have the same size as shape"
in_bounds = lambda x,i: True if wrap[i] else imin[i] <= x <= imax[i]-1
enforce_bounds = lambda x,i: x if not wrap[i] else (imax[i]-1 if x < imin[i] else (imin[i] if x >= imax[i] else x))
if diagonals:
for ofs in itertools.product(*[[-1,0,1]]*len(index)):
vn = [x+d for (x,d) in zip(index,ofs)]
if in_bounds is not None:
if not all(in_bounds(x,i) for (i,x) in enumerate(vn)):
continue
if enforce_bounds is not None:
vn = [enforce_bounds(x,i) for (i,x) in enumerate(vn)]
vn = tuple(vn)
if vn != index:
yield vn
else:
#only add axes
vn = list(index)
for i,d in enumerate(index):
vn[i] -= 1
if in_bounds is None or in_bounds(vn[i],i):
if enforce_bounds is not None:
vn[i] = enforce_bounds(vn[i],i)
yield tuple(vn)
vn[i] = d
vn[i] += 1
if in_bounds is None or in_bounds(vn[i],i):
if enforce_bounds is not None:
vn[i] = enforce_bounds(vn[i],i)
yield tuple(vn)
vn[i] = d
return
def grid_graph_nd(shape,diagonals=False,wrap=False):
"""Makes a grid graph of a given shape (d1,d2,..,dN). Vertices are
indices (i1,...,iN).
If diagonals=True, then diagonal edges are added. Note: if diagonals=True,
there are 3^d-1 edges per node.
If wrap=True, or wrap is a d-length array with some True values, the grid
is allowed to wrap in those dimensions.
Requires Numpy.
"""
import numpy as np
if hasattr(wrap,'__iter__'):
assert len(wrap) == len(shape),"Wrap array must have the same size as shape"
else:
wrap = [wrap]*len(shape)
G = AdjListGraph([],[])
for v in np.ndindex(*shape):
v = tuple(v)
G.add_node(v)
for v in np.ndindex(*shape):
if diagonals:
for ofs in itertools.product(*[[-1,0,1]]*len(shape)):
vn = [x+d for (x,d) in zip(v,ofs)]
add = True
for i,x in enumerate(vn):
if x < 0:
if wrap[i]:
x=shape[i]-1
else:
add = False
break
if x >= shape[i]:
if wrap[i]:
x=0
else:
add = False
break
if add:
vn = tuple(vn)
if vn != v:
G.add_edge(v,vn)
else:
#only add axes
for i,d in enumerate(v):
vn = list(v)
if d > 0 or wrap[i]:
vn[i] -= 1
if wrap[i] and vn[i] < 0: vn[i] = shape[i]-1
G.add_edge(v,tuple(vn))
vn[i] += 1
if d+1 < shape[i] or wrap[i]:
vn[i] += 1
if wrap[i] and vn[i] >= shape[i]: vn[i] = 0
G.add_edge(v,tuple(vn))
vn[i] -= 1
return G
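# Usage sketch (illustrative):
#   G = grid_graph(3, 4)                                       # 12-node, 4-connected grid
#   G3 = grid_graph_nd((2, 3, 4), wrap=[True, False, False])   # 3-D grid, wraps along axis 0
#   list(grid_node_neighbors_nd((0, 0), imin=[0, 0], imax=[3, 3]))
#   # -> axis-aligned neighbours of (0, 0) clipped to the 3x3 grid: [(1, 0), (0, 1)]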
|
c5bff3275643d680f55d23208a52107e2be5651b
|
96c1f13473cf224113185902edd4c9c01091e106
|
/theseus/geometry/so3.py
|
4f71bff710f8c5f42de930e2088dedf875a7696f
|
[
"MIT"
] |
permissive
|
facebookresearch/theseus
|
f1e488eb5a25f5ba74a6995911bee958b5da4cf3
|
240e1206329d42fedd40399684d6e17e455c6645
|
refs/heads/main
| 2023-08-11T07:33:12.328520
| 2023-08-02T12:58:01
| 2023-08-02T12:58:01
| 429,570,359
| 1,410
| 105
|
MIT
| 2023-08-01T14:30:01
| 2021-11-18T20:28:27
|
Python
|
UTF-8
|
Python
| false
| false
| 11,165
|
py
|
so3.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import List, Optional, Union, cast
import torch
import theseus.constants
from theseus.global_params import _THESEUS_GLOBAL_PARAMS
from torchlie.functional import SO3 as SO3_base
from .lie_group import LieGroup
from .lie_group_check import _LieGroupCheckContext
from .point_types import Point3
class SO3(LieGroup):
def __init__(
self,
quaternion: Optional[torch.Tensor] = None,
tensor: Optional[torch.Tensor] = None,
name: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
strict_checks: bool = False,
disable_checks: bool = False,
):
if quaternion is not None and tensor is not None:
raise ValueError("Please provide only one of quaternion or tensor.")
if quaternion is not None:
dtype = quaternion.dtype
super().__init__(
tensor=tensor,
name=name,
dtype=dtype,
strict_checks=strict_checks,
disable_checks=disable_checks,
)
if quaternion is not None:
self.update_from_unit_quaternion(quaternion)
@staticmethod
def rand(
*size: int,
generator: Optional[torch.Generator] = None,
dtype: Optional[torch.dtype] = None,
device: theseus.constants.DeviceType = None,
requires_grad: bool = False,
) -> "SO3":
if len(size) != 1:
raise ValueError("The size should be 1D.")
tensor = SO3_base.rand(
*size,
generator=generator,
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
return SO3(tensor=tensor, disable_checks=True)
@staticmethod
def randn(
*size: int,
generator: Optional[torch.Generator] = None,
dtype: Optional[torch.dtype] = None,
device: theseus.constants.DeviceType = None,
requires_grad: bool = False,
) -> "SO3":
if len(size) != 1:
raise ValueError("The size should be 1D.")
tensor = SO3_base.randn(
*size,
generator=generator,
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
return SO3(tensor=tensor, disable_checks=True)
@staticmethod
def _init_tensor() -> torch.Tensor: # type: ignore
return torch.eye(3, 3).view(1, 3, 3)
def update_from_unit_quaternion(self, quaternion: torch.Tensor):
self.update(self.unit_quaternion_to_SO3(quaternion))
def dof(self) -> int:
return 3
def __repr__(self) -> str:
return f"SO3(tensor={self.tensor}, name={self.name})"
def __str__(self) -> str:
with torch.no_grad():
return f"SO3(matrix={self.tensor}), name={self.name})"
def _adjoint_impl(self) -> torch.Tensor:
return self.tensor.clone()
def _project_impl(
self, euclidean_grad: torch.Tensor, is_sparse: bool = False
) -> torch.Tensor:
self._project_check(euclidean_grad, is_sparse)
if is_sparse:
return SO3_base.left_project(self.tensor, euclidean_grad)
else:
ret = self.tensor.new_zeros(euclidean_grad.shape[:-1])
temp = torch.einsum("...jk,...ji->...ik", euclidean_grad, self.tensor)
ret[..., 0] = temp[..., 2, 1] - temp[..., 1, 2]
ret[..., 1] = temp[..., 0, 2] - temp[..., 2, 0]
ret[..., 2] = temp[..., 1, 0] - temp[..., 0, 1]
return ret
@staticmethod
def _check_tensor_impl(tensor: torch.Tensor) -> bool:
with torch.no_grad():
if tensor.ndim != 3 or tensor.shape[1:] != (3, 3):
raise ValueError("SO3 data tensors can only be 3x3 matrices.")
try:
SO3_base.check_group_tensor(tensor)
except ValueError:
return False
return True
@staticmethod
def _unit_quaternion_check(quaternion: torch.Tensor):
if quaternion.ndim != 2 or quaternion.shape[1] != 4:
raise ValueError("Quaternions can only be 4-D vectors.")
checks_enabled, silent_unchecks = _LieGroupCheckContext.get_context()
if checks_enabled:
SO3_base.check_unit_quaternion(quaternion)
elif not silent_unchecks:
warnings.warn(
"Lie group checks are disabled, so the validness of unit quaternions is not "
"checked for SO3.",
RuntimeWarning,
)
@staticmethod
def _hat_matrix_check(matrix: torch.Tensor):
if matrix.ndim != 3 or matrix.shape[1:] != (3, 3):
raise ValueError("Hat matrices of SO(3) can only be 3x3 matrices")
checks_enabled, silent_unchecks = _LieGroupCheckContext.get_context()
if checks_enabled:
SO3_base.check_hat_tensor(matrix)
elif not silent_unchecks:
warnings.warn(
"Lie group checks are disabled, so the skew-symmetry of hat matrices is "
"not checked for SO3.",
RuntimeWarning,
)
@staticmethod
def exp_map(
tangent_vector: torch.Tensor, jacobians: Optional[List[torch.Tensor]] = None
) -> "SO3":
if tangent_vector.ndim != 2 or tangent_vector.shape[1] != 3:
raise ValueError("Tangent vectors of SO3 should be batched 3-D vectors.")
return SO3(
tensor=SO3_base.exp(tangent_vector, jacobians=jacobians),
disable_checks=True,
)
@staticmethod
def normalize(tensor: torch.Tensor) -> torch.Tensor:
if tensor.ndim != 3 or tensor.shape[1:] != (3, 3):
raise ValueError("SO3 data tensors can only be batched 3x3 matrices.")
return SO3_base.normalize(tensor)
def _log_map_impl(
self, jacobians: Optional[List[torch.Tensor]] = None
) -> torch.Tensor:
return SO3_base.log(self.tensor, jacobians=jacobians)
def _compose_impl(self, so3_2: LieGroup) -> "SO3":
return SO3(
tensor=SO3_base.compose(self.tensor, so3_2.tensor), strict_checks=False
)
def _inverse_impl(self, get_jacobian: bool = False) -> "SO3":
# if self.tensor is a valid SO(3), then self.tensor.transpose(1, 2)
# must be valid as well
return SO3(tensor=self.tensor.transpose(1, 2).clone(), disable_checks=True)
def to_matrix(self) -> torch.Tensor:
return self.tensor.clone()
# The quaternion takes the [w x y z] convention
def to_quaternion(self) -> torch.Tensor:
sine_axis = self.tensor.new_zeros(self.shape[0], 3)
sine_axis[:, 0] = 0.5 * (self[:, 2, 1] - self[:, 1, 2])
sine_axis[:, 1] = 0.5 * (self[:, 0, 2] - self[:, 2, 0])
sine_axis[:, 2] = 0.5 * (self[:, 1, 0] - self[:, 0, 1])
w = 0.5 * (1 + self[:, 0, 0] + self[:, 1, 1] + self[:, 2, 2]).clamp(0, 4).sqrt()
near_zero = w > 1 - _THESEUS_GLOBAL_PARAMS.get_eps("so3", "near_zero", w.dtype)
near_pi = w <= _THESEUS_GLOBAL_PARAMS.get_eps("so3", "near_pi", w.dtype)
non_zero = self.tensor.new_ones([1])
ret = self.tensor.new_zeros(self.shape[0], 4)
# theta != pi
ret[:, 0] = w
ret[:, 1:] = 0.5 * sine_axis / torch.where(near_pi, non_zero, w).view(-1, 1)
# theta ~ pi
ddiag = torch.diagonal(self.tensor, dim1=1, dim2=2)
        # Find the index of major columns and diagonals
major = torch.logical_and(
ddiag[:, 1] > ddiag[:, 0], ddiag[:, 1] > ddiag[:, 2]
) + 2 * torch.logical_and(ddiag[:, 2] > ddiag[:, 0], ddiag[:, 2] > ddiag[:, 1])
aux = torch.ones(self.shape[0], dtype=torch.bool)
sel_rows = 0.5 * (self[aux, major] + self[aux, :, major])
cosine_near_pi = 0.5 * (self[:, 0, 0] + self[:, 1, 1] + self[:, 2, 2] - 1)
sel_rows[aux, major] -= cosine_near_pi
axis = (
sel_rows
/ torch.where(
near_zero.view(-1, 1),
non_zero.view(-1, 1),
sel_rows.norm(dim=1, keepdim=True),
)
* torch.where(sine_axis[aux, major].view(-1, 1) >= 0, non_zero, -non_zero)
)
sine_half_theta = (0.5 * (1 - cosine_near_pi)).clamp(0, 1).sqrt().view(-1, 1)
ret[:, 1:] = torch.where(
near_pi.view(-1, 1), axis * sine_half_theta, ret[:, 1:]
)
return ret
@staticmethod
def hat(tangent_vector: torch.Tensor) -> torch.Tensor:
return SO3_base.hat(tangent_vector)
@staticmethod
def vee(matrix: torch.Tensor) -> torch.Tensor:
SO3._hat_matrix_check(matrix)
return SO3_base.vee(matrix)
def _rotate_shape_check(self, point: Union[Point3, torch.Tensor]):
err_msg = (
f"Point tensor to rotate must have final dimensions of shape (3,) "
f"or (3, 1), and be broadcastable with SO3, but received a tensor with "
f"shape {point.shape}, while SO3 shape is {self.shape}."
)
if isinstance(point, torch.Tensor):
if (
point.ndim not in [2, 3]
or point.shape[1] != 3
or (point.ndim == 3 and point.shape[-1] != 1)
):
raise ValueError(err_msg)
elif point.dof() != 3:
raise ValueError(err_msg)
if (
point.shape[0] != self.shape[0]
and point.shape[0] != 1
and self.shape[0] != 1
):
raise ValueError(err_msg)
# The quaternion takes the [w x y z] convention
@staticmethod
def unit_quaternion_to_SO3(quaternion: torch.Tensor) -> "SO3":
if quaternion.ndim == 1:
quaternion = quaternion.unsqueeze(0)
return SO3(
tensor=SO3_base.quaternion_to_rotation(quaternion), disable_checks=True
)
def _copy_impl(self, new_name: Optional[str] = None) -> "SO3":
# if self.tensor is a valid SO(3), so is the copy
return SO3(tensor=self.tensor.clone(), name=new_name, disable_checks=True)
# only added to avoid casting downstream
def copy(self, new_name: Optional[str] = None) -> "SO3":
return cast(SO3, super().copy(new_name=new_name))
def rotate(
self,
point: Union[Point3, torch.Tensor],
jacobians: Optional[List[torch.Tensor]] = None,
) -> Point3:
self._rotate_shape_check(point)
p = point if isinstance(point, torch.Tensor) else point.tensor
return Point3(tensor=SO3_base.transform(self.tensor, p, jacobians=jacobians))
def unrotate(
self,
point: Union[Point3, torch.Tensor],
jacobians: Optional[List[torch.Tensor]] = None,
) -> Point3:
self._rotate_shape_check(point)
p = point if isinstance(point, torch.Tensor) else point.tensor
return Point3(tensor=SO3_base.untransform(self.tensor, p, jacobians=jacobians))
rand_so3 = SO3.rand
randn_so3 = SO3.randn
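# Usage sketch (illustrative, not part of the upstream module):
#   axis_angle = torch.tensor([[0.0, 0.0, 1.5708]])   # ~90 deg about the z axis
#   rot = SO3.exp_map(axis_angle)
#   p = Point3(tensor=torch.tensor([[1.0, 0.0, 0.0]]))
#   q = rot.rotate(p)            # approximately (0, 1, 0)
#   p_back = rot.unrotate(q)     # recovers p up to numerical error
#   wxyz = rot.to_quaternion()   # unit quaternion in [w, x, y, z] order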
|
fe5ef946eb615394c41a1ad2e7eacd5dc319288b
|
d8a1f695f04f54260e7550de6fcad93103c7ef34
|
/magi/agents/tdmpc/run_tdmpc.py
|
7cdf00a5f136a62fbefea05f88a5e9a1cbfd2cd5
|
[
"Apache-2.0"
] |
permissive
|
ethanluoyc/magi
|
6192f409cd2e3b5a0b31b8153c782e7c6079defb
|
eb86f7678d991a1700b594ec073162945ad9df71
|
refs/heads/main
| 2023-05-22T23:57:30.340397
| 2023-02-07T14:18:30
| 2023-02-07T14:18:57
| 356,384,964
| 108
| 4
|
Apache-2.0
| 2023-02-07T13:31:30
| 2021-04-09T20:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,556
|
py
|
run_tdmpc.py
|
"""Example running TD-MPC on dm_control.
Run this example with
```shell
python -m magi.agents.tdmpc.run_tdmpc \
--config magi/agents/tdmpc/configs/walker.py \
--config.task=walker-walk
```
See configs/ for configurations for other environments.
"""
import functools
import os
from absl import app
from absl import flags
from absl import logging
os.environ["MUJOCO_GL"] = "egl"
# pylint: disable=wrong-import-position
import optax
import tensorflow as tf
from acme import wrappers
from acme.jax import experiments
from ml_collections import config_flags
from magi.agents import tdmpc
from magi.experiments import experiment_logger
_CONFIG = config_flags.DEFINE_config_file("config", None)
_WORKDIR = flags.DEFINE_string("workdir", None, "Where to store artifacts")
flags.mark_flag_as_required("config")
def make_logger_factory(config):
wandb_kwargs = dict(
name=config.wandb_name,
entity=config.wandb_entity,
project=config.wandb_project,
config=config.to_dict(),
tags=[config.task],
)
logger_factory = experiment_logger.LoggerFactory(
log_to_wandb=config.get("use_wandb", False),
workdir=_WORKDIR.value,
learner_time_delta=10.0,
wandb_kwargs=wandb_kwargs,
)
return logger_factory
def make_environment_factory(config):
def environment_factory(seed):
# pylint: disable=import-outside-toplevel
from dm_control import suite
domain, task = config.task.replace("-", "_").split("_", 1)
domain = dict(cup="ball_in_cup").get(domain, domain)
assert (domain, task) in suite.ALL_TASKS
env = suite.load(
domain, task, task_kwargs={"random": seed}, visualize_reward=False
)
env = wrappers.ConcatObservationWrapper(env)
env = wrappers.ActionRepeatWrapper(env, config.action_repeat)
env = wrappers.CanonicalSpecWrapper(env)
env = wrappers.SinglePrecisionWrapper(env)
return env
return environment_factory
def _make_schedule(config):
return getattr(optax, config.name)(**config.kwargs)
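# For example (sketch): a schedule config with name="linear_schedule" and
# kwargs={"init_value": 0.5, "end_value": 0.05, "transition_steps": 25000}
# resolves to optax.linear_schedule(init_value=0.5, end_value=0.05,
# transition_steps=25000); the same mechanism builds both the std and horizon
# schedules passed to TDMPCConfig below.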
def make_experiment_config(config):
environment_factory = make_environment_factory(config)
logger_factory = make_logger_factory(config)
networks_factory = functools.partial(
tdmpc.make_networks,
latent_size=config.latent_dim,
encoder_hidden_size=config.enc_dim,
mlp_hidden_size=config.mlp_dim,
)
optimizer = optax.chain(
optax.clip_by_global_norm(config.grad_clip_norm),
optax.adam(config.lr),
)
std_schedule = _make_schedule(config.std_schedule)
horizon_schedule = _make_schedule(config.horizon_schedule)
builder = tdmpc.TDMPCBuilder(
tdmpc.TDMPCConfig(
std_schedule=std_schedule,
horizon_schedule=horizon_schedule,
optimizer=optimizer,
batch_size=config.batch_size,
# One update per actor step.
samples_per_insert=config.batch_size,
samples_per_insert_tolerance_rate=0.1,
max_replay_size=config.max_buffer_size,
variable_update_period=config.variable_update_period,
per_alpha=config.per_alpha,
per_beta=config.per_beta,
discount=config.discount,
num_samples=config.num_samples,
min_std=config.min_std,
temperature=config.temperature,
momentum=config.momentum,
num_elites=config.num_elites,
iterations=config.iterations,
tau=config.tau,
seed_steps=config.seed_steps,
mixture_coef=config.mixture_coef,
horizon=config.horizon,
consistency_coef=config.consistency_coef,
reward_coef=config.reward_coef,
value_coef=config.value_coef,
rho=config.rho,
)
)
return experiments.ExperimentConfig(
builder=builder,
network_factory=networks_factory,
environment_factory=environment_factory,
max_num_actor_steps=config.train_steps,
seed=config.seed,
logger_factory=logger_factory,
checkpointing=None,
)
def main(_):
tf.config.set_visible_devices([], "GPU")
config = _CONFIG.value
logging.info("Config:\n%s", config)
experiment_config = make_experiment_config(config)
experiments.run_experiment(
experiment_config,
eval_every=config.eval_freq,
num_eval_episodes=config.eval_episodes,
)
if __name__ == "__main__":
app.run(main)
|
0c56cc1a9c1b31e947e26cefaec2c88c27e54341
|
5105403f2b75990654519438d8ceabcf80962ebf
|
/examples/models/gauges.py
|
e2dbca6ddd5bab9c5e8561f8ec1ab1d982433386
|
[
"BSD-3-Clause"
] |
permissive
|
bokeh/bokeh
|
ed1d81eb07d27d27c6710c9fec9114886047f528
|
310cb2cbeabc4c4b8180cbda566df16039737cdc
|
refs/heads/branch-3.3
| 2023-08-31T23:53:06.537061
| 2023-08-30T03:43:05
| 2023-08-30T03:43:05
| 3,834,332
| 17,174
| 5,251
|
BSD-3-Clause
| 2023-09-14T11:37:23
| 2012-03-26T15:40:01
|
Python
|
UTF-8
|
Python
| false
| false
| 4,351
|
py
|
gauges.py
|
from math import pi
from typing import Any, Literal
from bokeh.core.properties import expr, value
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models import (Arc, Circle, ColumnDataSource, Plot,
PolarTransform, Range1d, Ray, Text)
from bokeh.util.browser import view
xdr = Range1d(start=-1.25, end=1.25)
ydr = Range1d(start=-1.25, end=1.25)
plot = Plot(x_range=xdr, y_range=ydr, width=600, height=600)
plot.toolbar_location = None
plot.outline_line_color = None
start_angle = pi + pi/4
end_angle = -pi/4
max_kmh = 250
max_mph = max_kmh*0.621371
major_step, minor_step = 25, 5
plot.add_glyph(Circle(x=0, y=0, radius=1.00, fill_color="white", line_color="black"))
plot.add_glyph(Circle(x=0, y=0, radius=0.05, fill_color="gray", line_color="black"))
plot.add_glyph(Text(x=0, y=+0.15, text=value("km/h"), text_color="red", text_align="center", text_baseline="bottom", text_font_style="bold"))
plot.add_glyph(Text(x=0, y=-0.15, text=value("mph"), text_color="blue", text_align="center", text_baseline="top", text_font_style="bold"))
def data(val: float):
"""Shorthand to override default units with "data", for e.g. `Ray.length`. """
return value(val, units="data")
def speed_to_angle(speed: float, units: str) -> float:
max_speed = max_kmh if units == "kmh" else max_mph
speed = min(max(speed, 0), max_speed)
total_angle = start_angle - end_angle
angle = total_angle*float(speed)/max_speed
return start_angle - angle
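# Worked example: the dial spans total_angle = start_angle - end_angle = 3*pi/2.
# A speed of 125 km/h (half of max_kmh) therefore maps to
# 5*pi/4 - 0.5 * 3*pi/2 = pi/2, i.e. the needle points straight up.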
def add_needle(speed: float, units: str) -> None:
angle = speed_to_angle(speed, units)
plot.add_glyph(Ray(x=0, y=0, length=data(0.75), angle=angle, line_color="black", line_width=3))
plot.add_glyph(Ray(x=0, y=0, length=data(0.10), angle=angle-pi, line_color="black", line_width=3))
def add_gauge(radius: float, max_value: float, length: float, direction: Literal[-1, 1], color: Any, major_step: int, minor_step: int) -> None:
major_angles, minor_angles = [], []
total_angle = start_angle - end_angle
major_angle_step = float(major_step)/max_value*total_angle
minor_angle_step = float(minor_step)/max_value*total_angle
major_angle = 0
while major_angle <= total_angle:
major_angles.append(start_angle - major_angle)
major_angle += major_angle_step
minor_angle = 0
while minor_angle <= total_angle:
minor_angles.append(start_angle - minor_angle)
minor_angle += minor_angle_step
major_labels = [ major_step*i for i, _ in enumerate(major_angles) ]
n = major_step/minor_step
minor_angles = [ x for i, x in enumerate(minor_angles) if i % n != 0 ]
glyph = Arc(x=0, y=0, radius=radius, start_angle=start_angle, end_angle=end_angle, direction="clock", line_color=color, line_width=2)
plot.add_glyph(glyph)
rotation = 0 if direction == 1 else -pi
angles = [ angle + rotation for angle in major_angles ]
source = ColumnDataSource(dict(major_angles=major_angles, angle=angles))
t = PolarTransform(radius=radius, angle="major_angles")
glyph = Ray(x=expr(t.x), y=expr(t.y), length=data(length), angle="angle", line_color=color, line_width=2)
plot.add_glyph(source, glyph)
angles = [ angle + rotation for angle in minor_angles ]
source = ColumnDataSource(dict(minor_angles=minor_angles, angle=angles))
t = PolarTransform(radius=radius, angle="minor_angles")
glyph = Ray(x=expr(t.x), y=expr(t.y), length=data(length/2), angle="angle", line_color=color, line_width=1)
plot.add_glyph(source, glyph)
text_angles = [ angle - pi/2 for angle in major_angles ]
source = ColumnDataSource(dict(major_angles=major_angles, angle=text_angles, text=major_labels))
t = PolarTransform(radius=radius + 2*length*direction, angle="major_angles")
glyph = Text(x=expr(t.x), y=expr(t.y), angle="angle", text="text", text_align="center", text_baseline="middle")
plot.add_glyph(source, glyph)
add_gauge(0.75, max_kmh, 0.05, +1, "red", major_step, minor_step)
add_gauge(0.70, max_mph, 0.05, -1, "blue", major_step, minor_step)
add_needle(55, "kmh")
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
doc.validate()
filename = "gauges.html"
with open(filename, "w") as f:
f.write(file_html(doc, title="Gauges"))
print(f"Wrote {filename}")
view(filename)
|
ad01c2dc0611ba49b383a5fae307358cbec4c060
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Tensorflow/source/tensorflow/contrib/integrate/python/ops/odes.py
|
b4a99867ed46897f60be3f230838c3f576d5455e
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 24,150
|
py
|
odes.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ODE solvers for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
_ButcherTableau = collections.namedtuple('_ButcherTableau',
'alpha beta c_sol c_mid c_error')
# Parameters from Shampine (1986), section 4.
_DORMAND_PRINCE_TABLEAU = _ButcherTableau(
alpha=[1 / 5, 3 / 10, 4 / 5, 8 / 9, 1., 1.],
beta=[
[1 / 5],
[3 / 40, 9 / 40],
[44 / 45, -56 / 15, 32 / 9],
[19372 / 6561, -25360 / 2187, 64448 / 6561, -212 / 729],
[9017 / 3168, -355 / 33, 46732 / 5247, 49 / 176, -5103 / 18656],
[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84],
],
c_sol=[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84, 0],
c_mid=[
6025192743 / 30085553152 / 2, 0, 51252292925 / 65400821598 / 2,
-2691868925 / 45128329728 / 2, 187940372067 / 1594534317056 / 2,
-1776094331 / 19743644256 / 2, 11237099 / 235043384 / 2
],
c_error=[
1951 / 21600 - 35 / 384,
0,
22642 / 50085 - 500 / 1113,
451 / 720 - 125 / 192,
-12231 / 42400 - -2187 / 6784,
649 / 6300 - 11 / 84,
1 / 60,
],)
def _possibly_nonzero(x):
return isinstance(x, ops.Tensor) or x != 0
def _scaled_dot_product(scale, xs, ys, name=None):
"""Calculate a scaled, vector inner product between lists of Tensors."""
with ops.name_scope(name, 'scaled_dot_product', [scale, xs, ys]) as scope:
# Some of the parameters in our Butcher tableau include zeros. Using
# _possibly_nonzero lets us avoid wasted computation.
return math_ops.add_n(
[(scale * x) * y for x, y in zip(xs, ys)
if _possibly_nonzero(x) or _possibly_nonzero(y)],
name=scope)
def _dot_product(xs, ys, name=None):
"""Calculate the vector inner product between two lists of Tensors."""
with ops.name_scope(name, 'dot_product', [xs, ys]) as scope:
return math_ops.add_n([x * y for x, y in zip(xs, ys)], name=scope)
def _runge_kutta_step(func,
y0,
f0,
t0,
dt,
tableau=_DORMAND_PRINCE_TABLEAU,
name=None):
"""Take an arbitrary Runge-Kutta step and estimate error.
Args:
func: Function to evaluate like `func(y, t)` to compute the time derivative
of `y`.
y0: Tensor initial value for the state.
f0: Tensor initial value for the derivative, computed from `func(y0, t0)`.
t0: float64 scalar Tensor giving the initial time.
dt: float64 scalar Tensor giving the size of the desired time step.
tableau: optional _ButcherTableau describing how to take the Runge-Kutta
step.
name: optional name for the operation.
Returns:
Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
calculating these terms.
"""
with ops.name_scope(name, 'runge_kutta_step', [y0, f0, t0, dt]) as scope:
y0 = ops.convert_to_tensor(y0, name='y0')
f0 = ops.convert_to_tensor(f0, name='f0')
t0 = ops.convert_to_tensor(t0, name='t0')
dt = ops.convert_to_tensor(dt, name='dt')
dt_cast = math_ops.cast(dt, y0.dtype)
k = [f0]
for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
ti = t0 + alpha_i * dt
yi = y0 + _scaled_dot_product(dt_cast, beta_i, k)
k.append(func(yi, ti))
if not (tableau.c_sol[-1] == 0 and tableau.c_sol == tableau.beta[-1]):
# This property (true for Dormand-Prince) lets us save a few FLOPs.
yi = y0 + _scaled_dot_product(dt_cast, tableau.c_sol, k)
y1 = array_ops.identity(yi, name='%s/y1' % scope)
f1 = array_ops.identity(k[-1], name='%s/f1' % scope)
y1_error = _scaled_dot_product(
dt_cast, tableau.c_error, k, name='%s/y1_error' % scope)
return (y1, f1, y1_error, k)
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
"""Fit coefficients for 4th order polynomial interpolation.
Args:
y0: function value at the start of the interval.
y1: function value at the end of the interval.
y_mid: function value at the mid-point of the interval.
f0: derivative value at the start of the interval.
f1: derivative value at the end of the interval.
dt: width of the interval.
Returns:
List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
`p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
between 0 (start of interval) and 1 (end of interval).
"""
# a, b, c, d, e = sympy.symbols('a b c d e')
# x, dt, y0, y1, y_mid, f0, f1 = sympy.symbols('x dt y0 y1 y_mid f0 f1')
# p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e
# sympy.solve([p.subs(x, 0) - y0,
# p.subs(x, 1 / 2) - y_mid,
# p.subs(x, 1) - y1,
# (p.diff(x) / dt).subs(x, 0) - f0,
# (p.diff(x) / dt).subs(x, 1) - f1],
# [a, b, c, d, e])
# {a: -2.0*dt*f0 + 2.0*dt*f1 - 8.0*y0 - 8.0*y1 + 16.0*y_mid,
# b: 5.0*dt*f0 - 3.0*dt*f1 + 18.0*y0 + 14.0*y1 - 32.0*y_mid,
# c: -4.0*dt*f0 + dt*f1 - 11.0*y0 - 5.0*y1 + 16.0*y_mid,
# d: dt*f0,
# e: y0}
a = _dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0, f1, y0, y1, y_mid])
b = _dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0, f1, y0, y1, y_mid])
c = _dot_product([-4 * dt, dt, -11, -5, 16], [f0, f1, y0, y1, y_mid])
d = dt * f0
e = y0
return [a, b, c, d, e]
def _interp_fit_rk(y0, y1, k, dt, tableau=_DORMAND_PRINCE_TABLEAU):
"""Fit an interpolating polynomial to the results of a Runge-Kutta step."""
with ops.name_scope('interp_fit_rk'):
dt = math_ops.cast(dt, y0.dtype)
y_mid = y0 + _scaled_dot_product(dt, tableau.c_mid, k)
f0 = k[0]
f1 = k[-1]
return _interp_fit(y0, y1, y_mid, f0, f1, dt)
def _interp_evaluate(coefficients, t0, t1, t):
"""Evaluate polynomial interpolation at the given time point.
Args:
coefficients: list of Tensor coefficients as created by `interp_fit`.
t0: scalar float64 Tensor giving the start of the interval.
t1: scalar float64 Tensor giving the end of the interval.
t: scalar float64 Tensor giving the desired interpolation point.
Returns:
Polynomial interpolation of the coefficients at time `t`.
"""
with ops.name_scope('interp_evaluate'):
t0 = ops.convert_to_tensor(t0)
t1 = ops.convert_to_tensor(t1)
t = ops.convert_to_tensor(t)
dtype = coefficients[0].dtype
assert_op = control_flow_ops.Assert(
(t0 <= t) & (t <= t1),
['invalid interpolation, fails `t0 <= t <= t1`:', t0, t, t1])
with ops.control_dependencies([assert_op]):
x = math_ops.cast((t - t0) / (t1 - t0), dtype)
xs = [constant_op.constant(1, dtype), x]
for _ in range(2, len(coefficients)):
xs.append(xs[-1] * x)
return _dot_product(coefficients, reversed(xs))
def _optimal_step_size(last_step,
error_ratio,
safety=0.9,
ifactor=10.0,
dfactor=0.2,
order=5,
name=None):
"""Calculate the optimal size for the next Runge-Kutta step."""
with ops.name_scope(name, 'optimal_step_size', [last_step,
error_ratio]) as scope:
error_ratio = math_ops.cast(error_ratio, last_step.dtype)
exponent = math_ops.cast(1 / order, last_step.dtype)
# this looks more complex than necessary, but importantly it keeps
# error_ratio in the numerator so we can't divide by zero:
factor = math_ops.maximum(1 / ifactor,
math_ops.minimum(error_ratio**exponent / safety,
1 / dfactor))
return math_ops.div(last_step, factor, name=scope)
def _abs_square(x):
if x.dtype.is_complex:
return math_ops.square(math_ops.real(x)) + math_ops.square(math_ops.imag(x))
else:
return math_ops.square(x)
def _ta_append(tensor_array, value):
"""Append a value to the end of a tf.TensorArray."""
return tensor_array.write(tensor_array.size(), value)
class _RungeKuttaState(
collections.namedtuple('_RungeKuttaState',
'y1, f1, t0, t1, dt, interp_coeff')):
"""Saved state of the Runge Kutta solver.
Attributes:
y1: Tensor giving the function value at the end of the last time step.
f1: Tensor giving derivative at the end of the last time step.
t0: scalar float64 Tensor giving start of the last time step.
t1: scalar float64 Tensor giving end of the last time step.
dt: scalar float64 Tensor giving the size for the next time step.
    interp_coeff: list of Tensors giving coefficients for polynomial
interpolation between `t0` and `t1`.
"""
class _History(
collections.namedtuple('_History', 'integrate_points, error_ratio')):
"""Saved integration history for use in `info_dict`.
Attributes:
integrate_points: tf.TensorArray storing integrating time points.
error_ratio: tf.TensorArray storing computed error ratios at each
integration step.
"""
def _assert_increasing(t):
assert_increasing = control_flow_ops.Assert(
math_ops.reduce_all(t[1:] > t[:-1]), ['`t` must be monotonic increasing'])
return ops.control_dependencies([assert_increasing])
def _check_input_types(t, y0):
if not (y0.dtype.is_floating or y0.dtype.is_complex):
raise TypeError('`y0` must have a floating point or complex floating '
'point dtype')
if not t.dtype.is_floating:
raise TypeError('`t` must have a floating point dtype')
def _dopri5(func,
y0,
t,
rtol,
atol,
full_output=False,
first_step=None,
safety=0.9,
ifactor=10.0,
dfactor=0.2,
max_num_steps=1000,
name=None):
"""Solve an ODE for `odeint` using method='dopri5'."""
if first_step is None:
# at some point, we might want to switch to picking the step size
# automatically
first_step = 1.0
with ops.name_scope(name, 'dopri5', [
y0, t, rtol, atol, safety, ifactor, dfactor, max_num_steps
]) as scope:
first_step = ops.convert_to_tensor(
first_step, dtype=t.dtype, name='first_step')
safety = ops.convert_to_tensor(safety, dtype=t.dtype, name='safety')
ifactor = ops.convert_to_tensor(ifactor, dtype=t.dtype, name='ifactor')
dfactor = ops.convert_to_tensor(dfactor, dtype=t.dtype, name='dfactor')
max_num_steps = ops.convert_to_tensor(
max_num_steps, dtype=dtypes.int32, name='max_num_steps')
def adaptive_runge_kutta_step(rk_state, history, n_steps):
"""Take an adaptive Runge-Kutta step to integrate the ODE."""
y0, f0, _, t0, dt, interp_coeff = rk_state
with ops.name_scope('assertions'):
check_underflow = control_flow_ops.Assert(t0 + dt > t0,
['underflow in dt', dt])
check_max_num_steps = control_flow_ops.Assert(
n_steps < max_num_steps, ['max_num_steps exceeded'])
check_numerics = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.is_finite(abs(y0))),
['non-finite values in state `y`', y0])
with ops.control_dependencies(
[check_underflow, check_max_num_steps, check_numerics]):
y1, f1, y1_error, k = _runge_kutta_step(func, y0, f0, t0, dt)
with ops.name_scope('error_ratio'):
# We use the same approach as the dopri5 fortran code.
error_tol = atol + rtol * math_ops.maximum(abs(y0), abs(y1))
tensor_error_ratio = _abs_square(y1_error) / _abs_square(error_tol)
# Could also use reduce_maximum here.
error_ratio = math_ops.sqrt(math_ops.reduce_mean(tensor_error_ratio))
accept_step = error_ratio <= 1
with ops.name_scope('update/rk_state'):
# If we don't accept the step, the _RungeKuttaState will be useless
# (covering a time-interval of size 0), but that's OK, because in such
# cases we always immediately take another Runge-Kutta step.
y_next = control_flow_ops.cond(accept_step, lambda: y1, lambda: y0)
f_next = control_flow_ops.cond(accept_step, lambda: f1, lambda: f0)
t_next = control_flow_ops.cond(accept_step, lambda: t0 + dt, lambda: t0)
interp_coeff = control_flow_ops.cond(
accept_step, lambda: _interp_fit_rk(y0, y1, k, dt),
lambda: interp_coeff)
dt_next = _optimal_step_size(dt, error_ratio, safety, ifactor, dfactor)
rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next,
interp_coeff)
with ops.name_scope('update/history'):
history = _History(
_ta_append(history.integrate_points, t0 + dt),
_ta_append(history.error_ratio, error_ratio))
return rk_state, history, n_steps + 1
def interpolate(solution, history, rk_state, i):
"""Interpolate through the next time point, integrating as necessary."""
with ops.name_scope('interpolate'):
rk_state, history, _ = control_flow_ops.while_loop(
lambda rk_state, *_: t[i] > rk_state.t1,
adaptive_runge_kutta_step, (rk_state, history, 0),
name='integrate_loop')
y = _interp_evaluate(rk_state.interp_coeff, rk_state.t0, rk_state.t1,
t[i])
solution = solution.write(i, y)
return solution, history, rk_state, i + 1
with _assert_increasing(t):
num_times = array_ops.size(t)
solution = tensor_array_ops.TensorArray(
y0.dtype, size=num_times).write(0, y0)
history = _History(
integrate_points=tensor_array_ops.TensorArray(
t.dtype, size=0, dynamic_size=True),
error_ratio=tensor_array_ops.TensorArray(
rtol.dtype, size=0, dynamic_size=True))
rk_state = _RungeKuttaState(
y0, func(y0, t[0]), t[0], t[0], first_step, interp_coeff=[y0] * 5)
solution, history, _, _ = control_flow_ops.while_loop(
lambda _, __, ___, i: i < num_times,
interpolate, (solution, history, rk_state, 1),
name='interpolate_loop')
y = solution.stack(name=scope)
y.set_shape(t.get_shape().concatenate(y0.get_shape()))
if not full_output:
return y
else:
integrate_points = history.integrate_points.stack()
info_dict = {
'num_func_evals': 6 * array_ops.size(integrate_points) + 1,
'integrate_points': integrate_points,
'error_ratio': history.error_ratio.stack()
}
return (y, info_dict)
def odeint(func,
y0,
t,
rtol=1e-6,
atol=1e-12,
method=None,
options=None,
full_output=False,
name=None):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(y, t), y(t[0]) = y0
```
where y is a Tensor of any shape.
For example:
```
# solve `dy/dt = -y`, corresponding to exponential decay
tf.contrib.integrate.odeint(lambda y, _: -y, 1.0, [0, 1, 2])
=> [1, exp(-1), exp(-2)]
```
Output dtypes and numerical precision are based on the dtypes of the inputs
`y0` and `t`.
Currently, implements 5th order Runge-Kutta with adaptive step size control
and dense output, using the Dormand-Prince method. Similar to the 'dopri5'
method of `scipy.integrate.ode` and MATLAB's `ode45`.
Based on: Shampine, Lawrence F. (1986), "Some Practical Runge-Kutta Formulas",
Mathematics of Computation, American Mathematical Society, 46 (173): 135-150,
doi:10.2307/2008219
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. If not provided as a Tensor, converted to a Tensor with
float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use. Currently,
the only valid option is `'dopri5'`.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set. For
`'dopri5'`, valid options include:
* first_step: an initial guess for the size of the first integration
(current default: 1.0, but may later be changed to use heuristics based
on the gradient).
* safety: safety factor for adaptive step control, generally a constant
in the range 0.8-1 (default: 0.9).
* ifactor: maximum factor by which the adaptive step may be increased
(default: 10.0).
      * dfactor: maximum factor by which the adaptive step may be decreased
(default: 0.2).
* max_num_steps: integer maximum number of integrate steps between time
points in `t` (default: 1000).
full_output: optional boolean. If True, `odeint` returns a tuple
`(y, info_dict)` describing the integration process.
name: Optional name for this operation.
Returns:
y: (N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
info_dict: only if `full_output == True`. A dict with the following values:
* num_func_evals: integer Tensor counting the number of function
evaluations.
* integrate_points: 1D float64 Tensor with the upper bound of each
integration time step.
* error_ratio: 1D float Tensor with the estimated ratio of the integration
      error to the error tolerance at each integration step. A ratio greater
than 1 corresponds to rejected steps.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
if method is not None and method != 'dopri5':
raise ValueError('invalid method: %r' % method)
if options is None:
options = {}
elif method is None:
raise ValueError('cannot supply `options` without specifying `method`')
with ops.name_scope(name, 'odeint', [y0, t, rtol, atol]) as scope:
# TODO(shoyer): use nest.flatten (like tf.while_loop) to allow `y0` to be an
# arbitrarily nested tuple. This will help performance and usability by
# avoiding the need to pack/unpack in user functions.
y0 = ops.convert_to_tensor(y0, name='y0')
t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
_check_input_types(t, y0)
error_dtype = abs(y0).dtype
rtol = ops.convert_to_tensor(rtol, dtype=error_dtype, name='rtol')
atol = ops.convert_to_tensor(atol, dtype=error_dtype, name='atol')
return _dopri5(
func,
y0,
t,
rtol=rtol,
atol=atol,
full_output=full_output,
name=scope,
**options)
class _FixedGridIntegrator(six.with_metaclass(abc.ABCMeta)):
"""Base class for fixed-grid ODE integrators."""
def integrate(self, evol_func, y0, time_grid):
time_delta_grid = time_grid[1:] - time_grid[:-1]
scan_func = self._make_scan_func(evol_func)
y_grid = functional_ops.scan(scan_func, (time_grid[:-1], time_delta_grid),
y0)
return array_ops.concat([[y0], y_grid], axis=0)
def _make_scan_func(self, evol_func):
def scan_func(y, t_and_dt):
t, dt = t_and_dt
dy = self._step_func(evol_func, t, dt, y)
dy = math_ops.cast(dy, dtype=y.dtype)
return y + dy
return scan_func
@abc.abstractmethod
def _step_func(self, evol_func, t, dt, y):
pass
class _MidpointFixedGridIntegrator(_FixedGridIntegrator):
def _step_func(self, evol_func, t, dt, y):
dt_cast = math_ops.cast(dt, y.dtype)
# yn1 = yn + h * f(tn + h/2, yn + f(tn, yn) * h/2)
return dt_cast * evol_func(y + evol_func(y, t) * dt_cast / 2, t + dt / 2)
class _RK4FixedGridIntegrator(_FixedGridIntegrator):
def _step_func(self, evol_func, t, dt, y):
k1 = evol_func(y, t)
half_step = t + dt / 2
dt_cast = math_ops.cast(dt, y.dtype)
k2 = evol_func(y + dt_cast * k1 / 2, half_step)
k3 = evol_func(y + dt_cast * k2 / 2, half_step)
k4 = evol_func(y + dt_cast * k3, t + dt)
return math_ops.add_n([k1, 2 * k2, 2 * k3, k4]) * (dt_cast / 6)
def odeint_fixed(func, y0, t, method='rk4', name=None):
"""ODE integration on a fixed grid (with no step size control).
Useful in certain scenarios to avoid the overhead of adaptive step size
control, e.g. when differentiation of the integration result is desired and/or
the time grid is known a priori to be sufficient.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype.
method: One of 'midpoint' or 'rk4'.
name: Optional name for the resulting operation.
Returns:
y: (N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: Upon caller errors.
"""
with ops.name_scope(name, 'odeint_fixed', [y0, t]):
t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
y0 = ops.convert_to_tensor(y0, name='y0')
_check_input_types(t, y0)
with _assert_increasing(t):
with ops.name_scope(method):
if method == 'midpoint':
return _MidpointFixedGridIntegrator().integrate(func, y0, t)
elif method == 'rk4':
return _RK4FixedGridIntegrator().integrate(func, y0, t)
else:
raise ValueError('method not supported: {!s}'.format(method))
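# Usage sketch (illustrative, assuming `import tensorflow as tf` in the caller):
#   t = tf.linspace(0.0, 2.0, 11)
#   y_fixed = odeint_fixed(lambda y, t: -y, 1.0, t, method='rk4')
#   y_adaptive = odeint(lambda y, _: -y, 1.0, t, rtol=1e-6, atol=1e-12)
# Both approximate exp(-t); the fixed-grid solver trades adaptive step-size
# control for lower overhead and straightforward differentiation through the
# integration.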
|
8c736ffd4a5a1d0da516f50e497ee45122c28cb3
|
290dbf0107d93ebc8d50790b267f9552c13d810f
|
/generator/data_file_parser.py
|
c0dbb9cb5f8970b5a48f0e353fa6d811df2a88af
|
[
"MIT"
] |
permissive
|
codebox/homoglyph
|
a01cb0de3570b6ad8a0bdb8ff7a50e10374a4a51
|
0209d35fe8ad79348b520401da8affe8df188909
|
refs/heads/master
| 2022-09-30T13:17:52.559898
| 2022-09-07T09:45:54
| 2022-09-07T09:45:54
| 45,297,276
| 472
| 63
|
MIT
| 2022-09-07T10:20:25
| 2015-10-31T10:14:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
data_file_parser.py
|
import os
import os.path
class DataFileLine:
def __init__(self, text):
self.text = text.strip()
self.parts = self.text.split(';', 2)
def has_data(self):
return (not self.text.startswith('#')) and len(self.parts) >= 2
def _get_char_from_code(self, code):
return chr(int(code.strip(), 16))
def get_chars(self):
return self._get_char_from_code(self.parts[0]), self._get_char_from_code(self.parts[1])
class DataFileParser:
def __init__(self, file_path):
self.file_path = file_path
def parse(self):
char_pairs = []
with open(self.file_path, encoding='utf-8') as f:
for line_text in f:
line = DataFileLine(line_text)
if line.has_data():
try:
char_pairs.append(line.get_chars())
                    except (ValueError, OverflowError):  # skip malformed code points
pass
return char_pairs
class DataFileDir:
def __init__(self, dir_name):
self.dir_name = dir_name
def parse_all(self, char_manager):
for file in os.listdir(self.dir_name):
char_pairs = DataFileParser(os.path.join(self.dir_name, file)).parse()
for pair in char_pairs:
char_manager.add_pair(*pair)
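# Usage sketch (illustrative; the file/directory names and `char_manager` are placeholders):
#   pairs = DataFileParser('data/some_homoglyph_file.txt').parse()   # [(char_a, char_b), ...]
#   DataFileDir('data').parse_all(char_manager)   # char_manager must expose add_pair(a, b)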
|
cd47c536a7c8cf928d0a53d8cc6ea63079d7cd76
|
e2403814bd7ab3a3746e98bd8489cb188e790e65
|
/donkey_gym/core/fps.py
|
5b2348df79afa47590ccda1d7c3575da8442c4f4
|
[
"MIT"
] |
permissive
|
araffin/learning-to-drive-in-5-minutes
|
1cd24206201ef7098a2b1a9428528e78d37c572a
|
9f20079c0a0091cc13c9e788239a879388c2c50e
|
refs/heads/master
| 2022-05-04T10:19:35.267107
| 2022-04-05T21:31:25
| 2022-04-05T21:31:25
| 157,005,049
| 306
| 101
|
MIT
| 2020-11-06T16:14:35
| 2018-11-10T17:34:16
|
Python
|
UTF-8
|
Python
| false
| false
| 671
|
py
|
fps.py
|
# Original author: Tawn Kramer
import time
class FPSTimer(object):
"""
Helper function to monitor the speed of the control.
:param verbose: (int)
"""
def __init__(self, verbose=0):
self.start_time = time.time()
self.iter = 0
self.verbose = verbose
def reset(self):
self.start_time = time.time()
self.iter = 0
def on_frame(self):
self.iter += 1
if self.iter == 100:
end_time = time.time()
if self.verbose >= 1:
print('{:.2f} fps'.format(100.0 / (end_time - self.start_time)))
self.start_time = time.time()
self.iter = 0
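# Usage sketch (illustrative):
#   timer = FPSTimer(verbose=1)
#   for _ in range(1000):
#       step_environment()   # placeholder for the per-frame work
#       timer.on_frame()     # prints the measured FPS every 100 frames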
|
d99bcda596de27088a3445311db16d3505bb4873
|
9468849850c7c2b2040835eb9496bfb716a98c21
|
/cea/plots/base.py
|
87a9b630037f4145022c31836d02c8c39f6addfe
|
[
"MIT"
] |
permissive
|
architecture-building-systems/CityEnergyAnalyst
|
e6532c0c794538dbb665366ccf6d783e0d9d1345
|
b84bcefdfdfc2bc0e009b5284b74391a957995ac
|
refs/heads/master
| 2023-08-30T19:57:47.445797
| 2023-08-25T13:30:28
| 2023-08-25T13:30:28
| 49,491,341
| 166
| 60
|
MIT
| 2023-09-11T11:10:00
| 2016-01-12T10:02:17
|
Python
|
UTF-8
|
Python
| false
| false
| 14,626
|
py
|
base.py
|
"""
Implements base classes to derive plot classes from. The code in py:mod:`cea.plots.categories` uses
py:class:`cea.plots.base.PlotBase` to figure out the list of plots in a category.
"""
import os
import re
import jinja2
import cea.config
import cea.inputlocator
from cea import MissingInputDataException
from cea.plots.variable_naming import COLOR, NAMING
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
class PlotBase(object):
"""A base class for plots containing helper methods used by all plots."""
# override these in plot subclasses!
name = None # a label to name the plot
category_name = None # name of the category this plot belongs to (can be inherited from category base plot)
category_path = None # a relative path for outputting the plot to (FIXME: maybe we remove this later on)
expected_parameters = {} # maps parameter-name -> "section:parameter"
@classmethod
def id(cls):
        name = re.sub(r'\s+\(.*\)', '', cls.name)  # remove parentheses
return name.lower().replace(' ', '-').replace('/', '-') # use for js/html etc.
def __init__(self, project, parameters, cache):
self.cache = cache # a PlotCache implementation for reading cached data
self.project = project # full path to the project this plot belongs to
self.category_path = None # override this in the __init__.py subclasses for each category (see cea/plots/demand/__init__.py for an example)
# self.analysis_fields = None # override this in the plot subclasses! set it to a list of fields in self.data
# self.input_files = [] # override this in the plot subclasses! set it to a list of tuples (locator.method, args)
self.parameters = parameters
self.buildings = self.process_buildings_parameter() if 'buildings' in self.expected_parameters else None
for parameter_name in self.expected_parameters:
# Try to load missing parameters with default values
if parameter_name not in parameters:
try:
self.parameters[parameter_name] = cea.config.Configuration(cea.config.DEFAULT_CONFIG).get(
self.expected_parameters[parameter_name])
except Exception:
import traceback
traceback.print_exc()
assert parameter_name in parameters, "Missing parameter {}".format(parameter_name)
self.timeframe = self.parameters['timeframe'] if 'timeframe' in self.expected_parameters else None
def missing_input_files(self):
"""Return the list of missing input files for this plot"""
result = []
for locator_method, args in self.input_files:
if not os.path.exists(locator_method(*args)):
result.append((locator_method, args))
return result
@property
def locator(self):
"""
:rtype: cea.inputlocator.InputLocator
"""
return cea.inputlocator.InputLocator(os.path.join(self.project, self.parameters['scenario-name']))
@property
def layout(self):
# override this in the plot subclasses! set it to a plotly.graph_objs.Layout object
return None
@property
def title(self):
"""Override the version in PlotBase"""
if set(self.buildings) != set(self.locator.get_zone_building_names()):
if len(self.buildings) == 1:
return "%s for Building %s" % (self.name, self.buildings[0])
else:
return "%s for Selected Buildings" % self.name
return "%s for District" % self.name
def totals_bar_plot(self):
"""Creates a plot based on the totals data in percentages."""
import plotly.graph_objs
traces = []
data = self.data
data['total'] = data[self.analysis_fields].sum(axis=1)
data = data.sort_values(by='total', ascending=False) # this will get the maximum value to the left
for field in self.analysis_fields:
y = data[field]
total_percent = (y / data['total'] * 100).round(2).values
total_percent_txt = ["(%.2f %%)" % x for x in total_percent]
name = NAMING[field]
trace = plotly.graph_objs.Bar(x=data["Name"], y=y, name=name, marker=dict(color=COLOR[field]))
traces.append(trace)
return traces
@property
def output_path(self):
"""The output path to use for the solar-potential plots"""
assert self.name, "Attribute 'name' not defined for this plot (%s)" % self.__class__
assert self.category_path, "Attribute 'category_path' not defined for this plot(%s)" % self.__class__
if len(self.buildings) == 1:
prefix = 'Building_%s' % self.buildings[0]
elif len(self.buildings) < len(self.locator.get_zone_building_names()):
prefix = 'Selected_Buildings'
else:
prefix = 'District'
file_name = "%s_%s" % (prefix, self.id())
return self.locator.get_timeseries_plots_file(file_name, self.category_path)
def remove_unused_fields(self, data, fields):
"""
Helper method that, given a data frame and a list of fields in that data frame, returns the subset of those
fields that actually have data.
FIXME: what about columns with negative values?
"""
import numpy as np
fields = [field for field in fields if field in data.columns]
return [field for field in fields if np.isclose(data[field].sum(), 1e-8) == False]
def calc_graph(self):
"""Calculate a plotly Data object as to be passed to the data attribute of Figure"""
raise NotImplementedError('Subclass needs to implement calc_graph for plot!')
def calc_table(self):
"""Calculates a pandas.Dataframe to display as table."""
raise NotImplementedError('This plot has no table')
def plot(self, auto_open=False):
"""Plots the graphs to the filename (see output_path)"""
if self.missing_input_files():
raise MissingInputDataException(
"Following input files are missing: {input_files}".format(input_files=self.missing_input_files()))
# PLOT
template_path = os.path.join(os.path.dirname(__file__), 'plot.html')
with open(template_path, "r") as fp:
template = jinja2.Template(fp.read())
plot_html = template.render(plot_div=self.plot_div(), table_div=self.table_div(), title=self.title)
with open(self.output_path, 'w') as f:
f.write(plot_html)
print("Plotted '%s' to %s" % (self.name, self.output_path))
if auto_open:
import webbrowser
webbrowser.open(self.output_path)
def plot_div(self):
"""Return the plot as an html <div/> for use in the dashboard. Override this method in subclasses"""
if self.missing_input_files():
raise MissingInputDataException(
"Following input files are missing: {input_files}".format(input_files=self.missing_input_files()))
return self.cache.lookup_plot_div(self, self._plot_div_producer)
def _plot_div_producer(self):
import plotly.graph_objs
import plotly.offline
# Set default color template to 'none' for plotly version 4
try:
import plotly.io as pio
pio.templates.default = 'none'
except ImportError:
pass
fig = plotly.graph_objs.Figure(data=self._plot_data_producer(), layout=self.layout)
fig['layout'].update(dict(hovermode='closest'))
fig['layout']['yaxis'].update(dict(hoverformat=".2f"))
fig['layout']['margin'].update(dict(l=50, r=50, t=50, b=50))
fig['layout']['font'].update(dict(size=10))
if self.timeframe is not None:
import datetime
# Try to get plot year from data
try:
plot_year = fig['data'][0]['x'][0].year
fig.update_xaxes(
rangebreaks=[
dict(values=[datetime.datetime(plot_year, 2, 29)])
]
)
except Exception as e:
print(e)
div = plotly.offline.plot(fig, output_type='div', include_plotlyjs=False, show_link=False)
return div
def _plot_data_producer(self):
try:
return self.cache.lookup_plot_data(self, self.calc_graph)
except NotImplementedError: # if self.calc_graph() is not implemented
return None
def plot_data_to_file(self):
import pandas as pd
import collections
import re
plotly_data = self._plot_data_producer()
# Return None if plotly data does not exist
if plotly_data is None:
print("Unable to find plot data found for '{}'".format(self.name))
return None
x_axis = self.layout['xaxis']['title'] if 'xaxis' in self.layout else ''
y_axis = self.layout['yaxis']['title']
data = []
scatter_plots = collections.OrderedDict()
for trace in plotly_data:
name = trace.get('name')
x = trace.get('x')
y = trace.get('y')
if x is not None and y is not None and len(x) == len(y):
if 'yaxis' in trace: # Assign correct title if plot contains multiple y_axis
y_axis_num = trace['yaxis'].split('y')[1]
y_axis = self.layout['yaxis{}'.format(y_axis_num)]['title'] or y_axis
# Fix for plotly v4
if hasattr(x_axis, 'text'):
x_axis = x_axis.text
y_axis = y_axis.text
if trace['type'] == 'bar':
column_name = name
units = re.search(r'\[.*?\]', y_axis)
if units:
column_name = '{} {}'.format(name, units.group())
df = pd.DataFrame({x_axis: list(x), column_name: list(y)}).set_index(x_axis)
data.append(df)
elif trace['type'] == 'scattergl' and name is not None:
column_name = y_axis
df = pd.DataFrame({x_axis: list(x), column_name: list(y)}).set_index(x_axis)
scatter_plots[name] = df
if data:
data = pd.concat(data, axis=1)
# Try to merge any scatter plots with bar data which have the same index name
for data_name, scatter_data in scatter_plots.items():
try:
data = pd.concat([data, scatter_data], axis=1)
except Exception as e:
print(e)
# Export data as .csv
output_path = os.path.splitext(self.output_path)[0] + '.csv'
data.to_csv(output_path)
elif scatter_plots:
# Export data as .xlsx
output_path = os.path.splitext(self.output_path)[0] + '.xlsx'
with pd.ExcelWriter(output_path) as writer:
for data_name, scatter_data in scatter_plots.items():
sheet_name = data_name[:31] # Sheet name cannot be more than 31 characters
scatter_data.to_excel(writer, sheet_name=sheet_name)
else: # Return None if could not parse any data from plot
output_path = None
print("Written '{}' plot data to {}".format(self.name, output_path))
return output_path
def table_div(self):
"""Returns the html div for a table, or an empty string if no table is to be produced"""
if self.missing_input_files():
raise MissingInputDataException(
"Following input files are missing: {input_files}".format(input_files=self.missing_input_files()))
return self.cache.lookup_table_div(self, self._table_div_producer)
def _table_div_producer(self):
"""Default producer for table divs (override if you need more control)"""
try:
table_df = self.calc_table()
template_path = os.path.join(os.path.dirname(__file__), 'table.html')
template = jinja2.Template(open(template_path, 'r').read())
table_html = template.render(table_df=table_df)
return table_html
except NotImplementedError:
return ''
@classmethod
def get_default_parameters(cls, config):
"""Return a dictionary of parameters taken by using the values in the config file"""
return {
k: config.get(v)
for k, v in cls.expected_parameters.items()
}
def process_buildings_parameter(self):
"""
Make sure the buildings parameter contains only buildings in the zone. Returns (and updates) the parameter.
"""
# all plots in this category use the buildings parameter. make it easier to access
# handle special case of buildings... (only allow buildings for the scenario in question)
zone_building_names = self.locator.get_zone_building_names()
if not self.parameters['buildings']:
self.parameters['buildings'] = zone_building_names
self.parameters['buildings'] = ([b for b in self.parameters['buildings'] if
b in zone_building_names]
or zone_building_names)
return self.parameters['buildings']
def resample_time_data(self, dataframe):
import pandas as pd
if 'DATE' in dataframe.columns:
time_data = dataframe.set_index('DATE')
else:
time_data = dataframe.copy()
# Remove timezone data (found in technology potential files)
time_data.index = pd.to_datetime(time_data.index.map(lambda x: pd.Timestamp(x))).tz_localize(None)
if self.timeframe == "daily":
time_data = time_data.resample('D').sum()
elif self.timeframe == "weekly":
time_data = time_data.resample('W').sum()
elif self.timeframe == "monthly":
time_data = time_data.resample('M').sum()
time_data.index = time_data.index.strftime('%b %Y')
elif self.timeframe == "yearly":
time_data = time_data.resample('Y').sum()
time_data.index = time_data.index.strftime('Year %Y')
return time_data
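# Hypothetical subclass sketch (not part of CEA): illustrates the members a concrete
# plot is expected to provide on top of PlotBase -- a name, category metadata, the
# expected_parameters mapping and a calc_graph() that returns plotly traces. The
# category, parameter paths and column name below are illustrative assumptions only.
class ExampleTotalsPlot(PlotBase):
name = "Example Totals"
category_name = "examples" # assumed category
expected_parameters = {
'scenario-name': 'general:scenario-name', # assumed config path
'buildings': 'plots:buildings', # assumed config path
}
def __init__(self, project, parameters, cache):
super(ExampleTotalsPlot, self).__init__(project, parameters, cache)
self.category_path = os.path.join('examples', 'totals')
self.input_files = [] # list of (locator_method, args) tuples
self.analysis_fields = ['QH_sys_MWhyr'] # assumed column in self.data
self.data = None # a real subclass would load a DataFrame here
def calc_graph(self):
# totals_bar_plot() from PlotBase builds the traces once self.data is populated
return self.totals_bar_plot()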
|
098cd02f7a5825f954667f6ccbc69208338997a8
|
6afa4eec288b12a399672ffba35b8b163796499c
|
/NLP_ALL/text_classifier/sklearn/example_2.py
|
2a0122f2857f31bd8362030d98d26e0322f2ff93
|
[] |
no_license
|
lhyxcxy/nlp
|
4dbc03cba81fda55a60e305850fc5b8de77b6662
|
81fb273dc5859ba9f36984138a1ebe449fec7f35
|
refs/heads/master
| 2022-11-06T05:45:54.084726
| 2020-06-20T03:11:24
| 2020-06-20T03:11:24
| 84,399,969
| 211
| 76
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,772
|
py
|
example_2.py
|
# -*- coding: utf-8 -*-
import json
import codecs
import jieba
from sklearn.cluster import KMeans
import uuid
from jieba import analyse
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from collections import Counter
def loadFile():
"""
Load the file
:return:
"""
f = codecs.open("data.json", 'r')
sentences=list();
sentences_words=list()
for line in f:
line=line[:-1]
if line=="":
continue
app = {}
line=line.encode("utf-8")
try:
setting = json.loads(line)
question = setting['question'] # note the syntax for reading a nested structure
answer = setting['answer']
except Exception, e:
continue
if question=="" or answer=="":
continue
# app[u"问题"]=question;
#app[u"答案"]= answer
words=list(jieba.cut(question,cut_all=True))
wordsStr=" ".join(words)
sentences.append(question)
sentences_words.append(wordsStr)
return sentences_words, sentences # tokenized sentences, original sentences
def kmeans(class_num):
"""
K-means clustering
:param class_num: number of clusters
:return: class_list [[sentence1, sentence2], [sentence1, sentence2]]
"""
class_list=list();
sentences_words,sentences=loadFile()
vectorizer = CountVectorizer() # converts the words in the texts into a term-frequency matrix; element a[i][j] is the count of word j in document i
transformer = TfidfTransformer() # computes the tf-idf weight of every word
# the first fit_transform computes tf-idf, the second fit_transform converts the texts into a term-frequency matrix
# note: the words_list passed in must be a list of pre-tokenized, space-separated strings such as ["我 爱 中国 天安门", "北京 大学"]
tfidf = transformer.fit_transform(vectorizer.fit_transform(sentences_words))
# weight is a 2-D array with shape [number of sentences, number of tokens]
weight = tfidf.toarray() # extract the tf-idf matrix; element a[i][j] is the tf-idf weight of word j in document i
clf = KMeans(n_clusters=class_num)
s = clf.fit(weight)
for i in range(class_num):
class_list.append(list())
print clf.labels_
for i in range(len(clf.labels_)):# clf.labels_ is the cluster label of each sentence, e.g. [1, 3, 2, 5, 0, 3, 5, 4, 1]; indices match the sentence indices
class_label=clf.labels_[i]
class_list[class_label].append(sentences[i])
#print "####### class "+str(clf.labels_[i])+" "+words_list[i]
return class_list;
class_sentences=kmeans(3);
for i in range(len(class_sentences)):
print "##############第"+str(i)+"类";
for c1 in class_sentences[i]:
print c1
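# Illustrative sketch (not from the original repository): the same clustering core
# against the current scikit-learn API, where TfidfVectorizer replaces the
# CountVectorizer + TfidfTransformer pair used above; the sample sentences are made up.
def kmeans_sketch():
from sklearn.feature_extraction.text import TfidfVectorizer
docs = ["I love Beijing", "I love Shanghai", "KMeans groups similar questions"]
tfidf = TfidfVectorizer().fit_transform(docs) # shape: [n_sentences, n_terms]
labels = KMeans(n_clusters=2, n_init=10).fit(tfidf).labels_
groups = [[] for _ in range(2)]
for sentence, label in zip(docs, labels):
groups[label].append(sentence)
return groups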
|
a90766cebcfa0ea369e08569dcdfd03bee0d77c6
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/uilogging/core/session.py
|
c85f5671f7b1f32461829c9e566b41e95c2eeada
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 5,596
|
py
|
session.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/uilogging/core/session.py
import typing
import adisp
import async
from BWUtil import AsyncReturn
from gui.wgcg.uilogging.contexts import UILoggingSessionCtx
from helpers import dependency, time_utils
from helpers.log.adapters import getWithContext
from ids_generators import SequenceIDGenerator
from skeletons.gui.web import IWebController
from soft_exception import SoftException
from uilogging.constants import DEFAULT_LOGGER_NAME
from uilogging.core.core_constants import LOGS_MAX_COUNT_PER_SEND, LOG_RECORD_MAX_PROPERTIES_COUNT, MAX_SESSION_GET_RETRIES, MIN_SESSION_LIFE_TIME
from uilogging.core.log import LogRecord
class WaitingSessionData(SoftException):
pass
class SessionData(object):
__slots__ = ('__id', '__auth', '__logging')
def __init__(self, sessionID, data):
self.__id = sessionID
self.__auth = data.get('auth') or {}
self.__logging = data.get('logging') or {}
@property
def id(self):
return self.__id
@property
def token(self):
return self.__auth.get('token')
@property
def expiration(self):
return self.__auth.get('expiration')
@property
def isExpired(self):
expiration = self.expiration
return False if expiration is None else expiration <= time_utils.getServerUTCTime()
@property
def maxLogsCount(self):
return min(self.__logging.get('max_logs_count', 0), LOGS_MAX_COUNT_PER_SEND)
@property
def maxLogPropertiesCount(self):
return min(self.__logging.get('max_log_properties_count', 0), LOG_RECORD_MAX_PROPERTIES_COUNT)
@property
def isValid(self):
isValid = bool(self.url) and self.maxLogsCount >= 1 and self.maxLogPropertiesCount >= 1
if not self.isExpired and self.expiration is not None:
isValid = isValid and self.expiration - time_utils.getServerUTCTime() >= MIN_SESSION_LIFE_TIME
return isValid
@property
def url(self):
return self.__logging.get('url', '')
def verifyLog(self, log):
return len(log) <= self.maxLogPropertiesCount
class Session(object):
webController = dependency.descriptor(IWebController)
def __init__(self):
self._requesting = False
self._destroyed = False
self._sessionData = None
self._initialized = False
self._idGen = SequenceIDGenerator()
self._logger = getWithContext(DEFAULT_LOGGER_NAME, self)
return
def get(self):
return self._sessionData
def remove(self, sessionID):
session = self.get()
if session and session.id == sessionID:
self._clear()
self._logger.debug('Session=%s removed.', sessionID)
def update(self):
if self._requesting:
return True
if not self._destroyed and not self._isInitialized:
self._update()
return True
return False
@async.async
def request(self):
if self._destroyed:
self._logger.debug('Ui logging session destroyed.')
raise AsyncReturn(None)
if self._requesting:
raise WaitingSessionData('Session data request in progress.')
if self._isInitialized:
self._logger.debug('Return cached session data.')
raise AsyncReturn(self._sessionData)
self._clear()
self._requesting = True
retries = MAX_SESSION_GET_RETRIES
try:
while True:
self._sessionData = yield async.await_callback(self._getSessionData)()
if not self._sessionData or not self._sessionData.isExpired:
break
retries -= 1
if retries <= 0:
self._sessionData = None
break
except async.BrokenPromiseError:
self._logger.debug('Promise was destroyed while waiting for result.')
self._sessionData = None
except Exception:
self._logger.exception('Failed to get session data.')
self._sessionData = None
self._initialized = True
self._requesting = False
raise AsyncReturn(self._sessionData)
return
def destroy(self):
self._destroyed = True
self._clear()
self._logger.debug('Destroyed.')
@async.async
def _update(self):
self._logger.debug('Updating.')
try:
yield self.request()
except WaitingSessionData:
self._logger.debug('Already waiting session.')
except async.BrokenPromiseError:
self._logger.debug('Promise was destroyed while waiting for result.')
raise AsyncReturn(None)
return
@property
def _isInitialized(self):
return self._initialized and (not self._sessionData or not self._sessionData.isExpired)
def _clear(self):
self._initialized = False
self._sessionData = None
return
@adisp.process
def _getSessionData(self, callback):
self._logger.debug('Request session data.')
response = yield self.webController.sendRequest(ctx=UILoggingSessionCtx())
self._logger.debug('Response session data: code=%s', response.getCode())
if not self._destroyed and response.isSuccess() and isinstance(response.data, dict):
data = SessionData(self._idGen.next(), response.data)
if data.isValid:
callback(data)
return
callback(None)
return
|
40a08d583fb2bdc4088b70932d782f8b1afd0e38
|
7f620e7902c0b9ccb1fcfd1427acd5936ea33814
|
/mlrun/api/crud/notifications.py
|
6245e524fe50de9dfecbb049aeed89498bd02282
|
[
"Apache-2.0"
] |
permissive
|
mlrun/mlrun
|
2074c230070129ce3becb211b92c90b29a2ce850
|
b5fe0c05ae7f5818a4a5a5a40245c851ff9b2c77
|
refs/heads/development
| 2023-09-06T00:09:21.546135
| 2023-09-05T19:38:13
| 2023-09-05T19:38:13
| 205,706,595
| 1,093
| 229
|
Apache-2.0
| 2023-09-14T14:14:10
| 2019-09-01T16:59:19
|
Python
|
UTF-8
|
Python
| false
| false
| 5,739
|
py
|
notifications.py
|
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
import sqlalchemy.orm
import mlrun.api.api.utils
import mlrun.api.db.sqldb.db
import mlrun.api.utils.scheduler
import mlrun.api.utils.singletons.db
import mlrun.api.utils.singletons.scheduler
import mlrun.common.schemas
import mlrun.utils.singleton
class Notifications(
metaclass=mlrun.utils.singleton.Singleton,
):
def store_run_notifications(
self,
session: sqlalchemy.orm.Session,
notification_objects: typing.List[mlrun.model.Notification],
run_uid: str,
project: str = None,
mask_params: bool = True,
):
project = project or mlrun.mlconf.default_project
# we don't mask the notification params when it's a status update as they are already masked
notification_objects_to_store = notification_objects
if mask_params:
notification_objects_to_store = (
mlrun.api.api.utils.validate_and_mask_notification_list(
notification_objects, run_uid, project
)
)
mlrun.api.utils.singletons.db.get_db().store_run_notifications(
session, notification_objects_to_store, run_uid, project
)
def list_run_notifications(
self,
session: sqlalchemy.orm.Session,
run_uid: str,
project: str = "",
) -> typing.List[mlrun.model.Notification]:
project = project or mlrun.mlconf.default_project
return mlrun.api.utils.singletons.db.get_db().list_run_notifications(
session, run_uid, project
)
def delete_run_notifications(
self,
session: sqlalchemy.orm.Session,
name: str = None,
run_uid: str = None,
project: str = None,
):
project = project or mlrun.mlconf.default_project
# Delete notification param project secret
notifications = [
notification
for notification in self.list_run_notifications(session, run_uid, project)
if notification.name == name
]
if notifications:
# unique constraint on name, run_uid, project, so the list will contain one item at most
notification = notifications[0]
mlrun.api.api.utils.delete_notification_params_secret(project, notification)
mlrun.api.utils.singletons.db.get_db().delete_run_notifications(
session, name, run_uid, project
)
@staticmethod
def set_object_notifications(
db_session: sqlalchemy.orm.Session,
auth_info: mlrun.common.schemas.AuthInfo,
project: str,
notifications: typing.List[mlrun.common.schemas.Notification],
notification_parent: typing.Union[
mlrun.common.schemas.RunIdentifier, mlrun.common.schemas.ScheduleIdentifier
],
):
"""
Sets notifications on a given object (run or schedule; might be extended in the future).
This will replace any existing notifications.
:param db_session: DB session
:param auth_info: Authorization info
:param project: Project name
:param notifications: List of notifications to set
:param notification_parent: Identifier of the object on which to set the notifications
"""
set_notification_methods = {
"run": {
"factory": mlrun.api.utils.singletons.db.get_db,
"method_name": mlrun.api.db.sqldb.db.SQLDB.set_run_notifications.__name__,
"identifier_key": "uid",
},
"schedule": {
"factory": mlrun.api.utils.singletons.scheduler.get_scheduler,
"method_name": mlrun.api.utils.scheduler.Scheduler.set_schedule_notifications.__name__,
"identifier_key": "name",
},
}
set_notification_method = set_notification_methods.get(
notification_parent.kind, {}
)
factory = set_notification_method.get("factory")
if not factory:
raise mlrun.errors.MLRunNotFoundError(
f"couldn't find factory for object kind: {notification_parent.kind}"
)
set_func = set_notification_method.get("method_name")
if not set_func:
raise mlrun.errors.MLRunNotFoundError(
f"couldn't find set notification function for object kind: {notification_parent.kind}"
)
identifier_key = set_notification_method.get("identifier_key")
if not identifier_key:
raise mlrun.errors.MLRunNotFoundError(
f"couldn't find identifier key for object kind: {notification_parent.kind}"
)
notification_objects_to_set = (
mlrun.api.api.utils.validate_and_mask_notification_list(
notifications,
getattr(notification_parent, identifier_key),
project,
)
)
getattr(factory(), set_func)(
session=db_session,
project=project,
notifications=notification_objects_to_set,
identifier=notification_parent,
auth_info=auth_info,
)
|
ab2c3e5edb15798fe895550773cae6ebaa8d81e8
|
3a50c0712e0a31b88d0a5e80a0c01dbefc6a6e75
|
/thrift/compiler/test/fixtures/transitive-deps/gen-python/c/thrift_metadata.py
|
77f68bf0f650a634a9f20df266c45af0fd1db71f
|
[
"Apache-2.0"
] |
permissive
|
facebook/fbthrift
|
3b7b94a533666c965ce69cfd6054041218b1ea6f
|
53cf6f138a7648efe5aef9a263aabed3d282df91
|
refs/heads/main
| 2023-08-24T12:51:32.367985
| 2023-08-24T08:28:35
| 2023-08-24T08:28:35
| 11,131,631
| 2,347
| 666
|
Apache-2.0
| 2023-09-01T01:44:39
| 2013-07-02T18:15:51
|
C++
|
UTF-8
|
Python
| false
| false
| 2,412
|
py
|
thrift_metadata.py
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT
# @generated
#
from __future__ import annotations
import apache.thrift.metadata.thrift_types as _fbthrift_metadata
# TODO (ffrancet): This general pattern can be optimized by using tuples and dicts
# instead of re-generating thrift structs
def _fbthrift_gen_metadata_struct_C(metadata_struct: _fbthrift_metadata.ThriftMetadata) -> _fbthrift_metadata.ThriftMetadata:
qualified_name = "c.C"
if qualified_name in metadata_struct.structs:
return metadata_struct
fields = [
_fbthrift_metadata.ThriftField(id=1, type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_I64_TYPE), name="i", is_optional=False, structured_annotations=[
]),
]
struct_dict = dict(metadata_struct.structs)
struct_dict[qualified_name] = _fbthrift_metadata.ThriftStruct(name=qualified_name, fields=fields,
is_union=False,
structured_annotations=[
])
new_struct = metadata_struct(structs=struct_dict)
# i
return new_struct
def gen_metadata_struct_C() -> _fbthrift_metadata.ThriftMetadata:
return _fbthrift_gen_metadata_struct_C(_fbthrift_metadata.ThriftMetadata(structs={}, enums={}, exceptions={}, services={}))
# TODO (ffrancet): This general pattern can be optimized by using tuples and dicts
# instead of re-generating thrift structs
def _fbthrift_gen_metadata_exception_E(metadata_struct: _fbthrift_metadata.ThriftMetadata) -> _fbthrift_metadata.ThriftMetadata:
qualified_name = "c.E"
if qualified_name in metadata_struct.exceptions:
return metadata_struct
fields = [
]
struct_dict = dict(metadata_struct.exceptions)
struct_dict[qualified_name] = _fbthrift_metadata.ThriftException(name=qualified_name, fields=fields,
structured_annotations=[
])
new_struct = metadata_struct(exceptions=struct_dict)
return new_struct
def gen_metadata_exception_E() -> _fbthrift_metadata.ThriftMetadata:
return _fbthrift_gen_metadata_exception_E(_fbthrift_metadata.ThriftMetadata(structs={}, enums={}, exceptions={}, services={}))
def getThriftModuleMetadata() -> _fbthrift_metadata.ThriftMetadata:
meta = _fbthrift_metadata.ThriftMetadata(structs={}, enums={}, exceptions={}, services={})
meta = _fbthrift_gen_metadata_struct_C(meta)
meta = _fbthrift_gen_metadata_exception_E(meta)
return meta
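# Illustrative call sketch (not part of the generated file): the module-level entry
# point above aggregates the struct and exception metadata generated in this module.
if __name__ == '__main__':
metadata = getThriftModuleMetadata()
print(sorted(metadata.structs.keys())) # expected to contain "c.C"
print(sorted(metadata.exceptions.keys())) # expected to contain "c.E"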
|
be8b1c8043971135dc5aa59023971b87b824042f
|
80f94bea418d7956df1ba19d4d6a1d7715a94ade
|
/test/integration/test_configuration_decode.py
|
92dc6b8e9a997bd8fe4aea3c24d24acaa6203d26
|
[
"CC-BY-2.5",
"MIT",
"CC-BY-3.0",
"AFL-3.0"
] |
permissive
|
galaxyproject/galaxy
|
5748409eb6693b1611f289d164f85e20c3237495
|
b9ae7a16ba0465995e880ae9701b7e87226b9bab
|
refs/heads/dev
| 2023-08-28T22:35:51.248138
| 2023-08-26T08:02:33
| 2023-08-26T08:02:33
| 31,211,061
| 1,277
| 1,137
|
NOASSERTION
| 2023-09-14T19:39:01
| 2015-02-23T14:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
test_configuration_decode.py
|
from galaxy_test.base.populators import LibraryPopulator
from galaxy_test.driver import integration_util
class TestConfigurationDecodeIntegration(integration_util.IntegrationTestCase):
def setUp(self):
super().setUp()
self.library_populator = LibraryPopulator(self.galaxy_interactor)
def test_admin_decode_id(self):
new_lib = self.library_populator.new_library("DecodeTestLibrary")
decode_response = self._get("configuration/decode/" + new_lib["id"], admin=True)
response_id = decode_response.json()["decoded_id"]
decoded_library_id = self._app.security.decode_id(new_lib["id"])
assert decoded_library_id == response_id
# fake valid folder id by prepending F
valid_encoded_folder_id = "F" + new_lib["id"]
folder_decode_response = self._get("configuration/decode/" + valid_encoded_folder_id, admin=True)
folder_response_id = folder_decode_response.json()["decoded_id"]
assert decoded_library_id == folder_response_id
|
0e9142f13f2f8ede388632eb6296ce3519038b2a
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/fivem/test_config_flow.py
|
121b416a1100ee56d26f1efc1251dbee7a28f0f0
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,205
|
py
|
test_config_flow.py
|
"""Test the FiveM config flow."""
from unittest.mock import patch
from fivem import FiveMServerOfflineError
from homeassistant import config_entries
from homeassistant.components.fivem.config_flow import DEFAULT_PORT
from homeassistant.components.fivem.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResultType
USER_INPUT = {
CONF_HOST: "fivem.dummyserver.com",
CONF_PORT: DEFAULT_PORT,
}
def _mock_fivem_info_success():
return {
"resources": [
"fivem",
"monitor",
],
"server": "FXServer-dummy v0.0.0.DUMMY linux",
"vars": {
"gamename": "gta5",
},
"version": 123456789,
}
def _mock_fivem_info_invalid():
return {
"plugins": [
"sample",
],
"data": {
"gamename": "gta5",
},
}
def _mock_fivem_info_invalid_game_name():
info = _mock_fivem_info_success()
info["vars"]["gamename"] = "redm"
return info
async def test_show_config_form(hass: HomeAssistant) -> None:
"""Test if initial configuration form is shown."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == FlowResultType.FORM
assert result["step_id"] == "user"
async def test_form(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == FlowResultType.FORM
assert result["errors"] is None
with patch(
"fivem.fivem.FiveM.get_info_raw",
return_value=_mock_fivem_info_success(),
), patch(
"homeassistant.components.fivem.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
await hass.async_block_till_done()
assert result2["type"] == FlowResultType.CREATE_ENTRY
assert result2["title"] == USER_INPUT[CONF_HOST]
assert result2["data"] == USER_INPUT
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"fivem.fivem.FiveM.get_info_raw",
side_effect=FiveMServerOfflineError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
await hass.async_block_till_done()
assert result2["type"] == FlowResultType.FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_invalid(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"fivem.fivem.FiveM.get_info_raw",
return_value=_mock_fivem_info_invalid(),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
await hass.async_block_till_done()
assert result2["type"] == FlowResultType.FORM
assert result2["errors"] == {"base": "unknown"}
async def test_form_invalid_game_name(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"fivem.fivem.FiveM.get_info_raw",
return_value=_mock_fivem_info_invalid_game_name(),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
await hass.async_block_till_done()
assert result2["type"] == FlowResultType.FORM
assert result2["errors"] == {"base": "invalid_game_name"}
|
90bd5fb565a4d2b71c9e206c1d4bde9c78b060ee
|
83b8b30ebb633eecd29ca0a7a20cc43a293c9333
|
/extmod/uasyncio/funcs.py
|
0ce48b015c1bd7816a5409260494598428cc8489
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
adafruit/circuitpython
|
430ec895149d1eb814b505db39b4977a35ee88a7
|
506dca71b0cbb7af749bb51f86b01021db5483b3
|
refs/heads/main
| 2023-08-21T16:30:46.781068
| 2023-08-20T00:39:44
| 2023-08-20T00:39:44
| 66,166,069
| 3,806
| 1,560
|
MIT
| 2023-09-14T19:23:51
| 2016-08-20T20:10:40
|
C
|
UTF-8
|
Python
| false
| false
| 2,207
|
py
|
funcs.py
|
# MicroPython uasyncio module
# MIT license; Copyright (c) 2019-2020 Damien P. George
from . import core
async def wait_for(aw, timeout, sleep=core.sleep):
aw = core._promote_to_task(aw)
if timeout is None:
return await aw
def runner(waiter, aw):
nonlocal status, result
try:
result = await aw
s = True
except BaseException as er:
s = er
if status is None:
# The waiter is still waiting, set status for it and cancel it.
status = s
waiter.cancel()
# Run aw in a separate runner task that manages its exceptions.
status = None
result = None
runner_task = core.create_task(runner(core.cur_task, aw))
try:
# Wait for the timeout to elapse.
await sleep(timeout)
except core.CancelledError as er:
if status is True:
# aw completed successfully and cancelled the sleep, so return aw's result.
return result
elif status is None:
# This wait_for was cancelled externally, so cancel aw and re-raise.
status = True
runner_task.cancel()
raise er
else:
# aw raised an exception, propagate it out to the caller.
raise status
# The sleep finished before aw, so cancel aw and raise TimeoutError.
status = True
runner_task.cancel()
await runner_task
raise core.TimeoutError
def wait_for_ms(aw, timeout):
return wait_for(aw, timeout, core.sleep_ms)
async def gather(*aws, return_exceptions=False):
ts = [core._promote_to_task(aw) for aw in aws]
for i in range(len(ts)):
try:
# TODO handle cancel of gather itself
# if ts[i].coro:
# iter(ts[i]).waiting.push_head(cur_task)
# try:
# yield
# except CancelledError as er:
# # cancel all waiting tasks
# raise er
ts[i] = await ts[i]
except (core.CancelledError, Exception) as er:
if return_exceptions:
ts[i] = er
else:
raise er
return ts
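# Illustrative usage sketch (not part of the module): how application code typically
# awaits wait_for() and gather() under uasyncio.
async def _demo():
async def work(t):
await core.sleep(t)
return t
try:
print(await wait_for(work(5), 1)) # sleeps 5 s but times out after 1 s
except core.TimeoutError:
print('timed out')
print(await gather(work(0.1), work(0.2))) # -> [0.1, 0.2]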
|
84c53afbee73e2c75c6d70f824a44840e80dabb2
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/diligentgraphics-spirv-tools/all/conanfile.py
|
04f39fb2c75a43a2f268d73e8c0eb042cb210fc2
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 11,260
|
py
|
conanfile.py
|
from conans import ConanFile, tools, CMake
from conans.errors import ConanInvalidConfiguration
import os
import textwrap
required_conan_version = ">=1.33.0"
class SpirvtoolsConan(ConanFile):
name = "diligentgraphics-spirv-tools"
homepage = "https://github.com/DiligentGraphics/SPIRV-Tools/"
description = "Diligent fork. Create and optimize SPIRV shaders"
topics = ("spirv", "spirv-v", "vulkan", "opengl", "opencl", "hlsl", "khronos", "diligent")
url = "https://github.com/conan-io/conan-center-index"
provides = "spirv-tools"
deprecated = "spirv-tools"
license = "Apache-2.0"
settings = "os", "compiler", "arch", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"build_executables": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"build_executables": True,
}
short_paths = True
exports_sources = ["CMakeLists.txt", "patches/**"]
generators = "cmake"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
if not self._get_compatible_spirv_headers_version:
raise ConanInvalidConfiguration("unknown diligentgraphics-spirv-headers version")
self.requires("diligentgraphics-spirv-headers/{}".format(self._get_compatible_spirv_headers_version))
@property
def _get_compatible_spirv_headers_version(self):
return {
"cci.20211008": "cci.20211006",
}.get(str(self.version), False)
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 11)
def _validate_dependency_graph(self):
if self.deps_cpp_info["diligentgraphics-spirv-headers"].version != self._get_compatible_spirv_headers_version:
raise ConanInvalidConfiguration("diligentgraphics-spirv-tools {0} requires diligentgraphics-spirv-headers {1}"
.format(self.version, self._get_compatible_spirv_headers_version))
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
cmake = CMake(self)
# Required by the project's CMakeLists.txt
cmake.definitions["SPIRV-Headers_SOURCE_DIR"] = self.deps_cpp_info["diligentgraphics-spirv-headers"].rootpath.replace("\\", "/")
# There are some switch( ) statements that are causing errors
# need to turn this off
cmake.definitions["SPIRV_WERROR"] = False
cmake.definitions["SKIP_SPIRV_TOOLS_INSTALL"] = False
cmake.definitions["SPIRV_LOG_DEBUG"] = False
cmake.definitions["SPIRV_SKIP_TESTS"] = True
cmake.definitions["SPIRV_CHECK_CONTEXT"] = False
cmake.definitions["SPIRV_BUILD_FUZZER"] = False
cmake.definitions["SPIRV_SKIP_EXECUTABLES"] = not self.options.build_executables
cmake.configure(build_folder=self._build_subfolder)
self._cmake = cmake
return self._cmake
def build(self):
self._validate_dependency_graph()
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
# CMAKE_POSITION_INDEPENDENT_CODE was set ON for the entire
# project in the lists file.
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"set(CMAKE_POSITION_INDEPENDENT_CODE ON)", "")
def package(self):
self.copy(pattern="LICENSE*", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "SPIRV-Tools"))
tools.rmdir(os.path.join(self.package_folder, "SPIRV-Tools-link"))
tools.rmdir(os.path.join(self.package_folder, "SPIRV-Tools-opt"))
tools.rmdir(os.path.join(self.package_folder, "SPIRV-Tools-reduce"))
tools.rmdir(os.path.join(self.package_folder, "SPIRV-Tools-lint"))
if self.options.shared:
for file_name in ["*SPIRV-Tools", "*SPIRV-Tools-opt", "*SPIRV-Tools-link", "*SPIRV-Tools-reduce"]:
for ext in [".a", ".lib"]:
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), file_name + ext)
else:
tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "*SPIRV-Tools-shared.dll")
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*SPIRV-Tools-shared*")
if self.options.shared:
targets = {"SPIRV-Tools-shared": "diligentgraphics-spirv-tools::SPIRV-Tools"}
else:
targets = {
"SPIRV-Tools": "diligentgraphics-spirv-tools::SPIRV-Tools", # before 2020.5, kept for conveniency
"SPIRV-Tools-static": "diligentgraphics-spirv-tools::SPIRV-Tools",
"SPIRV-Tools-opt": "diligentgraphics-spirv-tools::SPIRV-Tools-opt",
"SPIRV-Tools-link": "diligentgraphics-spirv-tools::SPIRV-Tools-link",
"SPIRV-Tools-reduce": "diligentgraphics-spirv-tools::SPIRV-Tools-reduce",
}
self._create_cmake_module_alias_targets(
os.path.join(self.package_folder, self._module_file_rel_path),
targets,
)
@staticmethod
def _create_cmake_module_alias_targets(module_file, targets):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent("""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""".format(alias=alias, aliased=aliased))
tools.save(module_file, content)
@property
def _module_subfolder(self):
return os.path.join("lib", "cmake")
@property
def _module_file_rel_path(self):
return os.path.join(self._module_subfolder,
"conan-official-{}-targets.cmake".format(self.name))
def package_info(self):
self.cpp_info.filenames["cmake_find_package"] = "SPIRV-Tools"
self.cpp_info.filenames["cmake_find_package_multi"] = "SPIRV-Tools"
self.cpp_info.names["pkg_config"] = "SPIRV-Tools-shared" if self.options.shared else "SPIRV-Tools"
# SPIRV-Tools
self.cpp_info.components["spirv-tools-core"].names["cmake_find_package"] = "SPIRV-Tools"
self.cpp_info.components["spirv-tools-core"].names["cmake_find_package_multi"] = "SPIRV-Tools"
self.cpp_info.components["spirv-tools-core"].builddirs.append(self._module_subfolder)
self.cpp_info.components["spirv-tools-core"].build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.components["spirv-tools-core"].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
self.cpp_info.components["spirv-tools-core"].libs = ["SPIRV-Tools-shared" if self.options.shared else "SPIRV-Tools"]
self.cpp_info.components["spirv-tools-core"].requires = ["diligentgraphics-spirv-headers::diligentgraphics-spirv-headers"]
if self.options.shared:
self.cpp_info.components["spirv-tools-core"].defines = ["SPIRV_TOOLS_SHAREDLIB"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["spirv-tools-core"].system_libs.extend(["m", "rt"])
if not self.options.shared and tools.stdcpp_library(self):
self.cpp_info.components["spirv-tools-core"].system_libs.append(tools.stdcpp_library(self))
# FIXME: others components should have their own CMake config file
if not self.options.shared:
# SPIRV-Tools-opt
self.cpp_info.components["spirv-tools-opt"].names["cmake_find_package"] = "SPIRV-Tools-opt"
self.cpp_info.components["spirv-tools-opt"].names["cmake_find_package_multi"] = "SPIRV-Tools-opt"
self.cpp_info.components["spirv-tools-opt"].builddirs.append(self._module_subfolder)
self.cpp_info.components["spirv-tools-opt"].build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.components["spirv-tools-opt"].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
self.cpp_info.components["spirv-tools-opt"].libs = ["SPIRV-Tools-opt"]
self.cpp_info.components["spirv-tools-opt"].requires = ["spirv-tools-core", "diligentgraphics-spirv-headers::diligentgraphics-spirv-headers"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["spirv-tools-opt"].system_libs.append("m")
# SPIRV-Tools-link
self.cpp_info.components["spirv-tools-link"].names["cmake_find_package"] = "SPIRV-Tools-link"
self.cpp_info.components["spirv-tools-link"].names["cmake_find_package_multi"] = "SPIRV-Tools-link"
self.cpp_info.components["spirv-tools-link"].builddirs.append(self._module_subfolder)
self.cpp_info.components["spirv-tools-link"].build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.components["spirv-tools-link"].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
self.cpp_info.components["spirv-tools-link"].libs = ["SPIRV-Tools-link"]
self.cpp_info.components["spirv-tools-link"].requires = ["spirv-tools-core", "spirv-tools-opt"]
# SPIRV-Tools-reduce
self.cpp_info.components["spirv-tools-reduce"].names["cmake_find_package"] = "SPIRV-Tools-reduce"
self.cpp_info.components["spirv-tools-reduce"].names["cmake_find_package_multi"] = "SPIRV-Tools-reduce"
self.cpp_info.components["spirv-tools-reduce"].builddirs.append(self._module_subfolder)
self.cpp_info.components["spirv-tools-reduce"].build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.components["spirv-tools-reduce"].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
self.cpp_info.components["spirv-tools-reduce"].libs = ["SPIRV-Tools-reduce"]
self.cpp_info.components["spirv-tools-reduce"].requires = ["spirv-tools-core", "spirv-tools-opt"]
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: %s" % bin_path)
self.env_info.path.append(bin_path)
|
3d2cf6c6dd2d764e06e86de5a51b436c73eace7a
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/programmers/난이도별/level03.가장_먼_노드/sangmandu.py
|
988635988fcf93ba3382a288dc3517afacff9ec4
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 876
|
py
|
sangmandu.py
|
'''
https://programmers.co.kr/learn/courses/30/lessons/49189
Farthest Node
[Approach]
Run BFS starting from node 1 and assign each newly reached node a distance of +1 per level
To avoid the time-limit issue, look up edges via a dictionary instead of an adjacency matrix when traversing
'''
from collections import defaultdict
def solution(n, edge):
board = defaultdict(list)
dist = [0, 0.5] + [0] * (n - 1)
for st, ed in edge:
board[st].append(ed)
board[ed].append(st)
stack = [1]
save = []
distance = 1
while True:
if not stack:
if not save:
return dist.count(max(dist))
distance += 1
save, stack = stack, save
st = stack.pop()
for idx in board[st]:
if dist[idx] == 0:
dist[idx] = distance
save.append(idx)
'''
'''
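# Illustrative check (not part of the submission): the sample graph from the problem
# statement, n = 6; nodes 4, 5 and 6 are farthest from node 1, so the answer is 3.
if __name__ == '__main__':
sample_edge = [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]
print(solution(6, sample_edge)) # expected output: 3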
|
775503c1682b705dd3f829e046823ef2161aff8f
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/ContactInfoVO.py
|
fc1b50f1e7a9e66639480d359c6c17922b168958
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,061
|
py
|
ContactInfoVO.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ContactInfoVO(object):
def __init__(self):
self._contact_name = None
self._encryption_content = None
self._phone_number = None
@property
def contact_name(self):
return self._contact_name
@contact_name.setter
def contact_name(self, value):
self._contact_name = value
@property
def encryption_content(self):
return self._encryption_content
@encryption_content.setter
def encryption_content(self, value):
self._encryption_content = value
@property
def phone_number(self):
return self._phone_number
@phone_number.setter
def phone_number(self, value):
self._phone_number = value
def to_alipay_dict(self):
params = dict()
if self.contact_name:
if hasattr(self.contact_name, 'to_alipay_dict'):
params['contact_name'] = self.contact_name.to_alipay_dict()
else:
params['contact_name'] = self.contact_name
if self.encryption_content:
if hasattr(self.encryption_content, 'to_alipay_dict'):
params['encryption_content'] = self.encryption_content.to_alipay_dict()
else:
params['encryption_content'] = self.encryption_content
if self.phone_number:
if hasattr(self.phone_number, 'to_alipay_dict'):
params['phone_number'] = self.phone_number.to_alipay_dict()
else:
params['phone_number'] = self.phone_number
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ContactInfoVO()
if 'contact_name' in d:
o.contact_name = d['contact_name']
if 'encryption_content' in d:
o.encryption_content = d['encryption_content']
if 'phone_number' in d:
o.phone_number = d['phone_number']
return o
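# Illustrative round-trip sketch (not part of the SDK): populate the value object,
# serialize it with to_alipay_dict(), then rebuild it with from_alipay_dict().
if __name__ == '__main__':
vo = ContactInfoVO()
vo.contact_name = 'Alice'
vo.phone_number = '13800000000'
payload = vo.to_alipay_dict()
restored = ContactInfoVO.from_alipay_dict(payload)
print(json.dumps(payload))
print(restored.contact_name) # 'Alice'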
|
6f2a976d7b37eaec0448e4999ca0f474f98b862b
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/eventlet-0.30.2/tests/thread_test.py
|
44de95d660ad79a592925e8b589e37d976774c40
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,628
|
py
|
thread_test.py
|
import gc
import weakref
import eventlet
from eventlet import corolocal
from eventlet import event
from eventlet import greenthread
from eventlet.green import thread
import six
from tests import LimitedTestCase
class Locals(LimitedTestCase):
def passthru(self, *args, **kw):
self.results.append((args, kw))
return args, kw
def setUp(self):
self.results = []
super(Locals, self).setUp()
def tearDown(self):
self.results = []
super(Locals, self).tearDown()
def test_assignment(self):
my_local = corolocal.local()
my_local.a = 1
def do_something():
my_local.b = 2
self.assertEqual(my_local.b, 2)
try:
my_local.a
self.fail()
except AttributeError:
pass
eventlet.spawn(do_something).wait()
self.assertEqual(my_local.a, 1)
def test_calls_init(self):
init_args = []
class Init(corolocal.local):
def __init__(self, *args):
init_args.append((args, eventlet.getcurrent()))
my_local = Init(1, 2, 3)
self.assertEqual(init_args[0][0], (1, 2, 3))
self.assertEqual(init_args[0][1], eventlet.getcurrent())
def do_something():
my_local.foo = 'bar'
self.assertEqual(len(init_args), 2, init_args)
self.assertEqual(init_args[1][0], (1, 2, 3))
self.assertEqual(init_args[1][1], eventlet.getcurrent())
eventlet.spawn(do_something).wait()
def test_calling_methods(self):
class Caller(corolocal.local):
def callme(self):
return self.foo
my_local = Caller()
my_local.foo = "foo1"
self.assertEqual("foo1", my_local.callme())
def do_something():
my_local.foo = "foo2"
self.assertEqual("foo2", my_local.callme())
eventlet.spawn(do_something).wait()
my_local.foo = "foo3"
self.assertEqual("foo3", my_local.callme())
def test_no_leaking(self):
refs = weakref.WeakKeyDictionary()
my_local = corolocal.local()
class X(object):
pass
def do_something(i):
o = X()
refs[o] = True
my_local.foo = o
p = eventlet.GreenPool()
for i in six.moves.range(100):
p.spawn(do_something, i)
p.waitall()
del p
gc.collect()
eventlet.sleep(0)
gc.collect()
# at this point all our coros have terminated
self.assertEqual(len(refs), 1)
|
5a2cb53056511c536699c2e8090b22d4cddb1452
|
807438e6974bf68762208ec24cf824dd0e5fabd6
|
/docs/examples/compute/cloudsigma/get_account_balance.py
|
292e35fc75645f17cb680926a5595aeff1e1f682
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apache/libcloud
|
019c5bd839dedd2423f9604936886eaff252e04b
|
abba8c1719a8bda6db8efde2d46fd1b423ae4304
|
refs/heads/trunk
| 2023-08-31T20:14:22.369970
| 2023-08-21T20:17:57
| 2023-08-21T20:17:57
| 419,555
| 1,644
| 968
|
Apache-2.0
| 2023-09-13T19:34:44
| 2009-12-11T09:00:14
|
Python
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
get_account_balance.py
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.CLOUDSIGMA)
driver = cls("username", "password", region="zrh", api_version="2.0")
balance = driver.ex_get_balance()
values = {"balance": balance["balance"], "currency": balance["currency"]}
print("Account balance: %(balance)s %(currency)s" % values)
|
feb1542a2183e620d9a0ffca7180ac9602c22299
|
abbc2d332bdfa036ac12438983e6d74cf4107e64
|
/SiamFCpp/SiamFCpp-video_analyst/siamfcpp/model/backbone/backbone_impl/resnet.py
|
efa0c5d154b412eb52a4f3e9b9db88b2d4b453b3
|
[] |
permissive
|
HonglinChu/SiamTrackers
|
c494cff7543a433e8ec7dbf6d9439b1e7395b0c0
|
805208b5348346d35e64abcbe901a3829743e157
|
refs/heads/master
| 2023-08-29T06:50:59.532271
| 2023-03-06T09:13:53
| 2023-03-06T09:13:53
| 253,718,080
| 1,166
| 243
|
Apache-2.0
| 2023-08-03T16:39:53
| 2020-04-07T07:24:00
|
Python
|
UTF-8
|
Python
| false
| false
| 8,851
|
py
|
resnet.py
|
# -*- coding: utf-8 -*-
from loguru import logger
import torch
import torch.nn as nn
from siamfcpp.model.backbone.backbone_base import (TRACK_BACKBONES,
VOS_BACKBONES)
from siamfcpp.model.common_opr.common_block import conv_bn_relu, projector
from siamfcpp.model.module_base import ModuleBase
class creat_residual_block(nn.Module):
def __init__(self, inplanes, outplanes, stride, has_proj=False):
super(creat_residual_block, self).__init__()
self.has_proj = has_proj
if self.has_proj:
self.proj_conv = conv_bn_relu(inplanes,
outplanes,
stride=stride,
kszie=1,
pad=0,
has_bn=True,
has_relu=False,
bias=False)
self.conv1 = conv_bn_relu(inplanes,
outplanes,
stride=stride,
kszie=3,
pad=1,
has_bn=True,
has_relu=True,
bias=False)
self.conv2 = conv_bn_relu(outplanes,
outplanes,
stride=1,
kszie=3,
pad=1,
has_bn=True,
has_relu=False,
bias=False)
self.relu = nn.ReLU()
def forward(self, x):
residual = x
if self.has_proj:
residual = self.proj_conv(residual)
x = self.conv1(x)
x = self.conv2(x)
x = x + residual
x = self.relu(x)
return x
class create_bottleneck(nn.Module):
"""
Modified Bottleneck: we change the kernel size of the projection conv from 1 to 3.
"""
def __init__(self, inplanes, outplanes, stride, has_proj=False):
super(create_bottleneck, self).__init__()
self.has_proj = has_proj
if self.has_proj:
self.proj_conv = conv_bn_relu(inplanes,
outplanes,
stride=stride,
kszie=3,
pad=1,
has_bn=True,
has_relu=False,
bias=False)
self.conv1 = conv_bn_relu(inplanes,
outplanes,
stride=stride,
kszie=3,
pad=1,
has_bn=True,
has_relu=True,
bias=False)
self.conv2 = conv_bn_relu(outplanes,
outplanes,
stride=1,
kszie=3,
pad=1,
has_bn=True,
has_relu=True,
bias=False)
self.conv3 = conv_bn_relu(outplanes,
outplanes,
stride=1,
kszie=3,
pad=1,
has_bn=True,
has_relu=False,
bias=False)
self.relu = nn.ReLU()
def forward(self, x):
residual = x
if self.has_proj:
residual = self.proj_conv(residual)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = x + residual
x = self.relu(x)
return x
@VOS_BACKBONES.register
class ResNet50_M(ModuleBase):
default_hyper_params = {"pretrain_model_path": ""}
def __init__(self, block=create_bottleneck):
super(ResNet50_M, self).__init__()
self.block = block
self.stage1 = nn.Sequential(
conv_bn_relu(3,
32,
stride=2,
kszie=3,
pad=3,
has_bn=True,
has_relu=True,
bias=False),
conv_bn_relu(32,
32,
stride=1,
kszie=3,
pad=1,
has_bn=True,
has_relu=True,
bias=False),
conv_bn_relu(32,
32,
stride=1,
kszie=3,
pad=1,
has_bn=True,
has_relu=True,
bias=False), nn.MaxPool2d(3, 2, 1, ceil_mode=False))
self.stage2 = self.__make_stage(self.block, 32, 64, 3, 1)
self.stage3 = self.__make_stage(self.block, 64, 128, 4, 2)
self.stage4 = self.__make_stage(self.block, 128, 256, 6, 2)
self.stage5 = self.__make_stage(self.block, 256, 512, 3, 2)
def __make_stage(self, block, inplane, outplane, blocks, stride):
layers = []
layers.append(block(inplane, outplane, stride=stride, has_proj=True))
for i in range(1, blocks):
layers.append(block(outplane, outplane, 1, False))
return nn.Sequential(*layers)
def forward(self, x):
x1 = self.stage1(x)
x2 = self.stage2(x1)
x3 = self.stage3(x2)
x4 = self.stage4(x3)
x5 = self.stage5(x4)
return x5
@VOS_BACKBONES.register
class ResNet18_M(ModuleBase):
default_hyper_params = {"pretrain_model_path": ""}
def __init__(self, block=creat_residual_block):
super(ResNet18_M, self).__init__()
self.block = block
self.stage1 = nn.Sequential(
conv_bn_relu(3,
32,
stride=2,
kszie=3,
pad=3,
has_bn=True,
has_relu=True,
bias=False),
conv_bn_relu(32,
32,
stride=1,
kszie=3,
pad=1,
has_bn=True,
has_relu=True,
bias=False),
conv_bn_relu(32,
32,
stride=1,
kszie=3,
pad=1,
has_bn=True,
has_relu=True,
bias=False), nn.MaxPool2d(3, 2, 1, ceil_mode=False))
self.stage2 = self.__make_stage(self.block, 32, 64, 2, 1)
self.stage3 = self.__make_stage(self.block, 64, 128, 2, 2)
self.stage4 = self.__make_stage(self.block, 128, 256, 2, 2)
self.stage5 = self.__make_stage(self.block, 256, 256, 2, 2)
def __make_stage(self, block, inplane, outplane, blocks, stride):
layers = []
layers.append(block(inplane, outplane, stride=stride, has_proj=True))
for i in range(1, blocks):
layers.append(block(outplane, outplane, 1, False))
return nn.Sequential(*layers)
def forward(self, x):
x1 = self.stage1(x)
x2 = self.stage2(x1)
x3 = self.stage3(x2)
x4 = self.stage4(x3)
x5 = self.stage5(x4)
return x5
@VOS_BACKBONES.register
class JointEncoder(ModuleBase):
default_hyper_params = {"pretrain_model_path": ""}
def __init__(self, basemodel):
super(JointEncoder, self).__init__()
self.basemodel = basemodel
self.projector_corr_feature = projector(256, 256)
def forward(self, saliency_image, corr_feature):
corr_feature = self.projector_corr_feature(corr_feature)
x1 = self.basemodel.stage1(saliency_image)
x2 = self.basemodel.stage2(x1)
x3 = self.basemodel.stage3(x2)
x4 = self.basemodel.stage4(x3) + corr_feature
x5 = self.basemodel.stage5(x4)
return [x5, x4, x3, x2]
if __name__ == "__main__":
print(VOS_BACKBONES)
resnet_m = ResNet18_M()
image = torch.rand((1, 3, 257, 257))
print(image.shape)
feature = resnet_m(image)
print(feature.shape)
print(resnet_m.state_dict().keys())
#print(resnet_m)
|
db081184933f224861b42efb0a85391691471637
|
9875d011bf7b478421a4a5a57c6b42c24c069903
|
/examples/validation/core/33_outside_lib.py
|
41ef84a117161d33598368c3723cb40fc47931cb
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Kitware/trame
|
bc9a0d7d6a845050f4fb386d514bd7e9b7060a21
|
861b60718798cca2db292e65e6ad39106ba75ccd
|
refs/heads/master
| 2023-08-20T22:42:57.129511
| 2023-08-18T04:25:32
| 2023-08-18T04:25:32
| 410,108,340
| 198
| 41
|
NOASSERTION
| 2023-09-14T15:29:10
| 2021-09-24T21:38:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,269
|
py
|
33_outside_lib.py
|
from trame.app import get_server
from trame.ui.html import DivLayout
from trame.widgets import vuetify, html, helper
# From: https://quasar.dev/start/umd
module = dict(
scripts=[
"https://cdn.jsdelivr.net/npm/quasar@2.11.5/dist/quasar.umd.prod.js",
],
styles=[
"https://fonts.googleapis.com/css?family=Roboto:100,300,400,500,700,900|Material+Icons",
"https://cdn.jsdelivr.net/npm/quasar@2.11.5/dist/quasar.prod.css",
],
vue_use=[
"Quasar",
],
)
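# Wrap the Quasar components as trame widget classes, backed by the CDN module declared above.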
QSlider = helper.create_class(
"QSlider",
"q-slider",
module=module,
properties=[
"min",
"max",
],
)
QBtn = helper.create_class(
"QBtn",
"q-btn",
module=module,
properties=[
"label",
],
events=[
"click",
],
)
QCircularProgress = helper.create_class(
"QCircularProgress",
"q-circular-progress",
module=module,
properties=[
"value",
"indeterminate",
"size",
"thickness",
"color",
"center_color",
],
)
# -----------------------------------------------------------------------------
# Trame usage
# -----------------------------------------------------------------------------
server = get_server()
server.client_type = "vue3"
def reset():
server.state.value = 5
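# Page layout: a value readout, a reset button, an indeterminate spinner, a slider bound to "value",
# and a circular progress dial showing that value.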
with DivLayout(server) as layout:
with html.Div(classes="q-pa-md"):
with html.Div(classes="row items-center"):
html.Div("{{ value }}", classes="col-2")
QBtn(label="Hello", classes="col", click=reset)
QCircularProgress(
indeterminate=True,
size="75px",
thickness=0.6,
color="lime",
center_color="grey-8",
classes="q-ma-md col",
)
QSlider(
v_model_number=("value", 0),
min=("1",),
max=("100",),
step=("1",),
classes="col",
)
QCircularProgress(
size="75px",
thickness=0.6,
color="lime",
center_color="grey-8",
classes="q-ma-md col",
value=("value",),
)
server.start()
|
84e74c7b3ac65cfe88ee4aeeef66e3d5313cefd8
|
8f344354a42d0f150eff315a44763038dad521de
|
/openchem/modules/encoders/openchem_encoder.py
|
352c671a8d253c271b939554ddbf384ebacf4f2e
|
[
"MIT"
] |
permissive
|
Mariewelt/OpenChem
|
2fa3c2c6be3bcd9d471d08bba2d45e00f9ecc38d
|
f427076119683bf14152644fb7adf9b32f0cf7e2
|
refs/heads/master
| 2023-06-08T06:35:17.967266
| 2022-04-27T19:27:40
| 2022-04-27T19:27:40
| 140,360,685
| 638
| 114
|
MIT
| 2023-06-06T01:57:19
| 2018-07-10T01:27:33
|
Python
|
UTF-8
|
Python
| false
| false
| 821
|
py
|
openchem_encoder.py
|
import torch
from torch import nn
from openchem.utils.utils import check_params
class OpenChemEncoder(nn.Module):
"""Base class for embedding module"""
def __init__(self, params, use_cuda=None):
super(OpenChemEncoder, self).__init__()
check_params(params, self.get_required_params(), self.get_required_params())
self.params = params
if use_cuda is None:
use_cuda = torch.cuda.is_available()
self.use_cuda = use_cuda
self.input_size = self.params['input_size']
self.encoder_dim = self.params['encoder_dim']
@staticmethod
def get_required_params():
return {'input_size': int, 'encoder_dim': int}
@staticmethod
def get_optional_params():
return {}
def forward(self, inp):
raise NotImplementedError
|
6ca8202e141243b589bbbf4ef35c9d7067ee610e
|
7ed2ef754060465709897be60ff14a0f4e2c9578
|
/delfin/alert_manager/alert_processor.py
|
13ca3ee80b57ed57811764af5b31d3d6b7016287
|
[
"Apache-2.0"
] |
permissive
|
sodafoundation/delfin
|
967b7ff276c20ea546e07538c2b02a7920aaddf4
|
978eff481945203bfbc3d84123e151f836748428
|
refs/heads/master
| 2023-09-04T11:27:21.103714
| 2023-07-13T09:02:14
| 2023-07-13T09:02:14
| 254,367,182
| 220
| 336
|
Apache-2.0
| 2023-09-13T07:04:15
| 2020-04-09T12:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 4,579
|
py
|
alert_processor.py
|
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import threading
from oslo_log import log
from delfin import context
from delfin import coordination
from delfin import db
from delfin import exception
from delfin.common import alert_util
from delfin.drivers import api as driver_manager
from delfin.exporter import base_exporter
from delfin.task_manager import rpcapi
LOG = log.getLogger(__name__)
class AlertProcessor(object):
"""Alert model translation and export functions"""
def __init__(self):
self.driver_manager = driver_manager.API()
self.exporter_manager = base_exporter.AlertExporterManager()
self.task_rpcapi = rpcapi.TaskAPI()
def process_alert_info(self, alert):
"""Fills alert model using driver manager interface."""
ctxt = context.get_admin_context()
storage = db.storage_get(ctxt, alert['storage_id'])
alert_model = {}
try:
alert_model = self.driver_manager.parse_alert(ctxt,
alert['storage_id'],
alert)
# Fill storage specific info
if alert_model:
storage = self.get_storage_from_parsed_alert(
ctxt, storage, alert_model)
alert_util.fill_storage_attributes(alert_model, storage)
except exception.IncompleteTrapInformation as e:
LOG.warning(e)
threading.Thread(target=self.sync_storage_alert,
args=(ctxt, alert['storage_id'])).start()
except exception.AlertSourceNotFound:
LOG.info("Could not identify alert source from parsed alert. "
"Skipping the dispatch of alert")
return
except Exception as e:
LOG.error(e)
raise exception.InvalidResults(
"Failed to fill the alert model from driver.")
# Export to base exporter which handles dispatch for all exporters
if alert_model:
LOG.info("Dispatching one SNMP Trap to {} with sn {}".format(
alert_model['storage_id'], alert_model['serial_number']))
self.exporter_manager.dispatch(ctxt, [alert_model])
def get_storage_from_parsed_alert(self, ctxt, storage, alert_model):
# If parse_alert sets 'serial_number' or 'storage_name' in the
# alert_model, we need to get corresponding storage details
# from the db and fill that in alert_model
storage_sn = alert_model.get('serial_number')
storage_name = alert_model.get('storage_name')
filters = {
"vendor": storage['vendor'],
"model": storage['model'],
}
try:
if storage_sn and storage_sn != storage['serial_number']:
filters['serial_number'] = storage_sn
elif storage_name and storage_name != storage['name']:
filters['name'] = storage_name
else:
return storage
storage_list = db.storage_get_all(ctxt, filters=filters)
if not storage_list:
msg = "Failed to get destination storage for SNMP Trap. " \
"Storage with serial number {} or storage name {} " \
"not found in DB".format(storage_sn, storage_name)
raise exception.AlertSourceNotFound(msg)
db.alert_source_get(ctxt, storage_list[0]['id'])
storage = storage_list[0]
except exception.AlertSourceNotFound:
LOG.info("Storage with serial number {} or name {} "
"is not registered for receiving "
"SNMP Trap".format(storage_sn, storage_name))
raise
return storage
@coordination.synchronized('sync-trap-{storage_id}', blocking=False)
def sync_storage_alert(self, context, storage_id):
time.sleep(10)
self.task_rpcapi.sync_storage_alerts(context, storage_id, None)
|
02332ce35c8f455fd84f567e1d3b2b93ea6a71b9
|
6958f617af0c5a76304ceb1006c77bc70ca0e195
|
/tests/python/test_indices.py
|
025bc897eb4869dd3b418f037f989438fd104619
|
[
"Apache-2.0"
] |
permissive
|
taichi-dev/taichi
|
3fae315a494f1c97392d5b931c939abbbfba1bdc
|
b30b511f55e3d0ebff765ee048d0aaa4ba9e7667
|
refs/heads/master
| 2023-09-02T13:28:18.208792
| 2023-08-23T23:22:43
| 2023-08-23T23:22:43
| 74,660,642
| 17,231
| 1,841
|
Apache-2.0
| 2023-09-14T11:29:32
| 2016-11-24T10:00:05
|
C++
|
UTF-8
|
Python
| false
| false
| 1,979
|
py
|
test_indices.py
|
import pytest
from taichi.lang.misc import get_host_arch_list
import taichi as ti
from tests import test_utils
@test_utils.test(arch=get_host_arch_list())
def test_indices():
a = ti.field(ti.f32, shape=(128, 32, 8))
b = ti.field(ti.f32)
ti.root.dense(ti.j, 32).dense(ti.i, 16).place(b)
mapping_a = a.snode._physical_index_position()
assert mapping_a == {0: 0, 1: 1, 2: 2}
mapping_b = b.snode._physical_index_position()
assert mapping_b == {0: 0, 1: 1}
# Note that b is column-major:
# the virtual first index exposed to the user comes second in memory layout.
@ti.kernel
def fill():
for i, j in b:
b[i, j] = i * 10 + j
@ti.kernel
def get_field_addr(i: ti.i32, j: ti.i32) -> ti.u64:
return ti.get_addr(b, [i, j])
fill()
for i in range(16):
for j in range(32):
assert b[i, j] == i * 10 + j
assert get_field_addr(0, 1) + 4 == get_field_addr(1, 1)
@test_utils.test(arch=get_host_arch_list(), default_ip=ti.i64)
def test_indices_i64():
n = 1024
val = ti.field(dtype=ti.i64, shape=n)
val.fill(1)
@ti.kernel
def prefix_sum():
ti.loop_config(serialize=True)
for i in range(1, 1024):
val[i] += val[i - 1]
prefix_sum()
for i in range(n):
assert val[i] == i + 1
@test_utils.test()
def test_indices_with_matrix():
grid_m = ti.field(dtype=ti.i32, shape=(10, 10))
@ti.kernel
def build_grid():
base = int(ti.Vector([2, 4]))
grid_m[base] = 100
grid_m[int(ti.Vector([1, 1]))] = 10
build_grid()
assert grid_m[1, 1] == 10
assert grid_m[2, 4] == 100
@test_utils.test()
def test_negative_valued_indices():
@ti.kernel
def foo(i: int):
x = ti.Vector([i, i + 1, i + 2])
print(x[:-1])
with pytest.raises(
ti.TaichiSyntaxError,
match="Negative indices are not supported in Taichi kernels.",
):
foo(0)
|
3185b6522729d4498afa597be350c6d92ffbcc19
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-service-control/google/cloud/servicecontrol/__init__.py
|
6b42fc5d3acafc643cede539cadc29594d66efa0
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,516
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.servicecontrol import gapic_version as package_version
__version__ = package_version.__version__
from google.cloud.servicecontrol_v1.services.quota_controller.async_client import (
QuotaControllerAsyncClient,
)
from google.cloud.servicecontrol_v1.services.quota_controller.client import (
QuotaControllerClient,
)
from google.cloud.servicecontrol_v1.services.service_controller.async_client import (
ServiceControllerAsyncClient,
)
from google.cloud.servicecontrol_v1.services.service_controller.client import (
ServiceControllerClient,
)
from google.cloud.servicecontrol_v1.types.check_error import CheckError
from google.cloud.servicecontrol_v1.types.distribution import Distribution
from google.cloud.servicecontrol_v1.types.http_request import HttpRequest
from google.cloud.servicecontrol_v1.types.log_entry import (
LogEntry,
LogEntryOperation,
LogEntrySourceLocation,
)
from google.cloud.servicecontrol_v1.types.metric_value import (
MetricValue,
MetricValueSet,
)
from google.cloud.servicecontrol_v1.types.operation import Operation
from google.cloud.servicecontrol_v1.types.quota_controller import (
AllocateQuotaRequest,
AllocateQuotaResponse,
QuotaError,
QuotaOperation,
)
from google.cloud.servicecontrol_v1.types.service_controller import (
CheckRequest,
CheckResponse,
ReportRequest,
ReportResponse,
)
__all__ = (
"QuotaControllerClient",
"QuotaControllerAsyncClient",
"ServiceControllerClient",
"ServiceControllerAsyncClient",
"CheckError",
"Distribution",
"HttpRequest",
"LogEntry",
"LogEntryOperation",
"LogEntrySourceLocation",
"MetricValue",
"MetricValueSet",
"Operation",
"AllocateQuotaRequest",
"AllocateQuotaResponse",
"QuotaError",
"QuotaOperation",
"CheckRequest",
"CheckResponse",
"ReportRequest",
"ReportResponse",
)
|
d6db50ece21d570be2267a90433cf24fcd03e969
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/DQM/HcalTasks/python/OfflineSourceSequence_pp.py
|
d929ad94d87d0889ec6c9e1539aa7d066d141c83
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 2,704
|
py
|
OfflineSourceSequence_pp.py
|
import FWCore.ParameterSet.Config as cms
#-----------------
# HCAL DQM Offline Source Sequence Definition for pp
# To be used for Offline DQM importing
#-----------------
# import the tasks
from DQM.HcalTasks.DigiTask import digiTask
from DQM.HcalTasks.RawTask import rawTask
from DQM.HcalTasks.TPTask import tpTask
from DQM.HcalTasks.RecHitTask import recHitTask, recHitPreRecoTask
from DQM.HcalTasks.hcalGPUComparisonTask_cfi import hcalGPUComparisonTask
# set processing type to Offine
digiTask.ptype = 1
tpTask.ptype = 1
recHitTask.ptype = 1
rawTask.ptype = 1
recHitPreRecoTask.ptype = 1
hcalGPUComparisonTask.ptype = 1
# set the label for Emulator TP Task
tpTask.tagEmul = "valHcalTriggerPrimitiveDigis"
hcalOfflineSourceSequence = cms.Sequence(
digiTask +
tpTask +
recHitTask +
rawTask )
hcalOnlyOfflineSourceSequence = cms.Sequence(
digiTask +
recHitPreRecoTask +
rawTask )
hcalOnlyOfflineSourceSequenceGPU = cms.Sequence(
digiTask +
recHitTask +
rawTask +
hcalGPUComparisonTask
)
from Configuration.ProcessModifiers.gpuValidationHcal_cff import gpuValidationHcal
gpuValidationHcal.toReplaceWith(hcalOnlyOfflineSourceSequence, hcalOnlyOfflineSourceSequenceGPU)
from Configuration.Eras.Modifier_run2_HCAL_2018_cff import run2_HCAL_2018
run2_HCAL_2018.toModify(hcalGPUComparisonTask,
tagHBHE_ref = "hbheprereco@cpu",
tagHBHE_target = "hbheprereco@cuda"
)
run2_HCAL_2018.toModify(recHitTask,
tagHBHE = "hbheprereco"
)
from Configuration.Eras.Modifier_run3_HB_cff import run3_HB
### reverting the reco tag setting that inherited from run2
run3_HB.toModify(hcalGPUComparisonTask,
tagHBHE_ref = "hbhereco@cpu",
tagHBHE_target = "hbhereco@cuda"
)
run3_HB.toModify(recHitTask,
tagHBHE = "hbhereco"
)
_phase1_hcalOnlyOfflineSourceSequence = hcalOnlyOfflineSourceSequence.copy()
_phase1_hcalOnlyOfflineSourceSequence.replace(recHitPreRecoTask, recHitTask)
run3_HB.toReplaceWith(hcalOnlyOfflineSourceSequence, _phase1_hcalOnlyOfflineSourceSequence)
from Configuration.Eras.Modifier_phase2_hcal_cff import phase2_hcal
_phase2_hcalOfflineSourceSequence = hcalOfflineSourceSequence.copyAndExclude([tpTask,rawTask])
phase2_hcal.toReplaceWith(hcalOfflineSourceSequence, _phase2_hcalOfflineSourceSequence)
phase2_hcal.toModify(digiTask,
tagHBHE = "simHcalDigis:HBHEQIE11DigiCollection",
tagHO = "simHcalDigis",
tagHF = "simHcalDigis:HFQIE10DigiCollection"
)
from Configuration.ProcessModifiers.premix_stage2_cff import premix_stage2
(premix_stage2 & phase2_hcal).toModify(digiTask,
tagHBHE = "DMHcalDigis:HBHEQIE11DigiCollection",
tagHO = "DMHcalDigis",
tagHF = "DMHcalDigis:HFQIE10DigiCollection"
)
|
6cecec7a1ebaa42246729c8480c1e30e27cc9136
|
b8d217ac322a9caa20bf59e9372c9476de3969f5
|
/skbuild/platform_specifics/abstract.py
|
66f8e205bd71ec1b958995e3f215e5a64ca15487
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
scikit-build/scikit-build
|
8e8d112ab2fda9c2bec3b310392d63cff7b64cd1
|
676e110315a971abb856edbd6df0c74293e5ba2d
|
refs/heads/main
| 2023-08-28T14:05:35.538095
| 2023-08-10T13:41:02
| 2023-08-10T13:41:02
| 21,749,516
| 422
| 134
|
NOASSERTION
| 2023-09-05T04:07:05
| 2014-07-11T20:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 12,406
|
py
|
abstract.py
|
"""This module defines objects useful to discover which CMake generator is
supported on the current platform."""
from __future__ import annotations
import os
import shutil
import subprocess
import textwrap
from typing import Iterable, Mapping
from ..constants import CMAKE_DEFAULT_EXECUTABLE
from ..exceptions import SKBuildGeneratorNotFoundError
from ..utils import push_dir
test_folder = "_cmake_test_compile"
class CMakePlatform:
"""This class encapsulates the logic allowing to get the identifier of a
working CMake generator.
Derived class should at least set :attr:`default_generators`.
"""
def __init__(self) -> None:
# default_generators is a property for mocking in tests
self._default_generators: list[CMakeGenerator] = []
self.architecture: str | None = None
@property
def default_generators(self) -> list[CMakeGenerator]:
"""List of generators considered by :func:`get_best_generator()`."""
return self._default_generators
@default_generators.setter
def default_generators(self, generators: list[CMakeGenerator]) -> None:
self._default_generators = generators
@property
def generator_installation_help(self) -> str:
"""Return message guiding the user for installing a valid toolchain."""
raise NotImplementedError() # pragma: no cover
@staticmethod
def write_test_cmakelist(languages: Iterable[str]) -> None:
"""Write a minimal ``CMakeLists.txt`` useful to check if the
requested ``languages`` are supported."""
if not os.path.exists(test_folder):
os.makedirs(test_folder)
with open(f"{test_folder}/CMakeLists.txt", "w", encoding="utf-8") as f:
f.write("cmake_minimum_required(VERSION 2.8.12)\n")
f.write("PROJECT(compiler_test NONE)\n")
for language in languages:
f.write(f"ENABLE_LANGUAGE({language:s})\n")
f.write(
'if("${_SKBUILD_FORCE_MSVC}")\n'
' math(EXPR FORCE_MAX "${_SKBUILD_FORCE_MSVC}+9")\n'
' math(EXPR FORCE_MIN "${_SKBUILD_FORCE_MSVC}")\n'
" if(NOT MSVC)\n"
' message(FATAL_ERROR "MSVC is required to pass this check.")\n'
" elseif(MSVC_VERSION LESS FORCE_MIN OR MSVC_VERSION GREATER FORCE_MAX)\n"
' message(FATAL_ERROR "MSVC ${MSVC_VERSION} does pass this check.")\n'
" endif()\n"
"endif()\n"
)
@staticmethod
def cleanup_test() -> None:
"""Delete test project directory."""
if os.path.exists(test_folder):
shutil.rmtree(test_folder)
def get_generator(self, generator_name: str) -> CMakeGenerator:
"""Loop over generators and return the first that matches the given
name.
"""
for default_generator in self.default_generators:
if default_generator.name == generator_name:
return default_generator
return CMakeGenerator(generator_name)
def get_generators(self, generator_name: str) -> list[CMakeGenerator]:
"""Loop over generators and return all that match the given name."""
return [
default_generator
for default_generator in self.default_generators
if default_generator.name == generator_name
]
# TODO: this method name is not great. Does anyone have a better idea for
# renaming it?
def get_best_generator(
self,
generator_name: str | None = None,
skip_generator_test: bool = False,
languages: Iterable[str] = ("CXX", "C"),
cleanup: bool = True,
cmake_executable: str = CMAKE_DEFAULT_EXECUTABLE,
cmake_args: Iterable[str] = (),
architecture: str | None = None,
) -> CMakeGenerator:
"""Loop over generators to find one that works by configuring
and compiling a test project.
:param generator_name: If provided, uses only provided generator, \
instead of trying :attr:`default_generators`.
:type generator_name: str | None
:param skip_generator_test: If set to True and if a generator name is \
specified, the generator test is skipped. If no generator_name is specified \
and the option is set to True, the first available generator is used.
:type skip_generator_test: bool
:param languages: The languages you'll need for your project, in terms \
that CMake recognizes.
:type languages: tuple
:param cleanup: If True, cleans up temporary folder used to test \
generators. Set to False for debugging to see CMake's output files.
:type cleanup: bool
:param cmake_executable: Path to CMake executable used to configure \
and build the test project used to evaluate if a generator is working.
:type cmake_executable: str
:param cmake_args: List of CMake arguments to use when configuring \
the test project. Only arguments starting with ``-DCMAKE_`` are \
used.
:type cmake_args: tuple
:return: CMake Generator object
:rtype: :class:`CMakeGenerator` or None
:raises skbuild.exceptions.SKBuildGeneratorNotFoundError:
"""
candidate_generators: list[CMakeGenerator] = []
if generator_name is None:
candidate_generators = self.default_generators
else:
# Lookup CMakeGenerator by name. Doing this allow to get a
# generator object with its ``env`` property appropriately
# initialized.
# MSVC should be used in "-A arch" form
if architecture is not None:
self.architecture = architecture
# Support classic names for generators
generator_name, self.architecture = _parse_legacy_generator_name(generator_name, self.architecture)
candidate_generators = []
for default_generator in self.default_generators:
if default_generator.name == generator_name:
candidate_generators.append(default_generator)
if not candidate_generators:
candidate_generators = [CMakeGenerator(generator_name)]
self.write_test_cmakelist(languages)
working_generator: CMakeGenerator | None
if skip_generator_test:
working_generator = candidate_generators[0]
else:
working_generator = self.compile_test_cmakelist(cmake_executable, candidate_generators, cmake_args)
if working_generator is None:
line = "*" * 80
installation_help = self.generator_installation_help
msg = textwrap.dedent(
f"""\
{line}
scikit-build could not get a working generator for your system. Aborting build.
{installation_help}
{line}"""
)
raise SKBuildGeneratorNotFoundError(msg)
if cleanup:
CMakePlatform.cleanup_test()
return working_generator
@staticmethod
@push_dir(directory=test_folder)
def compile_test_cmakelist(
cmake_exe_path: str, candidate_generators: Iterable[CMakeGenerator], cmake_args: Iterable[str] = ()
) -> CMakeGenerator | None:
"""Attempt to configure the test project with
each :class:`CMakeGenerator` from ``candidate_generators``.
Only cmake arguments starting with ``-DCMAKE_`` are used to configure
the test project.
The function returns the first generator allowing to successfully
configure the test project using ``cmake_exe_path``."""
# working generator is the first generator we find that works.
working_generator = None
# Include only -DCMAKE_* arguments
cmake_args = [arg for arg in cmake_args if arg.startswith("-DCMAKE_")]
# Do not complain about unused CMake arguments
cmake_args.insert(0, "--no-warn-unused-cli")
def _generator_discovery_status_msg(_generator: CMakeGenerator, suffix: str = "") -> None:
outer = "-" * 80
inner = ["-" * ((idx * 5) - 3) for idx in range(1, 8)]
print("\n".join(inner) if suffix else outer)
print(f"-- Trying {_generator.description!r} generator{suffix}")
print(outer if suffix else "\n".join(inner[::-1]), flush=True)
for generator in candidate_generators:
print("\n", flush=True)
_generator_discovery_status_msg(generator)
# clear the cache for each attempted generator type
if os.path.isdir("build"):
shutil.rmtree("build")
with push_dir("build", make_directory=True):
# call cmake to see if the compiler specified by this
# generator works for the specified languages
cmd = [cmake_exe_path, "../", "-G", generator.name]
if generator.toolset:
cmd.extend(["-T", generator.toolset])
if generator.architecture and "Visual Studio" in generator.name:
cmd.extend(["-A", generator.architecture])
cmd.extend(cmake_args)
cmd.extend(generator.args)
status = subprocess.run(cmd, env=generator.env, check=False).returncode
msg = "success" if status == 0 else "failure"
_generator_discovery_status_msg(generator, f" - {msg}")
print(flush=True)
# cmake succeeded, this generator should work
if status == 0:
# we have a working generator, don't bother looking for more
working_generator = generator
break
return working_generator
class CMakeGenerator:
"""Represents a CMake generator.
.. automethod:: __init__
"""
def __init__(
self,
name: str,
env: Mapping[str, str] | None = None,
toolset: str | None = None,
arch: str | None = None,
args: Iterable[str] | None = None,
) -> None:
"""Instantiate a generator object with the given ``name``.
By default, ``os.environ`` is associated with the generator. Dictionary
passed as ``env`` parameter will be merged with ``os.environ``. If an
environment variable is set in both ``os.environ`` and ``env``, the
variable in ``env`` is used.
Some CMake generators support a ``toolset`` specification to tell the native
build system how to choose a compiler. You can also include CMake arguments.
"""
self._generator_name = name
self.args = list(args or [])
self.env = dict(list(os.environ.items()) + list(env.items() if env else []))
self._generator_toolset = toolset
self._generator_architecture = arch
description_arch = name if arch is None else f"{name} {arch}"
if toolset is None:
self._description = description_arch
else:
self._description = f"{description_arch} {toolset}"
@property
def name(self) -> str:
"""Name of CMake generator."""
return self._generator_name
@property
def toolset(self) -> str | None:
"""Toolset specification associated with the CMake generator."""
return self._generator_toolset
@property
def architecture(self) -> str | None:
"""Architecture associated with the CMake generator."""
return self._generator_architecture
@property
def description(self) -> str:
"""Name of CMake generator with properties describing the environment (e.g toolset)"""
return self._description
def _parse_legacy_generator_name(generator_name: str, arch: str | None) -> tuple[str, str | None]:
"""
Support classic names for MSVC generators. Architecture is stripped from
the name and "arch" is replaced with the arch string if a legacy name is
given.
"""
if generator_name.startswith("Visual Studio"):
if generator_name.endswith(" Win64"):
arch = "x64"
generator_name = generator_name[:-6]
elif generator_name.endswith(" ARM"):
arch = "ARM"
generator_name = generator_name[:-4]
return generator_name, arch
|
97a7d2af41c249d3bada8e7f9d193b93206cbe33
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/subscription/azure-mgmt-subscription/tests/test_mgmt_subscription.py
|
51ef20a0e326a6049e18a92bc08299f887af3755
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 990
|
py
|
test_mgmt_subscription.py
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.mgmt.subscription
from azure.mgmt.subscription.models import *
from devtools_testutils import AzureMgmtRecordedTestCase, recorded_by_proxy
class TestMgmtSubscription(AzureMgmtRecordedTestCase):
def setup_method(self, method):
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.subscription.SubscriptionClient
)
@recorded_by_proxy
def test_subscriptions_list(self):
result = self.mgmt_client.subscriptions.list()
assert list(result) is not None
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
3650888a71dab2389a3a88576faae212e9e94758
|
9efca95a55cb4df52d895d42f1ec10331516a734
|
/tools/c7n_kube/tests/test_custom_resource.py
|
98f76a81df1051d357d456d5d625917a1f489ece
|
[
"Apache-2.0"
] |
permissive
|
cloud-custodian/cloud-custodian
|
519e602abe00c642786441b64cc40857ef5bc9de
|
27563cf4571040f923124e1acb2463f11e372225
|
refs/heads/main
| 2023-09-04T10:54:55.963703
| 2023-09-01T17:40:17
| 2023-09-01T17:40:17
| 52,837,350
| 3,327
| 1,096
|
Apache-2.0
| 2023-09-14T14:03:30
| 2016-03-01T01:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,354
|
py
|
test_custom_resource.py
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
#
from c7n.exceptions import PolicyValidationError
from common_kube import KubeTest
class TestCustomResource(KubeTest):
def test_custom_cluster_resource_query(self):
factory = self.replay_flight_data()
policy = self.load_policy(
{
"name": "custom-resources",
"resource": "k8s.custom-cluster-resource",
"query": [
{
"group": "stable.example.com",
"version": "v1",
"plural": "crontabscluster",
}
],
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["apiVersion"], "stable.example.com/v1")
self.assertEqual(resources[0]["kind"], "CronTabCluster")
def test_custom_namespaced_resource_query(self):
factory = self.replay_flight_data()
policy = self.load_policy(
{
"name": "custom-resources",
"resource": "k8s.custom-namespaced-resource",
"query": [
{
"group": "stable.example.com",
"version": "v1",
"plural": "crontabs",
}
],
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["apiVersion"], "stable.example.com/v1")
self.assertEqual(resources[0]["kind"], "CronTab")
def test_custom_resource_validation(self):
self.assertRaises(
PolicyValidationError,
self.load_policy,
{
"name": "custom-resources",
"resource": "k8s.custom-namespaced-resource",
},
validate=True,
)
self.assertRaises(
PolicyValidationError,
self.load_policy,
{
"name": "custom-resources",
"resource": "k8s.custom-namespaced-resource",
"query": [{"bad": "value"}],
},
validate=True,
)
|
c68fae943809a395f8bfdc6410766fba5915bf4a
|
6c8305ea1df9687df1c0d2b0ace56733516c6322
|
/readthedocs/core/views/__init__.py
|
e4a4e3dba7d0d5b29def4fb2e6494dcb35fbb5c4
|
[
"MIT"
] |
permissive
|
readthedocs/readthedocs.org
|
9806083aa744c2308267919480a692e1e003e45d
|
bf88ce6d1085d922322a5fadce63a22c5544c830
|
refs/heads/main
| 2023-09-05T20:22:34.281891
| 2023-09-05T12:41:52
| 2023-09-05T12:41:52
| 841,835
| 2,894
| 1,509
|
MIT
| 2023-09-14T20:36:00
| 2010-08-16T19:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,487
|
py
|
__init__.py
|
"""
Core views.
Including the main homepage, documentation and header rendering,
and server errors.
"""
import structlog
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import TemplateView, View
from readthedocs.core.mixins import CDNCacheControlMixin, PrivateViewMixin
log = structlog.get_logger(__name__)
class NoProjectException(Exception):
pass
class HealthCheckView(CDNCacheControlMixin, View):
# Never cache this view, we always want to get the live response from the server.
# In production we should configure the health check to hit the LB directly,
# but it's useful to be careful here in case of a misconfiguration.
cache_response = False
def get(self, request, *_, **__):
return JsonResponse({"status": 200}, status=200)
class HomepageView(TemplateView):
"""
Conditionally show the home page or redirect to the login page.
On the current dashboard, this shows the application homepage. However, we
no longer require this page in our application as we have a similar page on
our website. Instead, redirect to our login page on the new dashboard.
"""
template_name = "homepage.html"
def get(self, request, *args, **kwargs):
# Redirect to login page for new dashboard
if settings.RTD_EXT_THEME_ENABLED:
return redirect(reverse("account_login"))
# Redirect to user dashboard for logged in users
if request.user.is_authenticated:
return redirect("projects_dashboard")
# Redirect to ``about.`` in production
if not settings.DEBUG:
query_string = f"?ref={settings.PRODUCTION_DOMAIN}"
if request.META["QUERY_STRING"]:
# Small hack to not append `&` to URLs without a query_string
query_string += "&" + request.META["QUERY_STRING"]
# Do a 302 here so that it varies on logged in status
return redirect(
f"https://about.readthedocs.com{query_string}", permanent=False
)
# Show the homepage for local dev
return super().get(request, *args, **kwargs)
class SupportView(PrivateViewMixin, TemplateView):
template_name = "support/index.html"
def get_context_data(self, **kwargs):
"""Pass along endpoint for support form."""
context = super().get_context_data(**kwargs)
context["SUPPORT_FORM_ENDPOINT"] = settings.SUPPORT_FORM_ENDPOINT
return context
def server_error_500(request, template_name="500.html"):
"""A simple 500 handler so we get media."""
r = render(request, template_name)
r.status_code = 500
return r
def do_not_track(request):
dnt_header = request.headers.get("Dnt")
# https://w3c.github.io/dnt/drafts/tracking-dnt.html#status-representation
return JsonResponse( # pylint: disable=redundant-content-type-for-json-response
{
"policy": "https://docs.readthedocs.io/en/latest/privacy-policy.html",
"same-party": [
"readthedocs.org",
"readthedocs.com",
"readthedocs.io", # .org Documentation Sites
"readthedocs-hosted.com", # .com Documentation Sites
],
"tracking": "N" if dnt_header == "1" else "T",
},
content_type="application/tracking-status+json",
)
|
8c93dd7878fd70579a8f90e3b32c1f72e56dbf0f
|
6bbbb8237c93f9b1f302010a65d6ecb6f286f23b
|
/websauna/system/core/loggingcapture.py
|
4575a81f467523e6f9d1d11d1f0e07502c0d7334
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
websauna/websauna
|
ea60d5a6aef59b3725bace0d188dacea72574e63
|
a57de54fb8a3fae859f24f373f0292e1e4b3c344
|
refs/heads/master
| 2023-08-07T08:16:51.664340
| 2020-06-06T19:28:18
| 2020-06-06T19:28:18
| 49,773,298
| 294
| 74
|
NOASSERTION
| 2020-12-30T01:48:27
| 2016-01-16T12:55:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,077
|
py
|
loggingcapture.py
|
"""Extra logging context."""
# Standard Library
import logging
# Pyramid
from pyramid.request import Request
# SQLAlchemy
import sqlalchemy
logger = logging.getLogger(__name__)
def is_good_sqlalchemy_object(obj):
"""Let's not cause exceptions in exception handler/logger."""
state = sqlalchemy.inspect(obj)
return not state.detached
def get_logging_user_context(request: Request) -> dict:
"""Capture some extra user-specific information from the logging context.
:return: Dict containing human readable user parameters to help identify the user on this request
"""
try:
user = getattr(request, "user", None)
except sqlalchemy.exc.InvalidRequestError:
# We had a rollback and could not capture the user,
# because user has been rolled back
user = None
user_context = {}
try:
if user:
if is_good_sqlalchemy_object(user):
# Add additional user context to the logged exception
username = getattr(user, "friendly_name", None) or getattr(user, "username", None) or str(user)
email = getattr(user, "email", None)
user_context.update(dict(user=username, email=email))
else:
user_context.update(dict(detached=True))
# All the session data as JSON
session = getattr(request, "session", None)
if session:
session = dict(session.items())
user_context.update(dict(session=session))
else:
user_context.update(dict(session="No session data available in internal_server_error()"))
user_context["ip"] = request.client_addr
# TODO: Make this more generic
# https://support.cloudflare.com/hc/en-us/articles/200168236-What-does-CloudFlare-IP-Geolocation-do-
user_context["cloudflare_country"] = request.headers.get("cf-ipcountry")
return user_context
except Exception as e:
logger.exception(e)
logger.error("Failed to capture user context %s", request)
return {}
|
c5c4bd2d62f78e723e8d3fa5f48ab164b6ec6812
|
2a093824e198f9fd5758f38f388aea792867fc54
|
/JEECMS/Hack JeeCMS Sign/hacksign.py
|
82f3a13f72d06a3e03bb81217371eec3eae56928
|
[] |
no_license
|
coffeehb/tools
|
00585a00311758dff821725926349cf696a70ace
|
e8841461cd0fc8392086f35ea4ed72de74a6ce85
|
refs/heads/master
| 2022-10-25T02:07:00.571576
| 2022-10-03T01:20:43
| 2022-10-03T01:20:43
| 58,847,026
| 307
| 172
| null | 2019-06-14T07:54:59
| 2016-05-15T06:12:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,205
|
py
|
hacksign.py
|
from burp import IBurpExtender
from burp import IHttpListener
from java.io import PrintWriter
import hashlib
import urllib
print "Hack Jeecms Sign By Nerd."
class BurpExtender(IBurpExtender, IHttpListener):
def registerExtenderCallbacks(self, callbacks):
self._callbacks = callbacks
self._helpers = callbacks.getHelpers()
callbacks.setExtensionName("Hack JeeCMS Sign")
callbacks.registerHttpListener(self)
self.stdout = PrintWriter(callbacks.getStdout(), True)
self.stderr = PrintWriter(callbacks.getStderr(), True)
        callbacks.issueAlert("Loaded Successfully.")
def processHttpMessage(self, toolFlag, messageIsRequest, currentRequest):
if messageIsRequest:
requestInfo = self._helpers.analyzeRequest(currentRequest)
self.headers = list(requestInfo.getHeaders())
hook_host = requestInfo.getUrl().getHost()
bodyBytes = currentRequest.getRequest()[requestInfo.getBodyOffset():]
self.body = self._helpers.bytesToString(bodyBytes)
o,n = self.update_sign(urllib.unquote(self.body))
self.body = self.body.replace(o,n)
newMessage = self._helpers.buildHttpMessage(self.headers, self.body)
currentRequest.setRequest(newMessage)
# Process responses
else:
pass
    def update_sign(self, body=""):
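        # Rebuild the request signature: sort the parameters, concatenate "k=v&" pairs while skipping the old sign,
        # append "key=<appKey>", then take the uppercase MD5 digest.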
try:
old_sign = ""
            # default appKey
appKey = "uicxsXYso7DJxlrFdgQnVVXW5OCzU74h"
hash_param = ""
param_list = body.split("&")
temp_dict = {}
for pa in param_list:
t = pa.split("=")
temp_dict[t[0]] = t[1]
tmmmm = temp_dict.items()
tmmmm.sort()
for (k, v) in tmmmm:
if k == "sign":
old_sign = v
print "old sign = ",v
continue
hash_param += "%s=%s&" % (k, v)
hash_param += "key=" + appKey
sign = hashlib.md5(hash_param).hexdigest()
return old_sign,sign.upper()
except Exception, e:
return "",""
|
b0ad7a9cbfc983ad927438b0821a10dcf79c1e7c
|
ac632b18245ad0f21c40f7ffaf73f2248830e6b9
|
/tests/test_yaspin.py
|
6fa9dcdb947be3e38bf5030ace6fa5b68c763ff1
|
[
"MIT"
] |
permissive
|
pavdmyt/yaspin
|
bd44ea4366a95597d6400aa4b84693879c2344fd
|
5eeecda895ec0b6fd5a7ca81bbbbd734fdba47c5
|
refs/heads/master
| 2023-08-29T11:18:41.546218
| 2023-08-19T16:22:49
| 2023-08-19T16:22:49
| 109,303,405
| 694
| 41
|
MIT
| 2023-09-05T12:33:01
| 2017-11-02T18:27:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
test_yaspin.py
|
"""
tests.test_yaspin
~~~~~~~~~~~~~~~~~
Basic unittests.
"""
from collections import namedtuple
import pytest
from yaspin import Spinner, yaspin
from yaspin.core import default_spinner
@pytest.mark.parametrize(
"spinner, expected",
[
# None
(None, default_spinner),
# hasattr(spinner, "frames") and not hasattr(spinner, "interval")
(namedtuple("Spinner", "frames")("-\\|/"), default_spinner),
# not hasattr(spinner, "frames") and hasattr(spinner, "interval")
(namedtuple("Spinner", "interval")(42), default_spinner),
# Both attrs, not set
(Spinner("", 0), default_spinner),
# Both attrs, not frames
(Spinner("", 42), default_spinner),
# Both attrs, not interval
(Spinner("-\\|/", 0), default_spinner),
# Both attrs, are set
(Spinner("-\\|/", 42), Spinner("-\\|/", 42)),
],
ids=[
"None",
"no `interval` attr",
"no `frames` attr",
"attrs not set",
"`frames` not set",
"`interval` not set",
"both attrs are set",
],
)
def test_set_spinner(spinner, expected):
sp = yaspin(spinner)
assert sp.spinner == expected
|
598e556afebbfe2081beadccd754512e4b9ed7c4
|
a902290fb3b911676358ae4d93f83061a6c2bd0f
|
/InvenTree/company/migrations/0007_remove_supplierpart_lead_time.py
|
5f050d58136fe9ed531fbd449fde41b74121a8c6
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
inventree/InvenTree
|
a15e54182c9bfafdf5348cc9a66da1004e23e760
|
e88a8e99a5f0b201c67a95cba097c729f090d5e2
|
refs/heads/master
| 2023-09-03T19:32:35.438375
| 2023-08-30T00:25:40
| 2023-08-30T00:25:40
| 85,894,461
| 3,077
| 549
|
MIT
| 2023-09-14T14:21:01
| 2017-03-23T01:44:10
|
Python
|
UTF-8
|
Python
| false
| false
| 346
|
py
|
0007_remove_supplierpart_lead_time.py
|
# Generated by Django 2.2.5 on 2019-09-12 12:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('company', '0006_supplierpricebreak_currency'),
]
operations = [
migrations.RemoveField(
model_name='supplierpart',
name='lead_time',
),
]
|
8954f110a623158e9e61bf606ba480c7e6134c7a
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CondTools/L1Trigger/python/L1TriggerKeyListRcdSource_cfi.py
|
c75feeae720bccfcf03fc3e270c6622eaef48592
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 226
|
py
|
L1TriggerKeyListRcdSource_cfi.py
|
import FWCore.ParameterSet.Config as cms
L1TriggerKeyListRcdSource = cms.ESSource("EmptyESSource",
recordName = cms.string('L1TriggerKeyListRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
|
712ecfc2360f4ed24c858a27ce6b239f05d2af4f
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/PyAbstractClassInspection/overriddenAsField.py
|
5d79ab73af50bb985f09bebe7765345c654a8da1
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
overriddenAsField.py
|
import abc
class A(object):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def foo(self):
pass
class C(A):
foo = 'bar'
|
d2fafec4bbcb6742a9f33ff612373c961cbc96df
|
9c268aa04ab8b359d11399f94a30c8f4fe171a0c
|
/lib/axis/tb/axis_ep.py
|
33298ea354b5d768113e695b8ef9f9be14b053ef
|
[
"MIT"
] |
permissive
|
alexforencich/verilog-ethernet
|
e41586b9214e66341f3eace03da2baa9c004da89
|
b316c6764e083823f95f52b3f324fccee4f12fa0
|
refs/heads/master
| 2023-09-03T00:58:09.380285
| 2023-08-26T19:44:50
| 2023-08-26T19:44:50
| 26,883,874
| 1,690
| 530
|
MIT
| 2023-08-25T05:59:58
| 2014-11-19T22:04:53
|
Verilog
|
UTF-8
|
Python
| false
| false
| 18,151
|
py
|
axis_ep.py
|
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
skip_asserts = False
class AXIStreamFrame(object):
def __init__(self, data=b'', keep=None, id=None, dest=None, user=None, last_cycle_user=None):
self.B = 0
self.N = 8
self.M = 1
self.WL = 8
self.data = b''
self.keep = None
self.id = 0
self.dest = 0
self.user = None
self.last_cycle_user = None
if type(data) in (bytes, bytearray):
self.data = bytearray(data)
self.keep = keep
self.id = id
self.dest = dest
self.user = user
self.last_cycle_user = last_cycle_user
elif type(data) is AXIStreamFrame:
self.N = data.N
self.WL = data.WL
if type(data.data) is bytearray:
self.data = bytearray(data.data)
else:
self.data = list(data.data)
if data.keep is not None:
self.keep = list(data.keep)
if data.id is not None:
if type(data.id) in (int, bool):
self.id = data.id
else:
self.id = list(data.id)
if data.dest is not None:
if type(data.dest) in (int, bool):
self.dest = data.dest
else:
self.dest = list(data.dest)
if data.user is not None:
if type(data.user) in (int, bool):
self.user = data.user
else:
self.user = list(data.user)
self.last_cycle_user = data.last_cycle_user
else:
self.data = list(data)
self.keep = keep
self.id = id
self.dest = dest
self.user = user
self.last_cycle_user = last_cycle_user
def build(self):
if self.data is None:
return
f = list(self.data)
tdata = []
tkeep = []
tid = []
tdest = []
tuser = []
i = 0
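        # Pack the payload into bus words: each cycle carries up to M lanes of WL bits, with tkeep marking valid lanes.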
while len(f) > 0:
if self.B == 0:
data = 0
keep = 0
for j in range(self.M):
data = data | (f.pop(0) << (j*self.WL))
keep = keep | (1 << j)
if len(f) == 0: break
tdata.append(data)
if self.keep is None:
tkeep.append(keep)
else:
tkeep.append(self.keep[i])
else:
# multiple tdata signals
data = 0
tdata.append(f.pop(0))
tkeep.append(0)
if self.id is None:
tid.append(0)
elif type(self.id) is int:
tid.append(self.id)
else:
tid.append(self.id[i])
if self.dest is None:
tdest.append(0)
elif type(self.dest) is int:
tdest.append(self.dest)
else:
tdest.append(self.dest[i])
if self.user is None:
tuser.append(0)
elif type(self.user) is int:
tuser.append(self.user)
else:
tuser.append(self.user[i])
i += 1
if self.last_cycle_user:
tuser[-1] = self.last_cycle_user
return tdata, tkeep, tid, tdest, tuser
def parse(self, tdata, tkeep, tid, tdest, tuser):
if tdata is None or tkeep is None or tuser is None:
return
if len(tdata) != len(tkeep) or len(tdata) != len(tid) or len(tdata) != len(tdest) or len(tdata) != len(tuser):
raise Exception("Invalid data")
self.data = []
self.keep = []
self.id = []
self.dest = []
self.user = []
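        # Unpack the captured bus words back into the payload; for a single-tdata bus only lanes flagged in tkeep are kept.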
if self.B == 0:
mask = 2**self.WL-1
for i in range(len(tdata)):
for j in range(self.M):
if tkeep[i] & (1 << j):
self.data.append((tdata[i] >> (j*self.WL)) & mask)
self.keep.append(tkeep[i])
self.id.append(tid[i])
self.dest.append(tdest[i])
self.user.append(tuser[i])
else:
for i in range(len(tdata)):
self.data.append(tdata[i])
self.keep.append(tkeep[i])
self.id.append(tid[i])
self.dest.append(tdest[i])
self.user.append(tuser[i])
if self.WL == 8:
self.data = bytearray(self.data)
self.last_cycle_user = self.user[-1]
def __eq__(self, other):
if not isinstance(other, AXIStreamFrame):
return False
if self.data != other.data:
return False
if self.keep is not None and other.keep is not None:
if self.keep != other.keep:
return False
if self.id is not None and other.id is not None:
if type(self.id) in (int, bool) and type(other.id) is list:
for k in other.id:
if self.id != k:
return False
elif type(other.id) in (int, bool) and type(self.id) is list:
for k in self.id:
if other.id != k:
return False
elif self.id != other.id:
return False
if self.dest is not None and other.dest is not None:
if type(self.dest) in (int, bool) and type(other.dest) is list:
for k in other.dest:
if self.dest != k:
return False
elif type(other.dest) in (int, bool) and type(self.dest) is list:
for k in self.dest:
if other.dest != k:
return False
elif self.dest != other.dest:
return False
if self.last_cycle_user is not None and other.last_cycle_user is not None:
if self.last_cycle_user != other.last_cycle_user:
return False
if self.user is not None and other.user is not None:
if type(self.user) in (int, bool) and type(other.user) is list:
for k in other.user[:-1]:
if self.user != k:
return False
elif type(other.user) in (int, bool) and type(self.user) is list:
for k in self.user[:-1]:
if other.user != k:
return False
elif self.user != other.user:
return False
else:
if self.user is not None and other.user is not None:
if type(self.user) in (int, bool) and type(other.user) is list:
for k in other.user:
if self.user != k:
return False
elif type(other.user) in (int, bool) and type(self.user) is list:
for k in self.user:
if other.user != k:
return False
elif self.user != other.user:
return False
return True
def __repr__(self):
return (
('AXIStreamFrame(data=%s, ' % repr(self.data)) +
('keep=%s, ' % repr(self.keep)) +
('id=%s, ' % repr(self.id)) +
('dest=%s, ' % repr(self.dest)) +
('user=%s, ' % repr(self.user)) +
('last_cycle_user=%s)' % repr(self.last_cycle_user))
)
def __iter__(self):
return self.data.__iter__()
class AXIStreamSource(object):
def __init__(self):
self.active = False
self.has_logic = False
self.queue = []
def send(self, frame):
self.queue.append(AXIStreamFrame(frame))
def write(self, data):
self.send(data)
def count(self):
return len(self.queue)
def empty(self):
return not self.queue
def idle(self):
return not self.queue and not self.active
def wait(self):
while not self.idle():
yield self.clk.posedge
def create_logic(self,
clk,
rst,
tdata=None,
tkeep=Signal(bool(True)),
tvalid=Signal(bool(False)),
tready=Signal(bool(True)),
tlast=Signal(bool(False)),
tid=Signal(intbv(0)),
tdest=Signal(intbv(0)),
tuser=Signal(intbv(0)),
pause=0,
name=None
):
assert not self.has_logic
self.has_logic = True
@instance
def logic():
data = []
keep = []
id = []
dest = []
user = []
self.active = False
B = 0
N = len(tdata)
M = len(tkeep)
WL = int((len(tdata)+M-1)/M)
if type(tdata) is list or type(tdata) is tuple:
# multiple tdata signals
B = len(tdata)
N = [len(b) for b in tdata]
M = 1
WL = [1]*B
while True:
yield clk.posedge, rst.posedge
if rst:
data = []
keep = []
id = []
dest = []
user = []
self.active = False
if B > 0:
for s in tdata:
s.next = 0
else:
tdata.next = 0
tkeep.next = 0
tid.next = 0
tdest.next = 0
tuser.next = False
tvalid.next = False
tlast.next = False
else:
tvalid.next = self.active and (tvalid or not pause)
if tready and tvalid:
if len(data) > 0:
if B > 0:
l = data.pop(0)
for i in range(B):
tdata[i].next = l[i]
else:
tdata.next = data.pop(0)
tkeep.next = keep.pop(0)
tid.next = id.pop(0)
tdest.next = dest.pop(0)
tuser.next = user.pop(0)
tvalid.next = not pause
tlast.next = len(data) == 0
else:
tvalid.next = False
tlast.next = False
self.active = False
if not self.active and self.queue:
frame = self.queue.pop(0)
frame.B = B
frame.N = N
frame.M = M
frame.WL = WL
data, keep, id, dest, user = frame.build()
if name is not None:
print("[%s] Sending frame %s" % (name, repr(frame)))
if B > 0:
l = data.pop(0)
for i in range(B):
tdata[i].next = l[i]
else:
tdata.next = data.pop(0)
tkeep.next = keep.pop(0)
tid.next = id.pop(0)
tdest.next = dest.pop(0)
tuser.next = user.pop(0)
tvalid.next = not pause
tlast.next = len(data) == 0
self.active = True
return instances()
class AXIStreamSink(object):
def __init__(self):
self.active = False
self.has_logic = False
self.queue = []
self.read_queue = []
self.sync = Signal(intbv(0))
def recv(self):
if self.queue:
return self.queue.pop(0)
return None
def read(self, count=-1):
while self.queue:
self.read_queue.extend(self.queue.pop(0).data)
if count < 0:
count = len(self.read_queue)
data = self.read_queue[:count]
del self.read_queue[:count]
return data
def count(self):
return len(self.queue)
def empty(self):
return not self.queue
def idle(self):
return not self.active
def wait(self, timeout=0):
yield delay(0)
if self.queue:
return
if timeout:
yield self.sync, delay(timeout)
else:
yield self.sync
def create_logic(self,
clk,
rst,
tdata=None,
tkeep=Signal(bool(True)),
tvalid=Signal(bool(False)),
tready=Signal(bool(True)),
tlast=Signal(bool(True)),
tid=Signal(intbv(0)),
tdest=Signal(intbv(0)),
tuser=Signal(intbv(0)),
pause=0,
name=None
):
assert not self.has_logic
self.has_logic = True
tready_int = Signal(bool(False))
tvalid_int = Signal(bool(False))
@always_comb
def pause_logic():
tready.next = tready_int and not pause
tvalid_int.next = tvalid and not pause
@instance
def logic():
data = []
keep = []
id = []
dest = []
user = []
B = 0
N = len(tdata)
M = len(tkeep)
WL = int((len(tdata)+M-1)/M)
first = True
if type(tdata) is list or type(tdata) is tuple:
# multiple tdata signals
B = len(tdata)
N = [len(b) for b in tdata]
M = 1
WL = [1]*B
while True:
yield clk.posedge, rst.posedge
if rst:
tready_int.next = False
data = []
keep = []
id = []
dest = []
user = []
first = True
self.active = False
else:
tready_int.next = True
if tvalid_int:
if not skip_asserts:
# zero tkeep not allowed
assert int(tkeep) != 0
# tkeep must be contiguous
# i.e. 0b00011110 allowed, but 0b00011010 not allowed
b = int(tkeep)
while b & 1 == 0:
b = b >> 1
while b & 1 == 1:
b = b >> 1
assert b == 0
# tkeep must not have gaps across cycles
if not first:
# not first cycle; lowest bit must be set
assert int(tkeep) & 1
if not tlast:
# not last cycle; highest bit must be set
assert int(tkeep) & (1 << len(tkeep)-1)
if B > 0:
l = []
for i in range(B):
l.append(int(tdata[i]))
data.append(l)
else:
data.append(int(tdata))
keep.append(int(tkeep))
id.append(int(tid))
dest.append(int(tdest))
user.append(int(tuser))
first = False
self.active = True
if tlast:
frame = AXIStreamFrame()
frame.B = B
frame.N = N
frame.M = M
frame.WL = WL
frame.parse(data, keep, id, dest, user)
self.queue.append(frame)
self.sync.next = not self.sync
self.active = False
if name is not None:
print("[%s] Got frame %s" % (name, repr(frame)))
data = []
keep = []
id = []
dest = []
user = []
first = True
return instances()
|
9b5ef6e150455b41dbddd28cc26a246746e425d1
|
0b134572e3ac3903ebb44df6d4138cbab9d3327c
|
/app/grandchallenge/workspaces/tasks.py
|
781861b5388538154d8826754421026e6f486274
|
[
"Apache-2.0"
] |
permissive
|
comic/grand-challenge.org
|
660de3bafaf8f4560317f1dfd9ae9585ec272896
|
dac25f93b395974b32ba2a8a5f9e19b84b49e09d
|
refs/heads/main
| 2023-09-01T15:57:14.790244
| 2023-08-31T14:23:04
| 2023-08-31T14:23:04
| 4,557,968
| 135
| 53
|
Apache-2.0
| 2023-09-14T13:41:03
| 2012-06-05T09:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 8,757
|
py
|
tasks.py
|
import requests
from celery import shared_task
from django.conf import settings
from grandchallenge.workspaces.models import (
WorkbenchToken,
Workspace,
WorkspaceKindChoices,
WorkspaceStatus,
WorkspaceTypeConfiguration,
)
@shared_task
def create_workspace_type_configuration(*, workspace_type_configuration_pk):
configuration = WorkspaceTypeConfiguration.objects.get(
pk=workspace_type_configuration_pk
)
auth = WorkbenchToken.objects.get(
user__username=settings.WORKBENCH_ADMIN_USERNAME
)
with requests.Session() as s:
_authorise(client=s, auth=auth)
# TODO - use models.WorkspaceType
env_type_id = _get_env_type_id(s, name="SageMaker Notebook-v1")
_add_configuration(
client=s, env_type_id=env_type_id, configuration=configuration
)
@shared_task
def create_workspace(*, workspace_pk):
workspace = Workspace.objects.get(pk=workspace_pk)
with requests.Session() as s:
_authorise(client=s, auth=workspace.user.workbench_token)
if settings.DEBUG:
ip_address = _get_ip_address(s)
else:
ip_address = workspace.allowed_ip
# TODO - use models.WorkspaceType
env_type_id = _get_env_type_id(s, name="SageMaker Notebook-v1")
# TODO - use models.WorkbenchProject
project = _get_project(s)
instance = _create_workspace(
client=s,
cidr=f"{ip_address}/32",
description=f"Created at {workspace.created}",
env_type_config_id=workspace.configuration.pk,
env_type_id=env_type_id,
name=f"Workspace-{str(workspace.pk)}",
project_id=project["id"],
study_ids=[],
)
workspace.status = instance["status"]
workspace.service_workbench_id = instance["id"]
workspace.full_clean()
workspace.save()
tasks = wait_for_workspace_to_start.signature(
kwargs={"workspace_pk": workspace.pk}, immutable=True
)
if (
workspace.configuration.kind
== WorkspaceKindChoices.SAGEMAKER_NOTEBOOK
):
tasks |= get_workspace_url.signature(
kwargs={"workspace_pk": workspace.pk}, immutable=True
)
tasks.apply_async()
@shared_task(bind=True, max_retries=20)
def wait_for_workspace_to_start(self, *, workspace_pk):
"""Checks if the workspace is up for up to 10 minutes."""
workspace = Workspace.objects.get(pk=workspace_pk)
if workspace.status != WorkspaceStatus.PENDING:
# Nothing to do
return
with requests.Session() as s:
_authorise(client=s, auth=workspace.user.workbench_token)
instance = _get_workspace(
s, workspace_id=workspace.service_workbench_id
)
if instance["status"] == WorkspaceStatus.PENDING:
# Raises celery.exceptions.Retry
self.retry(countdown=30)
# TODO catch MaxRetriesExceeded?
else:
workspace.status = instance["status"]
workspace.full_clean()
workspace.save()
@shared_task
def get_workspace_url(*, workspace_pk):
workspace = Workspace.objects.get(pk=workspace_pk)
if workspace.configuration.kind != WorkspaceKindChoices.SAGEMAKER_NOTEBOOK:
raise ValueError("URLs can only be generated for SageMaker Notebooks")
with requests.Session() as s:
_authorise(client=s, auth=workspace.user.workbench_token)
instance = _get_workspace(
s, workspace_id=workspace.service_workbench_id
)
if instance["status"] != WorkspaceStatus.COMPLETED:
raise RuntimeError("Workspace was not running")
else:
connection = _get_workspace_connection(
s, workspace_id=workspace.service_workbench_id
)
url = _create_workspace_url(
s,
workspace_id=workspace.service_workbench_id,
connection_id=connection["id"],
)
workspace.notebook_url = url
workspace.full_clean()
workspace.save()
def _authorise(*, client, auth):
uri = "api/authentication/public/provider/configs"
response = client.get(f"{settings.WORKBENCH_API_URL}{uri}")
response.raise_for_status()
configs = response.json()
# get the auth provider url
auth_configs = [c for c in configs if c["id"] == auth.provider.lower()]
if len(auth_configs) != 1:
raise ValueError(
f"Auth provider {auth.get_provider_display()!r} is not supported by this service workbench instance"
)
auth_config = auth_configs[0]
# obtain the auth token
# TODO: is this only for internal auth?
response = client.post(
f"{settings.WORKBENCH_API_URL}{auth_config['signInUri']}",
data={
"username": auth.email,
"password": auth.token,
"authenticationProviderId": auth_config["id"],
},
)
response.raise_for_status()
# set the token auth header
# TODO: what is the expiry on tokens?
client.headers.update({"Authorization": response.json()["idToken"]})
def _get_ip_address(client):
uri = "api/ip"
response = client.get(f"{settings.WORKBENCH_API_URL}{uri}")
response.raise_for_status()
return response.json()["ipAddress"]
def _get_workspace_types(client, status="approved"):
uri = "api/workspace-types"
response = client.get(
f"{settings.WORKBENCH_API_URL}{uri}", params={"status": status}
)
response.raise_for_status()
return response.json()
def _get_env_type_id(client, name):
workspaces = _get_workspace_types(client)
workspace_types = [
w for w in workspaces if w["name"].casefold() == name.casefold()
]
if len(workspace_types) != 1:
raise RuntimeError(f"Unique workspace was not found for {name!r}.")
workspace_type = workspace_types[0]
return f"{workspace_type['product']['productId']}-{workspace_type['provisioningArtifact']['id']}"
def _add_configuration(
client, env_type_id, configuration, allow_role_ids=("researcher",)
):
uri = f"api/workspace-types/{env_type_id}/configurations"
response = client.post(
f"{settings.WORKBENCH_API_URL}{uri}",
json={
"id": str(configuration.pk),
"name": configuration.name,
"allowRoleIds": allow_role_ids,
"denyRoleIds": [],
"params": configuration.params,
"tags": [],
},
)
response.raise_for_status()
return response.json()
def _get_project(client):
uri = "api/projects"
response = client.get(f"{settings.WORKBENCH_API_URL}{uri}")
response.raise_for_status()
projects = response.json()
if len(projects) != 1:
raise RuntimeError("Too many projects found")
return projects[0]
def _create_workspace(
client,
cidr,
description,
env_type_config_id,
env_type_id,
name,
project_id,
study_ids,
):
uri = "api/workspaces/service-catalog/"
payload = {
"cidr": cidr,
"description": description,
"envTypeConfigId": env_type_config_id,
"envTypeId": env_type_id,
"name": name,
"projectId": project_id,
"studyIds": study_ids,
}
response = client.post(f"{settings.WORKBENCH_API_URL}{uri}", data=payload)
response.raise_for_status()
return response.json()
def _get_workspace(client, workspace_id):
uri = f"api/workspaces/service-catalog/{workspace_id}"
response = client.get(f"{settings.WORKBENCH_API_URL}{uri}")
response.raise_for_status()
return response.json()
def _get_workspace_connections(client, workspace_id):
uri = f"api/workspaces/service-catalog/{workspace_id}/connections"
response = client.get(f"{settings.WORKBENCH_API_URL}{uri}")
response.raise_for_status()
return response.json()
def _get_workspace_connection(
client, workspace_id, connection_type="SageMaker"
):
connections = _get_workspace_connections(client, workspace_id)
workspace_connections = [
c for c in connections if c["type"] == connection_type
]
if len(workspace_connections) != 1:
raise RuntimeError(
f"Connection {connection_type!r} not found for {workspace_id}"
)
return workspace_connections[0]
def _create_workspace_url(client, workspace_id, connection_id):
uri = f"api/workspaces/service-catalog/{workspace_id}/connections/{connection_id}/url"
response = client.post(f"{settings.WORKBENCH_API_URL}{uri}")
response.raise_for_status()
return response.json()["url"]
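A minimal sketch of how these Celery tasks might be dispatched from application code, assuming a running worker and an already-saved Workspace row; the launch_workspace/register_configuration helpers and the on_commit wrapper are illustrative assumptions, not part of the module above:

from django.db import transaction

from grandchallenge.workspaces.tasks import (
    create_workspace,
    create_workspace_type_configuration,
)


def launch_workspace(workspace):
    # Queue provisioning only after the row is committed, so the worker
    # can load it with Workspace.objects.get(pk=workspace_pk).
    transaction.on_commit(
        lambda: create_workspace.apply_async(
            kwargs={"workspace_pk": workspace.pk}
        )
    )


def register_configuration(configuration):
    # Push a local WorkspaceTypeConfiguration to Service Workbench.
    create_workspace_type_configuration.apply_async(
        kwargs={"workspace_type_configuration_pk": configuration.pk}
    )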
|
3e88a81a0f2e815460401828ac0e653058a3a6c6
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/common/Lib/bsddb/test/test_join.py
|
2ba47849fdca26b710cc1c48836eb21ec9d1de67
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,527
|
py
|
test_join.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/Lib/bsddb/test/test_join.py
import os
import unittest
from test_all import db, dbshelve, test_support, verbose, get_new_environment_path, get_new_database_path
ProductIndex = [('apple', 'Convenience Store'),
('blueberry', "Farmer's Market"),
('shotgun', 'S-Mart'),
('pear', "Farmer's Market"),
('chainsaw', 'S-Mart'),
('strawberry', "Farmer's Market")]
ColorIndex = [('blue', 'blueberry'),
('red', 'apple'),
('red', 'chainsaw'),
('red', 'strawberry'),
('yellow', 'peach'),
('yellow', 'pear'),
('black', 'shotgun')]
class JoinTestCase(unittest.TestCase):
keytype = ''
def setUp(self):
self.filename = self.__class__.__name__ + '.db'
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK)
def tearDown(self):
self.env.close()
test_support.rmtree(self.homeDir)
def test01_join(self):
if verbose:
print '\n', '-=' * 30
print 'Running %s.test01_join...' % self.__class__.__name__
priDB = db.DB(self.env)
priDB.open(self.filename, 'primary', db.DB_BTREE, db.DB_CREATE)
map(lambda t, priDB=priDB: priDB.put(*t), ProductIndex)
secDB = db.DB(self.env)
secDB.set_flags(db.DB_DUP | db.DB_DUPSORT)
secDB.open(self.filename, 'secondary', db.DB_BTREE, db.DB_CREATE)
map(lambda t, secDB=secDB: secDB.put(*t), ColorIndex)
sCursor = None
jCursor = None
try:
sCursor = secDB.cursor()
tmp = sCursor.set('red')
self.assertTrue(tmp)
jCursor = priDB.join([sCursor])
if jCursor.get(0) != ('apple', 'Convenience Store'):
self.fail('join cursor positioned wrong')
if jCursor.join_item() != 'chainsaw':
self.fail('DBCursor.join_item returned wrong item')
if jCursor.get(0)[0] != 'strawberry':
self.fail('join cursor returned wrong thing')
if jCursor.get(0):
self.fail('join cursor returned too many items')
finally:
if jCursor:
jCursor.close()
if sCursor:
sCursor.close()
priDB.close()
secDB.close()
return
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(JoinTestCase))
return suite
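If run standalone under Python 2, the suite can be driven with the usual unittest entry point; the __main__ guard below is an assumed addition, not present in the decompiled file:

if __name__ == '__main__':
    # Loads and runs the module-level test_suite() defined above.
    unittest.main(defaultTest='test_suite')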
|