hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a08b408c6d3eb38c9a674b04af95f97b9064553
| 11,764
|
py
|
Python
|
tests/model_meta/tests.py
|
devops2014/djangosite
|
db77915c9fd35a203edd8206f702ee4082f04d4a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_meta/tests.py
|
devops2014/djangosite
|
db77915c9fd35a203edd8206f702ee4082f04d4a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_meta/tests.py
|
devops2014/djangosite
|
db77915c9fd35a203edd8206f702ee4082f04d4a
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import apps
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import CharField, Field, related
from django.db.models.options import EMPTY_RELATION_TREE, IMMUTABLE_WARNING
from django.test import TestCase
from .models import (
AbstractPerson, BasePerson, Child, CommonAncestor, FirstParent, Person,
ProxyPerson, Relating, Relation, SecondParent,
)
from .results import TEST_RESULTS
class OptionsBaseTests(TestCase):
    """Shared helpers for inspecting the contents of ``Model._meta``."""

    def _map_related_query_names(self, res):
        # (related object, model) pairs -> (related query name, model) pairs.
        return tuple((o.name, m) for o, m in res)

    def _map_names(self, res):
        # (field, model) pairs -> (field name, model) pairs.
        return tuple((f.name, m) for f, m in res)

    def _model(self, current_model, field):
        # Concrete model the field belongs to, or None when that is
        # current_model itself.
        model = field.model._meta.concrete_model
        return None if model == current_model else model

    def _details(self, current_model, relation):
        """Return (relation, declaring model or None, direct, m2m) for a relation."""
        # A relation is "direct" when it is a real Field or a
        # GenericForeignKey (tuple form replaces the chained isinstance calls).
        direct = isinstance(relation, (Field, GenericForeignKey))
        model = relation.model._meta.concrete_model
        if model == current_model:
            model = None
        field = relation if direct else relation.field
        m2m = isinstance(field, related.ManyToManyField)
        return relation, model, direct, m2m
class GetFieldsTests(OptionsBaseTests):
    def test_get_fields_is_immutable(self):
        """get_fields() must return an immutable sequence, cached or not."""
        expected_msg = IMMUTABLE_WARNING % "get_fields()"
        # First pass exercises the uncached result, second pass the cached one.
        for _ in range(2):
            result = Person._meta.get_fields()
            with self.assertRaisesMessage(AttributeError, expected_msg):
                result += ["errors"]
class DataTests(OptionsBaseTests):
    """Checks the field attnames exposed by _meta against TEST_RESULTS."""

    def test_fields(self):
        for model, expected_result in TEST_RESULTS['fields'].items():
            fields = model._meta.fields
            self.assertEqual([f.attname for f in fields], expected_result)

    def test_local_fields(self):
        def is_data_field(f):
            # A data field is a Field that is not many-to-many.
            # (Plain def instead of a lambda assignment, per PEP 8 E731.)
            return isinstance(f, Field) and not isinstance(f, related.ManyToManyField)

        for model, expected_result in TEST_RESULTS['local_fields'].items():
            fields = model._meta.local_fields
            self.assertEqual([f.attname for f in fields], expected_result)
            for f in fields:
                self.assertEqual(f.model, model)
                self.assertTrue(is_data_field(f))

    def test_local_concrete_fields(self):
        for model, expected_result in TEST_RESULTS['local_concrete_fields'].items():
            fields = model._meta.local_concrete_fields
            self.assertEqual([f.attname for f in fields], expected_result)
            # Concrete fields must all map to a database column.
            for f in fields:
                self.assertIsNotNone(f.column)
class M2MTests(OptionsBaseTests):
    def test_many_to_many(self):
        """_meta.many_to_many attnames match expectations; all are m2m relations."""
        for model, expected in TEST_RESULTS['many_to_many'].items():
            m2m_fields = model._meta.many_to_many
            self.assertEqual([field.attname for field in m2m_fields], expected)
            for field in m2m_fields:
                self.assertTrue(field.many_to_many and field.is_relation)

    def test_many_to_many_with_model(self):
        """Each m2m field is paired with its declaring model (None when local)."""
        for model, expected in TEST_RESULTS['many_to_many_with_model'].items():
            declaring_models = [
                self._model(model, field)
                for field in model._meta.many_to_many
            ]
            self.assertEqual(declaring_models, expected)
class RelatedObjectsTests(OptionsBaseTests):
    """Reverse related objects derived from get_fields() vs. TEST_RESULTS."""

    def key_name(self, r):
        # Sort key: first element (the name) of each (name, model) pair.
        # (Method instead of a lambda class attribute, per PEP 8 E731.)
        return r[0]

    def _related_objects(self, model, **get_fields_kwargs):
        # The auto-created, non-concrete entries of get_fields() are the
        # reverse related objects; pair each with its declaring model.
        # Shared by all four tests below (previously duplicated inline).
        return [
            (field, self._model(model, field))
            for field in model._meta.get_fields(**get_fields_kwargs)
            if field.auto_created and not field.concrete
        ]

    def test_related_objects(self):
        result_key = 'get_all_related_objects_with_model'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = self._related_objects(model)
            self.assertEqual(self._map_related_query_names(objects), expected)

    def test_related_objects_local(self):
        result_key = 'get_all_related_objects_with_model_local'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = self._related_objects(model, include_parents=False)
            self.assertEqual(self._map_related_query_names(objects), expected)

    def test_related_objects_include_hidden(self):
        result_key = 'get_all_related_objects_with_model_hidden'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = self._related_objects(model, include_hidden=True)
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )

    def test_related_objects_include_hidden_local_only(self):
        result_key = 'get_all_related_objects_with_model_hidden_local'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = self._related_objects(model, include_hidden=True, include_parents=False)
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )
class VirtualFieldsTests(OptionsBaseTests):
    def test_virtual_fields(self):
        """_meta.virtual_fields contains exactly the expected field names."""
        for model, expected_names in TEST_RESULTS['virtual_fields'].items():
            actual_names = sorted(field.name for field in model._meta.virtual_fields)
            self.assertEqual(actual_names, sorted(expected_names))
class GetFieldByNameTests(OptionsBaseTests):
    """Tests for Options.get_field() across the different field kinds.

    Each test unpacks the (relation, model, direct, m2m) tuple produced by
    OptionsBaseTests._details() and checks everything but the relation itself
    against expectations, then checks the relation's type.
    """
    def test_get_data_field(self):
        # 'data_abstract' is a plain data field inherited from BasePerson.
        field_info = self._details(Person, Person._meta.get_field('data_abstract'))
        self.assertEqual(field_info[1:], (BasePerson, True, False))
        self.assertIsInstance(field_info[0], CharField)
    def test_get_m2m_field(self):
        # 'm2m_base' is a many-to-many field inherited from BasePerson.
        field_info = self._details(Person, Person._meta.get_field('m2m_base'))
        self.assertEqual(field_info[1:], (BasePerson, True, True))
        self.assertIsInstance(field_info[0], related.ManyToManyField)
    def test_get_related_object(self):
        # 'relating_baseperson' is a reverse (auto-created) relation.
        field_info = self._details(Person, Person._meta.get_field('relating_baseperson'))
        self.assertEqual(field_info[1:], (BasePerson, False, False))
        self.assertIsInstance(field_info[0], related.ForeignObjectRel)
    def test_get_related_m2m(self):
        # 'relating_people' is a reverse many-to-many relation.
        field_info = self._details(Person, Person._meta.get_field('relating_people'))
        self.assertEqual(field_info[1:], (None, False, True))
        self.assertIsInstance(field_info[0], related.ForeignObjectRel)
    def test_get_generic_relation(self):
        field_info = self._details(Person, Person._meta.get_field('generic_relation_base'))
        self.assertEqual(field_info[1:], (None, True, False))
        self.assertIsInstance(field_info[0], GenericRelation)
    def test_get_fields_only_searches_forward_on_apps_not_ready(self):
        opts = Person._meta
        # If apps registry is not ready, get_field() searches over only
        # forward fields.
        opts.apps.ready = False
        try:
            # 'data_abstract' is a forward field, and therefore will be found
            self.assertTrue(opts.get_field('data_abstract'))
            msg = (
                "Person has no field named 'relating_baseperson'. The app "
                "cache isn't ready yet, so if this is an auto-created related "
                "field, it won't be available yet."
            )
            # 'relating_baseperson' is a reverse field, and will raise an exception
            with self.assertRaisesMessage(FieldDoesNotExist, msg):
                opts.get_field('relating_baseperson')
        finally:
            # Restore the registry state so later tests are unaffected.
            opts.apps.ready = True
class RelationTreeTests(TestCase):
    """Tests for the lazily-built, cached _meta._relation_tree."""
    # Models exercised by every test in this class.
    all_models = (Relation, AbstractPerson, BasePerson, Person, ProxyPerson, Relating)
    def setUp(self):
        apps.clear_cache()
    def test_clear_cache_clears_relation_tree(self):
        # The apps.clear_cache in setUp() should have deleted all trees.
        # Exclude abstract models that are not included in the Apps registry
        # and have no cache.
        all_models_with_cache = (m for m in self.all_models if not m._meta.abstract)
        for m in all_models_with_cache:
            self.assertNotIn('_relation_tree', m._meta.__dict__)
    def test_first_relation_tree_access_populates_all(self):
        # On first access, relation tree should have populated cache.
        self.assertTrue(self.all_models[0]._meta._relation_tree)
        # AbstractPerson does not have any relations, so relation_tree
        # should just return an EMPTY_RELATION_TREE.
        self.assertEqual(AbstractPerson._meta._relation_tree, EMPTY_RELATION_TREE)
        # All the other models should already have their relation tree
        # in the internal __dict__ .
        all_models_but_abstractperson = (m for m in self.all_models if m is not AbstractPerson)
        for m in all_models_but_abstractperson:
            self.assertIn('_relation_tree', m._meta.__dict__)
    def test_relations_related_objects(self):
        # Testing non hidden related objects
        self.assertEqual(
            sorted([field.related_query_name() for field in Relation._meta._relation_tree
                    if not field.rel.field.rel.is_hidden()]),
            sorted([
                'fk_abstract_rel', 'fk_abstract_rel', 'fk_abstract_rel', 'fk_base_rel', 'fk_base_rel',
                'fk_base_rel', 'fk_concrete_rel', 'fk_concrete_rel', 'fo_abstract_rel', 'fo_abstract_rel',
                'fo_abstract_rel', 'fo_base_rel', 'fo_base_rel', 'fo_base_rel', 'fo_concrete_rel',
                'fo_concrete_rel', 'm2m_abstract_rel', 'm2m_abstract_rel', 'm2m_abstract_rel',
                'm2m_base_rel', 'm2m_base_rel', 'm2m_base_rel', 'm2m_concrete_rel', 'm2m_concrete_rel',
            ])
        )
        # Testing hidden related objects
        self.assertEqual(
            sorted([field.related_query_name() for field in BasePerson._meta._relation_tree]),
            sorted([
                '+', '+', 'BasePerson_following_abstract+', 'BasePerson_following_abstract+',
                'BasePerson_following_base+', 'BasePerson_following_base+', 'BasePerson_friends_abstract+',
                'BasePerson_friends_abstract+', 'BasePerson_friends_base+', 'BasePerson_friends_base+',
                'BasePerson_m2m_abstract+', 'BasePerson_m2m_base+', 'Relating_basepeople+',
                'Relating_basepeople_hidden+', 'followers_abstract', 'followers_abstract', 'followers_abstract',
                'followers_base', 'followers_base', 'followers_base', 'friends_abstract_rel_+', 'friends_abstract_rel_+',
                'friends_abstract_rel_+', 'friends_base_rel_+', 'friends_base_rel_+', 'friends_base_rel_+', 'person',
                'person', 'relating_basepeople', 'relating_baseperson',
            ])
        )
        # AbstractPerson has no relation tree at all.
        self.assertEqual([field.related_query_name() for field in AbstractPerson._meta._relation_tree], [])
class ParentListTests(TestCase):
    def test_get_parent_list(self):
        """get_parent_list() reflects each model's inheritance chain."""
        expected_parents = {
            CommonAncestor: [],
            FirstParent: [CommonAncestor],
            SecondParent: [CommonAncestor],
            Child: [FirstParent, SecondParent, CommonAncestor],
        }
        for model, parents in expected_parents.items():
            self.assertEqual(model._meta.get_parent_list(), parents)
| 45.072797
| 121
| 0.67137
|
4a08b46f5a82794f44c09a2d8182013e2904af61
| 16,969
|
py
|
Python
|
oc_chess_club/controller/database_handler.py
|
PabloLec/oc_chess_club
|
69a6ce3111afadce73710d314579af6e6f0cbce6
|
[
"MIT"
] | null | null | null |
oc_chess_club/controller/database_handler.py
|
PabloLec/oc_chess_club
|
69a6ce3111afadce73710d314579af6e6f0cbce6
|
[
"MIT"
] | null | null | null |
oc_chess_club/controller/database_handler.py
|
PabloLec/oc_chess_club
|
69a6ce3111afadce73710d314579af6e6f0cbce6
|
[
"MIT"
] | 1
|
2021-07-15T06:49:39.000Z
|
2021-07-15T06:49:39.000Z
|
from tinydb import Query, table
from oc_chess_club.controller.config_loader import _CONFIG
from oc_chess_club.models.database import Database
from oc_chess_club.models.player import Player
from oc_chess_club.models.tournament import Tournament
from oc_chess_club.models.round import Round
from oc_chess_club.models.match import Match
from oc_chess_club.controller.database_helper import DatabaseHelper
class SingletonMeta(type):
    """Meta for singleton application. As DataHandler will be used by different modules there is
    no need to load the database multiple time.

    Singleton was kept simple and is currently not thread safe.
    """

    # One shared instance per class using this metaclass.
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # EAFP: return the cached instance, building it on first use only.
        try:
            return cls._instances[cls]
        except KeyError:
            cls._instances[cls] = super().__call__(*args, **kwargs)
            return cls._instances[cls]
class DatabaseHandler(metaclass=SingletonMeta):
    """Handles all operations related to the database including CRUD for the different db elements.

    Attributes:
        database (Database): Object encapsulating the database in TinyDB format and usable tournament related objects.
        helper (DatabaseHelper): Object with helper methods to manipulate and transform db objects.
        players_table (table.Table): Instance of TinyDB "Players" Table.
        tournaments_table (table.Table): Instance of TinyDB "Tournaments" Table.
        rounds_table (table.Table): Instance of TinyDB "Rounds" Table.
        matches_table (table.Table): Instance of TinyDB "Matches" Table.
    """

    def __init__(self):
        """Constructor for DatabaseHandler Class. Initiates database loading."""
        self.database = Database(_CONFIG.config["database_file"])
        self.helper = DatabaseHelper(database=self.database)
        self.players_table = None
        self.tournaments_table = None
        self.rounds_table = None
        self.matches_table = None
        self.load_database()

    def load_database(self):
        """Instantiates the different tables in attributes and loads their content
        by creating corresponding objects.
        """
        self.players_table = self.database.db.table("players")
        self.tournaments_table = self.database.db.table("tournaments")
        self.rounds_table = self.database.db.table("rounds")
        self.matches_table = self.database.db.table("matches")
        # Players must be loaded first: tournaments reference them by id.
        self.load_players()
        self.load_tournaments()

    def load_players(self):
        """Uses TinyDB "Players" table to create Player objects."""
        for player in self.players_table:
            self.create_player(
                first_name=player["First Name"],
                last_name=player["Last Name"],
                dob=player["DOB"],
                gender=player["Gender"],
                elo=player["ELO"],
                id_num=player["id"],
                is_deleted=player["Is Deleted"],
                no_db_save=True,
            )

    def create_player(
        self,
        first_name: str,
        last_name: str,
        dob: str,
        gender: str,
        elo: int,
        id_num: int = 0,
        is_deleted: bool = False,
        no_db_save: bool = False,
    ):
        """Creates a Player object and saves it into Database attributes.

        Args:
            first_name (str): Player's first name.
            last_name (str): Player's last name.
            dob (str): Player's date of birth.
            gender (str): Player's gender.
            elo (int): Player's ELO ranking.
            id_num (int, optional): Player's id. Defaults to 0 (auto-assign next id).
            is_deleted (bool, optional): Is player deleted. Defaults to False.
            no_db_save (bool, optional): If the object only needs to be saved in memory, not in db. Defaults to False.

        Returns:
            int: Created player's id.
        """
        if id_num == 0:
            id_num = self.find_next_id(self.players_table)
        player = Player(first_name, last_name, dob, gender.upper(), elo, id_num, is_deleted)
        self.save_player(player=player, no_db_save=no_db_save)
        return id_num

    def save_player(self, player: Player, no_db_save: bool = False):
        """Saves a Player object to TinyDB.

        Args:
            player (Player): Player object to be saved.
            no_db_save (bool, optional): If the object only needs to be saved in memory, not in db. Defaults to False.
        """
        self.database.players[player.id_num] = player
        if no_db_save:
            return
        query = Query()
        self.players_table.upsert(
            {
                "First Name": player.first_name,
                "Last Name": player.last_name,
                "DOB": player.dob,
                "Gender": player.gender,
                "ELO": int(player.elo),
                "id": int(player.id_num),
                "Is Deleted": player.is_deleted,
            },
            query.id == int(player.id_num),
        )

    def delete_player(self, player: Player):
        """Delete a player by setting a flag. Player must persist in database for tournament history.

        Args:
            player (Player): Player to be deleted.
        """
        player.is_deleted = True
        self.save_player(player=player)

    def load_tournaments(self):
        """Uses TinyDB "Tournaments" table to create Tournament objects."""
        for tournament in self.tournaments_table:
            self.create_tournament(
                name=tournament["Name"],
                location=tournament["Location"],
                date=tournament["Date"],
                number_of_rounds=tournament["Number of rounds"],
                time_control=tournament["Time Control"],
                description=tournament["Description"],
                id_num=tournament["id"],
                is_finished=tournament["Is Finished"],
                players=tournament["Players"],
                leaderboard=tournament["Leaderboard"],
                no_db_save=True,
            )

    def create_tournament(
        self,
        name: str,
        location: str,
        date: str,
        number_of_rounds: int,
        time_control: str,
        description: str,
        players: list[int],
        leaderboard: dict,
        id_num: int = 0,
        is_finished: bool = False,
        no_db_save: bool = False,
    ):
        """Creates a Tournament object and saves it into Database attributes.

        Args:
            name (str): Tournament's name.
            location (str): Tournament's physical location.
            date (str): Tournament's date.
            number_of_rounds (int): Number of rounds to be played.
            time_control (str): Type of time control chosen.
            description (str): Tournament's description.
            players (list[int]): Participating players ids.
            leaderboard (dict): Tournament's leaderboard.
            id_num (int, optional): Tournament's id. Defaults to 0 (auto-assign next id).
            is_finished (bool, optional): Is tournament finished. Defaults to False.
            no_db_save (bool, optional): If the object only needs to be saved in memory, not in db. Defaults to False.

        Returns:
            int: Created tournament's id.
        """
        if id_num == 0:
            id_num = self.find_next_id(self.tournaments_table)
        # Create required list of Player objects from players ids.
        player_objects = []
        for player in players:
            player_objects.append(self.database.players[player])
        # Create an empty leaderboard if it doesn't exist yet.
        if len(leaderboard) == 0:
            for player in players:
                leaderboard[str(player)] = 0
        tournament = Tournament(
            name=name,
            location=location,
            date=date,
            number_of_rounds=number_of_rounds,
            time_control=time_control,
            description=description,
            id_num=id_num,
            is_finished=is_finished,
            players=player_objects,
            leaderboard=leaderboard,
        )
        self.save_tournament(tournament=tournament, no_db_save=no_db_save)
        return id_num

    def save_tournament(self, tournament: Tournament, no_db_save: bool = False):
        """Saves a Tournament object to memory and TinyDB.

        Args:
            tournament (Tournament): Tournament object to be saved.
            no_db_save (bool, optional): If the object only needs to be saved in memory, not in db. Defaults to False.
        """
        self.database.tournaments[tournament.id_num] = tournament
        if no_db_save:
            return
        query = Query()
        # Tournaments are persisted with player ids, not Player objects.
        players_id = []
        for player in tournament.players:
            players_id.append(player.id_num)
        self.tournaments_table.upsert(
            {
                "Name": tournament.name,
                "Location": tournament.location,
                "Date": tournament.date,
                "Number of rounds": int(tournament.number_of_rounds),
                "Time Control": tournament.time_control,
                "Description": tournament.description,
                "Players": players_id,
                "Leaderboard": tournament.leaderboard,
                "Is Finished": tournament.is_finished,
                "id": int(tournament.id_num),
            },
            query.id == int(tournament.id_num),
        )

    def delete_tournament(self, tournament: Tournament):
        """Deletes a tournament in database, cascading to its rounds and matches.

        Args:
            tournament (Tournament): Tournament to be deleted
        """
        # Load children first so the cascade below sees every round/match.
        self.load_rounds(tournament_id=tournament.id_num)
        self.load_matches(tournament_id=tournament.id_num)
        for round_ in tournament.rounds:
            self.delete_round(round_=tournament.rounds[round_])
        query = Query()
        self.tournaments_table.remove(query.id == int(tournament.id_num))
        del self.database.tournaments[int(tournament.id_num)]

    def load_rounds(self, tournament_id: int):
        """Uses TinyDB "Rounds" table to create Round objects for one particular tournament.

        Args:
            tournament_id (int): Tournament to be considered.
        """
        for round_ in self.rounds_table:
            if round_["Tournament id"] != tournament_id:
                continue
            self.create_round(
                round_number=round_["Round number"],
                tournament_id=round_["Tournament id"],
                id_num=round_["id"],
                no_db_save=True,
            )

    def create_round(self, round_number: int, tournament_id: int, id_num: int = 0, no_db_save: bool = False):
        """Creates a Round object and saves it into Database attributes.

        Args:
            round_number (int): Ordered round number.
            tournament_id (int): Round's tournament id.
            id_num (int, optional): Round id. Defaults to 0 (auto-assign next id).
            no_db_save (bool, optional): If the object only needs to be saved in memory, not in db. Defaults to False.

        Returns:
            int: Created round id.
        """
        if id_num == 0:
            id_num = self.find_next_id(self.rounds_table)
        created_round = Round(round_number=round_number, tournament_id=tournament_id, id_num=id_num)
        self.save_round(round_=created_round, no_db_save=no_db_save)
        return id_num

    def save_round(self, round_: Round, no_db_save: bool = False):
        """Saves a Round object to memory and TinyDB.

        Args:
            round_ (Round): Round object to be saved. Underscore added because of reserved keyword.
            no_db_save (bool, optional): If the object only needs to be saved in memory, not in db. Defaults to False.
        """
        self.database.tournaments[round_.tournament_id].rounds[round_.id_num] = round_
        if no_db_save:
            return
        query = Query()
        self.rounds_table.upsert(
            {
                "Round number": round_.round_number,
                "Tournament id": int(round_.tournament_id),
                "id": int(round_.id_num),
            },
            query.id == int(round_.id_num),
        )

    def delete_round(self, round_: Round):
        """Deletes a round in database, cascading to its matches.

        Args:
            round_ (Round): Round to be deleted.
        """
        for match in round_.matches:
            self.delete_match(match=round_.matches[match])
        query = Query()
        self.rounds_table.remove(query.id == int(round_.id_num))

    def load_matches(self, tournament_id: int):
        """Uses TinyDB "Matches" table to create Match objects for one particular tournament.

        Args:
            tournament_id (int): Tournament to be considered.
        """
        for match in self.matches_table:
            if match["Tournament id"] != tournament_id:
                continue
            # Resolve stored player ids back into in-memory Player objects.
            player_1 = self.database.players[match["Player 1"]]
            player_2 = self.database.players[match["Player 2"]]
            players = (player_1, player_2)
            self.create_match(
                players=players,
                tournament_id=match["Tournament id"],
                round_id=match["Round id"],
                winner=match["Winner"],
                id_num=match["id"],
                no_db_save=True,
            )

    def create_match(
        self, players: tuple, tournament_id: int, round_id: int, winner: int, id_num: int = 0, no_db_save: bool = False
    ):
        """Creates a Match object and saves it into Database attributes.

        Args:
            players (tuple): Tuple of the two facing players.
            tournament_id (int): Match's tournament id.
            round_id (int): Match's round id.
            winner (int): Match's winner. Either 1 (first player), 2 (second player) or 0 (draw).
            id_num (int, optional): Match's id. Defaults to 0 (auto-assign next id).
            no_db_save (bool, optional): If the object only needs to be saved in memory, not in db. Defaults to False.

        Returns:
            int: Created match's id.
        """
        if id_num == 0:
            id_num = self.find_next_id(self.matches_table)
        match = Match(
            players=players,
            tournament_id=tournament_id,
            round_id=round_id,
            winner=winner,
            id_num=id_num,
        )
        self.save_match(match=match, no_db_save=no_db_save)
        # Return the id for consistency with the other create_* methods
        # (previously this method returned None).
        return id_num

    def save_match(self, match: Match, no_db_save: bool = False):
        """Saves a Match object to memory and TinyDB.

        Args:
            match (Match): Match object to be saved.
            no_db_save (bool, optional): If the object only needs to be saved in memory, not in db. Defaults to False.
        """
        self.database.tournaments[match.tournament_id].rounds[match.round_id].matches[match.id_num] = match
        if no_db_save:
            return
        query = Query()
        self.matches_table.upsert(
            {
                "Player 1": match.player_1.id_num,
                "Player 2": match.player_2.id_num,
                "Winner": match.winner,
                "Tournament id": int(match.tournament_id),
                "Round id": int(match.round_id),
                "id": int(match.id_num),
            },
            query.id == int(match.id_num),
        )

    def delete_match(self, match: Match):
        """Deletes a match in database.

        Args:
            match (Match): Match to be deleted.
        """
        query = Query()
        self.matches_table.remove(query.id == int(match.id_num))

    def find_next_id(self, table: table.Table):
        """Searches through a TinyDB table for the next biggest id number.

        Args:
            table (table.Table): TinyDB table to search in.

        Returns:
            int: Next biggest id to be used.
        """
        if len(table) == 0:
            return 1
        # Single O(n) pass over the table instead of the previous loop of
        # repeated table.search() calls (O(n) per candidate id, O(n*m) total).
        max_id = max(document["id"] for document in table)
        # Ids start at 1; never return anything lower.
        return max(max_id + 1, 1)

    def update_leaderboard(self, tournament_id: int, player_id: int, points_earned: float):
        """Updates a tournament's leaderboard by adding points to a player.

        Args:
            tournament_id (int): Tournament's id.
            player_id (int): Player's id.
            points_earned (float): Points earned by the player.
        """
        tournament = self.database.tournaments[tournament_id]
        tournament.leaderboard[str(player_id)] += points_earned
        self.save_tournament(tournament=tournament)

    def find_unfinished_tournaments(self):
        """Searches through the Tournaments table for unfinished tournaments.

        Returns:
            list[table.Document]: Unfinished tournaments.
        """
        query = Query()
        # Bug fix: 'query["Is Finished"] is False' tested the *identity* of the
        # Query object against False (always False) instead of building a
        # TinyDB field comparison, so no tournament was ever matched.
        # '==' is required for TinyDB to construct the query.
        result = self.tournaments_table.search(query["Is Finished"] == False)  # noqa: E712
        return result
# _DATABASE_HANDLER = DatabaseHandler()
| 33.469428
| 119
| 0.594437
|
4a08b53a6014d1629d2bc9c02dbadbd22c57771b
| 416
|
py
|
Python
|
salt/utils/dictupdate.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | 2
|
2017-09-17T21:10:35.000Z
|
2019-08-26T03:00:12.000Z
|
salt/utils/dictupdate.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
salt/utils/dictupdate.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | 3
|
2021-02-23T08:12:48.000Z
|
2021-02-23T08:13:13.000Z
|
# -*- coding: utf-8 -*-
'''
Alex Martelli's solution for recursive dict update from
http://stackoverflow.com/a/3233356
'''
# Import python libs
import collections
import collections.abc
def update(dest, upd):
    '''
    Recursively merge ``upd`` into ``dest`` (in place) and return ``dest``.

    Nested mappings are merged key by key; any non-mapping value in ``upd``
    overwrites the corresponding key in ``dest``.
    '''
    # Python 3 fix: dict.iteritems() no longer exists -> use items();
    # collections.Mapping was removed in 3.10 -> use collections.abc.Mapping.
    for key, val in upd.items():
        if isinstance(val, collections.abc.Mapping):
            # Merge the nested mapping into the existing (or new) sub-dict.
            dest[key] = update(dest.get(key, {}), val)
        else:
            dest[key] = val
    return dest
| 21.894737
| 56
| 0.600962
|
4a08b563e716294524d0f82b2eb86fac99162f88
| 9,829
|
py
|
Python
|
tests/functional/test_users.py
|
jasmine95dn/flask_best_worst_scaling
|
86f35d83ccdee91b1c36ed13b8f92147f5e4014c
|
[
"MIT"
] | null | null | null |
tests/functional/test_users.py
|
jasmine95dn/flask_best_worst_scaling
|
86f35d83ccdee91b1c36ed13b8f92147f5e4014c
|
[
"MIT"
] | null | null | null |
tests/functional/test_users.py
|
jasmine95dn/flask_best_worst_scaling
|
86f35d83ccdee91b1c36ed13b8f92147f5e4014c
|
[
"MIT"
] | null | null | null |
#################################################################
# Functional Tests to validate user registration and user login #
#################################################################
def test_user_signup(test_client):
    """
    GIVEN a Flask application
    WHEN the '/user/signup' page is requested (GET)
    THEN the response is valid, the signup template (page title 'SIGNUP')
         is rendered, and the form fields plus the submit button are present.
    """
    response = test_client.get('/user/signup')
    assert response.status_code == 200
    # Template title, each form field, then the submit button.
    for fragment in (b"SIGNUP", b"username", b"email", b"password", b"Sign up"):
        assert fragment in response.data
def test_valid_signup(test_client, init_database):
    """
    GIVEN a Flask application
    WHEN a new user signs up on '/user/signup' (POST) with a valid username and email
    THEN the user is redirected to the user homepage
         (template 'templates/user/index.html', page title 'User - Homepage')
         and the Admin message about the new user is displayed.
    """
    payload = dict(username='jung2', email='abcd@abc.de', password='12345679')
    response = test_client.post('/user/signup', data=payload, follow_redirects=True)
    assert response.status_code == 200
    # Redirect target title, then the Admin welcome message.
    for fragment in (b"User - Homepage", b"Admin", b"New user has been created"):
        assert fragment in response.data
def test_invalid_username_signup(test_client, init_database):
    """
    GIVEN a Flask application
    WHEN a user signs up on '/user/signup' (POST) with an invalid username
         (already taken, containing a space/special character, or not
         meeting the length requirement)
    THEN each request stays on the signup page (page title 'SIGNUP')
         and the matching username-field error message pops up.
    """
    cases = [
        # Existing username.
        (dict(username='jung', email='abcde@abc.de', password='abcdefghij'),
         b"Username already exists! You can log in"),
        # Special character in username.
        (dict(username='jung-1', email='abcdef@abc.de', password='abcdefghij'),
         b"Username is not allowed to have space or special characters!"),
        # Username too short.
        (dict(username='jun', email='abcdef@abc.de', password='abcdefghij'),
         b"Username must be between 4 and 15 characters long."),
    ]
    for payload, error_message in cases:
        response = test_client.post('/user/signup', data=payload, follow_redirects=True)
        assert response.status_code == 200
        assert b"SIGNUP" in response.data
        assert error_message in response.data
def test_invalid_email_signup(test_client, init_database):
    """
    GIVEN a Flask application
    WHEN a user signs up on '/user/signup' (POST) with an invalid email
         (already taken, or not matching the email format)
    THEN each request stays on the signup page (page title 'SIGNUP')
         and the matching email-field error message pops up.
    """
    cases = [
        # Existing email.
        (dict(username='maryna', email='abc@abc.de', password='abcdefghij'),
         b"Email already exists! Use another email if you want to be a new user"),
        # Malformed email address.
        (dict(username='jung234', email='abc@dede', password='abcdefghij'),
         b"Invalid email"),
    ]
    for payload, error_message in cases:
        response = test_client.post('/user/signup', data=payload, follow_redirects=True)
        assert response.status_code == 200
        assert b"SIGNUP" in response.data
        assert error_message in response.data
def test_invalid_password_signup(test_client, init_database):
    """
    GIVEN a Flask application
    WHEN a signup is submitted ('/user/signup', POST) with a password shorter
         than the required length
    THEN check that the request is not redirected elsewhere:
        1. the response is valid (GET) -> the user stays at '/user/signup'
        2. the right template is rendered
           (template 'templates/user/signup.html' has page title 'SIGNUP')
        3. the password-field error message is shown
    """
    resp = test_client.post(
        '/user/signup',
        data={'username': 'jung234', 'email': 'abcdef@abc.de', 'password': 'abc'},
        follow_redirects=True,
    )
    assert resp.status_code == 200  # 1.
    assert b"SIGNUP" in resp.data   # 2.
    assert b"Password must be between 8 and 80 characters long." in resp.data  # 3.
def test_user_login(test_client):
    """
    GIVEN a Flask application
    WHEN the '/user/login' page is requested (GET)
    THEN check that:
        1. the response is valid
        2. the right template is rendered
           (template 'templates/user/login.html' has page title 'LOGIN')
        3. every login-form field is present
        4. the Login button is present
    """
    resp = test_client.get('/user/login')
    assert resp.status_code == 200  # 1.
    assert b"LOGIN" in resp.data    # 2.
    # 3. all form fields are rendered
    for field in (b"username", b"password", b"remember"):
        assert field in resp.data
    assert b"Login" in resp.data    # 4.
def test_valid_login_logout(test_client, init_database):
    """
    GIVEN a Flask application
    WHEN user 'jung' logs in ('/user/login', POST) with valid credentials and
         afterwards requests '/user/logout' (GET)
    THEN check that:
        1. login redirects to the user's profile page
           (template 'templates/user/profile.html' shows page title 'JUNG')
        2. logout redirects to the 'User - Homepage'
           (template 'templates/user/index.html') with Admin's logout message
    """
    # Log in with valid credentials.
    resp = test_client.post(
        '/user/login',
        data={'username': 'jung', 'password': '12345678', 'remember': False},
        follow_redirects=True,
    )
    assert resp.status_code == 200
    assert b"JUNG" in resp.data
    # Log the same user out again.
    resp = test_client.get('/user/logout', follow_redirects=True)
    assert resp.status_code == 200
    assert b"User - Homepage" in resp.data
    assert b"Admin" in resp.data
    assert b"You have logged out!" in resp.data
def test_invalid_username_login(test_client, init_database):
    """
    GIVEN a Flask application
    WHEN a login is submitted ('/user/login', POST) with a username that was
         never registered
    THEN check that the request is not redirected elsewhere:
        1. the response is valid (GET) -> the user stays at '/user/login'
        2. the right template is rendered
           (template 'templates/user/login.html' has page title 'LOGIN')
        3. the username-field error message is shown
    """
    resp = test_client.post(
        '/user/login',
        data={'username': 'jung3', 'password': '12345678', 'remember': True},
        follow_redirects=True,
    )
    assert resp.status_code == 200  # 1.
    assert b"LOGIN" in resp.data    # 2.
    assert b"Invalid username! Have you signed up?" in resp.data  # 3.
def test_invalid_password_login(test_client, init_database):
    """
    GIVEN a Flask application
    WHEN a login is submitted ('/user/login', POST) with an existing username
         but the wrong password
    THEN check that the request is not redirected elsewhere:
        1. the response is valid (GET) -> the user stays at '/user/login'
        2. the right template is rendered
           (template 'templates/user/login.html' has page title 'LOGIN')
        3. the password-field error message is shown
    """
    resp = test_client.post(
        '/user/login',
        data={'username': 'jung', 'password': '12345679', 'remember': True},
        follow_redirects=True,
    )
    assert resp.status_code == 200  # 1.
    assert b"LOGIN" in resp.data    # 2.
    assert b"Invalid password!" in resp.data  # 3.
| 36.812734
| 121
| 0.706176
|
4a08b6627f5016c142ec0e19a7bbd43cf3f9698f
| 8,230
|
py
|
Python
|
src/charm.py
|
dstathis/loki-operator
|
de7e732c2ed1c7c72ac6aac89608ab2c9fd6abff
|
[
"Apache-2.0"
] | null | null | null |
src/charm.py
|
dstathis/loki-operator
|
de7e732c2ed1c7c72ac6aac89608ab2c9fd6abff
|
[
"Apache-2.0"
] | null | null | null |
src/charm.py
|
dstathis/loki-operator
|
de7e732c2ed1c7c72ac6aac89608ab2c9fd6abff
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
#
# Learn more at: https://juju.is/docs/sdk
"""Charm the service.
Refer to the following post for a quick-start guide that will help you
develop a new k8s charm using the Operator Framework:
https://discourse.charmhub.io/t/4208
"""
import logging
import os
import textwrap
import yaml
from charms.alertmanager_k8s.v0.alertmanager_dispatch import AlertmanagerConsumer
from charms.grafana_k8s.v0.grafana_source import GrafanaSourceProvider
from charms.loki_k8s.v0.loki_push_api import LokiPushApiProvider
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus, BlockedStatus, WaitingStatus
from ops.pebble import PathError, ProtocolError
from kubernetes_service import K8sServicePatch, PatchFailed
from loki_server import LokiServer, LokiServerError, LokiServerNotReadyError
# Paths in workload container
LOKI_CONFIG = "/etc/loki/local-config.yaml"  # config file the charm pushes into the container
LOKI_DIR = "/loki"  # Loki data root; chunks and rules live under here
RULES_DIR = os.path.join(LOKI_DIR, "rules")  # ruler rules directory (referenced in rendered config)
logger = logging.getLogger(__name__)
class LokiOperatorCharm(CharmBase):
    """Charm the service.

    Runs Loki in a Pebble-managed workload container, patches the Kubernetes
    service ports, registers the unit as a Grafana "loki" datasource and
    feeds related Alertmanager URLs into the ruler section of Loki's config.
    """
    # Unit-local persisted state: whether the K8s service was already patched,
    # and the YAML config most recently pushed to the workload container.
    _stored = StoredState()
    # Loki HTTP listen port (also exposed on the patched K8s service).
    _port = 3100
    # Name of both the workload container and the Pebble service.
    _name = "loki"
    def __init__(self, *args):
        super().__init__(*args)
        self._container = self.unit.get_container(self._name)
        self._stored.set_default(k8s_service_patched=False, config="")
        # Consume Alertmanager cluster info so ruler alerts can be routed.
        self.alertmanager_consumer = AlertmanagerConsumer(self, relation_name="alertmanager")
        # Advertise this unit as a "loki" datasource to related Grafanas.
        self.grafana_source_provider = GrafanaSourceProvider(
            charm=self,
            refresh_event=self.on.loki_pebble_ready,
            source_type="loki",
            source_port=str(self._port),
        )
        self.framework.observe(self.on.install, self._on_install)
        self.framework.observe(self.on.config_changed, self._on_config_changed)
        self.framework.observe(self.on.upgrade_charm, self._on_upgrade_charm)
        self.framework.observe(self.on.loki_pebble_ready, self._on_loki_pebble_ready)
        self.framework.observe(
            self.alertmanager_consumer.on.cluster_changed, self._on_alertmanager_change
        )
        self.loki_provider = None
        self._loki_server = LokiServer()
        self._provide_loki()
    ##############################################
    #            CHARM HOOKS HANDLERS            #
    ##############################################
    def _on_install(self, _):
        """Handler for the install event during which we will update the K8s service."""
        self._patch_k8s_service()
    def _on_config_changed(self, event):
        """Handler for config-changed: re-render config and (re)start Loki."""
        self._configure(event)
    def _on_upgrade_charm(self, event):
        """Handler for upgrade-charm: re-patch the K8s service, then reconfigure."""
        self._patch_k8s_service()
        self._configure(event)
    def _on_loki_pebble_ready(self, event):
        """Handler for pebble-ready on the workload container."""
        self._configure(event)
    def _on_alertmanager_change(self, event):
        """Handler for Alertmanager cluster changes (members added/removed)."""
        self._configure(event)
    def _configure(self, event):
        """Configure Loki charm.

        Renders the Pebble layer and the Loki config, pushes the config into
        the workload container when it changed, and (re)starts the service.
        Returns False when configuration could not be applied yet.
        """
        restart = False
        if not self._container.can_connect():
            # Container not up yet; a later pebble-ready event will retry.
            self.unit.status = WaitingStatus("Waiting for Pebble ready")
            return False
        current_layer = self._container.get_plan().services
        new_layer = self._build_pebble_layer
        # NOTE(review): current_layer is the *services* mapping while new_layer
        # is the *full* layer spec (summary/description/services) — they can
        # never compare equal, so restart ends up True on every run. Likely
        # intended: current_layer != new_layer["services"]. Confirm and fix.
        if current_layer != new_layer:
            restart = True
        config = self._loki_config()
        try:
            # Only push when the rendered config differs from what we last wrote.
            if yaml.safe_load(self._stored.config) != config:
                config_as_yaml = yaml.safe_dump(config)
                self._container.push(LOKI_CONFIG, config_as_yaml)
                logger.info("Pushed new configuration")
                self._stored.config = config_as_yaml
                restart = True
        except (ProtocolError, PathError) as e:
            # Pebble filesystem errors: surface them via unit status.
            self.unit.status = BlockedStatus(str(e))
            return False
        if restart:
            self._container.add_layer(self._name, new_layer, combine=True)
            self._container.restart(self._name)
            logger.info("Loki (re)started")
        self.unit.status = ActiveStatus()
    @property
    def _loki_command(self):
        """Construct command to launch Loki.
        Returns:
            a string consisting of Loki command and associated
            command line options.
        """
        return f"/usr/bin/loki -config.file={LOKI_CONFIG}"
    @property
    def _build_pebble_layer(self):
        """Construct the pebble layer.
        Returns:
            a Pebble layer specification for the Loki workload container.
        """
        pebble_layer = {
            "summary": "Loki layer",
            "description": "pebble config layer for Loki",
            "services": {
                "loki": {
                    "override": "replace",
                    "summary": "loki",
                    "command": self._loki_command,
                    "startup": "enabled",
                },
            },
        }
        return pebble_layer
    ##############################################
    #              UTILITY METHODS               #
    ##############################################
    def _provide_loki(self):
        """Gets LokiPushApiProvider instance into `self.loki_provider`.

        Stays None (with Waiting/Blocked unit status) while the Loki HTTP API
        is not reachable; a later event re-runs this via __init__.
        """
        try:
            version = self._loki_server.version
            self.loki_provider = self.loki_provider or LokiPushApiProvider(self)
            logger.debug("Loki Provider is available. Loki version: %s", version)
        except LokiServerNotReadyError as e:
            # Server not up yet — transient condition.
            self.unit.status = WaitingStatus(str(e))
        except LokiServerError as e:
            self.unit.status = BlockedStatus(str(e))
    def _patch_k8s_service(self):
        """Fix the Kubernetes service that was setup by Juju with correct port numbers."""
        # Only the leader patches, and only once (flag kept in StoredState).
        if self.unit.is_leader() and not self._stored.k8s_service_patched:
            service_ports = [
                (f"{self.app.name}", self._port, self._port),
            ]
            try:
                K8sServicePatch.set_ports(self.app.name, service_ports)
            except PatchFailed as e:
                logger.error("Unable to patch the Kubernetes service: %s", str(e))
            else:
                self._stored.k8s_service_patched = True
                logger.info("Successfully patched the Kubernetes service!")
    def _alerting_config(self) -> str:
        """Construct Loki alerting configuration.
        Returns:
            a string consisting of comma-separated list of Alertmanager URLs
            to send notifications to; empty when no alertmanager is related.
        """
        alerting_config = ""
        alertmanagers = self.alertmanager_consumer.get_cluster_info()
        if not alertmanagers:
            logger.debug("No alertmanagers available")
            return alerting_config
        return ",".join([f"http://{am}" for am in alertmanagers])
    def _loki_config(self) -> dict:
        """Construct Loki configuration.
        Some minimal configuration is required for Loki to start, including: storage paths, schema,
        ring.
        Returns:
            Dictionary representation of the Loki YAML config
        """
        # The f-string below is runtime data: interpolations include the unit
        # IP (empty string until the provider is ready) and alertmanager URLs.
        config = textwrap.dedent(
            f"""
            target: all
            auth_enabled: false
            server:
                http_listen_port: {self._port}
            common:
              path_prefix: {LOKI_DIR}
              storage:
                filesystem:
                  chunks_directory: {os.path.join(LOKI_DIR, "chunks")}
                  rules_directory: {RULES_DIR}
              replication_factor: 1
              ring:
                  instance_addr: {self.loki_provider.unit_ip if self.loki_provider else ""}
                  kvstore:
                    store: inmemory
            schema_config:
                configs:
                    - from: 2020-10-24
                      store: boltdb-shipper
                      object_store: filesystem
                      schema: v11
                      index:
                        prefix: index_
                        period: 24h
            ruler:
                alertmanager_url: {self._alerting_config()}
            """
        )
        return yaml.safe_load(config)
# Entry point: hand control to the Operator Framework's event dispatcher.
if __name__ == "__main__":
    main(LokiOperatorCharm)
| 33.319838
| 99
| 0.599149
|
4a08b6d05bd4c8eba5132e173272e92d72108093
| 1,277
|
py
|
Python
|
fmcapi/api_objects/object_services/vlantags.py
|
dadelowo367/fmcapi
|
cd6ac6d118383a06063ead563ca98c7994238715
|
[
"BSD-3-Clause"
] | 57
|
2017-06-13T17:06:20.000Z
|
2021-07-27T08:53:25.000Z
|
fmcapi/api_objects/object_services/vlantags.py
|
dadelowo367/fmcapi
|
cd6ac6d118383a06063ead563ca98c7994238715
|
[
"BSD-3-Clause"
] | 66
|
2017-11-09T16:02:45.000Z
|
2021-08-04T20:52:41.000Z
|
fmcapi/api_objects/object_services/vlantags.py
|
dadelowo367/fmcapi
|
cd6ac6d118383a06063ead563ca98c7994238715
|
[
"BSD-3-Clause"
] | 56
|
2017-06-08T07:53:12.000Z
|
2021-07-30T13:26:47.000Z
|
"""VLAN Tags Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from fmcapi.api_objects.helper_functions import validate_vlans
import logging
class VlanTags(APIClassTemplate):
    """API object representing VLAN Tag objects in the FMC."""

    # JSON keys accepted from / serialized to the FMC for this object type.
    VALID_JSON_DATA = ["id", "name", "type", "data", "description"]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    # REST endpoint suffix for this object collection.
    URL_SUFFIX = "/object/vlantags"
    # Fields that must be populated before a POST is allowed.
    REQUIRED_FOR_POST = ["name", "data"]

    def __init__(self, fmc, **kwargs):
        """
        Initialize VlanTags object.

        Sets self.type to "VlanTag" and hands all kwargs to the template parser.

        :param fmc: (object) FMC object
        :param kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for VlanTags class.")
        self.type = "VlanTag"
        self.parse_kwargs(**kwargs)

    def vlans(self, start_vlan, end_vlan=""):
        """
        Associate a VLAN (or an inclusive VLAN range) with this object.

        :param start_vlan: (int) Lower VLAN.
        :param end_vlan: (int) Upper VLAN.
        """
        logging.debug("In vlans() for VlanTags class.")
        low, high = validate_vlans(start_vlan=start_vlan, end_vlan=end_vlan)
        self.data = {"startTag": low, "endTag": high}
| 31.925
| 87
| 0.639781
|
4a08b7d29915275ea4b10a17c0ade34fdcab09b8
| 9,132
|
py
|
Python
|
src/statistics.py
|
lampidis/Database-for-Papers
|
bc58093a882371764b2b98da4a92eac28a1ff2a9
|
[
"MIT"
] | 2
|
2021-03-13T17:07:07.000Z
|
2021-03-13T17:07:10.000Z
|
src/statistics.py
|
stavrako/ece-data-bases
|
c89f0a1c5f84623aedcfa0893d84a72b7d2ae909
|
[
"MIT"
] | null | null | null |
src/statistics.py
|
stavrako/ece-data-bases
|
c89f0a1c5f84623aedcfa0893d84a72b7d2ae909
|
[
"MIT"
] | null | null | null |
import pymysql
from tkinter import *
from tkinter.ttk import *
import textwrap
def main_statistics():
    """Open a Tk window offering four canned statistics queries over the papers DB.

    Each "Search" button runs one parameterized SQL query against the remote
    MySQL database and fills the shared listbox with the resulting rows.
    """
    root = Tk()
    root.title('Statistics')
    root.geometry("730x450")
    wrapper = textwrap.TextWrapper(width=50)
    # NOTE(review): DB credentials are hard-coded; consider loading them from
    # configuration or environment variables instead.
    con = pymysql.connect(host = '150.140.186.221',
                port = 3306,
                user='db20_up1059338',
                passwd='up1059338',
                database='project_db20_up1059338')
    cur = con.cursor()
    # Query 1: article count per (year, journal) for journals whose
    # impact factor is at least the value typed into e1.
    def com16():
        list1.delete(0,END)
        # Only run when the entry holds a plain integer (guards the %s param).
        if e1.get().isdigit()!=0:
            cur.execute("""SELECT `Όνομα`, YEAR(`Ημερομηνία_Έκδοσης`), COUNT(`Τίτλος`)
                            FROM `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ` JOIN `ΠΕΡΙΟΔΙΚΟ` ON `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Κωδικός_Περιοδικού`=`ΠΕΡΙΟΔΙΚΟ`.`Κωδικός_Περιοδικού`
                            JOIN `ΑΡΘΡΟ` ON `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Κωδικός_Τεύχους`=`ΑΡΘΡΟ`.`Κωδικός_Τεύχους`
                            WHERE `impact_factor` >= %s
                            GROUP BY YEAR(`Ημερομηνία_Έκδοσης`), `Όνομα` """, e1.get())
            r = cur.fetchall()
            for row in r:
                list1.insert(END, row)
        return
    # Query 2: authors with at least e2_1 articles within the last e2_2 years.
    def com17():
        list1.delete(0,END)
        if (e2_1.get().isdigit()!=0 and e2_2.get().isdigit()!=0):
            cur.execute("""SELECT `Συντάσσει`.`Κωδικός_Συντάκτη`, `Όνομα`, `Επώνυμο`, COUNT(`Συντάσσει`.`Κωδικός_Άρθρου`) AS `Αριθμός_Άρθρων`
                            FROM `ΣΥΝΤΑΚΤΗΣ` JOIN `Συντάσσει` ON `ΣΥΝΤΑΚΤΗΣ`.`Κωδικός_Συντάκτη`=`Συντάσσει`.`Κωδικός_Συντάκτη`
                            JOIN `ΑΡΘΡΟ` ON `Συντάσσει`.`Κωδικός_Άρθρου`=`ΑΡΘΡΟ`.`Κωδικός_Άρθρου`
                            JOIN `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ` ON `ΑΡΘΡΟ`.`Κωδικός_Τεύχους`=`ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Κωδικός_Τεύχους`
                            WHERE (YEAR(CURDATE()) - YEAR(`ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Ημερομηνία_Έκδοσης`)) <= %s
                            GROUP BY `Συντάσσει`.`Κωδικός_Συντάκτη`, `Όνομα`, `Επώνυμο`
                            HAVING `Αριθμός_Άρθρων` >= %s""",(e2_2.get(),e2_1.get()))
            r = cur.fetchall()
            for row in r:
                list1.insert(END, row)
        return
    # Query 3: average article page count per issue, for issues published
    # between year e3_1 and year e3_2 (inclusive).
    def com18():
        list1.delete(0,END)
        if (e3_1.get().isdigit()!=0 and e3_2.get().isdigit()!=0):
            cur.execute("""SELECT `ΠΕΡΙΟΔΙΚΟ`.`Όνομα`, `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Αριθμός_Τεύχους`, ROUND(AVG(`ΑΡΘΡΟ`.`Τελική_Σελίδα`-`ΑΡΘΡΟ`.`Αρχική_Σελίδα`)) AS `Μέσος Όρος Σελίδων`
                            FROM `ΑΡΘΡΟ` JOIN `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ` ON `ΑΡΘΡΟ`.`Κωδικός_Τεύχους`=`ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Κωδικός_Τεύχους`
                            JOIN `ΠΕΡΙΟΔΙΚΟ` ON `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Κωδικός_Περιοδικού`=`ΠΕΡΙΟΔΙΚΟ`.`Κωδικός_Περιοδικού`
                            WHERE YEAR(`ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Ημερομηνία_Έκδοσης`) >= %s AND YEAR(`ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Ημερομηνία_Έκδοσης`) <= %s
                            GROUP BY `ΠΕΡΙΟΔΙΚΟ`.`Όνομα`, `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Αριθμός_Τεύχους`""",(e3_1.get(),e3_2.get()))
            r = cur.fetchall()
            for row in r:
                list1.insert(END, row)
        return
    # Query 4: co-authors (from other institutions) that author e4 has worked
    # with while affiliated with their current institution; uses nested
    # subqueries on publication dates to scope the collaboration window.
    def com19():
        list1.delete(0,END)
        id = e4.get()
        if id.isdigit()!=0:
            cur.execute("""SELECT `ΣΥΝΤΑΚΤΗΣ`.`Όνομα`,`Επώνυμο`, `ΙΔΡΥΜΑ`.`Όνομα` AS `Ίδρυμα`
                            FROM `Συντάσσει` JOIN `ΣΥΝΤΑΚΤΗΣ` ON `Συντάσσει`.`Κωδικός_Συντάκτη`=`ΣΥΝΤΑΚΤΗΣ`.`Κωδικός_Συντάκτη`
                            JOIN `ΙΔΡΥΜΑ` ON `Συντάσσει`.`Κωδικός_Ιδρύματος`=`ΙΔΡΥΜΑ`.`Κωδικός_Ιδρύματος`
                            JOIN `ΑΡΘΡΟ` ON `Συντάσσει`.`Κωδικός_Άρθρου`=`ΑΡΘΡΟ`.`Κωδικός_Άρθρου`
                            JOIN `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ` ON `ΑΡΘΡΟ`.`Κωδικός_Τεύχους`=`ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Κωδικός_Τεύχους`
                            WHERE `ΣΥΝΤΑΚΤΗΣ`.`Κωδικός_Συντάκτη` <> %s
                            AND `Συντάσσει`.`Κωδικός_Ιδρύματος` NOT IN (SELECT `Συντάσσει`.`Κωδικός_Ιδρύματος`
                                FROM `Συντάσσει` JOIN `ΑΡΘΡΟ` ON `Συντάσσει`.`Κωδικός_Άρθρου`=`ΑΡΘΡΟ`.`Κωδικός_Άρθρου`
                                JOIN `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ` ON `ΑΡΘΡΟ`.`Κωδικός_Τεύχους`=`ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Κωδικός_Τεύχους`
                                WHERE `Συντάσσει`.`Κωδικός_Συντάκτη`=%s AND `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Ημερομηνία_Έκδοσης`=ANY (SELECT MAX(`Ημερομηνία_Έκδοσης`)
                                    FROM `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ` JOIN `ΑΡΘΡΟ` ON `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Κωδικός_Τεύχους`=`ΑΡΘΡΟ`.`Κωδικός_Τεύχους`
                                    JOIN `Συντάσσει` ON `ΑΡΘΡΟ`.`Κωδικός_Άρθρου`=`Συντάσσει`.`Κωδικός_Άρθρου`
                                    WHERE `Συντάσσει`.`Κωδικός_Συντάκτη`=%s))
                            AND `Συντάσσει`.`Κωδικός_Άρθρου` IN (SELECT `Συντάσσει`.`Κωδικός_Άρθρου`FROM `Συντάσσει` WHERE `Συντάσσει`.`Κωδικός_Συντάκτη`=%s)
                            AND `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Ημερομηνία_Έκδοσης` >= ANY (SELECT MIN(`Ημερομηνία_Έκδοσης`)
                                FROM `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ` JOIN `ΑΡΘΡΟ` ON `ΤΕΥΧΟΣ_ΠΕΡΙΟΔΙΚΟΥ`.`Κωδικός_Τεύχους`=`ΑΡΘΡΟ`.`Κωδικός_Τεύχους`
                                JOIN `Συντάσσει` ON `ΑΡΘΡΟ`.`Κωδικός_Άρθρου`=`Συντάσσει`.`Κωδικός_Άρθρου`
                                WHERE `Συντάσσει`.`Κωδικός_Συντάκτη`=%s)""",(id,id,id,id,id))
            r = cur.fetchall()
            for row in r:
                list1.insert(END, row)
        return
    #--------------------------------------------------------------------------------------------
    # Result list shared by all four queries, with a vertical scrollbar.
    list1 = Listbox(root, height=25, width=65) #creating the list space to display all the rows of the table
    list1.grid(row=0, column=0, rowspan=18, columnspan=2) #determining the size
    vsb1 = Scrollbar(root, orient="vertical")
    vsb1.grid(row=0, column=2, rowspan=25, sticky="ns")
    list1.configure(yscrollcommand=vsb1.set) #configuring the scroll function for the scrollbar object sb1
    vsb1.configure(command=list1.yview)
    #--------------------------------------------------------------------------------------------
    # Create entry box (one or two inputs per query)
    e1 = Entry(root)
    e1.grid(row=1, column=3, columnspan=2)
    e2_1 = Entry(root)
    e2_1.grid(row=4, column=3)
    e2_2 = Entry(root)
    e2_2.grid(row=4, column=4)
    e3_1 = Entry(root)
    e3_1.grid(row=7, column=3)
    e3_2 = Entry(root)
    e3_2.grid(row=7, column=4)
    e4 = Entry(root)
    e4.grid(row=10, column=3, columnspan=2)
    #--------------------------------------------------------------------------------------------
    # Create label and text box (Greek prompts describing each query's inputs)
    wr1 = wrapper.fill(text="Αριθμός άρθρων ανά έτος, από περιοδικά με impact factor μεγαλύτερο του (entry_box)")
    label1 = Label(root, text=wr1, width=50)
    label1.grid(row=0, column=3, columnspan=2)
    wr2 = wrapper.fill(text="Εμφάνισε τους συντάκτες οι οποίοι έχουν εκδώσει πάνω από (entry_box_left) άρθρα από διάφορα ιδρύματα, τα τελευταία (entry_box_right) χρόνια")
    label2 = Label(root, text=wr2, width=50)
    label2.grid(row=3, column=3, columnspan=2)
    wr3 = wrapper.fill(text="Εμφάνισε τον μέσο όρο σελίδων των άρθρων, για κάθε τεύχος που εκδόθηκε στο διάστημα (entry_box_left - ex. [2015]) έως (entry_box_right - ex. [2019])")
    label3 = Label(root, text=wr3, width=50)
    label3.grid(row=6, column=3, columnspan=2)
    wr4 = wrapper.fill(text="Με ποιους συντάκτες από διαφορετικά ιδρύματα έχει συνεργαστεί κάποιος (entry_box_left - ID_Συντάκτη) όσο καιρό είναι στο ίδρυμα που βρίσκεται τώρα")
    label4 = Label(root, text=wr4, width=50)
    label4.grid(row=9, column=3, columnspan=2)
    #--------------------------------------------------------------------------------------------
    # Create button (one Search per query, plus Close)
    b1 = Button(root, text="Search", width=10,command= com16)
    b1.grid(row=2, column=3, columnspan=2)
    b2 = Button(root, text="Search", width=10, command= com17)
    b2.grid(row=5, column=3, columnspan=2)
    b3 = Button(root, text="Search", width=10, command= com18)
    b3.grid(row=8, column=3, columnspan=2)
    b4 = Button(root, text="Search", width=10, command= com19)
    b4.grid(row=11, column=3, columnspan=2)
    b5 = Button(root, text="Close", width=10, command=root.destroy)
    b5.grid(row=14, column=3, columnspan=2)
    #--------------------------------------------------------------------------------------------
    # Block until the window is closed, then release DB resources.
    root.mainloop()
    cur.close()
    con.close()
| 56.720497
| 265
| 0.511936
|
4a08b81d886436587fcf2dd913f4d4a257b69aea
| 20,700
|
py
|
Python
|
scvi/model/base/_rnamixin.py
|
marianogabitto/scvi-tools-1
|
3bbdb41956f73f24156604c9b17ba034c9e0c19b
|
[
"BSD-3-Clause"
] | null | null | null |
scvi/model/base/_rnamixin.py
|
marianogabitto/scvi-tools-1
|
3bbdb41956f73f24156604c9b17ba034c9e0c19b
|
[
"BSD-3-Clause"
] | null | null | null |
scvi/model/base/_rnamixin.py
|
marianogabitto/scvi-tools-1
|
3bbdb41956f73f24156604c9b17ba034c9e0c19b
|
[
"BSD-3-Clause"
] | null | null | null |
import inspect
import logging
import warnings
from functools import partial
from typing import Dict, Iterable, Optional, Sequence, Union
import numpy as np
import pandas as pd
import torch
from anndata import AnnData
from scvi import _CONSTANTS
from scvi._compat import Literal
from scvi._docs import doc_differential_expression
from scvi._utils import _doc_params
from .._utils import (
_get_batch_code_from_category,
_get_var_names_from_setup_anndata,
scrna_raw_counts_properties,
)
from ._utils import _de_core
logger = logging.getLogger(__name__)
Number = Union[int, float]
class RNASeqMixin:
"""General purpose methods for RNA-seq analysis."""
def _get_transform_batch_gen_kwargs(self, batch):
if "transform_batch" in inspect.signature(self.module.generative).parameters:
return dict(transform_batch=batch)
else:
raise NotImplementedError(
"Transforming batches is not implemented for this model."
)
@torch.no_grad()
def get_normalized_expression(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
gene_list: Optional[Sequence[str]] = None,
library_size: Union[float, Literal["latent"]] = 1,
n_samples: int = 1,
n_samples_overall: int = None,
batch_size: Optional[int] = None,
return_mean: bool = True,
return_numpy: Optional[bool] = None,
) -> Union[np.ndarray, pd.DataFrame]:
r"""
Returns the normalized (decoded) gene expression.
This is denoted as :math:`\rho_n` in the scVI paper.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
transform_batch
Batch to condition on.
If transform_batch is:
- None, then real observed batch is used.
- int, then batch transform_batch is used.
gene_list
Return frequencies of expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
library_size
Scale the expression frequencies to a common library size.
This allows gene expression levels to be interpreted on a common scale of relevant
magnitude. If set to `"latent"`, use the latent libary size.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
return_mean
Whether to return the mean of the samples.
return_numpy
Return a :class:`~numpy.ndarray` instead of a :class:`~pandas.DataFrame`. DataFrame includes
gene names as columns. If either `n_samples=1` or `return_mean=True`, defaults to `False`.
Otherwise, it defaults to `True`.
Returns
-------
If `n_samples` > 1 and `return_mean` is False, then the shape is `(samples, cells, genes)`.
Otherwise, shape is `(cells, genes)`. In this case, return type is :class:`~pandas.DataFrame` unless `return_numpy` is True.
"""
adata = self._validate_anndata(adata)
if indices is None:
indices = np.arange(adata.n_obs)
if n_samples_overall is not None:
indices = np.random.choice(indices, n_samples_overall)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
transform_batch = _get_batch_code_from_category(adata, transform_batch)
if gene_list is None:
gene_mask = slice(None)
else:
all_genes = _get_var_names_from_setup_anndata(adata)
gene_mask = [True if gene in gene_list else False for gene in all_genes]
if n_samples > 1 and return_mean is False:
if return_numpy is False:
warnings.warn(
"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray"
)
return_numpy = True
if library_size == "latent":
generative_output_key = "px_rate"
scaling = 1
else:
generative_output_key = "px_scale"
scaling = library_size
exprs = []
for tensors in scdl:
per_batch_exprs = []
for batch in transform_batch:
generative_kwargs = self._get_transform_batch_gen_kwargs(batch)
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
output = generative_outputs[generative_output_key]
output = output[..., gene_mask]
output *= scaling
output = output.cpu().numpy()
per_batch_exprs.append(output)
per_batch_exprs = np.stack(
per_batch_exprs
) # shape is (len(transform_batch) x batch_size x n_var)
exprs += [per_batch_exprs.mean(0)]
if n_samples > 1:
# The -2 axis correspond to cells.
exprs = np.concatenate(exprs, axis=-2)
else:
exprs = np.concatenate(exprs, axis=0)
if n_samples > 1 and return_mean:
exprs = exprs.mean(0)
if return_numpy is None or return_numpy is False:
return pd.DataFrame(
exprs,
columns=adata.var_names[gene_mask],
index=adata.obs_names[indices],
)
else:
return exprs
@_doc_params(
doc_differential_expression=doc_differential_expression,
)
def differential_expression(
self,
adata: Optional[AnnData] = None,
groupby: Optional[str] = None,
group1: Optional[Iterable[str]] = None,
group2: Optional[str] = None,
idx1: Optional[Union[Sequence[int], Sequence[bool], str]] = None,
idx2: Optional[Union[Sequence[int], Sequence[bool], str]] = None,
mode: Literal["vanilla", "change"] = "change",
delta: float = 0.25,
batch_size: Optional[int] = None,
all_stats: bool = True,
batch_correction: bool = False,
batchid1: Optional[Iterable[str]] = None,
batchid2: Optional[Iterable[str]] = None,
fdr_target: float = 0.05,
silent: bool = False,
**kwargs,
) -> pd.DataFrame:
r"""
A unified method for differential expression analysis.
Implements `"vanilla"` DE [Lopez18]_ and `"change"` mode DE [Boyeau19]_.
Parameters
----------
{doc_differential_expression}
**kwargs
Keyword args for :func:`scvi.utils.DifferentialComputation.get_bayes_factors`
Returns
-------
Differential expression DataFrame.
"""
adata = self._validate_anndata(adata)
col_names = _get_var_names_from_setup_anndata(adata)
model_fn = partial(
self.get_normalized_expression,
return_numpy=True,
n_samples=1,
batch_size=batch_size,
)
result = _de_core(
adata,
model_fn,
groupby,
group1,
group2,
idx1,
idx2,
all_stats,
scrna_raw_counts_properties,
col_names,
mode,
batchid1,
batchid2,
delta,
batch_correction,
fdr_target,
silent,
**kwargs,
)
return result
@torch.no_grad()
def posterior_predictive_sample(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: int = 1,
gene_list: Optional[Sequence[str]] = None,
batch_size: Optional[int] = None,
) -> np.ndarray:
r"""
Generate observation samples from the posterior predictive distribution.
The posterior predictive distribution is written as :math:`p(\hat{x} \mid x)`.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of samples for each cell.
gene_list
Names of genes of interest.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
Returns
-------
x_new : :py:class:`torch.Tensor`
tensor with shape (n_cells, n_genes, n_samples)
"""
if self.module.gene_likelihood not in ["zinb", "nb", "poisson"]:
raise ValueError("Invalid gene_likelihood.")
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
if indices is None:
indices = np.arange(adata.n_obs)
if gene_list is None:
gene_mask = slice(None)
else:
all_genes = _get_var_names_from_setup_anndata(adata)
gene_mask = [True if gene in gene_list else False for gene in all_genes]
x_new = []
for tensors in scdl:
samples = self.module.sample(tensors, n_samples=n_samples)
if gene_list is not None:
samples = samples[:, gene_mask, ...]
x_new.append(samples)
x_new = torch.cat(x_new) # Shape (n_cells, n_genes, n_samples)
return x_new.numpy()
@torch.no_grad()
def _get_denoised_samples(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: int = 25,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[Sequence[int]] = None,
) -> np.ndarray:
"""
Return samples from an adjusted posterior predictive.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
rna_size_factor
size factor for RNA prior to sampling gamma distribution.
transform_batch
int of which batch to condition on for all cells.
Returns
-------
denoised_samples
"""
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
data_loader_list = []
for tensors in scdl:
x = tensors[_CONSTANTS.X_KEY]
generative_kwargs = self._get_transform_batch_gen_kwargs(transform_batch)
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
px_scale = generative_outputs["px_scale"]
px_r = generative_outputs["px_r"]
device = px_r.device
rate = rna_size_factor * px_scale
if len(px_r.size()) == 2:
px_dispersion = px_r
else:
px_dispersion = torch.ones_like(x).to(device) * px_r
# This gamma is really l*w using scVI manuscript notation
p = rate / (rate + px_dispersion)
r = px_dispersion
l_train = torch.distributions.Gamma(r, (1 - p) / p).sample()
data = l_train.cpu().numpy()
# """
# In numpy (shape, scale) => (concentration, rate), with scale = p /(1 - p)
# rate = (1 - p) / p # = 1/scale # used in pytorch
# """
data_loader_list += [data]
data_loader_list[-1] = np.transpose(data_loader_list[-1], (1, 2, 0))
return np.concatenate(data_loader_list, axis=0)
@torch.no_grad()
def get_feature_correlation_matrix(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: int = 10,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
correlation_type: Literal["spearman", "pearson"] = "spearman",
) -> pd.DataFrame:
"""
Generate gene-gene correlation matrix using scvi uncertainty and expression.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
rna_size_factor
size factor for RNA prior to sampling gamma distribution.
transform_batch
Batches to condition on.
If transform_batch is:
- None, then real observed batch is used.
- int, then batch transform_batch is used.
- list of int, then values are averaged over provided batches.
correlation_type
One of "pearson", "spearman".
Returns
-------
Gene-gene correlation matrix
"""
from scipy.stats import spearmanr
adata = self._validate_anndata(adata)
transform_batch = _get_batch_code_from_category(adata, transform_batch)
corr_mats = []
for b in transform_batch:
denoised_data = self._get_denoised_samples(
adata=adata,
indices=indices,
n_samples=n_samples,
batch_size=batch_size,
rna_size_factor=rna_size_factor,
transform_batch=b,
)
flattened = np.zeros(
(denoised_data.shape[0] * n_samples, denoised_data.shape[1])
)
for i in range(n_samples):
flattened[
denoised_data.shape[0] * (i) : denoised_data.shape[0] * (i + 1)
] = denoised_data[:, :, i]
if correlation_type == "pearson":
corr_matrix = np.corrcoef(flattened, rowvar=False)
elif correlation_type == "spearman":
corr_matrix, _ = spearmanr(flattened)
else:
raise ValueError(
"Unknown correlation type. Choose one of 'spearman', 'pearson'."
)
corr_mats.append(corr_matrix)
corr_matrix = np.mean(np.stack(corr_mats), axis=0)
var_names = _get_var_names_from_setup_anndata(adata)
return pd.DataFrame(corr_matrix, index=var_names, columns=var_names)
@torch.no_grad()
def get_likelihood_parameters(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: Optional[int] = 1,
give_mean: Optional[bool] = False,
batch_size: Optional[int] = None,
) -> Dict[str, np.ndarray]:
r"""
Estimates for the parameters of the likelihood :math:`p(x \mid z)`
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
give_mean
Return expected value of parameters or a samples
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
"""
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
dropout_list = []
mean_list = []
dispersion_list = []
for tensors in scdl:
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
compute_loss=False,
)
px_r = generative_outputs["px_r"]
px_rate = generative_outputs["px_rate"]
px_dropout = generative_outputs["px_dropout"]
n_batch = px_rate.size(0) if n_samples == 1 else px_rate.size(1)
px_r = px_r.cpu().numpy()
if len(px_r.shape) == 1:
dispersion_list += [np.repeat(px_r[np.newaxis, :], n_batch, axis=0)]
else:
dispersion_list += [px_r]
mean_list += [px_rate.cpu().numpy()]
dropout_list += [px_dropout.cpu().numpy()]
dropout = np.concatenate(dropout_list)
means = np.concatenate(mean_list)
dispersions = np.concatenate(dispersion_list)
if give_mean and n_samples > 1:
dropout = dropout.mean(0)
means = means.mean(0)
return_dict = {}
return_dict["mean"] = means
if self.module.gene_likelihood == "zinb":
return_dict["dropout"] = dropout
return_dict["dispersions"] = dispersions
if self.module.gene_likelihood == "nb":
return_dict["dispersions"] = dispersions
return return_dict
@torch.no_grad()
def get_latent_library_size(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
give_mean: bool = True,
batch_size: Optional[int] = None,
) -> np.ndarray:
r"""
Returns the latent library size for each cell.
This is denoted as :math:`\ell_n` in the scVI paper.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
give_mean
Return the mean or a sample from the posterior distribution.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
"""
self._check_if_trained(warn=False)
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
libraries = []
for tensors in scdl:
inference_inputs = self.module._get_inference_input(tensors)
outputs = self.module.inference(**inference_inputs)
library = outputs["library"]
if not give_mean:
library = torch.exp(library)
else:
ql_m = outputs["ql_m"]
ql_v = outputs["ql_v"]
if ql_m is None or ql_v is None:
raise RuntimeError(
"The module for this model does not compute the posterior distribution "
"for the library size. Set `give_mean` to False to use the observed library size instead."
)
library = torch.distributions.LogNormal(ql_m, ql_v.sqrt()).mean
libraries += [library.cpu()]
return torch.cat(libraries).numpy()
| 36.379613
| 132
| 0.584589
|
4a08b8a78a4ead2facf99bed1b7b198f70841e67
| 2,894
|
py
|
Python
|
code/python/archive/c0124_segment_records.py
|
jesnyder/MeasuredStress
|
8009529da326a66733c26983cc59af8619f6cb42
|
[
"MIT"
] | null | null | null |
code/python/archive/c0124_segment_records.py
|
jesnyder/MeasuredStress
|
8009529da326a66733c26983cc59af8619f6cb42
|
[
"MIT"
] | null | null | null |
code/python/archive/c0124_segment_records.py
|
jesnyder/MeasuredStress
|
8009529da326a66733c26983cc59af8619f6cb42
|
[
"MIT"
] | null | null | null |
from c0101_retrieve_ref import retrieve_ref
from c0102_timestamp import timestamp_source
from c0103_trim_record_to_max import trim_record_to_max
from c0104_plot_timestamp import plot_timestamp
from c0105_find_records import find_records
from c0106_record_to_summary import record_to_summary
from c0107_decide_inclusion import decide_inclusion
from c0108_save_meta import save_meta
from c0109_retrieve_meta import retrieve_meta
from c0110_find_temp_end import find_temp_end
from c0111_retrieve_analyzed import retrieve_analyzed
from c0112_plot_truncate import plot_truncate
from c0113_plot_acc import plot_acc
from c0114_segment_data import segment_data
from c0115_plot_segment import plot_segment
from c0116_find_pairs import find_pairs
from c0117_find_paired_end import find_paired_end
from c0118_find_paired_duration import find_paired_duration
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def segment_records():
"""
segment records
"""
print("begin segmenting records")
study_list = retrieve_ref('study_list')
format_types = retrieve_ref('format_types')
segment_list = retrieve_ref('segment_list')
sensor_list = retrieve_ref('sensor_list')
timePreStudy = retrieve_ref('timePreStudy')
timePostStudy = retrieve_ref('timePostStudy')
for study in study_list:
df_meta = retrieve_meta(study)
source_path = list(df_meta['source_path'])
for record in source_path:
for format in format_types:
for sensor in sensor_list:
df = retrieve_analyzed(study, format, record, 'All', sensor)
for segment in segment_list:
if segment == segment_list[0]:
timeEnd = timePreStudy
df = df.drop(df[df['timeMinutes'] > timeEnd].index)
if segment == segment_list[1]:
timeBegin = timePreStudy
timeEnd = timePostStudy
df = df.drop(df[df['timeMinutes'] < timeBegin].index)
df = df.drop(df[df['timeMinutes'] > timeEnd].index)
if segment == segment_list[2]:
timeBegin = timePostStudy
df = df.drop(df[df['timeMinutes'] < timeBegin].index)
path = os.path.join(study, 'formatted', format, record, segment)
if not os.path.isdir(path): os.mkdir(path)
file_path = os.path.join(study, 'formatted', format, record, segment, sensor + ".csv")
df.to_csv(file_path)
print('dataframe saved for segments: ' + str(file_path))
| 34.452381
| 111
| 0.633034
|
4a08b8ceda7581c8f024b00710749e4842cd59ab
| 5,696
|
py
|
Python
|
augmentation/utilities/wandb.py
|
SaraR-1/model-patching
|
97b30bad4bb4575a5f3a4cc23fbd333b10a057a8
|
[
"Apache-2.0"
] | null | null | null |
augmentation/utilities/wandb.py
|
SaraR-1/model-patching
|
97b30bad4bb4575a5f3a4cc23fbd333b10a057a8
|
[
"Apache-2.0"
] | null | null | null |
augmentation/utilities/wandb.py
|
SaraR-1/model-patching
|
97b30bad4bb4575a5f3a4cc23fbd333b10a057a8
|
[
"Apache-2.0"
] | null | null | null |
import wandb
import json
import time
import numpy as np
from collections import namedtuple
from augmentation.methods.cyclegan.models import mnist_unet_generator, unet_generator
from augmentation.models.models import create_keras_classification_model
WandbRun = namedtuple('WandbRun', 'path id name history files cfg url')
def particular_checkpoint_step_extractor(checkpoint, step_extractor=lambda fname: fname.split("_")[1].split(".")[0]):
def particular_checkpoint_step_extractor_(filename):
step = int(step_extractor(filename))
if step == checkpoint:
return step
else:
return 0
if checkpoint > 0:
return particular_checkpoint_step_extractor_
return step_extractor
def fetch_all_wandb_run_ids(wandb_project, wandb_entity='predictive-analytics-lab', wandb_api=None):
if wandb_api is None:
wandb_api = wandb.Api()
wandb_path = f'{wandb_entity}/{wandb_project}/*'
runs = wandb_api.runs(wandb_path)
return [run.id for run in runs]
def load_wandb_run(wandb_run_id, wandb_project, wandb_entity='predictive-analytics-lab', wandb_api=None):
if wandb_api is None:
wandb_api = wandb.Api()
wandb_path = f'{wandb_entity}/{wandb_project}/{wandb_run_id}'
run = wandb_api.run(wandb_path)
return WandbRun(path=wandb_path, id=run.id, name=run.name, history=run.scan_history,
files=run.files(per_page=10000), cfg=json.loads(run.json_config), url=run.url)
def get_most_recent_model_file(wandb_run: WandbRun, wandb_ckpt_path='checkpoints/',
model_name='', exclude=None,
step_extractor=lambda fname: fname.split("_")[1].split(".")[0]):
# Find checkpoints
checkpoints = [file for file in wandb_run.files if file.name.startswith(wandb_ckpt_path.lstrip("/"))]
relevant_checkpoints = [e for e in checkpoints if model_name in e.name]
if exclude:
relevant_checkpoints = [e for e in relevant_checkpoints if exclude not in e.name]
# Grab the latest checkpoint
latest_checkpoint = relevant_checkpoints[np.argmax([int(step_extractor(e.name)) for e in relevant_checkpoints])]
print(f"Retrieved checkpoint {latest_checkpoint.name}.")
# Restore the model
model_file = wandb.restore(latest_checkpoint.name, run_path=wandb_run.path, replace=True)
return model_file
def load_most_recent_keras_model_weights(keras_model,
wandb_run,
wandb_ckpt_path='checkpoints/',
model_name='',
exclude=None,
step_extractor=None):
# Make sure the step extractor is set to a reasonable default
if step_extractor is None:
step_extractor = lambda fname: fname.split(".")[-2].split("_")[-1]
# Get the most recent model file and load weights from it
try:
model_file = get_most_recent_model_file(wandb_run, wandb_ckpt_path, model_name, exclude, step_extractor)
time.sleep(3)
print(model_file.name)
keras_model.load_weights(model_file.name)
print('load_most_recent_keras_model_weights: file ', model_file.name)
try:
return model_file.name, int(step_extractor(model_file.name))
except ValueError:
return model_file.name, int(step_extractor(model_file.name.split("/")[-1]))
except ValueError:
print("No model file found. Continuing without loading..")
return None, None
def load_pretrained_keras_model_from_wandb(wandb_run_id, wandb_project, wandb_entity,
keras_model_creation_fn, keras_model_creation_fn_args,
model_name, step_extractor,
wandb_ckpt_path='checkpoints/'):
# Load the run
wandb_run = load_wandb_run(wandb_run_id, wandb_project, wandb_entity)
# Create the model architecture
keras_model = globals()[keras_model_creation_fn](**keras_model_creation_fn_args)
# Load up the model weights
if step_extractor is None:
load_file, load_step = load_most_recent_keras_model_weights(keras_model, wandb_run,
model_name=model_name,
wandb_ckpt_path=wandb_ckpt_path)
else:
load_file, load_step = load_most_recent_keras_model_weights(keras_model, wandb_run,
model_name=model_name,
step_extractor=step_extractor,
wandb_ckpt_path=wandb_ckpt_path)
return keras_model, (load_file, load_step)
def load_pretrained_keras_classification_model(source, architecture, input_shape, n_classes, imagenet_pretrained,
pretraining_source, pretraining_info, checkpoint_path):
# Create the model
model = create_keras_classification_model(source, architecture, input_shape, n_classes, imagenet_pretrained)
if pretraining_source == 'wandb':
# Extract the Weights and Biases run information
run_id, project, entity = pretraining_info.split(":")
# Load up the relevant run
wandb_run = load_wandb_run(run_id, project, entity)
# Load up the most recent checkpoint from that run
load_most_recent_keras_model_weights(model, wandb_run, checkpoint_path)
return model
| 45.935484
| 117
| 0.643785
|
4a08b94efca5a27dd8e7d13ca2e807e58c052de5
| 252
|
py
|
Python
|
apps/venv/lib/python2.7/site-packages/MySQLdb/constants/REFRESH.py
|
gmacchi93/serverInfoParaguay
|
251275431af72b33ecf302812783e1f6f70e0be8
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/MySQL-python-1.2.5/MySQLdb/constants/REFRESH.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/MySQL-python-1.2.5/MySQLdb/constants/REFRESH.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
"""MySQL REFRESH Constants
These constants seem to mostly deal with things internal to the
MySQL server. Forget you saw this.
"""
GRANT = 1
LOG = 2
TABLES = 4
HOSTS = 8
STATUS = 16
THREADS = 32
SLAVE = 64
MASTER = 128
READ_LOCK = 16384
FAST = 32768
| 14
| 63
| 0.714286
|
4a08b97be4f9a423d371b68e6eb9d25533fffe92
| 2,124
|
py
|
Python
|
biuropodrozy/outer_api/weather_api.py
|
adrianboratyn/TripRecommendations
|
d3e5a10d80c405d5ac22f028be54c8198bc10410
|
[
"MIT"
] | null | null | null |
biuropodrozy/outer_api/weather_api.py
|
adrianboratyn/TripRecommendations
|
d3e5a10d80c405d5ac22f028be54c8198bc10410
|
[
"MIT"
] | null | null | null |
biuropodrozy/outer_api/weather_api.py
|
adrianboratyn/TripRecommendations
|
d3e5a10d80c405d5ac22f028be54c8198bc10410
|
[
"MIT"
] | 2
|
2021-06-26T13:03:22.000Z
|
2021-06-27T10:47:59.000Z
|
import requests
import datetime
from tenacity import retry, stop_after_attempt
@retry(stop=stop_after_attempt(3))
def fetch(url):
"""
Pobranie adresu URl
Args:
url (string): adres url
Returns:
object: requests
"""
return requests.get(url)
def get_weather(city_name):
"""Fetch weather from weather API
Args:
city_name (string): nazwa miasta
Returns:
object: słownik z pogąda na dane dni tygodnia
"""
response = fetch(f"https://goweather.herokuapp.com/weather/{city_name}")
weekdays = ("Poniedziałek", "Wtorek", "Środa", "Czwartek", "Piątek", "Sobota", "Niedziela")
today = datetime.datetime.now()
next1day = today + datetime.timedelta(days=1)
next2day = today + datetime.timedelta(days=2)
next3day = today + datetime.timedelta(days=3)
today = weekdays[today.weekday()]
next1day = weekdays[next1day.weekday()]
next2day = weekdays[next2day.weekday()]
next3day = weekdays[next3day.weekday()]
if response.ok:
weather = response.json()
temp_today = f"{weather['temperature']}"
desc_today = f"{weather['description']}"
temp_next1day = f"{weather['forecast'][0]['temperature']}"
temp_next2day = f"{weather['forecast'][1]['temperature']}"
temp_next3day = f"{weather['forecast'][2]['temperature']}"
else:
temp_today = ""
desc_today = ""
temp_next1day = ""
temp_next2day = ""
temp_next3day = ""
return {"today_name": today,
"next1day_name": next1day,
"next2day_name": next2day,
"next3day_name": next3day,
"temp_today": temp_today,
"desc_today": desc_today,
"temp_next1day": temp_next1day,
"temp_next2day": temp_next2day,
"temp_next3day": temp_next3day}
'''@property - model
def results(self):
weather = get_weather('warsaw')
return weather'''
# <h1>{{model.results.today_name}}</h1> - template
'''if '__main__' == __name__:
res = get_weather('rzeszow')
print(res['today_name'])'''
| 29.09589
| 95
| 0.614407
|
4a08b9faa8508a42498b33f6ce4c0bc07a644043
| 4,644
|
py
|
Python
|
tools/kconfig/genconfig.py
|
zhouyangyale/c_cpp_project_framework
|
e7c5d67ea326f986ffb03a9647c0b632b27dcae3
|
[
"Apache-2.0"
] | 1,448
|
2018-10-26T13:55:40.000Z
|
2022-03-31T06:15:16.000Z
|
tools/kconfig/genconfig.py
|
zhouyangyale/c_cpp_project_framework
|
e7c5d67ea326f986ffb03a9647c0b632b27dcae3
|
[
"Apache-2.0"
] | 418
|
2018-10-26T14:18:26.000Z
|
2022-03-31T21:40:25.000Z
|
tools/kconfig/genconfig.py
|
zhouyangyale/c_cpp_project_framework
|
e7c5d67ea326f986ffb03a9647c0b632b27dcae3
|
[
"Apache-2.0"
] | 419
|
2018-11-02T09:53:19.000Z
|
2022-03-31T15:43:03.000Z
|
#
# @file from https://github.com/Neutree/c_cpp_project_framework
# @author neucrack
#
import argparse
import os, sys
kconfig_lib_path = sys.path[0]+"/Kconfiglib"
sys.path.append(kconfig_lib_path)
import kconfiglib
from menuconfig import menuconfig
def _cmake_contents(kconfig, header):
chunks = [header]
add = chunks.append
config_vars = []
for sym in kconfig.unique_defined_syms:
# _write_to_conf is determined when the value is calculated. This
# is a hidden function call due to property magic.
val = sym.str_value
if not sym._write_to_conf:
continue
if sym.orig_type in (kconfiglib.BOOL, kconfiglib.TRISTATE) and val == "n":
val = ""
add("set({}{} \"{}\")\n".format(
kconfig.config_prefix, sym.name, val))
config_vars.append(str(kconfig.config_prefix+sym.name))
add("set(CONFIGS_LIST {})\n".format(";".join(config_vars)))
return "".join(chunks)
def write_config(kconfig, filename, gui):
print("-- Write makefile config at: " + filename)
if not gui:
kconfig.write_config(filename)
def write_cmake(kconfig, filename, gui):
print("-- Write cmake config at: " + filename)
cmake_conf_header = "# Generated by c_cpp_project_framework(https://github.com/Neutree/c_cpp_project_framework)\n"
cmake_conf_header += "### DO NOT edit this file!! ###\n\n"
cmake_conf_content = _cmake_contents(kconfig, cmake_conf_header)
# don't change file info if config no change
if os.path.exists(filename):
with open(filename) as f:
if f.read() == cmake_conf_content:
return
f = open(filename, "w")
f.write(cmake_conf_content)
f.close()
def write_header(kconfig, filename, gui):
print("-- write c header file at: " + filename)
kconfig.write_autoconf(filename)
OUTPUT_FORMATS = {"makefile": write_config,
"header": write_header,
"cmake": write_cmake
}
parser = argparse.ArgumentParser(description='menuconfig tool', prog=os.path.basename(sys.argv[0]))
parser.add_argument('--kconfig',
help='KConfig file',
default='Kconfig',
metavar='FILENAME',
required=None)
parser.add_argument('--defaults',
action='append',
default=[],
help='Optional project defaults file. '
'Multiple files can be specified using multiple --defaults arguments.',
metavar="FILENAME"
)
parser.add_argument('--output', nargs=2, action='append',
help='Write output file (format and output filename)',
metavar=('FORMAT', 'FILENAME'),
default=[])
parser.add_argument('--env',
action='append',
default=[],
help='Environment to set when evaluating the config file',
metavar='VAR=VALUE'
)
parser.add_argument("--menuconfig",
help="Open menuconfig GUI interface",
choices=["False", "True"],
default="False",
)
args = parser.parse_args()
for env in args.env:
env = env.split("=")
var = env[0]
value = env[1]
os.environ[var] = value
out_format = {"makefile": ".config"}
for fmt, filename in args.output:
if fmt not in OUTPUT_FORMATS.keys():
print("Format %s not supported! Known formats:%s" %(fmt, OUTPUT_FORMATS.keys()))
sys.exit(1)
out_format[fmt] = filename
if out_format["makefile"] != ".config":
os.environ["KCONFIG_CONFIG"] = out_format["makefile"]
kconfig = kconfiglib.Kconfig(args.kconfig)
# load config, so if config file exist, the default file may
# not take effect, if want to use default,
# remove the config file in build directory
if not os.path.exists(out_format["makefile"]):
for path in args.defaults:
if not os.path.exists(path):
raise ValueError("Path %s not found!" %(path))
print("-- Load default:", path)
kconfig.load_config(path, replace=False)
else:
kconfig.load_config()
if args.menuconfig == "True":
menuconfig(kconfig)
# write back
for fmt, filename in out_format.items():
dir = os.path.split(filename)[0]
if not os.path.exists(dir):
os.makedirs(dir)
for fmt, filename in out_format.items():
func = OUTPUT_FORMATS[fmt]
func(kconfig, filename, args.menuconfig == "True")
| 32.25
| 118
| 0.599268
|
4a08ba1e24dba7a76099cc786e47977bfa5785f1
| 3,691
|
py
|
Python
|
beamofparticle/Beam.py
|
a-sandra/Isyn
|
4d0f3b7af25cb24b60b51a7f1104a748b0bf5ec4
|
[
"MIT"
] | null | null | null |
beamofparticle/Beam.py
|
a-sandra/Isyn
|
4d0f3b7af25cb24b60b51a7f1104a748b0bf5ec4
|
[
"MIT"
] | null | null | null |
beamofparticle/Beam.py
|
a-sandra/Isyn
|
4d0f3b7af25cb24b60b51a7f1104a748b0bf5ec4
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
import random
from Particle import Particle
import matplotlib.pyplot as plt
from physicalconstant.PhysicalConstant import *
class Beam(object):
""" Class Beam defined by:
- Its relativistic gamma
- Its rest energy
- Kind of distribution
- Number of particles """
def __init__(self): # Method constructor
self.gamma = 1.0
self.Erest = 938272046.0
self.qCharge = 1.0
self.nParticle = 100000
self.bunchOfParticle = []
self.particleArray=np.zeros((self.nParticle,2),dtype="float64")
self.beta = math.sqrt(1- 1/math.pow(self.gamma, 2))
self.distributionKind = "Parabolic"
self.givendistribution = []
def setGamma(self,valuegamma):
self.gamma = valuegamma
self.updateBeamParameters()
def getGamma(self):
return self.gamma
def setRestEnergy(self, valueE0):
self.Erest = valueE0
def getRestEnergy(self):
return self.Erest
def setNParticle(self, valueNb):
self.nParticle = valueNb
self.particleArray=np.zeros((self.nParticle,2),dtype="float64")
def getNParticle(self):
return self.nParticle
def setCharge(self, valuecharge):
self.qCharge = valuecharge
def updateBeamParameters(self):
self.beta = math.sqrt(1- 1/math.pow(self.gamma, 2))
self.TotalEnergy = self.gamma*self.Erest*e
def distributionFromFile(self, filename):
self.givendistribution = np.loadtxt(filename)
for i in range(self.nParticle):
self.bunchOfParticle.append(Particle(self.givendistribution[i][0], self.givendistribution[i][1]))
print self.givendistribution[i][0]
self.particleArray=self.givendistribution
def parabolicDistribution(self, initialDeltaPhi, initialdp, offsetphi = 0):
self.distributionKind = "Parabolic"
random.seed(3000)
for i in range(self.nParticle):
u = random.uniform(0,1)
v = random.uniform(0,1)
phi = offsetphi + initialDeltaPhi * math.sqrt(1.0 - math.pow(1.0 - u, 2.0/3.0))*math.cos(2*math.pi*v)
dpi = initialdp * math.sin(2.0*math.pi*v)*math.sqrt(1.0 - math.pow(1.0- u, 2.0/3.0))
self.bunchOfParticle.append(Particle(phi, dpi))
self.particleArray[i,0]=phi
self.particleArray[i,1]=dpi
def getParticles(self):
return self.particleArray.T
def plotBunch(self):
plt.plot(self.particleArray[:,0], self.particleArray[:,1],"r.")
plt.show()
def plotLongitudinalDistribution(self):
phi,dp = self.getParticles()
plt.hist(phi,bins = 100, color = 'green' )
plt.show()
def plotEnergyDistribution(self):
phi,dp = self.getParticles()
plt.hist(dp,bins = 100, color = 'green' )
plt.show()
def getRmsValues(self):
phi, dp = self.getParticles()
phi_rms = np.std(phi)
dp_rms = np.std(dp)
return phi_rms , dp_rms
def getMean(self):
phi, dp = self.getParticles()
phi_mean = np.mean(phi)
dp_mean = np.mean(dp)
return phi_mean , dp_mean
def printBeamParameters(self):
""" Print to screen a summary of the attributes of the object Beam """
print "beta=", self.beta
print "gamma=", self.gamma
print "Rest Energy=", self.Erest
print "Total Energy = ", self.TotalEnergy
print "Charge of particle = ", self.qCharge
print "Number of particle = ", self.nParticle
print "Type of distribution =", self.distributionKind
| 33.554545
| 113
| 0.616364
|
4a08ba23beded7de4f1edfcd1c3e7d8b659add71
| 5,995
|
py
|
Python
|
sprockets/logging/__init__.py
|
dave-shawley/sprockets.logging
|
37ff1180f91696547a25fed2228c829c1a8fcb17
|
[
"BSD-3-Clause"
] | 1
|
2016-10-12T05:09:11.000Z
|
2016-10-12T05:09:11.000Z
|
sprockets/logging/__init__.py
|
dave-shawley/sprockets.logging
|
37ff1180f91696547a25fed2228c829c1a8fcb17
|
[
"BSD-3-Clause"
] | 3
|
2015-06-17T21:56:34.000Z
|
2019-12-22T17:05:34.000Z
|
sprockets/logging/__init__.py
|
dave-shawley/sprockets.logging
|
37ff1180f91696547a25fed2228c829c1a8fcb17
|
[
"BSD-3-Clause"
] | 2
|
2019-11-24T21:17:39.000Z
|
2020-02-28T18:32:14.000Z
|
"""
Make good log output easier.
- :class:`ContextFilter` adds fixed properties to a log record
- :class:`JSONRequestFormatter` formats log records as JSON output
- :method:`tornado_log_function` is for use as the
:class`tornado.web.Application.log_function` in conjunction with
:class:`JSONRequestFormatter` to output log lines as JSON.
"""
from __future__ import absolute_import
from logging import config
import json
import logging
import os
import sys
import traceback
try:
from tornado import escape, log
except ImportError: # pragma no cover
escape = None
log = None
version_info = (1, 3, 2)
__version__ = '.'.join(str(v) for v in version_info)
# Shortcut methods and constants to avoid needing to import logging directly
dictConfig = config.dictConfig
getLogger = logging.getLogger
DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARN
WARNING = logging.WARNING
ERROR = logging.ERROR
class ContextFilter(logging.Filter):
"""
Ensures that properties exist on a LogRecord.
:param list|None properties: optional list of properties that
will be added to LogRecord instances if they are missing
This filter implementation will ensure that a set of properties
exists on every log record which means that you can always refer
to custom properties in a format string. Without this, referring
to a property that is not explicitly passed in will result in an
ugly ``KeyError`` exception.
"""
def __init__(self, name='', properties=None):
logging.Filter.__init__(self, name)
self.properties = list(properties) if properties else []
def filter(self, record):
for property_name in self.properties:
if not hasattr(record, property_name):
setattr(record, property_name, None)
return True
class JSONRequestFormatter(logging.Formatter):
"""Instead of spitting out a "human readable" log line, this outputs
the log data as JSON.
"""
def extract_exc_record(self, typ, val, tb):
"""Create a JSON representation of the traceback given the records
exc_info
:param `Exception` typ: Exception type of the exception being handled
:param `Exception` instance val: instance of the Exception class
:param `traceback` tb: traceback object with the call stack
:rtype: dict
"""
exc_record = {'type': typ.__name__,
'message': str(val),
'stack': []}
for file_name, line_no, func_name, txt in traceback.extract_tb(tb):
exc_record['stack'].append({'file': file_name,
'line': str(line_no),
'func': func_name,
'text': txt})
return exc_record
def format(self, record):
"""Return the log data as JSON
:param record logging.LogRecord: The record to format
:rtype: str
"""
if hasattr(record, 'exc_info'):
try:
traceback = self.extract_exc_record(*record.exc_info)
except:
traceback = None
output = {'name': record.name,
'module': record.module,
'message': record.msg % record.args,
'level': logging.getLevelName(record.levelno),
'line_number': record.lineno,
'process': record.processName,
'timestamp': self.formatTime(record),
'thread': record.threadName,
'file': record.filename,
'request': record.args,
'traceback': traceback}
for key, value in list(output.items()):
if not value:
del output[key]
if 'message' in output:
output.pop('request', None)
return json.dumps(output)
def tornado_log_function(handler):
"""Assigned when creating a :py:class:`tornado.web.Application` instance
by passing the method as the ``log_function`` argument:
.. code:: python
app = tornado.web.Application([('/', RequestHandler)],
log_function=tornado_log_function)
:type handler: :py:class:`tornado.web.RequestHandler`
"""
status_code = handler.get_status()
if status_code < 400:
log_method = log.access_log.info
elif status_code < 500:
log_method = log.access_log.warning
else:
log_method = log.access_log.error
correlation_id = (getattr(handler, 'correlation_id', None) or
handler.request.headers.get('Correlation-ID', None))
log_method('', {'correlation_id': correlation_id,
'duration': 1000.0 * handler.request.request_time(),
'headers': dict(handler.request.headers),
'method': handler.request.method,
'path': handler.request.path,
'protocol': handler.request.protocol,
'query_args': escape.recursive_unicode(
handler.request.query_arguments),
'remote_ip': handler.request.remote_ip,
'status_code': status_code,
'environment': os.environ.get('ENVIRONMENT')})
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
traceback = sys.exc_info()[2]
frame = traceback.tb_frame
while True:
if hasattr(frame, 'f_code'):
filename = frame.f_code.co_filename
if filename.endswith('logging.py') or \
filename.endswith('logging/__init__.py'):
frame = frame.f_back
continue
return frame
return traceback.tb_frame.f_back
# Monkey-patch currentframe
logging.currentframe = currentframe
| 33.679775
| 77
| 0.603837
|
4a08bb622ed5621e48a4ee3da3f2504c3d4944f9
| 1,645
|
py
|
Python
|
nnvm/python/nnvm/attribute.py
|
mostafaelhoushi/tvm
|
ae21eddf5f13ffa82d514e8311c87f38bcac559a
|
[
"Apache-2.0"
] | 1
|
2021-03-07T15:30:16.000Z
|
2021-03-07T15:30:16.000Z
|
nnvm/python/nnvm/attribute.py
|
mostafaelhoushi/tvm
|
ae21eddf5f13ffa82d514e8311c87f38bcac559a
|
[
"Apache-2.0"
] | null | null | null |
nnvm/python/nnvm/attribute.py
|
mostafaelhoushi/tvm
|
ae21eddf5f13ffa82d514e8311c87f38bcac559a
|
[
"Apache-2.0"
] | 1
|
2020-02-09T10:42:31.000Z
|
2020-02-09T10:42:31.000Z
|
# coding: utf-8
"""Attribute scoping support for symbolic API."""
from __future__ import absolute_import
from ._base import string_types
class AttrScope(object):
"""Attribute manager for scoping.
User can also inherit this object to change naming behavior.
Parameters
----------
kwargs
The attributes to set for all symbol creations in the scope.
"""
current = None
def __init__(self, **kwargs):
self._old_scope = None
for value in kwargs.values():
if not isinstance(value, string_types):
raise ValueError("Attributes need to be string")
self._attr = kwargs
def get(self, attr):
"""
Get the attribute dict given the attribute set by the symbol.
Parameters
----------
attr : dict of string to string
The attribute passed in by user during symbol creation.
Returns
-------
attr : dict of string to string
Updated attributes to add other scope related attributes.
"""
if self._attr:
ret = self._attr.copy()
if attr:
ret.update(attr)
return ret
return attr
def __enter__(self):
# pylint: disable=protected-access
self._old_scope = AttrScope.current
attr = AttrScope.current._attr.copy()
attr.update(self._attr)
self._attr = attr
AttrScope.current = self
return self
def __exit__(self, ptype, value, trace):
assert self._old_scope
AttrScope.current = self._old_scope
AttrScope.current = AttrScope()
| 26.967213
| 69
| 0.6
|
4a08bb6edef185cb608345fa6357538b4dce9bcc
| 4,172
|
py
|
Python
|
Momentum/Williams_%R.py
|
alihaskar/Algorithmic-Trading-with-Python
|
68d269bd4ca5be184dd2e1ffb33cf49d683aa2f7
|
[
"MIT"
] | 2
|
2022-01-31T22:28:24.000Z
|
2022-02-07T17:30:50.000Z
|
Momentum/Williams_%R.py
|
alihaskar/Algorithmic-Trading-with-Python
|
68d269bd4ca5be184dd2e1ffb33cf49d683aa2f7
|
[
"MIT"
] | null | null | null |
Momentum/Williams_%R.py
|
alihaskar/Algorithmic-Trading-with-Python
|
68d269bd4ca5be184dd2e1ffb33cf49d683aa2f7
|
[
"MIT"
] | 2
|
2022-01-31T22:28:26.000Z
|
2022-03-16T17:40:10.000Z
|
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
from math import floor
from termcolor import colored as cl
plt.rcParams['figure.figsize'] = (20,10)
plt.style.use('fivethirtyeight')
def get_historical_data(symbol, start_date):
api_key = 'YOUR API KEY'
api_url = f'https://api.twelvedata.com/time_series?symbol={symbol}&interval=1day&outputsize=5000&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df['values']).iloc[::-1].set_index('datetime').astype(float)
df = df[df.index >= start_date]
df.index = pd.to_datetime(df.index)
return df
aapl = get_historical_data('AAPL', '2020-01-01')
aapl
def get_wr(high, low, close, lookback):
    """Williams %R over a `lookback`-bar rolling window.

    %R = -100 * (highest high - close) / (highest high - lowest low),
    ranging from 0 (at the high) to -100 (at the low). The first
    `lookback - 1` values are NaN while the window warms up.
    """
    rolling_high = high.rolling(lookback).max()
    rolling_low = low.rolling(lookback).min()
    return -100 * ((rolling_high - close) / (rolling_high - rolling_low))
# Compute the 14-period Williams %R and drop the warm-up NaN rows.
aapl['wr_14'] = get_wr(aapl['high'], aapl['low'], aapl['close'], 14)
aapl = aapl.dropna()
aapl
# Price on top; %R below with the -20 (overbought) / -80 (oversold) bands.
ax1 = plt.subplot2grid((11,1), (0,0), rowspan = 5, colspan = 1)
ax2 = plt.subplot2grid((11,1), (6,0), rowspan = 5, colspan = 1)
ax1.plot(aapl['close'], linewidth = 2)
ax1.set_title('aapl CLOSING PRICE')
ax2.plot(aapl['wr_14'], color = 'orange', linewidth = 2)
ax2.axhline(-20, linewidth = 1.5, linestyle = '--', color = 'grey')
ax2.axhline(-80, linewidth = 1.5, linestyle = '--', color = 'grey')
ax2.set_title('aapl WILLIAMS %R 14')
plt.show()
def implement_wr_strategy(prices, wr):
    """Generate trading signals from Williams %R threshold crossovers.

    A buy (signal 1) fires when %R crosses down through -80 (entering
    oversold); a sell (signal -1) when it crosses up through -20 (leaving
    overbought). Consecutive signals in the same direction are suppressed.

    Parameters: `prices` and `wr` are equal-length sequences supporting
    positional indexing (lists, or pandas Series as in the caller).
    Returns: (buy_price, sell_price, wr_signal), each len(wr) long; the
    price lists hold np.nan on bars with no trade.
    """
    buy_price = []
    sell_price = []
    wr_signal = []
    signal = 0
    for i in range(len(wr)):
        if i == 0:
            # First bar has no predecessor. (Bug fix: the original code
            # evaluated wr[i-1] here, i.e. wr[-1] — the *last* element —
            # producing a spurious comparison on the first iteration.)
            buy_price.append(np.nan)
            sell_price.append(np.nan)
            wr_signal.append(0)
        elif wr[i-1] > -80 and wr[i] < -80:
            if signal != 1:
                buy_price.append(prices[i])
                sell_price.append(np.nan)
                signal = 1
                wr_signal.append(signal)
            else:
                buy_price.append(np.nan)
                sell_price.append(np.nan)
                wr_signal.append(0)
        elif wr[i-1] < -20 and wr[i] > -20:
            if signal != -1:
                buy_price.append(np.nan)
                sell_price.append(prices[i])
                signal = -1
                wr_signal.append(signal)
            else:
                buy_price.append(np.nan)
                sell_price.append(np.nan)
                wr_signal.append(0)
        else:
            buy_price.append(np.nan)
            sell_price.append(np.nan)
            wr_signal.append(0)
    return buy_price, sell_price, wr_signal
buy_price, sell_price, wr_signal = implement_wr_strategy(aapl['close'], aapl['wr_14'])
ax1 = plt.subplot2grid((11,1), (0,0), rowspan = 5, colspan = 1)
ax2 = plt.subplot2grid((11,1), (6,0), rowspan = 5, colspan = 1)
ax1.plot(aapl['close'], linewidth = 2)
ax1.plot(aapl.index, buy_price, marker = '^', markersize = 12, linewidth = 0, color = 'green', label = 'BUY SIGNAL')
ax1.plot(aapl.index, sell_price, marker = 'v', markersize = 12, linewidth = 0, color = 'r', label = 'SELL SIGNAL')
ax1.legend()
ax1.set_title('aapl TRADING SIGNALS')
ax2.plot(aapl['wr_14'], color = 'orange', linewidth = 2)
ax2.axhline(-20, linewidth = 1.5, linestyle = '--', color = 'grey')
ax2.axhline(-80, linewidth = 1.5, linestyle = '--', color = 'grey')
ax2.set_title('aapl WILLIAMS %R 14')
plt.show()
# Build a 0/1 position series (1 = long, 0 = flat) from the raw signals.
position = []
for i in range(len(wr_signal)):
    # NOTE(review): wr_signal values are only ever -1/0/1, so `> 1` is never
    # true and every slot is initialized to 1; presumably `< 1` was intended.
    if wr_signal[i] > 1:
        position.append(0)
    else:
        position.append(1)
for i in range(len(aapl['close'])):
    if wr_signal[i] == 1:
        position[i] = 1      # go long on a buy signal
    elif wr_signal[i] == -1:
        position[i] = 0      # go flat on a sell signal
    else:
        # Carry the previous bar's position.
        # NOTE(review): at i == 0 this reads position[-1] (the last slot) — confirm intended.
        position[i] = position[i-1]
close_price = aapl['close']
wr = aapl['wr_14']
# Align the plain python lists with the price DatetimeIndex as columns.
wr_signal = pd.DataFrame(wr_signal).rename(columns = {0:'wr_signal'}).set_index(aapl.index)
position = pd.DataFrame(position).rename(columns = {0:'wr_position'}).set_index(aapl.index)
frames = [close_price, wr, wr_signal, position]
strategy = pd.concat(frames, join = 'inner', axis = 1)
strategy.head()
# Strategy returns: the position column multiplied (index-aligned) by
# the daily percent change; plotted against buy-and-hold returns.
rets = aapl.close.pct_change().dropna()
strat_rets = strategy.wr_position[1:]*rets
plt.title('Daily Returns')
rets.plot(color = 'blue', alpha = 0.3, linewidth = 7)
strat_rets.plot(color = 'r', linewidth = 1)
plt.show()
| 33.645161
| 118
| 0.611218
|
4a08bb8bf6ac6d05bdf563dfd45237a6a75fc23b
| 2,291
|
py
|
Python
|
test.py
|
alexjercan/normals-estimation
|
6f18f9248b7c01842f0d4ba57cde5227a893f506
|
[
"MIT"
] | null | null | null |
test.py
|
alexjercan/normals-estimation
|
6f18f9248b7c01842f0d4ba57cde5227a893f506
|
[
"MIT"
] | null | null | null |
test.py
|
alexjercan/normals-estimation
|
6f18f9248b7c01842f0d4ba57cde5227a893f506
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Developed by Alex Jercan <jercan_alex27@yahoo.com>
#
# References:
#
import torch
import argparse
import albumentations as A
import my_albumentations as M
from tqdm import tqdm
from metrics import MetricFunction, print_single_error
from config import parse_test_config, DEVICE, read_yaml_config
from model import Model, LossFunction
from general import images_to_device, load_checkpoint
from dataset import create_dataloader
def run_test(model, dataloader, loss_fn, metric_fn):
    """Run one evaluation pass over `dataloader`, accumulating loss and metrics.

    Results are accumulated inside `loss_fn` and `metric_fn` (both stateful —
    their totals are read via .show() by the caller); nothing is returned.
    """
    loop = tqdm(dataloader, position=0, leave=True)
    for _, images in enumerate(loop):
        with torch.no_grad():  # inference only — skip gradient bookkeeping
            images = images_to_device(images, DEVICE)
            # Each batch: stereo pair plus ground-truth normals (right normal unused).
            left_img, right_img, left_normal, _ = images
            predictions = model(left_img, right_img)
            loss_fn(predictions, left_normal)
            metric_fn.evaluate(predictions, left_normal)
    loop.close()
def test(model=None, config=None):
    """Evaluate a model on the test split and print loss/metric summaries.

    When `model` is None, a fresh Model is built and weights are loaded from
    `config.CHECKPOINT_FILE`; when `config` is None, it is parsed from the
    default test configuration.
    """
    epoch = 0
    torch.backends.cudnn.benchmark = True
    config = parse_test_config() if not config else config
    # Normalize + to-tensor, applied identically to the right image and both
    # normal maps via albumentations' additional_targets mechanism.
    transform = A.Compose(
        [
            A.Normalize(),
            M.MyToTensorV2(),
        ],
        additional_targets={
            'right_img': 'image',
            'left_normal': 'normal',
            'right_normal': 'normal',
        }
    )
    _, dataloader = create_dataloader(config.DATASET_ROOT, config.JSON_PATH,
                                      batch_size=config.BATCH_SIZE, transform=transform,
                                      workers=config.WORKERS, pin_memory=config.PIN_MEMORY, shuffle=config.SHUFFLE)
    if not model:
        model = Model()
        model = model.to(DEVICE)
        # load_checkpoint also recovers the epoch number for reporting.
        epoch, model = load_checkpoint(model, config.CHECKPOINT_FILE, DEVICE)
    loss_fn = LossFunction()
    metric_fn = MetricFunction(config.BATCH_SIZE)
    model.eval()
    run_test(model, dataloader, loss_fn, metric_fn)
    print_single_error(epoch, loss_fn.show(), metric_fn.show())
if __name__ == "__main__":
    # CLI entry point: read the YAML test config and run evaluation.
    parser = argparse.ArgumentParser(description='test model')
    parser.add_argument('--test', type=str, default="test.yaml", help='test config file')
    opt = parser.parse_args()
    config_test = parse_test_config(read_yaml_config(opt.test))
    test(config=config_test)
| 29.371795
| 115
| 0.664339
|
4a08bbf5cc853d10cbd78d4e92e49b9ab847864c
| 4,800
|
py
|
Python
|
fastai/text/interpret.py
|
hgulersen/fastai_hg
|
a176a562037ecfc1e6d650fc0f9db03712d1922a
|
[
"Apache-2.0"
] | 1
|
2020-11-04T00:52:54.000Z
|
2020-11-04T00:52:54.000Z
|
fastai/text/interpret.py
|
Rajesh16702/fastai-1
|
67308f15394bd8189eb9b3fbb3db770c6c78039e
|
[
"Apache-2.0"
] | 2
|
2021-05-20T23:11:34.000Z
|
2022-02-26T10:23:57.000Z
|
fastai/text/interpret.py
|
Rajesh16702/fastai-1
|
67308f15394bd8189eb9b3fbb3db770c6c78039e
|
[
"Apache-2.0"
] | 1
|
2022-01-09T00:28:15.000Z
|
2022-01-09T00:28:15.000Z
|
from ..torch_core import *
from ..basic_data import *
from ..basic_train import *
from ..train import ClassificationInterpretation
import matplotlib.cm as cm
__all__ = ['TextClassificationInterpretation']
def value2rgba(x:float, cmap:Callable=cm.RdYlGn, alpha_mult:float=1.0)->Tuple:
    "Map `x` in [0, 1] through `cmap` to an (r, g, b, alpha) tuple, with alpha scaled by `alpha_mult`."
    color = cmap(x)
    channels = (np.array(color[:-1]) * 255).astype(int)
    alpha = color[-1] * alpha_mult
    return tuple(channels.tolist() + [alpha])
def piece_attn_html(pieces:List[str], attns:List[float], sep:str=' ', **kwargs)->str:
    "Render `pieces` as HTML spans whose background colour encodes the paired attention scores."
    spans = []
    for piece, score in zip(pieces, attns):
        escaped = html.escape(piece)
        rgba = str(value2rgba(score, alpha_mult=0.5, **kwargs))
        spans.append(f'<span title="{score:.3f}" style="background-color: rgba{rgba};">{escaped}</span>')
    return ''.join(['<span style="font-family: monospace;">', sep.join(spans), '</span>'])
def show_piece_attn(*args, **kwargs):
    "Display the HTML produced by `piece_attn_html` inline in a notebook."
    from IPython.display import display, HTML
    display(HTML(piece_attn_html(*args, **kwargs)))
def _eval_dropouts(mod):
module_name = mod.__class__.__name__
if 'Dropout' in module_name or 'BatchNorm' in module_name: mod.training = False
for module in mod.children(): _eval_dropouts(module)
class TextClassificationInterpretation(ClassificationInterpretation):
    """Provides an interpretation of classification based on input sensitivity.
    This was designed for AWD-LSTM only for the moment, because Transformer already has its own attentional model.
    """

    def __init__(self, learn: Learner, preds: Tensor, y_true: Tensor, losses: Tensor, ds_type: DatasetType = DatasetType.Valid):
        super().__init__(learn,preds,y_true,losses,ds_type)
        # Keep a direct handle on the model for gradient-based attention.
        self.model = learn.model

    @classmethod
    def from_learner(cls, learn: Learner, ds_type:DatasetType=DatasetType.Valid, activ:nn.Module=None):
        "Gets preds, y_true, losses to construct base class from a learner"
        return cls(learn, *learn.get_preds(ds_type=ds_type, activ=activ, with_loss=True, ordered=True))

    def intrinsic_attention(self, text:str, class_id:int=None):
        """Calculate the intrinsic attention of the input w.r.t to an output `class_id`, or the classification given by the model if `None`.
        For reference, see the Sequential Jacobian session at https://www.cs.toronto.edu/~graves/preprint.pdf
        """
        # train() enables gradient flow; _eval_dropouts then disables
        # dropout/batchnorm stochasticity so the forward pass is deterministic.
        self.model.train()
        _eval_dropouts(self.model)
        self.model.zero_grad()
        self.model.reset()
        ids = self.data.one_item(text)[0]
        # Detach the embedding so gradients are taken w.r.t. it directly.
        emb = self.model[0].module.encoder(ids).detach().requires_grad_(True)
        lstm_output = self.model[0].module(emb, from_embeddings=True)
        self.model.eval()
        cl = self.model[1](lstm_output + (torch.zeros_like(ids).byte(),))[0].softmax(dim=-1)
        if class_id is None: class_id = cl.argmax()
        cl[0][class_id].backward()
        # Attention = L1 magnitude of the embedding gradient per token,
        # normalized so the strongest token scores 1.
        attn = emb.grad.squeeze().abs().sum(dim=-1)
        attn /= attn.max()
        tokens = self.data.single_ds.reconstruct(ids[0].cpu())
        return tokens, attn

    def html_intrinsic_attention(self, text:str, class_id:int=None, **kwargs)->str:
        "Return the intrinsic-attention visualization of `text` as an HTML string."
        text, attn = self.intrinsic_attention(text, class_id)
        return piece_attn_html(text.text.split(), to_np(attn), **kwargs)

    def show_intrinsic_attention(self, text:str, class_id:int=None, **kwargs)->None:
        "Display the intrinsic-attention visualization of `text` in a notebook."
        text, attn = self.intrinsic_attention(text, class_id)
        show_piece_attn(text.text.split(), to_np(attn), **kwargs)

    def show_top_losses(self, k:int, max_len:int=70)->None:
        """
        Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual,loss, and probability of
        actual class. `max_len` is the maximum number of tokens displayed.
        """
        from IPython.display import display, HTML
        items = []
        tl_val,tl_idx = self.top_losses()
        for i,idx in enumerate(tl_idx):
            if k <= 0: break
            k -= 1
            tx,cl = self.data.dl(self.ds_type).dataset[idx]
            cl = cl.data
            classes = self.data.classes
            # Truncate long documents for display.
            txt = ' '.join(tx.text.split(' ')[:max_len]) if max_len is not None else tx.text
            tmp = [txt, f'{classes[self.pred_class[idx]]}', f'{classes[cl]}', f'{self.losses[idx]:.2f}',
                   f'{self.preds[idx][cl]:.2f}']
            items.append(tmp)
        items = np.array(items)
        names = ['Text', 'Prediction', 'Actual', 'Loss', 'Probability']
        df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
        with pd.option_context('display.max_colwidth', -1):
            display(HTML(df.to_html(index=False)))
| 48
| 140
| 0.654583
|
4a08bca72ea94a00abde9357e1e311761e41399f
| 11,765
|
py
|
Python
|
bokeh/core/enums.py
|
heistermann/bokeh
|
f044bdb79730a1e7581eda609a7ee81993597bd8
|
[
"BSD-3-Clause"
] | 1
|
2018-11-14T19:08:18.000Z
|
2018-11-14T19:08:18.000Z
|
bokeh/core/enums.py
|
heistermann/bokeh
|
f044bdb79730a1e7581eda609a7ee81993597bd8
|
[
"BSD-3-Clause"
] | 1
|
2021-05-09T02:45:17.000Z
|
2021-05-09T02:45:17.000Z
|
bokeh/core/enums.py
|
heistermann/bokeh
|
f044bdb79730a1e7581eda609a7ee81993597bd8
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Common enumerations to be used together with |Enum| property.
This module provides many pre-defined enumerations, as well as functions
for creating new enumerations.
New enumerations can be created using the |enumeration| function:
.. code-block:: python
#: Specify a nautically named side, port or starboard
MyEnum = enumeration("port", "starboard")
Typically, enumerations are used to define |Enum| properties:
.. code-block:: python
from bokeh.model import Model
from bokeh.core.properties import Enum
class MyModel(Model):
location = Enum(MyEnum, help="""
Whether the thing should be a port or starboard.
""")
Enumerations have a defined order and support iteration:
.. code-block:: python
>>> for loc in MyEnum:
... print(loc)
...
port
starboard
as well as containment tests:
.. code-block:: python
>>> "port" in MyEnum
True
Enumerations can be easily documented in Sphinx documentation with the
:ref:`bokeh.sphinxext.bokeh_enum` Sphinx extension.
----
.. autofunction:: bokeh.core.enums.enumeration
----
.. |Enum| replace:: :class:`~bokeh.core.properties.Enum`
.. |enumeration| replace:: :func:`~bokeh.core.enums.enumeration`
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from six import string_types
# Bokeh imports
from .. import colors, palettes
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Anchor',
'AngleUnits',
'ButtonType',
'DashPattern',
'DateFormat',
'DatetimeUnits',
'Dimension',
'Dimensions',
'Direction',
'Enumeration',
'enumeration',
'FontStyle',
'HoldPolicy',
'HorizontalLocation',
'JitterRandomDistribution',
'LatLon',
'LegendClickPolicy',
'LegendLocation',
'LineCap',
'LineDash',
'LineJoin',
'Location',
'MapType',
'MarkerType',
'NamedColor',
'NumeralLanguage',
'Orientation',
'OutputBackend',
'PaddingUnits',
'Palette',
'RenderLevel',
'RenderMode',
'RoundingFunction',
'SizingMode',
'SliderCallbackPolicy',
'SortDirection',
'SpatialUnits',
'StartEnd',
'StepMode',
'TextAlign',
'TextBaseline',
'TickLabelOrientation',
'TooltipAttachment',
'TooltipFieldFormatter',
'VerticalAlign',
'VerticalLocation',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Enumeration(object):
    ''' An ordered, iterable collection of enumerated string values.

    .. note::
        Instances of ``Enumeration`` typically should not be constructed
        directly. Instead, use the |enumeration| function.

    '''
    __slots__ = ()

    def __len__(self):
        return len(self._values)

    def __iter__(self):
        return iter(self._values)

    def __contains__(self, value):
        # Case-insensitive enumerations compare against lowercased input.
        needle = value if self._case_sensitive else value.lower()
        return needle in self._values

    def __str__(self):
        return "Enumeration(%s)" % ", ".join(self._values)

    __repr__ = __str__
def enumeration(*values, **kwargs):
    ''' Create an |Enumeration| object from a sequence of values.

    .. code-block:: python

        #: Specify the horizontal alignment for rendering text
        TextAlign = enumeration("left", "right", "center")

    Args:
        values (str) : string enumeration values, passed as positional arguments
            The order of arguments is the order of the enumeration; the first
            value becomes the default for |Enum| properties.

    Keyword Args:
        case_sensitive (bool, optional) :
            Whether validation should consider case or not (default: True)

    Raises:
        ValueError if values empty, if any value is not a string or not unique

    Returns:
        Enumeration

    '''
    if not values or not all(isinstance(v, string_types) and v for v in values):
        raise ValueError("expected a non-empty sequence of strings, got %s" % values)
    if len(set(values)) != len(values):
        raise ValueError("enumeration items must be unique, got %s" % values)
    # Each value is also exposed as an attribute of the same name.
    attrs = dict((v, v) for v in values)
    attrs.update(
        _values=list(values),
        _default=values[0],
        _case_sensitive=kwargs.get("case_sensitive", True),
    )
    return type(str("Enumeration"), (Enumeration,), attrs)()
#: Specify an anchor position on a box/frame
Anchor = enumeration(
"top_left", "top_center", "top_right",
"center_left", "center", "center_right",
"bottom_left", "bottom_center", "bottom_right")
#: Specify the units for an angle value
AngleUnits = enumeration("deg", "rad")
#: Specify a style for button widgets
ButtonType = enumeration("default", "primary", "success", "warning", "danger", "link")
#: Specify a named dash pattern for stroking lines
DashPattern = enumeration("solid", "dashed", "dotted", "dotdash", "dashdot")
#: Specify a format for printing dates
DateFormat = enumeration("ATOM", "W3C", "RFC-3339", "ISO-8601", "COOKIE", "RFC-822",
"RFC-850", "RFC-1036", "RFC-1123", "RFC-2822", "RSS", "TIMESTAMP")
#: Specify a date/time scale
DatetimeUnits = enumeration("microseconds", "milliseconds", "seconds", "minsec",
"minutes", "hourmin", "hours", "days", "months", "years")
#: Specify a vertical/horizontal dimension
Dimension = enumeration("width", "height")
#: Specify a vertical/horizontal dimensions
Dimensions = enumeration("width", "height", "both")
#: Specify a stroke direction for circles, wedges, etc.
Direction = enumeration("clock", "anticlock")
#: Specify the font style for rendering text
FontStyle = enumeration("normal", "italic", "bold", "bold italic")
#: Specify whether events should be combined or collected as-is when a Document hold is in effect
HoldPolicy = enumeration("combine", "collect")
#: Specify a horizontal location in plot layouts
HorizontalLocation = enumeration("left", "right")
#: Specify a distribution to use for the Jitter class
JitterRandomDistribution = enumeration("uniform", "normal")
#: Specify whether a dimension or coordinate is latitude or longitude
LatLon = enumeration("lat", "lon")
#: Specify how a legend should respond to click events
LegendClickPolicy = enumeration("none", "hide", "mute")
#: Specify a fixed location for a Bokeh legend
LegendLocation = Anchor
#: Specify how stroked lines should be terminated
LineCap = enumeration("butt", "round", "square")
#: Specify a named dash pattern for stroking lines
LineDash = enumeration("solid", "dashed", "dotted", "dotdash", "dashdot")
#: Specify how stroked lines should be joined together
LineJoin = enumeration("miter", "round", "bevel")
#: Specify a location in plot layouts
Location = enumeration("above", "below", "left", "right")
#: Specify a style for a Google map
MapType = enumeration("satellite", "roadmap", "terrain", "hybrid")
#: Specify one of the built-in marker types
MarkerType = enumeration("asterisk", "circle", "circle_cross", "circle_x", "cross",
"dash", "diamond", "diamond_cross", "hex", "inverted_triangle",
"square", "square_cross", "square_x", "triangle", "x")
#: Specify one of the 137 named CSS colors
NamedColor = enumeration(*colors.named.__all__, case_sensitive=False)
#: Specify a locale for printing numeric values
NumeralLanguage = enumeration("be-nl", "chs", "cs", "da-dk", "de-ch", "de", "en",
"en-gb", "es-ES", "es", "et", "fi", "fr-CA", "fr-ch",
"fr", "hu", "it", "ja", "nl-nl", "pl", "pt-br",
"pt-pt", "ru", "ru-UA", "sk", "th", "tr", "uk-UA")
#: Specify a vertical/horizontal orientation for something
Orientation = enumeration("horizontal", "vertical")
#: Specify an output backend to render a plot area onto
OutputBackend = enumeration("canvas", "svg", "webgl")
#: Whether range padding should be interpreted as a percentage or an absolute quantity
PaddingUnits = enumeration("percent", "absolute")
#: Specify the name of a palette from :ref:`bokeh.palettes`
Palette = enumeration(*palettes.__palettes__)
#: Specify a position in the render order for a Bokeh renderer
RenderLevel = enumeration("image", "underlay", "glyph", "annotation", "overlay")
#: Specify a render mode for renderers that support both Canvas or CSS rendering
RenderMode = enumeration("canvas", "css")
#: Specify a policy for how numbers should be rounded
RoundingFunction = enumeration("round", "nearest", "floor", "rounddown", "ceil", "roundup")
#: Sizing mode policies
SizingMode = enumeration("stretch_both", "scale_width", "scale_height", "scale_both", "fixed")
#: Specify different callback policies for the slider widget
SliderCallbackPolicy = enumeration("continuous", "throttle", "mouseup")
#: Specify sorting directions
SortDirection = enumeration("ascending", "descending")
#: Specify units for mapping values
SpatialUnits = enumeration("screen", "data")
#: Specify a start/end value
StartEnd = enumeration("start", "end")
#: Specify a mode for stepwise interpolation
StepMode = enumeration("before", "after", "center")
#: Specify the horizontal alignment for rendering text
TextAlign = enumeration("left", "right", "center")
#: Specify the baseline location for rendering text
TextBaseline = enumeration("top", "middle", "bottom", "alphabetic", "hanging", "ideographic")
#: Specify how axis tick labels are oriented with respect to the axis
TickLabelOrientation = enumeration("horizontal", "vertical", "parallel", "normal")
#: Specify an attachment for tooltips
TooltipAttachment = enumeration("horizontal", "vertical", "left", "right", "above", "below")
#: Specify how a format string for a tooltip field should be interpreted
TooltipFieldFormatter = enumeration("numeral", "datetime", "printf")
#: Specify the vertical alignment for rendering text
VerticalAlign = enumeration("top", "middle", "bottom")
#: Specify a vertical location in plot layouts
VerticalLocation = enumeration("above", "below")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 32.321429
| 97
| 0.60697
|
4a08bdf72c8897df00f8d86ef2a92b2329c1e89d
| 1,997
|
py
|
Python
|
examples/cocodetection/coco2ffrecord.py
|
HFAiLab/ffrecord
|
e916dc715ffa38a304a673ade7c5aa1efff5936d
|
[
"MIT"
] | 47
|
2021-09-07T08:34:03.000Z
|
2022-03-26T06:06:38.000Z
|
examples/cocodetection/coco2ffrecord.py
|
HFAiLab/ffrecord
|
e916dc715ffa38a304a673ade7c5aa1efff5936d
|
[
"MIT"
] | 2
|
2021-12-10T13:40:24.000Z
|
2022-03-31T12:02:30.000Z
|
examples/cocodetection/coco2ffrecord.py
|
HFAiLab/ffrecord
|
e916dc715ffa38a304a673ade7c5aa1efff5936d
|
[
"MIT"
] | 4
|
2021-09-10T05:21:21.000Z
|
2022-03-17T13:29:24.000Z
|
import sys
import os
import pickle
from typing import Union
from tqdm import tqdm, trange
from PIL import Image
import torch
import torchvision.datasets as datasets
from ffrecord import FileWriter
class DumpDataset(datasets.coco.CocoDetection):
    # CocoDetection variant that returns each sample pickled to bytes,
    # ready to be written as one FFRecord entry.
    def __getitem__(self, index):
        """Return the pickled (PIL RGB image, annotation list, image id) triple at `index`."""
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        target = coco.loadAnns(ann_ids)
        path = coco.loadImgs(img_id)[0]['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        sample = (img, target, img_id)
        data = pickle.dumps(sample)  # serialize the sample to bytes
        return data
def convert(dataset, out_file: Union[str, os.PathLike]):
    """
    Convert a CocoDetection-style dataset into FFRecord format.

    Writes one pickled sample per record to `out_file`, which must not
    already exist. Sample order is preserved (shuffle=False).
    """
    assert not os.path.exists(out_file)
    n = len(dataset)
    print(f'writing {n} samples to {out_file}')
    # Use a DataLoader purely for its multi-process workers, to parallelize
    # image decoding and pickling.
    batch_size = 64
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        drop_last=False,
        collate_fn=lambda x: x,  # keep raw bytes; no tensor collation
        num_workers=16,
        multiprocessing_context='fork',
    )
    # Create the FFRecord writer, pre-declared for exactly n records.
    writer = FileWriter(out_file, n)
    # Write every sample into the FFRecord file.
    for samples in tqdm(loader, total=len(loader)):
        for sample in samples:
            writer.write_one(sample)
    # Close the writer to flush record metadata.
    writer.close()
    print(f'writing {writer.count} samples to {out_file}')
def main():
    """Dump the COCO-2017 train and val detection splits to FFRecord files."""
    coco_dir = 'data/coco/'
    out_dir = 'data/coco/'
    # Build both dataset wrappers first (this loads the annotation JSONs),
    # then convert each split in turn.
    train_dataset = DumpDataset(
        root=coco_dir + 'train2017/',
        annFile=coco_dir + 'annotations/instances_train2017.json')
    val_dataset = DumpDataset(
        root=coco_dir + 'val2017/',
        annFile=coco_dir + 'annotations/instances_val2017.json')
    convert(train_dataset, out_dir + 'train2017.ffr')
    convert(val_dataset, out_dir + 'val2017.ffr')


if __name__ == '__main__':
    main()
| 24.654321
| 78
| 0.657486
|
4a08be58b2e8362a04510c278ac09252d3f40ff4
| 1,412
|
py
|
Python
|
aws/sdk/test-services.py
|
matago/smithy-rs
|
75056c6a780529692e1b44e18402352205f9e1f6
|
[
"Apache-2.0"
] | 125
|
2021-05-07T20:23:39.000Z
|
2022-03-29T07:18:48.000Z
|
aws/sdk/test-services.py
|
matago/smithy-rs
|
75056c6a780529692e1b44e18402352205f9e1f6
|
[
"Apache-2.0"
] | 631
|
2021-05-07T20:20:12.000Z
|
2022-03-31T23:54:05.000Z
|
aws/sdk/test-services.py
|
matago/smithy-rs
|
75056c6a780529692e1b44e18402352205f9e1f6
|
[
"Apache-2.0"
] | 69
|
2021-05-08T00:04:46.000Z
|
2022-03-23T05:04:44.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
"""
Generate a list of services which have non-trivial unit tests
This script generates output like `-p aws-sdk-s3 -p aws-sdk-dynamodb`. It is intended to be used in conjunction with
`cargo test` to compile a subset of services when running tests:
```bash
cargo test $(python test-services.py)
```
"""
import os
from pathlib import Path
def main():
    """Print `-p <crate>` cargo arguments for every SDK service with tests plus the aws-* runtime crates."""
    # Run relative to this script's directory (aws/sdk) so the data dirs resolve.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # Services that have an integration-test directory...
    services = {
        entry for entry in os.listdir('integration-tests')
        if os.path.isdir(Path('integration-tests') / entry)
    }
    # ...plus services that have a hand-written Smithy test model.
    suffix = '-tests.smithy'
    services |= {
        model[:-len(suffix)] for model in os.listdir('aws-models')
        if model.endswith(suffix)
    }
    # Shared aws-* runtime crates; the "inlineable" crate is not a real package.
    aws_packages = {
        pkg for pkg in os.listdir('../rust-runtime')
        if pkg.startswith('aws-') and 'inlineable' not in pkg
    }
    args = [f'-p aws-sdk-{service}' for service in sorted(services)]
    args.extend(f'-p {pkg}' for pkg in aws_packages)
    # No trailing newline: output is spliced directly into a cargo command line.
    print(' '.join(args), end='')


if __name__ == "__main__":
    main()
| 30.042553
| 116
| 0.672805
|
4a08bf08d8f150a2dbec213fb2c9c459dac01a15
| 1,513
|
py
|
Python
|
vendor-local/lib/python/unidecode/__init__.py
|
glogiotatidis/remo
|
1c4f55c63c8d03cbee776b60af042b8068d9f297
|
[
"BSD-3-Clause"
] | null | null | null |
vendor-local/lib/python/unidecode/__init__.py
|
glogiotatidis/remo
|
1c4f55c63c8d03cbee776b60af042b8068d9f297
|
[
"BSD-3-Clause"
] | null | null | null |
vendor-local/lib/python/unidecode/__init__.py
|
glogiotatidis/remo
|
1c4f55c63c8d03cbee776b60af042b8068d9f297
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Transliterate Unicode text into plain 7-bit ASCII.
Example usage:
>>> from unidecode import unidecode
>>> unidecode(u"\u5317\u4EB0")
"Bei Jing "
The transliteration uses a straightforward map, and doesn't have alternatives
for the same character based on language, position, or anything else.
In Python 3, a standard string object will be returned. If you need bytes, use:
>>> unidecode("Κνωσός").encode("ascii")
b'Knosos'
"""
# Per-section transliteration tables, loaded lazily from unidecode.xNNN
# modules; a section maps to None when no table module exists.
Cache = {}


def unidecode(string):
    """Transliterate a Unicode object into an ASCII string

    >>> unidecode(u"\u5317\u4EB0")
    "Bei Jing "
    """
    pieces = []
    for ch in string:
        cp = ord(ch)
        # Basic ASCII passes through untouched.
        if cp < 0x80:
            pieces.append(str(ch))
            continue
        # Characters in the Private Use Area and above are ignored.
        if cp > 0xeffff:
            continue
        section, position = cp >> 8, cp & 0xff
        if section not in Cache:
            try:
                mod = __import__('unidecode.x%03x'%(section), [], [], ['data'])
            except ImportError:
                # No table for this section: remember that and skip.
                Cache[section] = None
            else:
                Cache[section] = mod.data
        table = Cache[section]
        if table and len(table) > position:
            pieces.append(table[position])
    return ''.join(pieces)
| 27.509091
| 79
| 0.601454
|
4a08bf6eec05fc8b850b9ac1580aa4027524b035
| 825
|
py
|
Python
|
chatbot/py_commands/reboot.py
|
WLAN-Pi/wlanpi-chat-bot
|
c0dff0779eb873bd074fd0a579798252e4018ee2
|
[
"MIT"
] | 1
|
2021-12-05T06:25:17.000Z
|
2021-12-05T06:25:17.000Z
|
chatbot/py_commands/reboot.py
|
WLAN-Pi/wlanpi-chat-bot
|
c0dff0779eb873bd074fd0a579798252e4018ee2
|
[
"MIT"
] | 6
|
2021-12-07T01:18:57.000Z
|
2022-02-18T05:05:24.000Z
|
chatbot/py_commands/reboot.py
|
WLAN-Pi/wlanpi-chat-bot
|
c0dff0779eb873bd074fd0a579798252e4018ee2
|
[
"MIT"
] | 1
|
2021-07-17T20:09:47.000Z
|
2021-07-17T20:09:47.000Z
|
import os
import chatbot.utils.emojis
from .command import Command
class Reboot(Command):
    """Chat-bot command that reboots the host system."""

    def __init__(self, telegram_object, conf_obj):
        super().__init__(telegram_object, conf_obj)
        self.command_name = "reboot"  # name the command dispatcher matches on

    def run(self, args_list):
        """Schedule a system reboot and return the status message to send.

        The shell command is detached (`&`) so the reply below can still be
        delivered; `sync` + a 2s sleep give buffers time to flush first.
        """
        os.system("(sync; sleep 2; systemctl reboot) &")
        restart_emoji = chatbot.utils.emojis.restart()
        return self._render(f"{restart_emoji} Rebooting....please wait {restart_emoji}")

    def help(self):
        """
        Return the help page for this command
        """
        short_msg = "reboot: Reboot the probe"
        long_msg = """reboot:
        Reboot the probe
        syntax: reboot"""
        if self.display_mode == "compact":
            return short_msg
        else:
            return chatbot.utils.emojis.help() + " " + long_msg
| 23.571429
| 88
| 0.62303
|
4a08c0081827430908a13b5f3bf2a461c4b50ecc
| 2,977
|
py
|
Python
|
server/database/migration/test/navigation-building-id/compare.py
|
Paschalis/anyplace
|
e752f1e865d2a044eee7bb817dceff034c243976
|
[
"MIT"
] | 684
|
2015-08-28T11:03:41.000Z
|
2022-03-15T03:56:10.000Z
|
server/database/migration/test/navigation-building-id/compare.py
|
Paschalis/anyplace
|
e752f1e865d2a044eee7bb817dceff034c243976
|
[
"MIT"
] | 355
|
2015-11-03T09:30:16.000Z
|
2022-03-23T11:02:05.000Z
|
server/database/migration/test/navigation-building-id/compare.py
|
Paschalis/anyplace
|
e752f1e865d2a044eee7bb817dceff034c243976
|
[
"MIT"
] | 345
|
2015-09-19T03:01:29.000Z
|
2022-03-23T11:54:43.000Z
|
import json
import sys
def defineKey(key):
    """Map an endpoint type name to its unique-id field; None for unknown types."""
    return {"buildings": "buid", "floorplans": "fuid"}.get(key)
def compareJsonArray(cObj, mObj, uKey, keys):
    """Compare two JSON arrays element-wise, matching elements by `uKey`.

    Returns True iff both arrays have the same length and every element of
    `cObj` has a counterpart in `mObj` with the same `uKey` value that
    agrees on all of `keys` present in both elements; otherwise False.

    Bug fix: the original returned after inspecting only the *first*
    matched pair, and returned None (falsy) when no pair matched at all.
    """
    if len(cObj) != len(mObj):
        print(uKey, "= couch:", len(cObj), "mongo:", len(mObj))
        return False
    # Index the mongo side by unique key for O(1) lookups.
    mByKey = {obj[uKey]: obj for obj in mObj}
    for cItem in cObj:
        mItem = mByKey.get(cItem[uKey])
        if mItem is None:
            # Element present in couch but missing from mongo.
            return False
        for key in keys:
            if key in cItem and key in mItem and cItem[key] != mItem[key]:
                return False
    return True
def compareBuildingId(cObject, mObject, type):
    """Compare a CouchDB and a MongoDB document of endpoint `type`.

    Returns 1 when all compared keys match, 3 when at least one differs.
    (Code 2 — "different number of objects" — is documented by the caller
    but never produced here.)
    NOTE(review): when the two documents' unique keys differ, the body is
    skipped entirely and 1 is returned — confirm that is intended.
    """
    # Top-level keys are compared directly; 'floors'/'pois' are JSON arrays.
    keys = ["buid", "address", "is_published", "coordinates_lat", "name", "description", "bucode", "coordinates_lon",
            "url", "co_owners", "owner_id", "floors", "pois"]
    floorSubKeys = ["floor_name", "buid", "top_right_lat", "is_published", "username_creator", "bottom_left_lat",
                    "width", "description", "floor_number", "top_right_lng", "bottom_left_lng", "height",
                    "image_height", "fuid"]
    poisSubKeys = ["floor_name", "buid", "top_right_lat", "is_published", "username_creator", "bottom_left_lat",
                   "width", "description", "floor_number", "top_right_lng", "bottom_left_lng", "height"]
    isSame2 = 1
    uniqueKey = defineKey(type)
    if cObject[uniqueKey] == mObject[uniqueKey]:
        for key in keys:
            if key in cObject and key in mObject:
                if key == "floors":
                    if not compareJsonArray(cObject['floors'], mObject['floors'], "floor_name", floorSubKeys):
                        isSame2 = 3
                elif key == "pois":
                    # pois comparison deliberately skipped (upstream endpoint bug).
                    print("Skipping pois comparison due to CouchDB bug.(Fetching floors instead of Pois.)")
                    #if not compareJsonArray(cObject['pois'], mObject['pois'], "puid", poisSubKeys):
                    #    isSame2 = 3
                elif cObject[key] != mObject[key]:
                    print(cObject[uniqueKey], "differ at ", key)
                    print("CouchDB: ", cObject[key])
                    print("MongoDB: ", mObject[key])
                    isSame2 = 3
    return isSame2
def parseEndpoint(file):
    """Read the first line of ``file`` and parse it as JSON.

    The dump files hold one JSON document on their first line.  Exits
    the process with status 1 when the file cannot be opened.

    Fixes: the file handle is now closed (with-statement), only I/O
    errors are caught instead of a bare ``except``, and ``sys.exit(1)``
    replaces the interactive-only ``exit()``.
    """
    try:
        with open(file, encoding="utf8") as handle:
            return json.loads(handle.readline())
    except OSError:
        print("Path was not correct.")
        sys.exit(1)
# Entry point: compare the CouchDB and MongoDB dumps for one endpoint type
# (endpoint name passed as the single command-line argument).
if len(sys.argv) != 2:
    print("CompareJsons::Provide type of endpoint.")
    exit()
couchObjects = parseEndpoint("couch.json")
mongoObjects = parseEndpoint("mongo.json")
# Result codes: 1 = same, 2 = different number of objects,
# 3 = at least one object has a different key-value.
isSame = compareBuildingId(couchObjects, mongoObjects, sys.argv[1])
result_messages = {
    1: "Files are same.",
    2: "Different number of Jsons",
    3: "At least one CouchDB json object has different key-value from MongoDB",
}
if isSame in result_messages:
    print(result_messages[isSame])
| 36.753086
| 117
| 0.574068
|
4a08c0700c723742a62211f3ddfbae4a78560be2
| 56,875
|
py
|
Python
|
CodonSubstitution/build/biopython/Bio/GenBank/__init__.py
|
JackCurragh/DARNED
|
13963d129bd8f69fb1106ad1f47394b3211a939c
|
[
"MIT"
] | null | null | null |
CodonSubstitution/build/biopython/Bio/GenBank/__init__.py
|
JackCurragh/DARNED
|
13963d129bd8f69fb1106ad1f47394b3211a939c
|
[
"MIT"
] | null | null | null |
CodonSubstitution/build/biopython/Bio/GenBank/__init__.py
|
JackCurragh/DARNED
|
13963d129bd8f69fb1106ad1f47394b3211a939c
|
[
"MIT"
] | null | null | null |
# Copyright 2000 by Jeffrey Chang, Brad Chapman. All rights reserved.
# Copyright 2006-2011 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to work with GenBank formatted files.
Rather than using Bio.GenBank, you are now encouraged to use Bio.SeqIO with
the "genbank" or "embl" format names to parse GenBank or EMBL files into
SeqRecord and SeqFeature objects (see the Biopython tutorial for details).
Using Bio.GenBank directly to parse GenBank files is only useful if you want
to obtain GenBank-specific Record objects, which is a much closer
representation to the raw file contents that the SeqRecord alternative from
the FeatureParser (used in Bio.SeqIO).
To use the Bio.GenBank parser, there are two helper functions:
read Parse a handle containing a single GenBank record
as Bio.GenBank specific Record objects.
parse Iterate over a handle containing multiple GenBank
records as Bio.GenBank specific Record objects.
The following internal classes are not intended for direct use and may
be deprecated in a future release.
Classes:
Iterator Iterate through a file of GenBank entries
ErrorFeatureParser Catch errors caused during parsing.
FeatureParser Parse GenBank data in SeqRecord and SeqFeature objects.
RecordParser Parse GenBank data into a Record object.
Exceptions:
ParserFailureError Exception indicating a failure in the parser (ie.
scanner or consumer)
LocationParserError Exception indicating a problem with the spark based
location parser.
"""
import re
# other Biopython stuff
from Bio import SeqFeature
# other Bio.GenBank stuff
from utils import FeatureValueCleaner
from Scanner import GenBankScanner
#Constants used to parse GenBank header lines
GENBANK_INDENT = 12
GENBANK_SPACER = " " * GENBANK_INDENT
#Constants for parsing GenBank feature lines
FEATURE_KEY_INDENT = 5
FEATURE_QUALIFIER_INDENT = 21
FEATURE_KEY_SPACER = " " * FEATURE_KEY_INDENT
FEATURE_QUALIFIER_SPACER = " " * FEATURE_QUALIFIER_INDENT
#Regular expressions for location parsing.  The assert statements below
#act as inline unit tests documenting what each pattern should and
#should not match.
#A single position, optionally fuzzy: "123", "<123" or ">123"
_solo_location = r"[<>]?\d+"
#A range between two (possibly fuzzy) positions: "<104..>160"
_pair_location = r"[<>]?\d+\.\.[<>]?\d+"
#A zero-width point between two bases: "41^42"
_between_location = r"\d+\^\d+"
#A position somewhere within a range: "(3.9)"
_within_position = r"\(\d+\.\d+\)"
_re_within_position = re.compile(_within_position)
#A range where either end may itself be a within position
_within_location = r"([<>]?\d+|%s)\.\.([<>]?\d+|%s)" \
                   % (_within_position,_within_position)
assert _re_within_position.match("(3.9)")
assert re.compile(_within_location).match("(3.9)..10")
assert re.compile(_within_location).match("26..(30.33)")
assert re.compile(_within_location).match("(13.19)..(20.28)")
#A position that is exactly one of several listed choices: "one-of(6,9)"
_oneof_position = r"one\-of\(\d+(,\d+)+\)"
_re_oneof_position = re.compile(_oneof_position)
#A range where either end may itself be a one-of position
_oneof_location = r"([<>]?\d+|%s)\.\.([<>]?\d+|%s)" \
                   % (_oneof_position,_oneof_position)
assert _re_oneof_position.match("one-of(6,9)")
assert re.compile(_oneof_location).match("one-of(6,9)..101")
assert re.compile(_oneof_location).match("one-of(6,9)..one-of(101,104)")
assert re.compile(_oneof_location).match("6..one-of(101,104)")
assert not _re_oneof_position.match("one-of(3)")
assert _re_oneof_position.match("one-of(3,6)")
assert _re_oneof_position.match("one-of(3,6,9)")
#The common fast-path case: a plain exact range like "104..160"
_simple_location = r"\d+\.\.\d+"
_re_simple_location = re.compile(_simple_location)
#join/order/bond of simple ranges only, e.g. "join(104..160,320..390)"
_re_simple_compound = re.compile(r"^(join|order|bond)\(%s(,%s)*\)$" \
                                 % (_simple_location, _simple_location))
#Any single location, optionally prefixed with an accession reference
#like "AL121804.2:" - covers pair, solo, between, within and one-of.
_complex_location = r"([a-zA-z][a-zA-Z0-9_]*(\.[a-zA-Z0-9]+)?\:)?(%s|%s|%s|%s|%s)" \
                    % (_pair_location, _solo_location, _between_location,
                       _within_location, _oneof_location)
_re_complex_location = re.compile(r"^%s$" % _complex_location)
_possibly_complemented_complex_location = r"(%s|complement\(%s\))" \
                                          % (_complex_location, _complex_location)
#join/order/bond of complex (possibly complemented) locations
_re_complex_compound = re.compile(r"^(join|order|bond)\(%s(,%s)*\)$" \
                                  % (_possibly_complemented_complex_location,
                                     _possibly_complemented_complex_location))
assert _re_simple_location.match("104..160")
assert not _re_simple_location.match("<104..>160")
assert not _re_simple_location.match("104")
assert not _re_simple_location.match("<1")
assert not _re_simple_location.match(">99999")
assert not _re_simple_location.match("join(104..160,320..390,504..579)")
assert not _re_simple_compound.match("bond(12,63)")
assert _re_simple_compound.match("join(104..160,320..390,504..579)")
assert _re_simple_compound.match("order(1..69,1308..1465)")
assert not _re_simple_compound.match("order(1..69,1308..1465,1524)")
assert not _re_simple_compound.match("join(<1..442,992..1228,1524..>1983)")
assert not _re_simple_compound.match("join(<1..181,254..336,422..497,574..>590)")
assert not _re_simple_compound.match("join(1475..1577,2841..2986,3074..3193,3314..3481,4126..>4215)")
assert not _re_simple_compound.match("test(1..69,1308..1465)")
assert not _re_simple_compound.match("complement(1..69)")
assert not _re_simple_compound.match("(1..69)")
assert _re_complex_location.match("(3.9)..10")
assert _re_complex_location.match("26..(30.33)")
assert _re_complex_location.match("(13.19)..(20.28)")
assert _re_complex_location.match("41^42") #between
assert _re_complex_location.match("AL121804:41^42")
assert _re_complex_location.match("AL121804:41..610")
assert _re_complex_location.match("AL121804.2:41..610")
assert _re_complex_location.match("one-of(3,6)..101")
assert _re_complex_compound.match("join(153490..154269,AL121804.2:41..610,AL121804.2:672..1487)")
assert not _re_simple_compound.match("join(153490..154269,AL121804.2:41..610,AL121804.2:672..1487)")
assert _re_complex_compound.match("join(complement(69611..69724),139856..140650)")
#Trans-spliced example from NC_016406, note underscore in reference name:
assert _re_complex_location.match("NC_016402.1:6618..6676")
assert _re_complex_location.match("181647..181905")
assert _re_complex_compound.match("join(complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905)")
assert not _re_complex_location.match("join(complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905)")
assert not _re_simple_compound.match("join(complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905)")
assert not _re_complex_location.match("join(complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905)")
assert not _re_simple_location.match("join(complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905)")
def _pos(pos_str, offset=0):
    """Build a Position object from a GenBank position string (PRIVATE).

    offset is 0 for an end position (default) and -1 for a start
    position, converting one-based GenBank counting into Python
    counting, e.g. _pos("5") gives ExactPosition(5) while _pos("5", -1)
    gives ExactPosition(4).

    Handles exact positions ("5"), fuzzy bounds ("<5", ">5"), within
    positions ("(8.10)") and one-of positions ("one-of(5,8,11)").  For
    the fuzzy forms, the integer value the position reports is the
    smallest choice for a start (offset -1) and the largest for an end.
    """
    if pos_str.startswith("<"):
        return SeqFeature.BeforePosition(int(pos_str[1:]) + offset)
    if pos_str.startswith(">"):
        return SeqFeature.AfterPosition(int(pos_str[1:]) + offset)
    if _re_within_position.match(pos_str):
        # "(8.10)" - somewhere within the range 8 to 10.
        left, right = [int(value) + offset
                       for value in pos_str[1:-1].split(".")]
        default = left if offset == -1 else right
        return SeqFeature.WithinPosition(default, left=left, right=right)
    if _re_oneof_position.match(pos_str):
        # "one-of(5,8,11)" - exactly one of the listed positions.
        assert pos_str.startswith("one-of(")
        assert pos_str[-1] == ")"
        choices = [SeqFeature.ExactPosition(int(value) + offset)
                   for value in pos_str[7:-1].split(",")]
        pick = min if offset == -1 else max
        default = pick(int(choice) for choice in choices)
        return SeqFeature.OneOfPosition(default, choices=choices)
    # Plain exact position, e.g. "5".
    return SeqFeature.ExactPosition(int(pos_str) + offset)
def _loc(loc_str, expected_seq_length, strand):
    """FeatureLocation from a non-compound, non-complement location (PRIVATE).

    Handles "123..456" style ranges (including fuzzy end points handled
    by _pos), single positions like "123", and zero-width between
    positions like "123^124".  The expected_seq_length argument is only
    needed for the special case of a between position spanning the
    origin of a circular genome ("N^1" on a genome of length N).

    Raises ValueError for a between location P^Q where Q != P+1, other
    than that circular-origin special case.
    """
    try:
        start_str, end_str = loc_str.split("..")
    except ValueError:
        assert ".." not in loc_str
        if "^" in loc_str:
            # A between location like "67^68" (one based counting) has
            # zero length - 67:67 in Python slice notation (Bug 2622).
            # On a circular genome of length N, "N^1" is the junction at
            # the origin (Bug 3098).  Anything else, e.g. "2^4", is
            # rejected - it would really just be "3".
            start_str, end_str = loc_str.split("^")
            if int(start_str) + 1 == int(end_str):
                pos = _pos(start_str)
            elif int(start_str) == expected_seq_length and end_str == "1":
                pos = _pos(start_str)
            else:
                raise ValueError("Invalid between location %s" % repr(loc_str))
            return SeqFeature.FeatureLocation(pos, pos, strand)
        # A single base, e.g. "123" - start and end are the same point.
        start_str = loc_str
        end_str = loc_str
    return SeqFeature.FeatureLocation(_pos(start_str, -1), _pos(end_str), strand)
def _split_compound_loc(compound_loc):
"""Split a tricky compound location string (PRIVATE).
>>> list(_split_compound_loc("123..145"))
['123..145']
>>> list(_split_compound_loc("123..145,200..209"))
['123..145', '200..209']
>>> list(_split_compound_loc("one-of(200,203)..300"))
['one-of(200,203)..300']
>>> list(_split_compound_loc("complement(123..145),200..209"))
['complement(123..145)', '200..209']
>>> list(_split_compound_loc("123..145,one-of(200,203)..209"))
['123..145', 'one-of(200,203)..209']
>>> list(_split_compound_loc("123..145,one-of(200,203)..one-of(209,211),300"))
['123..145', 'one-of(200,203)..one-of(209,211)', '300']
>>> list(_split_compound_loc("123..145,complement(one-of(200,203)..one-of(209,211)),300"))
['123..145', 'complement(one-of(200,203)..one-of(209,211))', '300']
>>> list(_split_compound_loc("123..145,200..one-of(209,211),300"))
['123..145', '200..one-of(209,211)', '300']
>>> list(_split_compound_loc("123..145,200..one-of(209,211)"))
['123..145', '200..one-of(209,211)']
>>> list(_split_compound_loc("complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905"))
['complement(149815..150200)', 'complement(293787..295573)', 'NC_016402.1:6618..6676', '181647..181905']
"""
if "one-of(" in compound_loc:
#Hard case
while "," in compound_loc:
assert compound_loc[0] != ","
assert compound_loc[0:2] != ".."
i = compound_loc.find(",")
part = compound_loc[:i]
compound_loc = compound_loc[i:] #includes the comma
while part.count("(") > part.count(")"):
assert "one-of(" in part, (part, compound_loc)
i = compound_loc.find(")")
part += compound_loc[:i+1]
compound_loc = compound_loc[i+1:]
if compound_loc.startswith(".."):
i = compound_loc.find(",")
if i==-1:
part += compound_loc
compound_loc = ""
else:
part += compound_loc[:i]
compound_loc = compound_loc[i:] #includes the comma
while part.count("(") > part.count(")"):
assert part.count("one-of(") == 2
i = compound_loc.find(")")
part += compound_loc[:i+1]
compound_loc = compound_loc[i+1:]
if compound_loc.startswith(","):
compound_loc = compound_loc[1:]
assert part
yield part
if compound_loc:
yield compound_loc
else:
#Easy case
for part in compound_loc.split(","):
yield part
class Iterator(object):
    """Iterator interface to move over a file of GenBank entries one at a time (OBSOLETE).

    This class is likely to be deprecated in a future release of Biopython.
    Please use Bio.SeqIO.parse(..., format="gb") or Bio.GenBank.parse(...)
    for SeqRecord and GenBank specific Record objects respectively instead.
    """
    def __init__(self, handle, parser = None):
        """Initialize the iterator.

        Arguments:

        o handle - A handle with GenBank entries to iterate through.

        o parser - An optional parser to pass the entries through before
        returning them. If None, then the raw entry will be returned.
        """
        self.handle = handle
        self._parser = parser

    def next(self):
        """Return the next GenBank record from the handle.

        Will return None if we ran out of records.
        """
        if self._parser is not None:
            try:
                return self._parser.parse(self.handle)
            except StopIteration:
                return None
        # No parser - accumulate raw text up to and including the "//"
        # record terminator line.
        collected = []
        while True:
            line = self.handle.readline()
            if not line:
                # End of file (possibly premature) - nothing to return.
                return None
            collected.append(line)
            if line.rstrip() == "//":
                return "".join(collected)

    def __iter__(self):
        # A callable-iterator: keep calling next() until it returns None.
        return iter(self.next, None)
class ParserFailureError(Exception):
    """Raised when some part of the parser (scanner or consumer) fails."""
    pass
class LocationParserError(Exception):
    """Raised when a location from a GenBank file cannot be properly parsed."""
    pass
class FeatureParser(object):
    """Parse GenBank files into Seq + Feature objects (OBSOLETE).

    Direct use of this class is discouraged, and may be deprecated in
    a future release of Biopython.

    Please use Bio.SeqIO.parse(...) or Bio.SeqIO.read(...) instead.
    """
    def __init__(self, debug_level = 0, use_fuzziness = 1,
                 feature_cleaner = FeatureValueCleaner()):
        """Initialize a GenBank parser and Feature consumer.

        Arguments:

        o debug_level - An optional argument that species the amount of
        debugging information the parser should spit out. By default we have
        no debugging info (the fastest way to do things), but if you want
        you can set this as high as two and see exactly where a parse fails.

        o use_fuzziness - Specify whether or not to use fuzzy representations.
        The default is 1 (use fuzziness).

        o feature_cleaner - A class which will be used to clean out the
        values of features. This class must implement the function
        clean_value. GenBank.utils has a "standard" cleaner class, which
        is used by default.

        NOTE(review): the default cleaner instance is created once at
        class-definition time and shared by all parsers built without an
        explicit feature_cleaner - confirm FeatureValueCleaner is stateless.
        """
        self._scanner = GenBankScanner(debug_level)
        self.use_fuzziness = use_fuzziness
        self._cleaner = feature_cleaner

    def parse(self, handle):
        """Parse the specified handle, returning the consumer's SeqRecord.
        """
        # A fresh consumer per parse call; its .data is the result record.
        self._consumer = _FeatureConsumer(self.use_fuzziness,
                        self._cleaner)
        self._scanner.feed(handle, self._consumer)
        return self._consumer.data
class RecordParser(object):
    """Parse GenBank files into Record objects (OBSOLETE).

    Direct use of this class is discouraged, and may be deprecated in
    a future release of Biopython.

    Please use the Bio.GenBank.parse(...) or Bio.GenBank.read(...) functions
    instead.
    """
    def __init__(self, debug_level = 0):
        """Initialize the parser.

        Arguments:

        o debug_level - An optional argument that species the amount of
        debugging information the parser should spit out. By default we have
        no debugging info (the fastest way to do things), but if you want
        you can set this as high as two and see exactly where a parse fails.
        """
        self._scanner = GenBankScanner(debug_level)

    def parse(self, handle):
        """Parse the specified handle into a GenBank record.
        """
        # A fresh consumer per parse call; its .data is the result Record.
        self._consumer = _RecordConsumer()
        self._scanner.feed(handle, self._consumer)
        return self._consumer.data
class _BaseGenBankConsumer(object):
"""Abstract GenBank consumer providing useful general functions (PRIVATE).
This just helps to eliminate some duplication in things that most
GenBank consumers want to do.
"""
# Special keys in GenBank records that we should remove spaces from
# For instance, \translation keys have values which are proteins and
# should have spaces and newlines removed from them. This class
# attribute gives us more control over specific formatting problems.
remove_space_keys = ["translation"]
def __init__(self):
pass
def _unhandled(self, data):
pass
def __getattr__(self, attr):
return self._unhandled
def _split_keywords(self, keyword_string):
"""Split a string of keywords into a nice clean list.
"""
# process the keywords into a python list
if keyword_string == "" or keyword_string == ".":
keywords = ""
elif keyword_string[-1] == '.':
keywords = keyword_string[:-1]
else:
keywords = keyword_string
keyword_list = keywords.split(';')
clean_keyword_list = [x.strip() for x in keyword_list]
return clean_keyword_list
def _split_accessions(self, accession_string):
"""Split a string of accession numbers into a list.
"""
# first replace all line feeds with spaces
# Also, EMBL style accessions are split with ';'
accession = accession_string.replace("\n", " ").replace(";"," ")
return [x.strip() for x in accession.split() if x.strip()]
def _split_taxonomy(self, taxonomy_string):
"""Split a string with taxonomy info into a list.
"""
if not taxonomy_string or taxonomy_string==".":
#Missing data, no taxonomy
return []
if taxonomy_string[-1] == '.':
tax_info = taxonomy_string[:-1]
else:
tax_info = taxonomy_string
tax_list = tax_info.split(';')
new_tax_list = []
for tax_item in tax_list:
new_items = tax_item.split("\n")
new_tax_list.extend(new_items)
while '' in new_tax_list:
new_tax_list.remove('')
clean_tax_list = [x.strip() for x in new_tax_list]
return clean_tax_list
def _clean_location(self, location_string):
"""Clean whitespace out of a location string.
The location parser isn't a fan of whitespace, so we clean it out
before feeding it into the parser.
"""
#Originally this imported string.whitespace and did a replace
#via a loop. It's simpler to just split on whitespace and rejoin
#the string - and this avoids importing string too. See Bug 2684.
return ''.join(location_string.split())
def _remove_newlines(self, text):
"""Remove any newlines in the passed text, returning the new string.
"""
# get rid of newlines in the qualifier value
newlines = ["\n", "\r"]
for ws in newlines:
text = text.replace(ws, "")
return text
def _normalize_spaces(self, text):
"""Replace multiple spaces in the passed text with single spaces.
"""
# get rid of excessive spaces
text_parts = text.split(" ")
text_parts = filter(None, text_parts)
return ' '.join(text_parts)
def _remove_spaces(self, text):
"""Remove all spaces from the passed text.
"""
return text.replace(" ", "")
def _convert_to_python_numbers(self, start, end):
"""Convert a start and end range to python notation.
In GenBank, starts and ends are defined in "biological" coordinates,
where 1 is the first base and [i, j] means to include both i and j.
In python, 0 is the first base and [i, j] means to include i, but
not j.
So, to convert "biological" to python coordinates, we need to
subtract 1 from the start, and leave the end and things should
be converted happily.
"""
new_start = start - 1
new_end = end
return new_start, new_end
class _FeatureConsumer(_BaseGenBankConsumer):
"""Create a SeqRecord object with Features to return (PRIVATE).
Attributes:
o use_fuzziness - specify whether or not to parse with fuzziness in
feature locations.
o feature_cleaner - a class that will be used to provide specialized
cleaning-up of feature values.
"""
    def __init__(self, use_fuzziness, feature_cleaner = None):
        """Set up an empty SeqRecord plus the parse-state bookkeeping."""
        from Bio.SeqRecord import SeqRecord
        _BaseGenBankConsumer.__init__(self)
        # The SeqRecord being filled in as scanner events arrive.
        self.data = SeqRecord(None, id = None)
        self.data.id = None
        self.data.description = ""
        self._use_fuzziness = use_fuzziness
        self._feature_cleaner = feature_cleaner
        self._seq_type = ''           # sequence type string (checked for "DNA"/"RNA" later)
        self._seq_data = []           # accumulated chunks of raw sequence text
        self._cur_reference = None    # Reference currently being filled in
        self._cur_feature = None      # SeqFeature currently being filled in
        self._expected_size = None    # sequence length, set by size()
    def locus(self, locus_name):
        """Use the LOCUS name as the name of the sequence record."""
        self.data.name = locus_name
    def size(self, content):
        """Record the expected sequence length (parsed as an integer)."""
        self._expected_size = int(content)
    def residue_type(self, type):
        """Record the sequence type so we can choose an appropriate alphabet.

        NOTE: the parameter name ``type`` shadows the builtin; kept as-is
        since it is part of the consumer's event interface.
        """
        self._seq_type = type
    def data_file_division(self, division):
        # Store the data file division verbatim in the annotations dict.
        self.data.annotations['data_file_division'] = division
    def date(self, submit_date):
        # Store the record date verbatim in the annotations dict.
        self.data.annotations['date'] = submit_date
def definition(self, definition):
"""Set the definition as the description of the sequence.
"""
if self.data.description:
#Append to any existing description
#e.g. EMBL files with two DE lines.
self.data.description += " " + definition
else:
self.data.description = definition
def accession(self, acc_num):
"""Set the accession number as the id of the sequence.
If we have multiple accession numbers, the first one passed is
used.
"""
new_acc_nums = self._split_accessions(acc_num)
#Also record them ALL in the annotations
try:
#On the off chance there was more than one accession line:
for acc in new_acc_nums:
#Prevent repeat entries
if acc not in self.data.annotations['accessions']:
self.data.annotations['accessions'].append(acc)
except KeyError:
self.data.annotations['accessions'] = new_acc_nums
# if we haven't set the id information yet, add the first acc num
if self.data.id is None:
if len(new_acc_nums) > 0:
#self.data.id = new_acc_nums[0]
#Use the FIRST accession as the ID, not the first on this line!
self.data.id = self.data.annotations['accessions'][0]
    def wgs(self, content):
        # Store the WGS line content split on '-' (presumably an
        # accession range - TODO confirm against real records).
        self.data.annotations['wgs'] = content.split('-')
    def add_wgs_scafld(self, content):
        # Collect each WGS_SCAFLD line (split on '-') under one annotation key.
        self.data.annotations.setdefault('wgs_scafld',[]).append(content.split('-'))
    def nid(self, content):
        # Store the (obsolete) NID identifier verbatim.
        self.data.annotations['nid'] = content
    def pid(self, content):
        # Store the (obsolete) PID identifier verbatim.
        self.data.annotations['pid'] = content
def version(self, version_id):
#Want to use the versioned accession as the record.id
#This comes from the VERSION line in GenBank files, or the
#obsolete SV line in EMBL. For the new EMBL files we need
#both the version suffix from the ID line and the accession
#from the AC line.
if version_id.count(".")==1 and version_id.split(".")[1].isdigit():
self.accession(version_id.split(".")[0])
self.version_suffix(version_id.split(".")[1])
else:
#For backwards compatibility...
self.data.id = version_id
def project(self, content):
"""Handle the information from the PROJECT line as a list of projects.
e.g.
PROJECT GenomeProject:28471
or:
PROJECT GenomeProject:13543 GenomeProject:99999
This is stored as dbxrefs in the SeqRecord to be consistent with the
projected switch of this line to DBLINK in future GenBank versions.
Note the NCBI plan to replace "GenomeProject:28471" with the shorter
"Project:28471" as part of this transition.
"""
content = content.replace("GenomeProject:", "Project:")
self.data.dbxrefs.extend([p for p in content.split() if p])
def dblink(self, content):
"""Store DBLINK cross references as dbxrefs in our record object.
This line type is expected to replace the PROJECT line in 2009. e.g.
During transition:
PROJECT GenomeProject:28471
DBLINK Project:28471
Trace Assembly Archive:123456
Once the project line is dropped:
DBLINK Project:28471
Trace Assembly Archive:123456
Note GenomeProject -> Project.
We'll have to see some real examples to be sure, but based on the
above example we can expect one reference per line.
Note that at some point the NCBI have included an extra space, e.g.
DBLINK Project: 28471
"""
#During the transition period with both PROJECT and DBLINK lines,
#we don't want to add the same cross reference twice.
while ": " in content:
content = content.replace(": ", ":")
if content.strip() not in self.data.dbxrefs:
self.data.dbxrefs.append(content.strip())
    def version_suffix(self, version):
        """Set the version to overwrite the id.

        Since the version provides the same information as the accession
        number, plus some extra info, we set this as the id if we have
        a version.
        """
        #e.g. GenBank line:
        #VERSION     U49845.1  GI:1293613
        #or the obsolete EMBL line:
        #SV   U49845.1
        #Scanner calls consumer.version("U49845.1")
        #which then calls consumer.version_suffix(1)
        #
        #e.g. EMBL new line:
        #ID   X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP.
        #Scanner calls consumer.version_suffix(1)
        # The suffix arrives as a string of digits; stored as an int.
        assert version.isdigit()
        self.data.annotations['sequence_version'] = int(version)
    def db_source(self, content):
        # Store the DBSOURCE line, minus trailing whitespace.
        self.data.annotations['db_source'] = content.rstrip()
    def gi(self, content):
        # Store the GI number verbatim.
        self.data.annotations['gi'] = content
    def keywords(self, content):
        # Store the KEYWORDS line as a cleaned-up list of strings.
        self.data.annotations['keywords'] = self._split_keywords(content)
    def segment(self, content):
        # Store the SEGMENT line verbatim.
        self.data.annotations['segment'] = content
def source(self, content):
#Note that some software (e.g. VectorNTI) may produce an empty
#source (rather than using a dot/period as might be expected).
if content == "":
source_info = ""
elif content[-1] == '.':
source_info = content[:-1]
else:
source_info = content
self.data.annotations['source'] = source_info
    def organism(self, content):
        # Store the ORGANISM name verbatim.
        self.data.annotations['organism'] = content
def taxonomy(self, content):
"""Records (another line of) the taxonomy lineage.
"""
lineage = self._split_taxonomy(content)
try:
self.data.annotations['taxonomy'].extend(lineage)
except KeyError:
self.data.annotations['taxonomy'] = lineage
def reference_num(self, content):
"""Signal the beginning of a new reference object.
"""
# if we have a current reference that hasn't been added to
# the list of references, add it.
if self._cur_reference is not None:
self.data.annotations['references'].append(self._cur_reference)
else:
self.data.annotations['references'] = []
self._cur_reference = SeqFeature.Reference()
def reference_bases(self, content):
"""Attempt to determine the sequence region the reference entails.
Possible types of information we may have to deal with:
(bases 1 to 86436)
(sites)
(bases 1 to 105654; 110423 to 111122)
1 (residues 1 to 182)
"""
# first remove the parentheses or other junk
ref_base_info = content[1:-1]
all_locations = []
# parse if we've got 'bases' and 'to'
if ref_base_info.find('bases') != -1 and \
ref_base_info.find('to') != -1:
# get rid of the beginning 'bases'
ref_base_info = ref_base_info[5:]
locations = self._split_reference_locations(ref_base_info)
all_locations.extend(locations)
elif (ref_base_info.find("residues") >= 0 and
ref_base_info.find("to") >= 0):
residues_start = ref_base_info.find("residues")
# get only the information after "residues"
ref_base_info = ref_base_info[(residues_start + len("residues ")):]
locations = self._split_reference_locations(ref_base_info)
all_locations.extend(locations)
# make sure if we are not finding information then we have
# the string 'sites' or the string 'bases'
elif (ref_base_info == 'sites' or
ref_base_info.strip() == 'bases'):
pass
# otherwise raise an error
else:
raise ValueError("Could not parse base info %s in record %s" %
(ref_base_info, self.data.id))
self._cur_reference.location = all_locations
def _split_reference_locations(self, location_string):
"""Get reference locations out of a string of reference information
The passed string should be of the form:
1 to 20; 20 to 100
This splits the information out and returns a list of location objects
based on the reference locations.
"""
# split possibly multiple locations using the ';'
all_base_info = location_string.split(';')
new_locations = []
for base_info in all_base_info:
start, end = base_info.split('to')
new_start, new_end = \
self._convert_to_python_numbers(int(start.strip()),
int(end.strip()))
this_location = SeqFeature.FeatureLocation(new_start, new_end)
new_locations.append(this_location)
return new_locations
def authors(self, content):
if self._cur_reference.authors:
self._cur_reference.authors += ' ' + content
else:
self._cur_reference.authors = content
def consrtm(self, content):
if self._cur_reference.consrtm:
self._cur_reference.consrtm += ' ' + content
else:
self._cur_reference.consrtm = content
def title(self, content):
if self._cur_reference is None:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("GenBank TITLE line without REFERENCE line.",
BiopythonParserWarning)
elif self._cur_reference.title:
self._cur_reference.title += ' ' + content
else:
self._cur_reference.title = content
def journal(self, content):
if self._cur_reference.journal:
self._cur_reference.journal += ' ' + content
else:
self._cur_reference.journal = content
    def medline_id(self, content):
        # Store the reference's Medline id verbatim.
        self._cur_reference.medline_id = content
    def pubmed_id(self, content):
        # Store the reference's PubMed id verbatim.
        self._cur_reference.pubmed_id = content
def remark(self, content):
"""Deal with a reference comment."""
if self._cur_reference.comment:
self._cur_reference.comment += ' ' + content
else:
self._cur_reference.comment = content
def comment(self, content):
try:
self.data.annotations['comment'] += "\n" + "\n".join(content)
except KeyError:
self.data.annotations['comment'] = "\n".join(content)
    def features_line(self, content):
        """Get ready for the feature table when we reach the FEATURE line.
        """
        self.start_feature_table()
    def start_feature_table(self):
        """Indicate we've got to the start of the feature table.
        """
        # make sure we've added on our last reference object
        if self._cur_reference is not None:
            self.data.annotations['references'].append(self._cur_reference)
        # reset unconditionally - no reference is in progress any more
        self._cur_reference = None
def feature_key(self, content):
# start a new feature
self._cur_feature = SeqFeature.SeqFeature()
self._cur_feature.type = content
self.data.features.append(self._cur_feature)
    def location(self, content):
        """Parse out location information from the location string.
        This uses simple Python code with some regular expressions to do the
        parsing, and then translates the results into appropriate objects.

        Fast paths handle a plain "start..end" and a simple join/order of
        such spans; the general regular expressions cover fuzzy positions
        and cross-references.  An unparseable location becomes a warning
        and a feature with location set to None.
        """
        # clean up newlines and other whitespace inside the location before
        # parsing - locations should have no whitespace whatsoever
        location_line = self._clean_location(content)
        # Older records have junk like replace(266,"c") in the
        # location line. Newer records just replace this with
        # the number 266 and have the information in a more reasonable
        # place. So we'll just grab out the number and feed this to the
        # parser. We shouldn't really be losing any info this way.
        if location_line.find('replace') != -1:
            comma_pos = location_line.find(',')
            # slice [8:] skips the literal "replace(" prefix
            location_line = location_line[8:comma_pos]
        cur_feature = self._cur_feature
        #Handle top level complement here for speed
        if location_line.startswith("complement("):
            assert location_line.endswith(")")
            # strip "complement(" ... ")" and record the reverse strand
            location_line = location_line[11:-1]
            strand = -1
        elif self._seq_type.find("DNA") >= 0 \
                or self._seq_type.find("RNA") >= 0:
            #Nucleotide
            strand = 1
        else:
            #Protein
            strand = None
        #Special case handling of the most common cases for speed
        if _re_simple_location.match(location_line):
            #e.g. "123..456"
            s, e = location_line.split("..")
            # GenBank positions are one-based inclusive; FeatureLocation is
            # zero-based half-open, hence the int(s)-1.
            cur_feature.location = SeqFeature.FeatureLocation(int(s)-1,
                                                              int(e),
                                                              strand)
            return
        if _re_simple_compound.match(location_line):
            #e.g. join(<123..456,480..>500)
            i = location_line.find("(")
            cur_feature.location_operator = location_line[:i]
            #we can split on the comma because these are simple locations
            for part in location_line[i+1:-1].split(","):
                s, e = part.split("..")
                f = SeqFeature.SeqFeature(SeqFeature.FeatureLocation(int(s)-1,
                                                                     int(e),
                                                                     strand),
                        location_operator=cur_feature.location_operator,
                        type=cur_feature.type)
                cur_feature.sub_features.append(f)
            # Parent location spans from the first sub-feature's start to
            # the last sub-feature's end.
            s = cur_feature.sub_features[0].location.start
            e = cur_feature.sub_features[-1].location.end
            cur_feature.location = SeqFeature.FeatureLocation(s,e, strand)
            return
        #Handle the general case with more complex regular expressions
        if _re_complex_location.match(location_line):
            #e.g. "AL121804.2:41..610"
            if ":" in location_line:
                location_ref, location_line = location_line.split(":")
                cur_feature.location = _loc(location_line, self._expected_size, strand)
                cur_feature.location.ref = location_ref
            else:
                cur_feature.location = _loc(location_line, self._expected_size, strand)
            return
        if _re_complex_compound.match(location_line):
            i = location_line.find("(")
            cur_feature.location_operator = location_line[:i]
            #Can't split on the comma because of positions like one-of(1,2,3)
            for part in _split_compound_loc(location_line[i+1:-1]):
                if part.startswith("complement("):
                    assert part[-1]==")"
                    part = part[11:-1]
                    # complement(...) nested inside an already-complemented
                    # top-level location would be contradictory
                    assert strand != -1, "Double complement?"
                    part_strand = -1
                else:
                    part_strand = strand
                if ":" in part:
                    ref, part = part.split(":")
                else:
                    ref = None
                try:
                    loc = _loc(part, self._expected_size, part_strand)
                except ValueError, err:
                    # NOTE(review): Python 2 'except X, err' syntax and print
                    # statements; debug output goes to stdout before re-raising.
                    print location_line
                    print part
                    raise err
                f = SeqFeature.SeqFeature(location=loc, ref=ref,
                        location_operator=cur_feature.location_operator,
                        type=cur_feature.type)
                cur_feature.sub_features.append(f)
            # Historically a join on the reverse strand has been represented
            # in Biopython with both the parent SeqFeature and its children
            # (the exons for a CDS) all given a strand of -1. Likewise, for
            # a join feature on the forward strand they all have strand +1.
            # However, we must also consider evil mixed strand examples like
            # this, join(complement(69611..69724),139856..140087,140625..140650)
            strands = set(sf.strand for sf in cur_feature.sub_features)
            if len(strands)==1:
                strand = cur_feature.sub_features[0].strand
            else:
                strand = None # i.e. mixed strands
            s = cur_feature.sub_features[0].location.start
            e = cur_feature.sub_features[-1].location.end
            cur_feature.location = SeqFeature.FeatureLocation(s, e, strand)
            return
        #Not recognised
        if "order" in location_line and "join" in location_line:
            #See Bug 3197
            msg = 'Combinations of "join" and "order" within the same ' + \
                  'location (nested operators) are illegal:\n' + location_line
            raise LocationParserError(msg)
        #This used to be an error....
        cur_feature.location = None
        import warnings
        from Bio import BiopythonParserWarning
        warnings.warn(BiopythonParserWarning("Couldn't parse feature location: %r" \
                                             % (location_line)))
def feature_qualifier(self, key, value):
"""When we get a qualifier key and its value.
Can receive None, since you can have valueless keys such as /pseudo
"""
# Hack to try to preserve historical behaviour of /pseudo etc
if value is None:
if key not in self._cur_feature.qualifiers:
self._cur_feature.qualifiers[key] = [""]
return
value = value.replace('"', '')
if self._feature_cleaner is not None:
value = self._feature_cleaner.clean_value(key, value)
# if the qualifier name exists, append the value
if key in self._cur_feature.qualifiers:
self._cur_feature.qualifiers[key].append(value)
# otherwise start a new list of the key with its values
else:
self._cur_feature.qualifiers[key] = [value]
def feature_qualifier_name(self, content_list):
"""Use feature_qualifier instead (OBSOLETE)."""
raise NotImplementedError("Use the feature_qualifier method instead.")
def feature_qualifier_description(self, content):
"""Use feature_qualifier instead (OBSOLETE)."""
raise NotImplementedError("Use the feature_qualifier method instead.")
def contig_location(self, content):
"""Deal with CONTIG information."""
#Historically this was stored as a SeqFeature object, but it was
#stored under record.annotations["contig"] and not under
#record.features with the other SeqFeature objects.
#
#The CONTIG location line can include additional tokens like
#Gap(), Gap(100) or Gap(unk100) which are not used in the feature
#location lines, so storing it using SeqFeature based location
#objects is difficult.
#
#We now store this a string, which means for BioSQL we are now in
#much better agreement with how BioPerl records the CONTIG line
#in the database.
#
#NOTE - This code assumes the scanner will return all the CONTIG
#lines already combined into one long string!
self.data.annotations["contig"] = content
def origin_name(self, content):
pass
def base_count(self, content):
pass
def base_number(self, content):
pass
def sequence(self, content):
"""Add up sequence information as we get it.
To try and make things speedier, this puts all of the strings
into a list of strings, and then uses string.join later to put
them together. Supposedly, this is a big time savings
"""
assert ' ' not in content
self._seq_data.append(content.upper())
def record_end(self, content):
"""Clean up when we've finished the record.
"""
from Bio import Alphabet
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq, UnknownSeq
#Try and append the version number to the accession for the full id
if self.data.id is None:
assert 'accessions' not in self.data.annotations, \
self.data.annotations['accessions']
self.data.id = self.data.name #Good fall back?
elif self.data.id.count('.') == 0:
try:
self.data.id+='.%i' % self.data.annotations['sequence_version']
except KeyError:
pass
# add the sequence information
# first, determine the alphabet
# we default to an generic alphabet if we don't have a
# seq type or have strange sequence information.
seq_alphabet = Alphabet.generic_alphabet
# now set the sequence
sequence = "".join(self._seq_data)
if self._expected_size is not None \
and len(sequence) != 0 \
and self._expected_size != len(sequence):
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Expected sequence length %i, found %i (%s)." \
% (self._expected_size, len(sequence), self.data.id),
BiopythonParserWarning)
if self._seq_type:
# mRNA is really also DNA, since it is actually cDNA
if self._seq_type.find('DNA') != -1 or \
self._seq_type.find('mRNA') != -1:
seq_alphabet = IUPAC.ambiguous_dna
# are there ever really RNA sequences in GenBank?
elif self._seq_type.find('RNA') != -1:
#Even for data which was from RNA, the sequence string
#is usually given as DNA (T not U). Bug 2408
if "T" in sequence and "U" not in sequence:
seq_alphabet = IUPAC.ambiguous_dna
else:
seq_alphabet = IUPAC.ambiguous_rna
elif self._seq_type.upper().find('PROTEIN') != -1:
seq_alphabet = IUPAC.protein # or extended protein?
# work around ugly GenBank records which have circular or
# linear but no indication of sequence type
elif self._seq_type in ["circular", "linear", "unspecified"]:
pass
# we have a bug if we get here
else:
raise ValueError("Could not determine alphabet for seq_type %s"
% self._seq_type)
if not sequence and self.__expected_size:
self.data.seq = UnknownSeq(self._expected_size, seq_alphabet)
else:
self.data.seq = Seq(sequence, seq_alphabet)
class _RecordConsumer(_BaseGenBankConsumer):
    """Create a GenBank Record object from scanner generated information (PRIVATE).

    Unlike the SeqRecord-building consumer, this one stores most fields
    verbatim as strings on a Bio.GenBank.Record.Record object.  The
    scanner calls one method per parsed line type; reference, feature
    and qualifier objects are built incrementally in _cur_reference,
    _cur_feature and _cur_qualifier and committed when the next item (or
    the end of the record) is seen.
    """
    def __init__(self):
        _BaseGenBankConsumer.__init__(self)
        # Python 2 implicit-relative import of the sibling Record module.
        import Record
        self.data = Record.Record()
        # Sequence chunks accumulated by sequence(); joined at record_end().
        self._seq_data = []
        # Items under construction; committed when the next one starts.
        self._cur_reference = None
        self._cur_feature = None
        self._cur_qualifier = None
    def wgs(self, content):
        self.data.wgs = content.split('-')
    def add_wgs_scafld(self, content):
        self.data.wgs_scafld.append(content.split('-'))
    # --- Header fields: mostly verbatim setters onto self.data ---
    def locus(self, content):
        self.data.locus = content
    def size(self, content):
        self.data.size = content
    def residue_type(self, content):
        self.data.residue_type = content
    def data_file_division(self, content):
        self.data.data_file_division = content
    def date(self, content):
        self.data.date = content
    def definition(self, content):
        self.data.definition = content
    def accession(self, content):
        # An ACCESSION line can carry several accessions; keep each once,
        # preserving first-seen order.
        for acc in self._split_accessions(content):
            if acc not in self.data.accession:
                self.data.accession.append(acc)
    def nid(self, content):
        self.data.nid = content
    def pid(self, content):
        self.data.pid = content
    def version(self, content):
        self.data.version = content
    def db_source(self, content):
        self.data.db_source = content.rstrip()
    def gi(self, content):
        self.data.gi = content
    def keywords(self, content):
        self.data.keywords = self._split_keywords(content)
    def project(self, content):
        self.data.projects.extend([p for p in content.split() if p])
    def dblink(self, content):
        self.data.dblinks.append(content)
    def segment(self, content):
        self.data.segment = content
    def source(self, content):
        self.data.source = content
    def organism(self, content):
        self.data.organism = content
    def taxonomy(self, content):
        self.data.taxonomy = self._split_taxonomy(content)
    # --- REFERENCE handling: build one Record.Reference at a time ---
    def reference_num(self, content):
        """Grab the reference number and signal the start of a new reference.
        """
        # check if we have a reference to add
        if self._cur_reference is not None:
            self.data.references.append(self._cur_reference)
        import Record
        self._cur_reference = Record.Reference()
        self._cur_reference.number = content
    def reference_bases(self, content):
        self._cur_reference.bases = content
    def authors(self, content):
        self._cur_reference.authors = content
    def consrtm(self, content):
        self._cur_reference.consrtm = content
    def title(self, content):
        # A TITLE with no preceding REFERENCE is malformed input; warn and
        # discard rather than crash on self._cur_reference being None.
        if self._cur_reference is None:
            import warnings
            from Bio import BiopythonParserWarning
            warnings.warn("GenBank TITLE line without REFERENCE line.",
                          BiopythonParserWarning)
            return
        self._cur_reference.title = content
    def journal(self, content):
        self._cur_reference.journal = content
    def medline_id(self, content):
        self._cur_reference.medline_id = content
    def pubmed_id(self, content):
        self._cur_reference.pubmed_id = content
    def remark(self, content):
        self._cur_reference.remark = content
    def comment(self, content):
        self.data.comment += "\n".join(content)
    def primary_ref_line(self,content):
        """Data for the PRIMARY line"""
        self.data.primary.append(content)
    def primary(self,content):
        pass
    def features_line(self, content):
        """Get ready for the feature table when we reach the FEATURE line.
        """
        self.start_feature_table()
    def start_feature_table(self):
        """Signal the start of the feature table.
        """
        # we need to add on the last reference
        if self._cur_reference is not None:
            self.data.references.append(self._cur_reference)
    # --- Feature table: build Record.Feature / Record.Qualifier objects ---
    def feature_key(self, content):
        """Grab the key of the feature and signal the start of a new feature.
        """
        # first add on feature information if we've got any
        self._add_feature()
        import Record
        self._cur_feature = Record.Feature()
        self._cur_feature.key = content
    def _add_feature(self):
        """Utility function to add a feature to the Record.
        This does all of the appropriate checking to make sure we haven't
        left any info behind, and that we are only adding info if it
        exists.
        """
        if self._cur_feature is not None:
            # if we have a left over qualifier, add it to the qualifiers
            # on the current feature
            if self._cur_qualifier is not None:
                self._cur_feature.qualifiers.append(self._cur_qualifier)
            self._cur_qualifier = None
            self.data.features.append(self._cur_feature)
    def location(self, content):
        self._cur_feature.location = self._clean_location(content)
    def feature_qualifier(self, key, value):
        # Adapter onto the older two-step name/description interface.
        self.feature_qualifier_name([key])
        if value is not None:
            self.feature_qualifier_description(value)
    def feature_qualifier_name(self, content_list):
        """Deal with qualifier names
        We receive a list of keys, since you can have valueless keys such as
        /pseudo which would be passed in with the next key (since no other
        tags separate them in the file)
        """
        import Record
        for content in content_list:
            # the record parser keeps the /s -- add them if we don't have 'em
            if content.find("/") != 0:
                content = "/%s" % content
            # add on a qualifier if we've got one
            if self._cur_qualifier is not None:
                self._cur_feature.qualifiers.append(self._cur_qualifier)
            self._cur_qualifier = Record.Qualifier()
            self._cur_qualifier.key = content
    def feature_qualifier_description(self, content):
        # if we have info then the qualifier key should have a ='s
        if self._cur_qualifier.key.find("=") == -1:
            self._cur_qualifier.key = "%s=" % self._cur_qualifier.key
        cur_content = self._remove_newlines(content)
        # remove all spaces from the value if it is a type where spaces
        # are not important
        # NOTE(review): remove_space_keys is not defined in this class;
        # presumably inherited from _BaseGenBankConsumer -- confirm.
        for remove_space_key in self.__class__.remove_space_keys:
            if self._cur_qualifier.key.find(remove_space_key) >= 0:
                cur_content = self._remove_spaces(cur_content)
        self._cur_qualifier.value = self._normalize_spaces(cur_content)
    def base_count(self, content):
        self.data.base_counts = content
    def origin_name(self, content):
        self.data.origin = content
    def contig_location(self, content):
        """Signal that we have contig information to add to the record.
        """
        self.data.contig = self._clean_location(content)
    def sequence(self, content):
        """Add sequence information to a list of sequence strings.
        This removes spaces in the data and uppercases the sequence, and
        then adds it to a list of sequences. Later on we'll join this
        list together to make the final sequence. This is faster than
        adding on the new string every time.
        """
        assert ' ' not in content
        self._seq_data.append(content.upper())
    def record_end(self, content):
        """Signal the end of the record and do any necessary clean-up.
        """
        # add together all of the sequence parts to create the
        # final sequence string
        self.data.sequence = "".join(self._seq_data)
        # add on the last feature
        self._add_feature()
def parse(handle):
    """Iterate over GenBank formatted entries as Record objects.
    >>> from Bio import GenBank
    >>> handle = open("GenBank/NC_000932.gb")
    >>> for record in GenBank.parse(handle):
    ...     print record.accession
    ['NC_000932']
    >>> handle.close()
    To get SeqRecord objects use Bio.SeqIO.parse(..., format="gb")
    instead.
    """
    record_parser = RecordParser()
    return iter(Iterator(handle, record_parser))
def read(handle):
    """Read a handle containing a single GenBank entry as a Record object.
    >>> from Bio import GenBank
    >>> handle = open("GenBank/NC_000932.gb")
    >>> record = GenBank.read(handle)
    >>> print record.accession
    ['NC_000932']
    >>> handle.close()
    To get a SeqRecord object use Bio.SeqIO.read(..., format="gb")
    instead.

    Raises ValueError if the handle contains no records, or more than one.
    """
    iterator = parse(handle)
    # next(it, default) is available from Python 2.6 and also works on
    # Python 3, unlike the Python-2-only iterator.next() this replaces;
    # it also removes the try/except StopIteration boilerplate.
    first = next(iterator, None)
    if first is None:
        raise ValueError("No records found in handle")
    if next(iterator, None) is not None:
        raise ValueError("More than one record found in handle")
    return first
def _test():
    """Run the Bio.GenBank module's doctests.

    The Tests directory lives at ../../Tests when run from inside the
    source tree and at ./Tests when run from the repository root; the
    first one found is used.  Fixes over the previous version: the two
    duplicated branches are merged, the misspelled "Runing" message is
    corrected, and the working directory is restored even if a doctest
    raises.
    """
    import doctest
    import os
    for tests_dir in (os.path.join("..", "..", "Tests"), os.path.join("Tests")):
        if os.path.isdir(tests_dir):
            print("Running doctests...")
            cur_dir = os.path.abspath(os.curdir)
            os.chdir(tests_dir)
            try:
                doctest.testmod()
            finally:
                # Always restore the original working directory.
                os.chdir(cur_dir)
            print("Done")
            break
# Allow running this module directly to execute its doctests.
if __name__ == "__main__":
    _test()
| 38.043478
| 138
| 0.617284
|
4a08c18d0d3a6b787265bdcf1eeeefbeb2288c07
| 1,015
|
py
|
Python
|
portals/wwits/groups/general/code/schemas.py
|
jalanb/portals
|
7a5360b48547719d3fbe50790f08eaf5571148dd
|
[
"ADSL"
] | null | null | null |
portals/wwits/groups/general/code/schemas.py
|
jalanb/portals
|
7a5360b48547719d3fbe50790f08eaf5571148dd
|
[
"ADSL"
] | null | null | null |
portals/wwits/groups/general/code/schemas.py
|
jalanb/portals
|
7a5360b48547719d3fbe50790f08eaf5571148dd
|
[
"ADSL"
] | null | null | null |
from marshmallow import fields, post_load
from portals.wwits.apis.rest import BaseSchemaExcludeFields as Schema
from .models import CodesParmModel, CodeModel, GetCodesModel
class ParmSchema(Schema):
    """Schema for the Parms envelope of a GetCodes payload.

    Field names mirror the upstream API's PascalCase keys exactly.
    """
    CodeTable = fields.Str()
    UserID = fields.Str()
    Version = fields.Str()
    Env = fields.Str()
    Source = fields.Str()
    Session = fields.Integer()
    RC = fields.Integer()
    ResultMsg = fields.Str()
    @post_load
    def make_schema(self, data, **kwargs):
        # After validation, return a model instance instead of a plain dict.
        return CodesParmModel(**data)
class CodeSchema(Schema):
    """Schema for one code entry; maps PascalCase wire keys to
    snake_case model attributes via data_key."""
    code = fields.Str(data_key="Code")
    descr = fields.Str(data_key="Descr")
    option = fields.Str(data_key="Option")
    xtable = fields.Str(data_key="XTable")
    @post_load
    def make_code(self, data, **kwargs):
        # After validation, return a model instance instead of a plain dict.
        return CodeModel(**data)
class GetCodesSchema(Schema):
    """Top-level schema for a GetCodes response: a Parms envelope plus a
    list of code entries."""
    Parms = fields.Nested(ParmSchema)
    GetCodes = fields.Nested(CodeSchema, many=True)
    @post_load
    def make_code(self, data, **kwargs):
        # After validation, return a model instance instead of a plain dict.
        return GetCodesModel(**data)
| 26.025641
| 69
| 0.682759
|
4a08c2278fa7bdea508969e26c5d5a859670ea6e
| 1,105
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_backend_health.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_backend_health.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 2
|
2016-09-30T21:40:24.000Z
|
2017-11-10T18:16:18.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_backend_health.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: AutoRest-generated model (see file header); hand edits will be
# lost on regeneration.
class ApplicationGatewayBackendHealth(Model):
    """List of ApplicationGatewayBackendHealthPool resources.
    :param backend_address_pools:
    :type backend_address_pools:
     list[~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayBackendHealthPool]
    """
    # _attribute_map drives msrest (de)serialization: wire key name and
    # the msrest type string for each attribute.
    _attribute_map = {
        'backend_address_pools': {'key': 'backendAddressPools', 'type': '[ApplicationGatewayBackendHealthPool]'},
    }
    def __init__(self, backend_address_pools=None):
        super(ApplicationGatewayBackendHealth, self).__init__()
        self.backend_address_pools = backend_address_pools
| 36.833333
| 113
| 0.655204
|
4a08c28b1f7a8d3bf9513ffdc5ea376906011d69
| 2,753
|
py
|
Python
|
Layout/plotGenerator.py
|
iamwendellbalagot/sieve-machine
|
8ed8dc73b97575263e2e013334602e8db4930e17
|
[
"MIT"
] | null | null | null |
Layout/plotGenerator.py
|
iamwendellbalagot/sieve-machine
|
8ed8dc73b97575263e2e013334602e8db4930e17
|
[
"MIT"
] | null | null | null |
Layout/plotGenerator.py
|
iamwendellbalagot/sieve-machine
|
8ed8dc73b97575263e2e013334602e8db4930e17
|
[
"MIT"
] | null | null | null |
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import sys
sys.path.insert(1, '/path/to/applicaiton/getdata')
import getdata.getdata as getdata
def get_scatter(df=None):
    """Build the seven-sensor weight line chart.

    Parameters:
        df: optional DataFrame with columns S1..S7; when None the current
            data is pulled via getdata.get_dataframe().

    Returns:
        plotly.graph_objects.Figure with one Scatter trace per sensor.

    Improvements over the previous version: the large block of
    commented-out mock-data code has been removed, and the seven
    copy-pasted add_trace calls are collapsed into a data-driven loop
    (trace order and colours are unchanged).
    """
    if df is None:
        df = getdata.get_dataframe()
    x = np.arange(len(df))
    # (column, display name, line colour) for each sensor, in plot order.
    sensors = [
        ('S1', 'Sensor 1', '#E48F72'),
        ('S2', 'Sensor 2', 'green'),
        ('S3', 'Sensor 3', 'black'),
        ('S4', 'Sensor 4', 'violet'),
        ('S5', 'Sensor 5', 'gold'),
        ('S6', 'Sensor 6', 'darkviolet'),
        ('S7', 'Sensor 7', 'maroon'),
    ]
    fig = go.Figure()
    for column, label, color in sensors:
        fig.add_trace(go.Scatter(x=x,
                                 y=df[column],
                                 name=label,
                                 marker_color=color))
    fig.update_layout(title=dict(
        x=0.5,
        y=0.8,
        font=dict(size=20, color='white')),
        legend=dict(
            bgcolor='#373a40',
            traceorder='normal',
            font=dict(
                size=12,
                color='white'),
        ),
        template='plotly_dark',
        height=330,
        width=800,
        font=dict(family="Courier",
                  size=12, color='#99aab5'),
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='#ccc',
        margin=dict(t=50, b=70, l=80, r=1))
    fig.update_xaxes(title='Time Interval [2000ms]')
    fig.update_yaxes(title='WEIGHT in KILOGRAMS')
    return fig
| 28.381443
| 56
| 0.469306
|
4a08c32162acd4727f6a920d6f07ae443f9a189e
| 4,544
|
py
|
Python
|
rpc/models.py
|
Infixz/mime_custodian
|
3eb871180eb3964b829c6c6babd2a6d021fea054
|
[
"MIT"
] | null | null | null |
rpc/models.py
|
Infixz/mime_custodian
|
3eb871180eb3964b829c6c6babd2a6d021fea054
|
[
"MIT"
] | null | null | null |
rpc/models.py
|
Infixz/mime_custodian
|
3eb871180eb3964b829c6c6babd2a6d021fea054
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import os
import uuid
import magic
import urllib
from datetime import datetime
import cropresize2
import short_url
from PIL import Image
from flask import abort, request
from werkzeug.utils import cached_property
from mimes import IMAGE_MIMES, AUDIO_MIMES, VIDEO_MIMES
from utils import get_file_md5, get_file_path
from ext import db
class PasteFile(db.Model):
    """A stored upload: DB row plus helpers for dedup, URLs and resizing.

    Files live on disk at get_file_path(filehash); filemd5 is unique, so
    identical content is stored only once (see create_by_upload_file).
    """
    __tablename__ = 'PasteFile'
    id = db.Column(db.Integer, primary_key=True)
    filename = db.Column(db.String(5000), nullable=False)
    # Random storage name (uuid + original suffix); also the URL path.
    filehash = db.Column(db.String(128), nullable=False, unique=True)
    # Content digest used for de-duplication.
    filemd5 = db.Column(db.String(128), nullable=False, unique=True)
    uploadtime = db.Column(db.DateTime, nullable=False)
    mimetype = db.Column(db.String(256), nullable=False)
    size = db.Column(db.Integer, nullable=False)
    def __init__(self, filename='', mimetype='application/octet-stream',
                 size=0, filehash=None, filemd5=None):
        self.uploadtime = datetime.now()
        self.mimetype = mimetype
        self.size = int(size)
        self.filehash = filehash if filehash else self._hash_filename(filename)
        self.filename = filename if filename else self.filehash
        self.filemd5 = filemd5
    @staticmethod
    def _hash_filename(filename):
        # Keep the original suffix, replace the stem with a random uuid.
        _, _, suffix = filename.rpartition('.')
        return '%s.%s' % (uuid.uuid4().hex, suffix)
    @cached_property
    def symlink(self):
        # Short reversible token derived from the numeric primary key.
        return short_url.encode_url(self.id)
    @classmethod
    def get_by_symlink(cls, symlink, code=404):
        id = short_url.decode_url(symlink)
        return cls.query.filter_by(id=id).first() or abort(code)
    @classmethod
    def get_by_filehash(cls, filehash, code=404):
        return cls.query.filter_by(filehash=filehash).first() or abort(code)
    @classmethod
    def get_by_md5(cls, filemd5):
        return cls.query.filter_by(filemd5=filemd5).first()
    @classmethod
    def create_by_upload_file(cls, uploaded_file):
        """Save an upload; if identical content already exists, reuse it."""
        rst = cls(uploaded_file.filename, uploaded_file.mimetype, 0)
        uploaded_file.save(rst.path)
        # NOTE(review): file opened in text mode for hashing; binary
        # uploads should probably be read with 'rb' -- confirm that
        # get_file_md5 expects this.
        with open(rst.path) as f:
            filemd5 = get_file_md5(f)
        uploaded_file = cls.get_by_md5(filemd5)
        if uploaded_file:
            # Duplicate content: drop the fresh copy, return existing row.
            os.remove(rst.path)
            return uploaded_file
        filestat = os.stat(rst.path)
        rst.size = filestat.st_size
        rst.filemd5 = filemd5
        return rst
    @classmethod
    def create_by_old_paste(cls, filehash):
        """Build a row for a file already on disk, sniffing its mimetype."""
        filepath = get_file_path(filehash)
        mimetype = magic.from_file(filepath, mime=True)
        filestat = os.stat(filepath)
        size = filestat.st_size
        rst = cls(filehash, mimetype, size, filehash=filehash)
        return rst
    @property
    def path(self):
        return get_file_path(self.filehash)
    def get_url(self, subtype, is_symlink=False):
        # subtype is the URL prefix: i=inline, p=preview, d=download,
        # s=short link (per the url_* properties below).
        hash_or_link = self.symlink if is_symlink else self.filehash
        return 'http://{host}/{subtype}/{hash_or_link}'.format(
            subtype=subtype, host=request.host, hash_or_link=hash_or_link)
    @property
    def url_i(self):
        return self.get_url('i')
    @property
    def url_p(self):
        return self.get_url('p')
    @property
    def url_s(self):
        return self.get_url('s', is_symlink=True)
    @property
    def url_d(self):
        return self.get_url('d')
    @property
    def image_size(self):
        # (width, height) for images, (0, 0) for everything else.
        if self.is_image:
            im = Image.open(self.path)
            return im.size
        return (0, 0)
    @property
    def quoteurl(self):
        return urllib.quote(self.url_i)
    @classmethod
    def rsize(cls, old_paste, weight, height):
        """Create a new PasteFile holding a cropped/resized copy of an image."""
        # NOTE(review): `assert cond, TypeError(...)` raises AssertionError
        # with a TypeError instance as its message, not a TypeError -- and
        # is stripped entirely under python -O; confirm intended.
        assert old_paste.is_image, TypeError('Unsupported Image Type.')
        img = cropresize2.crop_resize(
            Image.open(old_paste.path), (int(weight), int(height)))
        rst = cls(old_paste.filename, old_paste.mimetype, 0)
        img.save(rst.path)
        filestat = os.stat(rst.path)
        rst.size = filestat.st_size
        return rst
    @property
    def is_image(self):
        return self.mimetype in IMAGE_MIMES
    @property
    def is_audio(self):
        return self.mimetype in AUDIO_MIMES
    @property
    def is_video(self):
        return self.mimetype in VIDEO_MIMES
    @property
    def is_pdf(self):
        return self.mimetype == 'application/pdf'
    @property
    def type(self):
        # Coarse category used by templates; first matching wins.
        for t in ('image', 'pdf', 'video', 'audio'):
            if getattr(self, 'is_' + t):
                return t
        return 'binary'
| 29.128205
| 79
| 0.645687
|
4a08c3eed19c351057afdc7b143d2495f0bc4d02
| 3,987
|
py
|
Python
|
models.py
|
phamdt/mobile-scavenger-hunt
|
9b553406884df97e596c21f00f806810a9e27a16
|
[
"Apache-2.0"
] | 1
|
2018-11-04T19:56:29.000Z
|
2018-11-04T19:56:29.000Z
|
models.py
|
phamdt/mobile-scavenger-hunt
|
9b553406884df97e596c21f00f806810a9e27a16
|
[
"Apache-2.0"
] | null | null | null |
models.py
|
phamdt/mobile-scavenger-hunt
|
9b553406884df97e596c21f00f806810a9e27a16
|
[
"Apache-2.0"
] | 1
|
2016-08-17T22:56:59.000Z
|
2016-08-17T22:56:59.000Z
|
import os
from hunt import db
import sys
from flask.ext.login import UserMixin
class Admin(db.Model):
    """A hunt organiser account; implements the Flask-Login user interface.

    The is_* / get_id / __eq__ / __ne__ methods below are copied from
    Flask-Login's UserMixin (see the linked source) rather than inherited.
    """
    __tablename__ = 'admins'
    admin_id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(320), unique=True)
    pw_hash = db.Column(db.String(64))
    hunts = db.relationship('Hunt', backref='hunts')
    def __repr__(self):
        return '<Admin %r>' % self.email
    # http://flask-login.readthedocs.org/en/latest/_modules/flask/ext/login.html#UserMixin
    def is_authenticated(self):
        # Flask-Login hook: stored admins are always authenticated.
        return True
    def is_active(self):
        return True
    def is_anonymous(self):
        return False
    def get_id(self):
        try:
            # Python 2 unicode(); under Python 3 this would need str().
            return unicode(self.admin_id)
        except AttributeError:
            raise NotImplementedError('No `id` attribute - override `get_id`')
    def __eq__(self, other):
        '''
        Checks the equality of two `UserMixin` objects using `get_id`.
        '''
        if isinstance(other, UserMixin):
            return self.get_id() == other.get_id()
        return NotImplemented
    def __ne__(self, other):
        '''
        Checks the inequality of two `UserMixin` objects using `get_id`.
        '''
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return NotImplemented
        return not equal
    if sys.version_info[0] != 2: # pragma: no cover
        # Python 3 implicitly set __hash__ to None if we override __eq__
        # We set it back to its default implementation
        __hash__ = object.__hash__
class Hunt(db.Model):
    """A scavenger hunt: its items, participants, messages and owner."""
    __tablename__ = 'hunts'
    hunt_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True)
    participants = db.relationship(
        'Participant', backref='hunts', cascade='all')
    participant_rule = db.Column(db.String(20))
    items = db.relationship('Item', backref='hunts', cascade='all')
    date_created = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
    last_modified = db.Column(db.DateTime(timezone=True), server_default=db.func.now(),
                              onupdate=db.func.now())
    # refers to items required
    all_required = db.Column(db.Boolean)
    num_required = db.Column(db.Integer)
    welcome_message = db.Column(db.String(500))
    congratulations_message = db.Column(db.String(500))
    admin_id = db.Column(db.Integer, db.ForeignKey('admins.admin_id'))
    domain = db.Column(db.String(50))
    def __repr__(self):
        return '<Hunt %r>' % self.name
    # NOTE(review): these classmethods take a `db` parameter that shadows
    # the module-level db import -- confirm callers pass the same object.
    @classmethod
    def list_for_admin_id(cls, db, admin_id):
        return db.session.query(Hunt).filter(Hunt.admin_id == admin_id).all()
    @classmethod
    def find_by_id(cls, db, hunt_id):
        return db.session.query(Hunt).filter(Hunt.hunt_id == hunt_id).first()
class Participant(db.Model):
    """A person taking part in one hunt, identified by email."""
    __tablename__ = 'participants'
    participant_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))
    email = db.Column(db.String(50), nullable=False)
    hunt_id = db.Column(db.Integer, db.ForeignKey('hunts.hunt_id'))
    registered = db.Column(db.Boolean)
    def __repr__(self):
        return '<Participant %r>' % self.email
class Item(db.Model):
    """One findable item belonging to a hunt; `required` feeds the
    hunt's all_required/num_required completion rules."""
    __tablename__ = 'items'
    item_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(500), nullable=False)
    hunt_id = db.Column(db.Integer, db.ForeignKey('hunts.hunt_id'))
    required = db.Column(db.Boolean)
    def __repr__(self):
        return '<Item %r %r>' % (self.item_id, self.name)
class Setting(db.Model):
    """Per-admin credentials for the external wax site integration."""
    __tablename__ = 'settings'
    settings_id = db.Column(db.Integer, primary_key=True)
    admin_id = db.Column(db.Integer, db.ForeignKey('admins.admin_id'))
    wax_site = db.Column(db.String(500), nullable=False)
    login = db.Column(db.String(50), nullable=False)
    # NOTE(review): password appears to be stored in plain text -- confirm
    # whether it should be hashed/encrypted at rest.
    password = db.Column(db.String(50), nullable=False)
    def __repr__(self):
        return '<Settings for admin id: %r>' % self.admin_id
| 32.414634
| 90
| 0.654628
|
4a08c450f60def57a7781ea1e6689b4065f221b2
| 300
|
py
|
Python
|
Crawling/2_python urllib/save_csv_join.py
|
twer4774/TIL
|
79fcaa8464c3f1833ce6bcf998213a9a4bbfa559
|
[
"MIT"
] | 1
|
2018-09-11T10:34:15.000Z
|
2018-09-11T10:34:15.000Z
|
Crawling/2_python urllib/save_csv_join.py
|
twer4774/TIL
|
79fcaa8464c3f1833ce6bcf998213a9a4bbfa559
|
[
"MIT"
] | null | null | null |
Crawling/2_python urllib/save_csv_join.py
|
twer4774/TIL
|
79fcaa8464c3f1833ce6bcf998213a9a4bbfa559
|
[
"MIT"
] | null | null | null |
#-*-encoding:UTF-8-*-
# Print a tiny CSV of world cities: a header row, then one row per city.
print('rank,city,population')
# join() requires every element to be str, so each field is given as a string.
for row in (['1', '상하이', '24150000'],
            ['2', '카라치', '23500000'],
            ['3', '베이징', '21515500'],
            ['4', '텐진', '124596990'],
            ['5', '이스탄불', '11102223']):
    print(','.join(row))
| 33.333333
| 42
| 0.563333
|
4a08c45ccd653e69b0428f64a80cf017f3b8059f
| 13,424
|
py
|
Python
|
profiles/serializers.py
|
openprocurement/market.prozorro.ua
|
ae19ca5558e16fc6d193f29d21940075cadd8ebc
|
[
"Apache-2.0"
] | 3
|
2019-12-04T15:55:45.000Z
|
2020-01-22T14:15:56.000Z
|
profiles/serializers.py
|
openprocurement/market.prozorro.ua
|
ae19ca5558e16fc6d193f29d21940075cadd8ebc
|
[
"Apache-2.0"
] | 1
|
2020-02-14T10:22:57.000Z
|
2020-02-14T10:22:57.000Z
|
profiles/serializers.py
|
openprocurement/market.prozorro.ua
|
ae19ca5558e16fc6d193f29d21940075cadd8ebc
|
[
"Apache-2.0"
] | 2
|
2019-11-29T14:16:22.000Z
|
2020-02-14T10:27:39.000Z
|
from operator import itemgetter
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from criteria.models import Criteria
from profiles.models import (
CURRENCY_CHOICES, Profile, ProfileCriteria, Requirement, RequirementGroup
)
from profiles.mixins import ValidateRequiredFieldsMixin
from standarts.serializers import (
AdditionalClassificationSerializer, ClassificationSerializer,
UnitSerializer
)
class RequirementSerializer(serializers.ModelSerializer):
    """Serializer for a profile Requirement.

    The API speaks camelCase (expectedValue/minValue/maxValue); exactly
    one of the three may carry a value, and it must parse as the related
    Criteria's data_type.
    """
    # Internal (source) names of the three mutually exclusive value fields.
    ONE_VALUE_AT_A_TIME_FIELDS = set(
        ('expected_value', 'min_value', 'max_value')
    )
    relatedCriteria_id = serializers.UUIDField(source='related_criteria_id')
    unit = UnitSerializer(read_only=True)
    expectedValue = serializers.CharField(
        required=False, source='expected_value', allow_null=True
    )
    minValue = serializers.CharField(
        required=False, source='min_value', allow_null=True
    )
    maxValue = serializers.CharField(
        required=False, source='max_value', allow_null=True
    )
    class Meta:
        model = Requirement
        # The camelCase declared fields above replace these model fields.
        exclude = (
            'related_criteria', 'min_value', 'max_value', 'expected_value',
            'id'
        )
    def validate(self, data):
        if not data.get('related_criteria_id'):
            raise ValidationError({
                'relatedCriteria_id': 'relatedCriteria_id is missing'
            })
        error_msg = (
            'You must pass exact one of following keys: '
            '"expectedValue", "minValue", "maxValue"'
        )
        # dict for storing passed values for expectedValue, minValue, maxValue
        # NOTE(review): the duplicate check only fires when the later value
        # is truthy, and validate_value only checks the first stored entry;
        # a falsy second value (e.g. "") slips through -- confirm intended.
        value_dict = {}
        for key, value in data.items():
            if key in self.ONE_VALUE_AT_A_TIME_FIELDS:
                if value and value_dict:
                    raise ValidationError(error_msg)
                else:
                    value_dict[key] = value
        if not value_dict:
            raise ValidationError(error_msg)
        self.validate_value(value_dict)
        return data
    def validate_value(self, value_dict):
        """Check the single provided value parses as the criteria's data_type."""
        key, value = next(iter(value_dict.items()))
        # self.criteria is set by validate_relatedCriteria_id, which DRF
        # runs (field-level validation) before this object-level validate.
        data_type = self.criteria.data_type
        error_dict = {key: f'You must provide a valid {data_type} value'}
        if data_type == 'string':
            if not isinstance(value, str):
                raise ValidationError(error_dict)
        elif data_type == 'boolean':
            # Only the literal lowercase strings are accepted.
            if value not in ('true', 'false'):
                raise ValidationError(error_dict)
        elif data_type == 'integer':
            try:
                int(value)
            except ValueError:
                raise ValidationError(error_dict)
        elif data_type == 'number':
            try:
                float(value)
            except ValueError:
                raise ValidationError(error_dict)
    def validate_relatedCriteria_id(self, value):
        # Side effect: caches the looked-up Criteria for validate_value.
        try:
            self.criteria = Criteria.objects.get(id=value, status='active')
        except Criteria.DoesNotExist:
            raise ValidationError(
                {'relatedCriteria_id': 'No active Criteria found by passed id'}
            )
        return value
    def to_representation(self, instance):
        data = super().to_representation(instance)
        # Drop keys with falsy values so unset value fields are omitted.
        data = dict(filter(itemgetter(1), data.items()))
        data['relatedCriteria_id'] = instance.related_criteria.id.hex
        return data
class RequirementGroupSerializer(
    ValidateRequiredFieldsMixin, serializers.ModelSerializer
):
    """Serializer for a group of requirements; ``requirements`` is mandatory."""
    # Checked by ValidateRequiredFieldsMixin.
    REQUIRED_FIELDS = ('requirements',)
    id = serializers.CharField(source='id.hex', required=False)
    requirements = RequirementSerializer(many=True, allow_empty=False)
    class Meta:
        model = RequirementGroup
        exclude = ()
class ProfileCriteriaSerializer(
    ValidateRequiredFieldsMixin, serializers.ModelSerializer
):
    """Serializer for a profile criteria with its nested requirement groups."""
    # Checked by ValidateRequiredFieldsMixin.
    REQUIRED_FIELDS = ('title', 'requirement_groups')
    id = serializers.CharField(source='id.hex', required=False)
    # Exposed under the camelCase API name; stored as requirement_groups.
    requirementGroups = RequirementGroupSerializer(
        source='requirement_groups', many=True, allow_empty=False
    )
    class Meta:
        model = ProfileCriteria
        exclude = ('requirement_groups', )
class ProfileImageSerializer(serializers.Serializer):
    """Plain serializer for one image entry (size label plus URL)."""
    sizes = serializers.CharField(
        max_length=10, required=False, allow_blank=True
    )
    url = serializers.CharField(
        max_length=100, required=False, allow_blank=True
    )
class ValueSerializer(serializers.Serializer):
    """Monetary value: amount, currency and whether VAT is included."""
    valueAddedTaxIncluded = serializers.BooleanField(
        source='value_added_tax_included'
    )
    amount = serializers.DecimalField(
        max_digits=10, decimal_places=2, coerce_to_string=False, required=True
    )
    currency = serializers.ChoiceField(choices=CURRENCY_CHOICES, required=True)
class ProfileBaseSerializer(serializers.ModelSerializer):
    """Common base for profile create/edit serializers.

    Provides the shared field layout plus helpers that persist nested
    Requirement / RequirementGroup / ProfileCriteria payloads.
    """
    id = serializers.CharField(source='id.hex', read_only=True)
    unit = UnitSerializer()
    value = ValueSerializer()
    criteria = ProfileCriteriaSerializer(many=True, allow_empty=False)
    images = ProfileImageSerializer(many=True)
    dateModified = serializers.DateTimeField(
        source='date_modified', read_only=True
    )
    class Meta:
        model = Profile
        exclude = (
            'classification_id', 'classification_description', 'unit_code',
            'unit_name', 'value_amount', 'additional_classification',
            'value_currency', 'value_value_added_tax_included', 'access_token',
            'date_modified'
        )
        read_only_fields = ('author', )
    def _create_requirement(self, requirement_data):
        """Create one Requirement bound to its (active) related criteria."""
        criteria = Criteria.objects.get(
            id=requirement_data.pop('related_criteria_id'),
            status='active'
        )
        requirement_data['related_criteria'] = criteria
        return Requirement.objects.create(
            **requirement_data
        )
    def _create_requirement_group(self, requirement_group_data):
        """Create a RequirementGroup, or update it when an id was supplied.

        Raises ValidationError when the supplied id matches no group.
        """
        requirements_list = requirement_group_data.pop('requirements', [])
        # The incoming id is a dict like {'hex': ...} produced by the serializer.
        group_id = requirement_group_data.pop('id', {}).get('hex')
        if group_id:
            try:
                requirement_group = RequirementGroup.objects.get(
                    id=group_id
                )
            except RequirementGroup.DoesNotExist:
                raise ValidationError({
                    'criteria': [
                        {'requirementGroups': [{
                            'id': f'RequirementGroup with id {group_id} not found'
                        }]}
                    ]
                })
            # Apply the remaining payload fields to the existing group.
            for attr, value in requirement_group_data.items():
                setattr(requirement_group, attr, value)
            requirement_group.save()
        else:
            requirement_group = RequirementGroup.objects.create(
                **requirement_group_data
            )
        if requirements_list:
            self._set_requirements_to_requirement_group(
                requirements_list, requirement_group
            )
        return requirement_group
    def _set_requirements_to_requirement_group(
        self, requirements_list_data, requirement_group
    ):
        """Create requirements and replace the group's requirement set."""
        requirement_instances = []
        for requirement_data in requirements_list_data:
            requirement = self._create_requirement(requirement_data)
            requirement_instances.append(requirement)
        requirement_group.requirements.set(requirement_instances)
    def _create_criteria(self, criteria_data):
        """Create one ProfileCriteria along with its requirement groups."""
        requirement_group_list = criteria_data.pop('requirement_groups', [])
        profile_criteria = ProfileCriteria.objects.create(**criteria_data)
        for requirement_group_data in requirement_group_list:
            requirement_group = self._create_requirement_group(
                requirement_group_data
            )
            profile_criteria.requirement_groups.add(requirement_group)
        return profile_criteria
class ProfileCreateSerializer(ProfileBaseSerializer):
    """Serializer used when creating a profile; status is server-controlled."""
    classification = ClassificationSerializer()
    additionalClassification = AdditionalClassificationSerializer(
        required=False, source='additional_classification',
        allow_null=True, many=True
    )
    status = serializers.CharField(read_only=True)
    def create(self, data):
        """Persist the profile, stamp the author, and attach its criteria."""
        criteria_payload = data.pop('criteria')
        # The author is always the authenticated requester.
        data['author'] = self.context['request'].user.username
        profile = Profile.objects.create(**data)
        profile.criteria.set(self._create_requirement_criteria(criteria_payload))
        return profile
    def _create_requirement_criteria(self, criteria_data_list):
        """Create one ProfileCriteria per payload entry; return them all."""
        return [self._create_criteria(entry) for entry in criteria_data_list]
class ProfileEditSerializer(ProfileBaseSerializer):
    """Serializer used for PATCH edits of an existing profile.

    Classification fields become read-only; nested criteria / requirement
    groups may be updated in place (when an id is supplied) or created.
    """
    classification = ClassificationSerializer(read_only=True)
    additionalClassification = AdditionalClassificationSerializer(
        required=False, source='additional_classification',
        allow_null=True, many=True, read_only=True
    )
    def update(self, instance, validated_data):
        """Apply scalar field changes, then rebuild/patch nested criteria."""
        criteria_data_list = validated_data.pop('criteria', [])
        for attr, value in validated_data.items():
            setattr(instance, attr, value)
        instance.save()
        if criteria_data_list:
            criteria_instances = self._update_requirement_criteria(
                criteria_data_list, instance
            )
            # Replaces the whole criteria set with the patched/new instances.
            instance.criteria.set(criteria_instances)
        return instance
    def validate(self, data):
        """Reject any payload key that is not a writable serializer field."""
        # check if extra fields were passed
        if hasattr(self, 'initial_data'):
            writable_fields = set(
                key for key, value in self.fields.items()
                if not value.read_only
            )
            unknown_keys = set(self.initial_data.keys()) - writable_fields
            if unknown_keys:
                raise ValidationError(
                    f'Got unknown fields for PATCH: {", ".join(unknown_keys)}'
                )
        return super().validate(data)
    def _update_requirement_criteria(self, criteria_data_list, instance):
        """Patch existing criteria (by id) or create new ones; return all.

        For an existing criteria, requirement groups are likewise patched by
        id or created, and the group set is replaced only when the payload
        actually contained ``requirement_groups``.
        """
        criteria_instances = []
        for criteria_data in criteria_data_list:
            # Serializer delivers the id as {'hex': ...}; absent means "create".
            criteria_id = criteria_data.pop('id', {}).get('hex')
            if criteria_id:
                # editing existing ProfileCriteria
                try:
                    profile_criteria = ProfileCriteria.objects.get(
                        id=criteria_id
                    )
                except ProfileCriteria.DoesNotExist:
                    raise ValidationError({
                        'criteria': [
                            {'id': f'Criteria with id {criteria_id} not found'}
                        ]
                    })
                # setting new field values for ProfileCriteria
                requirement_group_list = criteria_data.pop('requirement_groups', None)
                for attr, value in criteria_data.items():
                    setattr(profile_criteria, attr, value)
                profile_criteria.save()
                requirement_group_instances = []
                # None means groups were not in the payload; [] means "clear".
                for requirement_group_data in requirement_group_list or []:
                    requirement_group_id = requirement_group_data.pop('id', {}).get('hex')
                    if requirement_group_id:
                        # editing existing RequirementGroup
                        try:
                            requirement_group = RequirementGroup.objects.get(
                                id=requirement_group_id
                            )
                        except RequirementGroup.DoesNotExist:
                            raise ValidationError({
                                'criteria': [
                                    {'requirementGroups': [{
                                        'id': f'RequirementGroup with id {requirement_group_id} not found'
                                    }]}
                                ]
                            })
                        requirements_list = requirement_group_data.pop('requirements', [])
                        # setting new field values for RequirementGroup
                        for attr, value in requirement_group_data.items():
                            setattr(requirement_group, attr, value)
                        requirement_group.save()
                        if requirements_list:
                            self._set_requirements_to_requirement_group(
                                requirements_list, requirement_group
                            )
                    else:
                        # creating RequirementGroup
                        requirement_group = self._create_requirement_group(requirement_group_data)
                    requirement_group_instances.append(requirement_group)
                if requirement_group_list is not None:
                    profile_criteria.requirement_groups.set(requirement_group_instances)
            else:
                # creating ProfileCriteria
                profile_criteria = self._create_criteria(criteria_data)
            criteria_instances.append(profile_criteria)
        return criteria_instances
| 37.392758
| 106
| 0.614198
|
4a08c4909982cb2c5453640de01199d4474d2269
| 4,771
|
py
|
Python
|
Modules/HealerFriend.py
|
hugoj-goncalves/TibiaAuto12
|
63b97cd4aeedd87cda11efa2a78c9e59e2a088d7
|
[
"MIT"
] | 2
|
2021-09-16T15:01:46.000Z
|
2021-09-22T20:50:32.000Z
|
Modules/HealerFriend.py
|
hugoj-goncalves/TibiaAuto12
|
63b97cd4aeedd87cda11efa2a78c9e59e2a088d7
|
[
"MIT"
] | null | null | null |
Modules/HealerFriend.py
|
hugoj-goncalves/TibiaAuto12
|
63b97cd4aeedd87cda11efa2a78c9e59e2a088d7
|
[
"MIT"
] | null | null | null |
from Conf.Hotkeys import Hotkey
from Core.GUI import *
from Core.GUISetter import GUISetter
from Core.ThreadManager import AllThreads, ThreadManager
from Core.HookWindow import LocateCenterImage, SaveImage
from Engine.ScanStages import ScanStages
# Pending GUI option changes, collected on window close as (name, value) pairs.
GUIChanges = []
# Module-level toggle shared between the GUI callbacks and the scan thread.
EnabledHealerFriend = False
class HealerFriend:
    """GUI module that watches a friend's health bar and presses a heal hotkey."""
    def ScanTarget(self, BattlePosition, Target):
        """Locate `Target`'s name image inside the battle-list region.

        Returns screen coordinates of the match, or (0, 0) when not found.
        """
        HasTarget = [0, 0]
        HasTarget[0], HasTarget[1] = LocateCenterImage('images/Targets/Players/Names/' + Target + '.png', Precision=0.86, LeftHandle=True, Region=(
            BattlePosition[0], BattlePosition[1], BattlePosition[2], BattlePosition[3]))
        if HasTarget[0] != 0 and HasTarget[1] != 0:
            # Offset correction depends on which side of the region matched.
            if HasTarget[0] < BattlePosition[0]:
                return (BattlePosition[0] - 30) + HasTarget[0] + 1, HasTarget[1] + BattlePosition[1] + 1
            else:
                return (BattlePosition[0] - 40) + HasTarget[0] + 1, HasTarget[1] + BattlePosition[1] + 1
        else:
            return 0, 0
    def ScanHealerFriend(self, wait):
        """Thread body: read the friend's health bar and heal when it is low.

        NOTE(review): the target name "Tataruga" and the 68% threshold are
        hard-coded here.
        """
        Target = self.ScanTarget(self.BattlePosition, "Tataruga")
        # print("Target: ", Target[0], " - ", Target[1])
        if Target[0] != 0 and Target[1] != 0:
            # SaveImage('images/Tests/TestMaior.png', Region=(Target[0] + 29, Target[1] - 4, Target[0] + 159, Target[1] + 13))
            # Shift from the name match to the start of the health bar.
            Target = [Target[0] + 29, Target[1] + 8]
            # SaveImage('images/Tests/Test.png', Region=(Target[0], Target[1], Target[0] + 130, Target[1] + 3))
            # SaveImage('images/Tests/Test2.png', Region=(Target[0], Target[1], Target[0] + 129, Target[1] + 3))
            # SaveImage('images/Tests/Test3.png', Region=(Target[0], Target[1] - 1, Target[0] + 130, Target[1] + 2))
            Life = self.Scan.ScanStagesBattle(Target, 130)
            if Life is None:
                Life = 0
            # print('Life: ', Life)
            if Life > 0:
                if Life < 68:
                    print("Pressed ", self.HotkeyHealerFriend.get(), " To Heal Friend from: ", Life)
                    self.SendToClient.Press(self.HotkeyHealerFriend.get())
                    wait(1)
                    return
        wait(.15)
    def __init__(self, MOUSE_OPTION, BattlePosition):
        """Build the module window, its worker thread, and the GUI callbacks."""
        self.HealerFriend = GUI('HealerFriend', 'Module: Healer Friend')
        self.HealerFriend.DefaultWindow('AutoHeal2', [306, 372], [1.2, 2.29])
        self.Setter = GUISetter("HealerFriendLoader")
        self.Scan = ScanStages('Life')
        self.SendToClient = Hotkey(MOUSE_OPTION)
        self.AllThreads = AllThreads()
        self.ThreadName = 'ThreadHealerFriend'
        # Only one scanner thread is ever created for this module.
        if not self.AllThreads.ExistsThread(self.ThreadName):
            self.ThreadManager = ThreadManager(self.ThreadName, Managed=True, Func=self.ScanHealerFriend)
        self.BattlePosition = BattlePosition
        self.HotkeyHealerFriend, self.InitiatedHotkeyHealerFriend = self.Setter.Variables.Str('HotkeyHealerFriend')
        def SetHealerFriend():
            # Toggle the module on/off and (un)pause the scanner thread.
            global EnabledHealerFriend
            if not EnabledHealerFriend:
                EnabledHealerFriend = True
                ButtonEnabled.configure(text='HealerFriend: ON')
                Checking()
                self.AllThreads.UnPauseThreads(self.ThreadName)
            else:
                EnabledHealerFriend = False
                ButtonEnabled.configure(text='HealerFriend: OFF')
                Checking()
                self.AllThreads.PauseThreads(self.ThreadName)
        def Checking():
            # Rebuild the hotkey dropdown; lock it while the module is running.
            HotkeyOption = self.HealerFriend.addOption(
                self.HotkeyHealerFriend, self.SendToClient.Hotkeys, [145, 170], 10)
            if EnabledHealerFriend:
                HotkeyOption.configure(state='disabled')
            else:
                HotkeyOption.configure(state='normal')
        def CheckingGUI(Init, Get, Name):
            # Record the option only when it changed from its initial value.
            if Get != Init:
                GUIChanges.append((Name, Get))
        def Destroy():
            # Persist changed options, then close the window.
            CheckingGUI(self.InitiatedHotkeyHealerFriend,
                        self.HotkeyHealerFriend.get(), 'HotkeyHealerFriend')
            self.HealerFriend.destroyWindow()
        self.HealerFriend.addButton('Ok', Destroy, [84, 29, 130, 504], [5, 50, 8])
        global EnabledHealerFriend
        # Button label reflects the current module state.
        if not EnabledHealerFriend:
            ButtonEnabled = self.HealerFriend.addButton('HealerFriend: OFF', SetHealerFriend, [328, 29, 12, 469],
                                                        [5, 17, 8])
        else:
            ButtonEnabled = self.HealerFriend.addButton('HealerFriend: ON', SetHealerFriend, [328, 29, 12, 469],
                                                        [5, 17, 8])
        Checking()
        self.HealerFriend.Protocol(Destroy)
        self.HealerFriend.loop()
| 41.486957
| 147
| 0.588975
|
4a08c520de544c53117bd8eaa23e5b4ac46dad47
| 360
|
py
|
Python
|
pingo/examples/blink_firmata_auto.py
|
rbanffy/pingo
|
b448d5eecae17b82aafd708c2594dae6d4194d84
|
[
"MIT"
] | 116
|
2015-05-06T17:49:22.000Z
|
2021-11-16T12:59:35.000Z
|
pingo/examples/blink_firmata_auto.py
|
pingo-io/pingo-py
|
5d7081f99ff13973404dc6361560f30ce8f7009c
|
[
"MIT"
] | 49
|
2015-05-08T23:18:05.000Z
|
2017-07-12T17:11:48.000Z
|
pingo/examples/blink_firmata_auto.py
|
rbanffy/pingo
|
b448d5eecae17b82aafd708c2594dae6d4194d84
|
[
"MIT"
] | 47
|
2015-05-04T07:42:04.000Z
|
2021-08-04T20:49:54.000Z
|
"""Blink an LED on a remote Arduino
This script assumes:
- this computer is connected to an Arduino
- the Arduino is running the Examples->Firmata->StandardFirmata sketch
"""
import time
import pingo
# Auto-detect the serial port and connect to the board running Firmata.
ard = pingo.arduino.get_arduino()
print('Connected to: %s' % ard)
# Pin 13 drives the on-board LED on most Arduino boards.
led = ard.pins[13]
led.mode = pingo.OUT
# Blink forever, toggling once every half second.
while True:
    led.toggle()
    time.sleep(.5)
| 17.142857
| 70
| 0.713889
|
4a08c55ee0cbc7b7843a5cdfab5475c2ed365b5a
| 723
|
py
|
Python
|
backend/feedback/migrations/0001_initial.py
|
crowdbotics-apps/nccaa-rfp-33947
|
fe376b10f61be3c50c18dfb5ed3e169f6d0a6721
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/feedback/migrations/0001_initial.py
|
crowdbotics-apps/nccaa-rfp-33947
|
fe376b10f61be3c50c18dfb5ed3e169f6d0a6721
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/feedback/migrations/0001_initial.py
|
crowdbotics-apps/nccaa-rfp-33947
|
fe376b10f61be3c50c18dfb5ed3e169f6d0a6721
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
# Generated by Django 2.2.24 on 2021-12-21 10:18
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Initial migration for the feedback app: creates the FeedBack model."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='FeedBack',
            fields=[
                # UUID primary key generated application-side.
                ('key', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
                ('email', models.EmailField(default='email', max_length=254)),
                ('rating', models.FloatField()),
                ('message', models.TextField(blank=True, max_length=255)),
                # Whether the submitter agreed to be contacted.
                ('isContact', models.BooleanField(default=False)),
            ],
        ),
    ]
| 27.807692
| 126
| 0.582296
|
4a08c5c77a389792b8ab9f3139a9fba784f41d0d
| 887
|
py
|
Python
|
scripts/extract_content_types.py
|
b-cube/Response-Identification-Info
|
d2fa24c9f0d7db7d8bbf5cda937e1a9dd29a8f6e
|
[
"MIT"
] | null | null | null |
scripts/extract_content_types.py
|
b-cube/Response-Identification-Info
|
d2fa24c9f0d7db7d8bbf5cda937e1a9dd29a8f6e
|
[
"MIT"
] | 1
|
2015-09-23T16:30:34.000Z
|
2015-09-23T16:30:34.000Z
|
scripts/extract_content_types.py
|
b-cube/Response-Identification-Info
|
d2fa24c9f0d7db7d8bbf5cda937e1a9dd29a8f6e
|
[
"MIT"
] | 1
|
2020-03-25T09:41:03.000Z
|
2020-03-25T09:41:03.000Z
|
import os
import glob
import json
from dateutil.parser import *
from datetime import datetime
# Map each observed Content-Type header value to the crawl timestamps at
# which it was seen.
content_types = {}

for f in glob.glob('/Users/sparky/Documents/solr_responses/solr_20150922_docs/*.json'):
    with open(f, 'r') as g:
        data = json.loads(g.read())
        headers = data.get('response_headers', [])
        if not headers:
            continue
        # Normalise "Name: value" header strings into a dict keyed by the
        # lower-cased header name.
        headers = dict(
            (k.strip().lower(), v.strip()) for k, v in (h.split(':', 1) for h in headers)
        )
        content_type = headers.get('content-type', '')
        if content_type:
            # Collect every timestamp this content type was observed at.
            content_types.setdefault(content_type, []).append(
                parse(data.get('tstamp'))
            )

# One "content-type|first-seen|last-seen" line per observed type.
# FIX: dict.iteritems() is Python-2-only (AttributeError on Python 3);
# .items() behaves identically on both.
with open('unique_content_types_by_date.txt', 'w') as f:
    for k, v in content_types.items():
        f.write('|'.join([k, min(v).isoformat(), max(v).isoformat()]) + '\n')
| 30.586207
| 87
| 0.633596
|
4a08c72afc6ba24273d8bf26266d49526ce4f482
| 309
|
py
|
Python
|
app/core/migrations/0005_auto_20200607_1806.py
|
PrathameshChari04/User-Management-API
|
42bd91cdd6fc95ac3e68e455dc950320fc98b8da
|
[
"MIT"
] | null | null | null |
app/core/migrations/0005_auto_20200607_1806.py
|
PrathameshChari04/User-Management-API
|
42bd91cdd6fc95ac3e68e455dc950320fc98b8da
|
[
"MIT"
] | null | null | null |
app/core/migrations/0005_auto_20200607_1806.py
|
PrathameshChari04/User-Management-API
|
42bd91cdd6fc95ac3e68e455dc950320fc98b8da
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0 on 2020-06-07 12:36
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the Job model (from 0004_job) to Services."""
    dependencies = [
        ('core', '0004_job'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Job',
            new_name='Services',
        ),
    ]
| 17.166667
| 45
| 0.569579
|
4a08c870f6c691d67ae16e828058443d92368d41
| 1,389
|
py
|
Python
|
nova/network/constants.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 1,874
|
2015-01-04T05:18:34.000Z
|
2022-03-31T03:30:28.000Z
|
nova/network/constants.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 40
|
2015-04-13T02:32:42.000Z
|
2022-02-16T02:28:06.000Z
|
nova/network/constants.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 1,996
|
2015-01-04T15:11:51.000Z
|
2022-03-31T11:03:13.000Z
|
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Display names of Neutron API extensions that nova probes for.
QOS_QUEUE = 'QoS Queue'
NET_EXTERNAL = 'router:external'
VNIC_INDEX_EXT = 'VNIC Index'
DNS_INTEGRATION = 'DNS Integration'
MULTI_NET_EXT = 'Multi Provider Network'
FIP_PORT_DETAILS = 'Floating IP Port Details Extension'
SUBSTR_PORT_FILTERING = 'IP address substring filtering'
PORT_BINDING = 'Port Binding'
PORT_BINDING_EXTENDED = 'Port Bindings Extended'
# Default Neutron security group name.
DEFAULT_SECGROUP = 'default'
# Port attribute names used when binding ports to hosts.
BINDING_PROFILE = 'binding:profile'
BINDING_HOST_ID = 'binding:host_id'
MIGRATING_ATTR = 'migrating_to'
# Network types treated as L3 (tunnelled) networks.
L3_NETWORK_TYPES = ['vxlan', 'gre', 'geneve']
# Keys used in port resource-request / allocation payloads.
ALLOCATION = 'allocation'
RESOURCE_REQUEST = 'resource_request'
REQUEST_GROUPS = 'request_groups'
SEGMENT = 'Segment'
NUMA_POLICY = 'numa_affinity_policy'
RESOURCE_REQUEST_GROUPS_EXTENSION = "Port Resource Request Groups"
| 38.583333
| 78
| 0.767459
|
4a08c980ada6092fe3af64ea7b91d4aeb77da7b7
| 270
|
py
|
Python
|
packages/mcni/tests/mcni/components/create-storage-for-NeutronFromStorage-testcase.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 5
|
2017-01-16T03:59:47.000Z
|
2020-06-23T02:54:19.000Z
|
packages/mcni/tests/mcni/components/create-storage-for-NeutronFromStorage-testcase.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 293
|
2015-10-29T17:45:52.000Z
|
2022-01-07T16:31:09.000Z
|
packages/mcni/tests/mcni/components/create-storage-for-NeutronFromStorage-testcase.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 1
|
2019-05-25T00:53:31.000Z
|
2019-05-25T00:53:31.000Z
|
#!/usr/bin/env python
"""Create the neutron storage file consumed by the NeutronFromStorage test case."""

import numpy

from mcni.neutron_storage import neutrons_from_npyarr, storage

# 100 neutrons with 10 double-precision fields each, filled with 0..999.
data = numpy.arange(1000.).reshape(-1, 10)
neutrons = neutrons_from_npyarr(data)

out = storage('neutron-storage-for-NeutronFromStorage_TestCase', 'w')
out.write(neutrons)
| 20.769231
| 67
| 0.774074
|
4a08c98b7147ea44dde06fcab372927c9ead9624
| 1,989
|
py
|
Python
|
simulate.py
|
pekrau/queue_simulation
|
8a918316761b486842f96a063fea0aba898666b6
|
[
"MIT"
] | null | null | null |
simulate.py
|
pekrau/queue_simulation
|
8a918316761b486842f96a063fea0aba898666b6
|
[
"MIT"
] | null | null | null |
simulate.py
|
pekrau/queue_simulation
|
8a918316761b486842f96a063fea0aba898666b6
|
[
"MIT"
] | null | null | null |
"""Discrete event simulation of queue of tasks for N number of machines.
Simulate and output statistics.
"""
from __future__ import unicode_literals, print_function, absolute_import
import csv
import base
import statistics
# Simulation parameters.
N_MACHINES = 1      # machines serving the queue
N_TASKS = 1000      # tasks per simulation run
DURATION = 5.0      # mean task duration passed to the simulation
STEP = 0.025        # utilization sweep step
N = 20              # repeated runs per utilization level
# One CSV output per statistic: (utilization, mean, sample stddev) rows.
fraction_delayed_writer = csv.writer(open('fraction_delayed.csv', 'w'))
mean_delays_writer = csv.writer(open('mean_delays.csv', 'w'))
mean_positive_delays_writer = csv.writer(open('mean_positive_delays.csv', 'w'))
median_delays_writer = csv.writer(open('median_delays.csv', 'w'))
# Sweep utilization from STEP up to (but not including) 1.0.
utilization = 0.0
while True:
    utilization += STEP
    if utilization >= (1.0 - 0.1*STEP): break
    simulation = base.Simulation(n_machines=N_MACHINES, utilization=utilization)
    fraction_delayed = []
    mean_delays = []
    mean_positive_delays = []
    median_delays = []
    # NOTE(review): xrange makes this Python-2-only; `statistics` here is the
    # local module (stdlib statistics has no `sstdev`).
    for n in xrange(N):
        simulation.run(n_tasks=N_TASKS, duration=DURATION)
        fraction_delayed.append(simulation.get_number_delayed()/float(N_TASKS))
        mean_delays.append(simulation.get_mean_delay())
        mean_positive_delays.append(simulation.get_mean_positive_delay())
        median_delays.append(simulation.get_median_delay())
    # Write the aggregated statistics for this utilization level.
    fraction_delayed_writer.writerow((utilization,
                                      statistics.mean(fraction_delayed),
                                      statistics.sstdev(fraction_delayed)))
    mean_delays_writer.writerow((utilization,
                                 statistics.mean(mean_delays),
                                 statistics.sstdev(mean_delays)))
    mean_positive_delays_writer.writerow((utilization,
                                          statistics.mean(mean_positive_delays),
                                          statistics.sstdev(mean_positive_delays)))
    median_delays_writer.writerow((utilization,
                                   statistics.mean(median_delays),
                                   statistics.sstdev(median_delays)))
| 39
| 83
| 0.653092
|
4a08c98e9891e4bf47779cfa1201311bb86a9cc9
| 1,451
|
py
|
Python
|
bratreader/sentence.py
|
Gusyatnikova/argument-mining-rus
|
dc81159fce12093289a65a5e8e59890002ff74e5
|
[
"BSD-3-Clause"
] | null | null | null |
bratreader/sentence.py
|
Gusyatnikova/argument-mining-rus
|
dc81159fce12093289a65a5e8e59890002ff74e5
|
[
"BSD-3-Clause"
] | null | null | null |
bratreader/sentence.py
|
Gusyatnikova/argument-mining-rus
|
dc81159fce12093289a65a5e8e59890002ff74e5
|
[
"BSD-3-Clause"
] | null | null | null |
from bratreader.word import Word
class Sentence(object):
    """A sentence of a document, split into Word objects with char offsets."""

    def __init__(self, key, line, start):
        """
        Sentence object.

        :param key: The key to which this sentence belongs.
        :param line: The line on which this sentences occurs.
        :param start: The start index of this line in characters.
        """
        self.key = key
        self.words = []
        self.start = start
        self.end = start + len(line)
        # FIX: locate each word in the raw line instead of assuming exactly
        # one space between words — the old arithmetic drifted whenever words
        # were separated by runs of whitespace. `cursor` is the search offset
        # within `line`; advancing it past each match keeps repeated word
        # forms anchored to the correct occurrence.
        cursor = 0
        for windex, w in enumerate(line.split()):
            cursor = line.index(w, cursor)
            word_start = start + cursor
            word_end = word_start + len(w)
            self.words.append(Word(key=windex,
                                   sentkey=self.key,
                                   form=w,
                                   start=word_start,
                                   end=word_end))
            cursor += len(w)

    def getwordsinspan(self, start, end):
        """
        Retrieve all words in the specified character span.

        :param start: The start index in characters.
        :param end: The end index in characters.
        :return a list of words that fall inside the span.
        """
        # A word qualifies if the span starts inside it, ends inside it,
        # or fully contains it.
        return [word for word in self.words if
                (word.start <= start < word.end)
                or (word.start < end <= word.end)
                or (start < word.start < end and start < word.end < end)]

    def __repr__(self):
        """Representation as string."""
        return " ".join([x.form for x in self.words])
| 30.87234
| 73
| 0.51275
|
4a08cb5bb3890e42d2b3425893072bd595547f72
| 1,189
|
py
|
Python
|
tests/test_cmd_script.py
|
demlution/ledis-py
|
20312ca3c43e3b345db799aff0e2d7180f5704b4
|
[
"MIT"
] | 12
|
2015-03-20T06:26:04.000Z
|
2021-03-27T16:26:34.000Z
|
tests/test_cmd_script.py
|
demlution/ledis-py
|
20312ca3c43e3b345db799aff0e2d7180f5704b4
|
[
"MIT"
] | 2
|
2015-03-23T20:32:12.000Z
|
2019-03-06T05:41:40.000Z
|
tests/test_cmd_script.py
|
demlution/ledis-py
|
20312ca3c43e3b345db799aff0e2d7180f5704b4
|
[
"MIT"
] | 5
|
2015-03-21T05:56:18.000Z
|
2019-05-19T11:53:47.000Z
|
# coding: utf-8
# Test Cases for bit commands
import unittest
import sys
sys.path.append('..')
import ledis
from ledis._compat import b
from util import expire_at, expire_at_seconds
# Client connected to a local ledis server; tests require it to be running.
l = ledis.Ledis(port=6380)
# Lua script that simply echoes its keys and arguments back.
simple_script = "return {KEYS[1], KEYS[2], ARGV[1], ARGV[2]}"
class TestCmdScript(unittest.TestCase):
    """Tests for ledis scripting commands (eval/evalsha/script*).

    NOTE(review): the 1L/0L long literals make this file Python-2-only.
    """
    def setUp(self):
        pass
    def tearDown(self):
        # Each test leaves the db clean for the next one.
        l.flushdb()
    def test_eval(self):
        assert l.eval(simple_script, ["key1", "key2"], "first", "second") == ["key1", "key2", "first", "second"]
    def test_evalsha(self):
        # scriptload returns the script's 40-char SHA1 digest.
        sha1 = l.scriptload(simple_script)
        assert len(sha1) == 40
        assert l.evalsha(sha1, ["key1", "key2"], "first", "second") == ["key1", "key2", "first", "second"]
    def test_scriptload(self):
        sha1 = l.scriptload(simple_script)
        assert len(sha1) == 40
    def test_scriptexists(self):
        sha1 = l.scriptload(simple_script)
        assert l.scriptexists(sha1) == [1L]
    def test_scriptflush(self):
        # Flushing the script cache makes previously loaded scripts unknown.
        sha1 = l.scriptload(simple_script)
        assert l.scriptexists(sha1) == [1L]
        assert l.scriptflush() == 'OK'
        assert l.scriptexists(sha1) == [0L]
| 21.618182
| 112
| 0.616484
|
4a08cb7724bb54cd161c694c10f8514fcbaacefb
| 4,283
|
py
|
Python
|
tests/flytekit/unit/remote/test_wrapper_classes.py
|
bimtauer/flytekit
|
3cfbb71779c44cf33866b45b22f71ffc2d607d92
|
[
"Apache-2.0"
] | null | null | null |
tests/flytekit/unit/remote/test_wrapper_classes.py
|
bimtauer/flytekit
|
3cfbb71779c44cf33866b45b22f71ffc2d607d92
|
[
"Apache-2.0"
] | null | null | null |
tests/flytekit/unit/remote/test_wrapper_classes.py
|
bimtauer/flytekit
|
3cfbb71779c44cf33866b45b22f71ffc2d607d92
|
[
"Apache-2.0"
] | null | null | null |
import typing
from collections import OrderedDict
import pytest
import flytekit.configuration
from flytekit.configuration import Image, ImageConfig
from flytekit.core.condition import conditional
from flytekit.core.launch_plan import LaunchPlan
from flytekit.core.task import task
from flytekit.core.workflow import workflow
from flytekit.remote import FlyteWorkflow
from flytekit.tools.translator import gather_dependent_entities, get_serializable
# Shared serialization context used by every test in this module.
default_img = Image(name="default", fqn="test", tag="tag")
serialization_settings = flytekit.configuration.SerializationSettings(
    project="project",
    domain="domain",
    version="version",
    env=None,
    image_config=ImageConfig(default_image=default_img, images=[default_img]),
)
@pytest.mark.skip(reason="branch nodes don't work yet.")
def test_wf_cond():
    """Serialize a workflow containing a conditional (branch) node."""
    @task
    def t1(a: int) -> int:
        return a + 2
    @workflow
    def my_sub_wf(a: int) -> int:
        return t1(a=a)
    @workflow
    def my_wf(a: int) -> int:
        d = conditional("test1").if_(a > 3).then(t1(a=a)).else_().then(my_sub_wf(a=a))
        return d
    # Only checks that serialization does not raise.
    get_serializable(OrderedDict(), serialization_settings, my_wf)
def test_wf_promote_subwf_lps():
    """Promote serialized workflows (with a subworkflow / a launch plan) to FlyteWorkflow."""
    @task
    def t1(a: int) -> int:
        a = a + 2
        return a
    @workflow
    def subwf(a: int) -> int:
        return t1(a=a)
    sub_lp = LaunchPlan.get_or_create(subwf)
    @workflow
    def wf(b: int) -> int:
        return subwf(a=b)
    serialized = OrderedDict()
    wf_spec = get_serializable(serialized, serialization_settings, wf)
    sub_wf_dict = {s.id: s for s in wf_spec.sub_workflows}
    task_templates, wf_specs, lp_specs = gather_dependent_entities(serialized)
    fwf = FlyteWorkflow.promote_from_model(
        wf_spec.template, sub_workflows=sub_wf_dict, node_launch_plans=lp_specs, tasks=task_templates
    )
    # Promoted workflow keeps the original interface and single node.
    assert len(fwf.outputs) == 1
    assert list(fwf.interface.inputs.keys()) == ["b"]
    assert len(fwf.nodes) == 1
    assert len(fwf.flyte_nodes) == 1
    # Test another subwf that calls a launch plan instead of the sub_wf directly
    @workflow
    def wf2(b: int) -> int:
        return sub_lp(a=b)
    serialized = OrderedDict()
    wf_spec = get_serializable(serialized, serialization_settings, wf2)
    task_templates, wf_specs, lp_specs = gather_dependent_entities(serialized)
    fwf = FlyteWorkflow.promote_from_model(
        wf_spec.template, sub_workflows={}, node_launch_plans=lp_specs, tasks=task_templates
    )
    assert len(fwf.outputs) == 1
    assert list(fwf.interface.inputs.keys()) == ["b"]
    assert len(fwf.nodes) == 1
    assert len(fwf.flyte_nodes) == 1
    # The resource type will be different, so just check the name
    assert fwf.nodes[0].workflow_node.launchplan_ref.name == list(lp_specs.values())[0].workflow_id.name
def test_upstream():
    """Verify upstream-node wiring survives promotion, with and without subworkflows."""
    @task
    def t1(a: int) -> typing.Dict[str, str]:
        return {"a": str(a)}
    @task
    def t2(a: typing.Dict[str, str]) -> str:
        return " ".join([v for k, v in a.items()])
    @task
    def t3() -> str:
        return "hello"
    @workflow
    def my_wf(a: int) -> str:
        return t2(a=t1(a=a))
    serialized = OrderedDict()
    wf_spec = get_serializable(serialized, serialization_settings, my_wf)
    task_templates, wf_specs, lp_specs = gather_dependent_entities(serialized)
    fwf = FlyteWorkflow.promote_from_model(
        wf_spec.template, sub_workflows={}, node_launch_plans={}, tasks=task_templates
    )
    # t1 has no upstream node; t2 depends on t1's output.
    assert len(fwf.flyte_nodes[0].upstream_nodes) == 0
    assert len(fwf.flyte_nodes[1].upstream_nodes) == 1
    @workflow
    def parent(a: int) -> (str, str):
        first = my_wf(a=a)
        second = t3()
        return first, second
    serialized = OrderedDict()
    wf_spec = get_serializable(serialized, serialization_settings, parent)
    sub_wf_dict = {s.id: s for s in wf_spec.sub_workflows}
    task_templates, wf_specs, lp_specs = gather_dependent_entities(serialized)
    fwf = FlyteWorkflow.promote_from_model(
        wf_spec.template, sub_workflows=sub_wf_dict, node_launch_plans={}, tasks=task_templates
    )
    # Test upstream nodes don't get confused by subworkflows
    assert len(fwf.flyte_nodes[0].upstream_nodes) == 0
    assert len(fwf.flyte_nodes[1].upstream_nodes) == 0
| 31.262774
| 104
| 0.69017
|
4a08cc13e3f149db0da1a206a4608da33fb4f151
| 6,100
|
py
|
Python
|
DeepLearningExamples/TensorFlow/Classification/ConvNets/triton/run_online_performance_test_on_triton.py
|
puririshi98/benchmark
|
79f554f1e1cf36f62994c78e0e6e5b360f554022
|
[
"BSD-3-Clause"
] | null | null | null |
DeepLearningExamples/TensorFlow/Classification/ConvNets/triton/run_online_performance_test_on_triton.py
|
puririshi98/benchmark
|
79f554f1e1cf36f62994c78e0e6e5b360f554022
|
[
"BSD-3-Clause"
] | null | null | null |
DeepLearningExamples/TensorFlow/Classification/ConvNets/triton/run_online_performance_test_on_triton.py
|
puririshi98/benchmark
|
79f554f1e1cf36f62994c78e0e6e5b360f554022
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ],
where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ]
`--shape IMAGE:3,224,224`.
"""
import argparse
import csv
import os
from pathlib import Path
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def update_performance_data(results: List, performance_file: str):
with open(performance_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row["avg latency"] = calculate_average_latency(row)
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
def online_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
triton_gpu_engine_count: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
):
print("\n")
print(f"==== Dynamic batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
print(f"Running performance tests for dynamic batching")
performance_file = f"triton_performance_dynamic_partial.csv"
steps = 16
max_batch_size = max(batch_sizes)
max_concurrency = max(steps, max_batch_size * triton_instances * triton_gpu_engine_count)
step = max(1, max_concurrency // steps)
min_concurrency = step
batch_size = 1
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_file} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}
"""
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
exit(1)
results = list()
update_performance_data(results=results, performance_file=performance_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
os.remove(performance_file)
print("Performance results for dynamic batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument(
"--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server"
)
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
)
online_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
)
if __name__ == "__main__":
main()
| 33.888889
| 120
| 0.689508
|
4a08cdf95b1dbd8fa27f3ced9569646ad97943f7
| 8,156
|
py
|
Python
|
network-design-qap/network-design-qap-branch-and-bound-lite/paramsQAP.py
|
Grieverwzn/network-design-qap
|
6d1a55c8d3e5dd9a97bffa7b3e8de7391a3a0843
|
[
"MIT"
] | 1
|
2021-04-16T07:03:32.000Z
|
2021-04-16T07:03:32.000Z
|
network-design-qap/network-design-qap-branch-and-bound-lite/paramsQAP.py
|
Grieverwzn/network-design-qap
|
6d1a55c8d3e5dd9a97bffa7b3e8de7391a3a0843
|
[
"MIT"
] | null | null | null |
network-design-qap/network-design-qap-branch-and-bound-lite/paramsQAP.py
|
Grieverwzn/network-design-qap
|
6d1a55c8d3e5dd9a97bffa7b3e8de7391a3a0843
|
[
"MIT"
] | 1
|
2021-04-16T07:03:33.000Z
|
2021-04-16T07:03:33.000Z
|
import pandas as pd
import os
import time
import numpy as np
class Node:
def __init__(self):
self.name = ''
self.node_id = 0
self.m_outgoing_link_list = []
self.m_incoming_link_list = []
class Link:
def __init__(self):
self.link_id = 0
self.link_type = 0
self.from_node_id = 0
self.to_node_id = 0
self.from_node = None
self.to_node = None
self.built_cost = 0.0
self.trans_cost = 0.0
self.multiplier = 0.0
class Agent:
def __init__(self):
self.agent_id = 0
self.agent_type = 0
# self.agent_list_seq_no = 0
self.origin_node_id = 0
self.destination_node_id = 0
self.origin_node = None
self.destination_node = None
self.customized_cost_link_type = 0
self.flow = 0.0
self.set_of_allowed_links_types = []
self.path_cost = 0
self.path_node_seq_no_list = []
self.path_link_seq_no_list = []
self.path_node_seq_str = ''
self.path_time_seq_str = ''
self.number_of_nodes = 0
self.path_cost = 0
self.m_path_link_seq_no_list_size = 0
self.m_current_link_seq_no = 0
class Branch: # it is the branch for Gilmore Lawler bound
def __init__(self, from_node_id, to_node_id):
self.branch_id = 0
self.from_node_id = from_node_id
self.to_node_id = to_node_id
self.i_ind = 0
self.k_ind = 0
self.trans_cost_array = None
self.flow_array = None
class ParamsQAP:
def __init__(self, cwd='', args=None):
self.cwd = cwd
self.args = args
self.nb_of_nodes = 0
self.nb_of_links = 0
self.nb_of_agents = 0
self.node_list = []
self.link_list = []
self.agent_list = []
self.node_id_to_node_dict = {}
self.nb_of_orig_building = 0
self.nb_of_orig_location = 0
self.nb_of_dest_building = 0
self.nb_of_dest_location = 0
self.building_orig_list = []
self.building_dest_list = []
self.location_orig_list = []
self.location_dest_list = []
self.flow_mat = None
self.trans_cost_mat = None
self.build_cost_orig_mat = None
self.build_cost_dest_mat = None
self.GLB_cost_mat = None
self.nb_of_branch = 0
self.branch_list = []
self.readInputData()
self.initialization()
def readInputData(self):
print('Start input data...')
t0 = time.time()
node_df = pd.read_csv(os.path.join(self.cwd,'input_node.csv'), encoding='gbk')
self.nb_of_nodes = len(node_df)
for i in range(self.nb_of_nodes):
node = Node()
node.node_id = node_df.loc[i,'node_id']
node.name = node_df.loc[i,'node_name']
self.node_list.append(node)
self.node_id_to_node_dict[node.node_id] = node
if node.name == 'building node1':
self.building_orig_list.append(node)
elif node.name == 'building node2':
self.building_dest_list.append(node)
elif node.name == 'location node1':
self.location_orig_list.append(node)
elif node.name == 'location node2':
self.location_dest_list.append(node)
link_df = pd.read_csv(os.path.join(self.cwd,'input_link.csv'), encoding='gbk')
self.nb_of_links = len(link_df)
for i in range(self.nb_of_links):
link = Link()
link.link_id = link_df.loc[i,'link_id']
link.link_type = link_df.loc[i, 'link_type']
link.from_node_id = link_df.loc[i,'from_node_id']
link.to_node_id = link_df.loc[i,'to_node_id']
link.built_cost = link_df.loc[i,'built_cost']
link.trans_cost = link_df.loc[i,'trans_cost']
link.from_node = self.node_id_to_node_dict[link.from_node_id]
link.to_node = self.node_id_to_node_dict[link.to_node_id]
self.link_list.append(link)
link.from_node.m_outgoing_link_list.append(link)
link.to_node.m_incoming_link_list.append(link)
agent_df = pd.read_csv(os.path.join(self.cwd,'input_agent.csv'), encoding='gbk')
self.nb_of_agents = len(agent_df)
for i in range(self.nb_of_agents):
agent = Agent()
agent.agent_id = agent_df.loc[i,'agent_id']
agent.agent_type = agent_df.loc[i, 'agent_type']
agent.origin_node_id = agent_df.loc[i, 'origin_node_id']
agent.destination_node_id = agent_df.loc[i, 'destination_node_id']
agent.customized_cost_link_type = agent_df.loc[i, 'customized_cost_link_type']
agent.flow = agent_df.loc[i, 'customized_cost_link_value']
set_of_allowed_link_types = agent_df.loc[i, 'set_of_allowed_link_types']
agent.set_of_allowed_links_types = list(map(int, set_of_allowed_link_types.split(";")))
agent.origin_node = self.node_id_to_node_dict[agent.origin_node_id]
agent.destination_node = self.node_id_to_node_dict[agent.destination_node_id]
self.agent_list.append(agent)
t1 = time.time()
total_time = round(t1-t0, 2)
print(f' {self.nb_of_nodes} nodes, {self.nb_of_links} links, {self.nb_of_agents} agents loaded')
print(f' time used: {total_time}s')
def initialization(self):
print('Start initialization...')
t0 = time.time()
self.nb_of_orig_building = len(self.building_orig_list)
self.nb_of_orig_location = len(self.location_orig_list)
self.nb_of_dest_building = len(self.building_dest_list)
self.nb_of_dest_location = len(self.location_dest_list)
self.flow_mat = np.zeros([self.nb_of_orig_building, self.nb_of_dest_building])
self.trans_cost_mat = np.zeros([self.nb_of_orig_location, self.nb_of_dest_location])
self.build_cost_orig_mat = np.zeros([self.nb_of_orig_building, self.nb_of_orig_location])
self.build_cost_dest_mat = np.zeros([self.nb_of_dest_location, self.nb_of_dest_building])
self.GLB_cost_mat = np.zeros([self.nb_of_orig_building, self.nb_of_orig_location])
node_id_unit = self.args['node_id_unit']
for link in self.link_list:
if (link.link_type == 1) or (link.link_type == 4): # transportation link
k_ind = np.mod(link.from_node_id, node_id_unit)
l_ind = np.mod(link.to_node_id, node_id_unit)
self.trans_cost_mat[k_ind - 1][l_ind - 1] = link.trans_cost
elif link.link_type == 2: # 'building_orig':
i_ind = np.mod(link.from_node_id, node_id_unit)
k_ind = np.mod(link.to_node_id, node_id_unit)
self.build_cost_orig_mat[i_ind - 1][k_ind - 1] = link.built_cost
elif link.link_type == 3: # 'building_dest':
l_ind = np.mod(link.from_node_id, node_id_unit)
j_ind = np.mod(link.to_node_id, node_id_unit)
self.build_cost_dest_mat[l_ind - 1][j_ind - 1] = link.built_cost
for agent in self.agent_list:
i_ind = np.mod(agent.origin_node_id, node_id_unit)
j_ind = np.mod(agent.destination_node_id, node_id_unit)
self.flow_mat[i_ind - 1][j_ind - 1] = agent.flow
for link in self.link_list:
if link.link_type == 2: # 'building_orig'
branch = Branch(link.from_node_id, link.to_node_id)
i_ind = np.mod(branch.from_node_id, node_id_unit) - 1
k_ind = np.mod(branch.to_node_id, node_id_unit) - 1
branch.i_ind, branch.k_ind = i_ind, k_ind
branch.trans_cost_array = self.trans_cost_mat[k_ind,:]
branch.flow_array = self.flow_mat[i_ind,:]
branch.branch_id = self.nb_of_branch
self.nb_of_branch += 1
self.branch_list.append(branch)
t1 = time.time()
total_time = round(t1-t0, 2)
print(f' time used: {total_time}s')
| 37.585253
| 105
| 0.617705
|
4a08cf9cd67dd335f55fc9ccecbe0d72fa6e27a7
| 73
|
py
|
Python
|
processors/instagram_grabber.py
|
brianhouse/okavango
|
4006940ddead3f31eea701efb9b9dcdc7b19402e
|
[
"MIT"
] | 2
|
2015-01-25T06:20:03.000Z
|
2015-02-15T23:54:41.000Z
|
processors/instagram_grabber.py
|
brianhouse/okavango_15
|
4006940ddead3f31eea701efb9b9dcdc7b19402e
|
[
"MIT"
] | null | null | null |
processors/instagram_grabber.py
|
brianhouse/okavango_15
|
4006940ddead3f31eea701efb9b9dcdc7b19402e
|
[
"MIT"
] | 3
|
2017-11-14T21:18:23.000Z
|
2021-06-20T21:08:31.000Z
|
#!/usr/bin/env python3
from ingest import instagram
instagram.main()
| 9.125
| 28
| 0.739726
|
4a08cfba85cbc0ed9d1da7a3d868c83f90632a41
| 4,029
|
py
|
Python
|
codemate/structure.py
|
DavidMeu/codemate
|
fcdc7591c8a1cd5922ddab1a3ec7a0dae37576c3
|
[
"MIT"
] | null | null | null |
codemate/structure.py
|
DavidMeu/codemate
|
fcdc7591c8a1cd5922ddab1a3ec7a0dae37576c3
|
[
"MIT"
] | null | null | null |
codemate/structure.py
|
DavidMeu/codemate
|
fcdc7591c8a1cd5922ddab1a3ec7a0dae37576c3
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
from collections import Counter
from functools import partial
from typing import Collection, List, Optional
from codemate.block import Block
class Structure(Block):
"""
Creates an abstract Python structure syntax.
Args:
name(str): The name of the structure.
"""
def __init__(
self,
name: str,
) -> None:
super().__init__()
self._name = name
self._decorators: List[str] = []
def add_decorator(self, line: str) -> "Structure":
"""
Adds a line that represent a Python decorator syntax to the structure,
in FIFO order.
Args:
line (str): The syntax line that should be inserted as a decorator.
Returns:
Class: The class instance.
"""
self._decorators.append(f"@{line}")
return self
def _format_decorators(self, indent: int) -> str:
format_line = partial(self.parse_block, new_line=1, indent=indent)
syntax = "".join(format_line(line) for line in self._decorators)
return syntax.strip()
@abstractmethod
def _format_signature(self, indent: int) -> str:
raise NotImplementedError
def syntax(self, indent: int = 0, imports: bool = True) -> str:
"""
Convert the structure to Python syntax.
Args:
indent (int): How much to indent the class content.
imports (bool): Whether to add imports or not.
Returns:
str: The structure Python syntax.
"""
syntax = ""
if self._decorators:
syntax += self._format_decorators(indent)
syntax += "\n"
syntax += self._format_signature(indent)
block_content = super().syntax(indent + 1, imports)
syntax += "\n"
syntax += self.parse_block(block_content)
return syntax
class Function(Structure):
"""
Generates a Python function syntax.
Args:
name (str): The name of the function.
arguments (Collection[str]): The inputs of the function.
is_async (bool): Represents whether async keyword should be added.
return_value (Optional[str]): The type of the function return value.
"""
def __init__(
self,
name: str,
arguments: Collection[str] = (),
is_async: bool = False,
return_value: Optional[str] = None,
) -> None:
super().__init__(name)
self._arguments = arguments
self._is_async = is_async
self._return_value = return_value
def _format_signature(self, indent: int) -> str:
# Counter is used to remove duplications of arguments
args_syntax = ", ".join(Counter(self._arguments))
async_prefix = "async " if self._is_async else ""
return_value = f" -> {self._return_value}" if self._return_value else ""
signature = f"{async_prefix}def {self._name}({args_syntax}){return_value}:"
return self.parse_block(signature, indent=indent)
class Class(Structure):
"""
Creates a python class syntax.
Args:
name(str): The name of the class.
inherit (Collection[str]): The classes that this class inherits from.
"""
def __init__(
self,
name: str,
metaclass: Optional[str] = None,
inherit: Collection[str] = (),
) -> None:
super().__init__(name)
self._metaclass = metaclass
self._inherit = inherit
def _format_signature(self, indent: int) -> str:
signature = f"class {self._name}"
# Counter is used to remove duplications of arguments
inheritance = ", ".join(Counter(self._inherit))
metaclass = f"metaclass={self._metaclass}" if self._metaclass else ""
if inheritance and metaclass:
inheritance += ","
if inheritance or metaclass:
signature += f"({inheritance}{metaclass})"
signature += ":"
return self.parse_block(signature, indent=indent)
| 30.522727
| 83
| 0.607843
|
4a08cfd4e4689b958b3c273b449fca2caa87e7ce
| 6,922
|
py
|
Python
|
nodeconductor/openstack/models.py
|
p-p-m/nodeconductor
|
bc702302ef65c89793452f0fd6ca9a6bec79782f
|
[
"Apache-2.0"
] | null | null | null |
nodeconductor/openstack/models.py
|
p-p-m/nodeconductor
|
bc702302ef65c89793452f0fd6ca9a6bec79782f
|
[
"Apache-2.0"
] | null | null | null |
nodeconductor/openstack/models.py
|
p-p-m/nodeconductor
|
bc702302ef65c89793452f0fd6ca9a6bec79782f
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils.encoding import python_2_unicode_compatible
from model_utils import FieldTracker
from nodeconductor.core import models as core_models
from nodeconductor.structure import models as structure_models
from nodeconductor.quotas.models import QuotaModelMixin
from nodeconductor.iaas.models import SecurityGroupRuleValidationMixin
from nodeconductor.logging.log import LoggableMixin
class OpenStackService(structure_models.Service):
projects = models.ManyToManyField(
structure_models.Project, related_name='openstack_services', through='OpenStackServiceProjectLink')
class Meta:
unique_together = ('customer', 'settings')
verbose_name = 'OpenStack service'
verbose_name_plural = 'OpenStack services'
@property
def auth_url(self):
# XXX: Temporary backward compatibility
return self.settings.backend_url
class OpenStackServiceProjectLink(QuotaModelMixin, structure_models.ServiceProjectLink):
QUOTAS_NAMES = ['vcpu', 'ram', 'storage', 'instances', 'security_group_count', 'security_group_rule_count',
'floating_ip_count']
service = models.ForeignKey(OpenStackService)
tenant_id = models.CharField(max_length=64, blank=True)
internal_network_id = models.CharField(max_length=64, blank=True)
external_network_id = models.CharField(max_length=64, blank=True)
availability_zone = models.CharField(
max_length=100, blank=True,
help_text='Optional availability group. Will be used for all instances provisioned in this tenant'
)
class Meta:
unique_together = ('service', 'project')
verbose_name = 'OpenStack service project link'
verbose_name_plural = 'OpenStack service project links'
@property
def cloud(self):
# XXX: Temporary backward compatibility
return self.service
@property
def username(self):
# XXX: Temporary backward compatibility
return self.service.settings.username
@property
def password(self):
# XXX: Temporary backward compatibility
return self.service.settings.password
def get_backend(self):
return super(OpenStackServiceProjectLink, self).get_backend(tenant_id=self.tenant_id)
class Flavor(LoggableMixin, structure_models.ServiceProperty):
cores = models.PositiveSmallIntegerField(help_text='Number of cores in a VM')
ram = models.PositiveIntegerField(help_text='Memory size in MiB')
disk = models.PositiveIntegerField(help_text='Root disk size in MiB')
class Image(structure_models.ServiceProperty):
min_disk = models.PositiveIntegerField(default=0, help_text='Minimum disk size in MiB')
min_ram = models.PositiveIntegerField(default=0, help_text='Minimum memory size in MiB')
@python_2_unicode_compatible
class SecurityGroup(core_models.UuidMixin,
core_models.NameMixin,
core_models.DescribableMixin,
core_models.SynchronizableMixin):
class Permissions(object):
customer_path = 'service_project_link__project__customer'
project_path = 'service_project_link__project'
project_group_path = 'service_project_link__project__project_groups'
service_project_link = models.ForeignKey(
OpenStackServiceProjectLink, related_name='security_groups')
backend_id = models.CharField(max_length=128, blank=True)
def __str__(self):
return '%s (%s)' % (self.name, self.service_project_link)
@classmethod
def get_url_name(cls):
return 'openstack-sgp'
SecurityGroup._meta.get_field('state').default = core_models.SynchronizationStates.SYNCING_SCHEDULED
@python_2_unicode_compatible
class SecurityGroupRule(SecurityGroupRuleValidationMixin, models.Model):
TCP = 'tcp'
UDP = 'udp'
ICMP = 'icmp'
CHOICES = (
(TCP, 'tcp'),
(UDP, 'udp'),
(ICMP, 'icmp'),
)
security_group = models.ForeignKey(SecurityGroup, related_name='rules')
protocol = models.CharField(max_length=4, blank=True, choices=CHOICES)
from_port = models.IntegerField(validators=[MaxValueValidator(65535)], null=True)
to_port = models.IntegerField(validators=[MaxValueValidator(65535)], null=True)
cidr = models.CharField(max_length=32, blank=True)
backend_id = models.CharField(max_length=128, blank=True)
def __str__(self):
return '%s (%s): %s (%s -> %s)' % \
(self.security_group, self.protocol, self.cidr, self.from_port, self.to_port)
class FloatingIP(core_models.UuidMixin):
class Permissions(object):
customer_path = 'service_project_link__project__customer'
project_path = 'service_project_link__project'
project_group_path = 'service_project_link__project__project_groups'
service_project_link = models.ForeignKey(
OpenStackServiceProjectLink, related_name='floating_ips')
address = models.GenericIPAddressField(protocol='IPv4')
status = models.CharField(max_length=30)
backend_id = models.CharField(max_length=255)
backend_network_id = models.CharField(max_length=255, editable=False)
tracker = FieldTracker()
class Instance(structure_models.Resource,
structure_models.PaidResource,
structure_models.VirtualMachineMixin):
DEFAULT_DATA_VOLUME_SIZE = 20 * 1024
service_project_link = models.ForeignKey(
OpenStackServiceProjectLink, related_name='instances', on_delete=models.PROTECT)
external_ips = models.GenericIPAddressField(null=True, blank=True, protocol='IPv4')
internal_ips = models.GenericIPAddressField(null=True, blank=True, protocol='IPv4')
# OpenStack backend specific fields
system_volume_id = models.CharField(max_length=255, blank=True)
system_volume_size = models.PositiveIntegerField(default=0, help_text='Root disk size in MiB')
data_volume_id = models.CharField(max_length=255, blank=True)
data_volume_size = models.PositiveIntegerField(
default=DEFAULT_DATA_VOLUME_SIZE, help_text='Data disk size in MiB', validators=[MinValueValidator(1 * 1024)])
tracker = FieldTracker()
@property
def cloud_project_membership(self):
# Temporary backward compatibility
return self.service_project_link
def get_log_fields(self):
return (
'uuid', 'name', 'type', 'service_project_link', 'ram', 'cores',
'data_volume_size', 'system_volume_size',
)
class InstanceSecurityGroup(models.Model):
class Permissions(object):
project_path = 'instance__project'
project_group_path = 'instance__project__project_groups'
instance = models.ForeignKey(Instance, related_name='security_groups')
security_group = models.ForeignKey(SecurityGroup, related_name='instance_groups')
| 36.819149
| 118
| 0.733603
|
4a08d005d029c0c30db516a367647a8d94fc7f8b
| 8,641
|
py
|
Python
|
src/simmate/calculators/vasp/workflows/_to_do/scratch_ELF.py
|
sionab/simmate
|
6dedea7310829aae425bf3393e7923e454a0129f
|
[
"BSD-3-Clause"
] | 9
|
2021-12-21T02:58:21.000Z
|
2022-01-25T14:00:06.000Z
|
src/simmate/calculators/vasp/workflows/_to_do/scratch_ELF.py
|
sionab/simmate
|
6dedea7310829aae425bf3393e7923e454a0129f
|
[
"BSD-3-Clause"
] | 51
|
2022-01-01T15:59:58.000Z
|
2022-03-26T21:25:42.000Z
|
src/simmate/calculators/vasp/workflows/_to_do/scratch_ELF.py
|
sionab/simmate
|
6dedea7310829aae425bf3393e7923e454a0129f
|
[
"BSD-3-Clause"
] | 7
|
2022-01-01T03:44:32.000Z
|
2022-03-29T19:59:27.000Z
|
# -*- coding: utf-8 -*-
from pymatgen.io.vasp.sets import DictSet
#!!! The warning that is raised here is because there is no YAML! It can be ignored
class MyCustomSet(DictSet):
CONFIG = {
"INCAR": {
# Base Settings
"NSW": 0, # Max ionic steps (0 is static energy calc)
"IVDW": 12, # use Grimmes VDW correction
"NELM": 100, # Max SCF loops allowed
"ISPIN": 2, # run spin-polarized
# Quality of Calc
"PREC": "Accurate", #
"EDIFF": 1.0e-07, # Break condition for SCF loop
"EDIFFG": -1e-04, # Break condition for ionic step loop # negative has different meaning!
"ENCUT": 500,
"ISIF": 3, # Relax cell shape, volume, and atomic positions
"ISMEAR": 0, # Guassian smearing
"SIGMA": 0.060,
# Deciding which files to write
"LCHARG": True, # write CHGCAR
"LWAVE": True, # write WAVECAR
"LELF": True, # write ELFCAR
# Parallel Options
"NPAR": 1, # Must be set if LELF is set to True
},
"KPOINTS": {"reciprocal_density": 50},
"POTCAR_FUNCTIONAL": "PBE",
"POTCAR": {
"Ac": "Ac",
"Ag": "Ag",
"Al": "Al",
"Ar": "Ar",
"As": "As",
"Au": "Au",
"B": "B",
"Ba": "Ba_sv",
"Be": "Be_sv",
"Bi": "Bi",
"Br": "Br",
"C": "C",
"Ca": "Ca_sv",
"Cd": "Cd",
"Ce": "Ce",
"Cl": "Cl",
"Co": "Co",
"Cr": "Cr_pv",
"Cs": "Cs_sv",
"Cu": "Cu_pv",
"Dy": "Dy_3",
"Er": "Er_3",
"Eu": "Eu",
"F": "F",
"Fe": "Fe_pv",
"Ga": "Ga_d",
"Gd": "Gd",
"Ge": "Ge_d",
"H": "H",
"He": "He",
"Hf": "Hf_pv",
"Hg": "Hg",
"Ho": "Ho_3",
"I": "I",
"In": "In_d",
"Ir": "Ir",
"K": "K_sv",
"Kr": "Kr",
"La": "La",
"Li": "Li_sv",
"Lu": "Lu_3",
"Mg": "Mg_pv",
"Mn": "Mn_pv",
"Mo": "Mo_pv",
"N": "N",
"Na": "Na_pv",
"Nb": "Nb_pv",
"Nd": "Nd_3",
"Ne": "Ne",
"Ni": "Ni_pv",
"Np": "Np",
"O": "O",
"Os": "Os_pv",
"P": "P",
"Pa": "Pa",
"Pb": "Pb_d",
"Pd": "Pd",
"Pm": "Pm_3",
"Pr": "Pr_3",
"Pt": "Pt",
"Pu": "Pu",
"Rb": "Rb_sv",
"Re": "Re_pv",
"Rh": "Rh_pv",
"Ru": "Ru_pv",
"S": "S",
"Sb": "Sb",
"Sc": "Sc_sv",
"Se": "Se",
"Si": "Si",
"Sm": "Sm_3",
"Sn": "Sn_d",
"Sr": "Sr_sv",
"Ta": "Ta_pv",
"Tb": "Tb_3",
"Tc": "Tc_pv",
"Te": "Te",
"Th": "Th",
"Ti": "Ti_pv",
"Tl": "Tl_d",
"Tm": "Tm_3",
"U": "U",
"V": "V_pv",
"W": "W_pv",
"Xe": "Xe",
"Y": "Y_sv",
"Yb": "Yb_2",
"Zn": "Zn",
"Zr": "Zr_sv",
},
}
def __init__(self, structure, **kwargs):
"""
:param structure: Structure
:param kwargs: Same as those supported by DictSet.
"""
super().__init__(structure, MyCustomSet.CONFIG, **kwargs)
self.kwargs = kwargs
# -----------------------------------------------------------------------------
print("Setting up...")
# load structure
from pymatgen.core.structure import Structure
structure = Structure.from_file("Y2C.cif") #!!! NAME YOUR INPUT STRUCTURE FILE HERE
structure = structure.get_primitive_structure()
# write the vasp input files
MyCustomSet(structure).write_input(".")
# -----------------------------------------------------------------------------
print("Running vasp...")
# run vasp
import subprocess
subprocess.run(
"module load vasp; mpirun -np 20 /nas/longleaf/apps-dogwood/vasp/5.4.4/bin/vasp_std > vasp.out",
shell=True,
)
# -----------------------------------------------------------------------------
print("Working up...")
# import the VASP results
from pymatgen.io.vasp.outputs import Vasprun
xmlReader = Vasprun(
filename="vasprun.xml",
parse_dos=True,
parse_eigen=True,
parse_projected_eigen=False,
parse_potcar_file=True,
exception_on_bad_xml=True,
)
# grab the info we want
final_structure = xmlReader.structures[-1] # or Structure.from_file('CONTCAR')
final_energy = (
xmlReader.final_energy / final_structure.num_sites
) #!!! convert this to per_atom!
converged = xmlReader.converged
from pymatgen.io.vasp.outputs import Elfcar
elfcar = Elfcar.from_file("ELFCAR")
alphacar = elfcar.get_alpha()
alphacar.write_file("ALPHACAR.vasp")
from pymatgen.io.vasp.outputs import Chgcar
chgcar = Chgcar.from_file("CHGCAR")
# scale data of the Elfcar to the size of Chgcar using Kronecker product
import numpy as np
scale = [
int(c / e) for c, e in zip(chgcar.data["total"].shape, elfcar.data["total"].shape)
]
elfcar_scaled = np.kron(elfcar.data["total"], np.ones(scale))
from scipy.ndimage import gaussian_filter #!!! This is for smoothing and optional
elfcar_scaled = gaussian_filter(elfcar_scaled, sigma=0.75)
# from scipy.linalg import kron
# elfcar_scaled = kron(elfcar.data["total"], chgcar.data["total"])
elfcar_scaled = Elfcar(elfcar.structure, {"total": elfcar_scaled})
elfcar_scaled.write_file("ELFCAR_scaled.vasp")
alphacar_scaled = elfcar_scaled.get_alpha()
alphacar_scaled.write_file("ALPHACAR_scaled.vasp")
# also write the crystal structure because it may differ from the input
elfcar.structure.to(filename="primitive_structure.cif")
# For 3d integrations:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.tplquad.html
# def f(x,y,z):
# return chgcar.data['total'][0][1][2]
# tplquad(f, 1, 2, lambda x: 2, lambda x: 3, lambda x, y: 0, lambda x, y: 1)
# OR
# https://stackoverflow.com/questions/47415122/calculate-the-volume-of-3d-plot
# -----------------------------------------------------------------------------
raise NameError
import numpy as np
import itertools
def get_integrated_total(elfcar, ind, radius, nbins=1):
"""
Get integrated difference of atom index ind up to radius. This can be
an extremely computationally intensive process, depending on how many
grid points are in the VolumetricData.
Args:
ind (int): Index of atom.
radius (float): Radius of integration.
nbins (int): Number of bins. Defaults to 1. This allows one to
obtain the charge integration up to a list of the cumulative
charge integration values for radii for [radius/nbins,
2 * radius/nbins, ....].
Returns:
Differential integrated charge as a np array of [[radius, value],
...]. Format is for ease of plotting. E.g., plt.plot(data[:,0],
data[:,1])
"""
struct = elfcar.structure
a = elfcar.dim
if (
ind not in elfcar._distance_matrix
or elfcar._distance_matrix[ind]["max_radius"] < radius
):
coords = []
for (x, y, z) in itertools.product(*[list(range(i)) for i in a]):
coords.append([x / a[0], y / a[1], z / a[2]])
sites_dist = struct.lattice.get_points_in_sphere(
coords, struct[ind].coords, radius
)
elfcar._distance_matrix[ind] = {
"max_radius": radius,
"data": np.array(sites_dist),
}
data = elfcar._distance_matrix[ind]["data"]
# Use boolean indexing to find all charges within the desired distance.
inds = data[:, 1] <= radius
dists = data[inds, 1]
data_inds = np.rint(
np.mod(list(data[inds, 0]), 1) * np.tile(a, (len(dists), 1))
).astype(int)
vals = [elfcar.data["total"][x, y, z] for x, y, z in data_inds] #!!! diff to total
hist, edges = np.histogram(dists, bins=nbins, range=[0, radius], weights=vals)
data = np.zeros((nbins, 2))
data[:, 0] = edges[1:]
data[:, 1] = [sum(hist[0 : i + 1]) / elfcar.ngridpts for i in range(nbins)]
return data
data = get_integrated_total(chgcar, ind=0, radius=2.5, nbins=100)
import matplotlib.pyplot as plt
plt.plot(data[:, 0], data[:, 1])
| 30.213287
| 102
| 0.505844
|
4a08d02dffa67d4ea0c27fc91257e279ec5344e3
| 3,100
|
py
|
Python
|
find_planes.py
|
janvimadhani/satellite_planes
|
cb80d3c3a840e681ff26c78f40218ddbbec985b0
|
[
"MIT"
] | null | null | null |
find_planes.py
|
janvimadhani/satellite_planes
|
cb80d3c3a840e681ff26c78f40218ddbbec985b0
|
[
"MIT"
] | null | null | null |
find_planes.py
|
janvimadhani/satellite_planes
|
cb80d3c3a840e681ff26c78f40218ddbbec985b0
|
[
"MIT"
] | null | null | null |
import numpy as np
import time as time
import plane_finding_tools as pf
import sys
snapshot = sys.argv[1]
systems_file='/data78/welker/madhani/systems/systems_' + str(snapshot) + '.pickle'
#systems_file = '/Users/JanviMadhani/satellite_planes/systems/MWsystems.pickle'
systems = pf.read_systems(systems_file)
corotation_dict = {}
corotation_dict['syst_ID'] = []
corotation_dict['best_rms'] = []
corotation_dict['phys_c_to_a'] = []
corotation_dict['corotating_frac'] = []
# JUST CHECK WITH ONE RIGHT NOW
#syst = 46
# Run the full plane-of-satellites pipeline for every system: fit the best
# plane, measure physical/inertial shape, compute the corotating fraction and
# test the result against isotropic (spherical and elliptical) mocks.
# NOTE(review): `pf`, `systems`, `snapshot` and `corotation_dict` are defined
# earlier in this file (not visible here); `pf` is presumably the plane-finder
# helper module -- confirm against the file header.
for syst in range(len(systems)):
    print('System with Halo ID:', systems[syst]['halo_ID'])
    name_of_syst = systems[syst]['halo_ID']
    corotation_dict['syst_ID'].append(name_of_syst)
    # Evolutionary (genetic) search for the plane parameters minimising rms.
    best_u1,best_u2,best_u3,best_rms = pf.evolutionary_plane_finder(systems=systems,system=syst,n_iter=200,n_start=25,n_erase=10,n_avg_mutants=5,level=1,rand=False,verbose=True)
    z_best,xx,yy,unit_n,los = pf.get_plane(u1=best_u1,u2=best_u2,u3=best_u3,systems=systems,system=syst)
    corotation_dict['best_rms'].append(best_rms)
    ## get physical extent, c_to_a:
    a,b,c,phys_c_to_a = pf.find_physical_extent(u1=best_u1,u2=best_u2,u3=best_u3,systems=systems,system=syst,actual_rms=best_rms,nrms = 2,level=1)
    phys_ext = [a,b,c,phys_c_to_a]
    corotation_dict['phys_c_to_a'].append(phys_c_to_a)
    ## find inertia tensor
    I = pf.find_inertia_tensor(systems[syst])
    v1,v2,v3 = pf.find_axes_of_rot(I)
    i_c_to_a = pf.find_axes_ratios(I)
    inertia = [v1,v2,v3,i_c_to_a]
    name_of_3dplot = 'system_' + str(name_of_syst) +'.png'
    pf.save_3Dplot(name_of_3dplot,systems=systems,syst=syst,snapshot=snapshot,xx=xx,yy=yy,z_best=z_best,los=v2,unit_n=unit_n,phys_ext = phys_ext, inertia=inertia)
    # Fraction of members within nrms*rms of the fitted plane sharing a
    # rotation sense.
    corot_frac = pf.corotating_frac(systems=systems,syst=syst,unit_n=unit_n,actual_rms =best_rms,nrms=1,level=1)
    corotation_dict['corotating_frac'].append(corot_frac)
    #####################
    # ISOTROPY ANALYSIS #
    #####################
    ## check for isotropy n times and find n rms dists
    iso_sph_systs_rms,iso_ell_systs_rms = pf.check_isotropy(systems=systems,syst=syst,unit_n=unit_n,actual_rms=best_rms,n=2000,corot=False)
    name_of_hist = 'system_' + str(name_of_syst) +'_hist.png'
    ## save spherical and get significance
    sph_sig = pf.save_hist(name_of_hist,best_rms,iso_sph_systs_rms,snapshot=snapshot,type='spherical',savedat=True)
    ## save elliptical and get significance
    ell_sig = pf.save_hist(name_of_hist,best_rms,iso_ell_systs_rms,snapshot=snapshot,type='elliptical',savedat=True)
    ## find significance of rms then change below file to include this info
    #save all information to a .csv file
    #name_of_file = 'system_' + str(name_of_syst) + '.csv'
    #pf.save_outputs(name_of_file,snapshot=snapshot,systems=systems,syst=syst,inertial=inertia,physical=phys_ext,sig_spherical=sph_sig,sig_elliptical=ell_sig)
#save corotation dictionary to pickle for later analysis
# NOTE(review): indentation was lost in this copy of the file; the pickle write
# is placed after the loop (written once) -- confirm the original placement.
name_of_corot_dict = 'corotation_analysis_' + str(snapshot)
pf.write_to_pickle(corotation_dict,snapshot,name_of_corot_dict)
| 37.804878
| 177
| 0.743871
|
4a08d0c2fcbe9f8fd2bbf5bf359c09d9dbaf069c
| 9,506
|
py
|
Python
|
CybORG/CybORG/Agents/ComplexAgents/utilities/data_structures/Prioritised_Replay_Buffer.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | null | null | null |
CybORG/CybORG/Agents/ComplexAgents/utilities/data_structures/Prioritised_Replay_Buffer.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | null | null | null |
CybORG/CybORG/Agents/ComplexAgents/utilities/data_structures/Prioritised_Replay_Buffer.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
from CybORG.Agents.ComplexAgents.utilities.data_structures.Deque import Deque
from CybORG.Agents.ComplexAgents.utilities.data_structures.Max_Heap import Max_Heap
class Prioritised_Replay_Buffer(Max_Heap, Deque):
    """Data structure that maintains a deque, a heap and an array. The deque keeps track of which experiences are the oldest and so
    tells us which ones to delete once the buffer starts getting full. The heap lets us quickly retrieve the experience
    with the max td_value. And the array lets us do quick random samples with probabilities equal to the proportional td errors.
    We also keep track of the sum of the td values using a simple variable.
    NOTE that this implementation is not optimal in terms of speed. At some point I will make improvements to it.
    """

    def __init__(self, hyperparameters, seed=0):
        """Set up the three synchronized views (deque, heap, td-error array).

        Args:
            hyperparameters: dict expected to contain "buffer_size",
                "batch_size", "alpha_prioritised_replay",
                "beta_prioritised_replay" and "incremental_td_error".
            seed: seed for numpy's RNG used when sampling experiences.
        """
        # Each stored value is a 5-tuple (state, action, reward, next_state, done).
        Max_Heap.__init__(self, hyperparameters["buffer_size"], dimension_of_value_attribute=5, default_key_to_use=0)
        Deque.__init__(self, hyperparameters["buffer_size"], dimension_of_value_attribute=5)
        np.random.seed(seed)
        self.deques_td_errors = self.initialise_td_errors_array()
        # Heap writing starts at slot 1 -- presumably Max_Heap keeps a sentinel
        # at index 0 (TODO confirm against the Max_Heap implementation).
        self.heap_index_to_overwrite_next = 1
        self.number_experiences_in_deque = 0
        # Running sum of adapted priorities (|td| + eps) ** alpha over the buffer.
        self.adapted_overall_sum_of_td_errors = 0
        self.alpha = hyperparameters["alpha_prioritised_replay"]  # prioritisation exponent
        self.beta = hyperparameters["beta_prioritised_replay"]  # importance-sampling exponent
        self.incremental_td_error = hyperparameters["incremental_td_error"]  # eps keeping priorities > 0
        self.batch_size = hyperparameters["batch_size"]
        self.heap_indexes_to_update_td_error_for = None
        # Position of each field within a node's 5-tuple value.
        self.indexes_in_node_value_tuple = {
            "state": 0,
            "action": 1,
            "reward": 2,
            "next_state": 3,
            "done": 4
        }
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    def initialise_td_errors_array(self):
        """Initialises a deque of Nodes of length self.max_size"""
        return np.zeros(self.max_size)

    def add_experience(self, raw_td_error, state, action, reward, next_state, done):
        """Save an experience in the replay buffer"""
        # Proportional priority: (|td| + eps) ** alpha.
        td_error = (abs(raw_td_error) + self.incremental_td_error) ** self.alpha
        # The experience being overwritten drops out of the running priority sum.
        self.update_overall_sum(td_error, self.deque[self.deque_index_to_overwrite_next].key)
        self.update_deque_and_deque_td_errors(td_error, state, action, reward, next_state, done)
        self.update_heap_and_heap_index_to_overwrite()
        self.update_number_experiences_in_deque()
        self.update_deque_index_to_overwrite_next()

    def update_overall_sum(self, new_td_error, old_td_error):
        """Updates the overall sum of td_values present in the buffer"""
        self.adapted_overall_sum_of_td_errors += new_td_error - old_td_error

    def update_deque_and_deque_td_errors(self, td_error, state, action, reward, next_state, done):
        """Updates the deque by overwriting the oldest experience with the experience provided"""
        self.deques_td_errors[self.deque_index_to_overwrite_next] = td_error
        self.add_element_to_deque(td_error, (state, action, reward, next_state, done))

    def add_element_to_deque(self, new_key, new_value):
        """Adds an element to the deque"""
        self.update_deque_node_key_and_value(self.deque_index_to_overwrite_next, new_key, new_value)

    def update_heap_and_heap_index_to_overwrite(self):
        """Updates the heap by rearranging it given the new experience that was just incorporated into it. If we haven't
        reached max capacity then the new experience is added directly into the heap, otherwise a pointer on the heap has
        changed to reflect the new experience so there's no need to add it in"""
        if not self.reached_max_capacity:
            self.update_heap_element(self.heap_index_to_overwrite_next, self.deque[self.deque_index_to_overwrite_next])
            # Remember where this deque node lives in the heap so later
            # td-error updates can find it.
            self.deque[self.deque_index_to_overwrite_next].heap_index = self.heap_index_to_overwrite_next
            self.update_heap_index_to_overwrite_next()
        # In both cases the node's key changed, so re-heapify from its slot.
        heap_index_change = self.deque[self.deque_index_to_overwrite_next].heap_index
        self.reorganise_heap(heap_index_change)

    def update_heap_index_to_overwrite_next(self):
        """This updates the heap index to write over next. Once the buffer gets full we stop calling this function because
        the nodes the heap points to start being changed directly rather than the pointers on the heap changing"""
        self.heap_index_to_overwrite_next += 1

    def swap_heap_elements(self, index1, index2):
        """Swaps two position of two heap elements and then updates the heap_index stored in the two nodes. We have to override
        this method from Max_Heap so that it also updates the heap_index variables"""
        self.heap[index1], self.heap[index2] = self.heap[index2], self.heap[index1]
        self.heap[index1].heap_index = index1
        self.heap[index2].heap_index = index2

    def sample(self, rank_based=True):
        """Randomly samples a batch from experiences giving a higher likelihood to experiences with a higher td error. It then
        calculates an importance sampling weight for each sampled experience, you can read about this in the paper:
        https://arxiv.org/pdf/1511.05952.pdf"""
        # NOTE(review): `rank_based` is currently unused -- sampling is always
        # proportional to td error.
        experiences, deque_sample_indexes = self.pick_experiences_based_on_proportional_td_error()
        states, actions, rewards, next_states, dones = self.separate_out_data_types(experiences)
        # Stash the sampled indexes so update_td_errors() can refresh them later.
        self.deque_sample_indexes_to_update_td_error_for = deque_sample_indexes
        importance_sampling_weights = self.calculate_importance_sampling_weights(experiences)
        return (states, actions, rewards, next_states, dones), importance_sampling_weights

    def pick_experiences_based_on_proportional_td_error(self):
        """Randomly picks a batch of experiences with probability equal to their proportional td_errors"""
        probabilities = self.deques_td_errors / self.give_adapted_sum_of_td_errors()
        deque_sample_indexes = np.random.choice(range(len(self.deques_td_errors)), size=self.batch_size, replace=False, p=probabilities)
        experiences = self.deque[deque_sample_indexes]
        return experiences, deque_sample_indexes

    def separate_out_data_types(self, experiences):
        """Separates out experiences into their different parts and makes them tensors ready to be used in a pytorch model"""
        states = torch.from_numpy(np.vstack([e.value[self.indexes_in_node_value_tuple["state"]] for e in experiences])).float().to(self.device)
        actions = torch.from_numpy(np.vstack([e.value[self.indexes_in_node_value_tuple["action"]] for e in experiences])).float().to(self.device)
        rewards = torch.from_numpy(np.vstack([e.value[self.indexes_in_node_value_tuple["reward"]] for e in experiences])).float().to(self.device)
        next_states = torch.from_numpy(np.vstack([e.value[self.indexes_in_node_value_tuple["next_state"]] for e in experiences])).float().to(
            self.device)
        dones = torch.from_numpy(np.vstack([int(e.value[self.indexes_in_node_value_tuple["done"]]) for e in experiences])).float().to(self.device)
        return states, actions, rewards, next_states, dones

    def calculate_importance_sampling_weights(self, experiences):
        """Calculates the importance sampling weight of each observation in the sample. The weight is proportional to the td_error of the observation,
        see the paper here for more details: https://arxiv.org/pdf/1511.05952.pdf"""
        td_errors = [experience.key for experience in experiences]
        # w_i = (1/N * sum(p)/p_i) ** beta, then normalised by the batch max so
        # weights stay in (0, 1].
        importance_sampling_weights = [((1.0 / self.number_experiences_in_deque) * (self.give_adapted_sum_of_td_errors() / td_error)) ** self.beta for td_error in td_errors]
        sample_max_importance_weight = max(importance_sampling_weights)
        importance_sampling_weights = [is_weight / sample_max_importance_weight for is_weight in importance_sampling_weights]
        importance_sampling_weights = torch.tensor(importance_sampling_weights).float().to(self.device)
        return importance_sampling_weights

    def update_td_errors(self, td_errors):
        """Updates the td_errors for the provided heap indexes. The indexes should be the observations provided most
        recently by the give_sample method"""
        for raw_td_error, deque_index in zip(td_errors, self.deque_sample_indexes_to_update_td_error_for):
            td_error = (abs(raw_td_error) + self.incremental_td_error) ** self.alpha
            corresponding_heap_index = self.deque[deque_index].heap_index
            # Keep the running sum, the heap key and the sampling array in sync.
            self.update_overall_sum(td_error, self.heap[corresponding_heap_index].key)
            self.heap[corresponding_heap_index].key = td_error
            self.reorganise_heap(corresponding_heap_index)
            self.deques_td_errors[deque_index] = td_error

    def give_max_td_error(self):
        """Returns the maximum td error currently in the heap. Because it is a max heap this is the top element of the heap"""
        return self.give_max_key()

    def give_adapted_sum_of_td_errors(self):
        """Returns the sum of td errors of the experiences currently in the heap"""
        return self.adapted_overall_sum_of_td_errors

    def __len__(self):
        """Tells us how many experiences there are in the replay buffer. This number will never exceed self.max_size"""
        return self.number_experiences_in_deque
| 61.329032
| 173
| 0.739638
|
4a08d21344ca2d36e5ec2b76836c48da35b65f0a
| 1,891
|
py
|
Python
|
pytorch_toolkit/instance_segmentation/setup.py
|
xzry6/openvino_training_extensions
|
b8b17bbcc352633b0f0d3a99d6179a9ec616e426
|
[
"Apache-2.0"
] | 1
|
2019-08-01T05:57:28.000Z
|
2019-08-01T05:57:28.000Z
|
pytorch_toolkit/instance_segmentation/setup.py
|
xzry6/openvino_training_extensions
|
b8b17bbcc352633b0f0d3a99d6179a9ec616e426
|
[
"Apache-2.0"
] | null | null | null |
pytorch_toolkit/instance_segmentation/setup.py
|
xzry6/openvino_training_extensions
|
b8b17bbcc352633b0f0d3a99d6179a9ec616e426
|
[
"Apache-2.0"
] | 1
|
2019-08-21T08:36:08.000Z
|
2019-08-21T08:36:08.000Z
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path as osp
from setuptools import setup, find_packages
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
# Locations of the C++/CUDA extension sources, relative to this setup.py.
this_dir = osp.dirname(osp.abspath(__file__))
extensions_dir = osp.join(this_dir, 'segmentoly', 'extensions')

# CPU sources are always compiled.
cpu_sources = [
    osp.join(extensions_dir, 'extensions.cpp'),
    osp.join(extensions_dir, 'nms', 'nms.cpp'),
    osp.join(extensions_dir, 'nms', 'cpu', 'nms_kernel.cpp'),
    osp.join(extensions_dir, 'roi_align', 'roi_align.cpp'),
    osp.join(extensions_dir, 'roi_align', 'cpu', 'roi_align_kernel.cpp'),
]

# Compile the CUDA kernels too when both a GPU and the CUDA toolkit are present.
with_cuda = torch.cuda.is_available() and CUDA_HOME is not None
if with_cuda:
    gpu_sources = [
        osp.join(extensions_dir, 'nms', 'gpu', 'nms_kernel.cu'),
        osp.join(extensions_dir, 'roi_align', 'gpu', 'roi_align_kernel.cu'),
    ]
else:
    gpu_sources = []

extension_cls = CUDAExtension if with_cuda else CppExtension
define_macros = [("WITH_CUDA", None)] if with_cuda else []

setup(
    name='segmentoly',
    ext_modules=[
        extension_cls(
            name='segmentoly.extensions._EXTRA',
            sources=cpu_sources + gpu_sources,
            include_dirs=[extensions_dir],
            define_macros=define_macros,
        )
    ],
    cmdclass={'build_ext': BuildExtension},
    packages=find_packages(),
)
| 34.381818
| 92
| 0.687467
|
4a08d28a56defb5fae4fbd06220358dbd0f265b8
| 179
|
py
|
Python
|
seligimus/standards/ansi/escape_codes/erase.py
|
AustinScola/seligimus
|
206654fd8cf5e9b9a9da25439ccde2efe5a6cc7a
|
[
"MIT"
] | 1
|
2021-01-30T15:57:40.000Z
|
2021-01-30T15:57:40.000Z
|
seligimus/standards/ansi/escape_codes/erase.py
|
AustinScola/seligimus
|
206654fd8cf5e9b9a9da25439ccde2efe5a6cc7a
|
[
"MIT"
] | 100
|
2021-01-30T16:01:46.000Z
|
2021-07-24T14:00:04.000Z
|
seligimus/standards/ansi/escape_codes/erase.py
|
AustinScola/seligimus
|
206654fd8cf5e9b9a9da25439ccde2efe5a6cc7a
|
[
"MIT"
] | null | null | null |
"""ANSI escape codes for erasing text."""
from seligimus.standards.ansi.escape_codes.control import CONTROL_SEQUENCE_INTRODUCER
CLEAR_SCREEN = CONTROL_SEQUENCE_INTRODUCER + '2J'
| 35.8
| 85
| 0.826816
|
4a08d37a19b04f027f32e69fcc5f7213cc072fa6
| 516
|
py
|
Python
|
Section #1 - Basics/read.py
|
ArjenArumalingam/opencv-course
|
a1d6ea58379e1c19afcef207b04ba5090e09423c
|
[
"MIT"
] | null | null | null |
Section #1 - Basics/read.py
|
ArjenArumalingam/opencv-course
|
a1d6ea58379e1c19afcef207b04ba5090e09423c
|
[
"MIT"
] | null | null | null |
Section #1 - Basics/read.py
|
ArjenArumalingam/opencv-course
|
a1d6ea58379e1c19afcef207b04ba5090e09423c
|
[
"MIT"
] | null | null | null |
#pylint:disable=no-member
import cv2 as cv

# --- Reading images ---
img = cv.imread('Photos/cats.jpg')
# imshow(name of the window, image to display)
cv.imshow('Cats', img)
# Wait indefinitely (0 ms) for any key press before continuing.
cv.waitKey(0)

# --- Reading videos ---
# VideoCapture also accepts a device index (0, 1, 2, ... = webcams).
capture = cv.VideoCapture('../Resources/Videos/dog.mp4')
while True:
    # Reads the video frame by frame; isTrue is False once no frame is left.
    isTrue, frame = capture.read()
    # BUG FIX: stop when no frame could be read (end of file or read error).
    # Previously imshow was called with frame=None after the last frame,
    # which raises an OpenCV error.
    if not isTrue:
        break
    cv.imshow('Video', frame)
    # If 'd' is pressed, leave the loop early.
    if cv.waitKey(20) & 0xFF == ord('d'):
        break
capture.release()
cv.destroyAllWindows()
| 19.846154
| 56
| 0.660853
|
4a08d3ef569cdf0d941c0adf79d56fbefc96b11e
| 3,359
|
py
|
Python
|
hyperbolic/poincare/Point.py
|
zemora/Hyperbolic
|
0e8720e30fe89b49d6a06a987bbf46ccffa86ba6
|
[
"MIT"
] | null | null | null |
hyperbolic/poincare/Point.py
|
zemora/Hyperbolic
|
0e8720e30fe89b49d6a06a987bbf46ccffa86ba6
|
[
"MIT"
] | null | null | null |
hyperbolic/poincare/Point.py
|
zemora/Hyperbolic
|
0e8720e30fe89b49d6a06a987bbf46ccffa86ba6
|
[
"MIT"
] | null | null | null |
import math
from .. import util
from ..euclid.shapes import Circle as ECircle
from .. import poincare as module
from . import shapes
class Point:
    """A point of the Poincare disk model of the hyperbolic plane.

    Stores both the Euclidean disk coordinates (x, y) and the hyperbolic
    polar coordinates (hr, theta), where hr is the hyperbolic distance from
    the origin and theta the polar angle.
    """
    def __init__(self, x, y, hr=None, theta=None):
        self.x = x
        self.y = y
        # Hyperbolic polar coordinates
        if theta is None:
            theta = math.atan2(y, x)
        if hr is None:
            r = math.hypot(x, y)
            # Euclidean disk radius r maps to hyperbolic radius 2*atanh(r).
            hr = 2 * math.atanh(r)
        self.theta = theta
        self.hr = hr

    def __iter__(self):
        # Enables tuple unpacking: x, y = point
        return iter((self.x, self.y))

    def __getitem__(self, i):
        # Index access to the Euclidean coordinates: point[0] == x, point[1] == y.
        return (self.x, self.y)[i]

    def __len__(self):
        return 2

    def isIdeal(self):
        # A point on (numerically near) the unit circle is an ideal point.
        return util.nearZero(math.hypot(self.x, self.y) - 1)

    def polarAngleTo(self, p2, origin=None):
        """Angle swept from this point to p2, as seen from the disk origin."""
        if origin is None:
            return p2.theta - self.theta
        else:
            # Arbitrary-origin variant is not implemented yet.
            assert False, 'TODO'

    def distanceTo(self, p2):
        """Hyperbolic distance to p2 via the hyperbolic law of cosines."""
        r1, t1 = self.hr, self.theta
        r2, t2 = p2.hr, p2.theta
        d = math.acosh(math.cosh(r1)*math.cosh(r2)
                       - math.sinh(r1)*math.sinh(r2)*math.cos(t2-t1))
        return d

    def midpointWith(self, p2, frac=0.5):
        """Point a fraction `frac` of the way along the geodesic to p2."""
        d = self.distanceTo(p2)
        # Build the point at distance d*frac from the origin along theta=0,
        # then move it onto the self->p2 geodesic with a translation.
        pMid = Point.fromHPolar(d*frac, 0)
        return module.Transform.translation(self, p2).applyToPoint(pMid)

    @staticmethod
    def fromEuclid(x, y):
        """Construct from Euclidean disk coordinates; ideal points on the
        boundary become `Ideal` instances.

        Raises:
            ValueError: if (x, y) lies strictly outside the unit circle.
        """
        r = math.hypot(x, y)
        if util.nearZero(r - 1.0):
            return Ideal(math.atan2(y, x))
        elif r < 1.0:
            return Point(x, y)
        else:
            raise ValueError('Euclidean coordinates are outside the unit circle')

    @staticmethod
    def fromPolarEuclid(r, rad=None, deg=None):
        """Construct from Euclidean polar coordinates; exactly one of `rad`
        (radians) or `deg` (degrees) must be given.

        Raises:
            ValueError: if r > 1 (outside the unit circle).
        """
        assert (rad is None) != (deg is None)
        if rad is None: rad = math.radians(deg)
        if util.nearZero(r - 1.0):
            return Ideal(rad)
        elif r < 1.0:
            return Point(r*math.cos(rad), r*math.sin(rad))
        else:
            raise ValueError('Euclidean coordinates are outside the unit circle')

    @staticmethod
    def fromHPolar(hr, theta=None, deg=None):
        """Construct from hyperbolic polar coordinates; exactly one of `theta`
        (radians) or `deg` (degrees) must be given."""
        assert (theta is None) != (deg is None)
        if theta is None: theta = math.radians(deg)
        # Inverse of the radius map used in __init__: r = tanh(hr/2).
        r = math.tanh(hr/2)
        x, y = r*math.cos(theta), r*math.sin(theta)
        return Point(x, y, hr=hr, theta=theta)

    def __eq__(self, other):
        # Tolerant comparison of the Euclidean coordinates.
        # NOTE(review): defining __eq__ without __hash__ makes Point unhashable.
        return util.nearZero(self.x - other.x) and util.nearZero(self.y - other.y)

    def __repr__(self):
        return '{}({}, {})'.format(type(self).__name__,
                                   round(self.x, 3), round(self.y, 3))

    def toDrawables(self, elements, radius=0, hradius=None, **kwargs):
        """Render this point as drawable element(s).

        `elements` is presumably a drawSvg-style element factory -- confirm
        against the callers.  With `hradius` set (and a non-ideal point) the
        point is drawn as a hyperbolic circle; otherwise as a Euclidean dot
        of the given `radius`.
        """
        if hradius is not None and not isinstance(self, Ideal):
            shape = shapes.Circle.fromCenterRadius(Point(self.x, self.y), hradius)
            return shape.toDrawables(elements, **kwargs)
        else:
            return ECircle(self.x, self.y, radius).toDrawables(elements, **kwargs)
class Ideal(Point):
    """An ideal point: a point on the boundary circle of the Poincare disk.

    Represented purely by its angle `theta`; the Euclidean coordinates are
    derived on demand, and `hr` is deliberately never set (it would be
    infinite).
    """
    def __init__(self, theta):
        # Normalise the angle into [0, 2*pi).
        self.theta = theta % (2*math.pi)

    @property
    def x(self):
        # Ideal points lie on the unit circle.
        return math.cos(self.theta)

    @property
    def y(self):
        return math.sin(self.theta)

    def isIdeal(self):
        return True

    @classmethod
    def fromDegree(cls, deg):
        """Construct an ideal point from an angle given in degrees."""
        return cls(math.radians(deg))

    @classmethod
    def fromRadian(cls, rad):
        """Construct an ideal point from an angle given in radians."""
        return cls(rad)
| 33.257426
| 82
| 0.575171
|
4a08d4189b2ef2a5624a01d1382ed8bdd9d86eff
| 45
|
py
|
Python
|
src/model/__init__.py
|
SignorinoY/immortal-python
|
b2d3c9a3b5c262d16785026cfaf6f022e6bbefd7
|
[
"MIT"
] | 1
|
2021-06-05T03:45:15.000Z
|
2021-06-05T03:45:15.000Z
|
src/model/__init__.py
|
SignorinoY/immortal-python
|
b2d3c9a3b5c262d16785026cfaf6f022e6bbefd7
|
[
"MIT"
] | null | null | null |
src/model/__init__.py
|
SignorinoY/immortal-python
|
b2d3c9a3b5c262d16785026cfaf6f022e6bbefd7
|
[
"MIT"
] | null | null | null |
from .classfier import FashionMNISTClassfier
| 22.5
| 44
| 0.888889
|
4a08d454a1aadfcdc8f9c5945394f64ea5875444
| 2,542
|
py
|
Python
|
nova/tests/integrated/v3/test_flavor_extraspecs.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 1
|
2015-11-25T10:18:22.000Z
|
2015-11-25T10:18:22.000Z
|
nova/tests/integrated/v3/test_flavor_extraspecs.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 9
|
2015-05-20T11:20:17.000Z
|
2017-07-27T08:21:33.000Z
|
nova/tests/integrated/v3/test_flavor_extraspecs.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 13
|
2015-05-05T09:34:04.000Z
|
2017-11-08T02:03:46.000Z
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.integrated.v3 import api_sample_base
class FlavorExtraSpecsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API sample tests covering CRUD operations on flavor extra specs."""

    extension_name = 'flavor-extra-specs'

    def _flavor_extra_specs_create(self):
        """POST extra specs onto flavor 1 and verify the create response."""
        template_subs = {'value1': 'value1', 'value2': 'value2'}
        response = self._do_post('flavors/1/os-extra_specs',
                                 'flavor-extra-specs-create-req',
                                 template_subs)
        self._verify_response('flavor-extra-specs-create-resp',
                              template_subs, response, 200)

    def test_flavor_extra_specs_create(self):
        """Creating extra specs succeeds end to end."""
        self._flavor_extra_specs_create()

    def test_flavor_extra_specs_get(self):
        """A single extra spec can be fetched by key."""
        self._flavor_extra_specs_create()
        response = self._do_get('flavors/1/os-extra_specs/key1')
        self._verify_response('flavor-extra-specs-get-resp',
                              {'value1': 'value1'}, response, 200)

    def test_flavor_extra_specs_list(self):
        """All extra specs of a flavor can be listed."""
        self._flavor_extra_specs_create()
        response = self._do_get('flavors/1/os-extra_specs')
        self._verify_response('flavor-extra-specs-list-resp',
                              {'value1': 'value1', 'value2': 'value2'},
                              response, 200)

    def test_flavor_extra_specs_update(self):
        """An existing extra spec can be updated via PUT."""
        update_subs = {'value1': 'new_value1'}
        self._flavor_extra_specs_create()
        response = self._do_put('flavors/1/os-extra_specs/key1',
                                'flavor-extra-specs-update-req', update_subs)
        self._verify_response('flavor-extra-specs-update-resp',
                              update_subs, response, 200)

    def test_flavor_extra_specs_delete(self):
        """Deleting an extra spec returns 200 with an empty body."""
        self._flavor_extra_specs_create()
        response = self._do_delete('flavors/1/os-extra_specs/key1')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, '')
| 40.349206
| 78
| 0.644768
|
4a08d49669987959e79396a69b3a2a091d159348
| 5,438
|
py
|
Python
|
baselines/test_baselines_cli.py
|
iarai/NeurIPS2021-traffic4cast
|
a7b31f6727cd639370d3237cda74c27e54eb5a32
|
[
"Apache-2.0"
] | 19
|
2021-06-20T21:35:49.000Z
|
2022-03-01T14:49:19.000Z
|
baselines/test_baselines_cli.py
|
iarai/NeurIPS2021-traffic4cast
|
a7b31f6727cd639370d3237cda74c27e54eb5a32
|
[
"Apache-2.0"
] | 3
|
2021-06-15T19:57:39.000Z
|
2021-09-09T15:52:56.000Z
|
baselines/test_baselines_cli.py
|
iarai/NeurIPS2021-traffic4cast
|
a7b31f6727cd639370d3237cda74c27e54eb5a32
|
[
"Apache-2.0"
] | 9
|
2021-06-20T21:37:19.000Z
|
2021-12-15T15:30:05.000Z
|
# Copyright 2021 Institute of Advanced Research in Artificial Intelligence (IARAI) GmbH.
# IARAI licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import tempfile
import zipfile
from pathlib import Path
import numpy as np
import pytest
from baselines.baselines_cli import main
from competition.competition_constants import MAX_TEST_SLOT_INDEX
from competition.scorecomp import scorecomp
from util.h5_util import write_data_to_h5
@pytest.mark.skipif(os.getenv("CI") is not None, reason="Not enough resources in ci.")
@pytest.mark.parametrize(
    "model_str", ["naive_average", "unet", "gcn"],
)
def test_baselines_cli_run_through(caplog, model_str):
    """End-to-end smoke test: build a tiny synthetic data set for one fake
    city ("DOWNTOWN"), run the baselines CLI for the given model, and check
    the produced submissions score without errors."""
    caplog.set_level(logging.INFO)
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_dir_path = Path(temp_dir)
        # Expected raw-data layout for the fake city.
        data_raw_path = temp_dir_path / "raw"
        dynamic_file = data_raw_path / "DOWNTOWN" / "training" / "1970-01-01_DOWNTOWN_8ch.h5"
        test_temporal = data_raw_path / "DOWNTOWN" / "DOWNTOWN_test_temporal.h5"
        static_file = data_raw_path / "DOWNTOWN" / "DOWNTOWN_static.h5"
        additional_test_temporal = data_raw_path / "DOWNTOWN" / "DOWNTOWN_test_additional_temporal.h5"
        test_spatiotemporal = data_raw_path / "DOWNTOWN" / "DOWNTOWN_test_spatiotemporal.h5"
        additional_test_spatiotemporal = data_raw_path / "DOWNTOWN" / "DOWNTOWN_test_additional_spatiotemporal.h5"
        submission_output_dir = temp_dir_path / "submissions"
        dynamic_file.parent.mkdir(exist_ok=True, parents=True)
        # One training day of random data: 288 slots of 495x436 frames, 8 channels.
        data = np.random.randint(256, size=(288, 495, 436, 8), dtype=np.uint8)
        write_data_to_h5(data=data, filename=dynamic_file, compression="lzf", compression_level=None)
        # Random binary static map; the one-pixel image border is forced to 0.
        data = np.random.randint(2, size=(9, 495, 436), dtype=np.uint8) * 255
        data[:, 0, :] = 0
        data[:, 494, :] = 0
        data[:, :, 0] = 0
        data[:, :, 435] = 0
        write_data_to_h5(data=data, filename=static_file, compression="lzf", compression_level=None)
        num_tests_per_file = 4
        # Test inputs (12 input slots each) plus the per-test extra metadata
        # for both competition tracks.
        data = np.random.randint(256, size=(num_tests_per_file, 12, 495, 436, 8), dtype=np.uint8)
        write_data_to_h5(data=data, filename=str(test_temporal), compression="lzf", compression_level=None)
        data = np.random.randint(MAX_TEST_SLOT_INDEX, size=(num_tests_per_file, 2), dtype=np.uint8)
        write_data_to_h5(data=data, filename=additional_test_temporal, compression="lzf", compression_level=None)
        data = np.random.randint(256, size=(num_tests_per_file, 12, 495, 436, 8), dtype=np.uint8)
        write_data_to_h5(data=data, filename=test_spatiotemporal, compression="lzf", compression_level=None)
        data = np.random.randint(MAX_TEST_SLOT_INDEX, size=(num_tests_per_file, 495, 436, 8), dtype=np.uint8)
        write_data_to_h5(data=data, filename=additional_test_spatiotemporal, compression="lzf", compression_level=None)
        # Random ground truth (6 prediction slots), zipped with the static
        # file exactly the way the scoring script expects, per track.
        ground_truth_dir = temp_dir_path / "ground_truth"
        ground_truth_dir.mkdir()
        for competition in ["temporal", "spatiotemporal"]:
            data = np.random.randint(256, size=(num_tests_per_file, 6, 495, 436, 8), dtype=np.uint8)
            ground_truth_h5 = ground_truth_dir / f"DOWNTOWN_test_{competition}.h5"
            write_data_to_h5(data, ground_truth_h5, compression="lzf", compression_level=None)
            with zipfile.ZipFile(ground_truth_dir / f"ground_truth_{competition}.zip", "w") as ground_truth_f:
                ground_truth_f.write(ground_truth_h5, arcname=f"DOWNTOWN/DOWNTOWN_test_{competition}.h5")
                ground_truth_f.write(static_file, arcname=f"DOWNTOWN/DOWNTOWN_static.h5")
        # Patch the scorer so it accepts the reduced number of tests.
        scorecomp.EXPECTED_SHAPE = (num_tests_per_file, 6, 495, 436, 8)
        main(
            [
                "--model_str",
                model_str,
                "--limit",
                "4",
                "--epochs",
                "1",
                "--data_raw_path",
                str(data_raw_path),
                "--num_workers",
                "1",
                "--ground_truth_dir",
                str(ground_truth_dir),
                "--submission_output_dir",
                str(submission_output_dir),
                "--batch_size",
                "2",
                "--num_tests_per_file",
                str(num_tests_per_file),
                "--device",
                "cpu",
                "--batch_size_scoring",
                "2",
            ]
        )
        # The CLI is expected to leave 4 scoring logs and 2 submission zips
        # behind -- presumably one zip per competition track; confirm against
        # baselines_cli.main.
        logs = list(Path(submission_output_dir).rglob("submission*.log"))
        assert len(logs) == 4, f"found {len(logs)}, expected 4. {logs}"
        for log in logs:
            content = log.read_text()
            assert "ERROR" not in content
            assert "completed ok with score" in content, content
            print(log)
            print(content)
        submissions = list(submission_output_dir.rglob("submission*.zip"))
        assert len(submissions) == 2
| 47.701754
| 119
| 0.657411
|
4a08d49ad4e18bc1f3a92712b3b8189c2ae45b05
| 1,285
|
py
|
Python
|
src/1_1_solution.py
|
Rchase8/AdventOfCode2020
|
d2d98825177b3ff3b0eece694505b9b36ffd5b04
|
[
"MIT"
] | null | null | null |
src/1_1_solution.py
|
Rchase8/AdventOfCode2020
|
d2d98825177b3ff3b0eece694505b9b36ffd5b04
|
[
"MIT"
] | null | null | null |
src/1_1_solution.py
|
Rchase8/AdventOfCode2020
|
d2d98825177b3ff3b0eece694505b9b36ffd5b04
|
[
"MIT"
] | null | null | null |
def get_number_list():
    """Read the puzzle input file and return one integer per line.

    Returns:
        list of int parsed from "Data/1_1inputs.txt".
    """
    # Use a context manager so the file handle is closed even on error
    # (the original opened the file and never closed it).
    with open("Data/1_1inputs.txt", "r") as file:
        return [int(line) for line in file]
# block ends
def filter_number_list(the_numberlist, criteria_def):
    """Find the first pair of entries (at distinct indices) satisfying criteria_def.

    Scans ordered pairs in the same order as the original nested index loops.

    Args:
        the_numberlist: list of numbers to search.
        criteria_def: predicate called as criteria_def(first_number, second_number).

    Returns:
        [first_number, second_number] for the first matching ordered pair,
        or an empty list when no pair matches.
    """
    for first_pos, first_number in enumerate(the_numberlist):
        for second_pos, second_number in enumerate(the_numberlist):
            if first_pos == second_pos:
                continue  # never pair an entry with itself
            if criteria_def(first_number, second_number):
                # Returning directly replaces the original break-out-of-both-
                # loops dance, and the leftover per-pair debug print (which
                # produced O(n^2) console output) has been removed.
                return [first_number, second_number]
    return []
# block ends
def is_sum_2020(the_first_number, the_second_number):
    """Predicate: do the two numbers add up to exactly 2020?"""
    return (the_first_number + the_second_number) == 2020
# block ends
# main
# Entry point: load the expense report, find the pair of entries summing to
# 2020 and report the product of the two (Advent of Code 2020, day 1 part 1).
numberlist = get_number_list()
print(numberlist)
results = filter_number_list(numberlist, is_sum_2020)
print(results)
# results holds the matching pair; the puzzle answer is their product.
# NOTE(review): this raises IndexError if no pair sums to 2020.
answer = results[0] * results[1]
print(answer)
| 22.946429
| 57
| 0.716732
|
4a08d4e4d4b777eef14b11d27ba8b63d08ad0a0d
| 2,781
|
py
|
Python
|
scooch/configurize_helper.py
|
PandoraMedia/scooch
|
1890f139ce411395de1022fa3764946007a35e58
|
[
"Apache-2.0"
] | 6
|
2021-11-09T17:46:35.000Z
|
2022-03-12T02:10:45.000Z
|
scooch/configurize_helper.py
|
PandoraMedia/scooch
|
1890f139ce411395de1022fa3764946007a35e58
|
[
"Apache-2.0"
] | null | null | null |
scooch/configurize_helper.py
|
PandoraMedia/scooch
|
1890f139ce411395de1022fa3764946007a35e58
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 Pandora Media, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python standard library imports
import inspect
import functools
# Third party imports
# None.
# Local imports
from .configurable import Configurable
def configurize(cls=None, base_class=None):
    """
    Takes a class and makes it scooch configurable. This will prepend "Scooch" to the class name
    to distinguish it from the class definition. The returned / transformed class will be accessible
    via the name assigned in code, although it must be referred to as "Scooch<original_class_name>" in
    scooch config files.

    Usable either as a plain decorator (``@configurize``) or as a decorator
    factory (``@configurize(base_class=SomeConfigurable)``).

    Args:
        cls: class - A python class that will be made configurable via scooch.

        base_class: class - A python class that the newly minted scooch configurable class will inherit
        from.

    Returns:
        class - The augmented Configurable class that may be configured via scooch.
    """
    def configurize_impl(cls, base_cls=None):
        # Wraps `cls` in a new class that also inherits from the scooch
        # Configurable base, deriving the scooch parameter spec from the
        # original __init__ signature.
        # TODO [matt.c.mccallum 11.08.21]: Check the class is not already `Configurable`
        # TODO [matt.c.mccallum 11.08.21]: Check that base_cls is `Configurable`
        # TODO [matt.c.mccallum 11.08.21]: Inherit class documentation too
        if base_cls is None:
            base_cls = Configurable
        class DerivedConfigurable(cls, base_cls):
            """
            """
            __SCOOCH_NAME__ = 'Scooch' + cls.__name__
            # Every __init__ parameter of the wrapped class becomes a scooch
            # parameter; defaults are carried over where present.
            # TODO [matt.c.mccallum 11.08.21]: Add type info here
            __PARAMS__ = {param: f'<> - Parameter derived by extending base class: {cls.__name__}' for param in inspect.signature(cls).parameters.keys()}
            __PARAM_DEFAULTS__ = {param: val.default for param, val in inspect.signature(cls).parameters.items() if val.default != val.empty}
            def __init__(self, cfg):
                # Forward the config section named after this class to the
                # wrapped class's constructor as keyword arguments.
                cls.__init__(self, **(cfg[self.__class__.__name__]))
                Configurable.__init__(self, cfg)
        return DerivedConfigurable

    # Dispatch on how the decorator was invoked.
    if base_class is None and cls is None:
        # NOTE(review): a bare ``configurize()`` call returns None rather than
        # a decorator -- confirm this is intentional.
        return None
    if base_class is None:
        # Used as a plain decorator: @configurize
        return configurize_impl(cls)
    elif cls is None:
        # Used as a decorator factory: @configurize(base_class=...)
        return functools.partial(configurize_impl, base_cls=base_class)
    else:
        # Called directly with both arguments.
        return configurize_impl(cls, base_class)
| 34.333333
| 153
| 0.688601
|
4a08d5665b6ae55c49a7526aadb3ba7523fdc24a
| 1,047
|
py
|
Python
|
manage.py
|
kathe-ruiz/company_match
|
9e3e18a09d9b4c58f8d650bc0e920e7141947433
|
[
"MIT"
] | null | null | null |
manage.py
|
kathe-ruiz/company_match
|
9e3e18a09d9b4c58f8d650bc0e920e7141947433
|
[
"MIT"
] | null | null | null |
manage.py
|
kathe-ruiz/company_match
|
9e3e18a09d9b4c58f8d650bc0e920e7141947433
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
from pathlib import Path
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# company_match directory.
current_path = Path(__file__).parent.resolve()
sys.path.append(str(current_path / "company_match"))
execute_from_command_line(sys.argv)
| 32.71875
| 77
| 0.660936
|
4a08d57f0c8889074d2da065b14affbe21c2bb67
| 6,619
|
py
|
Python
|
bcselector/tests/no_cost_based_filter_methods_test.py
|
Kaketo/bc-selector
|
c7acd1033bee741530735fb601f9e464c3ccc26f
|
[
"MIT"
] | 5
|
2020-03-30T17:36:11.000Z
|
2021-06-16T09:14:20.000Z
|
bcselector/tests/no_cost_based_filter_methods_test.py
|
Kaketo/bc-selector
|
c7acd1033bee741530735fb601f9e464c3ccc26f
|
[
"MIT"
] | 6
|
2020-04-28T16:32:04.000Z
|
2020-12-18T13:35:28.000Z
|
bcselector/tests/no_cost_based_filter_methods_test.py
|
Kaketo/bc-selector
|
c7acd1033bee741530735fb601f9e464c3ccc26f
|
[
"MIT"
] | 2
|
2020-04-28T15:59:51.000Z
|
2020-05-14T08:11:33.000Z
|
import unittest
import numpy as np
from bcselector.filter_methods.no_cost_based_filter_methods import no_cost_find_best_feature
from bcselector.information_theory.j_criterion_approximations import mim, mifs, mrmr, jmi, cife
class TestNoCostMethod(unittest.TestCase):
def test_simple_input_mim(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
selected_feature, criterion_value, cost = no_cost_find_best_feature(
j_criterion_func=mim,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs)
self.assertIsInstance(selected_feature,int)
self.assertIsInstance(criterion_value, float)
self.assertIsInstance(cost, float)
def test_simple_input_mifs(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variables_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
beta = 10
selected_feature, criterion_value, cost = no_cost_find_best_feature(
j_criterion_func=mifs,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variables_index,
beta=beta)
self.assertIsInstance(selected_feature,int)
self.assertIsInstance(criterion_value, float)
self.assertIsInstance(cost, float)
def test_simple_input_mifs_no_beta_provided(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variables_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
with self.assertWarns(Warning): no_cost_find_best_feature(
j_criterion_func=mifs,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variables_index)
def test_simple_input_mrmr(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variable_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
selected_feature, criterion_value, cost = no_cost_find_best_feature(
j_criterion_func=mrmr,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variable_index)
self.assertIsInstance(selected_feature,int)
self.assertIsInstance(criterion_value, float)
self.assertIsInstance(cost, float)
def test_simple_input_jmi(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variable_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
selected_feature, criterion_value, cost = no_cost_find_best_feature(
j_criterion_func=jmi,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variable_index)
self.assertIsInstance(selected_feature,int)
self.assertIsInstance(criterion_value, float)
self.assertIsInstance(cost, float)
def test_simple_input_cife(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variable_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
beta = 10
selected_feature, criterion_value, cost = no_cost_find_best_feature(
j_criterion_func=cife,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variable_index,
beta=beta)
self.assertIsInstance(selected_feature,int)
self.assertIsInstance(criterion_value, float)
self.assertIsInstance(cost, float)
def test_simple_input_cife_no_beta_provided(self):
integer_matrix = np.random.randint(0,10,(100,10))
diverse_target = np.random.randint(0,10,(100))
prev_variables_index = [3,4,5]
candidates_index = [0,1,2,6,7,8,9]
costs = [ 1.76, 0.19, -0.36, 0.96, 0.41, 0.17, -0.36, 0.75, 0.79, -1.38]
with self.assertWarns(Warning): no_cost_find_best_feature(
j_criterion_func=cife,
data=integer_matrix,
target_variable=diverse_target,
possible_variables_index=candidates_index,
costs=costs,
prev_variables_index=prev_variables_index)
if __name__ == '__main__':
unittest.main()
| 50.143939
| 95
| 0.539205
|
4a08d692a00ed9e773d0ec67685b01341114a7e6
| 150
|
py
|
Python
|
application/dataprocessing/cors.py
|
oooooooooooooooosip/analytics_backend
|
4006b736c2824af6308b414e32a53a77f805d4cb
|
[
"MIT"
] | 3
|
2019-10-10T05:49:44.000Z
|
2020-05-14T16:53:30.000Z
|
application/dataprocessing/cors.py
|
oooooooooooooooosip/analytics_backend
|
4006b736c2824af6308b414e32a53a77f805d4cb
|
[
"MIT"
] | 35
|
2020-06-06T01:48:56.000Z
|
2022-03-09T08:59:48.000Z
|
application/dataprocessing/cors.py
|
oooooooooooooooosip/analytics_backend
|
4006b736c2824af6308b414e32a53a77f805d4cb
|
[
"MIT"
] | 31
|
2020-04-26T13:12:53.000Z
|
2022-03-28T13:13:35.000Z
|
class CorsMiddleware(object):
def process_response(self, req, resp):
response["Access-Control-Allow-Origin"] = "*"
return response
| 37.5
| 53
| 0.673333
|
4a08d6afcb61e2c155976442e455ca2a4d29e1f0
| 850
|
py
|
Python
|
src/virtates/web/app.py
|
mtnmts/Virtattes
|
8a54d7b09567e82e46f44a924044bb2520f28b75
|
[
"MIT"
] | null | null | null |
src/virtates/web/app.py
|
mtnmts/Virtattes
|
8a54d7b09567e82e46f44a924044bb2520f28b75
|
[
"MIT"
] | null | null | null |
src/virtates/web/app.py
|
mtnmts/Virtattes
|
8a54d7b09567e82e46f44a924044bb2520f28b75
|
[
"MIT"
] | null | null | null |
# he smell of paint, a flask of wine
# And turn those faces all to me
# The blunderbuss and halberd-shaft
# And Dutch respectability
from flask import Flask, Blueprint, render_template, abort
from jinja2 import TemplateNotFound
APP_NAME = "Virtates"
def build_app(blueprints):
app = Flask(APP_NAME)
blueprints = build_blueprints()
for bp in blueprints:
app.register_blueprints(bp)
return app
def build_blueprints():
return [create_main_blueprint()]
def create_main_blueprint():
main_page = Blueprint("main_page", APP_NAME, template_folder='templates')
@main_page.route('/', defaults={'page': 'index'})
@main_page.route('/<page>')
def show(page):
try:
return render_template('pages/%s.html' % page)
except TemplateNotFound:
abort(404)
return main_page
| 22.972973
| 77
| 0.687059
|
4a08d7b06fbd59dfb9d21a58234e90c6e8fe1424
| 8,592
|
py
|
Python
|
tests/test_searchHandler.py
|
CameronJRAllan/eTree-Browser
|
72601450eb8538f79511715c5793a8594bdcfc80
|
[
"MIT"
] | 1
|
2019-07-19T20:03:00.000Z
|
2019-07-19T20:03:00.000Z
|
tests/test_searchHandler.py
|
CameronJRAllan/eTree-Browser
|
72601450eb8538f79511715c5793a8594bdcfc80
|
[
"MIT"
] | null | null | null |
tests/test_searchHandler.py
|
CameronJRAllan/eTree-Browser
|
72601450eb8538f79511715c5793a8594bdcfc80
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import pytest
import application
import sys
from PyQt5 import QtWidgets, QtCore, QtGui
from unittest import TestCase
import search
class TestSearchHandlerQt():
@pytest.fixture(scope="function", autouse=True)
def setup(self, qtbot):
# Create dialog to show this instance
self.dialog = QtWidgets.QMainWindow()
qtbot.add_widget(self.dialog)
# Start main event loop
self.prog = application.mainWindow(self.dialog)
# Search handler
self.searchHandler = search.SearchHandler(self.prog)
def test_add_custom_condition(self):
# Check is zero
assert (self.prog.searchForm.advancedSearchLayout.count() == 0)
# Check is 3
self.searchHandler.add_custom_condition()
assert (self.prog.searchForm.advancedSearchLayout.count() == 3)
# Check is 6
self.searchHandler.add_custom_condition()
assert (self.prog.searchForm.advancedSearchLayout.count() == 6)
def test_remove_custom_condition(self):
# Check is zero
assert (self.prog.searchForm.advancedSearchLayout.count() == 0)
self.searchHandler.add_custom_condition()
self.searchHandler.remove_custom_condition()
assert (self.prog.searchForm.advancedSearchLayout.count() == 0)
self.searchHandler.add_custom_condition()
self.searchHandler.add_custom_condition()
self.searchHandler.remove_custom_condition()
assert (self.prog.searchForm.advancedSearchLayout.count() == 3)
self.searchHandler.remove_custom_condition()
assert (self.prog.searchForm.advancedSearchLayout.count() == 0)
def test_generate_field_combo(self):
fieldCombo = self.searchHandler.generate_field_combo()
assert (isinstance(fieldCombo, QtWidgets.QComboBox))
def test_update_auto_complete(self, qtbot):
# Click on button to add new custom condition
qtbot.mouseClick(self.prog.searchForm.addConditionBtn, QtCore.Qt.LeftButton)
# Check auto-complete
try:
self.prog.searchHandler.update_auto_complete()
except Exception as e:
pytest.fail()
def test_load_saved_search(self):
model = QtGui.QStandardItemModel()
item = QtGui.QStandardItem("Grateful Dead")
model.appendRow(item)
self.prog.searchHandler.load_saved_search(model.index(0, 0))
assert(self.prog.topMenuTabs.currentIndex() == 2)
def test_generate_condition_combo(self):
combo = self.searchHandler.generate_condition_combo()
assert(isinstance(combo, QtWidgets.QComboBox))
assert(combo.count() == 8)
def test_generate_advanced_search(self):
result = self.searchHandler.generate_advanced_search("Artist", "is", "Grooveshire")
assert(result=='FILTER(?name="Grooveshire") ')
result = self.searchHandler.generate_advanced_search("Artist", "is not", "Grooveshire")
assert(result=='FILTER(?name!="Grooveshire") ')
result = self.searchHandler.generate_advanced_search("Artist", "starts with", "Grooveshire")
assert(result=='FILTER(STRSTARTS(?name,"Grooveshire")) ')
result = self.searchHandler.generate_advanced_search("Artist", "ends with", "Grooveshire")
assert(result=='FILTER(STRENDS(?name,"Grooveshire")) ')
result = self.searchHandler.generate_advanced_search("Artist", "contains", "Grooveshire")
assert(result=='FILTER(CONTAINS(?name,"Grooveshire")) ')
result = self.searchHandler.generate_advanced_search("Artist", "does not contain", "Grooveshire")
assert(result=='FILTER(!CONTAINS(?name,"Grooveshire") ')
result = self.searchHandler.generate_advanced_search("Artist", "matches RegEx", "Grooveshire")
assert(result=='FILTER(regex(?name, "Grooveshire", "i")) ')
result = self.searchHandler.generate_advanced_search("Genre", "does not match RegEx", "Grooveshire")
assert(result=='FILTER(!regex(?genre, "Grooveshire", "i")) ')
result = self.searchHandler.generate_advanced_search("Genre", "contains", "folk")
assert(result=='FILTER(CONTAINS(?genre,"folk")) ')
result = self.searchHandler.generate_advanced_search("Location", "contains", "New York City, USA")
assert(result=='FILTER(CONTAINS(?place,"New York City, USA")) ')
def test_custom_search(self, qtbot):
# Setup custom search boxes
qtbot.mouseClick(self.prog.searchForm.addConditionBtn, QtCore.Qt.LeftButton)
self.prog.searchForm.advancedSearchLayout.itemAt(2).widget().setText("Jason Mraz")
customConditions = self.prog.searchHandler.custom_search()
assert(isinstance(customConditions, list))
assert(customConditions[0] == """FILTER(?name="Jason Mraz") """)
def test_change_condition_or(self):
customConditions = ['FILTER(?name="Grateful Dead") ', 'FILTER(?name!="Jason Mraz") ']
result = self.searchHandler.change_condition_or(customConditions)
assert("||" in result[0])
assert("||" not in result[1])
customConditions = ['FILTER(?name="Grateful Dead") ', 'FILTER(?name!="Jason Mraz") ']
result = self.searchHandler.change_condition_or(customConditions)
assert("||" in result[0])
assert("||" not in result[1])
def test_ensure_matching_parentheses(self):
customConditions = ['FILTER(?name="Grateful Dead") ', 'FILTER(?name!="Jason Mraz") ']
result = self.searchHandler.ensure_matching_parentheses(customConditions)
assert(result.count('(') == result.count(')'))
customConditions = ['FILTER(?name="Grateful Dead") ', 'FILTER(?name!="Jason Mraz")) ']
result = self.searchHandler.ensure_matching_parentheses(customConditions)
assert(result.count('(') == result.count(')'))
#
# customConditions = ['FILTER(?name="Grateful Dead") ', 'FILTER(?name!="Jason Mraz")) ']
# result = self.searchHandler.ensure_matching_parentheses(customConditions)
# assert(result.count('(') == result.count(')'))
def test_generate_mapped_locations(self):
self.prog.searchForm.locationRangeFilter.setText("500")
self.prog.searchForm.locationFilter.setText("Gettysburg, PA, USA")
locations = self.prog.searchHandler.generate_mapped_locations()
assert(len(locations)==11404)
def test_get_mapped_countries(self):
countries = self.prog.searchHandler.get_mapped_countries("Israel")
assert(len(countries) == 7)
countries = self.prog.searchHandler.get_mapped_countries("United Kingdom, USA, Israel")
assert(len(countries) == 31208)
countries = self.prog.searchHandler.get_mapped_countries("France, Germany")
assert(len(countries) == 607)
def test_perform_search(self):
self.prog.searchForm.artistFilter.setText("Jason Mraz")
self.prog.searchForm.artistFilter.setText("")
try:
self.prog.searchHandler.perform_search()
except Exception as e:
print(e)
pytest.fail(e)
# Check fields were reset
assert(self.prog.searchForm.artistFilter.text() == "")
assert(self.prog.searchForm.genreFilter.text() == "")
assert(self.prog.searchForm.venueFilter.text() == "")
assert(self.prog.searchForm.locationFilter.text() == "")
def test_reset_search_form(self):
# Add text to boxes
self.prog.searchForm.artistFilter.setText("Artist")
self.prog.searchForm.genreFilter.setText("Genre")
self.prog.searchForm.trackNameFilter.setText("Track Name")
self.prog.searchForm.dateFrom.setDate(QtCore.QDate(1952, 2, 12))
self.prog.searchForm.dateTo.setDate(QtCore.QDate(2014, 2, 12))
self.prog.searchForm.venueFilter.setText("Venue")
self.prog.searchForm.locationFilter.setText("New York City, NYC")
self.prog.searchForm.locationRangeFilter.setText("200")
self.prog.searchForm.countryFilter.setText("USA")
# Call reset function
self.prog.searchHandler.reset_search_form()
# Assert they were reset as expected
assert(len(self.prog.searchForm.artistFilter.text()) == 0)
assert(len(self.prog.searchForm.genreFilter.text()) == 0)
assert(len(self.prog.searchForm.trackNameFilter.text()) == 0)
assert(len(self.prog.searchForm.venueFilter.text()) == 0)
assert(len(self.prog.searchForm.locationFilter.text()) == 0)
assert(len(self.prog.searchForm.locationRangeFilter.text()) == 0)
assert(len(self.prog.searchForm.countryFilter.text()) == 0)
assert(self.prog.searchForm.dateFrom.date() == QtCore.QDate(1950, 1, 1))
assert(self.prog.searchForm.dateTo.date() == QtCore.QDate(2017, 1, 1))
def test_setup_views(self):
# pytest.fail()
requestedViews = ['map', 'timeline', 'table']
results = {'results' : {'bindings' : [] },
'head' : { 'vars' : []
}}
try:
self.searchHandler.setup_views(requestedViews, results)
except Exception as e:
print(e)
pytest.fail()
| 40.149533
| 104
| 0.716131
|
4a08d87b4148fc4c67b4247759e108162de4b4bd
| 473
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/choropleth/_visible.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/choropleth/_visible.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/choropleth/_visible.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="visible", parent_name="choropleth", **kwargs):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", [True, False, "legendonly"]),
**kwargs
)
| 36.384615
| 82
| 0.663848
|
4a08db0db055e76efbb58382f5ea27463598a629
| 10,992
|
py
|
Python
|
test/test_profile.py
|
esoma/pgo
|
7960dac20431dd6358bceb17a70250637d0d7f53
|
[
"MIT"
] | 1
|
2022-01-29T20:57:45.000Z
|
2022-01-29T20:57:45.000Z
|
test/test_profile.py
|
esoma/pgo
|
7960dac20431dd6358bceb17a70250637d0d7f53
|
[
"MIT"
] | 4
|
2021-03-18T01:13:15.000Z
|
2021-08-30T12:16:48.000Z
|
test/test_profile.py
|
esoma/pgo
|
7960dac20431dd6358bceb17a70250637d0d7f53
|
[
"MIT"
] | null | null | null |
# pgo
from pgo.setuptools import compiler
from pgo.setuptools.profile import ProfileError
# pytest
import pytest
# python
import os
import sys
import textwrap
# setuptools
import distutils.errors
from setuptools import Distribution
@pytest.fixture
def profile_command():
return [sys.executable, '-c', 'print("hello world")']
@pytest.fixture
def distribution(profile_command):
return Distribution({
"pgo": { "profile_command": list(profile_command) }
})
@pytest.mark.parametrize('dist_kwargs', [
{},
{"pgo": {}},
])
def test_not_available_with_no_profile_command(argv, dist_kwargs):
argv.extend(['profile'])
distribution = Distribution({
**dist_kwargs,
})
with pytest.raises(distutils.errors.DistutilsArgError):
distribution.parse_command_line()
def test_profile_command(argv, distribution, profile_command):
argv.extend(['profile'])
distribution.parse_command_line()
assert len(distribution.commands) == 1
cmd = distribution.get_command_obj(distribution.commands[0])
cmd.ensure_finalized()
assert isinstance(cmd.profile_command, tuple)
assert cmd.profile_command == tuple(profile_command)
def test_default_build_dirs(argv, distribution):
argv.extend(['profile'])
distribution.parse_command_line()
assert len(distribution.commands) == 1
cmd = distribution.get_command_obj(distribution.commands[0])
cmd.ensure_finalized()
assert os.path.basename(cmd.build_lib).startswith('.pgo-')
assert os.path.basename(cmd.build_temp).startswith('.pgo-')
def test_set_build_dirs(argv, distribution):
argv.extend([
'profile',
'--build-lib', 'build',
'--build-temp', 'temp'
])
distribution.parse_command_line()
assert len(distribution.commands) == 1
cmd = distribution.get_command_obj(distribution.commands[0])
cmd.ensure_finalized()
assert cmd.build_lib == 'build'
assert cmd.build_temp == 'temp'
def test_set_pgo_build_dirs_through_build_profile_generate(argv, distribution):
argv.extend([
'profile',
'build_profile_generate',
'--build-lib', 'build',
'--build-temp', 'temp',
])
distribution.parse_command_line()
assert len(distribution.commands) == 2
cmd = distribution.get_command_obj(distribution.commands[0])
cmd.ensure_finalized()
assert cmd.build_lib == 'build'
assert cmd.build_temp == 'temp'
def test_set_pgo_build_dirs_through_build(argv, distribution):
argv.extend([
'profile',
'build',
'--pgo-build-lib', 'build',
'--pgo-build-temp', 'temp',
])
distribution.parse_command_line()
assert len(distribution.commands) == 2
cmd = distribution.get_command_obj(distribution.commands[0])
cmd.ensure_finalized()
assert cmd.build_lib == 'build'
assert cmd.build_temp == 'temp'
def test_set_build_dirs_through_build(argv, distribution):
argv.extend([
'profile',
'build',
'--build-lib', 'build',
'--build-temp', 'temp',
])
distribution.parse_command_line()
assert len(distribution.commands) == 2
cmd = distribution.get_command_obj(distribution.commands[0])
cmd.ensure_finalized()
assert cmd.build_lib == '.pgo-build'
assert cmd.build_temp == '.pgo-temp'
def test_run_pgo_build_lib_env(argv, pgo_lib_dir):
argv.extend(['profile', '--build-lib', pgo_lib_dir])
file_name = os.path.join(pgo_lib_dir, 'var')
distribution = Distribution({ "pgo": { "profile_command": [
sys.executable, '-c', textwrap.dedent(f"""
import os
with open({file_name!r}, 'w') as f:
f.write(os.environ["PGO_BUILD_LIB"])
""")
]}})
distribution.parse_command_line()
distribution.run_commands()
with open(file_name) as f:
pgo_build_lib = f.read()
assert pgo_build_lib == pgo_lib_dir
def test_run_pgo_build_temp_env(argv, pgo_lib_dir, pgo_temp_dir):
argv.extend(['profile', '--build-temp', pgo_temp_dir])
file_name = os.path.join(pgo_lib_dir, 'var')
distribution = Distribution({ "pgo": { "profile_command": [
sys.executable, '-c', textwrap.dedent(f"""
import os
with open({file_name!r}, 'w') as f:
f.write(os.environ["PGO_BUILD_TEMP"])
""")
]}})
distribution.parse_command_line()
distribution.run_commands()
with open(file_name) as f:
pgo_build_temp = f.read()
assert pgo_build_temp == pgo_temp_dir
def test_run_pgo_python_env(argv, pgo_lib_dir):
argv.extend(['profile'])
file_name = os.path.join(pgo_lib_dir, 'var')
distribution = Distribution({ "pgo": { "profile_command": [
sys.executable, '-c', textwrap.dedent(f"""
import os
with open({file_name!r}, 'w') as f:
f.write(os.environ["PGO_PYTHON"])
""")
]}})
distribution.parse_command_line()
distribution.run_commands()
with open(file_name) as f:
pgo_python = f.read()
assert pgo_python == sys.executable
def test_run_pythonpath_env_outer_empty(argv, pgo_lib_dir):
try:
original_pythonpath = os.environ["PYTHONPATH"]
del os.environ["PYTHONPATH"]
except KeyError:
original_pythonpath = None
try:
argv.extend(['profile', '--build-lib', pgo_lib_dir])
file_name = os.path.join(pgo_lib_dir, 'var')
distribution = Distribution({ "pgo": { "profile_command": [
sys.executable, '-c', textwrap.dedent(f"""
import os
with open({file_name!r}, 'w') as f:
f.write(os.environ["PYTHONPATH"])
""")
]}})
distribution.parse_command_line()
distribution.run_commands()
with open(file_name) as f:
pythonpath = f.read()
assert pythonpath == pgo_lib_dir
finally:
if original_pythonpath is not None:
os.environ["PYTHONPATH"] = original_pythonpath
def test_run_pythonpath_env_outer_has_values(argv, pgo_lib_dir, pgo_temp_dir):
try:
original_pythonpath = os.environ["PYTHONPATH"]
except KeyError:
original_pythonpath = None
os.environ["PYTHONPATH"] = pgo_temp_dir
try:
argv.extend(['profile', '--build-lib', pgo_lib_dir])
file_name = os.path.join(pgo_lib_dir, 'var')
distribution = Distribution({ "pgo": { "profile_command": [
sys.executable, '-c', textwrap.dedent(f"""
import os
with open({file_name!r}, 'w') as f:
f.write(os.environ["PYTHONPATH"])
""")
]}})
distribution.parse_command_line()
distribution.run_commands()
with open(file_name) as f:
pythonpath = f.read()
assert pythonpath == os.pathsep.join([pgo_lib_dir, pgo_temp_dir])
finally:
if original_pythonpath is None:
del os.environ["PYTHONPATH"]
else:
os.environ["PYTHONPATH"] = original_pythonpath
def test_run_error(argv, pgo_lib_dir):
argv.extend(['profile'])
distribution = Distribution({ "pgo": { "profile_command": [
sys.executable, '-c', textwrap.dedent(f"""
import sys
sys.exit(1)
""")
]}})
distribution.parse_command_line()
with pytest.raises(ProfileError):
distribution.run_commands()
def test_run_not_a_command(argv, pgo_lib_dir):
argv.extend(['profile'])
distribution = Distribution({ "pgo": { "profile_command": [
os.path.join(pgo_lib_dir, 'invalid')
]}})
distribution.parse_command_line()
with pytest.raises(ProfileError):
distribution.run_commands()
def test_dry_run(argv, pgo_lib_dir):
argv.extend(['--dry-run', 'profile'])
distribution = Distribution({ "pgo": { "profile_command": [
sys.executable, '-c', textwrap.dedent(f"""
import sys
sys.exit(1)
""")
]}})
distribution.parse_command_line()
distribution.run_commands()
@pytest.mark.skipif(sys.platform != 'win32', reason='not windows')
def test_run_msvc(argv, extension, pgo_lib_dir, pgo_temp_dir):
argv.extend([
'build_ext_profile_generate',
'--build-lib', pgo_lib_dir,
'--build-temp', pgo_temp_dir,
'profile',
'--build-lib', pgo_lib_dir,
'--build-temp', pgo_temp_dir,
])
distribution = Distribution({
"ext_modules": [extension],
"pgo": { "profile_command": [
sys.executable, '-c', 'import _pgo_test'
]}
})
distribution.parse_command_line()
distribution.run_commands()
# there should be a pgd file in the build directory
build_files = os.listdir(pgo_lib_dir)
assert [
f for f in build_files
if f.startswith('_pgo_test')
if f.endswith('.pyd.pgd')
]
@pytest.mark.skipif(sys.platform != 'linux', reason='not linux')
def test_run_gcc(argv, extension, pgo_lib_dir, pgo_temp_dir):
argv.extend([
'build_ext_profile_generate',
'--build-lib', pgo_lib_dir,
'--build-temp', pgo_temp_dir,
'profile',
'--build-lib', pgo_lib_dir,
'--build-temp', pgo_temp_dir,
])
distribution = Distribution({
"ext_modules": [extension],
"pgo": { "profile_command": [
sys.executable, '-c', 'import _pgo_test'
]}
})
distribution.parse_command_line()
distribution.run_commands()
# there should be a _pgo_test.gcda file in the temp directory
temp_files = [
file
for root, _, files in os.walk(pgo_temp_dir)
for file in files
]
assert '_pgo_test.gcda' in temp_files
@pytest.mark.skipif(sys.platform != 'darwin', reason='not macos')
def test_run_clang(argv, extension, pgo_lib_dir, pgo_temp_dir):
argv.extend([
'build_ext_profile_generate',
'--build-lib', pgo_lib_dir,
'--build-temp', pgo_temp_dir,
'profile',
'--build-lib', pgo_lib_dir,
'--build-temp', pgo_temp_dir,
])
distribution = Distribution({
"ext_modules": [extension],
"pgo": { "profile_command": [
sys.executable, '-c', 'import _pgo_test'
]}
})
distribution.parse_command_line()
distribution.run_commands()
# there should be a .pgo-profdatas directory in the temp directory
temp_files = os.listdir(pgo_temp_dir)
assert '.pgo-profdatas' in temp_files
| 32.617211
| 80
| 0.603985
|
4a08db18ef1b95b3970403f2d3d48e31488acf8f
| 7,076
|
py
|
Python
|
Python-Client/agents/navigation/controller.py
|
SAMMiCA/Scenario2-AutonomousDriving
|
e9e3faeaab0f2652fb1cf4d00544a18b557ee4b6
|
[
"MIT"
] | null | null | null |
Python-Client/agents/navigation/controller.py
|
SAMMiCA/Scenario2-AutonomousDriving
|
e9e3faeaab0f2652fb1cf4d00544a18b557ee4b6
|
[
"MIT"
] | null | null | null |
Python-Client/agents/navigation/controller.py
|
SAMMiCA/Scenario2-AutonomousDriving
|
e9e3faeaab0f2652fb1cf4d00544a18b557ee4b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2018 Intel Labs.
# authors: German Ros (german.ros@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
""" This module contains PID controllers to perform lateral and longitudinal control. """
from collections import deque
import math
import numpy as np
import carla
from agents.tools.misc import get_speed
class VehiclePIDController():
"""
VehiclePIDController is the combination of two PID controllers (lateral and longitudinal) to perform the
low level control a vehicle from client side
"""
def __init__(self, vehicle, args_lateral=None, args_longitudinal=None):
"""
:param vehicle: actor to apply to local planner logic onto
:param args_lateral: dictionary of arguments to set the lateral PID controller using the following semantics:
K_P -- Proportional term
K_D -- Differential term
K_I -- Integral term
:param args_longitudinal: dictionary of arguments to set the longitudinal PID controller using the following
semantics:
K_P -- Proportional term
K_D -- Differential term
K_I -- Integral term
"""
if not args_lateral:
args_lateral = {'K_P': 1.0, 'K_D': 0.0, 'K_I': 0.0}
if not args_longitudinal:
args_longitudinal = {'K_P': 1.0, 'K_D': 0.0, 'K_I': 0.0}
self._vehicle = vehicle
self._world = self._vehicle.get_world()
self._lon_controller = PIDLongitudinalController(self._vehicle, **args_longitudinal)
self._lat_controller = PIDLateralController(self._vehicle, **args_lateral)
def run_step(self, target_speed, waypoint):
"""
Execute one step of control invoking both lateral and longitudinal PID controllers to reach a target waypoint
at a given target_speed.
:param target_speed: desired vehicle speed
:param waypoint: target location encoded as a waypoint
:return: distance (in meters) to the waypoint
"""
throttle = self._lon_controller.run_step(target_speed)
steering = self._lat_controller.run_step(waypoint)
control = carla.VehicleControl()
control.steer = steering
control.throttle = throttle
control.brake = 0.0
control.hand_brake = False
control.manual_gear_shift = False
return control
class PIDLongitudinalController():
"""
PIDLongitudinalController implements longitudinal control using a PID.
"""
def __init__(self, vehicle, K_P=1.0, K_D=0.0, K_I=0.0, dt=0.03):
"""
:param vehicle: actor to apply to local planner logic onto
:param K_P: Proportional term
:param K_D: Differential term
:param K_I: Integral term
:param dt: time differential in seconds
"""
self._vehicle = vehicle
self._K_P = K_P
self._K_D = K_D
self._K_I = K_I
self._dt = dt
self._e_buffer = deque(maxlen=30)
def run_step(self, target_speed, debug=False):
"""
Execute one step of longitudinal control to reach a given target speed.
:param target_speed: target speed in Km/h
:return: throttle control in the range [0, 1]
"""
current_speed = get_speed(self._vehicle)
if debug:
print('Current speed = {}'.format(current_speed))
return self._pid_control(target_speed, current_speed)
def _pid_control(self, target_speed, current_speed):
"""
Estimate the throttle of the vehicle based on the PID equations
:param target_speed: target speed in Km/h
:param current_speed: current speed of the vehicle in Km/h
:return: throttle control in the range [0, 1]
"""
_e = (target_speed - current_speed)/target_speed
self._e_buffer.append(_e)
if len(self._e_buffer) >= 2:
_de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt
_ie = sum(self._e_buffer) * self._dt
else:
_de = 0.0
_ie = 0.0
return np.clip((self._K_P * _e) + (self._K_D * _de / self._dt) + (self._K_I * _ie * self._dt), 0.0, 1.0)
class PIDLateralController():
"""
PIDLateralController implements lateral control using a PID.
"""
def __init__(self, vehicle, K_P=1.0, K_D=0.0, K_I=0.0, dt=0.03):
"""
:param vehicle: actor to apply to local planner logic onto
:param K_P: Proportional term
:param K_D: Differential term
:param K_I: Integral term
:param dt: time differential in seconds
"""
self._vehicle = vehicle
self._K_P = K_P
self._K_D = K_D
self._K_I = K_I
self._dt = dt
self._e_buffer = deque(maxlen=10)
def run_step(self, waypoint):
"""
Execute one step of lateral control to steer the vehicle towards a certain waypoin.
:param waypoint: target waypoint
:return: steering control in the range [-1, 1] where:
-1 represent maximum steering to left
+1 maximum steering to right
"""
return self._pid_control(waypoint, self._vehicle.get_transform())
def _pid_control(self, waypoint, vehicle_transform):
"""
Estimate the steering angle of the vehicle based on the PID equations
:param waypoint: target waypoint -----> sort of the GNSS value
:param vehicle_transform: current transform of the vehicle ----> How? Odometry, IMU, camera
:return: steering control in the range [-1, 1]
"""
v_begin = vehicle_transform.location
v_end = v_begin + carla.Location(x=math.cos(math.radians(vehicle_transform.rotation.yaw)),
y=math.sin(math.radians(vehicle_transform.rotation.yaw)))
# Position change due to lateral movement
v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
# ?????? vector of lateral distance
w_vec = np.array([waypoint.transform.location.x -
v_begin.x, waypoint.transform.location.y -
v_begin.y, 0.0])
_dot = math.acos(np.clip(np.dot(w_vec, v_vec) /
(np.linalg.norm(w_vec) * np.linalg.norm(v_vec)), -1.0, 1.0))
_cross = np.cross(v_vec, w_vec)
if _cross[2] < 0:
_dot *= -1.0
print(_dot)
self._e_buffer.append(_dot)
if len(self._e_buffer) >= 2:
_de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt
_ie = sum(self._e_buffer) * self._dt
else:
_de = 0.0
_ie = 0.0
return np.clip((self._K_P * _dot) + (self._K_D * _de /
self._dt) + (self._K_I * _ie * self._dt), -1.0, 1.0)
| 35.557789
| 117
| 0.601187
|
4a08dba43a61f4ee93d125acaa86d389a8921d37
| 53,645
|
py
|
Python
|
bayesfast/core/recipe.py
|
HerculesJack/bayesfast
|
4354bab286bb4e3019eabba4c04a75d51fe04967
|
[
"Apache-2.0"
] | 33
|
2019-11-06T15:42:50.000Z
|
2022-01-14T05:56:03.000Z
|
bayesfast/core/recipe.py
|
HerculesJack/bayesfast
|
4354bab286bb4e3019eabba4c04a75d51fe04967
|
[
"Apache-2.0"
] | 30
|
2020-04-07T05:23:15.000Z
|
2022-03-25T21:34:56.000Z
|
bayesfast/core/recipe.py
|
HerculesJack/bayesfast
|
4354bab286bb4e3019eabba4c04a75d51fe04967
|
[
"Apache-2.0"
] | 7
|
2019-12-03T06:43:20.000Z
|
2021-12-09T14:26:06.000Z
|
from .module import Surrogate
from .density import Density, DensityLite
from .sample import sample
from ..modules.poly import PolyConfig, PolyModel
from ..samplers import SampleTrace, NTrace, _HTrace, TraceTuple
from ..samplers import _get_step_size, _get_metric
from ..utils import all_isinstance, Laplace
from ..utils.parallel import ParallelBackend, get_backend
from ..utils.sobol import multivariate_normal
from ..utils import SystematicResampler, integrated_time
from ..utils.collections import VariableDict, PropertyList
from ..evidence import GBS, GIS, GHM
import numpy as np
from collections import namedtuple, OrderedDict
import warnings
from copy import deepcopy
from scipy.special import logsumexp
__all__ = ['OptimizeStep', 'SampleStep', 'PostStep', 'StaticSample',
'RecipeTrace', 'Recipe']
# TODO: RecipeTrace.n_call
# TODO: early stop in pipeline evaluation
# TODO: early stop by comparing KL
# TODO: use tqdm to add progress bar for map
# TODO: better control when we don't have enough points before resampling
# TODO: monitor the progress of IS
# TODO: improve optimization with trust region?
# https://arxiv.org/pdf/1804.00154.pdf
# TODO: add checkpoint facility
# TODO: review Recipe.__getstate__
# TODO: recover the initial values of original_space, use_surrogate, return_dict
class _BaseStep:
"""Utilities shared by `OptimizeStep` and `SampleStep`."""
def __init__(self, surrogate_list=(), alpha_n=2, fitted=False,
sample_trace=None, x_0=None, reuse_metric=True):
self.surrogate_list = surrogate_list
self.alpha_n = alpha_n
self.fitted = fitted
self.sample_trace = sample_trace
self.x_0 = x_0
self.reuse_metric = reuse_metric
@property
def surrogate_list(self):
return self._surrogate_list
@surrogate_list.setter
def surrogate_list(self, sl):
if isinstance(sl, Surrogate):
sl = [sl]
self._surrogate_list = PropertyList(sl, self._sl_check)
def _sl_check(self, sl):
for i, s in enumerate(sl):
if not isinstance(s, Surrogate):
raise ValueError('element #{} of surrogate_list is not a '
'Surrogate'.format(i))
return sl
@property
def n_surrogate(self):
return len(self._surrogate_list)
@property
def has_surrogate(self):
return self.n_surrogate > 0
@property
def alpha_n(self):
return self._alpha_n
@alpha_n.setter
def alpha_n(self, a):
try:
a = float(a)
except Exception:
raise ValueError('alpha_n should be a float.')
self._alpha_n = a
@property
def n_eval(self):
return int(self._alpha_n *
max(su.n_param for su in self._surrogate_list))
@property
def x_0(self):
return self._x_0
@x_0.setter
def x_0(self, x):
if x is None:
self._x_0 = None
else:
try:
self._x_0 = np.atleast_2d(x).copy()
except Exception:
raise ValueError('invalid value for x_0.')
@property
def fitted(self):
return self._fitted
@fitted.setter
def fitted(self, f):
self._fitted = bool(f)
@property
def sample_trace(self):
return self._sample_trace
@sample_trace.setter
def sample_trace(self, t):
if t is None:
t = {}
if isinstance(t, dict):
t = NTrace(**t)
elif isinstance(t, (SampleTrace, TraceTuple)):
pass
else:
raise ValueError('invalid value for sample_trace.')
self._sample_trace = t
@property
def reuse_metric(self):
return self._reuse_metric
@reuse_metric.setter
def reuse_metric(self, rm):
self._reuse_metric = bool(rm)
class OptimizeStep(_BaseStep):
    """
    Configuring a step for optimization.

    Parameters
    ----------
    surrogate_list : Surrogate or 1-d array_like of Surrogate, optional
        Surrogate model(s) to fit during optimization. Set to ``()`` by
        default.
    alpha_n : float, optional
        Controls how many true-model evaluations are used to fit a surrogate
        with ``n`` parameters, namely ``alpha_n * n`` per iteration; negative
        values mean "use all available samples". Set to ``2.`` by default.
    laplace : Laplace or dict, optional
        Configuring the Laplace sampler. Set to ``{'beta': 100.}`` by default.
    eps_pp : positive float, optional
        Convergence threshold for |logp_i - logp_i-1|. Set to ``0.1`` by
        default.
    eps_pq : positive float, optional
        Convergence threshold for |logp_i - logq_i|. Set to ``0.1`` by
        default.
    max_iter : positive int, optional
        Maximum number of iterations allowed. Set to ``5`` by default.
    x_0 : 2-d array of float or None, optional
        Points for fitting the first surrogate model; if None, drawn from a
        standard multivariate Gaussian via Sobol sequence. Set to ``None`` by
        default.
    fitted : bool, optional
        If True, the surrogate models are assumed to be fitted already.
        Set to ``False`` by default.
    run_sampling : bool, optional
        Whether to run a real MCMC sampling at the end, which can give the
        following SampleStep a better starting point. Set to ``True`` by
        default.
    sample_trace : SampleTrace or dict, optional
        Sampler configuration, only used when ``run_sampling`` is True; a
        dict initializes an ``NTrace``. Set to ``{}`` by default.
    reuse_metric : bool, optional
        Whether to seed the MCMC metric (or its adaptive starting point) with
        the covariance of the previous Laplace samples. Set to ``True`` by
        default.
    """

    def __init__(self, surrogate_list=(), alpha_n=2., laplace=None, eps_pp=0.1,
                 eps_pq=0.1, max_iter=5, x_0=None, fitted=False,
                 run_sampling=True, sample_trace=None, reuse_metric=True):
        super().__init__(surrogate_list, alpha_n, fitted, sample_trace, x_0,
                         reuse_metric)
        self.laplace = laplace
        self.eps_pp = eps_pp
        self.eps_pq = eps_pq
        self.max_iter = max_iter
        self.run_sampling = run_sampling

    @staticmethod
    def _as_positive_float(value, err_msg):
        # Shared validation for the two convergence thresholds.
        try:
            value = float(value)
            assert value > 0
        except Exception:
            raise ValueError(err_msg)
        return value

    @property
    def laplace(self):
        return self._laplace

    @laplace.setter
    def laplace(self, lap):
        if lap is None:
            lap = {'beta': 100.}
        if isinstance(lap, Laplace):
            self._laplace = lap
        elif isinstance(lap, dict):
            self._laplace = Laplace(**lap)
        else:
            raise ValueError('invalid value for laplace.')

    @property
    def eps_pp(self):
        return self._eps_pp

    @eps_pp.setter
    def eps_pp(self, eps):
        self._eps_pp = self._as_positive_float(
            eps, 'eps_pp should be a positive float.')

    @property
    def eps_pq(self):
        return self._eps_pq

    @eps_pq.setter
    def eps_pq(self, eps):
        self._eps_pq = self._as_positive_float(
            eps, 'eps_pq should be a positive float.')

    @property
    def max_iter(self):
        return self._max_iter

    @max_iter.setter
    def max_iter(self, mi):
        try:
            value = int(mi)
        except Exception:
            raise ValueError('max_iter should be a positive int.')
        if value <= 0:
            raise ValueError('max_iter should be a positive int.')
        self._max_iter = value

    @property
    def run_sampling(self):
        return self._run_sampling

    @run_sampling.setter
    def run_sampling(self, run):
        self._run_sampling = bool(run)
class SampleStep(_BaseStep):
    """
    Configuring a step for sampling.

    Parameters
    ----------
    surrogate_list : Surrogate or 1-d array_like of Surrogate, optional
        Surrogate model(s) to fit before sampling. Set to ``()`` by default.
    alpha_n : float, optional
        Controls how many true-model evaluations are used to fit a surrogate
        with ``n`` parameters, namely ``alpha_n * n`` per iteration; negative
        values mean "use all available samples". See the notes below for the
        interplay with ``logp_cutoff``. Set to ``2.`` by default.
    sample_trace : SampleTrace or dict, optional
        Sampler configuration; a dict initializes an ``NTrace``. Set to
        ``{}`` by default.
    resampler : callable or dict, optional
        Decides where to evaluate the true model next, given the previous
        surrogate samples. A dict initializes a ``SystematicResampler``; a
        callable should match the signature of ``SystematicResampler.run``.
        Set to ``{}`` by default.
    reuse_samples : int, optional
        Number of previous SampleStep(s) whose (adequate) samples are also
        used for fitting; negative means all of them. Set to ``0`` by
        default.
    reuse_step_size : bool, optional
        Whether to seed the MCMC step size (or its adaptive starting point)
        from the previous SampleStep. Set to ``True`` by default.
    reuse_metric : bool, optional
        Whether to seed the MCMC metric (or its adaptive starting point) from
        the covariance of previous samples. Set to ``True`` by default.
    logp_cutoff : bool, optional
        Whether to abandon samples whose logp is too small; see the notes
        below.
    alpha_min : float, optional
        Only used when ``logp_cutoff`` is True; see the notes below.
    alpha_supp : float, optional
        Only used when ``logp_cutoff`` is True; see the notes below.
    x_0 : 2-d array of float or None, optional
        Points for fitting the first surrogate model. If None, taken from
        previous steps when possible, otherwise drawn from a standard
        multivariate Gaussian via Sobol sequence. Set to ``None`` by default.
    fitted : bool, optional
        If True, the surrogate models are assumed to be fitted already.
        Set to ``False`` by default.

    Notes
    -----
    Without ``logp_cutoff``, the surrogate model is simply fit to
    ``alpha_n * n`` true-model evaluations. With ``logp_cutoff``, the logp of
    these points is compared against the (previous) logq values, and points
    whose logp is smaller than the smallest logq are abandoned. At least
    ``alpha_n * alpha_min * n`` adequate points are then required; if fewer
    remain, supplemental batches are drawn, sized by ``alpha_supp`` (a value
    above ``1`` compensates for new samples that may also be rejected), until
    the requirement is satisfied.
    """

    def __init__(self, surrogate_list=(), alpha_n=2., sample_trace=None,
                 resampler=None, reuse_samples=0, reuse_step_size=True,
                 reuse_metric=True, logp_cutoff=True, alpha_min=0.75,
                 alpha_supp=1.25, x_0=None, fitted=False):
        super().__init__(surrogate_list, alpha_n, fitted, sample_trace, x_0,
                         reuse_metric)
        self.resampler = resampler
        self.reuse_samples = reuse_samples
        self.reuse_step_size = reuse_step_size
        self.logp_cutoff = logp_cutoff
        self.alpha_min = alpha_min
        self.alpha_supp = alpha_supp

    @property
    def resampler(self):
        return self._resampler

    @resampler.setter
    def resampler(self, rs):
        # None means "default SystematicResampler"; a dict supplies its
        # kwargs; anything callable is used as-is.
        if rs is None:
            rs = {}
        if isinstance(rs, dict):
            self._resampler = SystematicResampler(**rs)
        elif callable(rs):
            self._resampler = rs
        else:
            raise ValueError('invalid value for resampler.')

    @property
    def reuse_samples(self):
        return self._reuse_samples

    @reuse_samples.setter
    def reuse_samples(self, rs):
        try:
            self._reuse_samples = int(rs)
        except Exception:
            raise ValueError('invalid value for reuse_samples.')

    @property
    def reuse_step_size(self):
        return self._reuse_step_size

    @reuse_step_size.setter
    def reuse_step_size(self, rss):
        self._reuse_step_size = bool(rss)

    @property
    def logp_cutoff(self):
        return self._logp_cutoff

    @logp_cutoff.setter
    def logp_cutoff(self, lc):
        self._logp_cutoff = bool(lc)

    @property
    def alpha_min(self):
        return self._alpha_min

    @alpha_min.setter
    def alpha_min(self, am):
        # must be a float in (0, 1]
        try:
            am = float(am)
        except Exception:
            raise ValueError('invalid value for alpha_min.')
        if not 0. < am <= 1.:
            raise ValueError('invalid value for alpha_min.')
        self._alpha_min = am

    @property
    def alpha_supp(self):
        return self._alpha_supp

    @alpha_supp.setter
    def alpha_supp(self, asu):
        # must be a positive float
        try:
            asu = float(asu)
        except Exception:
            raise ValueError('invalid value for alpha_supp.')
        if not asu > 0.:
            raise ValueError('invalid value for alpha_supp.')
        self._alpha_supp = asu

    @property
    def n_eval_min(self):
        # minimum number of adequate points required when logp_cutoff is on
        return int(self.alpha_min * self.n_eval)
class PostStep:
    """
    Configuring a step for post-processing.

    Parameters
    ----------
    n_is : int, optional
        Number of samples used for Importance Sampling (IS); negative means
        all available samples. Set to ``0`` by default.
    k_trunc : float, optional
        The IS weights w are truncated at ``<w> * n_IS**k_trunc``. Set to
        ``0.25`` by default.
    evidence_method : str, None, or specified object, optional
        None disables the evidence computation. A str must be one of
        ``GBS``, ``GIS`` and ``GHM``; a dict initializes a ``GBS`` object;
        otherwise it must expose a ``run`` method with the same signature as
        ``GBS.run``. Set to ``None`` by default.
    """

    def __init__(self, n_is=0, k_trunc=0.25, evidence_method=None):
        self.n_is = n_is
        self.k_trunc = k_trunc
        self.evidence_method = evidence_method

    @property
    def n_is(self):
        return self._n_is

    @n_is.setter
    def n_is(self, n):
        try:
            value = int(n)
        except Exception:
            raise ValueError('invalid value for n_is.')
        self._n_is = value

    @property
    def k_trunc(self):
        return self._k_trunc

    @k_trunc.setter
    def k_trunc(self, k):
        try:
            value = float(k)
        except Exception:
            raise ValueError('invalid value for k_trunc.')
        self._k_trunc = value

    @property
    def evidence_method(self):
        return self._evidence_method

    @evidence_method.setter
    def evidence_method(self, em):
        if em is None:
            self._evidence_method = None
            return
        if em == 'GBS':
            em = GBS()
        elif em == 'GIS':
            em = GIS()
        elif em == 'GHM':
            em = GHM()
        elif isinstance(em, dict):
            em = GBS(**em)
        elif hasattr(em, 'run'):
            pass
        else:
            raise ValueError('invalid value for evidence_method.')
        self._evidence_method = em
class _SampleStrategy:
"""Configuring a multi-step sample strategy."""
def __init__(self):
self._i = 0
def update(self, sample_results):
raise NotImplementedError('abstract method.')
@property
def n_step(self):
raise NotImplementedError('abstract property.')
class StaticSample(_SampleStrategy):
    """
    A fixed, pre-specified sequence of SampleStep(s).

    Parameters
    ----------
    sample_steps : SampleStep, 1-d array-like of SampleStep, or None, optional
        The surrogate sampling configuration for each step; ``None`` is
        interpreted as ``()``, i.e. no steps.
    repeat : 1-d array-like of positive int, or None, optional
        If not None, each element of ``sample_steps`` is repeated the
        corresponding number of times. Set to ``None`` by default.
    verbose : bool, optional
        Whether to print a message before each iteration. Set to ``True`` by
        default.
    """

    def __init__(self, sample_steps=None, repeat=None, verbose=True):
        super().__init__()
        if repeat is not None:
            if hasattr(sample_steps, '__iter__'):
                try:
                    expanded = [step for k, step in enumerate(sample_steps)
                                for _ in range(repeat[k])]
                    sample_steps = expanded
                except Exception:
                    warnings.warn('repeat is ignored since I failed to '
                                  'interpret it.', RuntimeWarning)
            else:
                warnings.warn('repeat is ignored since sample_steps is not '
                              'iterable.', RuntimeWarning)
        self.sample_steps = sample_steps
        self.verbose = verbose

    @property
    def sample_steps(self):
        return self._sample_steps

    @sample_steps.setter
    def sample_steps(self, steps):
        if steps is None:
            self._sample_steps = ()
        elif isinstance(steps, SampleStep):
            self._sample_steps = (deepcopy(steps),)
        elif isinstance(steps, dict):
            self._sample_steps = (SampleStep(**deepcopy(steps)),)
        elif all_isinstance(steps, (SampleStep, dict)) and len(steps) > 0:
            # mixed sequence: dicts are expanded into SampleSteps, existing
            # SampleSteps are deep-copied
            converted = []
            for s in steps:
                s = deepcopy(s)
                converted.append(SampleStep(**s) if isinstance(s, dict) else s)
            self._sample_steps = tuple(converted)
        else:
            raise ValueError('invalid value for sample_steps.')

    @property
    def n_step(self):
        return len(self.sample_steps)

    @property
    def verbose(self):
        return self._verbose

    @verbose.setter
    def verbose(self, v):
        self._verbose = bool(v)

    def update(self, sample_results):
        # The next step index equals the number of results collected so far.
        i_step = len(sample_results)
        if i_step >= self.n_step:
            if self.verbose:
                print('\n *** StaticSample: iter #{}, no more SampleStep. *** '
                      '\n'.format(i_step))
            return None
        if self.verbose:
            print('\n *** StaticSample: returning the #{} SampleStep. *** '
                  '\n'.format(i_step))
        return deepcopy(self.sample_steps[i_step])
class DynamicSample(_SampleStrategy):
    """Configuring a dynamic multi-step sample strategy (not implemented)."""

    def __init__(self, *args):
        # placeholder: dynamic strategies are not available yet
        raise NotImplementedError
# Lightweight container holding one entry per recipe phase.
RecipePhases = namedtuple('RecipePhases', ['optimize', 'sample', 'post'])
class RecipeTrace:
    """
    Recording the process of running a Recipe.

    Parameters
    ----------
    optimize : OptimizeStep, dict or None, optional
        If None, no OptimizeStep will be performed. If dict, will be used to
        initialize an OptimizeStep. Set to ``None`` by default.
    sample : StaticSample, SampleStep, 1-d array-like of SampleStep, or None, optional
        If not StaticSample, will be used to initialize a StaticSample. Set to
        ``None`` by default.
    post : PostStep or dict, optional
        If dict, will be used to initialize a PostStep. Set to ``{}`` by
        default.
    sample_repeat : 1-d array-like of positive int, or None, optional
        If ``sample`` is not a StaticSample, will be used to initialize a
        StaticSample (as the ``repeat`` argument). Set to ``None`` by default.

    Notes
    -----
    The default behavior of SampleStrategy initialization may change later.
    """

    def __init__(self, optimize=None, sample=None, post=None,
                 sample_repeat=None):
        if isinstance(optimize, OptimizeStep) or optimize is None:
            self._s_optimize = deepcopy(optimize)
        elif isinstance(optimize, dict):
            self._s_optimize = OptimizeStep(**deepcopy(optimize))
        else:
            raise ValueError('invalid value for optimize.')
        if isinstance(sample, _SampleStrategy):
            self._strategy = sample
        else:
            try:
                # TODO: update this when DynamicSample is ready
                self._strategy = StaticSample(sample, sample_repeat)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit and masked unrelated errors.
                raise ValueError('failed to initialize a StaticSample.')
        self._s_sample = []
        if post is None:
            post = {}
        if isinstance(post, PostStep):
            self._s_post = deepcopy(post)
        elif isinstance(post, dict):
            self._s_post = PostStep(**deepcopy(post))
        else:
            raise ValueError('invalid value for post.')
        self._r_optimize = []
        self._r_sample = []
        self._r_post = None
        self._n_optimize = 0 if self._s_optimize is None else 1
        self._n_sample = self._strategy.n_step
        # NOTE: _s_post is never None here (post=None maps to PostStep()),
        # but the guard is kept for safety.
        self._n_post = 0 if self._s_post is None else 1
        self._i_optimize = 0
        self._i_sample = 0
        self._i_post = 0

    @property
    def results(self):
        # results gathered so far, one tuple entry per phase
        return RecipePhases(tuple(self._r_optimize), tuple(self._r_sample),
                            self._r_post)

    @property
    def steps(self):
        # configured steps, one entry per phase
        return RecipePhases(self._s_optimize, self._s_sample, self._s_post)

    @property
    def sample_strategy(self):
        return self._strategy

    @property
    def i(self):
        # number of finished iterations per phase
        return RecipePhases(self._i_optimize, self._i_sample, self._i_post)

    @property
    def n(self):
        # number of planned iterations per phase
        return RecipePhases(self._n_optimize, self._n_sample, self._n_post)

    # TODO: finish this
    @property
    def n_call(self):
        """Total number of true-model calls recorded so far."""
        if self._r_post is None:
            _n_call = 0
            for _opt in self._r_optimize:
                if len(_opt.surrogate_list) > 0:
                    _n_call += len(_opt.var_dicts)
                else:
                    raise NotImplementedError
            for _sam in self._r_sample:
                if len(_sam.surrogate_list) > 0:
                    _n_call += len(_sam.var_dicts)
                else:
                    raise NotImplementedError
            return _n_call
        else:
            return self._r_post.n_call

    # TODO: update this when DynamicSample is ready
    @property
    def finished(self):
        """Per-phase completion flags as a RecipePhases of bool."""
        if self._n_sample is not None:
            return RecipePhases(self._i_optimize == self._n_optimize,
                                self._i_sample == self._n_sample,
                                self._i_post == self._n_post)
        else:
            raise NotImplementedError
# Result containers for the three phases (naming note kept from the
# original author). :)
PointDoublet = namedtuple('PointDoublet', ['x', 'x_trans'])
DensityQuartet = namedtuple(
    'DensityQuartet', ['logp', 'logq', 'logp_trans', 'logq_trans'])
OptimizeResult = namedtuple(
    'OptimizeResult',
    ['x_max', 'f_max', 'surrogate_list', 'var_dicts', 'laplace_samples',
     'laplace_result', 'samples', 'sample_trace'])
SampleResult = namedtuple(
    'SampleResult', ['samples', 'surrogate_list', 'var_dicts', 'sample_trace'])
PostResult = namedtuple(
    'PostResult',
    ['samples', 'weights', 'weights_trunc', 'logp', 'logq', 'logz',
     'logz_err', 'x_p', 'x_q', 'logp_p', 'logq_q', 'trace_p', 'trace_q',
     'n_call', 'x_max', 'f_max'])
class Recipe:
"""
Unified recipe to run the whole BayesFast surrogate sampling process.
Parameters
----------
density : Density or DensityLite
The probability density to sample.
parallel_backend : None, ParallelBackend, int, Pool, Client or MapReduce, optional
If None, will use the global bayesfast parallel backend. Otherwise, will
be passed to construct a ``ParallelBackend`` for parallelization.
recipe_trace : RecipeTrace, dict or None, optional
If dict, will be used to initialize a RecipeTrace. If None, will use the
arguments below to initialize a RecipeTrace. Set to ``None`` by default.
optimize : OptimizeStep, dict or None, optional
The ``optimize`` parameter to initialize a RecipeTrace. Only used if
``recipe_trace`` is None. Set to ``None`` by default.
sample : StaticSample, SampleStep, 1-d array-like of SampleStep, or None, optional
The ``sample`` parameter to initialize a RecipeTrace. Only used if
``recipe_trace`` is None. Set to ``None`` by default.
post : PostStep or dict, optional
The ``post`` parameter to initialize a RecipeTrace. Only used if
``recipe_trace`` is None. Set to ``None`` by default.
sample_repeat : 1-d array-like of positive int, or None, optional
The ``sample_repeat`` parameter to initialize a RecipeTrace. Only used
if ``recipe_trace`` is None. Set to ``None`` by default.
copy_density : bool, optional
Whether to make a deepcopy of ``density``. Set to ``True`` by default.
"""
    def __init__(self, density, parallel_backend=None, recipe_trace=None,
                 optimize=None, sample=None, post=None, sample_repeat=None,
                 copy_density=True):
        """Initialize the Recipe; see the class docstring for parameters."""
        if isinstance(density, (Density, DensityLite)):
            # deepcopy by default so that later surrogate fits do not mutate
            # the caller's density object
            self._density = deepcopy(density) if copy_density else density
        else:
            raise ValueError('density should be a Density or DensityLite.')
        self.parallel_backend = parallel_backend
        if recipe_trace is None:
            # build a fresh trace from the individual step arguments
            recipe_trace = RecipeTrace(optimize, sample, post, sample_repeat)
        elif isinstance(recipe_trace, RecipeTrace):
            pass
        elif isinstance(recipe_trace, dict):
            recipe_trace = RecipeTrace(**recipe_trace)
        else:
            raise ValueError('recipe_trace should be a RecipeTrace or None.')
        self._recipe_trace = recipe_trace
    def __getstate__(self):
        """We need this to make self._parallel_backend work correctly."""
        self_dict = self.__dict__.copy()
        del self_dict['_parallel_backend'], self_dict['_recipe_trace']
        # TODO: review this
        # we remove recipe_trace because it contains a PropertyList
        # that is not pickle-able
        # however, this may lead to unexpected behaviors
        # if one wants to pickle this Recipe object
        return self_dict
    @property
    def density(self):
        # the target density (deep-copied at construction unless
        # copy_density=False)
        return self._density

    @property
    def parallel_backend(self):
        # fall back to the global bayesfast backend when none was configured
        if self._parallel_backend is None:
            return get_backend()
        else:
            return self._parallel_backend

    @parallel_backend.setter
    def parallel_backend(self, backend):
        # None keeps the lazy fallback; anything else is wrapped in a
        # ParallelBackend
        if backend is None:
            self._parallel_backend = None
        else:
            self._parallel_backend = ParallelBackend(backend)

    @property
    def recipe_trace(self):
        # the RecipeTrace recording configured steps and collected results
        return self._recipe_trace
    def _opt_surro(self, x_0, var_dicts):
        """Run one Laplace optimization pass on the surrogate density and
        append an OptimizeResult to the optimize trace."""
        step = self.recipe_trace._s_optimize
        result = self.recipe_trace._r_optimize
        # optimize in the transformed space, on the surrogate density
        _logp = lambda x: self.density.logp(x, original_space=False,
                                            use_surrogate=True)
        _grad = lambda x: self.density.grad(x, original_space=False,
                                            use_surrogate=True)
        # start from the first point of x_0, mapped to the transformed space
        x_0 = self.density.from_original(x_0[0])
        laplace_result = step.laplace.run(logp=_logp, x_0=x_0, grad=_grad)
        # record the optimum in both original and transformed coordinates
        x_trans = laplace_result.x_max
        x = self.density.to_original(x_trans)
        x_max = PointDoublet(x, x_trans)
        # logp: true density at the optimum; logq: surrogate density there
        logp = self.density.logp(x, original_space=True, use_surrogate=False)
        logp_trans = self.density.from_original_density(density=logp, x=x)
        logq_trans = laplace_result.f_max
        logq = self.density.to_original_density(density=logq_trans, x=x)
        f_max = DensityQuartet(float(logp), float(logq), float(logp_trans),
                               float(logq_trans))
        laplace_samples = self.density.to_original(laplace_result.samples)
        # snapshot the surrogates as fitted in this iteration
        surrogate_list = deepcopy(self.density._surrogate_list)
        result.append(
            OptimizeResult(x_max=x_max, f_max=f_max,
            surrogate_list=surrogate_list, var_dicts=var_dicts,
            laplace_samples=laplace_samples, laplace_result=laplace_result,
            samples=None, sample_trace=None))
    def _opt_step(self):
        """Run the OptimizeStep phase.

        With surrogates: fit and Laplace-optimize iteratively until the
        logp changes fall below the eps thresholds (or max_iter is hit),
        then keep the best iteration and optionally run MCMC on it.
        Without surrogates: a single Laplace optimization on the true model.
        """
        # DEVELOPMENT NOTES
        # if has surrogate, iterate until convergence
        # if no surrogate, just run on true model
        # in the end, optionally run sampling
        step = self.recipe_trace._s_optimize
        result = self.recipe_trace._r_optimize
        recipe_trace = self.recipe_trace
        if step.has_surrogate:
            if isinstance(self._density, DensityLite):
                raise RuntimeError('self.density should be a Density, instead '
                                   'of DensityLite, for surrogate modeling.')
            self._density.surrogate_list = step._surrogate_list
            if step.fitted:
                # surrogates are already fitted; only a starting point needed
                if step.x_0 is None:
                    x_0 = np.zeros(self.density.input_size)
                else:
                    x_0 = step.x_0.copy()
                var_dicts = None
            else:
                # choose the points at which the true model is evaluated
                if step.x_0 is None:
                    # draw from a standard Gaussian via Sobol sequence
                    dim = self.density.input_size
                    x_0 = multivariate_normal(np.zeros(dim), np.eye(dim),
                                              step.n_eval)
                else:
                    if step.n_eval > 0:
                        if step.x_0.shape[0] < step.n_eval:
                            raise RuntimeError(
                                'I need {} points to fit the surrogate model, '
                                'but you only gave me enough {} points in '
                                'x_0.'.format(step.n_eval, step.x_0.shape[0]))
                        x_0 = step.x_0[:step.n_eval].copy()
                    else:
                        # non-positive n_eval: use all provided points
                        x_0 = step.x_0.copy()
                # evaluate the true model in parallel and fit the surrogates
                self.density.use_surrogate = False
                self.density.original_space = True
                with self.parallel_backend:
                    var_dicts = self.parallel_backend.map(self.density.fun, x_0)
                self.density.fit(var_dicts)
            self._opt_surro(x_0, var_dicts)
            _a = result[-1].f_max
            # delta_pq: gap between true logp and surrogate logq at the optimum
            _pq = _a.logp_trans - _a.logq_trans
            print(' OptimizeStep proceeding: iter #0 finished, while current '
                  'logp = {:.3f}, logp_trans = {:.3f}, delta_pq = '
                  '{:.3f}.'.format(_a.logp, _a.logp_trans, _pq))
            for i in range(1, step.max_iter):
                if step.n_eval <= 0:
                    raise RuntimeError('alpha_n should be positive if max_iter '
                                       'is larger than 1.')
                # refit using samples from the previous Laplace approximation
                x_0 = result[-1].laplace_samples
                if x_0.shape[0] < step.n_eval:
                    raise RuntimeError(
                        'I need {} points to fit the surrogate model, but I '
                        'can only get {} points from the previous '
                        'iteration.'.format(step.n_eval, x_0.shape[0]))
                x_0 = x_0[:step.n_eval].copy()
                self.density.use_surrogate = False
                self.density.original_space = True
                with self.parallel_backend:
                    var_dicts = self.parallel_backend.map(self.density.fun, x_0)
                self.density.fit(var_dicts)
                self._opt_surro(x_0, var_dicts)
                _a = result[-1].f_max
                _b = result[-2].f_max
                # delta_pp: improvement over the previous iteration
                _pp = _a.logp_trans - _b.logp_trans
                _pq = _a.logp_trans - _a.logq_trans
                print(' OptimizeStep proceeding: iter #{} finished, while '
                      'current logp = {:.3f}, logp_trans = {:.3f}, delta_pp = '
                      '{:.3f}, delta_pq = {:.3f}.'.format(i, _a.logp,
                      _a.logp_trans, _pp, _pq))
                if i == step.max_iter - 1:
                    warnings.warn('Optimization did not converge within the max'
                                  ' number of iterations.', RuntimeWarning)
                if (abs(_pp) < step._eps_pp) and (abs(_pq) < step._eps_pq):
                    break
            # select the iteration with the highest logp_trans; ties are
            # broken by the smallest |logp - logq| gap
            logp_trans_all = np.asarray([r.f_max.logp_trans for r in result])
            is_max = np.where(logp_trans_all == np.max(logp_trans_all))[0]
            if is_max.size == 1:
                i_max = is_max[0]
            else:
                logq_trans_all = np.asarray(
                    [r.f_max.logq_trans for r in result])
                diff_all = np.abs(logp_trans_all - logq_trans_all)
                i_max = is_max[np.argmin(diff_all[is_max])]
            result.append(result[i_max])
            print(' OptimizeStep proceeding: we will use iter #{} as it has '
                  'the highest logp_trans.\n'.format(i_max))
        else:
            # no surrogates: one Laplace run on the true density
            if step.x_0 is None:
                dim = self.density.input_size
                if dim is None:
                    raise RuntimeError('Neither OptimizeStep.x_0 nor Density'
                                       '/DensityLite.input_size is defined.')
                x_0 = np.zeros(dim)
            else:
                x_0 = self.density.from_original(step.x_0[0])
            _logp = lambda x: self.density.logp(x, original_space=False)
            # if self.density.grad is well-defined, we will use it
            # otherwise, we will use finite-difference gradient
            try:
                _grad_0 = self.density.grad(x_0, original_space=False)
                assert np.all(np.isfinite(_grad_0))
                _grad = lambda x: self.density.grad(x, original_space=False)
            except Exception:
                _grad = None
            # TODO: allow user-defined hessian for optimizer?
            laplace_result = step.laplace.run(logp=_logp, x_0=x_0, grad=_grad)
            x_trans = laplace_result.x_max
            x = self.density.to_original(x_trans)
            x_max = PointDoublet(x, x_trans)
            logp_trans = laplace_result.f_max
            # NOTE(review): this branch passes the PointDoublet x_max as `x`,
            # while _opt_surro passes the bare array x — confirm that
            # to_original_density accepts both.
            logp = self.density.to_original_density(density=logp_trans, x=x_max)
            # logq entries are None: no surrogate exists in this branch
            f_max = DensityQuartet(float(logp), None, float(logp_trans), None)
            laplace_samples = self.density.to_original(laplace_result.samples)
            result.append(
                OptimizeResult(x_max=x_max, f_max=f_max, surrogate_list=(),
                var_dicts=None, laplace_samples=laplace_samples,
                laplace_result=laplace_result, samples=None, sample_trace=None))
        if step.has_surrogate and step.run_sampling:
            self._opt_sample()
        recipe_trace._i_optimize = 1
        print('\n ***** OptimizeStep finished. ***** \n')
    def _opt_sample(self):
        """Run MCMC on the surrogate density of the selected OptimizeStep
        iteration (used when `run_sampling` is True) and store the samples
        in the last OptimizeResult."""
        step = self.recipe_trace._s_optimize
        result = self.recipe_trace._r_optimize
        sample_trace = step.sample_trace
        # start the chains from the Laplace samples if no x_0 was configured
        if sample_trace.x_0 is None:
            sample_trace.x_0 = result[-1].laplace_samples
            sample_trace._x_0_transformed = False
        if step.reuse_metric:
            # seed the MCMC metric with the Laplace covariance estimate
            cov = result[-1].laplace_result.cov.copy()
            if sample_trace._metric == 'diag':
                sample_trace._metric = np.diag(cov)
            elif sample_trace._metric == 'full':
                sample_trace._metric = cov
        # sample the surrogate density as fitted in the selected iteration
        self._density.surrogate_list = result[-1].surrogate_list
        self._density.use_surrogate = True
        t = sample(self.density, sample_trace=sample_trace,
                   parallel_backend=self.parallel_backend)
        x = t.get(flatten=True)
        result[-1] = result[-1]._replace(samples=x, sample_trace=t)
        print('\n *** Finished sampling the surrogate density defined by the '
              'selected OptimizeStep. *** \n')
def _sam_step(self):
steps = self.recipe_trace._s_sample
results = self.recipe_trace._r_sample
recipe_trace = self.recipe_trace
i = recipe_trace._i_sample
this_step = recipe_trace._strategy.update(results)
while this_step is not None:
sample_trace = this_step.sample_trace
get_prev_step = not (i == 0 and not recipe_trace._i_optimize)
get_prev_samples = get_prev_step or (this_step.x_0 is not None)
if get_prev_step:
if i == 0:
prev_result = recipe_trace._r_optimize[-1]
prev_step = recipe_trace._s_optimize
else:
prev_result = results[i - 1]
prev_step = steps[i - 1]
get_prev_density = (get_prev_step and this_step.x_0 is None and
prev_step.sample_trace is not None)
if get_prev_samples:
if this_step.x_0 is None:
if prev_result.samples is None:
prev_samples = Laplace.untemper_laplace_samples(
prev_result.laplace_result)
prev_transformed = True
else:
prev_samples = prev_result.samples
prev_transformed = False
else:
prev_samples = this_step.x_0
prev_transformed = False
if get_prev_density:
prev_density = prev_result.sample_trace.get(return_type='logp',
flatten=True)
if isinstance(sample_trace, _HTrace):
if sample_trace.x_0 is None and get_prev_samples:
sample_trace.x_0 = prev_samples
sample_trace._x_0_transformed = prev_transformed
if get_prev_step:
if sample_trace._step_size is None:
if (this_step.reuse_step_size and
prev_result.sample_trace is not None):
sample_trace._step_size = _get_step_size(
prev_result.sample_trace)
if (sample_trace._metric == 'diag' or
sample_trace._metric == 'full'):
if (this_step.reuse_metric and
prev_result.sample_trace is not None):
sample_trace._metric = _get_metric(
prev_result.sample_trace, sample_trace._metric)
if this_step.has_surrogate:
if not isinstance(self._density, Density):
raise RuntimeError('self.density should be a Density for '
'surrogate modeling.')
self._density.surrogate_list = this_step._surrogate_list
if this_step._fitted:
var_dicts = None
else:
if not get_prev_samples:
raise RuntimeError('You did not give me samples to fit '
'the surrogate model.')
if (this_step.n_eval > 0 and
prev_samples.shape[0] < this_step.n_eval):
raise RuntimeError(
'I need {} points to fit the surrogate model, but I'
' can find at most {} points.'.format(
this_step.n_eval, prev_samples.shape[0]))
if i > 0 and not prev_step.has_surrogate:
warnings.warn(
'you are doing surrogate modeling after sampling '
'the true density. Please make sure this is what '
'you want.', RuntimeWarning)
if get_prev_density:
i_resample = this_step.resampler(prev_density,
this_step.n_eval)
else:
if this_step.n_eval > 0:
i_resample = np.arange(this_step.n_eval)
else:
i_resample = np.arange(prev_samples.shape[0])
x_fit = prev_samples[i_resample]
self.density.use_surrogate = False
self.density.original_space = True
with self.parallel_backend:
var_dicts = np.asarray(
self.parallel_backend.map(self.density.fun, x_fit))
var_dicts_fit = var_dicts.copy()
if this_step.reuse_samples:
for j in range(i):
if (j + this_step.reuse_samples >= i or
this_step.reuse_samples < 0):
var_dicts_fit = np.concatenate(
(var_dicts_fit, results[j].var_dicts))
if this_step.logp_cutoff and get_prev_density:
logp_fit = np.concatenate(
[vd.fun[self.density.density_name] for vd in
var_dicts_fit])
logq_fit = prev_density[i_resample]
logq_min = np.min(logq_fit)
np.delete(prev_samples, i_resample, axis=0)
np.delete(prev_density, i_resample, axis=0)
is_good = logp_fit > logq_min
n_good = np.sum(is_good)
f_good = n_good / logp_fit.size
if f_good < 0.5:
warnings.warn('more than half of the samples are '
'abandoned because their logp < '
'logq_min.', RuntimeWarning)
if f_good == 0.:
raise RuntimeError(
'f_good is 0, indicating that the samples seem '
'very bad. Please check your recipe setup. You '
'may also want to try logp_cutoff=False for the'
' SampleStep.')
var_dicts_fit = var_dicts_fit[is_good]
while len(var_dicts_fit) < this_step.n_eval_min:
n_eval_supp = ((this_step.n_eval_min -
len(var_dicts_fit)) / f_good *
this_step.alpha_supp)
n_eval_supp = max(int(n_eval_supp), 4)
if prev_samples.shape[0] < n_eval_supp:
raise RuntimeError('I do not have enough '
'supplementary points.')
i_resample = this_step.resampler(prev_density,
n_eval_supp)
x_fit = prev_samples[i_resample]
self.density.use_surrogate = False
self.density.original_space = True
with self.parallel_backend:
var_dicts_supp = np.asarray(
self.parallel_backend.map(self.density.fun,
x_fit))
logp_supp = np.concatenate(
[vd.fun[self.density.density_name] for vd in
var_dicts_supp])
np.delete(prev_samples, i_resample, axis=0)
np.delete(prev_density, i_resample, axis=0)
is_good = logp_supp > logq_min
n_good = np.sum(is_good)
if n_good < logp_supp.size / 2:
warnings.warn(
'more than half of the samples are '
'abandoned because their logp < logq_min.',
RuntimeWarning)
var_dicts = np.concatenate((var_dicts,
var_dicts_supp))
var_dicts_fit = np.concatenate(
(var_dicts_fit, var_dicts_supp[is_good]))
self.density.fit(var_dicts_fit)
self.density.use_surrogate = True
t = sample(self.density, sample_trace=sample_trace,
parallel_backend=self.parallel_backend)
x = t.get(flatten=True)
surrogate_list = deepcopy(self._density._surrogate_list)
results.append(SampleResult(
samples=x, surrogate_list=surrogate_list,
var_dicts=var_dicts, sample_trace=t))
else:
if isinstance(self._density, Density):
self.density.use_surrogate = False
t = sample(self.density, sample_trace=sample_trace,
parallel_backend=self.parallel_backend)
x = t.get(flatten=True)
results.append(SampleResult(samples=x, surrogate_list=(),
var_dicts=None, sample_trace=t))
steps.append(this_step)
print('\n *** SampleStep proceeding: iter #{} finished. *** '
'\n'.format(i))
recipe_trace._i_sample += 1
i = recipe_trace._i_sample
this_step = recipe_trace._strategy.update(results)
print('\n ***** SampleStep finished. ***** \n')
    def _pos_step(self):
        """Run the PostStep: collect samples from the previous Optimize/Sample
        steps, optionally do importance sampling (IS) against the true density,
        optionally estimate the evidence, and store a PostResult on the trace.

        Naming convention in this method: `*_p` refers to quantities from the
        true density logp, `*_q` to quantities from the surrogate density logq.
        """
        step = self.recipe_trace._s_post
        recipe_trace = self.recipe_trace
        # Initialize every output slot to None so PostResult can always be
        # built at the end, regardless of which branches below were taken.
        x_p = None
        x_q = None
        f_logp = None
        f_logq = None
        logp_p = None
        logq_q = None
        x_max = None
        f_max = None
        samples = None
        weights = None
        weights_trunc = None
        logp = None
        logq = None
        trace_p = None
        trace_q = None
        logz = None
        logz_err = None
        if recipe_trace._i_optimize:
            # Carry over the optimizer's best point regardless of sampling.
            opt_result = recipe_trace._r_optimize[-1]
            x_max = opt_result.x_max
            f_max = opt_result.f_max
        if recipe_trace._i_sample:
            prev_step = recipe_trace._s_sample[-1]
            prev_result = recipe_trace._r_sample[-1]
            if prev_step.has_surrogate:
                # Samples come from the surrogate: keep them as q-samples.
                trace_q = prev_result.sample_trace
                x_q = trace_q.get(return_type='samples', flatten=False)
                logq_q = trace_q.get(return_type='logp', flatten=False)
                self.density._surrogate_list = prev_step.surrogate_list
            else:
                # Samples are exact draws from the true density.
                trace_p = prev_result.sample_trace
                x_p = trace_p.get(return_type='samples', flatten=False)
                logp_p = trace_p.get(return_type='logp', flatten=False)
        elif recipe_trace._i_optimize:
            prev_step = recipe_trace._s_optimize
            prev_result = recipe_trace._r_optimize[-1]
            if prev_step.has_surrogate and prev_result.sample_trace is not None:
                # sample_trace in OptimizeStep will be ignored
                # if has_surrogate is False
                trace_q = prev_result.sample_trace
                x_q = trace_q.get(return_type='samples', flatten=False)
                logq_q = trace_q.get(return_type='logp', flatten=False)
                self.density._surrogate_list = prev_step.surrogate_list
            else:
                warnings.warn('no existing samples found.', RuntimeWarning)
        else:
            raise RuntimeError('you have run neither OptimizeStep nor '
                               'SampleStep before the PostStep.')
        if x_p is not None:
            # Exact samples from logp: uniform weights, no IS needed.
            samples = x_p.reshape((-1, x_p.shape[-1]))
            weights = np.ones(samples.shape[0])
            weights_trunc = weights
            logp = logp_p.reshape(-1)
            if step.evidence_method is not None:
                logz, logz_err = step.evidence_method(
                    x_p=trace_p, logp=self._f_logp, logp_p=logp_p)
            if step.n_is > 0:
                warnings.warn('n_is will not be used when we already have exact'
                              ' samples from logp.', RuntimeWarning)
        elif x_q is not None:
            # Surrogate samples: optionally reweight them with IS using
            # n_is evaluations of the true density.
            samples = x_q.reshape((-1, x_q.shape[-1]))
            logq = logq_q.reshape(-1)
            if step.n_is != 0:
                if step.n_is < 0 or step.n_is > samples.shape[0]:
                    # n_is < 0 means "use everything"; also clamp requests
                    # larger than the available sample pool.
                    if step.n_is > 0:
                        warnings.warn(
                            'you set n_is as {}, but I can only get {} samples '
                            'from the previous step, so I will use all these '
                            'samples to do IS for now.'.format(step.n_is,
                            samples.shape[0]), RuntimeWarning)
                    n_is = samples.shape[0]
                else:
                    # Thin the chain to n_is roughly evenly spaced samples.
                    n_is = step.n_is
                    foo = int(samples.shape[0] / n_is)
                    samples = samples[::foo][:n_is]
                    logq = logq[::foo][:n_is]
                self.density.use_surrogate = False
                self.density.original_space = True
                with self.parallel_backend:
                    # Evaluate the true logp at the selected surrogate samples.
                    logp = np.asarray(
                        self.parallel_backend.map(self.density.logp,
                                                  samples)).reshape(-1)
                weights = np.exp(logp - logq)
                if step.k_trunc < 0:
                    # Negative k_trunc disables weight truncation.
                    weights_trunc = weights.copy()
                else:
                    weights_trunc = np.clip(weights, 0, np.mean(weights) *
                                            n_is**step.k_trunc)
                if step.evidence_method is not None:
                    # Evidence of logp = evidence of logq times the mean IS
                    # ratio; errors are combined in quadrature, with an
                    # autocorrelation-time correction for the IS ratio term.
                    logz_q, logz_err_q = step.evidence_method(
                        x_p=trace_q, logp=self._f_logq, logp_p=logq_q)
                    logz_pq = logsumexp(logp - logq, b=1 / logp.size)
                    foo = np.exp(logp - logq - logz_pq)
                    tau = float(integrated_time(foo))
                    logz_err_pq = (
                        np.var(foo) / np.mean(foo)**2 / logp.size * tau)**0.5
                    logz = logz_q + logz_pq
                    logz_err = (logz_err_q**2 + logz_err_pq**2)**0.5
            else:
                # n_is == 0: keep raw surrogate samples with uniform weights.
                weights = np.ones(samples.shape[0])
                weights_trunc = weights
                if step.evidence_method is not None:
                    warnings.warn('since n_is is 0, we are computing the '
                                  'evidence of logq, which may differ from the '
                                  'evidence of logp.', RuntimeWarning)
                    logz, logz_err = step.evidence_method(
                        x_p=trace_q, logp=self._f_logq, logp_p=logq_q)
        else:
            if (step.n_is is not None) or (step.evidence_method is not None):
                warnings.warn('n_is and evidence_method will not be used when '
                              'we only have Laplace samples.', RuntimeWarning)
        try:
            n_call = recipe_trace.n_call + step.n_is
            warnings.warn('as of now, n_call does not take the possible logp '
                          'calls during evidence evaluation into account.',
                          RuntimeWarning)
        except Exception:
            # n_call is best-effort bookkeeping; fall back to None on failure.
            n_call = None
        recipe_trace._r_post = PostResult(
            samples, weights, weights_trunc, logp, logq, logz, logz_err, x_p,
            x_q, logp_p, logq_q, trace_p, trace_q, n_call, x_max, f_max)
        recipe_trace._i_post = 1
        print('\n ***** PostStep finished. ***** \n')
def _f_logp(self, x):
return self.density.logp(x, original_space=True, use_surrogate=False)
def _f_logq(self, x):
return self.density.logp(x, original_space=True, use_surrogate=True)
def run(self):
"""Running the Recipe."""
f_opt, f_sam, f_pos = self.recipe_trace.finished
if not f_opt:
self._opt_step()
if not f_sam:
self._sam_step()
if not f_pos:
self._pos_step()
def get(self):
"""
Getting the results of the Recipe.
Returns
-------
result : PostResult
The results of the Recipe.
"""
try:
return self.recipe_trace._r_post
except Exception:
raise RuntimeError('you have not run a PostStep.')
| 39.214181
| 86
| 0.564246
|
4a08dbbdb51c2100efc68dedd66aee3300d85782
| 9,877
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20201101/get_p2s_vpn_gateway_p2s_vpn_connection_health.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20201101/get_p2s_vpn_gateway_p2s_vpn_connection_health.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20201101/get_p2s_vpn_gateway_p2s_vpn_connection_health.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetP2sVpnGatewayP2sVpnConnectionHealthResult',
    'AwaitableGetP2sVpnGatewayP2sVpnConnectionHealthResult',
    'get_p2s_vpn_gateway_p2s_vpn_connection_health',
]
@pulumi.output_type
class GetP2sVpnGatewayP2sVpnConnectionHealthResult:
    """
    P2SVpnGateway Resource.
    """
    # Generated output type: the constructor validates each raw invoke output
    # and stores it via pulumi.set; the @property getters below read it back
    # with pulumi.get. Regenerate rather than hand-edit this class.
    def __init__(__self__, custom_dns_servers=None, etag=None, id=None, is_routing_preference_internet=None, location=None, name=None, p2_s_connection_configurations=None, provisioning_state=None, tags=None, type=None, virtual_hub=None, vpn_client_connection_health=None, vpn_gateway_scale_unit=None, vpn_server_configuration=None):
        if custom_dns_servers and not isinstance(custom_dns_servers, list):
            raise TypeError("Expected argument 'custom_dns_servers' to be a list")
        pulumi.set(__self__, "custom_dns_servers", custom_dns_servers)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if is_routing_preference_internet and not isinstance(is_routing_preference_internet, bool):
            raise TypeError("Expected argument 'is_routing_preference_internet' to be a bool")
        pulumi.set(__self__, "is_routing_preference_internet", is_routing_preference_internet)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if p2_s_connection_configurations and not isinstance(p2_s_connection_configurations, list):
            raise TypeError("Expected argument 'p2_s_connection_configurations' to be a list")
        pulumi.set(__self__, "p2_s_connection_configurations", p2_s_connection_configurations)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if virtual_hub and not isinstance(virtual_hub, dict):
            raise TypeError("Expected argument 'virtual_hub' to be a dict")
        pulumi.set(__self__, "virtual_hub", virtual_hub)
        if vpn_client_connection_health and not isinstance(vpn_client_connection_health, dict):
            raise TypeError("Expected argument 'vpn_client_connection_health' to be a dict")
        pulumi.set(__self__, "vpn_client_connection_health", vpn_client_connection_health)
        if vpn_gateway_scale_unit and not isinstance(vpn_gateway_scale_unit, int):
            raise TypeError("Expected argument 'vpn_gateway_scale_unit' to be a int")
        pulumi.set(__self__, "vpn_gateway_scale_unit", vpn_gateway_scale_unit)
        if vpn_server_configuration and not isinstance(vpn_server_configuration, dict):
            raise TypeError("Expected argument 'vpn_server_configuration' to be a dict")
        pulumi.set(__self__, "vpn_server_configuration", vpn_server_configuration)
    @property
    @pulumi.getter(name="customDnsServers")
    def custom_dns_servers(self) -> Optional[Sequence[str]]:
        """
        List of all customer specified DNS servers IP addresses.
        """
        return pulumi.get(self, "custom_dns_servers")
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="isRoutingPreferenceInternet")
    def is_routing_preference_internet(self) -> Optional[bool]:
        """
        Enable Routing Preference property for the Public IP Interface of the P2SVpnGateway.
        """
        return pulumi.get(self, "is_routing_preference_internet")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="p2SConnectionConfigurations")
    def p2_s_connection_configurations(self) -> Optional[Sequence['outputs.P2SConnectionConfigurationResponse']]:
        """
        List of all p2s connection configurations of the gateway.
        """
        return pulumi.get(self, "p2_s_connection_configurations")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the P2S VPN gateway resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="virtualHub")
    def virtual_hub(self) -> Optional['outputs.SubResourceResponse']:
        """
        The VirtualHub to which the gateway belongs.
        """
        return pulumi.get(self, "virtual_hub")
    @property
    @pulumi.getter(name="vpnClientConnectionHealth")
    def vpn_client_connection_health(self) -> 'outputs.VpnClientConnectionHealthResponse':
        """
        All P2S VPN clients' connection health status.
        """
        return pulumi.get(self, "vpn_client_connection_health")
    @property
    @pulumi.getter(name="vpnGatewayScaleUnit")
    def vpn_gateway_scale_unit(self) -> Optional[int]:
        """
        The scale unit for this p2s vpn gateway.
        """
        return pulumi.get(self, "vpn_gateway_scale_unit")
    @property
    @pulumi.getter(name="vpnServerConfiguration")
    def vpn_server_configuration(self) -> Optional['outputs.SubResourceResponse']:
        """
        The VpnServerConfiguration to which the p2sVpnGateway is attached to.
        """
        return pulumi.get(self, "vpn_server_configuration")
class AwaitableGetP2sVpnGatewayP2sVpnConnectionHealthResult(GetP2sVpnGatewayP2sVpnConnectionHealthResult):
    # Awaitable wrapper so the already-resolved result can also be used with
    # `await`. The unreachable `yield` makes __await__ a generator function,
    # which is what the awaitable protocol requires.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetP2sVpnGatewayP2sVpnConnectionHealthResult(
            custom_dns_servers=self.custom_dns_servers,
            etag=self.etag,
            id=self.id,
            is_routing_preference_internet=self.is_routing_preference_internet,
            location=self.location,
            name=self.name,
            p2_s_connection_configurations=self.p2_s_connection_configurations,
            provisioning_state=self.provisioning_state,
            tags=self.tags,
            type=self.type,
            virtual_hub=self.virtual_hub,
            vpn_client_connection_health=self.vpn_client_connection_health,
            vpn_gateway_scale_unit=self.vpn_gateway_scale_unit,
            vpn_server_configuration=self.vpn_server_configuration)
def get_p2s_vpn_gateway_p2s_vpn_connection_health(gateway_name: Optional[str] = None,
                                                  resource_group_name: Optional[str] = None,
                                                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetP2sVpnGatewayP2sVpnConnectionHealthResult:
    """
    P2SVpnGateway Resource.
    :param str gateway_name: The name of the P2SVpnGateway.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['gatewayName'] = gateway_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Invoke the provider function and unwrap the typed result value.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20201101:getP2sVpnGatewayP2sVpnConnectionHealth', __args__, opts=opts, typ=GetP2sVpnGatewayP2sVpnConnectionHealthResult).value
    return AwaitableGetP2sVpnGatewayP2sVpnConnectionHealthResult(
        custom_dns_servers=__ret__.custom_dns_servers,
        etag=__ret__.etag,
        id=__ret__.id,
        is_routing_preference_internet=__ret__.is_routing_preference_internet,
        location=__ret__.location,
        name=__ret__.name,
        p2_s_connection_configurations=__ret__.p2_s_connection_configurations,
        provisioning_state=__ret__.provisioning_state,
        tags=__ret__.tags,
        type=__ret__.type,
        virtual_hub=__ret__.virtual_hub,
        vpn_client_connection_health=__ret__.vpn_client_connection_health,
        vpn_gateway_scale_unit=__ret__.vpn_gateway_scale_unit,
        vpn_server_configuration=__ret__.vpn_server_configuration)
| 41.851695
| 332
| 0.687861
|
4a08de59790cdc37a1a4e3c4355531030dba8ea8
| 49,060
|
py
|
Python
|
test/quantization/core/test_workflow_module.py
|
sanchitintel/pytorch
|
416f59308023b5d98f6ea4ecdd0bcd3829edb7a7
|
[
"Intel"
] | 24
|
2020-11-02T21:25:12.000Z
|
2022-03-17T07:20:33.000Z
|
test/quantization/core/test_workflow_module.py
|
sanchitintel/pytorch
|
416f59308023b5d98f6ea4ecdd0bcd3829edb7a7
|
[
"Intel"
] | 1
|
2019-08-01T00:17:43.000Z
|
2019-09-12T01:31:53.000Z
|
test/quantization/core/test_workflow_module.py
|
sanchitintel/pytorch
|
416f59308023b5d98f6ea4ecdd0bcd3829edb7a7
|
[
"Intel"
] | 12
|
2020-11-06T05:00:37.000Z
|
2022-01-30T19:17:36.000Z
|
# Torch
import torch
from torch.ao.quantization import (
MinMaxObserver,
PerChannelMinMaxObserver,
MovingAverageMinMaxObserver,
MovingAveragePerChannelMinMaxObserver,
HistogramObserver,
RecordingObserver,
PlaceholderObserver,
NoopObserver,
FakeQuantize,
FixedQParamsFakeQuantize,
default_debug_qconfig,
default_observer,
default_histogram_observer,
default_per_channel_weight_observer,
get_observer_dict,
prepare,
QConfig,
FusedMovingAvgObsFakeQuantize,
)
import torch.nn as nn
# Standard library
import copy
import io
import itertools
import unittest
import math
import numpy as np
# Testing utils
from hypothesis import given, settings
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
AnnotatedSingleLayerLinearModel,
test_only_eval_fn,
SingleLayerLinearModel,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
supported_qengines,
override_qengines,
_fake_quantize_per_channel_affine_reference,
_fake_quantize_per_channel_affine_grad_reference,
to_tensor,
)
# Seed for numpy-based randomness — presumably used by tests further down
# this file for reproducibility; not referenced in this chunk (TODO confirm).
NP_RANDOM_SEED = 19
# Shared numeric tolerance for approximate float comparisons in this file.
tolerance = 1e-6
class TestObserver(QuantizationTestCase):
    """Tests for the observer modules of torch.ao.quantization: qparam
    computation, per-channel behavior, scripting, serialization, device
    affinity, and degenerate inputs."""
    @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
           qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),
           reduce_range=st.booleans())
    def test_per_tensor_observers(self, qdtype, qscheme, reduce_range):
        # reduce_range cannot be true for symmetric quantization with uint8
        if qdtype == torch.quint8 and qscheme == torch.per_tensor_symmetric:
            reduce_range = False
        ObserverList = [MinMaxObserver(dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range),
                        MovingAverageMinMaxObserver(averaging_constant=0.5,
                                                    dtype=qdtype,
                                                    qscheme=qscheme,
                                                    reduce_range=reduce_range)]
        for myobs in ObserverList:
            # Calculate Qparams should return with a warning for observers with no data
            qparams = myobs.calculate_qparams()
            if type(myobs) == MinMaxObserver:
                x = torch.tensor([1.0, 2.0, 2.0, 3.0, 4.0, 5.0, 6.0])
                y = torch.tensor([4.0, 5.0, 5.0, 6.0, 7.0, 8.0])
            else:
                # Moving average of min/max for x and y matches that of
                # extreme values for x/y used for minmax observer
                x = torch.tensor([0.0, 2.0, 2.0, 3.0, 4.0, 5.0, 6.0])
                y = torch.tensor([2.0, 5.0, 5.0, 6.0, 7.0, 10.0])
            result = myobs(x)
            result = myobs(y)
            # Observers act as identity on the data they observe.
            self.assertEqual(result, y)
            self.assertEqual(myobs.min_val, 1.0)
            self.assertEqual(myobs.max_val, 8.0)
            qparams = myobs.calculate_qparams()
            # Reference scales/zero-points below are hand-computed for the
            # min/max range [1, 8] under each (scheme, dtype, reduce) combo.
            if reduce_range:
                if qscheme == torch.per_tensor_symmetric:
                    ref_scale = 0.062745 * 255 / 127
                    ref_zero_point = 0 if qdtype is torch.qint8 else 128
                else:
                    ref_scale = 0.0313725 * 255 / 127
                    ref_zero_point = -64 if qdtype is torch.qint8 else 0
            else:
                if qscheme == torch.per_tensor_symmetric:
                    ref_scale = 0.062745
                    ref_zero_point = 0 if qdtype is torch.qint8 else 128
                else:
                    ref_scale = 0.0313725
                    ref_zero_point = -128 if qdtype is torch.qint8 else 0
            self.assertEqual(qparams[1].item(), ref_zero_point)
            self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)
            # Round-trip the observer state through torch.save/torch.load.
            state_dict = myobs.state_dict()
            b = io.BytesIO()
            torch.save(state_dict, b)
            b.seek(0)
            loaded_dict = torch.load(b)
            for key in state_dict:
                self.assertEqual(state_dict[key], loaded_dict[key])
            loaded_obs = MinMaxObserver(dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
            loaded_obs.load_state_dict(loaded_dict)
            loaded_qparams = loaded_obs.calculate_qparams()
            self.assertEqual(myobs.min_val, loaded_obs.min_val)
            self.assertEqual(myobs.max_val, loaded_obs.max_val)
            self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())
    @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
           qscheme=st.sampled_from((torch.per_channel_affine, torch.per_channel_symmetric, torch.per_channel_affine_float_qparams)),
           ch_axis=st.sampled_from((0, 1, 2, 3)), reduce_range=st.booleans())
    def test_per_channel_observers(self, qdtype, qscheme, ch_axis, reduce_range):
        # reduce_range cannot be true for symmetric quantization with uint8
        if qscheme == torch.per_channel_affine_float_qparams:
            reduce_range = False
        if qdtype == torch.quint8 and qscheme == torch.per_channel_symmetric:
            reduce_range = False
        ObserverList = [PerChannelMinMaxObserver(reduce_range=reduce_range,
                                                 ch_axis=ch_axis,
                                                 dtype=qdtype,
                                                 qscheme=qscheme),
                        MovingAveragePerChannelMinMaxObserver(averaging_constant=0.5,
                                                              reduce_range=reduce_range,
                                                              ch_axis=ch_axis,
                                                              dtype=qdtype,
                                                              qscheme=qscheme)]
        for myobs in ObserverList:
            # Calculate qparams should work for empty observers
            qparams = myobs.calculate_qparams()
            x = torch.tensor(
                [
                    [[[1.0, 2.0], [2.0, 2.5]], [[3.0, 4.0], [4.5, 6.0]]],
                    [[[-4.0, -3.0], [5.0, 5.0]], [[6.0, 3.0], [7.0, 8.0]]],
                ]
            )
            if type(myobs) == MovingAveragePerChannelMinMaxObserver:
                # Scaling the input tensor to model change in min/max values
                # across batches
                result = myobs(0.5 * x)
                result = myobs(1.5 * x)
                self.assertEqual(result, 1.5 * x)
            else:
                result = myobs(x)
                self.assertEqual(result, x)
            qparams = myobs.calculate_qparams()
            # Reference values indexed by ch_axis (0..3) for the fixed 2x2x2x2
            # input above; each inner list holds the two per-channel values.
            ref_min_vals = [[1.0, -4.0], [-4.0, 3.0], [-4.0, 2.0], [-4.0, -3.0]]
            ref_max_vals = [[6.0, 8.0], [5.0, 8.0], [6.0, 8.0], [7.0, 8.0]]
            per_channel_symmetric_ref_scales = [
                [0.04705882, 0.06274509],
                [0.03921569, 0.0627451],
                [0.04705882, 0.0627451],
                [0.05490196, 0.0627451],
            ]
            per_channel_affine_ref_scales = [
                [0.02352941, 0.04705882],
                [0.03529412, 0.03137255],
                [0.03921569, 0.03137255],
                [0.04313726, 0.04313726],
            ]
            per_channel_affine_qint8_zp = [
                [-128, -43],
                [-15, -128],
                [-26, -128],
                [-35, -58],
            ]
            per_channel_affine_float_qparams_ref_scales = [
                [0.0196, 0.0471],
                [0.0353, 0.0196],
                [0.0392, 0.0235],
                [0.0431, 0.0431],
            ]
            per_channel_affine_quint8_zp = [[0, 85], [113, 0], [102, 0], [93, 70]]
            self.assertEqual(myobs.min_val, ref_min_vals[ch_axis])
            self.assertEqual(myobs.max_val, ref_max_vals[ch_axis])
            if qscheme == torch.per_channel_symmetric:
                ref_scales = per_channel_symmetric_ref_scales[ch_axis]
                ref_zero_points = [0, 0] if qdtype is torch.qint8 else [128, 128]
            elif qscheme == torch.per_channel_affine_float_qparams:
                ref_scales = per_channel_affine_float_qparams_ref_scales[ch_axis]
                ref_zero_points = [-1 * ref_min_vals[ch_axis][i] / ref_scales[i] for i in range(len(ref_scales))]
            else:
                ref_scales = per_channel_affine_ref_scales[ch_axis]
                ref_zero_points = (
                    per_channel_affine_qint8_zp[ch_axis]
                    if qdtype is torch.qint8
                    else per_channel_affine_quint8_zp[ch_axis]
                )
            if reduce_range:
                ref_scales = [s * 255 / 127 for s in ref_scales]
                ref_zero_points = [math.floor(z / 2) for z in ref_zero_points]
            self.assertEqual(qparams[0], torch.tensor(ref_scales, dtype=qparams[0].dtype), rtol=1e-5, atol=0.0001)
            if qscheme == torch.per_channel_affine_float_qparams:
                self.assertEqual(qparams[1], torch.tensor(ref_zero_points, dtype=qparams[1].dtype), rtol=1e-5, atol=1)
            else:
                self.assertEqual(qparams[1], torch.tensor(ref_zero_points, dtype=qparams[1].dtype))
            # Test for serializability
            state_dict = myobs.state_dict()
            b = io.BytesIO()
            torch.save(state_dict, b)
            b.seek(0)
            loaded_dict = torch.load(b)
            for key in state_dict:
                self.assertEqual(state_dict[key], loaded_dict[key])
            loaded_obs = PerChannelMinMaxObserver(reduce_range=reduce_range, ch_axis=ch_axis, dtype=qdtype, qscheme=qscheme)
            loaded_obs.load_state_dict(loaded_dict)
            loaded_qparams = loaded_obs.calculate_qparams()
            self.assertEqual(myobs.min_val, loaded_obs.min_val)
            self.assertEqual(myobs.max_val, loaded_obs.max_val)
            self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())
    def test_observer_scriptable(self):
        # Eager observer and its TorchScript version must agree on qparams,
        # including after a save/load round-trip of the scripted module.
        obs_list = [MinMaxObserver(), MovingAverageMinMaxObserver()]
        for obs in obs_list:
            scripted = torch.jit.script(obs)
            x = torch.rand(3, 4)
            obs(x)
            scripted(x)
            self.assertEqual(obs.calculate_qparams(), scripted.calculate_qparams())
            buf = io.BytesIO()
            torch.jit.save(scripted, buf)
            buf.seek(0)
            loaded = torch.jit.load(buf)
            self.assertEqual(obs.calculate_qparams(), loaded.calculate_qparams())
    @unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    @override_qengines
    def test_state_dict_respects_device_affinity(self):
        """
        Tests that loading from a state dict loads buffers to the correct
        device.
        """
        device_cpu = torch.device('cpu')
        device_cuda = torch.device('cuda:0')
        test_cases = itertools.product(
            [device_cpu, device_cuda],
            [device_cpu, device_cuda],
            [MinMaxObserver, MovingAverageMinMaxObserver,
             PerChannelMinMaxObserver,
             MovingAveragePerChannelMinMaxObserver,
             # TODO: enable this (separate PR)
             # HistogramObserver,
             PlaceholderObserver, RecordingObserver, NoopObserver,
             FakeQuantize])
        for device_source, device_target, obs_cls in test_cases:
            # calibrated source model
            model = obs_cls()
            model.to(device_source)
            model(torch.randn(4, 1, 4, 4, device=device_source))
            # target model
            model2 = obs_cls()
            model2.to(device_target)
            model2.load_state_dict(model.state_dict())
            # verify that buffers stayed on model2's device
            model_devices = {p.device for p in model2.parameters()} | \
                {p.device for p in model2.buffers()}
            # some observers do not have any buffers, so lessEqual instead of
            # Equal
            self.assertLessEqual(len(model_devices), 1)
            if len(model_devices) == 1:
                model_device = next(iter(model_devices))
                self.assertEqual(model_device, device_target)
    def test_histogram_observer_consistent_buffer_shape(self):
        """
        Ensures that the buffer shapes do not change from uninitialized to
        initialized states for HistogramObserver.
        """
        obs = HistogramObserver()
        min_shape_before = obs.min_val.shape
        max_shape_before = obs.max_val.shape
        for _ in range(2):
            obs(torch.randn(4, 4, 4, 4))
        self.assertEqual(min_shape_before, obs.min_val.shape)
        self.assertEqual(max_shape_before, obs.max_val.shape)
    def test_histogram_observer_save_load_state_dict(self):
        """
        Smoke test on saving/loading state_dict
        """
        obs1 = HistogramObserver()
        obs1(torch.randn(4, 4, 4, 4))
        obs2 = HistogramObserver()
        obs2.load_state_dict(obs1.state_dict())
        self.assertEqual(obs2.min_val.shape, torch.Size([]))
        self.assertEqual(obs2.max_val.shape, torch.Size([]))
    def test_save_load_state_dict_script(self):
        """
        Tests that we can save and load state_dict for observers that are scripted
        in a quantized model.
        """
        obs_list = [MinMaxObserver, MovingAverageMinMaxObserver, HistogramObserver]
        for obs in obs_list:
            model = SingleLayerLinearModel().eval()
            qconfig = QConfig(activation=default_observer, weight=obs)
            qconfig_dict = {'' : qconfig}
            scripted = torch.jit.script(model)
            scripted = torch.ao.quantization.prepare_jit(scripted, qconfig_dict)
            x = torch.rand(5, 5)
            scripted(x)
            obs_dict = torch.ao.quantization.get_observer_state_dict(scripted)
            # Load stats
            scripted_2 = torch.jit.script(model)
            scripted_2 = torch.ao.quantization.prepare_jit(scripted_2, qconfig_dict)
            torch.ao.quantization.load_observer_state_dict(scripted_2, obs_dict)
            # Verify that state_dict matches exactly with original one.
            self.assertEqual(scripted.state_dict(), scripted_2.state_dict())
    @unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_observer_qparams_respects_device_affinity(self):
        """
        Ensure that the scale and zero_point returned by the observer
        are on the same device as the input tensor.
        """
        observerList = [MinMaxObserver(),
                        MovingAverageMinMaxObserver(),
                        PerChannelMinMaxObserver(),
                        MovingAveragePerChannelMinMaxObserver()]
        for obs in observerList:
            device = torch.device('cuda:1')
            x = torch.randn(1, 2, device=device)
            obs.to(device)
            result = obs(x)
            scale, zero_point = obs.calculate_qparams()
            self.assertEqual(x.device, scale.device)
            self.assertEqual(x.device, zero_point.device)
    def test_zero_numel(self):
        # Observers and fake-quant modules must tolerate empty tensors.
        obs_list = [MinMaxObserver, MovingAverageMinMaxObserver,
                    PerChannelMinMaxObserver,
                    MovingAveragePerChannelMinMaxObserver, HistogramObserver,
                    FakeQuantize, FixedQParamsFakeQuantize]
        for obs_cls in obs_list:
            if obs_cls is FixedQParamsFakeQuantize:
                obs = obs_cls(0.1, 0)
            else:
                obs = obs_cls()
            x = torch.tensor([])
            # verify no crash
            x = obs(x)
    def _test_memoryless(self, obs_class):
        # Helper: a memoryless observer's qparams must track only the most
        # recent batch, so re-observing `x` restores the original qparams.
        obs = obs_class(memoryless=True)
        x = torch.randn((3, 3))
        obs(x)
        params = obs.calculate_qparams()
        for _ in range(20):
            obs(10 * torch.randn((3, 3)))
        self.assertNotEqual(params, obs.calculate_qparams())
        obs(x)
        self.assertEqual(params, obs.calculate_qparams())
    def test_memoryless_minmaxobserver(self):
        self._test_memoryless(MinMaxObserver)
    def test_memoryless_perchannelminmaxobserver(self):
        self._test_memoryless(PerChannelMinMaxObserver)
# HistogramObserver that works like it does on master
class _ReferenceHistogramObserver(HistogramObserver):
    # Reference (pre-refactor) implementation of the non-linear parameter
    # search, kept to compare against the current HistogramObserver.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    @torch.jit.ignore
    def _non_linear_param_search(self):
        r"""Non-linear parameter search.
        An approximation for L2 error minimization for selecting min/max.
        By selecting new min/max, we filter out outliers in input distribution.
        This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in
        caffe2/quantization/server/norm_minimization.cc
        """
        def _get_norm(delta_begin, delta_end, density, norm_type):
            r"""
            Compute the norm of the values uniformaly distributed between
            delta_begin and delta_end.
            norm = density * (integral_{begin, end} x^2)
                 = density * (end^3 - begin^3) / 3
            """
            assert norm_type == "L2", "Only L2 norms are currently supported"
            norm = 0.0
            if norm_type == "L2":
                norm = (
                    delta_end * delta_end * delta_end
                    - delta_begin * delta_begin * delta_begin
                ) / 3
            return density * norm
        def _compute_quantization_error(next_start_bin, next_end_bin, norm_type):
            r"""
            Compute the quantization error if we use start_bin to end_bin as the
            min and max to do the quantization.
            """
            bin_width = (self.max_val.item() - self.min_val.item()) / self.bins
            norm = 0.0
            dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins
            if dst_bin_width == 0.0:
                return 0.0
            for src_bin in range(self.bins):
                # distances from the beginning of first dst_bin to the beginning and
                # end of src_bin
                src_bin_begin = (src_bin - next_start_bin) * bin_width
                src_bin_end = src_bin_begin + bin_width
                # which dst_bins the beginning and end of src_bin belong to?
                dst_bin_of_begin = min(
                    self.dst_nbins - 1, max(0.0, math.floor(src_bin_begin / dst_bin_width))
                )
                dst_bin_of_end = min(
                    self.dst_nbins - 1, max(0.0, math.floor(src_bin_end / dst_bin_width))
                )
                dst_bin_of_begin_center = (
                    dst_bin_of_begin * dst_bin_width + dst_bin_width / 2
                )
                density = self.histogram[src_bin] / bin_width
                if dst_bin_of_begin == dst_bin_of_end:
                    # if src_bin is entirely within 1 dst_bin
                    delta_begin = src_bin_begin - dst_bin_of_begin_center
                    delta_end = src_bin_end - dst_bin_of_begin_center
                    norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
                else:
                    # src_bin straddles several dst_bins: first partial bin,
                    # then whole interior bins, then the last partial bin.
                    delta_begin = src_bin_begin - dst_bin_of_begin_center
                    delta_end = dst_bin_width / 2
                    norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
                    norm = norm + (dst_bin_of_end - dst_bin_of_begin - 1) * _get_norm(
                        -dst_bin_width / 2, dst_bin_width / 2, density, norm_type
                    )
                    dst_bin_of_end_center = (
                        dst_bin_of_end * dst_bin_width + dst_bin_width / 2
                    )
                    delta_begin = -dst_bin_width / 2
                    delta_end = src_bin_end - dst_bin_of_end_center
                    norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
            return norm
        assert self.histogram.size()[0] == self.bins, "bins mistmatch"
        bin_width = (self.max_val - self.min_val) / self.bins
        # cumulative sum
        total = sum(self.histogram)
        cSum = torch.cumsum(self.histogram, dim=0)
        stepsize = 1e-5  # granularity
        alpha = 0.0  # lower bound
        beta = 1.0  # upper bound
        start_bin = 0
        end_bin = self.bins - 1
        norm_min = float("inf")
        # Greedily shrink the [alpha, beta] quantile window, moving whichever
        # edge cuts off more bins, until the L2 quantization error increases.
        while alpha < beta:
            # Find the next step
            next_alpha = alpha + stepsize
            next_beta = beta - stepsize
            # find the left and right bins between the quantile bounds
            l = start_bin
            r = end_bin
            while l < end_bin and cSum[l] < next_alpha * total:
                l = l + 1
            while r > start_bin and cSum[r] > next_beta * total:
                r = r - 1
            # decide the next move
            next_start_bin = start_bin
            next_end_bin = end_bin
            if (l - start_bin) > (end_bin - r):
                # move the start bin
                next_start_bin = l
                alpha = next_alpha
            else:
                # move the end bin
                next_end_bin = r
                beta = next_beta
            if next_start_bin == start_bin and next_end_bin == end_bin:
                continue
            # calculate the quantization error using next_start_bin and next_end_bin
            norm = _compute_quantization_error(next_start_bin, next_end_bin, "L2")
            if norm > norm_min:
                break
            norm_min = norm
            start_bin = next_start_bin
            end_bin = next_end_bin
        new_min = self.min_val + bin_width * start_bin
        new_max = self.min_val + bin_width * (end_bin + 1)
        return new_min, new_max
class TestRecordHistogramObserver(QuantizationTestCase):
    """Tests for RecordingObserver: tensor recording during calibration and
    TorchScript compatibility."""
    # TODO: move this to quantize.py
    def test_record_observer(self):
        for qengine in supported_qengines:
            with override_quantized_engine(qengine):
                model = AnnotatedSingleLayerLinearModel()
                model.qconfig = default_debug_qconfig
                model = prepare(model)
                # run the evaluation and dump all tensors
                test_only_eval_fn(model, self.calib_data)
                test_only_eval_fn(model, self.calib_data)
                observer_dict = {}
                get_observer_dict(model, observer_dict)
                self.assertTrue('fc1.module.activation_post_process' in observer_dict.keys(),
                                'observer is not recorded in the dict')
                # Two eval passes over calib_data => twice as many recorded tensors.
                self.assertEqual(len(observer_dict['fc1.module.activation_post_process'].get_tensor_value()),
                                 2 * len(self.calib_data))
                self.assertEqual(observer_dict['fc1.module.activation_post_process'].get_tensor_value()[0],
                                 model(self.calib_data[0][0]))
    @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
           qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)))
    def test_observer_scriptable(self, qdtype, qscheme):
        obs = RecordingObserver(dtype=qdtype, qscheme=qscheme)
        scripted = torch.jit.script(obs)
        x = torch.rand(3, 4)
        obs(x)
        scripted(x)
        self.assertTrue(torch.equal(obs.get_tensor_value()[0], scripted.get_tensor_value()[0]))
        # Recorded tensors must survive a scripted-module save/load round-trip.
        buf = io.BytesIO()
        torch.jit.save(scripted, buf)
        buf.seek(0)
        loaded = torch.jit.load(buf)
        self.assertTrue(torch.equal(obs.get_tensor_value()[0], loaded.get_tensor_value()[0]))
class TestHistogramObserver(QuantizationTestCase):
    """Tests for ``HistogramObserver``: scripting, qparam computation,
    serialization, degenerate inputs, and parity with the reference
    implementation."""

    @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
           qscheme=st.sampled_from(
               (torch.per_tensor_affine, torch.per_tensor_symmetric))
           )
    def test_observer_scriptable(self, qdtype, qscheme):
        """Scripting and save/load must preserve the accumulated histogram."""
        ob_list = [
            HistogramObserver(dtype=qdtype, qscheme=qscheme),
            default_histogram_observer()
        ]
        for obs in ob_list:
            scripted = torch.jit.script(obs)

            x = torch.rand(3, 4)
            obs(x)
            scripted(x)
            self.assertTrue(torch.equal(obs.histogram, scripted.histogram))
            buf = io.BytesIO()
            torch.jit.save(scripted, buf)
            buf.seek(0)
            loaded = torch.jit.load(buf)
            # Bug fix: compare against the observer loaded from the buffer.
            # Previously this re-checked `scripted`, so the save/load
            # round-trip was never actually verified.
            self.assertTrue(torch.equal(obs.histogram, loaded.histogram))

    @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
           qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),
           reduce_range=st.booleans())
    @settings(max_examples=10)
    def test_histogram_observer(self, qdtype, qscheme, reduce_range):
        """Feed two batches through a 3-bin observer and check min/max,
        histogram contents, computed qparams, and state_dict round-trip."""
        myobs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
        # Calculate qparams should work for empty observers
        qparams = myobs.calculate_qparams()
        x = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)
        y = torch.tensor([5.0, 6.0, 7.0, 8.0])
        out_x = myobs(x)
        # The observer must be a pass-through that keeps autograd intact.
        self.assertTrue(out_x.requires_grad)
        myobs(y)
        self.assertEqual(myobs.min_val, 2.0)
        self.assertEqual(myobs.max_val, 8.0)
        # Values 2..8 over 3 bins of width 2: [2,4)->2, [4,6)->3, [6,8]->3.
        self.assertEqual(myobs.histogram, [2., 3., 3.])
        qparams = myobs.calculate_qparams()
        if reduce_range:
            # reduce_range shrinks the quant range from 255 to 127 steps.
            if qscheme == torch.per_tensor_symmetric:
                ref_scale = 0.0470588 * 255 / 127
                ref_zero_point = 0 if qdtype is torch.qint8 else 128
            else:
                ref_scale = 0.0235294 * 255 / 127
                ref_zero_point = -64 if qdtype is torch.qint8 else 0
        else:
            if qscheme == torch.per_tensor_symmetric:
                ref_scale = 0.0470588
                ref_zero_point = 0 if qdtype is torch.qint8 else 128
            else:
                ref_scale = 0.0235294
                ref_zero_point = -128 if qdtype is torch.qint8 else 0
        self.assertEqual(qparams[1].item(), ref_zero_point)
        self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)
        # Test for serializability
        state_dict = myobs.state_dict()
        b = io.BytesIO()
        torch.save(state_dict, b)
        b.seek(0)
        loaded_dict = torch.load(b)
        for key in state_dict:
            self.assertEqual(state_dict[key], loaded_dict[key])
        loaded_obs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
        loaded_obs.load_state_dict(loaded_dict)
        loaded_qparams = loaded_obs.calculate_qparams()
        self.assertEqual(myobs.min_val, loaded_obs.min_val)
        self.assertEqual(myobs.max_val, loaded_obs.max_val)
        self.assertEqual(myobs.histogram, loaded_obs.histogram)
        self.assertEqual(myobs.bins, loaded_obs.bins)
        self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())

    def test_histogram_observer_one_sided(self):
        """All-nonnegative input must yield min_val == 0 and zero_point == 0."""
        myobs = HistogramObserver(bins=8, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)
        x = torch.tensor([0.0, 0.3, 1.2, 1.7])
        y = torch.tensor([0.1, 1.3, 2.0, 2.7])
        myobs(x)
        myobs(y)
        self.assertEqual(myobs.min_val, 0)
        qparams = myobs.calculate_qparams()
        self.assertEqual(qparams[1].item(), 0)

    def test_histogram_observer_same_inputs(self):
        """Repeated and constant inputs must not corrupt the histogram."""
        myobs = HistogramObserver(bins=3, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False)
        w = torch.ones(4, requires_grad=True)
        x = torch.zeros(4, requires_grad=True)
        y = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)
        z = torch.tensor([5.0, 6.0, 7.0, 8.0])
        myobs(w)
        myobs(x)
        myobs(x)
        myobs(y)
        myobs(z)
        qparams = myobs.calculate_qparams()
        self.assertEqual(myobs.min_val, 2.0)
        self.assertEqual(myobs.max_val, 8.0)
        self.assertEqual(myobs.histogram, [2., 3., 3.])

    @given(N=st.sampled_from([10, 1000]),
           bins=st.sampled_from([256, 512, 1024, 2048]),
           dtype=st.sampled_from([torch.qint8, torch.quint8]),
           qscheme=st.sampled_from([torch.per_tensor_affine, torch.per_tensor_symmetric]),
           reduce_range=st.booleans())
    def test_histogram_observer_against_reference(self, N, bins, dtype, qscheme, reduce_range):
        """The production observer must produce the same qparams as the
        pure-Python reference implementation over random data."""
        ref_obs = _ReferenceHistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)
        my_obs = HistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)
        for _ in range(10):
            X = torch.randn(N)
            my_obs(X)
            ref_obs(X)

        ref_qparams = ref_obs.calculate_qparams()
        my_qparams = my_obs.calculate_qparams()
        self.assertEqual(ref_qparams, my_qparams)
class TestFakeQuantize(TestCase):
    """Tests for the per-channel ``FakeQuantize`` module: forward/backward
    against reference implementations, and state_dict serialization."""

    @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
           X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
                                   qparams=hu.qparams(dtypes=torch.qint8)))
    def test_fq_module_per_channel(self, device, X):
        """Forward and backward of a per-channel FakeQuantize module must
        match the reference fake-quant and its gradient."""
        np.random.seed(NP_RANDOM_SEED)
        X, (scale, zero_point, axis, torch_type) = X
        quant_min = torch.iinfo(torch_type).min
        quant_max = torch.iinfo(torch_type).max

        X = to_tensor(X, device)
        # Gradients are needed so the backward pass below can be checked.
        X.requires_grad_()
        fq_module = FakeQuantize(default_per_channel_weight_observer, quant_min, quant_max, ch_axis=axis).to(device)
        Y_prime = fq_module(X)
        # The observer run inside forward must have produced qparams.
        assert fq_module.scale is not None
        assert fq_module.zero_point is not None
        Y = _fake_quantize_per_channel_affine_reference(X, fq_module.scale,
                                                        fq_module.zero_point, axis, quant_min, quant_max)
        np.testing.assert_allclose(Y.cpu().detach().numpy(), Y_prime.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)

        # Test backward
        dout = torch.rand_like(X, dtype=torch.float, device=device)
        Y_prime.backward(dout)
        dX = _fake_quantize_per_channel_affine_grad_reference(dout, X, fq_module.scale,
                                                              fq_module.zero_point, axis, quant_min, quant_max)
        np.testing.assert_allclose(dX.cpu().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)

    def test_fq_serializable_per_channel(self):
        """state_dict must round-trip through torch.save/torch.load with the
        observed per-channel scale and zero_point intact."""
        observer = default_per_channel_weight_observer
        quant_min = -128
        quant_max = 127
        fq_module = FakeQuantize(observer, quant_min, quant_max)
        X = torch.tensor([[-5, -3.5, -2, 0, 3, 5, 7], [1, 3, 2, 5, 6.5, 8, 10]], dtype=torch.float32)
        y_ref = fq_module(X)
        state_dict = fq_module.state_dict()
        # Expected per-row qparams for the fixed input above.
        self.assertEqual(state_dict['scale'], [0.054902, 0.078431])
        self.assertEqual(state_dict['zero_point'], [0, 0])
        b = io.BytesIO()
        torch.save(state_dict, b)
        b.seek(0)
        loaded_dict = torch.load(b)
        for key in state_dict:
            self.assertEqual(state_dict[key], loaded_dict[key])
def _get_buffer_ids(module):
"""
Object addresses stay constant if and only if all modifications are in-place
"""
return [id(v) for k, v in module._buffers.items()]
class TestDistributed(QuantizationTestCase):
    """Tests that quantization machinery cooperates with data-parallel and
    distributed training: in-place buffer updates, DataParallel QAT, and
    SyncBatchNorm conversion."""

    def test_observers_preserve_buffers(self):
        """
        Tests that observers only modify buffers in place. Note: this is important
        because nn.DataParallel depends on this assumption to work correctly.
        However, DataParallel does not expose IDs of the replicas, so we test it
        without DataParallel in order to easily access the object IDs.
        """
        observer_types = [
            torch.ao.quantization.MinMaxObserver.with_args(dtype=torch.qint8),
            torch.ao.quantization.MovingAverageMinMaxObserver.with_args(dtype=torch.qint8),
            torch.ao.quantization.PerChannelMinMaxObserver.with_args(dtype=torch.qint8),
            torch.ao.quantization.MovingAveragePerChannelMinMaxObserver.with_args(dtype=torch.qint8),
            torch.ao.quantization.HistogramObserver.with_args(dtype=torch.qint8),
            torch.ao.quantization.RecordingObserver.with_args(dtype=torch.qint8),
            torch.ao.quantization.PlaceholderObserver.with_args(dtype=torch.float16),
        ]

        for observer_type in observer_types:
            observer = observer_type()
            buffer_ids_before = _get_buffer_ids(observer)
            for _i in range(5):
                inputs = torch.rand((4, 4, 4))
                observer(inputs)
            buffer_ids_after = _get_buffer_ids(observer)
            # Same ids before and after -> every update was in-place.
            self.assertEqual(
                buffer_ids_before,
                buffer_ids_after,
                msg="{}: Buffers must be modified in place".format(str(observer)))

    def test_fake_quant_preserves_buffers(self):
        """
        Tests that fake quant only modifies buffers in place. Note: this is important
        because nn.DataParallel depends on this assumption to work correctly.
        However, DataParallel does not expose IDs of the replicas, so we test it
        without DataParallel in order to easily access the object IDs.
        """
        model = torch.ao.quantization.FakeQuantize()
        buffer_ids_before = _get_buffer_ids(model)
        for _i in range(5):
            inputs = torch.rand((4, 4, 4))
            model(inputs)
        # Toggling the enable/disable flags must also be in-place.
        model.apply(torch.ao.quantization.enable_fake_quant)
        model.apply(torch.ao.quantization.disable_fake_quant)
        model.apply(torch.ao.quantization.enable_observer)
        model.apply(torch.ao.quantization.disable_observer)
        buffer_ids_after = _get_buffer_ids(model)
        self.assertEqual(
            buffer_ids_before,
            buffer_ids_after,
            msg="FakeQuant: Buffers must be modified in place")

    @unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_qat_data_parallel(self):
        """
        Tests that doing QAT in nn.DataParallel does not crash.
        """
        if 'fbgemm' not in torch.backends.quantized.supported_engines:
            return
        with override_quantized_engine('fbgemm'):
            device = torch.device('cuda')

            model = nn.Sequential(
                torch.ao.quantization.QuantStub(),
                nn.Conv2d(3, 1, 1, bias=False),
                nn.BatchNorm2d(1),
                nn.ReLU(),
                nn.Conv2d(1, 2, 3, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(2),
                nn.AvgPool2d(14),
                nn.Sigmoid(),
                torch.ao.quantization.DeQuantStub(),
            )

            # Fuse conv+bn+relu and conv+bn (indices refer to Sequential slots).
            torch.ao.quantization.fuse_modules(model, [['1', '2', '3'], ['4', '5']], inplace=True)

            model.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
            torch.ao.quantization.prepare_qat(model, inplace=True)
            model = nn.DataParallel(model, device_ids=[0, 1])
            model.to(device)
            model.train()

            for epoch in range(3):
                inputs = torch.rand(2, 3, 28, 28).to(device)
                model(inputs)
                # Mimic a real QAT schedule: freeze observers, then BN stats.
                if epoch >= 1:
                    model.apply(torch.ao.quantization.disable_observer)
                if epoch >= 2:
                    model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
                # Conversion of a deep copy each epoch must not crash.
                quant_model = copy.deepcopy(model.module)
                quant_model = torch.ao.quantization.convert(quant_model.eval().cpu(), inplace=False)
                with torch.no_grad():
                    out = quant_model(torch.rand(1, 3, 28, 28))

    def test_qat_convbn_fused_syncbn_replacement(self):
        """
        Tests that SyncBatchNorm replacement works for fused ConvBN.
        """
        if 'fbgemm' not in torch.backends.quantized.supported_engines:
            return
        with override_quantized_engine('fbgemm'):
            # create conv-bn
            class Model(nn.Module):
                def __init__(self):
                    super(Model, self).__init__()
                    self.conv = nn.Conv2d(4, 1, 3, padding=1)
                    self.bn = nn.BatchNorm2d(1)

                def forward(self, x):
                    x = self.conv(x)
                    x = self.bn(x)
                    return x

            model = Model()
            # fuse it
            fused_model = torch.ao.quantization.fuse_modules(
                model,
                [['conv', 'bn']],
            )
            # convert to QAT
            # NOTE(review): this assigns the post-training qconfig
            # (get_default_qconfig) before prepare_qat — confirm whether
            # get_default_qat_qconfig was intended here.
            fused_model.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
            torch.ao.quantization.prepare_qat(fused_model, inplace=True)
            # replace with DDP
            fused_model = nn.SyncBatchNorm.convert_sync_batchnorm(fused_model)
            self.assertTrue(
                isinstance(fused_model.conv.bn, nn.SyncBatchNorm),
                "Expected BN to be converted to SyncBN")

    def test_syncbn_preserves_qconfig(self):
        """
        Makes sure that if a BatchNorm is not fused and a qconfig exists,
        converting the module to SyncBatchNorm preserves the qconfig.
        """
        m = nn.Sequential(
            nn.Conv2d(1, 1, 1),
            nn.BatchNorm2d(1),
        )
        m[1].qconfig = torch.ao.quantization.default_qconfig
        m = torch.nn.SyncBatchNorm.convert_sync_batchnorm(m)
        self.assertTrue(
            hasattr(m[1], "qconfig"),
            "missing qconfig after SyncBatchNorm conversion")

    @unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    @override_qengines
    def test_device_affinity(self):
        """
        Tests that converting a model to QAT respects device affinity
        """
        class Model(nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.conv = nn.Conv2d(1, 1, 1)
                self.bn = nn.BatchNorm2d(1)
                self.relu = nn.ReLU()

            def forward(self, x):
                x = self.conv(x)
                x = self.bn(x)
                x = self.relu(x)
                return x

        model = Model()
        model.qconfig = torch.ao.quantization.get_default_qat_qconfig(torch.backends.quantized.engine)
        device = torch.device('cuda:0')
        model.to(device)
        torch.ao.quantization.prepare_qat(model, inplace=True)
        # Every parameter and buffer must live on the original device.
        model_devices = {p.device for p in model.parameters()} | \
            {p.device for p in model.buffers()}
        self.assertEqual(len(model_devices), 1)
        model_device = next(iter(model_devices))
        self.assertEqual(model_device, device)

        # ensure that running an input on CUDA works without any needed changes
        input = torch.randn(4, 1, 4, 4, device=device)
        model(input)
class TestFusedObsFakeQuantModule(TestCase):
    """Tests for ``FusedMovingAvgObsFakeQuantize``: parity with the fused
    ``torch.fused_moving_avg_obs_fake_quant`` operator, parity with the
    unfused ``FakeQuantize`` module, per-channel mode, reduce_range handling,
    and usage in the default QAT qconfig."""

    @given(
        device=st.sampled_from(
            ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
        )
    )
    @settings(deadline=None)
    def test_fused_obs_fq_module(self, device):
        """Module forward must match calling the fused operator directly with
        fresh running min/max state."""
        # Set up the parameters
        x = torch.randn(5, 5, device=device)
        running_min_op = torch.tensor(float("inf"), device=device)
        running_max_op = torch.tensor(float("-inf"), device=device)
        avg_const = 0.01
        scale = torch.tensor([1.0], device=device)
        zero_point = torch.tensor([0], dtype=torch.int, device=device)

        # Run the forward on the Module
        mod = FusedMovingAvgObsFakeQuantize()
        torch.ao.quantization.enable_fake_quant(mod)
        torch.ao.quantization.enable_observer(mod)
        mod.to(device)
        out = mod(x)

        # Run the operator directly
        pt_op = torch.fused_moving_avg_obs_fake_quant

        # Trailing args: quant_min=0, quant_max=255, ch_axis=0,
        # per_row_fake_quant=False.
        out_ref = pt_op(
            x,
            mod.observer_enabled,
            mod.fake_quant_enabled,
            running_min_op,
            running_max_op,
            scale,
            zero_point,
            avg_const,
            0,
            255,
            0,
            False,
        )
        # Compare params with reference
        torch.testing.assert_allclose(out, out_ref)
        torch.testing.assert_allclose(
            running_min_op, mod.activation_post_process.min_val
        )
        torch.testing.assert_allclose(
            running_max_op, mod.activation_post_process.max_val
        )

    @given(
        device=st.sampled_from(
            ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
        )
    )
    @settings(deadline=None)
    def test_fused_obs_fq_moving_avg_module(self, device):
        """Same parity check, but with the observer/fake-quant flags toggled
        on partway through a sequence of batches."""
        # Set up the parameters
        running_min_op = torch.tensor(float("inf"), device=device)
        running_max_op = torch.tensor(float("-inf"), device=device)
        avg_const = 0.001
        scale = torch.tensor([1.0], device=device)
        zero_point = torch.tensor([0], dtype=torch.int, device=device)

        mod = FusedMovingAvgObsFakeQuantize(averaging_constant=0.001)
        mod.to(device)
        # Start with both observation and fake-quant disabled.
        mod.observer_enabled[0] = 0
        mod.fake_quant_enabled[0] = 0

        for i in range(10):
            x = torch.randn(5, 5, device=device)
            if i > 2:
                mod.observer_enabled[0] = 1
            if i > 4:
                mod.fake_quant_enabled[0] = 1
            # Run the forward on the Module
            out = mod(x)

            # Run the operator directly
            pt_op = torch.fused_moving_avg_obs_fake_quant
            out_ref = pt_op(
                x,
                mod.observer_enabled,
                mod.fake_quant_enabled,
                running_min_op,
                running_max_op,
                scale,
                zero_point,
                avg_const,
                0,
                255,
                0,
                False,
            )
            # Compare params with reference
            torch.testing.assert_allclose(out, out_ref)
            torch.testing.assert_allclose(
                running_min_op, mod.activation_post_process.min_val
            )
            torch.testing.assert_allclose(
                running_max_op, mod.activation_post_process.max_val
            )

    @given(
        device=st.sampled_from(
            ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
        )
    )
    @settings(deadline=None)
    def test_compare_fused_obs_fq_oss_module(self, device):
        """The fused module must agree with the unfused FakeQuantize module
        over a sequence of random batches."""
        mod = FusedMovingAvgObsFakeQuantize()
        torch.ao.quantization.enable_fake_quant(mod)
        torch.ao.quantization.enable_observer(mod)
        mod.to(device)

        mod_ref = FakeQuantize()
        torch.ao.quantization.enable_fake_quant(mod_ref)
        torch.ao.quantization.enable_observer(mod_ref)
        mod_ref.to(device)

        for i in range(10):
            x = torch.randn(5, 5, device=device)
            out = mod(x)
            out_ref = mod_ref(x)
            torch.testing.assert_allclose(out, out_ref)
            torch.testing.assert_allclose(
                mod_ref.activation_post_process.min_val,
                mod.activation_post_process.min_val,
            )
            torch.testing.assert_allclose(
                mod_ref.activation_post_process.max_val,
                mod.activation_post_process.max_val,
            )

    def test_fused_mod_per_channel(self):
        """Per-channel (per-row) mode: a scripted fused module must track the
        fused operator, including after enabling flags mid-stream."""
        devices = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
        m = 5   # number of channels (rows)
        n = 10  # elements per channel
        for device in devices:
            running_min_op = torch.empty(m, device=device).fill_(float("inf"))
            running_max_op = torch.empty(m, device=device).fill_(float("-inf"))
            avg_const = 0.001
            scale = torch.empty(m, device=device).fill_(0.1)
            zero_point = torch.empty(m, dtype=torch.int, device=device).fill_(0)
            obs = FusedMovingAvgObsFakeQuantize.with_args(
                averaging_constant=avg_const,
                observer=MovingAveragePerChannelMinMaxObserver,
            )
            mod = obs()
            # Script the module to also exercise the TorchScript path.
            mod = torch.jit.script(mod)
            mod.to(device)

            for i in range(10):
                x = torch.randn(m, n, device=device)
                if i > 2:
                    mod.observer_enabled[0] = 1
                if i > 4:
                    mod.fake_quant_enabled[0] = 1
                # Run the forward on the Module
                out = mod(x)

                # Run the operator directly
                pt_op = torch.fused_moving_avg_obs_fake_quant
                # per_row_fake_quant=True, symmetric_quant=False.
                out_ref = pt_op(
                    x,
                    mod.observer_enabled,
                    mod.fake_quant_enabled,
                    running_min_op,
                    running_max_op,
                    scale,
                    zero_point,
                    avg_const,
                    0,
                    255,
                    0,
                    True,
                    False,
                )
                # Compare params with reference
                torch.testing.assert_allclose(out, out_ref)
                if mod.observer_enabled[0]:
                    torch.testing.assert_allclose(
                        running_min_op, mod.activation_post_process.min_val
                    )
                    torch.testing.assert_allclose(
                        running_max_op, mod.activation_post_process.max_val
                    )
                if mod.fake_quant_enabled:
                    torch.testing.assert_allclose(scale, mod.scale)
                    torch.testing.assert_allclose(zero_point, mod.zero_point)

            # The observed stats must also round-trip through state_dict.
            torch.testing.assert_allclose(mod.state_dict()['activation_post_process.min_val'], running_min_op)
            torch.testing.assert_allclose(mod.state_dict()['activation_post_process.max_val'], running_max_op)

    def test_fused_mod_reduce_range(self):
        """reduce_range=True must halve the effective quant_max (255 -> 127)."""
        obs = FusedMovingAvgObsFakeQuantize(quant_min=0, quant_max=255, dtype=torch.quint8, reduce_range=True)
        self.assertEqual(obs.quant_min, 0)
        self.assertEqual(obs.quant_max, 127)

    def test_default_fused_qat_config(self):
        """The v1 default QAT qconfig must install FusedMovingAvgObsFakeQuantize
        for both weights and activations, with engine-appropriate observers
        and activation ranges."""
        class Model(nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.linear = nn.Linear(2, 2)
                self.relu = nn.ReLU()

            def forward(self, x):
                x = self.linear(x)
                x = self.relu(x)
                return x

        for qengine in ["fbgemm", "qnnpack"]:
            model = Model()
            model.linear.weight = torch.nn.Parameter(torch.randn(2, 2))
            sample_input = torch.randn(2, 2)
            model.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine, version=1)
            ref_model = torch.ao.quantization.QuantWrapper(model)
            ref_model = torch.ao.quantization.prepare_qat(ref_model)
            ref_model(sample_input)
            count_fake_quant = 0
            for name, mod in ref_model.named_modules():
                if name.endswith('weight_fake_quant'):
                    count_fake_quant += 1
                    self.assertEqual(type(mod), FusedMovingAvgObsFakeQuantize)

                if name.count('activation_post_process') == 1 and 'weight_fake_quant' not in name:
                    count_fake_quant += 1
                    self.assertEqual(type(mod), FusedMovingAvgObsFakeQuantize)
            # One weight fake-quant plus two activation fake-quants.
            self.assertEqual(count_fake_quant, 3)

            if qengine == "fbgemm":
                # fbgemm reduces the activation range and uses per-channel weights.
                self.assertEqual(ref_model.quant.activation_post_process.quant_min, 0)
                self.assertEqual(ref_model.quant.activation_post_process.quant_max, 127)
                self.assertEqual(type(ref_model.module.linear.weight_fake_quant.activation_post_process),
                                 MovingAveragePerChannelMinMaxObserver)
            else:
                self.assertEqual(ref_model.quant.activation_post_process.quant_min, 0)
                self.assertEqual(ref_model.quant.activation_post_process.quant_max, 255)
                self.assertEqual(type(ref_model.module.linear.weight_fake_quant.activation_post_process),
                                 MovingAverageMinMaxObserver)
if __name__ == '__main__':
    # This module only defines test cases; it must be run through the
    # aggregating test/test_quantization.py entry point.
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_quantization.py TESTNAME\n\n"
                       "instead.")
| 41.967494
| 132
| 0.588708
|
4a08de61a1d07c50b27b9bc25a65b60cf5fc3265
| 6,820
|
py
|
Python
|
libtb/sieve/__init__.py
|
MelonSmasher/turkey-bite
|
b21bf82076e7c00c3ae74fc9d36761f0960cbbaf
|
[
"MIT"
] | 4
|
2020-08-20T02:22:07.000Z
|
2020-08-20T14:27:52.000Z
|
libtb/sieve/__init__.py
|
MelonSmasher/turkey-bite
|
b21bf82076e7c00c3ae74fc9d36761f0960cbbaf
|
[
"MIT"
] | null | null | null |
libtb/sieve/__init__.py
|
MelonSmasher/turkey-bite
|
b21bf82076e7c00c3ae74fc9d36761f0960cbbaf
|
[
"MIT"
] | 1
|
2020-08-20T08:41:50.000Z
|
2020-08-20T08:41:50.000Z
|
class Filters(object):
    """Message sieve: decides whether an incoming beat packet should be
    processed or dropped, based on per-type ignore rules in ``config``."""

    # Packet types we care about
    packets = ['dns', 'browser.history']
    # DNS status values considered valid (anything else is an error packet).
    valids = ['OK']

    def __init__(self, config):
        """Sieve class responsible for filtering out messages that should be ignored.

        This lessens the load on the queue workers and makes ES lighter.
        """
        self.config = config

    # Packetbeat DNS filters
    def dns(self, data):
        """Return True if a Packetbeat DNS packet should be processed.

        Drops error packets, packets from/to ignored clients, outbound
        replies (optionally), and queries for ignored hosts/domains.
        """
        # If we are filtering invalid packets
        if self.config['drop_error_packets']:
            # Is this status OK?
            if data['status'] not in self.valids:
                return False
        # For inbound requests
        if 'client' in data.keys():
            if 'ip' in data['client'].keys():
                if data['client']['ip'] in self.config['ignore']['clients']:
                    return False
        # For outbound requests
        if 'destination' in data.keys():
            if 'ip' in data['destination']:
                if data['destination']['ip'] in self.config['ignore']['clients']:
                    return False
        if 'network' in data.keys():
            if 'direction' in data['network'].keys():
                if data['network']['direction'] == 'outbound':
                    # Optionally drop DNS replies (outbound traffic).
                    if self.config['drop_replies']:
                        return False
        # Do we have a registered domain key?
        if 'registered_domain' in data['dns']['question'].keys():
            if data['dns']['question']['registered_domain'].strip().lower() in self.config['ignore']['domains']:
                return False
        # Do we have a etld_plus_one key?
        if 'etld_plus_one' in data['dns']['question'].keys():
            if data['dns']['question']['etld_plus_one'].strip().lower() in self.config['ignore']['domains']:
                return False
        # Do we have a name key?
        if 'resource' in data.keys():
            if data['resource'].strip().lower() in self.config['ignore']['hosts']:
                return False
            # Suffix match so subdomains of ignored domains are dropped too.
            for d in self.config['ignore']['domains']:
                if data['resource'].strip().lower().endswith(d):
                    return False
        # Do we have a name key?
        if 'name' in data['dns']['question'].keys():
            if data['dns']['question']['name'].strip().lower() in self.config['ignore']['hosts']:
                return False
            for d in self.config['ignore']['domains']:
                if data['dns']['question']['name'].strip().lower().endswith(d):
                    return False
        # If we made it here, we're good
        return True

    # Browserbeat filters
    def browserbeat(self, data):
        """Return True if a Browserbeat history packet should be processed.

        Drops packets from ignored clients/users and history entries whose
        host or two-label domain is on the ignore lists. Packets without the
        expected nested structure are dropped.
        """
        ignore_clients = self.config['browserbeat']['ignore']['clients']
        ignore_users = self.config['browserbeat']['ignore']['users']
        ignore_domains = self.config['browserbeat']['ignore']['domains']
        ignore_hosts = self.config['browserbeat']['ignore']['hosts']
        # Dive down into the data structure
        if 'data' in data.keys():
            if 'event' in data['data'].keys():
                if 'data' in data['data']['event'].keys():
                    # Client level rules
                    if 'client' in data['data']['event']['data'].keys():
                        # Filter ignored client hostnames
                        if 'Hostname' in data['data']['event']['data']['client'].keys():
                            # Filter fqdn hostname
                            if 'hostname' in data['data']['event']['data']['client']['Hostname'].keys():
                                if data['data']['event']['data']['client']['Hostname']['hostname'] in ignore_clients:
                                    return False
                            # Filter short hostname
                            if 'short' in data['data']['event']['data']['client']['Hostname'].keys():
                                if data['data']['event']['data']['client']['Hostname']['short'] in ignore_clients:
                                    return False
                        # Filter ignored IPs
                        if 'ip_addresses' in data['data']['event']['data']['client'].keys():
                            for ip in data['data']['event']['data']['client']['ip_addresses']:
                                if ip in ignore_clients:
                                    return False
                        # Filter ignored users
                        if 'user' in data['data']['event']['data']['client'].keys():
                            if data['data']['event']['data']['client']['user'] in ignore_users:
                                return False
                    # History entry level rules
                    if 'entry' in data['data']['event']['data'].keys():
                        if 'url_data' in data['data']['event']['data']['entry'].keys():
                            if 'Host' in data['data']['event']['data']['entry']['url_data'].keys():
                                host = data['data']['event']['data']['entry']['url_data']['Host']
                                if host:
                                    if ':' in host:
                                        # Deal with hosts that have a port in the string
                                        host = host.split(':')[0]
                                    # Should we ignore this host
                                    if host in ignore_hosts:
                                        return False
                                    # Deal with ignored domains
                                    # (compare only the last two labels,
                                    # e.g. "a.b.example.com" -> "example.com")
                                    domain = host
                                    if '.' in domain:
                                        parts = domain.split('.')
                                        domain = '.'.join([parts[len(parts) - 2], parts[len(parts) - 1]])
                                    if domain in ignore_domains:
                                        return False
        else:
            # If we get here we aren't sure how to process this
            return False
        # If we made it here, we're good
        return True

    def should_process(self, data):
        """Dispatch ``data`` to the filter for its packet type.

        Returns the filter verdict, or False for unknown types/shapes.
        """
        # Ensure that the data is now a dict
        if isinstance(data, dict):
            # If `type` is a key in the dict
            if 'type' in data.keys():
                # Is this packet one of the types we can process?
                if data['type'] in self.packets:
                    # different filters for different packet types
                    if data['type'] == 'dns':
                        return self.dns(data)
                    elif data['type'] == 'browser.history':
                        return self.browserbeat(data)
                    else:
                        return False
        # If we made it here, we don't want this packet
        return False
| 47.034483
| 117
| 0.463196
|
4a08de6c639ba5e9da5eb9965c142ae241e78625
| 352
|
py
|
Python
|
ctflearn/read-in-color/read-in-color.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | 9
|
2021-04-20T15:28:36.000Z
|
2022-03-08T19:53:48.000Z
|
ctflearn/read-in-color/read-in-color.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | null | null | null |
ctflearn/read-in-color/read-in-color.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | 6
|
2021-06-24T03:25:21.000Z
|
2022-02-20T21:44:52.000Z
|
#!/usr/bin/env python
from PIL import Image

# Walk the first pixel row of color_img.png and, whenever the colour
# changes, print the three channel values of the new colour as ASCII chars.
image = Image.open('color_img.png', 'r')
image = image.convert('RGB')
pix = image.load()
width, height = image.size
for row in range(height):
    for col in range(width):
        # Skip pixels identical to their left neighbour.
        if pix[col, row] == pix[col - 1, row]:
            continue
        red, green, blue = pix[col, row]
        print(chr(red) + chr(green) + chr(blue), end='')
    print()
    break  # only the first row is processed
| 19.555556
| 49
| 0.539773
|
4a08ded96c022ae7d2df86ddd4f773ba025437a0
| 1,466
|
py
|
Python
|
Python/set-matrix-zeroes.py
|
ZhiliangGong/LeetCode
|
a1fc7311ddc50eb43f43fc51d3290f2c91fd4fa1
|
[
"MIT"
] | 5
|
2018-09-06T03:12:33.000Z
|
2022-03-03T18:57:11.000Z
|
Python/set-matrix-zeroes.py
|
pnandini/LeetCode
|
e746c3298be96dec8e160da9378940568ef631b1
|
[
"MIT"
] | 1
|
2018-07-10T03:28:43.000Z
|
2018-07-10T03:28:43.000Z
|
Python/set-matrix-zeroes.py
|
pnandini/LeetCode
|
e746c3298be96dec8e160da9378940568ef631b1
|
[
"MIT"
] | 5
|
2018-09-06T03:12:35.000Z
|
2021-07-03T09:00:56.000Z
|
# Time: O(m * n)
# Space: O(1)
#
# Given a m x n matrix, if an element is 0, set its entire row and column to 0. Do it in place.
#
# Follow up:
# Did you use extra space?
# A straight forward solution using O(mn) space is probably a bad idea.
# A simple improvement uses O(m + n) space, but still not the best solution.
# Could you devise a constant space solution?
#
class Solution:
    # @param matrix, a list of lists of integers
    # RETURN NOTHING, MODIFY matrix IN PLACE.
    def setZeroes(self, matrix):
        """Zero out, in place, every row and column that contains a 0.

        O(1) extra space: the first row and first column double as the
        marker storage, and two booleans remember whether they themselves
        must be zeroed.

        Ported to Python 3: ``xrange`` and the builtin ``reduce`` used by
        the original are gone in Python 3; ``any``/``range`` express the
        same checks and work on both versions.
        """
        # Does the first column / first row itself contain a zero?
        first_col = any(matrix[i][0] == 0 for i in range(len(matrix)))
        first_row = any(matrix[0][j] == 0 for j in range(len(matrix[0])))

        # Mark zeroed rows/columns in the first row and column.
        for i in range(1, len(matrix)):
            for j in range(1, len(matrix[0])):
                if matrix[i][j] == 0:
                    matrix[i][0], matrix[0][j] = 0, 0

        # Apply the marks to the interior cells.
        for i in range(1, len(matrix)):
            for j in range(1, len(matrix[0])):
                if matrix[i][0] == 0 or matrix[0][j] == 0:
                    matrix[i][j] = 0

        # Finally zero the first column/row themselves if needed.
        if first_col:
            for i in range(len(matrix)):
                matrix[i][0] = 0
        if first_row:
            for j in range(len(matrix[0])):
                matrix[0][j] = 0
if __name__ == "__main__":
matrix = [ [1, 0, 1, 1]
, [1, 1, 0, 1]
, [1, 1, 1, 0]
, [1, 1, 1, 1]]
Solution().setZeroes(matrix)
print matrix
| 33.318182
| 98
| 0.532742
|
4a08deef33ee5e484346fc9cf23c6b47e95e842c
| 3,314
|
py
|
Python
|
app/app/settings.py
|
egiev/django-rest-api
|
95ae1f5839f0abe2649044a0cb383c9fb8976635
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
egiev/django-rest-api
|
95ae1f5839f0abe2649044a0cb383c9fb8976635
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
egiev/django-rest-api
|
95ae1f5839f0abe2649044a0cb383c9fb8976635
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1f_v#0k25%uw)kk5#5t1$x40@(c%@%d4p12&x%d8+dr+$49$n*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'users'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| 25.492308
| 91
| 0.685878
|
4a08dfb755ae10455c5d0638f3f47f181cc92842
| 778
|
py
|
Python
|
_includes/snippets/python/monkey-patching/attributes_from_outside.py
|
MarauderXtreme/justus
|
229703abda2bd90d34232c97c64999c216d98239
|
[
"CC-BY-4.0"
] | 1
|
2020-02-02T16:09:43.000Z
|
2020-02-02T16:09:43.000Z
|
_includes/snippets/python/monkey-patching/attributes_from_outside.py
|
MrNick7373/justusadam.github.io
|
0660026a7846389647cb3ce52d9f776aab10e3cd
|
[
"CC-BY-4.0"
] | 1
|
2020-02-02T13:22:42.000Z
|
2020-02-02T13:22:42.000Z
|
_includes/snippets/python/monkey-patching/attributes_from_outside.py
|
MarauderXtreme/justus
|
229703abda2bd90d34232c97c64999c216d98239
|
[
"CC-BY-4.0"
] | null | null | null |
#! /usr/bin/env python3

# Demo: instance attributes can be created, read, and deleted from outside
# the class; the final access deliberately raises AttributeError.


class TestClass:
    def __init__(self):
        pass

    def method_1(self):
        # defining an instance attribute from inside another method
        self.instance_foo = 4


my_instance = TestClass()

print(
    hasattr(my_instance, 'instance_foo')
)  # =>> False
# the instance_foo attribute does not exist yet

my_instance.method_1()

print(
    my_instance.instance_foo
)  # =>> 4
# now it does

print(
    hasattr(my_instance, 'instance_bar')
)  # =>> False

# attributes can also be attached from outside the class entirely
my_instance.instance_bar = 'hello'

print(
    my_instance.instance_bar
)  # =>> hello

# ... and removed again with `del`
del my_instance.instance_foo

my_instance.instance_foo
# =>> AttributeError: 'TestClass' object has no attribute 'instance_foo'
# trying to call non-existing attributes causes an AttributeError
| 18.093023
| 72
| 0.703085
|
4a08e1b50063a45a2d3c4a9f5cf3798eec24eec3
| 175
|
py
|
Python
|
health_check/urls.py
|
uktrade/dnb-service
|
c8f22af82af70f33b8d6bf92e3ca6992fce1f220
|
[
"MIT"
] | 4
|
2019-12-03T14:59:50.000Z
|
2020-04-28T12:42:24.000Z
|
health_check/urls.py
|
uktrade/dnb-service
|
c8f22af82af70f33b8d6bf92e3ca6992fce1f220
|
[
"MIT"
] | 17
|
2019-04-11T13:12:57.000Z
|
2022-01-13T10:08:07.000Z
|
health_check/urls.py
|
uktrade/dnb-service
|
c8f22af82af70f33b8d6bf92e3ca6992fce1f220
|
[
"MIT"
] | 3
|
2021-05-11T16:13:57.000Z
|
2022-03-08T15:57:19.000Z
|
from django.urls import path
from .views import HealthCheckP1View
# URL namespace, enables reverse('health_check:p1').
app_name = 'health_check'
urlpatterns = [
    # Health-check endpoint served by HealthCheckP1View.
    path('check/', HealthCheckP1View.as_view(), name='p1'),
]
| 14.583333
| 59
| 0.72
|
4a08e24c21243703d74ddeee0c8b2f2b6a2c728f
| 3,895
|
py
|
Python
|
securicad/enterprise/organizations.py
|
foreseeti/securicad-enterprise-sdk
|
aef4db2530caa0de78a56a3e519efa62bcee39d8
|
[
"Apache-2.0"
] | 3
|
2020-09-28T09:13:39.000Z
|
2020-12-10T09:44:03.000Z
|
securicad/enterprise/organizations.py
|
foreseeti/securicad-enterprise-sdk
|
aef4db2530caa0de78a56a3e519efa62bcee39d8
|
[
"Apache-2.0"
] | 2
|
2021-02-19T13:55:10.000Z
|
2021-12-28T07:51:53.000Z
|
securicad/enterprise/organizations.py
|
foreseeti/securicad-enterprise-sdk
|
aef4db2530caa0de78a56a3e519efa62bcee39d8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020-2021 Foreseeti AB <https://foreseeti.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict, List, Optional
if TYPE_CHECKING:
from securicad.enterprise.client import Client
from securicad.enterprise.projects import Project
from securicad.enterprise.users import User
class Organization:
    """A single organization on the securiCAD Enterprise server."""

    def __init__(self, client: "Client", tag: str, name: str) -> None:
        self.client = client
        self.tag = tag
        self.name = name

    @staticmethod
    def from_dict(client: "Client", dict_org: Dict[str, Any]) -> "Organization":
        """Build an :class:`Organization` from its server-side JSON object."""
        return Organization(client=client, tag=dict_org["tag"], name=dict_org["name"])

    def update(self, *, name: str) -> None:
        """Rename this organization on the server and mirror the result locally."""
        payload: Dict[str, Any] = {"tag": self.tag, "name": name}
        response = self.client._post("organization", payload)
        # Trust the server's echoed name rather than the requested one.
        self.name = response["name"]

    def delete(self) -> None:
        """Delete this organization on the server."""
        self.client._delete("organization", {"tag": self.tag})

    def list_users(self) -> List["User"]:
        """Return the users belonging to this organization."""
        raw = self.client.organizations._get_dict_organization_by_tag(self.tag)
        return [
            self.client.users.get_user_by_uid(entry["id"])
            for entry in raw["users"]
        ]

    def list_projects(self) -> List["Project"]:
        """Return the projects belonging to this organization."""
        raw = self.client.organizations._get_dict_organization_by_tag(self.tag)
        return [
            self.client.projects.get_project_by_pid(entry["pid"])
            for entry in raw["projects"]
        ]
class Organizations:
    """Accessor for organization resources on the Enterprise server."""

    def __init__(self, client: "Client") -> None:
        self.client = client

    def _list_dict_organizations(self) -> List[Dict[str, Any]]:
        # Raw JSON list of every organization visible to the caller.
        return self.client._get("organization/all")

    def _get_dict_organization_by_tag(self, tag: str) -> Dict[str, Any]:
        # Raw JSON object for a single organization.
        return self.client._get(f"organization/{tag}")

    def list_organizations(self) -> List[Organization]:
        """Return every organization as an :class:`Organization` object."""
        return [
            Organization.from_dict(client=self.client, dict_org=raw)
            for raw in self._list_dict_organizations()
        ]

    def get_organization_by_tag(self, tag: str) -> Organization:
        """Look up a single organization by its unique tag."""
        raw = self._get_dict_organization_by_tag(tag)
        return Organization.from_dict(client=self.client, dict_org=raw)

    def get_organization_by_name(self, name: str) -> Organization:
        """Look up an organization by name.

        An exact name match wins; otherwise the first case-insensitive
        match is returned. Raises ``ValueError`` when nothing matches.
        """
        candidates = self.list_organizations()
        for candidate in candidates:
            if candidate.name == name:
                return candidate
        for candidate in candidates:
            if candidate.name.lower() == name.lower():
                return candidate
        raise ValueError(f"Invalid organization {name}")

    def create_organization(
        self, name: str, license: Optional[str] = None
    ) -> Organization:
        """Create a new organization, optionally attaching a license."""
        payload: Dict[str, Any] = {"name": name}
        if license is not None:
            payload["license"] = license
        raw = self.client._put("organization", payload)
        return Organization.from_dict(client=self.client, dict_org=raw)
| 38.564356
| 86
| 0.668293
|
4a08e29f7d675518e2a7f21cc67e4c90837b165a
| 3,838
|
py
|
Python
|
benchmark/startQiskit940.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit940.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit940.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=37
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Z_f that negates the amplitude of each basis
    state ``x`` with ``f(x) == "1"``.

    NOTE(review): assumes ``f`` maps an n-character bit string ("0"/"1"
    characters) to the string "0" or "1" — confirm against callers.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Sandwich the multi-controlled phase between X gates so it
            # fires exactly on the bit pattern `rep`.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                # mcu1(pi, ...) applies a controlled phase of pi, i.e. a
                # multi-controlled Z up to global phase.
                oracle.mcu1(pi, controls[1:], controls[0])
            # Undo the X gates applied above.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the auto-generated Grover-style benchmark circuit for n qubits
    and measure every qubit into its classical register.

    The exact gate sequence (and the trailing "number=..." tags) is produced
    by a circuit generator; do not reorder gates by hand.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.cx(input_qubit[3],input_qubit[2]) # number=28
    prog.y(input_qubit[2]) # number=33
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)
    # `repeat` is the textbook Grover iteration count, but the generator
    # only applies a single iteration below (range(1)).
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(1):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.h(input_qubit[3]) # number=30
        prog.cz(input_qubit[4],input_qubit[3]) # number=31
        prog.h(input_qubit[3]) # number=32
        prog.h(input_qubit[2]) # number=29
        prog.cx(input_qubit[1],input_qubit[0]) # number=22
        prog.h(input_qubit[1]) # number=34
        prog.cz(input_qubit[3],input_qubit[1]) # number=35
        prog.h(input_qubit[1]) # number=36
        prog.x(input_qubit[0]) # number=23
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.x(input_qubit[1]) # number=10
        prog.x(input_qubit[2]) # number=11
        prog.x(input_qubit[3]) # number=12
        prog.x(input_qubit[1]) # number=27
        if n>=2:
            # Multi-controlled phase of pi: diffusion-operator core.
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.x(input_qubit[0]) # number=13
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
    prog.h(input_qubit[0])
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.h(input_qubit[3])
    # circuit end
    # Measure every qubit into its matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    key = "00000"
    # f(x) is "1" only for the all-zeros key: the state the search amplifies.
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Simulate counts on the ideal simulator, then transpile for a mock
    # device (FakeVigo) to record the compiled depth.
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump counts, compiled depth and the circuit drawing to the data file.
    writefile = open("../data/startQiskit940.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 29.984375
| 82
| 0.604221
|
4a08e2b7f17ddc64728d2e87f2cf5c5e4ab97eff
| 157
|
py
|
Python
|
apilinux/settings.py
|
JonathanArrance/API_linux
|
1613380eda95b9554206add6d22c90263f95bb48
|
[
"MIT"
] | null | null | null |
apilinux/settings.py
|
JonathanArrance/API_linux
|
1613380eda95b9554206add6d22c90263f95bb48
|
[
"MIT"
] | 4
|
2019-02-11T00:22:34.000Z
|
2019-02-11T00:27:23.000Z
|
apilinux/settings.py
|
JonathanArrance/API_linux
|
1613380eda95b9554206add6d22c90263f95bb48
|
[
"MIT"
] | 1
|
2020-05-05T23:26:52.000Z
|
2020-05-05T23:26:52.000Z
|
#!/usr/bin/python
import os
# Hostname the API binds to / is reached on; overridable via the environment.
LOCAL_HOST = os.getenv('LOCAL_HOST','localhost')
# NOTE(review): hard-coded secret key — should come from the environment in
# any non-development deployment.
SECRET_KEY = 'the quick brown fox jumps over the lazy dog'
API_VERSION = '1.0'
| 26.166667
| 59
| 0.732484
|
4a08e3305b6fe020e003415182a33ebd6854e4e5
| 427
|
py
|
Python
|
kubernetes_typed/client/models/v2beta2_container_resource_metric_source.py
|
nikhiljha/kubernetes-typed
|
4f4b969aa400c88306f92560e56bda6d19b2a895
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_typed/client/models/v2beta2_container_resource_metric_source.py
|
nikhiljha/kubernetes-typed
|
4f4b969aa400c88306f92560e56bda6d19b2a895
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_typed/client/models/v2beta2_container_resource_metric_source.py
|
nikhiljha/kubernetes-typed
|
4f4b969aa400c88306f92560e56bda6d19b2a895
|
[
"Apache-2.0"
] | null | null | null |
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V2beta2ContainerResourceMetricSourceDict generated type."""
from typing import TypedDict
from kubernetes_typed.client import V2beta2MetricTargetDict
# total=False: every key is optional, mirroring the Kubernetes API object.
V2beta2ContainerResourceMetricSourceDict = TypedDict(
    "V2beta2ContainerResourceMetricSourceDict",
    {
        "container": str,
        "name": str,
        "target": V2beta2MetricTargetDict,
    },
    total=False,
)
| 26.6875
| 62
| 0.740047
|
4a08e3effae2058c00b3b4cdf262ce7fd2479ea3
| 670
|
py
|
Python
|
students/K33401/Polyakov_Sergey/lr1/part2/server2.py
|
spolyakovs/ITMO_ICT_WebDevelopment_2020-2021
|
b83b609676554afd6cd5d0cf989cda7e0d571000
|
[
"MIT"
] | null | null | null |
students/K33401/Polyakov_Sergey/lr1/part2/server2.py
|
spolyakovs/ITMO_ICT_WebDevelopment_2020-2021
|
b83b609676554afd6cd5d0cf989cda7e0d571000
|
[
"MIT"
] | null | null | null |
students/K33401/Polyakov_Sergey/lr1/part2/server2.py
|
spolyakovs/ITMO_ICT_WebDevelopment_2020-2021
|
b83b609676554afd6cd5d0cf989cda7e0d571000
|
[
"MIT"
] | null | null | null |
import socket
# TCP server socket; SO_REUSEADDR lets the server restart immediately
# without waiting for the old socket to leave TIME_WAIT.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('localhost', 5678))
server_socket.listen(5)
def calculate_area_of_trapezoid(base1, base2, height):
    """Return the trapezoid area: mean of the two bases times the height."""
    mean_base = (base1 + base2) / 2
    return mean_base * height
while True:
    # Serve one client per iteration: prompt, read three integers, reply
    # with the computed area.
    client_socket, address = server_socket.accept()
    try:
        client_socket.send('Enter parameters of trapezoid separated by space: base1 base2 height'.encode())
        data = client_socket.recv(1024).decode('ascii').split(' ')
        result = calculate_area_of_trapezoid(int(data[0]), int(data[1]), int(data[2]))
        client_socket.send(str(result).encode())
    finally:
        # BUG FIX: the original never closed the accepted socket, leaking one
        # file descriptor per connection.
        client_socket.close()
| 37.222222
| 103
| 0.750746
|
4a08e5878c4c3dce5205cacd576e953414e67080
| 1,365
|
py
|
Python
|
setup.py
|
hrithikwel8/Speech_Emotion_Recognition
|
d0ea8a371c60627d318dd69c96eb3189e260fb8c
|
[
"MIT"
] | null | null | null |
setup.py
|
hrithikwel8/Speech_Emotion_Recognition
|
d0ea8a371c60627d318dd69c96eb3189e260fb8c
|
[
"MIT"
] | null | null | null |
setup.py
|
hrithikwel8/Speech_Emotion_Recognition
|
d0ea8a371c60627d318dd69c96eb3189e260fb8c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python

"""The setup script."""

from setuptools import setup, find_packages

# Long description is assembled from the README plus the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
# Runtime and test dependencies.
requirements = ['Click>=7.0', ]
test_requirements = [ ]
setup(
    author="Hrithik Chourasia",
    author_email='hrithik8wel@gmail.com',
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    description="Verbal Communication Quality Monitoring _ Feedback System",
    entry_points={
        # Installs a `SER` console command that calls SER.cli:main.
        'console_scripts': [
            'SER=SER.cli:main',
        ],
    },
    install_requires=requirements,
    license="MIT license",
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='SER',
    name='SER',
    packages=find_packages(include=['SER', 'SER.*']),
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/hrithikwel8/SER',
    version='0.1.0',
    zip_safe=False,
)
| 27.3
| 76
| 0.627839
|
4a08e5cdadc19af21d0b3a85703b62e95378b3cc
| 787
|
py
|
Python
|
other-programs/systemspaperorbits.py
|
atomsite/ath-dust-plot
|
b63dc548dc859f33e0b096703a9b98c1f1cdc832
|
[
"MIT"
] | null | null | null |
other-programs/systemspaperorbits.py
|
atomsite/ath-dust-plot
|
b63dc548dc859f33e0b096703a9b98c1f1cdc832
|
[
"MIT"
] | null | null | null |
other-programs/systemspaperorbits.py
|
atomsite/ath-dust-plot
|
b63dc548dc859f33e0b096703a9b98c1f1cdc832
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("../")
import athenaplotlib as apl
import numpy as np
import matplotlib.pyplot as plt
class Star:
    """One component of a binary system."""

    def __init__(self, mass, period, ecc):
        self.mass = mass
        self.period = period * 86400  # input is in days; store seconds
        self.ecc = ecc
        self.phaseoff = 0.0  # phase offset, zero by default
# Binary systems as [component 1, component 2], each built as
# Star(mass, period in days, eccentricity); both components share the
# same orbital period and eccentricity.
wr104 = [Star(10,245,0.06),Star(20,245,0.06)]
wr98a = [Star(10,566,0.0),Star(18,566,0.0)]
wr140 = [Star(14.9,2869,0.896),Star(35.9,2869,0.896)]
def genorbits(s):
    """Sample both stars' positions over one full orbital period.

    Returns a list of (x1, y1, x2, y2) tuples, one per 0.001 step in
    orbital phase, computed via apl.calcOrbit.
    """
    samples = []
    for phase in np.arange(0.0, 1.0, 0.001):
        pos = apl.calcOrbit(s[0], s[1], phase * s[0].period)
        samples.append((pos[0][0], pos[0][1], pos[1][0], pos[1][1]))
    return samples
wr140pos = genorbits(wr140)
print(np.shape(wr140pos[0]))
print(wr140pos[0][:])
# BUG FIX: wr140pos[:][0] is just wr140pos[0] (one (x1, y1, x2, y2) sample),
# so the original scattered a single row against itself. Extract the first
# star's x and y columns explicitly instead.
xs = [sample[0] for sample in wr140pos]
ys = [sample[1] for sample in wr140pos]
plt.scatter(xs, ys)
plt.show()
| 18.738095
| 59
| 0.631512
|
4a08e5e4ea0c6f79c3cdff115b85766d622ecbbb
| 1,675
|
py
|
Python
|
testinfra/modules/__init__.py
|
farzadghanei/testinfra
|
02a9338b1e8eef7da8737099ee1445341e454fe1
|
[
"Apache-2.0"
] | 1
|
2019-10-27T15:22:21.000Z
|
2019-10-27T15:22:21.000Z
|
testinfra/modules/__init__.py
|
farzadghanei/testinfra
|
02a9338b1e8eef7da8737099ee1445341e454fe1
|
[
"Apache-2.0"
] | null | null | null |
testinfra/modules/__init__.py
|
farzadghanei/testinfra
|
02a9338b1e8eef7da8737099ee1445341e454fe1
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import importlib
# Registry of testinfra modules: public accessor name -> 'submodule:ClassName'
# spec, resolved lazily by get_module_class() so importing this package does
# not import every backend.
modules = {
    'addr': 'addr:Addr',
    'ansible': 'ansible:Ansible',
    'command': 'command:Command',
    'docker': 'docker:Docker',
    'environment': 'environment:Environment',
    'file': 'file:File',
    'group': 'group:Group',
    'interface': 'interface:Interface',
    'iptables': 'iptables:Iptables',
    'mount_point': 'mountpoint:MountPoint',
    'package': 'package:Package',
    'pip_package': 'pip:PipPackage',
    'process': 'process:Process',
    'puppet_resource': 'puppet:PuppetResource',
    'facter': 'puppet:Facter',
    'salt': 'salt:Salt',
    'service': 'service:Service',
    'socket': 'socket:Socket',
    'sudo': 'sudo:Sudo',
    'supervisor': 'supervisor:Supervisor',
    'sysctl': 'sysctl:Sysctl',
    'system_info': 'systeminfo:SystemInfo',
    'user': 'user:User',
    'block_device': 'blockdevice:BlockDevice',
}
def get_module_class(name):
    """Import and return the class implementing the named testinfra module."""
    submodule, classname = modules[name].split(':')
    full_path = '{0}.{1}'.format(__name__, submodule)
    return getattr(importlib.import_module(full_path), classname)
| 32.843137
| 74
| 0.68597
|
4a08e614223b07bce01096b8c1eb516bc2d0916b
| 2,011
|
py
|
Python
|
shapeworld/captions/existential.py
|
shgoren/ShapeWorld
|
a6633bddbf195d0dc4cbdbe07564a98149337bcf
|
[
"MIT"
] | 1
|
2021-04-08T16:14:25.000Z
|
2021-04-08T16:14:25.000Z
|
shapeworld/captions/existential.py
|
shgoren/ShapeWorld
|
a6633bddbf195d0dc4cbdbe07564a98149337bcf
|
[
"MIT"
] | null | null | null |
shapeworld/captions/existential.py
|
shgoren/ShapeWorld
|
a6633bddbf195d0dc4cbdbe07564a98149337bcf
|
[
"MIT"
] | 1
|
2021-02-23T17:10:46.000Z
|
2021-02-23T17:10:46.000Z
|
from __future__ import division
from shapeworld import util
from shapeworld.captions import Caption, EntityType, Relation
class Existential(Caption):
    """Existential caption: asserts that some entity matching the restrictor
    type also satisfies the body relation ("there is a <restrictor> that
    <body>")."""

    __slots__ = ('restrictor', 'body')

    def __init__(self, restrictor, body):
        assert isinstance(restrictor, EntityType)
        assert isinstance(body, Relation)
        self.restrictor = restrictor
        self.body = body

    def model(self):
        """Serializable dict representation of this caption."""
        return dict(
            component=str(self),
            restrictor=self.restrictor.model(),
            body=self.body.model()
        )

    def reverse_polish_notation(self):
        """RPN token sequence: restrictor tokens, body tokens, then this node."""
        return self.restrictor.reverse_polish_notation() + self.body.reverse_polish_notation() + [str(self)]

    def agreement(self, predication, world):
        """Return 1.0 (agrees), -1.0 (disagrees) or 0.0 (ambiguous).

        Consumes two sub-predications in order: the restrictor-only one and
        the restrictor+body one; the latter must be a subset of the former.
        Agreement holds when at least one entity satisfies both; disagreement
        when none could possibly satisfy both.
        (Dead commented-out variants of this logic were removed; see VCS
        history if the stricter +/-2.0 scoring is ever needed again.)
        """
        rstr_predication = predication.get_sub_predication()
        rstr_body_predication = predication.get_sub_predication()
        assert rstr_body_predication <= rstr_predication
        if rstr_body_predication.num_agreeing > 0:
            return 1.0
        elif rstr_body_predication.num_not_disagreeing == 0:
            return -1.0
        else:
            return 0.0
| 35.910714
| 108
| 0.658876
|
4a08e6365eee7e76e44065f6fc4c875a85b33305
| 4,088
|
py
|
Python
|
tests/assert_utils.py
|
Pow3r5/manim
|
2972a64342aa5ae72977b444f653b05250ab1f8f
|
[
"MIT"
] | 2
|
2022-03-31T08:31:00.000Z
|
2022-03-31T08:31:43.000Z
|
tests/assert_utils.py
|
Pow3r5/manim
|
2972a64342aa5ae72977b444f653b05250ab1f8f
|
[
"MIT"
] | 21
|
2022-03-02T15:25:49.000Z
|
2022-03-07T11:15:45.000Z
|
tests/assert_utils.py
|
DD2480-Group-10/manim
|
e147a9fc6c117332221e42437481f3efba76499a
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import os
from pathlib import Path
from pprint import pformat
def assert_file_exists(filepath: str | os.PathLike) -> None:
    """Assert that ``filepath`` points to an existing file.

    On failure, the message lists every entry of the parent directory,
    which is far more useful than a raw ``assert os.path.isfile(filepath)``.

    Parameters
    ----------
    filepath
        Filepath to check.

    Raises
    ------
    AssertionError
        If filepath does not point to a file (missing, or a directory).
    """
    target = Path(filepath)
    if target.is_file():
        return
    siblings = pformat([entry.name for entry in target.parent.iterdir()])
    raise AssertionError(
        f"{target.absolute()} is not a file. Other elements in the parent directory are \n{siblings}"
    )
def assert_dir_exists(dirpath: str | os.PathLike) -> None:
    """Assert that directory exists.

    Parameters
    ----------
    dirpath
        Path to directory to check.

    Raises
    ------
    AssertionError
        If dirpath does not point to a directory (missing, or a file).
    """
    target = Path(dirpath)
    if target.is_dir():
        return
    siblings = pformat([entry.name for entry in target.parent.iterdir()])
    raise AssertionError(
        f"{target.absolute()} is not a directory. Other elements in the parent directory are \n{siblings}"
    )
def assert_dir_filled(dirpath: str | os.PathLike) -> None:
    """Assert that directory exists and contains at least one file or directory (or file like objects like symlinks on Linux).

    Parameters
    ----------
    dirpath
        Path to directory to check.

    Raises
    ------
    AssertionError
        If dirpath does not point to a directory (if the file does exist or it's a file) or the directory is empty.
    """
    # BUG FIX: the original called os.listdir directly, so a missing path or a
    # file raised FileNotFoundError/NotADirectoryError instead of the
    # documented AssertionError. Validate the directory first.
    path = Path(dirpath)
    if not path.is_dir():
        elems = pformat([p.name for p in list(path.parent.iterdir())])
        message = f"{path.absolute()} is not a directory. Other elements in the parent directory are \n{elems}"
        raise AssertionError(message)
    if len(os.listdir(dirpath)) == 0:
        raise AssertionError(f"{dirpath} is an empty directory.")
def assert_file_not_exists(filepath: str | os.PathLike) -> None:
    """Assert that ``filepath`` does NOT point to an existing file.

    On failure, the message lists every entry of the parent directory,
    which is far more useful than a raw ``assert`` on ``os.path.isfile``.

    Parameters
    ----------
    filepath
        Filepath to check.

    Raises
    ------
    AssertionError
        If filepath does point to a file.
    """
    target = Path(filepath)
    if not target.is_file():
        return
    siblings = pformat([entry.name for entry in target.parent.iterdir()])
    raise AssertionError(
        f"{target.absolute()} is a file. Other elements in the parent directory are \n{siblings}"
    )
def assert_dir_not_exists(dirpath: str | os.PathLike) -> None:
    """Assert that directory does not exist.

    Parameters
    ----------
    dirpath
        Path to directory to check.

    Raises
    ------
    AssertionError
        If dirpath points to a directory.
    """
    target = Path(dirpath)
    if not target.is_dir():
        return
    siblings = pformat([entry.name for entry in target.parent.iterdir()])
    raise AssertionError(
        f"{target.absolute()} is a directory. Other elements in the parent directory are \n{siblings}"
    )
def assert_shallow_dict_compare(a: dict, b: dict, message_start: str) -> None:
    """Assert that dictionaries ``a`` and ``b`` are the same (shallow compare).

    ``b`` is treated as the expected values that ``a`` shall abide by.
    On failure, raises with ``message_start`` followed by one line per
    missing, differing, or extraneous item.

    Raises
    ------
    AssertionError
        If the dictionaries differ in keys or values.
    """
    # Fixed docstring: it previously said "Directories" for dict arguments.
    mismatch: list[str] = []
    for b_key, b_value in b.items():
        if b_key not in a:
            mismatch.append(f"Missing item {b_key}: {b_value}")
        elif b_value != a[b_key]:
            mismatch.append(f"For {b_key} got {a[b_key]}, expected {b_value}")
    for a_key, a_value in a.items():
        if a_key not in b:
            mismatch.append(f"Extraneous item {a_key}: {a_value}")
    if mismatch:
        # BUG FIX: a bare `assert` is stripped under `python -O`; raise
        # explicitly like the sibling helpers in this module do.
        mismatch_str = "\n".join(mismatch)
        raise AssertionError(f"{message_start}\n{mismatch_str}")
| 31.689922
| 143
| 0.643346
|
4a08e68142c1fe8854b08afaf9cf43d0bdad327b
| 93
|
py
|
Python
|
sqltest/datasource/csv/__init__.py
|
stayrascal/sqltest
|
bc9f36755ec04f410e0931696a8624b15429b2b3
|
[
"MIT"
] | null | null | null |
sqltest/datasource/csv/__init__.py
|
stayrascal/sqltest
|
bc9f36755ec04f410e0931696a8624b15429b2b3
|
[
"MIT"
] | 1
|
2021-10-19T14:40:02.000Z
|
2021-10-19T14:40:02.000Z
|
sqltest/datasource/csv/__init__.py
|
stayrascal/sqltest
|
bc9f36755ec04f410e0931696a8624b15429b2b3
|
[
"MIT"
] | null | null | null |
from sqltest.datasource.csv.csv_reader import CsvDatasetReader
__all__ = [CsvDatasetReader]
| 23.25
| 62
| 0.849462
|
4a08e71fdd63e55f685e2e9c0470c8e065629956
| 11,956
|
py
|
Python
|
colour/models/rgb/tests/test_derivation.py
|
Ron024/colour
|
6adbfcb61c559cc164957dac01c08ad93b9e84e3
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/rgb/tests/test_derivation.py
|
Ron024/colour
|
6adbfcb61c559cc164957dac01c08ad93b9e84e3
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/rgb/tests/test_derivation.py
|
Ron024/colour
|
6adbfcb61c559cc164957dac01c08ad93b9e84e3
|
[
"BSD-3-Clause"
] | 1
|
2019-12-11T19:48:27.000Z
|
2019-12-11T19:48:27.000Z
|
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.rgb.derivation` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import re
import unittest
from itertools import permutations
from six import text_type
from colour.models import (
normalised_primary_matrix, chromatically_adapted_primaries,
primaries_whitepoint, RGB_luminance_equation, RGB_luminance)
from colour.models.rgb.derivation import xy_to_z
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = [
'Testxy_to_z', 'TestNormalisedPrimaryMatrix',
'TestChromaticallyAdaptedPrimaries', 'TestPrimariesWhitepoint',
'TestRGBLuminanceEquation', 'TestRGBLuminance'
]
class Testxy_to_z(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.xy_to_z` definition unit
    tests methods.
    """

    def test_xy_to_z(self):
        """
        Tests :func:`colour.models.rgb.derivation.xy_to_z` definition.
        """

        # z = 1 - x - y for each chromaticity pair.
        np.testing.assert_almost_equal(
            xy_to_z(np.array([0.2500, 0.2500])), 0.50000000, decimal=7)
        np.testing.assert_almost_equal(
            xy_to_z(np.array([0.0001, -0.0770])), 1.07690000, decimal=7)
        np.testing.assert_almost_equal(
            xy_to_z(np.array([0.0000, 1.0000])), 0.00000000, decimal=7)

    def test_n_dimensional_xy_to_z(self):
        """
        Tests :func:`colour.models.rgb.derivation.xy_to_z` definition
        n-dimensional arrays support.
        """

        xy = np.array([0.25, 0.25])
        z = xy_to_z(xy)

        # The scalar result must tile/reshape in lockstep with the input.
        xy = np.tile(xy, (6, 1))
        z = np.tile(
            z,
            6,
        )
        np.testing.assert_almost_equal(xy_to_z(xy), z, decimal=7)

        xy = np.reshape(xy, (2, 3, 2))
        z = np.reshape(z, (2, 3))
        np.testing.assert_almost_equal(xy_to_z(xy), z, decimal=7)

    @ignore_numpy_errors
    def test_nan_xy_to_z(self):
        """
        Tests :func:`colour.models.rgb.derivation.xy_to_z` definition nan
        support.
        """

        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=2))
        for case in cases:
            # Only verifies that no unhandled exception escapes.
            xy_to_z(case)
class TestNormalisedPrimaryMatrix(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.normalised_primary_matrix`
    definition unit tests methods.
    """

    def test_normalised_primary_matrix(self):
        """
        Tests :func:`colour.models.rgb.derivation.normalised_primary_matrix`
        definition.
        """

        # Primaries are given as a flat (x_R, y_R, x_G, y_G, x_B, y_B) array
        # with the whitepoint chromaticity as the second argument.
        np.testing.assert_almost_equal(
            normalised_primary_matrix(
                np.array(
                    [0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700]),
                np.array([0.32168, 0.33767])),
            np.array([
                [0.95255240, 0.00000000, 0.00009368],
                [0.34396645, 0.72816610, -0.07213255],
                [0.00000000, 0.00000000, 1.00882518],
            ]),
            decimal=7)

        np.testing.assert_almost_equal(
            normalised_primary_matrix(
                np.array([0.640, 0.330, 0.300, 0.600, 0.150, 0.060]),
                np.array([0.3127, 0.3290])),
            np.array([
                [0.41239080, 0.35758434, 0.18048079],
                [0.21263901, 0.71516868, 0.07219232],
                [0.01933082, 0.11919478, 0.95053215],
            ]),
            decimal=7)

    @ignore_numpy_errors
    def test_nan_normalised_primary_matrix(self):
        """
        Tests :func:`colour.models.rgb.derivation.normalised_primary_matrix`
        definition nan support.
        """

        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=2))
        for case in cases:
            P = np.array(np.vstack([case, case, case]))
            W = np.array(case)
            try:
                normalised_primary_matrix(P, W)
            except np.linalg.linalg.LinAlgError:
                # Degenerate primaries may yield a singular matrix, which is
                # acceptable for this robustness test.
                pass
class TestChromaticallyAdaptedPrimaries(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.\
chromatically_adapted_primaries` definition unit tests methods.
    """

    def test_chromatically_adapted_primaries(self):
        """
        Tests :func:`colour.models.rgb.derivation.\
chromatically_adapted_primaries` definition.
        """

        # Arguments: flat primaries array, source whitepoint, target
        # whitepoint, and (optionally) the chromatic adaptation transform.
        np.testing.assert_almost_equal(
            chromatically_adapted_primaries(
                np.array(
                    [0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700]),
                np.array([0.32168, 0.33767]), np.array([0.34570, 0.35850])),
            np.array([
                [0.73431182, 0.26694964],
                [0.02211963, 0.98038009],
                [-0.05880375, -0.12573056],
            ]),
            decimal=7)

        np.testing.assert_almost_equal(
            chromatically_adapted_primaries(
                np.array([0.640, 0.330, 0.300, 0.600, 0.150, 0.060]),
                np.array([0.31270, 0.32900]), np.array([0.34570, 0.35850])),
            np.array([
                [0.64922534, 0.33062196],
                [0.32425276, 0.60237128],
                [0.15236177, 0.06118676],
            ]),
            decimal=7)

        # Same primaries with an explicit 'Bradford' adaptation transform.
        np.testing.assert_almost_equal(
            chromatically_adapted_primaries(
                np.array([0.640, 0.330, 0.300, 0.600, 0.150, 0.060]),
                np.array([0.31270, 0.32900]), np.array([0.34570, 0.35850]),
                'Bradford'),
            np.array([
                [0.64844144, 0.33085331],
                [0.32119518, 0.59784434],
                [0.15589322, 0.06604921],
            ]),
            decimal=7)

    @ignore_numpy_errors
    def test_nan_chromatically_adapted_primaries(self):
        """
        Tests :func:`colour.models.rgb.derivation.\
chromatically_adapted_primaries` definition nan support.
        """

        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=2))
        for case in cases:
            P = np.array(np.vstack([case, case, case]))
            W = np.array(case)
            # Only verifies that no unhandled exception escapes.
            chromatically_adapted_primaries(P, W, W)
class TestPrimariesWhitepoint(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.primaries_whitepoint`
    definition unit tests methods.
    """

    def test_primaries_whitepoint(self):
        """
        Tests :func:`colour.models.rgb.derivation.primaries_whitepoint`
        definition.
        """

        # The inverse of normalised_primary_matrix: recover the (3, 2)
        # primaries and the whitepoint chromaticity from a 3x3 NPM.
        P, W = primaries_whitepoint(
            np.array([
                [0.95255240, 0.00000000, 0.00009368],
                [0.34396645, 0.72816610, -0.07213255],
                [0.00000000, 0.00000000, 1.00882518],
            ]))
        np.testing.assert_almost_equal(
            P,
            np.array([
                [0.73470, 0.26530],
                [0.00000, 1.00000],
                [0.00010, -0.07700],
            ]),
            decimal=7)
        np.testing.assert_almost_equal(
            W, np.array([0.32168, 0.33767]), decimal=7)

        P, W = primaries_whitepoint(
            np.array([
                [0.41240000, 0.35760000, 0.18050000],
                [0.21260000, 0.71520000, 0.07220000],
                [0.01930000, 0.11920000, 0.95050000],
            ]))
        np.testing.assert_almost_equal(
            P,
            np.array([
                [0.64007450, 0.32997051],
                [0.30000000, 0.60000000],
                [0.15001662, 0.06000665],
            ]),
            decimal=7)
        np.testing.assert_almost_equal(
            W, np.array([0.31271591, 0.32900148]), decimal=7)

    @ignore_numpy_errors
    def test_nan_primaries_whitepoint(self):
        """
        Tests :func:`colour.models.rgb.derivation.primaries_whitepoint`
        definition nan support.
        """

        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            M = np.array(np.vstack([case, case, case]))
            # Only verifies that no unhandled exception escapes.
            primaries_whitepoint(M)
class TestRGBLuminanceEquation(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.RGB_luminance_equation`
    definition unit tests methods.
    """

    def test_RGB_luminance_equation(self):
        """
        Tests :func:`colour.models.rgb.derivation.RGB_luminance_equation`
        definition.
        """

        # The function returns a human-readable string of the form
        # "Y = c_R(R) + c_G(G) + c_B(B)".
        self.assertIsInstance(
            RGB_luminance_equation(
                np.array(
                    [0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700]),
                np.array([0.32168, 0.33767])), text_type)

        # TODO: Simplify that monster.
        # Matches three signed float coefficients applied to (R), (G), (B).
        pattern = (
            'Y\\s?=\\s?[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?.'
            '\\(R\\)\\s?[+-]\\s?[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?.'
            '\\(G\\)\\s?[+-]\\s?[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?.'
            '\\(B\\)')
        P = np.array([0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700])
        self.assertTrue(
            re.match(pattern,
                     RGB_luminance_equation(P, np.array([0.32168, 0.33767]))))
class TestRGBLuminance(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.RGB_luminance` definition
    unit tests methods.
    """

    def test_RGB_luminance(self):
        """
        Tests :func:`colour.models.rgb.derivation.RGB_luminance`
        definition.
        """

        # A neutral RGB value has luminance equal to its components.
        self.assertAlmostEqual(
            RGB_luminance(
                np.array([0.18, 0.18, 0.18]),
                np.array(
                    [0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700]),
                np.array([0.32168, 0.33767])),
            0.18000000,
            places=7)

        self.assertAlmostEqual(
            RGB_luminance(
                np.array([0.21959402, 0.06986677, 0.04703877]),
                np.array(
                    [0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700]),
                np.array([0.32168, 0.33767])),
            0.123014562384318,
            places=7)

        self.assertAlmostEqual(
            RGB_luminance(
                np.array([0.45620519, 0.03081071, 0.04091952]),
                np.array([0.6400, 0.3300, 0.3000, 0.6000, 0.1500, 0.0600]),
                np.array([0.31270, 0.32900])),
            0.121995947729870,
            places=7)

    def test_n_dimensional_RGB_luminance(self):
        """
        Tests :func:`colour.models.rgb.derivation.RGB_luminance` definition
        n_dimensional arrays support.
        """

        # BUG FIX: the original assignments ended with stray trailing commas,
        # which wrapped RGB and P in accidental 1-tuples.
        RGB = np.array([0.18, 0.18, 0.18])
        P = np.array([0.73470, 0.26530, 0.00000, 1.00000, 0.00010, -0.07700])
        W = np.array([0.32168, 0.33767])
        Y = RGB_luminance(RGB, P, W)

        RGB = np.tile(RGB, (6, 1))
        Y = np.tile(Y, 6)
        np.testing.assert_almost_equal(RGB_luminance(RGB, P, W), Y)

        RGB = np.reshape(RGB, (2, 3, 3))
        Y = np.reshape(Y, (2, 3))
        np.testing.assert_almost_equal(RGB_luminance(RGB, P, W), Y)

    @ignore_numpy_errors
    def test_nan_RGB_luminance(self):
        """
        Tests :func:`colour.models.rgb.derivation.RGB_luminance`
        definition nan support.
        """

        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            RGB = np.array(case)
            P = np.array(np.vstack([case[0:2], case[0:2], case[0:2]]))
            W = np.array(case[0:2])
            try:
                RGB_luminance(RGB, P, W)
            except np.linalg.linalg.LinAlgError:
                # Degenerate primaries may yield a singular matrix; acceptable
                # for this robustness test.
                pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 32.313514
| 78
| 0.549682
|
4a08e733b770c1e9f532d04d9efbff7644798cee
| 28,919
|
py
|
Python
|
pandas/core/internals/construction.py
|
juliansmidek/pandas
|
8945a4267588ec2608bec7be6745f6beff0373da
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/internals/construction.py
|
juliansmidek/pandas
|
8945a4267588ec2608bec7be6745f6beff0373da
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/internals/construction.py
|
juliansmidek/pandas
|
8945a4267588ec2608bec7be6745f6beff0373da
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
"""
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from __future__ import annotations
from collections import abc
from typing import (
TYPE_CHECKING,
Any,
Dict,
Hashable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
)
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
dict_compat,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_list_like,
is_named_tuple,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas.core import (
algorithms,
common as com,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
)
from pandas.core.construction import (
extract_array,
sanitize_array,
)
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
Index,
ensure_index,
get_objs_combined_axis,
union_indexes,
)
from pandas.core.internals.array_manager import (
ArrayManager,
SingleArrayManager,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block,
)
from pandas.core.internals.managers import (
BlockManager,
SingleBlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
if TYPE_CHECKING:
from numpy.ma.mrecords import MaskedRecords
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(
    arrays,
    arr_names,
    index,
    columns,
    dtype: Optional[DtypeObj] = None,
    verify_integrity: bool = True,
    typ: Optional[str] = None,
) -> Manager:
    """
    Segregate Series based on type and coerce into matrices.

    Needs to handle a lot of exceptional cases.
    """
    arr_names = ensure_index(arr_names)

    if verify_integrity:
        # figure out the index, if necessary
        if index is None:
            # Raises ValueError if the arrays are all scalars with no index.
            index = extract_index(arrays)
        else:
            index = ensure_index(index)

        # don't force copy because getting jammed in an ndarray anyway
        arrays = _homogenize(arrays, index, dtype)
    else:
        index = ensure_index(index)

    columns = ensure_index(columns)

    # from BlockManager perspective
    axes = [columns, index]

    if typ == "block":
        return create_block_manager_from_arrays(arrays, arr_names, axes)
    elif typ == "array":
        if len(columns) != len(arrays):
            # Only a fully-empty frame may have a columns/arrays mismatch.
            assert len(arrays) == 0
            arrays = [np.array([], dtype=object) for _ in range(len(columns))]
        return ArrayManager(arrays, [index, columns])
    else:
        raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
def rec_array_to_mgr(
    data: Union[MaskedRecords, np.recarray, np.ndarray],
    index,
    columns,
    dtype: Optional[DtypeObj],
    copy: bool,
    typ: str,
):
    """
    Extract from a masked rec array and create the manager.
    """
    # essentially process a record array then fill it
    fdata = ma.getdata(data)
    if index is None:
        # Derive the index from the records' ``name`` attributes if any.
        index = _get_names_from_index(fdata)
    else:
        index = ensure_index(index)

    if columns is not None:
        columns = ensure_index(columns)
    arrays, arr_columns = to_arrays(fdata, columns)

    # fill if needed
    if isinstance(data, np.ma.MaskedArray):
        new_arrays = fill_masked_arrays(data, arr_columns)
    else:
        # error: Incompatible types in assignment (expression has type
        # "List[ExtensionArray]", variable has type "List[ndarray]")
        new_arrays = arrays  # type: ignore[assignment]

    # create the manager
    # error: Argument 1 to "reorder_arrays" has incompatible type "List[ndarray]";
    # expected "List[ExtensionArray]"
    arrays, arr_columns = reorder_arrays(
        new_arrays, arr_columns, columns  # type: ignore[arg-type]
    )
    if columns is None:
        columns = arr_columns

    mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype, typ=typ)

    if copy:
        mgr = mgr.copy()
    return mgr
def fill_masked_arrays(data: MaskedRecords, arr_columns: Index) -> List[np.ndarray]:
    """
    Convert numpy MaskedRecords to ensure mask is softened.

    Masked positions are replaced with the column's fill value; columns with
    no masked entries are passed through unchanged.
    """
    new_arrays = []
    for col in arr_columns:
        arr = data[col]
        fv = arr.fill_value

        mask = ma.getmaskarray(arr)
        if mask.any():
            # Upcast first so the fill value is representable (e.g. int
            # columns become float to hold NaN), then fill in place.
            arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
            arr[mask] = fv
        new_arrays.append(arr)
    return new_arrays
def mgr_to_mgr(mgr, typ: str):
    """
    Convert to specific type of Manager. Does not copy if the type is already
    correct. Does not guarantee a copy otherwise.

    Parameters
    ----------
    mgr : BlockManager, SingleBlockManager, ArrayManager or SingleArrayManager
    typ : {"block", "array"}
        Target manager flavor; any other value raises ValueError.
    """
    new_mgr: Manager
    if typ == "block":
        if isinstance(mgr, BlockManager):
            new_mgr = mgr
        else:
            if mgr.ndim == 2:
                new_mgr = arrays_to_mgr(
                    mgr.arrays, mgr.axes[0], mgr.axes[1], mgr.axes[0], typ="block"
                )
            else:
                new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index)
    elif typ == "array":
        if isinstance(mgr, ArrayManager):
            new_mgr = mgr
        else:
            if mgr.ndim == 2:
                # Copy each column out of the 2D blocks into its own 1D array.
                arrays = [mgr.iget_values(i).copy() for i in range(len(mgr.axes[0]))]
                new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]])
            else:
                new_mgr = SingleArrayManager([mgr.internal_values()], [mgr.index])
    else:
        raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
    return new_mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def ndarray_to_mgr(
    values, index, columns, dtype: Optional[DtypeObj], copy: bool, typ: str
) -> Manager:
    """
    Build a Manager from array-like ``values``.

    used in DataFrame.__init__; input must be a ndarray, list, Series,
    Index or ExtensionArray.
    """
    if isinstance(values, ABCSeries):
        if columns is None:
            if values.name is not None:
                columns = Index([values.name])
        if index is None:
            index = values.index
        else:
            values = values.reindex(index)

        # zero len case (GH #2234)
        if not len(values) and columns is not None and len(columns):
            values = np.empty((0, 1), dtype=object)

    if is_extension_array_dtype(values) or isinstance(dtype, ExtensionDtype):
        # GH#19157
        if isinstance(values, np.ndarray) and values.ndim > 1:
            # GH#12513 a EA dtype passed with a 2D array, split into
            # multiple EAs that view the values
            values = [values[:, n] for n in range(values.shape[1])]
        else:
            values = [values]

        if columns is None:
            columns = Index(range(len(values)))

        return arrays_to_mgr(values, columns, index, columns, dtype=dtype, typ=typ)

    # by definition an array here
    # the dtypes will be coerced to a single dtype
    values = _prep_ndarray(values, copy=copy)

    if dtype is not None and not is_dtype_equal(values.dtype, dtype):
        shape = values.shape
        flat = values.ravel()

        if not is_integer_dtype(dtype):
            # TODO: skipping integer_dtype is needed to keep the tests passing,
            # not clear it is correct
            # Note: we really only need _try_cast, but keeping to exposed funcs
            values = sanitize_array(
                flat, None, dtype=dtype, copy=copy, raise_cast_failure=True
            )
        else:
            try:
                values = construct_1d_ndarray_preserving_na(
                    flat, dtype=dtype, copy=False
                )
            except Exception as err:
                # e.g. ValueError when trying to cast object dtype to float64
                msg = f"failed to cast to '{dtype}' (Exception was: {err})"
                raise ValueError(msg) from err
        values = values.reshape(shape)

    # _prep_ndarray ensures that values.ndim == 2 at this point
    index, columns = _get_axes(
        values.shape[0], values.shape[1], index=index, columns=columns
    )

    # Transpose: from here on axis 0 of ``values`` corresponds to columns.
    values = values.T

    _check_values_indices_shape_match(values, index, columns)

    # if we don't have a dtype specified, then try to convert objects
    # on the entire block; this is to convert if we have datetimelike's
    # embedded in an object type
    if dtype is None and is_object_dtype(values.dtype):

        if values.ndim == 2 and values.shape[0] != 1:
            # transpose and separate blocks: each row (i.e. column of the
            # frame) may independently infer a datetimelike dtype
            dvals_list = [maybe_infer_to_datetimelike(row) for row in values]
            dvals_list = [ensure_block_shape(dval, 2) for dval in dvals_list]

            # TODO: What about re-joining object columns?
            dvals_list = [maybe_squeeze_dt64tz(x) for x in dvals_list]
            block_values = [
                new_block(dvals_list[n], placement=n, ndim=2)
                for n in range(len(dvals_list))
            ]

        else:
            datelike_vals = maybe_infer_to_datetimelike(values)
            datelike_vals = maybe_squeeze_dt64tz(datelike_vals)
            nb = new_block(datelike_vals, placement=slice(len(columns)), ndim=2)
            block_values = [nb]
    else:
        new_values = maybe_squeeze_dt64tz(values)
        nb = new_block(new_values, placement=slice(len(columns)), ndim=2)
        block_values = [nb]

    if len(columns) == 0:
        block_values = []

    return create_block_manager_from_blocks(block_values, [columns, index])
def _check_values_indices_shape_match(
    values: np.ndarray, index: Index, columns: Index
) -> None:
    """
    Check that the shape implied by our axes matches the actual shape of the
    data. Note ``values`` arrives transposed: axis 0 corresponds to columns.
    """
    if values.shape[0] == len(columns):
        return

    # Could let this raise in Block constructor, but we get a more
    # helpful exception message this way.
    if values.shape[1] == 0:
        raise ValueError("Empty data passed with indices specified.")

    passed = values.T.shape
    implied = (len(index), len(columns))
    raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
def maybe_squeeze_dt64tz(dta: ArrayLike) -> ArrayLike:
    """
    If we have a tzaware DatetimeArray with shape (1, N), squeeze to (N,).
    Anything else is returned unchanged.
    """
    # TODO(EA2D): kludge not needed with 2D EAs
    if not isinstance(dta, DatetimeArray):
        return dta
    if dta.ndim != 2 or dta.tz is None:
        return dta
    assert dta.shape[0] == 1
    return dta[0]
def dict_to_mgr(
    data: Dict, index, columns, dtype: Optional[DtypeObj], typ: str
) -> Manager:
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.

    Used in DataFrame.__init__
    """
    arrays: Union[Sequence[Any], Series]

    if columns is not None:
        from pandas.core.series import Series

        # Reindexing the dict by ``columns`` marks requested-but-absent
        # columns as NaN entries in ``arrays``.
        arrays = Series(data, index=columns, dtype=object)
        data_names = arrays.index

        missing = arrays.isna()
        if index is None:
            # GH10856
            # raise ValueError if only scalars in dict
            index = extract_index(arrays[~missing])
        else:
            index = ensure_index(index)

        # no obvious "empty" int column
        if missing.any() and not is_integer_dtype(dtype):
            nan_dtype: DtypeObj

            if dtype is None or (
                isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.flexible)
            ):
                # GH#1783
                nan_dtype = np.dtype("object")
            else:
                nan_dtype = dtype
            # Fill every missing column with an all-NaN column of nan_dtype.
            val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
            arrays.loc[missing] = [val] * missing.sum()

    else:
        keys = list(data.keys())
        columns = data_names = Index(keys)
        arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
        # GH#24096 need copy to be deep for datetime64tz case
        # TODO: See if we can avoid these copies
        arrays = [arr if not isinstance(arr, ABCIndex) else arr._data for arr in arrays]
        arrays = [
            arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
        ]
    return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype, typ=typ)
def nested_data_to_arrays(
    data: Sequence,
    columns: Optional[Index],
    index: Optional[Index],
    dtype: Optional[DtypeObj],
):
    """
    Convert a single sequence of arrays to multiple arrays.

    Returns a 3-tuple ``(arrays, columns, index)``.
    """
    # By the time we get here we have already checked treat_as_nested(data)

    if is_named_tuple(data[0]) and columns is None:
        # Namedtuples provide their own column names via ``_fields``.
        columns = ensure_index(data[0]._fields)

    arrays, columns = to_arrays(data, columns, dtype=dtype)
    columns = ensure_index(columns)

    if index is None:
        if isinstance(data[0], ABCSeries):
            index = _get_names_from_index(data)
        elif isinstance(data[0], Categorical):
            # GH#38845 hit in test_constructor_categorical
            index = ibase.default_index(len(data[0]))
        else:
            index = ibase.default_index(len(data))

    return arrays, columns, index
def treat_as_nested(data) -> bool:
    """
    Check if we should use nested_data_to_arrays: a non-empty sequence
    whose first element is itself a 1-dimensional list-like.
    """
    if not len(data) > 0:
        return False
    first = data[0]
    return is_list_like(first) and getattr(first, "ndim", 1) == 1
# ---------------------------------------------------------------------
def _prep_ndarray(values, copy: bool = True) -> np.ndarray:
    """Coerce ``values`` to a 2D ndarray, optionally copying ndarray input."""
    if not isinstance(values, (np.ndarray, ABCSeries, Index)):
        if len(values) == 0:
            return np.empty((0, 0), dtype=object)
        elif isinstance(values, range):
            arr = np.arange(values.start, values.stop, values.step, dtype="int64")
            return arr[..., np.newaxis]

        def convert(v):
            # Convert a row-like to a platform-dtype array; scalars,
            # DataFrames and dtype-less non-sequence objects pass through.
            if not is_list_like(v) or isinstance(v, ABCDataFrame):
                return v
            elif not hasattr(v, "dtype") and not isinstance(v, (list, tuple, range)):
                # TODO: should we cast these to list?
                return v

            v = extract_array(v, extract_numpy=True)
            res = maybe_convert_platform(v)
            return res

        # we could have a 1-dim or 2-dim list here
        # this is equiv of np.asarray, but does object conversion
        # and platform dtype preservation
        try:
            if is_list_like(values[0]):
                values = np.array([convert(v) for v in values])
            elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
                # GH#21861
                values = np.array([convert(v) for v in values])
            else:
                values = convert(values)
        except (ValueError, TypeError):
            values = convert(values)

    else:

        # drop subclass info, do not copy data
        values = np.asarray(values)
        if copy:
            values = values.copy()

    if values.ndim == 1:
        # Promote a 1D array to a single-column 2D array.
        values = values.reshape((values.shape[0], 1))
    elif values.ndim != 2:
        raise ValueError(f"Must pass 2-d input. shape={values.shape}")

    return values
def _homogenize(data, index: Index, dtype: Optional[DtypeObj]):
    """Align/convert each element of ``data`` to ``index`` and ``dtype``."""
    oindex = None
    homogenized = []

    for val in data:
        if isinstance(val, ABCSeries):
            if dtype is not None:
                val = val.astype(dtype)
            if val.index is not index:
                # Forces alignment. No need to copy data since we
                # are putting it into an ndarray later
                val = val.reindex(index, copy=False)
            # TODO extract_array should be preferred, but that gives failures for
            # `extension/test_numpy.py` (extract_array will convert numpy arrays
            # to PandasArray), see https://github.com/pandas-dev/pandas/issues/40021
            # val = extract_array(val, extract_numpy=True)
            val = val._values
        else:
            if isinstance(val, dict):
                # Map the dict's values onto the target index. The object
                # view of the index is computed lazily and reused.
                if oindex is None:
                    oindex = index.astype("O")

                if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)):
                    val = dict_compat(val)
                else:
                    val = dict(val)
                val = lib.fast_multiget(val, oindex._values, default=np.nan)

            val = sanitize_array(
                val, index, dtype=dtype, copy=False, raise_cast_failure=False
            )

        homogenized.append(val)

    return homogenized
def extract_index(data) -> Index:
    """
    Try to infer an Index from the passed data, raise ValueError on failure.
    """
    index = None
    if len(data) == 0:
        index = Index([])
    elif len(data) > 0:
        raw_lengths = []
        indexes: List[Union[List[Hashable], Index]] = []

        have_raw_arrays = False
        have_series = False
        have_dicts = False

        # Classify the inputs: Series and dicts contribute candidate
        # indexes, bare 1D array-likes only contribute lengths.
        for val in data:
            if isinstance(val, ABCSeries):
                have_series = True
                indexes.append(val.index)
            elif isinstance(val, dict):
                have_dicts = True
                indexes.append(list(val.keys()))
            elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
                have_raw_arrays = True
                raw_lengths.append(len(val))

        if not indexes and not raw_lengths:
            raise ValueError("If using all scalar values, you must pass an index")

        # Series indexes take precedence over dict keys.
        if have_series:
            index = union_indexes(indexes)
        elif have_dicts:
            index = union_indexes(indexes, sort=False)

        if have_raw_arrays:
            lengths = list(set(raw_lengths))
            if len(lengths) > 1:
                raise ValueError("All arrays must be of the same length")

            if have_dicts:
                raise ValueError(
                    "Mixing dicts with non-Series may lead to ambiguous ordering."
                )

            if have_series:
                assert index is not None  # for mypy
                if lengths[0] != len(index):
                    msg = (
                        f"array length {lengths[0]} does not match index "
                        f"length {len(index)}"
                    )
                    raise ValueError(msg)
            else:
                index = ibase.default_index(lengths[0])

    # error: Argument 1 to "ensure_index" has incompatible type "Optional[Index]";
    # expected "Union[Union[Union[ExtensionArray, ndarray], Index, Series],
    # Sequence[Any]]"
    return ensure_index(index)  # type: ignore[arg-type]
def reorder_arrays(
    arrays: List[ArrayLike], arr_columns: Index, columns: Optional[Index]
) -> Tuple[List[ArrayLike], Index]:
    """Reorder ``arrays``/``arr_columns`` to match the requested ``columns``."""
    # Nothing to do when no ordering was requested or either side is empty.
    if columns is None or not len(columns) or not len(arr_columns):
        return arrays, arr_columns

    indexer = ensure_index(arr_columns).get_indexer(columns)
    reordered_names = ensure_index([arr_columns[i] for i in indexer])
    reordered = [arrays[i] for i in indexer]
    return reordered, reordered_names
def _get_names_from_index(data) -> Index:
    """
    Build an Index from the ``name`` attribute of each element of ``data``
    (e.g. a list of Series); unnamed entries get "Unnamed {k}" placeholders.
    Falls back to a default RangeIndex when nothing is named.
    """
    names = [getattr(s, "name", None) for s in data]
    if all(n is None for n in names):
        return ibase.default_index(len(data))

    labels: List[Hashable] = []
    unnamed_count = 0
    for n in names:
        if n is None:
            # Unnamed entries are labelled sequentially.
            labels.append(f"Unnamed {unnamed_count}")
            unnamed_count += 1
        else:
            labels.append(n)
    return Index(labels)
def _get_axes(
    N: int, K: int, index: Optional[Index], columns: Optional[Index]
) -> Tuple[Index, Index]:
    """
    Return ``(index, columns)`` as Index objects, defaulting a missing axis
    to a RangeIndex of length N (rows) or K (columns).
    """
    index = ibase.default_index(N) if index is None else ensure_index(index)
    columns = ibase.default_index(K) if columns is None else ensure_index(columns)
    return index, columns
def dataclasses_to_dicts(data):
    """
    Converts a list of dataclass instances to a list of dictionaries.

    Parameters
    ----------
    data : List[Type[dataclass]]

    Returns
    --------
    list_dict : List[dict]

    Examples
    --------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int
    >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
    [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
    """
    # BUG FIX (docs): the original doctest used ">>>" on the class-definition
    # continuation line (invalid doctest syntax) and showed output that does
    # not match the actual dict repr, so the example could never pass.
    from dataclasses import asdict

    return list(map(asdict, data))
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(
    data, columns: Optional[Index], dtype: Optional[DtypeObj] = None
) -> Tuple[List[ArrayLike], Index]:
    """
    Return list of arrays, columns.
    """
    if isinstance(data, ABCDataFrame):
        if columns is not None:
            # Keep only the requested columns, in the frame's own order.
            arrays = [
                data._ixs(i, axis=1).values
                for i, col in enumerate(data.columns)
                if col in columns
            ]
        else:
            columns = data.columns
            arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]

        return arrays, columns

    if not len(data):
        if isinstance(data, np.ndarray):
            if data.dtype.names is not None:
                # i.e. numpy structured array
                columns = ensure_index(data.dtype.names)
                arrays = [data[name] for name in columns]
                return arrays, columns
        return [], ensure_index([])

    elif isinstance(data[0], Categorical):
        if columns is None:
            columns = ibase.default_index(len(data))
        return data, columns

    elif isinstance(data, np.ndarray) and data.dtype.names is not None:
        # e.g. recarray
        columns = Index(list(data.dtype.names))
        arrays = [data[k] for k in columns]
        return arrays, columns

    # Dispatch on the type of the first row.
    if isinstance(data[0], (list, tuple)):
        arr = _list_to_arrays(data)
    elif isinstance(data[0], abc.Mapping):
        arr, columns = _list_of_dict_to_arrays(data, columns)
    elif isinstance(data[0], ABCSeries):
        arr, columns = _list_of_series_to_arrays(data, columns)
    else:
        # last ditch effort
        data = [tuple(x) for x in data]
        arr = _list_to_arrays(data)

    content, columns = _finalize_columns_and_data(arr, columns, dtype)
    return content, columns
def _list_to_arrays(data: List[Union[Tuple, List]]) -> np.ndarray:
    """Convert a list of rows (tuples or lists) to a 2D object ndarray."""
    # Returned np.ndarray has ndim = 2
    # Note: we already check len(data) > 0 before getting here
    if isinstance(data[0], tuple):
        content = lib.to_object_array_tuples(data)
    else:
        # list of lists
        content = lib.to_object_array(data)
    return content
def _list_of_series_to_arrays(
    data: List,
    columns: Optional[Index],
) -> Tuple[np.ndarray, Index]:
    """Stack a list of Series into a 2D array, aligning each to ``columns``."""
    # returned np.ndarray has ndim == 2

    if columns is None:
        # We know pass_data is non-empty because data[0] is a Series
        pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
        columns = get_objs_combined_axis(pass_data, sort=False)

    # Cache indexers keyed on index identity: many Series in ``data``
    # typically share the exact same index object.
    indexer_cache: Dict[int, np.ndarray] = {}

    aligned_values = []
    for s in data:
        index = getattr(s, "index", None)
        if index is None:
            index = ibase.default_index(len(s))

        if id(index) in indexer_cache:
            indexer = indexer_cache[id(index)]
        else:
            indexer = indexer_cache[id(index)] = index.get_indexer(columns)

        values = extract_array(s, extract_numpy=True)
        aligned_values.append(algorithms.take_nd(values, indexer))

    # error: Argument 1 to "vstack" has incompatible type "List[ExtensionArray]";
    # expected "Sequence[Union[Union[int, float, complex, str, bytes, generic],
    # Sequence[Union[int, float, complex, str, bytes, generic]],
    # Sequence[Sequence[Any]], _SupportsArray]]"
    content = np.vstack(aligned_values)  # type: ignore[arg-type]

    return content, columns
def _list_of_dict_to_arrays(
    data: List[Dict],
    columns: Optional[Index],
) -> Tuple[np.ndarray, Index]:
    """
    Convert list of dicts to numpy arrays

    if `columns` is not passed, column names are inferred from the records
    - for OrderedDict and dicts, the column names match
      the key insertion-order from the first record to the last.
    - For other kinds of dict-likes, the keys are lexically sorted.

    Parameters
    ----------
    data : iterable
        collection of records (OrderedDict, dict)
    columns: iterables or None

    Returns
    -------
    content : np.ndarray[object, ndim=2]
    columns : Index
    """
    if columns is None:
        gen = (list(x.keys()) for x in data)
        # Plain dicts preserve insertion order; only sort when every record
        # is some other dict-like.
        sort = not any(isinstance(d, dict) for d in data)
        columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
        columns = ensure_index(columns)

    # assure that they are of the base dict class and not of derived
    # classes
    data = [(type(d) is dict) and d or dict(d) for d in data]

    content = lib.dicts_to_array(data, list(columns))
    return content, columns
def _finalize_columns_and_data(
    content: np.ndarray,  # ndim == 2
    columns: Optional[Index],
    dtype: Optional[DtypeObj],
) -> Tuple[List[ArrayLike], Index]:
    """
    Ensure we have valid columns, cast object dtypes if possible.
    """
    # Split the 2D row-major array into one 1D array per column.
    contents = list(content.T)

    try:
        columns = _validate_or_indexify_columns(contents, columns)
    except AssertionError as err:
        # GH#26429 do not raise user-facing AssertionError
        raise ValueError(err) from err

    if len(contents) and contents[0].dtype == np.object_:
        contents = _convert_object_array(contents, dtype=dtype)

    return contents, columns
def _validate_or_indexify_columns(
    content: List[np.ndarray], columns: Optional[Index]
) -> Index:
    """
    If columns is None, make numbers as column names; Otherwise, validate that
    columns have valid length.

    Parameters
    ----------
    content : list of np.ndarrays
    columns : Index or None

    Returns
    -------
    Index
        If columns is None, assign positional column index value as columns.

    Raises
    ------
    1. AssertionError when content is not composed of list of lists, and if
       length of columns is not equal to length of content.
    2. ValueError when content is list of lists, but length of each sub-list
       is not equal
    3. ValueError when content is list of lists, but length of sub-list is
       not equal to length of content
    """
    if columns is None:
        columns = ibase.default_index(len(content))
    else:

        # Add mask for data which is composed of list of lists
        is_mi_list = isinstance(columns, list) and all(
            isinstance(col, list) for col in columns
        )

        if not is_mi_list and len(columns) != len(content):  # pragma: no cover
            # caller's responsibility to check for this...
            raise AssertionError(
                f"{len(columns)} columns passed, passed data had "
                f"{len(content)} columns"
            )
        elif is_mi_list:

            # check if nested list column, length of each sub-list should be equal
            if len({len(col) for col in columns}) > 1:
                raise ValueError(
                    "Length of columns passed for MultiIndex columns is different"
                )

            # if columns is not empty and length of sublist is not equal to content
            elif columns and len(columns[0]) != len(content):
                raise ValueError(
                    f"{len(columns[0])} columns passed, passed data had "
                    f"{len(content)} columns"
                )
    return columns
def _convert_object_array(
    content: List[np.ndarray], dtype: Optional[DtypeObj]
) -> List[ArrayLike]:
    """
    Internal function to convert object array.

    Parameters
    ----------
    content: List[np.ndarray]
    dtype: np.dtype or ExtensionDtype

    Returns
    -------
    List[ArrayLike]
    """
    # provide soft conversion of object dtypes
    def convert(arr):
        # Skip inference when the caller explicitly asked for object dtype.
        if dtype != np.dtype("O"):
            arr = lib.maybe_convert_objects(arr)
            arr = maybe_cast_to_datetime(arr, dtype)
        return arr

    arrays = [convert(arr) for arr in content]

    return arrays
| 30.995713
| 88
| 0.604689
|
4a08e97a13a9f40bf992da5901d296e43cceafa9
| 2,135
|
py
|
Python
|
anvil/plugins/maya/publish/extract_model.py
|
icyvapor/config
|
0b367f02da9cf5c7dcfb5253d99da2f1dfc5e018
|
[
"MIT"
] | null | null | null |
anvil/plugins/maya/publish/extract_model.py
|
icyvapor/config
|
0b367f02da9cf5c7dcfb5253d99da2f1dfc5e018
|
[
"MIT"
] | null | null | null |
anvil/plugins/maya/publish/extract_model.py
|
icyvapor/config
|
0b367f02da9cf5c7dcfb5253d99da2f1dfc5e018
|
[
"MIT"
] | null | null | null |
import pyblish.api
class ExtractAvaModel(pyblish.api.InstancePlugin):
    """Produce a stripped down Maya file from instance

    This plug-in takes into account only nodes relevant to models
    and discards anything else, especially deformers along with
    their intermediate nodes.
    """

    label = "Model"
    order = pyblish.api.ExtractorOrder
    hosts = ["maya"]
    families = ["anvil.model"]

    def process(self, instance):
        """Export the instance's nodes to a Maya ASCII file in a staging dir."""
        # Imports are deferred so the plug-in can be registered outside Maya.
        import os
        import anvil.plugins.publish.utils as utils
        from maya import cmds
        from avalon import maya

        dirname = utils.format_staging_dir(
            root=instance.context.data["workspaceDir"],
            time=instance.context.data["time"],
            name=instance.data["name"])

        try:
            os.makedirs(dirname)
        except OSError:
            # Staging directory already exists; reuse it.
            pass

        filename = "{name}.ma".format(**instance.data)

        path = os.path.join(dirname, filename)

        # Perform extraction
        self.log.info("Performing extraction..")
        with maya.maintained_selection(), maya.without_extension():
            self.log.info("Extracting %s" % str(list(instance)))
            cmds.select(instance, noExpand=True)
            cmds.file(path,
                      force=True,
                      typ="mayaAscii",
                      exportSelected=True,
                      preserveReferences=False,

                      # Shader assignment is the responsibility of
                      # riggers, for animators, and lookdev, for rendering.
                      shader=False,

                      # Construction history inherited from collection
                      # This enables a selective export of nodes relevant
                      # to this particular plug-in.
                      constructionHistory=False)

        # Store reference for integration
        if "files" not in instance.data:
            instance.data["files"] = list()

        instance.data["files"].append(filename)
        instance.data["stagingDir"] = dirname

        self.log.info("Extracted {instance} to {path}".format(**locals()))
| 32.348485
| 75
| 0.581265
|
4a08ea1c257a8c235d5360272d698f2bd8b3e216
| 3,135
|
py
|
Python
|
reedScrape/spiders/settings.py
|
blakiseskream/doylehowl
|
b0f337ac8da8a33a573ca2d594f376fe9fc84211
|
[
"MIT"
] | 1
|
2017-06-30T04:42:17.000Z
|
2017-06-30T04:42:17.000Z
|
spiders/spiders/settings.py
|
Surrinders/test
|
2f21f42b958ca96b1795b8ea00be9db4a0bfdfee
|
[
"MIT"
] | null | null | null |
spiders/spiders/settings.py
|
Surrinders/test
|
2f21f42b958ca96b1795b8ea00be9db4a0bfdfee
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Scrapy settings for spiders project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Project identity and the module paths Scrapy scans for spider classes.
BOT_NAME = 'spiders'

SPIDER_MODULES = ['spiders.spiders']
NEWSPIDER_MODULE = 'spiders.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'spiders (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'spiders.middlewares.SpidersSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'spiders.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'spiders.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 34.450549
| 109
| 0.777033
|
4a08ec523794024ac342cb4798396bf3b25ab1ba
| 1,412
|
py
|
Python
|
tests/test_backtest_utils.py
|
mberk/flumine
|
6216bcc233326cf07852fca9c7d39a18cee265ad
|
[
"MIT"
] | null | null | null |
tests/test_backtest_utils.py
|
mberk/flumine
|
6216bcc233326cf07852fca9c7d39a18cee265ad
|
[
"MIT"
] | null | null | null |
tests/test_backtest_utils.py
|
mberk/flumine
|
6216bcc233326cf07852fca9c7d39a18cee265ad
|
[
"MIT"
] | null | null | null |
import unittest
from unittest import mock
from flumine.backtest import utils
class NewDateTimeTest(unittest.TestCase):
@mock.patch("flumine.backtest.utils.config")
def test_new_date_time(self, mock_config):
mock_config.current_time = 123
x = utils.NewDateTime
self.assertEqual(x.utcnow(), 123)
class SimulatedDateTimeTest(unittest.TestCase):
def setUp(self):
self.s = utils.SimulatedDateTime()
def test_init(self):
self.assertIsNone(self.s._real_datetime)
@mock.patch("flumine.backtest.utils.config")
def test_reset_real_datetime(self, mock_config):
mock_real_datetime = mock.Mock()
self.s._real_datetime = mock_real_datetime
self.s.reset_real_datetime()
mock_real_datetime.utcnow.assert_called()
self.assertEqual(mock_config.current_time, mock_real_datetime.utcnow())
@mock.patch("flumine.backtest.utils.config")
def test_call(self, mock_config):
mock_dt = mock.Mock()
self.s(mock_dt)
self.assertEqual(mock_config.current_time, mock_dt)
@mock.patch("flumine.backtest.utils.config")
def test_context_manager(self, mock_config):
with self.s as datetime:
mock_config.current_time = 456
self.assertEqual(datetime.utcnow(), 456)
import datetime
self.assertIsInstance(datetime.datetime.utcnow(), datetime.datetime)
| 32.090909
| 79
| 0.70255
|
4a08ed0af9ccf7b474af59f885811c1c3743d643
| 1,066
|
py
|
Python
|
swissdutch/dutch.py
|
s-farry/swissdutch
|
df98cec67c3b1673e797d1f1de0a5822fad19803
|
[
"MIT"
] | 10
|
2015-09-30T20:38:13.000Z
|
2021-12-22T17:20:51.000Z
|
swissdutch/dutch.py
|
s-farry/swissdutch
|
df98cec67c3b1673e797d1f1de0a5822fad19803
|
[
"MIT"
] | 2
|
2020-04-26T19:11:38.000Z
|
2021-02-20T15:01:20.000Z
|
swissdutch/dutch.py
|
s-farry/swissdutch
|
df98cec67c3b1673e797d1f1de0a5822fad19803
|
[
"MIT"
] | 2
|
2019-03-27T00:21:49.000Z
|
2021-03-13T14:37:06.000Z
|
import operator
import itertools
from swissdutch.swiss import SwissPairingEngine
from swissdutch.pairing import ScoreBracket, PairingContext
class DutchPairingEngine(SwissPairingEngine):
def __init__(self, top_seed_colour_selection_fn=None, bye_value=1):
super().__init__(top_seed_colour_selection_fn, bye_value)
def _pair_round(self):
score_brackets = self._create_score_brackets()
ctx = PairingContext(self._round_no, self._last_round,
self._bye_value, score_brackets)
for sb in ctx:
sb.generate_pairings(ctx)
ctx.finalize_pairings()
return self._players
def _create_score_brackets(self):
self._players.sort(key=operator.attrgetter('pairing_no'))
self._players.sort(key=operator.attrgetter('score'), reverse=True)
return [ScoreBracket(score, players)
for score, players
in itertools.groupby(self._players,
key=operator.attrgetter('score'))]
| 36.758621
| 74
| 0.660413
|
4a08ee0f615bdf3e1d7a6b09c1e770c26dd191a2
| 5,364
|
py
|
Python
|
kubric/core/cameras.py
|
ScottyLectronica/kubric
|
31930b4a8517d1fc5987bb1502e47f130209505a
|
[
"Apache-2.0"
] | null | null | null |
kubric/core/cameras.py
|
ScottyLectronica/kubric
|
31930b4a8517d1fc5987bb1502e47f130209505a
|
[
"Apache-2.0"
] | null | null | null |
kubric/core/cameras.py
|
ScottyLectronica/kubric
|
31930b4a8517d1fc5987bb1502e47f130209505a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import traitlets as tl
from kubric.core.assets import UndefinedAsset
from kubric.core import objects
from kubric.kubric_typing import ArrayLike
class Camera(objects.Object3D):
""" Base class for all types of cameras. """
@tl.default("background")
def _get_background_default(self):
return True
@property
def intrinsics(self):
raise NotImplementedError
def project_point(self, point3d, frame=None):
""" Compute the image space coordinates [0, 1] for a given point in world coordinates."""
with self.at_frame(frame):
homo_transform = np.linalg.inv(self.matrix_world)
homo_intrinsics = np.zeros((3, 4), dtype=np.float32)
homo_intrinsics[:, :3] = self.intrinsics
point4d = np.concatenate([point3d, [1.]])
projected = homo_intrinsics @ homo_transform @ point4d
image_coords = projected / projected[2]
image_coords[2] = np.sign(projected[2])
return image_coords
def z_to_depth(self, z: ArrayLike) -> np.ndarray:
raise NotImplementedError
class UndefinedCamera(Camera, UndefinedAsset):
""" Marker object that indicates that a camera instance attribute has not been set. """
class PerspectiveCamera(Camera):
""" A :class:`Camera` that uses perspective projection.
Args:
focal_length (float): The focal length of the camera lens in `mm`.
`Default = 50`
sensor_width (float): Horizontal size of the camera sensor in `mm`.
`Default = 36`
"""
focal_length = tl.Float(50)
sensor_width = tl.Float(36)
def __init__(self,
focal_length: float = 50,
sensor_width: float = 36,
position=(0., 0., 0.),
quaternion=None, up="Y", front="-Z", look_at=None, euler=None, **kwargs):
super().__init__(focal_length=focal_length, sensor_width=sensor_width, position=position,
quaternion=quaternion, up=up, front=front, look_at=look_at, euler=euler,
**kwargs)
@property
def field_of_view(self) -> float:
""" The (horizontal) field of view (fov) in radians.
.. math:: \\texttt{fov} = 2 * \\arctan{ \\frac{\\texttt{sensor_width}}{2 * \\texttt{fl}} }
Setting the :py:attr:`field_of_view` will internally adjust the :py:obj:`focal_length` (fl),
but keep the :py:attr:`sensor_width`.
"""
return 2 * np.arctan2(self.sensor_width / 2, self.focal_length)
@field_of_view.setter
def field_of_view(self, fov: float) -> None:
self.focal_length = self.sensor_width / (2 * np.tan(fov / 2))
@property
def sensor_height(self):
scene = self.active_scene
return self.sensor_width / scene.resolution[0] * scene.resolution[1]
@property
def intrinsics(self):
width, height = 1., 1. # self.active_scene.resolution
f_x = self.focal_length / self.sensor_width * width
f_y = self.focal_length / self.sensor_height * height
p_x = width / 2.
p_y = height / 2.
return np.array([
[f_x, 0, -p_x],
[0, -f_y, -p_y],
[0, 0, -1],
])
def z_to_depth(self, z: ArrayLike) -> np.ndarray:
z = np.array(z)
assert z.ndim >= 3
h, w, _ = z.shape[-3:]
pixel_centers_x = (np.arange(-w/2, w/2, dtype=np.float32) + 0.5) / w * self.sensor_width
pixel_centers_y = (np.arange(-h/2, h/2, dtype=np.float32) + 0.5) / h * self.sensor_height
squared_distance_from_center = np.sum(np.square(np.meshgrid(
pixel_centers_x, # X-Axis (columns)
pixel_centers_y, # Y-Axis (rows)
indexing="xy",
)), axis=0)
depth_scaling = np.sqrt(1 + squared_distance_from_center / self.focal_length**2)
depth_scaling = depth_scaling.reshape((1,) * (z.ndim - 3) + depth_scaling.shape + (1,))
return z * depth_scaling
class OrthographicCamera(Camera):
"""A :class:`Camera` that uses orthographic projection."""
orthographic_scale = tl.Float(6.0)
def __init__(self, orthographic_scale=6.0, position=(0., 0., 0.),
quaternion=None, up="Y", front="-Z", look_at=None, euler=None, **kwargs):
super().__init__(orthographic_scale=orthographic_scale, position=position,
quaternion=quaternion, up=up, front=front, look_at=look_at, euler=euler,
**kwargs)
@property
def intrinsics(self):
fx = fy = 2.0 / self.orthographic_scale
return np.array([
[fx, 0, 0],
[0, fy, 0],
[0, 0, -1],
])
def z_to_depth(self, z: ArrayLike) -> np.ndarray:
# not sure if depth is even well defined in orthographic
# for now just return the z value
return z
| 34.831169
| 97
| 0.634601
|
4a08ee5f9bab8f9b8a3902208e5ded830c34eb24
| 3,554
|
py
|
Python
|
tor_async_couchdb/tests/tamper_unit_tests.py
|
simonsdave/tor-async-couchdb
|
286b0acaca636f5e937b159d56db573973cd17a1
|
[
"MIT"
] | 1
|
2019-01-27T21:07:14.000Z
|
2019-01-27T21:07:14.000Z
|
tor_async_couchdb/tests/tamper_unit_tests.py
|
simonsdave/tor-async-couchdb
|
286b0acaca636f5e937b159d56db573973cd17a1
|
[
"MIT"
] | 5
|
2015-04-22T17:52:07.000Z
|
2017-05-16T01:56:45.000Z
|
tor_async_couchdb/tests/tamper_unit_tests.py
|
simonsdave/tor-async-couchdb
|
286b0acaca636f5e937b159d56db573973cd17a1
|
[
"MIT"
] | 2
|
2015-05-04T20:19:46.000Z
|
2016-05-30T09:59:59.000Z
|
"""This module contains the tamper module's unit tests."""
import shutil
import tempfile
import unittest
from keyczar import keyczar
from keyczar import keyczart
from .. import tamper
class TempDirectory(object):
def __init__(self):
object.__init__(self)
self._dir_name = None
def __enter__(self):
self._dir_name = tempfile.mkdtemp()
return self._dir_name
def __exit__(self, exc_type, exc_value, traceback):
if self._dir_name:
shutil.rmtree(self._dir_name, ignore_errors=True)
class TamperTestCase(unittest.TestCase):
"""A collection of unit tests for the tamper module."""
def test_happy_path(self):
with TempDirectory() as dir_name:
keyczart.Create(
dir_name,
"some purpose",
keyczart.keyinfo.SIGN_AND_VERIFY)
keyczart.AddKey(
dir_name,
keyczart.keyinfo.PRIMARY)
signer = keyczar.Signer.Read(dir_name)
doc = {
"dave": "was",
"here": "today",
}
pre_sign_doc_len = len(doc)
tamper.sign(signer, doc)
self.assertEqual(len(doc), 1 + pre_sign_doc_len)
self.assertTrue(tamper.verify(signer, doc))
def test_verify_fails_when_doc_tampered_with(self):
with TempDirectory() as dir_name:
keyczart.Create(
dir_name,
"some purpose",
keyczart.keyinfo.SIGN_AND_VERIFY)
keyczart.AddKey(
dir_name,
keyczart.keyinfo.PRIMARY)
signer = keyczar.Signer.Read(dir_name)
doc = {
"dave": "was",
"here": "today",
}
pre_sign_doc_len = len(doc)
tamper.sign(signer, doc)
self.assertEqual(len(doc), 1 + pre_sign_doc_len)
doc["bindle"] = "berry"
self.assertFalse(tamper.verify(signer, doc))
def test_verify_fails_when_sig_removed(self):
with TempDirectory() as dir_name:
keyczart.Create(
dir_name,
"some purpose",
keyczart.keyinfo.SIGN_AND_VERIFY)
keyczart.AddKey(
dir_name,
keyczart.keyinfo.PRIMARY)
signer = keyczar.Signer.Read(dir_name)
doc = {
"dave": "was",
"here": "today",
}
pre_sign_doc_len = len(doc)
tamper.sign(signer, doc)
self.assertEqual(len(doc), 1 + pre_sign_doc_len)
del doc[tamper._tampering_sig_prop_name]
self.assertEqual(len(doc), pre_sign_doc_len)
self.assertFalse(tamper.verify(signer, doc))
def test_verify_fails_when_sig_tampered_with(self):
with TempDirectory() as dir_name:
keyczart.Create(
dir_name,
"some purpose",
keyczart.keyinfo.SIGN_AND_VERIFY)
keyczart.AddKey(
dir_name,
keyczart.keyinfo.PRIMARY)
signer = keyczar.Signer.Read(dir_name)
doc = {
"dave": "was",
"here": "today",
}
pre_sign_doc_len = len(doc)
tamper.sign(signer, doc)
self.assertEqual(len(doc), 1 + pre_sign_doc_len)
doc[tamper._tampering_sig_prop_name] = "dave"
self.assertFalse(tamper.verify(signer, doc))
| 26.924242
| 61
| 0.544176
|
4a08effce8a0d815a8290ffbf97ea38737487d47
| 1,191
|
py
|
Python
|
[1] BEGINNER/2896 - Aproveite a Oferta.py
|
tiago040/URI-SOLUTIONS
|
519d3950252a6002e8926416b2f8217ba08fe721
|
[
"MIT"
] | 1
|
2022-03-15T03:03:26.000Z
|
2022-03-15T03:03:26.000Z
|
[1] BEGINNER/2896 - Aproveite a Oferta.py
|
tiago040/URI-SOLUTIONS
|
519d3950252a6002e8926416b2f8217ba08fe721
|
[
"MIT"
] | null | null | null |
[1] BEGINNER/2896 - Aproveite a Oferta.py
|
tiago040/URI-SOLUTIONS
|
519d3950252a6002e8926416b2f8217ba08fe721
|
[
"MIT"
] | null | null | null |
'''
Um supermercado está fazendo uma promoção de venda de refrigerantes. Se um dia você comprar refrigerantes e levar os cascos vazios no dia seguinte, ela troca cada conjunto de K garrafas vazias por uma garrafa cheia. Um cliente quer aproveitar ao máximo essa oferta e por isso comprou várias garrafas no primeiro dia da promoção. Agora ele quer saber quantas garrafas terá ao final do segundo dia da promoção, se usá-la ao máximo.
Faça um programa para calcular isso.
Entrada
A primeira linha de entrada contém inteiro T (1 ≤ T ≤ 10000) , que indica o número de casos de teste. Em cada uma das T linhas a seguir vêm dois inteiros N e K (1 ≤ K, N ≤ 10000), respectivamente o número de refrigerantes comprados e o número de garrafas vazias para ganhar uma cheia.
Saída
Para cada caso de teste imprima o número de garrafas que o cliente terá no segundo dia, se aproveitar ao máximo a oferta.
'''
T = int(input())
for t in range(T):
entrada = str(input()).split()
ref_comprado = int(entrada[0])
garrafa_vazia = int(entrada[1])
if ref_comprado >= garrafa_vazia:
print(ref_comprado//garrafa_vazia + ref_comprado%garrafa_vazia)
else:
print(ref_comprado)
| 56.714286
| 430
| 0.75063
|
4a08f00bd29971dcfc5975bb62419cdfbe1a31f7
| 550
|
py
|
Python
|
blog/urls.py
|
metzondernaam/blog_test
|
941e6e1382b9a770e2177bf827b7d0006d4c2a07
|
[
"Unlicense"
] | null | null | null |
blog/urls.py
|
metzondernaam/blog_test
|
941e6e1382b9a770e2177bf827b7d0006d4c2a07
|
[
"Unlicense"
] | null | null | null |
blog/urls.py
|
metzondernaam/blog_test
|
941e6e1382b9a770e2177bf827b7d0006d4c2a07
|
[
"Unlicense"
] | null | null | null |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'blog.views.home', name='home'),
# url(r'^blog/', include('blog.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
| 30.555556
| 71
| 0.672727
|
4a08f0ded4852b1cdb544d4191cee9233eb9819b
| 3,139
|
py
|
Python
|
djangoecommerce/settings.py
|
rgnaldo/djangoecommerce
|
ae13378077444af9cf2044acfe867599670e9a59
|
[
"CC0-1.0"
] | null | null | null |
djangoecommerce/settings.py
|
rgnaldo/djangoecommerce
|
ae13378077444af9cf2044acfe867599670e9a59
|
[
"CC0-1.0"
] | null | null | null |
djangoecommerce/settings.py
|
rgnaldo/djangoecommerce
|
ae13378077444af9cf2044acfe867599670e9a59
|
[
"CC0-1.0"
] | null | null | null |
"""
Django settings for djangoecommerce project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7i3x&dkaywep@d5p8#lucd0f2b4hs+gb%^ujho##b$tw%-v^e8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| 25.520325
| 91
| 0.697674
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.