hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
258082e4d5a04c0a557f83c8074d8edbead1b428 | 683 | py | Python | combinePDF.py | LuisTarma/PythonCombinePDF | 2bab57ec186f17fc280f6abbbb446ee69f28460e | [
"Apache-2.0"
] | 4 | 2019-12-08T08:18:24.000Z | 2021-11-15T09:55:15.000Z | combinePDF.py | LuisTarma/PythonCombinePDF | 2bab57ec186f17fc280f6abbbb446ee69f28460e | [
"Apache-2.0"
] | null | null | null | combinePDF.py | LuisTarma/PythonCombinePDF | 2bab57ec186f17fc280f6abbbb446ee69f28460e | [
"Apache-2.0"
] | 1 | 2020-05-17T22:32:08.000Z | 2020-05-17T22:32:08.000Z | # combinePDF.py
# Author: Tiffany Tse
# Date: Oct 09,2018
# Updated: Oct 10, 2018
# Description: Combining pdf locally
# Dependencies: os, pyPDF2
import os
from PyPDF2 import PdfFileMerger

# Directory where this script lives.  PDFs are both read from and written to
# this directory, instead of whatever the current working directory happens
# to be (os.listdir() returns bare file names, so the old code failed when
# the script was launched from another directory).
fileDir = os.path.dirname(os.path.realpath(__file__))
print("This is fileDir: " + fileDir)

# Collect every PDF next to the script, skipping a previous output file so a
# re-run does not merge the old result into the new one.  sorted() makes the
# merge order deterministic (os.listdir order is arbitrary).
pdfToMerge = sorted(pdfIndex for pdfIndex in os.listdir(fileDir)
                    if pdfIndex.endswith(".pdf") and pdfIndex != "result.pdf")
print("The files to merge are:")
print(pdfToMerge)

merger = PdfFileMerger()
sources = []
for pdf in pdfToMerge:
    src = open(os.path.join(fileDir, pdf), 'rb')
    sources.append(src)
    merger.append(src)
with open(os.path.join(fileDir, "result.pdf"), "wb") as fout:
    merger.write(fout)
merger.close()
for src in sources:
    # Explicitly close the inputs; merger.close() behavior regarding appended
    # streams varies between PyPDF2 versions.
    src.close()
print("Done!")
| 23.551724 | 88 | 0.729136 |
3fdce24ded05ebe522ad1f8158129781b989720f | 647 | py | Python | flask_app/__init__.py | Silicrex/HackathonProject | 887f322d1786c1aabe1c1c831c5f3082133f4998 | [
"BSD-3-Clause"
] | null | null | null | flask_app/__init__.py | Silicrex/HackathonProject | 887f322d1786c1aabe1c1c831c5f3082133f4998 | [
"BSD-3-Clause"
] | null | null | null | flask_app/__init__.py | Silicrex/HackathonProject | 887f322d1786c1aabe1c1c831c5f3082133f4998 | [
"BSD-3-Clause"
] | null | null | null | from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_bootstrap import Bootstrap

app = Flask(__name__)  # The central Flask instance
app.config.from_object(Config)  # Load config from Config class
db = SQLAlchemy(app)  # Database instance
migrate = Migrate(app, db)  # Migration engine instance
login = LoginManager(app)  # Login handler instance
login.login_view = 'login'  # Inform Flask-Login of the login view function
# NOTE(review): binding name is misspelled ("boostrap"); kept as-is because
# renaming a module-level name could break imports elsewhere -- confirm first.
boostrap = Bootstrap(app)
# Imported at the bottom on purpose: routes/models import `app` and `db` from
# this module, so a top-of-file import would be circular.
from flask_app import routes, models
| 38.058824 | 77 | 0.802164 |
98d52552c330d335aab0be181aa87a7a436b6035 | 54 | py | Python | T22-04/program.py | maa76/SSof-Project1920 | 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | [
"MIT"
] | 2 | 2019-11-20T19:26:07.000Z | 2019-11-22T00:42:23.000Z | T22-04/program.py | maa76/SSof-Project1920 | 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | [
"MIT"
] | 2 | 2019-11-28T05:21:24.000Z | 2019-11-28T05:21:58.000Z | T22-04/program.py | maa76/SSof-Project1920 | 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | [
"MIT"
] | 25 | 2019-11-27T01:40:56.000Z | 2019-12-04T23:38:59.000Z | a = c("asd")
b = 10
# NOTE(review): `a`, `c`, `t` and `w` are not defined in this file; this looks
# like a minimal fixture for a static taint-analysis exercise (source `c`,
# sanitizer `t`, sink `w`) -- confirm before treating it as runnable code.
if a == b:
    t(a)
    w(a)
w(b)
| 7.714286 | 12 | 0.333333 |
d727c3c85adcf155d22093e6f6fa0c761b66fc4a | 14,752 | py | Python | blackjack_simulator.py | dennis-ho/blackjack-simulator | 8cf10f19c529f6db19b3155c07efd7455351e0ce | [
"MIT"
] | null | null | null | blackjack_simulator.py | dennis-ho/blackjack-simulator | 8cf10f19c529f6db19b3155c07efd7455351e0ce | [
"MIT"
] | null | null | null | blackjack_simulator.py | dennis-ho/blackjack-simulator | 8cf10f19c529f6db19b3155c07efd7455351e0ce | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import csv
import logging
import math
import random
# Player action codes used by the strategy tables below.  Two-letter codes
# encode a preferred action plus a fallback for when the preferred one is not
# in the legal-action list (see basic_strategy()).
H = 'H'  # Hit
S = 'S'  # Stand
Dh = 'DH'  # Double Down if allowed, otherwise Hit
Ds = 'DS'  # Double Down if allowed, otherwise Stand
P = 'P'  # Split
Ph = 'PH'  # Split if allowed, otherwise Hit
Rh = 'RH'  # Surrender if allowed, otherwise Hit
Rs = 'RS'  # Surrender if allowed, otherwise Stand
Rp = 'RP'  # Surrender if allowed, otherwise Split

# Basic-strategy lookup tables.  Columns are the dealer upcard 2-9, 10, Ace
# (aces are stored as 11, so basic_strategy() indexes with `card - 2`).
# Rows are the player's pair / soft total / hard total, per trailing comments.
split_strategy = [
    #2   3   4   5   6   7   8   9   10  A
    [Ph, Ph, P,  P,  P,  P,  H,  H,  H,  H],   # 2/2
    [Ph, Ph, P,  P,  P,  P,  H,  H,  H,  H],   # 3/3
    [H,  H,  H,  Ph, Ph, H,  H,  H,  H,  H],   # 4/4
    [Dh, Dh, Dh, Dh, Dh, Dh, Dh, Dh, H,  H],   # 5/5
    [Ph, P,  P,  P,  P,  H,  H,  H,  H,  H],   # 6/6
    [P,  P,  P,  P,  P,  P,  H,  H,  H,  H],   # 7/7
    [P,  P,  P,  P,  P,  P,  P,  P,  P,  Rp],  # 8/8
    [P,  P,  P,  P,  P,  S,  P,  P,  S,  S],   # 9/9
    [S,  S,  S,  S,  S,  S,  S,  S,  S,  S],   # 10/10
    [P,  P,  P,  P,  P,  P,  P,  P,  P,  P],   # A/A
]

# Soft totals (hand contains an ace counted as 11); rows run 13..21.
soft_strategy = [
    #2   3   4   5   6   7   8   9   10  A
    [H,  H,  H,  Dh, Dh, H,  H,  H,  H,  H],   # 13
    [H,  H,  H,  Dh, Dh, H,  H,  H,  H,  H],   # 14
    [H,  H,  Dh, Dh, Dh, H,  H,  H,  H,  H],   # 15
    [H,  H,  Dh, Dh, Dh, H,  H,  H,  H,  H],   # 16
    [H,  Dh, Dh, Dh, Dh, H,  H,  H,  H,  H],   # 17
    [Ds, Ds, Ds, Ds, Ds, S,  S,  H,  H,  H],   # 18
    [S,  S,  S,  S,  Ds, S,  S,  S,  S,  S],   # 19
    [S,  S,  S,  S,  S,  S,  S,  S,  S,  S],   # 20
    [S,  S,  S,  S,  S,  S,  S,  S,  S,  S],   # 21
]

# Hard totals (no ace counted as 11); rows run 4..21.
hard_strategy = [
    #2   3   4   5   6   7   8   9   10  A
    [H,  H,  H,  H,  H,  H,  H,  H,  H,  H],   # 4
    [H,  H,  H,  H,  H,  H,  H,  H,  H,  H],   # 5
    [H,  H,  H,  H,  H,  H,  H,  H,  H,  H],   # 6
    [H,  H,  H,  H,  H,  H,  H,  H,  H,  H],   # 7
    [H,  H,  H,  H,  H,  H,  H,  H,  H,  H],   # 8
    [H,  Dh, Dh, Dh, Dh, H,  H,  H,  H,  H],   # 9
    [Dh, Dh, Dh, Dh, Dh, Dh, Dh, Dh, H,  H],   # 10
    [Dh, Dh, Dh, Dh, Dh, Dh, Dh, Dh, Dh, Dh],  # 11
    [H,  H,  S,  S,  S,  H,  H,  H,  H,  H],   # 12
    [S,  S,  S,  S,  S,  H,  H,  H,  H,  H],   # 13
    [S,  S,  S,  S,  S,  H,  H,  H,  H,  H],   # 14
    [S,  S,  S,  S,  S,  H,  H,  H,  Rh, Rh],  # 15
    [S,  S,  S,  S,  S,  H,  H,  Rh, Rh, Rh],  # 16
    [S,  S,  S,  S,  S,  S,  S,  S,  S,  Rs],  # 17
    [S,  S,  S,  S,  S,  S,  S,  S,  S,  S],   # 18
    [S,  S,  S,  S,  S,  S,  S,  S,  S,  S],   # 19
    [S,  S,  S,  S,  S,  S,  S,  S,  S,  S],   # 20
    [S,  S,  S,  S,  S,  S,  S,  S,  S,  S],   # 21
]
def insurance_strategy_counting(table):
    """Take insurance only when the Hi-Lo true count reaches +2.3 or better."""
    if table.true_count() >= 2.3:
        return 'I'
    return 'N'
def insurance_strategy_never(table):
    """Always decline insurance, regardless of table state."""
    return 'N'
def insurance_strategy_even_money(table):
    """Take insurance exactly when the player holds a natural blackjack.

    Insuring a blackjack against a dealer ace is equivalent to taking "even
    money".  The previous code compared the boolean result of is_blackjack()
    to 21 (`is_blackjack() == 21`), which is always False, so insurance was
    never taken and the strategy degenerated to "never insure".
    """
    return 'I' if table.curr().is_blackjack() else 'N'
def basic_strategy(table, available_actions):
    """Pick an action for the current hand from `available_actions`.

    Looks up the split / soft / hard basic-strategy tables (insurance is
    always declined).  Two-letter table entries mean "preferred action, else
    fallback".
    """
    hand = table.curr()
    up_idx = table.dealer_hand.cards[0] - 2  # dealer upcard column: 2..A -> 0..9
    if 'I' in available_actions:
        choice = insurance_strategy_never(table)
    elif 'P' in available_actions:
        choice = split_strategy[hand.cards[0] - 2][up_idx]
    elif hand.is_soft():
        choice = soft_strategy[hand.value() - 13][up_idx]
    else:
        choice = hard_strategy[hand.value() - 4][up_idx]
    if choice[0] in available_actions:
        return choice[0]
    return choice[1]
def count_hi_lo(cards):
    """Hi-Lo running count: 2-6 count +1, tens and aces (11) count -1."""
    total = 0
    for card in cards:
        if card in (2, 3, 4, 5, 6):
            total += 1
        elif card in (10, 11):
            total -= 1
    return total
class Hand:
    """One blackjack hand: its cards, wager and per-hand bookkeeping."""

    def __init__(self):
        self.cards = []           # card values; aces are stored as 11
        self.bet = 1              # current wager (doubled on 'D', halved on 'R')
        self.actions = []         # action codes taken on this hand
        self.insured = False
        self.surrendered = False
        self.from_split = False   # hand was created by splitting a pair
        self.count_hist = []      # (run_count, true_count) snapshots

    def _resolve_aces(self):
        """Return (value, soft_aces): total after demoting aces 11 -> 1 as
        needed to stay at or under 21, and how many aces still count as 11.

        Shared by value() and is_soft(), which previously duplicated this loop.
        """
        hand_val = sum(self.cards)
        soft_aces = sum(1 for card in self.cards if card == 11)
        while hand_val > 21 and soft_aces > 0:
            hand_val -= 10
            soft_aces -= 1
        return hand_val, soft_aces

    def value(self):
        """Best hand total, counting aces as 11 where that does not bust."""
        return self._resolve_aces()[0]

    def is_blackjack(self):
        """Natural 21: exactly two cards on a hand not involved in a split."""
        return self.value() == 21 and len(self.cards) == 2 and 'P' not in self.actions and not self.from_split

    def is_soft(self):
        """True when at least one ace is still counted as 11."""
        return self._resolve_aces()[1] > 0

    def __repr__(self):
        return str(self.cards)
class Table:
    """Full table state for the simulation: shoe, discard pile, dealer hand,
    player hand(s) and Hi-Lo count bookkeeping.  Splits grow `player_hand`;
    `curr_idx` walks the hands and becomes None once the round is finished."""

    def __init__(self):
        self.shoe = []             # cards left to deal; 0 marks the cut card
        self.discard_pile = []     # cards seen so far; drives the running count
        self.shoe_id = -1          # incremented by new_shoe()
        self.dealer_hand = Hand()
        self.player_hand = [Hand()]  # list because splits create extra hands
        self.curr_idx = 0          # index of hand being played; None when done
        self.cards_remaining = 0   # snapshot taken at the start of each round
        self.shuffle_pending = False  # set when the cut card is reached

    def new_shoe(self, decks=8, penetration=6.5):
        """Build, shuffle and cut a fresh shoe of `decks` decks; `penetration`
        is the number of decks dealt before a shuffle is flagged."""
        self.shoe_id += 1
        self.shoe = [2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11] * 4 * decks
        random.shuffle(self.shoe)
        self.shoe.insert(int(math.floor(penetration * 52)), 0)  # 0 value card indicates cut card
        self.discard_pile = [self.next_card()]  # Burn first card
        self.cards_remaining = len(self.shoe)
        self.shuffle_pending = False

    def initial_deal(self):
        """Discard last round's cards, reset the hands and deal two cards
        each, alternating player/dealer."""
        self.clean_table()
        self.dealer_hand = Hand()
        self.player_hand = [Hand()]
        self.curr_idx = 0
        self.cards_remaining = len(self.shoe)
        # NOTE(review): both appends below target player_hand[0] (curr() is
        # hand 0 at this point), so the first hand records the pre-deal count
        # twice -- confirm whether the duplicate entry is intentional.
        self.curr().count_hist.append((self.run_count(), self.true_count()))
        self.player_hand[0].count_hist.append((self.run_count(), self.true_count()))
        self.player_hand[0].cards.append(self.next_card())
        self.dealer_hand.cards.append(self.next_card())
        self.player_hand[0].cards.append(self.next_card())
        self.dealer_hand.cards.append(self.next_card())

    def next_card(self):
        """Pop the next card from the shoe; hitting the cut card (0) flags a
        shuffle and deals the card after it instead."""
        card = self.shoe.pop(0)
        if card == 0:  # Cut card
            self.shuffle_pending = True
            card = self.shoe.pop(0)
        return card

    def curr(self):
        """Return the player hand currently being acted on."""
        return self.player_hand[self.curr_idx]

    def total_bet(self):
        """Sum of bets across all player hands (splits included)."""
        return sum([hand.bet for hand in self.player_hand])

    def num_actions_taken(self):
        """Total number of actions recorded across all player hands."""
        return sum([len(hand.actions) for hand in self.player_hand])

    def available_actions(self):
        """Return the legal action codes for the current hand.

        Insurance ('I'/'N') is offered only on a dealer ace before any other
        action; 'D'/'R'/'P' only on two-card hands with the house-rule limits
        commented inline; 'S' is always legal while a hand is in play."""
        actions = []
        if self.curr_idx is None:  # All player hands finished
            return actions
        if self.dealer_hand.cards[0] == 11 and self.num_actions_taken() == 0:
            actions.append('I')
            actions.append('N')
            return actions
        if len(self.curr().cards) == 2:
            actions.append('D')  # Note: Double down on blackjack is not wise but surprisingly is technically allowed
        if len(self.curr().cards) == 2 and 'P' not in self.curr().actions and self.curr_idx == 0:
            actions.append('R')  # Can not surrender after split
        if len(self.player_hand) < 4 and len(self.curr().cards) == 2 and self.curr().cards[0] == self.curr().cards[1]:
            if self.curr().cards[0] != 11 or len(self.player_hand) == 1:  # Assumes re-splitting Aces is not allowed
                actions.append('P')
        if self.curr().value() <= 21:  # Note: You can still hit on 21 if you really want to
            actions.append('H')
        actions.append('S')
        return actions

    def do_action(self, action):
        """Apply one action code to the current hand, advancing `curr_idx`
        and playing out the dealer once the last player hand is settled.

        A dealer blackjack short-circuits the round: only an 'I'/'N' choice
        is recorded and all play ends immediately."""
        self.curr().count_hist.append((self.run_count(), self.true_count()))
        if action == 'I':
            self.curr().insured = True
        if self.dealer_hand.is_blackjack():
            if action in ['I', 'N']:
                self.curr().actions.append(action)
            self.curr_idx = None
            return
        self.curr().actions.append(action)
        if action == 'D':
            self.curr().bet *= 2
        if action == 'H' or action == 'D':
            self.curr().cards.append(self.next_card())
        if action == 'P':
            new_hand = Hand()
            new_hand.from_split = True
            new_hand.cards.append(self.curr().cards.pop())
            self.player_hand.append(new_hand)
            self.curr().cards.append(self.next_card())
            if new_hand.cards[0] == 11:  # Only 1 card when splitting Aces
                self.curr_idx += 1
                new_hand.cards.append(self.next_card())
        if action == 'R':
            self.curr().surrendered = True
            self.curr().bet /= 2
        # Hand-terminating conditions: stand/double/surrender, bust, a split
        # ace (one card only), or no action possible at all.
        if action == 'S' or action == 'D' or action == 'R' or self.curr().value() > 21 or \
                self.curr().from_split and self.curr().cards[0] == 11 or action is None:
            if self.curr_idx == len(self.player_hand) - 1:
                self.curr_idx = None
                self.finish_dealer_hand()
            else:  # There is another hand waiting from a previous split
                self.curr_idx += 1
                self.curr().cards.append(self.next_card())

    def finish_dealer_hand(self):
        """Dealer draws to 17 and hits soft 17 (H17 rule)."""
        while self.dealer_hand.value() < 17 or self.dealer_hand.value() == 17 and self.dealer_hand.is_soft():
            self.dealer_hand.cards.append(self.next_card())

    def results(self):
        """Summarize the finished round as a dict matching the CSV columns,
        including the net win/loss ('win') in bet units.

        Payouts: blackjack 3:2, insurance 2:1, surrender loses half the bet;
        a push leaves 'win' untouched."""
        result_val = {
            'shoe_id': self.shoe_id,
            'cards_remaining': self.cards_remaining,  # Value at the START of the round before dealing cards
            'dealer_up': self.dealer_hand.cards[0],
            'initial_hand':
                self.player_hand[0].cards[:2] if len(self.player_hand) == 1 else [self.player_hand[0].cards[0]] * 2,
            'dealer_final': self.dealer_hand,
            'dealer_final_value': 'BJ' if self.dealer_hand.is_blackjack() else self.dealer_hand.value(),
            'player_final': self.player_hand,
            'player_final_value': ['BJ' if hand.is_blackjack() else hand.value() for hand in self.player_hand],
            'actions_taken': [hand.actions for hand in self.player_hand],
            'run_count': self.player_hand[0].count_hist[0][0],  # Value at the START of the round before dealing cards
            'true_count': self.player_hand[0].count_hist[0][1],  # Value at the START of the round before dealing cards
            'win': 0,
        }
        insurance_cost = self.player_hand[0].bet * 0.5
        if self.player_hand[0].insured:
            result_val['win'] -= insurance_cost  # Charge insurance
        if self.dealer_hand.is_blackjack():
            if self.player_hand[0].insured:
                result_val['win'] += insurance_cost * 3  # Insurance pays 2:1 plus return original charge
        for hand in self.player_hand:
            if self.dealer_hand.is_blackjack():
                result_val['win'] -= hand.bet if not hand.is_blackjack() else 0
            elif hand.value() > 21:
                result_val['win'] -= hand.bet
            elif hand.surrendered:
                result_val['win'] -= hand.bet
            elif hand.is_blackjack():
                result_val['win'] += 1.5 * hand.bet
            elif self.dealer_hand.value() > 21:
                result_val['win'] += hand.bet
            elif hand.value() > self.dealer_hand.value():
                result_val['win'] += hand.bet
            elif hand.value() < self.dealer_hand.value():
                result_val['win'] -= hand.bet
        return result_val

    def run_count(self):
        """Hi-Lo running count over everything in the discard pile."""
        return count_hi_lo(self.discard_pile)

    def true_count(self):
        """Running count divided by (fractional) decks remaining, truncated
        toward zero.  NOTE(review): divides by len(shoe)/52 -- would raise
        ZeroDivisionError on an empty shoe; presumably unreachable given the
        cut card, but confirm."""
        decks_remaining = len(self.shoe) / 52
        return math.trunc(self.run_count() / decks_remaining)

    def clean_table(self):
        """Move last round's cards to the discard pile in the order a dealer
        would pick them up; IndexError means there was no previous round."""
        try:
            self.discard_pile.append(self.dealer_hand.cards[1])  # Upcard position switches with hole card when flipped
            self.discard_pile.append(self.dealer_hand.cards[0])
            self.discard_pile.extend(self.dealer_hand.cards[2:])
            self.discard_pile.extend([c for cards in [hand.cards for hand in self.player_hand] for c in cards][::-1])
        except IndexError:
            pass
def main():
    """Entry point: parse CLI args, play hands with basic strategy and append
    one CSV row per hand.

    Fixes over the previous version: numeric arguments now declare type=
    (previously CLI-supplied values arrived as strings and crashed
    Table.new_shoe()), the CSV file is opened once instead of being reopened
    in append mode for every hand, and table.results() is computed once per
    hand instead of twice.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', help='Path to save CSV results', default='blackjack_simulator.csv')
    parser.add_argument('--log_path', help='Path to log file', default='blackjack_simulator.log')
    parser.add_argument('--log_level', help='Level of messages to write to log file', default='info')
    parser.add_argument('--hands', help='Number of hands to play', type=int, default=100)
    parser.add_argument('--decks', help='Number of decks to use in a shoe', type=int, default=8)
    parser.add_argument('--pen', help='Deck penetration (number of decks played before shuffling)',
                        type=float, default=6.5)
    args = parser.parse_args()

    log_levels = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'critical': logging.CRITICAL,
    }
    log_level = log_levels[args.log_level.lower()]
    logging.basicConfig(filename=args.log_path, format='%(levelname)s:%(message)s', level=log_level, filemode='w')
    logging.info('Program arguments %s', args)

    csv_columns = [
        'shoe_id',
        'cards_remaining',
        'dealer_up',
        'initial_hand',
        'dealer_final',
        'dealer_final_value',
        'player_final',
        'player_final_value',
        'actions_taken',
        'run_count',
        'true_count',
        'win',
    ]
    # newline='' is the documented way to open CSV output files (avoids blank
    # rows on Windows).
    with open(args.output_path, 'w', newline='') as csv_file:
        csv_writer = csv.DictWriter(csv_file, fieldnames=csv_columns)
        csv_writer.writeheader()

        table = Table()
        hands_played = 0
        while hands_played < args.hands:
            table.new_shoe(args.decks, args.pen)
            # Play out the shoe until the cut card appears.
            while not table.shuffle_pending:
                table.initial_deal()
                while table.curr_idx is not None:
                    logging.debug('dealer: %s', table.dealer_hand)
                    logging.debug('player: %s', table.player_hand)
                    logging.debug('current player hand index: %s', table.curr_idx)
                    actions = table.available_actions()
                    logging.debug('available actions: %s', actions)
                    action = basic_strategy(table, actions) if len(actions) > 0 else None
                    logging.debug('action taken: %s', action)
                    table.do_action(action)
                logging.debug('dealer final: %s', table.dealer_hand)
                logging.debug('player final: %s', table.player_hand)
                results = table.results()
                logging.info('results: %s', results)
                csv_writer.writerow(results)
                hands_played += 1
                logging.debug('hands played: %s', hands_played)
                logging.debug('-------------------------------')
                if hands_played >= args.hands:
                    return
| 38.020619 | 119 | 0.550298 |
c5886ea022fb0ba6b4e09cdca8512278b33d4b3f | 1,972 | py | Python | venv/lib/python3.8/site-packages/vsts/work_item_tracking_process_template/v4_1/models/check_template_existence_result.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/work_item_tracking_process_template/v4_1/models/check_template_existence_result.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/work_item_tracking_process_template/v4_1/models/check_template_existence_result.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class CheckTemplateExistenceResult(Model):
    """CheckTemplateExistenceResult.

    msrest model describing the answer to a template-existence check.
    (This file is generated -- see the header; keep edits to regeneration.)

    :param does_template_exist: Indicates whether a template exists.
    :type does_template_exist: bool
    :param existing_template_name: The name of the existing template.
    :type existing_template_name: str
    :param existing_template_type_id: The existing template type identifier.
    :type existing_template_type_id: str
    :param requested_template_name: The name of the requested template.
    :type requested_template_name: str
    """

    # Maps Python attribute names to their wire names/types for msrest
    # (de)serialization.
    _attribute_map = {
        'does_template_exist': {'key': 'doesTemplateExist', 'type': 'bool'},
        'existing_template_name': {'key': 'existingTemplateName', 'type': 'str'},
        'existing_template_type_id': {'key': 'existingTemplateTypeId', 'type': 'str'},
        'requested_template_name': {'key': 'requestedTemplateName', 'type': 'str'}
    }

    def __init__(self, does_template_exist=None, existing_template_name=None, existing_template_type_id=None, requested_template_name=None):
        super(CheckTemplateExistenceResult, self).__init__()
        self.does_template_exist = does_template_exist
        self.existing_template_name = existing_template_name
        self.existing_template_type_id = existing_template_type_id
        self.requested_template_name = requested_template_name
470f7c8ff6c95c50405a9ed6afdbdb319e214c02 | 1,657 | py | Python | var/spack/repos/builtin/packages/hibench/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/hibench/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/hibench/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Hibench(MavenPackage):
    """HiBench is a big data benchmark suite that helps evaluate different big
    data frameworks in terms of speed, throughput and system resource
    utilizations. It contains a set of Hadoop, Spark and streaming workloads,
    including Sort, WordCount, TeraSort, Repartition, Sleep, SQL, PageRank,
    Nutch indexing, Bayes, Kmeans, NWeight and enhanced DFSIO, etc."""

    homepage = "https://github.com/Intel-bigdata/HiBench"
    url = "https://github.com/Intel-bigdata/HiBench/archive/HiBench-7.1.tar.gz"

    # sha256 digests of the upstream GitHub release tarballs, newest first.
    version('7.1', sha256='96572a837d747fb6347f2b906fd5f7fb97a62095435326ccfee5e75777a5c210')
    version('7.0', sha256='89b01f3ad90b758f24afd5ea2bee997c3d700ce9244b8a2b544acc462ab0e847')
    version('6.0', sha256='179f5415903f4029bd0ea1101a3d4c67faf88ca46a993d8179582299ad730f79')
    version('5.0', sha256='32d6a7bc1010d90b2f22906896a03cd1980e617beb07b01716e3d04de5760ed4')
    version('4.1', sha256='07551763aa30f04d32870c323524b5fc0fc2e968d7081d8916575bdeb4fd1381')
    version('4.0', sha256='de58ed5e9647ffe28c2a905a8830b661bbd09db334eb5b3472c8186553407e0e')
    version('3.0.0', sha256='869771e73593caac3a9b2fb14a10041a485d248074ba38cca812c934897db63d')
    version('2.2.1', sha256='f8531cbaff8d93bfd1c0742fec5dbb375bfeeb9ec1b39b4e857120e933a2c9ec')
    version('2.2', sha256='5f68e22339cdd141b846d8b1d7134b2b8ff5fbd5e847e406214dc845f5d005cf')
a6a173dfa16946d1b343a80b4e42d5cc67ea6e07 | 397 | py | Python | lolexport/log.py | dleiferives/lolexport | 894c97240893da829e96f46e2c4cdebf85846412 | [
"MIT"
] | 2 | 2021-02-23T09:21:07.000Z | 2022-03-25T15:02:50.000Z | lolexport/log.py | dleiferives/lolexport | 894c97240893da829e96f46e2c4cdebf85846412 | [
"MIT"
] | 5 | 2021-02-24T01:26:36.000Z | 2022-02-27T13:05:27.000Z | lolexport/log.py | dleiferives/lolexport | 894c97240893da829e96f46e2c4cdebf85846412 | [
"MIT"
] | 1 | 2022-02-27T02:17:17.000Z | 2022-02-27T02:17:17.000Z | from os import environ
import logging
from logzero import setup_logger  # type: ignore[import]

# https://docs.python.org/3/library/logging.html#logging-levels
loglevel: int = logging.DEBUG  # (10)
# The LOLEXPORT environment variable, when set, overrides the log level with a
# numeric logging level (e.g. 20 for INFO).  NOTE(review): a non-numeric value
# would raise ValueError here -- confirm whether that is acceptable.
if "LOLEXPORT" in environ:
    loglevel = int(environ["LOLEXPORT"])

# logzero handles this fine, can be imported/configured
# multiple times
logger = setup_logger(name="lolexport", level=loglevel)
676ef66a3f3c3d91528be28cf7186a5e710394ec | 3,853 | py | Python | selfdrive/car/nissan/interface.py | pevdh/openpilot | fca82ba503a663ec97b7ba89c2c3da80aef739b2 | [
"MIT"
] | 1 | 2020-09-09T12:08:10.000Z | 2020-09-09T12:08:10.000Z | selfdrive/car/nissan/interface.py | pevdh/openpilot | fca82ba503a663ec97b7ba89c2c3da80aef739b2 | [
"MIT"
] | null | null | null | selfdrive/car/nissan/interface.py | pevdh/openpilot | fca82ba503a663ec97b7ba89c2c3da80aef739b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from cereal import car
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import create_event, EventTypes as ET
from selfdrive.car.nissan.values import CAR
from selfdrive.car import STD_CARGO_KG, scale_rot_inertia, scale_tire_stiffness, gen_empty_fingerprint
from selfdrive.car.interfaces import CarInterfaceBase
class CarInterface(CarInterfaceBase):
  """openpilot car interface for Nissan platforms (X-Trail, Leaf)."""

  def __init__(self, CP, CarController, CarState):
    super().__init__(CP, CarController, CarState)
    # Extra parser for the ADAS CAN bus, on top of the base powertrain/camera
    # parsers created by CarInterfaceBase.
    self.cp_adas = self.CS.get_adas_can_parser(CP)

  @staticmethod
  def compute_gb(accel, speed):
    """Map an acceleration request to a normalized gas/brake value."""
    return float(accel) / 4.0

  @staticmethod
  def get_params(candidate, fingerprint=None, has_relay=False, car_fw=None):
    """Build CarParams for the given candidate platform.

    `fingerprint` and `car_fw` default to None instead of the previous
    `gen_empty_fingerprint()` / `[]` defaults: a call in the signature is
    evaluated once at import time and a list default is shared across calls,
    so both were mutable-default hazards.  Passing explicit values behaves
    exactly as before.
    """
    if fingerprint is None:
      fingerprint = gen_empty_fingerprint()
    if car_fw is None:
      car_fw = []

    ret = CarInterfaceBase.get_std_params(candidate, fingerprint, has_relay)
    ret.carName = "nissan"
    ret.safetyModel = car.CarParams.SafetyModel.nissan

    ret.steerLimitAlert = False
    ret.enableCamera = True
    ret.steerRateCost = 0.5

    ret.steerActuatorDelay = 0.1
    ret.lateralTuning.pid.kf = 0.00006
    ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0.0], [0.0]]
    ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.01], [0.005]]
    ret.steerMaxBP = [0.]  # m/s
    ret.steerMaxV = [1.]

    if candidate == CAR.XTRAIL:
      ret.mass = 1610 + STD_CARGO_KG
      ret.wheelbase = 2.705
      ret.centerToFront = ret.wheelbase * 0.44
      ret.steerRatio = 17
    elif candidate == CAR.LEAF:
      ret.mass = 1610 + STD_CARGO_KG
      ret.wheelbase = 2.705
      ret.centerToFront = ret.wheelbase * 0.44
      ret.steerRatio = 17

    ret.steerControlType = car.CarParams.SteerControlType.angle
    ret.radarOffCan = True

    # TODO: get actual value, for now starting with reasonable value for
    # civic and scaling by mass and wheelbase
    ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)

    # TODO: start from empirically derived lateral slip stiffness for the civic and scale by
    # mass and CG position, so all cars will have approximately similar dyn behaviors
    ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront)

    return ret

  # returns a car.CarState
  def update(self, c, can_strings):
    """Parse CAN strings, build the CarState message and attach events."""
    self.cp.update_strings(can_strings)
    self.cp_cam.update_strings(can_strings)
    self.cp_adas.update_strings(can_strings)

    ret = self.CS.update(self.cp, self.cp_adas, self.cp_cam)
    ret.canValid = self.cp.can_valid and self.cp_adas.can_valid and self.cp_cam.can_valid
    ret.yawRate = self.VM.yaw_rate(ret.steeringAngle * CV.DEG_TO_RAD, ret.vEgo)

    # NOTE(review): buttonEvents is built but never assigned to ret -- appears
    # to be dead code; kept to preserve behavior, confirm before removing.
    buttonEvents = []
    be = car.CarState.ButtonEvent.new_message()
    be.type = car.CarState.ButtonEvent.Type.accelCruise
    buttonEvents.append(be)

    events = self.create_common_events(ret)

    if ret.cruiseState.enabled and not self.cruise_enabled_prev:
      events.append(create_event('pcmEnable', [ET.ENABLE]))
    if not ret.cruiseState.enabled:
      events.append(create_event('pcmDisable', [ET.USER_DISABLE]))

    # Engaging openpilot requires the stock LKAS setting to be off.
    if self.CS.lkas_enabled:
      events.append(create_event('invalidLkasSetting', [ET.PERMANENT]))

    ret.events = events

    # update previous brake/gas pressed
    self.gas_pressed_prev = ret.gasPressed
    self.brake_pressed_prev = ret.brakePressed
    self.cruise_enabled_prev = ret.cruiseState.enabled

    self.CS.out = ret.as_reader()
    return self.CS.out

  def apply(self, c):
    """Convert a CarControl message into CAN sends for this frame."""
    can_sends = self.CC.update(c.enabled, self.CS, self.frame, c.actuators,
                               c.cruiseControl.cancel, c.hudControl.visualAlert,
                               c.hudControl.leftLaneVisible, c.hudControl.rightLaneVisible,
                               c.hudControl.leftLaneDepart, c.hudControl.rightLaneDepart)
    self.frame += 1
    return can_sends
| 37.048077 | 116 | 0.717623 |
cf81db59081b1a13c016144ee94bff7e2930f3bc | 6,471 | py | Python | ple/ple_peru/doctype/libro_electronico_diario_simplificado/libro_electronico_diario_simplificado.py | ovenube/ple | d016e7ba6e949645613865be7198db689ca2e11f | [
"MIT"
] | 1 | 2021-02-20T19:18:51.000Z | 2021-02-20T19:18:51.000Z | ple/ple_peru/doctype/libro_electronico_diario_simplificado/libro_electronico_diario_simplificado.py | ovenube/ple | d016e7ba6e949645613865be7198db689ca2e11f | [
"MIT"
] | 1 | 2021-02-20T19:21:54.000Z | 2021-02-20T19:21:54.000Z | ple/ple_peru/doctype/libro_electronico_diario_simplificado/libro_electronico_diario_simplificado.py | ovenube/ple | d016e7ba6e949645613865be7198db689ca2e11f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, seethersan and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from ple.ple_peru.utils import Utils, to_file
class LibroElectronicoDiarioSimplificado(Utils):
def get_account(self, company, year, periodo, primer=None):
account_list = []
from_date, to_date = self.get_dates(year, periodo)
if primer == "1":
account = frappe.db.sql("""select
DATE_FORMAT(NOW(),'%Y%m%d') as periodo,
SUBSTRING(name,1,POSITION('-' in name)-2) as codigo_asiento,
SUBSTRING(name,POSITION('-' in name)+2) as descripcion_asiento,
'01' as codigo_plan,
'PLAN CONTABLE GENERAL EMPRESARIAL' as descripcion_plan,
"" as codigo_cuenta,
"" as descripcion_cuenta,
'1' as indicador_cuenta
from
`tabAccount`
where SUBSTRING(name,1,POSITION('-' in name)-1) > 100
and company = '"""+company+"'", as_dict=True)
for d in account:
account_list.append({
'periodo': d.periodo,
'codigo_asiento': d.codigo_asiento,
'descripcion_asiento': d.descripcion_asiento,
'codigo_plan': d.codigo_plan,
'descripcion_plan': d.descripcion_plan,
'codigo_cuenta': d.codigo_cuenta,
'descripcion_cuenta': d.descripcion_cuenta,
'indicador_cuenta': d.indicador_cuenta
})
else:
account = frappe.db.sql("""select
CONCAT(DATE_FORMAT(gl.posting_date,'%Y%m'),'00') as periodo,
REPLACE(voucher_no, '-', '') as cuo,
CONCAT('M', IF(voucher_type = 'Sales Invoice',
(SELECT
COUNT(name)
FROM
`tabGL Entry` as gl_1
WHERE gl_1.voucher_no = gl.voucher_no
AND SUBSTRING(gl_1.account, 1, 2) <= SUBSTRING(gl.account, 1, 2)),
(SELECT
COUNT(name)
FROM
`tabGL Entry` as gl_1
WHERE gl_1.voucher_no = gl.voucher_no
AND SUBSTRING(gl_1.account, 1, 2) >= SUBSTRING(gl.account, 1, 2)))) as correlativo_asiento,
SUBSTRING(gl.account,1,POSITION('-' in gl.account)-2) as codigo_asiento,
"" as cuo_ue,
"" as centro_costo,
IF(gl.account_currency = 'SOL', 'PEN', gl.account_currency) as tipo_moneda,
IF(voucher_type = 'Purchase Invoice',
(select
`codigo_tipo_documento`
from
`tabPurchase Invoice`where name=voucher_no),
(select
`codigo_tipo_documento`
from
`tabSales Invoice`
where name=voucher_no)) as codigo_documento,
IF(voucher_type = 'Purchase Invoice',
(select
`tax_id`
from
`tabPurchase Invoice`where name=voucher_no),
(select
`tax_id`
from
`tabSales Invoice`
where name=voucher_no)) as tax_id,
IF(voucher_type = 'Purchase Invoice',
(select
IF(LENGTH(codigo_comprobante) = 1, CONCAT('0', codigo_comprobante), codigo_comprobante)
from
`tabPurchase Invoice`where name=voucher_no),
(select
IF(LENGTH(codigo_comprobante) = 1, CONCAT('0', codigo_comprobante), codigo_comprobante)
from
`tabSales Invoice`
where name=voucher_no)) as codigo_comprobante,
IF(voucher_type = 'Purchase Invoice',IFNULL(
(select
bill_series
from
`tabPurchase Invoice`
where name=voucher_no),''),
SUBSTRING_INDEX(SUBSTRING_INDEX(voucher_no,'-',-2),'-',1)) as serie_comprobante,
IF(voucher_type = 'Purchase Invoice',
(select
bill_no
from
`tabPurchase Invoice`
where name=voucher_no), SUBSTRING_INDEX(SUBSTRING_INDEX(voucher_no,'-',-2),'-',-1)) as numero_comprobante,
DATE_FORMAT(gl.posting_date,'%d/%m/%Y') as fecha_contable,
DATE_FORMAT(gl.posting_date,'%d/%m/%Y') as fecha_vencimiento,
DATE_FORMAT(gl.posting_date,'%d/%m/%Y') as fecha_emision,
gl.remarks as glosa,
'' as glosa_referencial,
IF(gl.debit_in_account_currency = 0, '0.00', ROUND(gl.debit_in_account_currency, 2)) as debe,
IF(gl.credit_in_account_currency = 0, '0.00', ROUND(gl.credit_in_account_currency, 2)) as haber,
IF(voucher_type = 'Purchase Invoice',
CONCAT('080100&',
(select
CONCAT(DATE_FORMAT(IFNULL(bill_expiration_date,bill_date),'%Y%m'),'00&', REPLACE(voucher_no, '-', ''), '&','M2')
from
`tabPurchase Invoice` purchase_invoice
where purchase_invoice.name=voucher_no)),
(IF(voucher_type = 'Sales Invoice', CONCAT('140100&',
(select
CONCAT(DATE_FORMAT(due_date,'%Y%m'),'00&', REPLACE(voucher_no, '-', ''),'&', 'M1')
from
`tabSales Invoice` sales_invoice
where sales_invoice.name=voucher_no)),''))) as estructurado,
'1' as estado
from
`tabGL Entry` gl
where SUBSTRING(account,1,POSITION('-' in account)-1) > 100
and posting_date >= '""" + str(from_date) + """'
and posting_date <= '""" + str(to_date) + """'
and company = '"""+company+"""'
order by posting_date""", as_dict=True)
for d in account:
account_list.append({
'periodo': d.periodo,
'cuo': d.cuo,
'correlativo_asiento': d.correlativo_asiento,
'codigo_asiento': d.codigo_asiento,
'cuo_ue': d.cuo_ue,
'centro_costo': d.centro_costo,
'tipo_moneda': d.tipo_moneda,
'tipo_documento': d.codigo_documento,
'tax_id': d.tax_id,
'codigo_comprobante': d.codigo_comprobante,
'serie_comprobante': d.serie_comprobante,
'numero_comprobante': d.numero_comprobante,
'fecha_contable': d.fecha_contable,
'fecha_vencimiento': d.fecha_vencimiento,
'fecha_emision': d.fecha_emision,
'glosa': d.glosa,
'glosa_referencial': d.glosa_referencial,
'debe': d.debe,
'haber': d.haber,
'estructurado': d.estructurado,
'estado': d.estado
})
return account_list
def export_libro_diario_simplificado(self, company, year, periodo, ruc, primer):
    """Export the simplified 'Libro Diario' (PLE) for the given period.

    Fetches the ledger rows, derives the SUNAT file name from the RUC, the
    period code and the book code, and hands everything to `to_file`.
    """
    tipo = "diario_simplificado"
    data = self.get_account(company, year, periodo, primer)
    codigo_periodo = self.ple_name(year, periodo)
    # Only the book code differs between the two cases ('1' = first export).
    codigo_libro = '00050400' if primer == "1" else '00050200'
    # Fixed suffix ('00' followed by four '1' flags) kept verbatim from the
    # original naming scheme; presumably PLE file-name indicators -- confirm.
    nombre = "LE" + str(ruc) + codigo_periodo + codigo_libro + '001111' + ".txt"
    return to_file(data, tipo, nombre, primer)
| 38.064706 | 123 | 0.625251 |
688776a7438b9583c123a0f61f01003fdc6a4fea | 6,868 | py | Python | saleor/graphql/shipping/mutations.py | overboardfreak/Project-On-Saleor | 3dd05c2705f614d3472d249e2a79f6ed82274a43 | [
"BSD-3-Clause"
] | 6 | 2019-01-06T08:39:20.000Z | 2022-03-04T18:07:47.000Z | saleor/graphql/shipping/mutations.py | alexredondosk8/saleor | 1ca28980eccb5c71e8f021c2dba66346c244d89e | [
"BSD-3-Clause"
] | 5 | 2021-03-18T23:51:16.000Z | 2022-03-11T23:45:26.000Z | saleor/graphql/shipping/mutations.py | alexredondosk8/saleor | 1ca28980eccb5c71e8f021c2dba66346c244d89e | [
"BSD-3-Clause"
] | null | null | null | from textwrap import dedent
import graphene
from ...dashboard.shipping.forms import default_shipping_zone_exists
from ...shipping import models
from ..core.mutations import ModelDeleteMutation, ModelMutation
from ..core.scalars import Decimal, WeightScalar
from .enums import ShippingMethodTypeEnum
from .types import ShippingZone
class ShippingPriceInput(graphene.InputObjectType):
    # GraphQL input payload for creating/updating a shipping method.
    # No class docstring on purpose: graphene would expose it as the schema
    # description.
    # Name shown to customers at checkout.
    name = graphene.String(
        description='Name of the shipping method. Visible to customers')
    price = Decimal(description='Shipping price of the shipping method.')
    # Order-price bounds apply to PRICE-based methods; order-weight bounds
    # apply to WEIGHT-based methods (validated in ShippingPriceMixin).
    minimum_order_price = Decimal(
        description='Minimum order price to use this shipping method',
        required=False)
    maximum_order_price = Decimal(
        description='Maximum order price to use this shipping method',
        required=False)
    minimum_order_weight = WeightScalar(
        description='Minimum order weight to use this shipping method',
        required=False)
    maximum_order_weight = WeightScalar(
        description='Maximum order weight to use this shipping method',
        required=False)
    # Selects which pair of bounds above is meaningful.
    type = ShippingMethodTypeEnum(
        description='Shipping type: price or weight based.')
    # ID of the zone the method belongs to; `name` overrides the camelCase
    # field name exposed in the schema.
    shipping_zone = graphene.ID(
        description='Shipping zone this method belongs to.',
        name='shippingZone')
class ShippingZoneInput(graphene.InputObjectType):
    # GraphQL input payload for creating/updating a shipping zone.
    name = graphene.String(
        description='Shipping zone\'s name. Visible only to the staff.')
    countries = graphene.List(
        graphene.String,
        description='List of countries in this shipping zone.')
    # At most one zone may be default; the default zone acts as the fallback
    # for countries not listed in any other zone (see ShippingZoneMixin).
    default = graphene.Boolean(
        description=dedent("""
        Is default shipping zone, that will be used
        for countries not covered by other zones."""))
class ShippingZoneMixin:
    # Shared clean_input step for the shipping-zone create/update mutations.

    @classmethod
    def clean_input(cls, info, instance, input, errors):
        """Validate the `default` flag of a shipping zone.

        Only one default zone may exist, and a default zone covers "everything
        else", so its explicit country list is cleared.
        """
        cleaned_input = super().clean_input(info, instance, input, errors)
        default = cleaned_input.get('default')
        if default is not None:
            # NOTE(review): this branch also runs when default=False is passed
            # explicitly, which would still raise 'already exists' -- looks
            # like it should test truthiness (`if default:`); confirm.
            if default_shipping_zone_exists(instance.pk):
                cls.add_error(
                    errors, 'default', 'Default shipping zone already exists.')
            elif cleaned_input.get('countries'):
                # Default zone applies everywhere; drop the explicit countries.
                cleaned_input['countries'] = []
        else:
            # Flag omitted entirely: store an explicit False rather than None.
            cleaned_input['default'] = False
        return cleaned_input
class ShippingZoneCreate(ShippingZoneMixin, ModelMutation):
    # Staff-only mutation creating a ShippingZone; ShippingZoneMixin validates
    # the `default` flag / country list in clean_input.
    shipping_zone = graphene.Field(
        ShippingZone, description='Created shipping zone.')

    class Arguments:
        input = ShippingZoneInput(
            description='Fields required to create a shipping zone.',
            required=True)

    class Meta:
        description = 'Creates a new shipping zone.'
        model = models.ShippingZone

    @classmethod
    def user_is_allowed(cls, user, input):
        # Restricted to staff with the shipping management permission.
        return user.has_perm('shipping.manage_shipping')
class ShippingZoneUpdate(ShippingZoneMixin, ModelMutation):
    # Staff-only mutation updating an existing ShippingZone; ShippingZoneMixin
    # validates the `default` flag / country list in clean_input.
    shipping_zone = graphene.Field(
        ShippingZone, description='Updated shipping zone.')

    class Arguments:
        id = graphene.ID(
            description='ID of a shipping zone to update.', required=True)
        input = ShippingZoneInput(
            description='Fields required to update a shipping zone.',
            required=True)

    class Meta:
        # Fixed copy/paste from the create mutation: this updates an existing
        # zone, it does not create "a new" one.
        description = 'Updates a shipping zone.'
        model = models.ShippingZone

    @classmethod
    def user_is_allowed(cls, user, input):
        # Restricted to staff with the shipping management permission.
        return user.has_perm('shipping.manage_shipping')
class ShippingZoneDelete(ModelDeleteMutation):
    # Staff-only mutation removing a ShippingZone by ID.
    class Arguments:
        id = graphene.ID(
            required=True, description='ID of a shipping zone to delete.')

    class Meta:
        description = 'Deletes a shipping zone.'
        model = models.ShippingZone

    @classmethod
    def user_is_allowed(cls, user, input):
        # Restricted to staff with the shipping management permission.
        return user.has_perm('shipping.manage_shipping')
class ShippingPriceMixin:
    # Shared clean_input step for the shipping-price create/update mutations:
    # checks the min/max order bounds that match the method's type.

    @classmethod
    def clean_input(cls, info, instance, input, errors):
        cleaned_input = super().clean_input(info, instance, input, errors)
        method_type = cleaned_input.get('type')
        if not method_type:
            # Type not supplied: nothing type-specific to validate.
            return cleaned_input

        if method_type == ShippingMethodTypeEnum.PRICE.value:
            # Price-based methods need a minimum, and max (if any) > min.
            lower_bound = cleaned_input.get('minimum_order_price')
            upper_bound = cleaned_input.get('maximum_order_price')
            if lower_bound is None:
                cls.add_error(
                    errors, 'minimum_order_price',
                    'Minimum order price is required'
                    ' for Price Based shipping.')
            elif upper_bound is not None and upper_bound <= lower_bound:
                cls.add_error(
                    errors, 'maximum_order_price',
                    'Maximum order price should be larger than the minimum.')
            return cleaned_input

        # Weight-based methods mirror the same rules on the weight bounds.
        lower_bound = cleaned_input.get('minimum_order_weight')
        upper_bound = cleaned_input.get('maximum_order_weight')
        if lower_bound is None:
            cls.add_error(
                errors, 'minimum_order_weight',
                'Minimum order weight is required for'
                ' Weight Based shipping.')
        elif upper_bound is not None and upper_bound <= lower_bound:
            cls.add_error(
                errors, 'maximum_order_weight',
                'Maximum order weight should be larger than the minimum.')
        return cleaned_input
class ShippingPriceCreate(ShippingPriceMixin, ModelMutation):
    # Staff-only mutation creating a ShippingMethod ("shipping price");
    # ShippingPriceMixin validates the min/max bounds in clean_input.
    class Arguments:
        input = ShippingPriceInput(
            description='Fields required to create a shipping price',
            required=True)

    class Meta:
        description = 'Creates a new shipping price.'
        model = models.ShippingMethod

    @classmethod
    def user_is_allowed(cls, user, input):
        # Restricted to staff with the shipping management permission.
        return user.has_perm('shipping.manage_shipping')
class ShippingPriceUpdate(ShippingPriceMixin, ModelMutation):
    # Staff-only mutation updating an existing ShippingMethod;
    # ShippingPriceMixin validates the min/max bounds in clean_input.
    class Arguments:
        id = graphene.ID(
            description='ID of a shipping price to update.', required=True)
        input = ShippingPriceInput(
            description='Fields required to update a shipping price',
            required=True)

    class Meta:
        # Fixed copy/paste from the create mutation: this updates an existing
        # shipping price, it does not create "a new" one.
        description = 'Updates a shipping price.'
        model = models.ShippingMethod

    @classmethod
    def user_is_allowed(cls, user, input):
        # Restricted to staff with the shipping management permission.
        return user.has_perm('shipping.manage_shipping')
class ShippingPriceDelete(ModelDeleteMutation):
    # Staff-only mutation removing a ShippingMethod by ID.
    class Arguments:
        id = graphene.ID(
            required=True, description='ID of a shipping price to delete.')

    class Meta:
        description = 'Deletes a shipping price.'
        model = models.ShippingMethod

    @classmethod
    def user_is_allowed(cls, user, input):
        # Restricted to staff with the shipping management permission.
        return user.has_perm('shipping.manage_shipping')
| 34.862944 | 79 | 0.655504 |
c6a03e3f3f55710af48c8ccd2c18664611810482 | 7,752 | py | Python | cirq-core/cirq/ops/pauli_gates_test.py | LLcat1217/Cirq | b88069f7b01457e592ad69d6b413642ef11a56b8 | [
"Apache-2.0"
] | 1 | 2022-02-05T22:17:39.000Z | 2022-02-05T22:17:39.000Z | cirq-core/cirq/ops/pauli_gates_test.py | LLcat1217/Cirq | b88069f7b01457e592ad69d6b413642ef11a56b8 | [
"Apache-2.0"
] | 4 | 2022-01-16T14:12:15.000Z | 2022-02-24T03:58:46.000Z | cirq-core/cirq/ops/pauli_gates_test.py | LLcat1217/Cirq | b88069f7b01457e592ad69d6b413642ef11a56b8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import pytest
import cirq
def test_equals():
    # Per axis, the cirq alias, the module-level constant and a default
    # PowGate form one equality group.
    eq = cirq.testing.EqualsTester()
    for axis, pow_gate in (
        ('X', cirq.XPowGate()),
        ('Y', cirq.YPowGate()),
        ('Z', cirq.ZPowGate()),
    ):
        eq.add_equality_group(
            getattr(cirq, axis), getattr(cirq.ops.pauli_gates, axis), pow_gate
        )
def test_phased_pauli_product():
    # Full multiplication table of the single-qubit Pauli group: phase and
    # resulting gate for every ordered pair.
    expected = {
        (cirq.X, cirq.I): (1, cirq.X),
        (cirq.X, cirq.X): (1, cirq.I),
        (cirq.X, cirq.Y): (1j, cirq.Z),
        (cirq.X, cirq.Z): (-1j, cirq.Y),
        (cirq.Y, cirq.I): (1, cirq.Y),
        (cirq.Y, cirq.X): (-1j, cirq.Z),
        (cirq.Y, cirq.Y): (1, cirq.I),
        (cirq.Y, cirq.Z): (1j, cirq.X),
        (cirq.Z, cirq.I): (1, cirq.Z),
        (cirq.Z, cirq.X): (1j, cirq.Y),
        (cirq.Z, cirq.Y): (-1j, cirq.X),
        (cirq.Z, cirq.Z): (1, cirq.I),
    }
    for (lhs, rhs), want in expected.items():
        assert lhs.phased_pauli_product(rhs) == want
def test_isinstance():
    # Each Pauli is an instance of exactly its own PowGate class.
    pow_type_of = {
        cirq.X: cirq.XPowGate,
        cirq.Y: cirq.YPowGate,
        cirq.Z: cirq.ZPowGate,
    }
    for pauli, own_type in pow_type_of.items():
        for candidate in pow_type_of.values():
            assert isinstance(pauli, candidate) == (candidate is own_type)
def test_by_index():
    # by_index is cyclic with period 3: indices congruent mod 3 yield the
    # same Pauli (X=0, Y=1, Z=2).
    eq = cirq.testing.EqualsTester()
    for pauli, base in ((cirq.X, 0), (cirq.Y, 1), (cirq.Z, 2)):
        eq.add_equality_group(
            pauli, *[cirq.Pauli.by_index(base + 3 * k) for k in (-1, 0, 1, 2)]
        )
def test_relative_index():
    # relative_index returns the cyclic distance mapped into {-1, 0, 1}:
    # ((i - j + 1) mod 3) - 1 for paulis at positions i, j in (X, Y, Z).
    paulis = (cirq.X, cirq.Y, cirq.Z)
    for i, first in enumerate(paulis):
        for j, second in enumerate(paulis):
            assert first.relative_index(second) == (i - j + 1) % 3 - 1
def test_by_relative_index():
    # by_relative_index walks the X -> Y -> Z cycle: shifting a Pauli at
    # position i by k lands on position (i + k) mod 3.
    paulis = (cirq.X, cirq.Y, cirq.Z)
    for i, start in enumerate(paulis):
        for shift in range(-1, 4):
            assert cirq.Pauli.by_relative_index(start, shift) == paulis[(i + shift) % 3]
def test_too_many_qubits():
    # Pauli gates are strictly single-qubit: both gate application and
    # Operation.with_qubits must reject a two-qubit argument list.
    a, b = cirq.LineQubit.range(2)
    with pytest.raises(ValueError, match='single qubit'):
        _ = cirq.X.on(a, b)

    x = cirq.X(a)
    with pytest.raises(ValueError, match=r'len\(new_qubits\)'):
        _ = x.with_qubits(a, b)
def test_relative_index_consistency():
    # by_relative_index must invert relative_index for every ordered pair.
    for pauli_1 in (cirq.X, cirq.Y, cirq.Z):
        for pauli_2 in (cirq.X, cirq.Y, cirq.Z):
            shift = pauli_2.relative_index(pauli_1)
            assert cirq.Pauli.by_relative_index(pauli_1, shift) == pauli_2
def test_gt():
    # `>` on Paulis is the cyclic successor relation: X > Z, Y > X, Z > Y;
    # a Pauli is never greater than itself.
    greater = {(cirq.X, cirq.Z), (cirq.Y, cirq.X), (cirq.Z, cirq.Y)}
    for lhs in (cirq.X, cirq.Y, cirq.Z):
        for rhs in (cirq.X, cirq.Y, cirq.Z):
            assert (lhs > rhs) == ((lhs, rhs) in greater)
def test_gt_other_type():
    # Ordering against a non-Pauli value is undefined and must raise.
    with pytest.raises(TypeError):
        _ = cirq.X > object()
def test_lt():
    # `<` mirrors `>` on the cycle: X < Y, Y < Z, Z < X; never reflexive.
    lesser = {(cirq.X, cirq.Y), (cirq.Y, cirq.Z), (cirq.Z, cirq.X)}
    for lhs in (cirq.X, cirq.Y, cirq.Z):
        for rhs in (cirq.X, cirq.Y, cirq.Z):
            assert (lhs < rhs) == ((lhs, rhs) in lesser)
def test_lt_other_type():
    # Ordering against a non-Pauli value is undefined and must raise.
    with pytest.raises(TypeError):
        _ = cirq.X < object()
def test_str():
    # str() of each Pauli is its bare axis letter.
    for gate, expected in ((cirq.X, 'X'), (cirq.Y, 'Y'), (cirq.Z, 'Z')):
        assert str(gate) == expected
def test_repr():
    # repr() names the public cirq attribute for each Pauli.
    for gate, expected in (
        (cirq.X, 'cirq.X'),
        (cirq.Y, 'cirq.Y'),
        (cirq.Z, 'cirq.Z'),
    ):
        assert repr(gate) == expected
def test_third():
    # third() of two distinct Paulis is the remaining one; of equal Paulis,
    # the same Pauli.
    paulis = (cirq.X, cirq.Y, cirq.Z)
    for a, b in itertools.product(paulis, repeat=2):
        if a == b:
            assert a.third(b) == a
        else:
            (remaining,) = set(paulis) - {a, b}
            assert a.third(b) == remaining
def test_commutes():
    # A Pauli commutes only with itself; distinct Paulis anticommute.
    for A, B in itertools.product([cirq.X, cirq.Y, cirq.Z], repeat=2):
        assert cirq.commutes(A, B) == (A == B)
    # A non-gate operand is rejected unless a default is supplied.
    with pytest.raises(TypeError):
        assert cirq.commutes(cirq.X, 'X')
    assert cirq.commutes(cirq.X, 'X', default='default') == 'default'
    # JSON round-tripping preserves commutation with the original gate.
    assert cirq.commutes(cirq.Z, cirq.read_json(json_text=cirq.to_json(cirq.Z)))
def test_unitary():
    # The original compared each unitary to itself, which can never fail.
    # Pin the actual single-qubit Pauli matrices instead.
    np.testing.assert_allclose(cirq.unitary(cirq.X), np.array([[0, 1], [1, 0]]))
    np.testing.assert_allclose(
        cirq.unitary(cirq.Y), np.array([[0, -1j], [1j, 0]]))
    np.testing.assert_allclose(cirq.unitary(cirq.Z), np.diag([1, -1]))
def test_apply_unitary():
    # Each Pauli's _apply_unitary_ must agree with its dense unitary matrix.
    cirq.testing.assert_has_consistent_apply_unitary(cirq.X)
    cirq.testing.assert_has_consistent_apply_unitary(cirq.Y)
    cirq.testing.assert_has_consistent_apply_unitary(cirq.Z)
def test_identity_multiplication():
    # Identity factors drop out of a Pauli product regardless of operand
    # order or which qubit carries the identity.
    a, b, c = cirq.LineQubit.range(3)
    assert cirq.X(a) * cirq.I(a) == cirq.X(a)
    assert cirq.X(a) * cirq.I(b) == cirq.X(a)
    assert cirq.X(a) * cirq.Y(b) * cirq.I(c) == cirq.X(a) * cirq.Y(b)
    assert cirq.I(c) * cirq.X(a) * cirq.Y(b) == cirq.X(a) * cirq.Y(b)
    # Non-Pauli gates (H) and non-operation operands cannot join the product.
    with pytest.raises(TypeError):
        _ = cirq.H(c) * cirq.X(a) * cirq.Y(b)
    with pytest.raises(TypeError):
        _ = cirq.X(a) * cirq.Y(b) * cirq.H(c)
    with pytest.raises(TypeError):
        _ = cirq.I(a) * str(cirq.Y(b))
def test_powers():
    # Raising a Pauli to a fractional power leaves the Pauli class (the
    # result is only a PowGate of the same axis); power 1 stays a Pauli.
    fractional_exponent = {cirq.X: -0.5, cirq.Y: 0.2, cirq.Z: 0.5}
    pow_type_of = {
        cirq.X: cirq.XPowGate,
        cirq.Y: cirq.YPowGate,
        cirq.Z: cirq.ZPowGate,
    }
    for pauli, exponent in fractional_exponent.items():
        assert isinstance(pauli, cirq.Pauli)
        gate = pauli**exponent
        assert not isinstance(gate, cirq.Pauli)
        assert isinstance(gate, pow_type_of[pauli])
        assert isinstance(pauli**1, cirq.Pauli)
| 34.762332 | 83 | 0.672472 |
bf39d4b3ac681ae0893c8e7bed39af081af76267 | 958 | py | Python | ptp/tools/wapiti/signatures.py | owtf/ptp | b43e581d7646330810f526432c689c3d88995df9 | [
"BSD-3-Clause"
] | 23 | 2015-03-22T09:18:35.000Z | 2022-03-10T23:28:13.000Z | ptp/tools/wapiti/signatures.py | owtf/ptp | b43e581d7646330810f526432c689c3d88995df9 | [
"BSD-3-Clause"
] | 22 | 2015-07-12T12:23:40.000Z | 2017-02-26T12:39:48.000Z | ptp/tools/wapiti/signatures.py | owtf/ptp | b43e581d7646330810f526432c689c3d88995df9 | [
"BSD-3-Clause"
] | 14 | 2015-06-03T19:16:22.000Z | 2022-03-10T23:28:15.000Z | """
:synopsis: Wapiti does not provide ranking for the vulnerabilities it has found. This file tries to define a ranking
for every vulnerability Wapiti might find.
.. moduleauthor:: Tao Sauvage
"""
from ptp.libptp.constants import HIGH, MEDIUM, LOW, INFO
#: :data: :class:`dict` of the categories with their rank.
SIGNATURES = {
# High ranked vulnerabilities
'SQL Injection': HIGH,
'Blind SQL Injection': HIGH,
'Command execution': HIGH,
'Cross Site Scripting': HIGH,
# Medium ranked vulnerabilities
'Htaccess Bypass': MEDIUM,
'CRLF Injection': MEDIUM,
'CRLF': MEDIUM,
# Low ranked vulnerabilities
'File Handling': LOW, # a.k.a Path or Directory listing
'Resource consumption': LOW, # TODO: Is this higher than LOW?
# Informational ranked vulnerabilities
'Backup file': INFO,
'Potentially dangerous file': INFO, # TODO: Is this higher than INFO?
'Internal Server Error': INFO}
| 27.371429 | 116 | 0.689979 |
0d0841caecbd4dd02dc8166911bf37e38d83ce9c | 977 | py | Python | apiv3/urls.py | smithdc1/djangopackages | 8762a787fe968cbf5f772c6199d1362743c85db9 | [
"MIT"
] | 1 | 2021-08-10T13:33:37.000Z | 2021-08-10T13:33:37.000Z | apiv3/urls.py | smithdc1/djangopackages | 8762a787fe968cbf5f772c6199d1362743c85db9 | [
"MIT"
] | null | null | null | apiv3/urls.py | smithdc1/djangopackages | 8762a787fe968cbf5f772c6199d1362743c85db9 | [
"MIT"
] | 1 | 2021-09-27T08:48:32.000Z | 2021-09-27T08:48:32.000Z |
from . import views
from django.urls import path
app_name = "apiv3"

# Route table for API v3. Declaration order is preserved from the original
# list; Django resolves patterns top-down.
urlpatterns = [
    path('grids/', view=views.grid_list, name="grid_list"),
    path('grids/<slug:slug>/', view=views.grid_detail, name="grid_detail"),
    path('grids/<slug:slug>/packages/', view=views.grid_packages_list,
         name="grid_packages_list"),
    path('packages/', view=views.package_list, name="package_list"),
    path('packages/<slug:slug>/', view=views.package_detail,
         name="package_detail"),
    path('categories/', view=views.category_list, name="category_list"),
    path('categories/<slug:slug>/', view=views.category_detail,
         name="category_detail"),
    path('users/<slug:github_account>/', view=views.user_detail,
         name="user_detail"),
    path('users/', view=views.user_list, name="user_list"),
    path('', view=views.index, name="index"),
]
| 24.425 | 70 | 0.60696 |
573db1ce531cd6a2468e86a7a124ab15f094a47f | 1,893 | py | Python | node_modules/extsprintf/deps/javascriptlint/javascriptlint/visitation.py | tross78/placematch | 07bb4463b75bc68c3ee275c288890232f91533af | [
"MIT"
] | 3 | 2015-02-22T18:06:22.000Z | 2016-04-07T07:26:49.000Z | node_modules/extsprintf/deps/javascriptlint/javascriptlint/visitation.py | tross78/placematch | 07bb4463b75bc68c3ee275c288890232f91533af | [
"MIT"
] | 4 | 2016-02-04T15:45:32.000Z | 2016-02-05T04:25:23.000Z | node_modules/extsprintf/deps/javascriptlint/javascriptlint/visitation.py | tross78/placematch | 07bb4463b75bc68c3ee275c288890232f91533af | [
"MIT"
] | 1 | 2016-02-02T06:52:43.000Z | 2016-02-02T06:52:43.000Z | # vim: ts=4 sw=4 expandtab
""" This is an abstract module for visiting specific nodes. This is useed to
traverse the tree to generate warnings.
"""
def visit(event, *args):
    """ This decorator is used to indicate which nodes the function should
    examine. The function should accept (self, node) and return the relevant
    node or None. """
    # Tag the function in place; make_visitors later discovers these
    # attributes and groups the handlers by event and node kind.
    def tag(fn):
        fn._visit_event = event
        fn._visit_nodes = args
        return fn
    return tag
def make_visitors(visitors, klasses):
    """ Searches klasses for all member functions decorated with @visit and
    fills a dictionary that looks like:
        visitors = {
            'event_name': {
                'node_type' : [func1, func2]
            }
        }
    """
    assert isinstance(visitors, dict)

    # Instantiate an instance of each class
    for klass in klasses:
        # Style conventions enforced on visitor classes.
        if klass.__name__.lower() != klass.__name__:
            # NOTE: `raise ValueError, msg` (the original form) is Python-2-only
            # syntax and a SyntaxError on Python 3; call form works on both.
            raise ValueError('class names must be lowercase')
        if not klass.__doc__:
            raise ValueError('missing docstring on class %s' % klass.__name__)

        # Look for bound methods carrying the @visit markers.
        visitor = klass()
        for func in [getattr(visitor, name) for name in dir(visitor)]:
            for node_kind in getattr(func, '_visit_nodes', ()):
                # Group visitors by event (e.g. push vs pop), then by node kind.
                event_visitors = visitors.setdefault(func._visit_event, {})
                event_visitors.setdefault(node_kind, []).append(func)

    return visitors
| 35.716981 | 78 | 0.58954 |
6ec2e42536594c28897f6645f5ce0db47668af5b | 5,538 | py | Python | utils/others/jointencoder_args.py | guopeiming/JointCPNER | 1978665a8e2559fbb4d418105f5bca47cf90addb | [
"Apache-2.0"
] | 1 | 2021-02-23T13:59:28.000Z | 2021-02-23T13:59:28.000Z | utils/others/jointencoder_args.py | guopeiming/JointCPNER | 1978665a8e2559fbb4d418105f5bca47cf90addb | [
"Apache-2.0"
] | null | null | null | utils/others/jointencoder_args.py | guopeiming/JointCPNER | 1978665a8e2559fbb4d418105f5bca47cf90addb | [
"Apache-2.0"
] | null | null | null | # @Author : guopeiming
# @Contact : guopeiming.gpm@{qq, gmail}.com
import argparse
def _str2bool(value):
    """argparse ``type=`` helper parsing 'true/false'-style strings.

    Plain ``type=bool`` is a classic argparse pitfall: ``bool('False')`` is
    True because any non-empty string is truthy, so e.g. ``--cuda False``
    silently enabled CUDA. This helper parses the text properly.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)


def parse_args() -> argparse.Namespace:
    """Build and parse the command-line arguments for the joint model.

    Returns:
        argparse.Namespace holding every hyper-parameter; defaults apply for
        options not given on the command line.
    """
    parser = argparse.ArgumentParser(description='Neural model for NLP')
    # [Data]
    parser.add_argument('--joint_input', type=str, default='./data/onto/joint_char/', help='path of input data')
    parser.add_argument('--parsing_input', type=str, default='./data/onto/parsing_char/', help='path of input data')
    parser.add_argument('--language', type=str, choices=['chinese', 'arabic', 'english'], default='chinese', help='language')
    parser.add_argument('--transliterate', default='', type=str, help='whether to transliterate when using BERT/XLNet')
    # [Train]  (boolean flags use _str2bool: `type=bool` accepted any text as True)
    parser.add_argument('--debug', default=False, type=_str2bool, help='debug mode')
    parser.add_argument('--seed', default=2021, type=int, help='seed of random')
    parser.add_argument('--cuda', default=True, type=_str2bool, help='whether to use cuda')
    parser.add_argument('--gpuid', default=7, type=int, help='id of gpu')
    parser.add_argument('--batch_size', default=4, type=int, help='how many insts per batch to load')
    parser.add_argument('--accum_steps', default=4, type=int, help='the number of accumulated steps before backward')
    parser.add_argument('--shuffle', default=True, type=_str2bool, help='set True to get the data reshuffled at every epoch')
    parser.add_argument('--drop_last', default=False, type=_str2bool, help='whether to drop the last data')
    parser.add_argument('--num_workers', default=2, type=int, help='how many subprocesses to use for data loading')
    parser.add_argument('--epoch', default=35, type=int, help='max training epoch')
    parser.add_argument('--log_interval', default=100, type=int, help='interval on print log info')
    parser.add_argument('--eval_interval', default=700, type=int, help='interval on print evaluate model')
    parser.add_argument('--early_stop', default=True, type=_str2bool, help='early stop')
    parser.add_argument('--patience', default=3, type=int, help='early stop patience epoch')
    parser.add_argument('--save', default=False, type=_str2bool, help='whether to save model')
    # [Optimizer]
    parser.add_argument('--optim', default='Adam', type=str, help='optimizer used')
    parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
    parser.add_argument('--lr_fine_tune', default=0.00001, type=float, help='fine tune learning rate')
    parser.add_argument('--weight_decay', default=1e-2, type=float, help='lambda')
    parser.add_argument('--clip_grad', default=False, type=_str2bool, help='whether to ues util.clip')
    parser.add_argument('--clip_grad_max_norm', default=4.0, type=float, help='clip_grad_max_norm')
    parser.add_argument('--warmup_steps', default=8000, type=int, help='warm up steps')
    parser.add_argument('--lr_decay_factor', default=1.000001, type=float, help='decay factor of lr after warm up')
    # [Model]
    parser.add_argument('--name', default='JointModel', type=str, help='name of model')
    parser.add_argument('--subword', default='character_based', type=str, choices=['character_based', 'endpoint', 'startpoint', 'max_pool', 'avg_pool'], help='the method to represent word from BERT subword')
    # if language is chinese, when character-based, use_pos_tag should be False.
    parser.add_argument('--use_pos_tag', default=False, type=_str2bool, help='whether to use pos_tag')
    # [Model-Embedding]
    parser.add_argument('--bert_path', default='./data/model/bert-base-chinese/', type=str, help='path of BERT')
    parser.add_argument('--d_model', default=1024, type=int, help='model dimension')
    parser.add_argument('--partition', default=True, type=_str2bool, help='whether to use content and position partition')
    parser.add_argument('--pos_tag_emb_dropout', default=0.2, type=float, help='pos tag dropout')
    parser.add_argument('--position_emb_dropout', default=0.0, type=float, help='position embedding dropout')
    parser.add_argument('--bert_emb_dropout', default=0.2, type=float, help='bert embedding dropout')
    parser.add_argument('--emb_dropout', default=0.0, type=float, help='embedding dropout')
    # [Model-Encoder]
    parser.add_argument('--layer_num', default=3, type=int, help='encoder layer num')
    parser.add_argument('--hidden_dropout', default=0.2, type=float, help='hidden states dropout in transformer')
    parser.add_argument('--attention_dropout', default=0.2, type=float, help='attention dropout in transformer')
    parser.add_argument('--dim_ff', default=2048, type=int, help='dim of ff sublayer in transformer')
    parser.add_argument('--nhead', default=8, type=int, help='head number')
    parser.add_argument('--kqv_dim', default=64, type=int, help='dimention of kqv')
    # [Model-classifier]
    parser.add_argument('--label_hidden', default=1250, type=int, help='dimention of label_hidden')
    # [Loss]
    parser.add_argument('--lambda_scaler', default=0.3, type=float, help='cross loss scaler')
    parser.add_argument('--alpha_scaler', default=0.6, type=float, help='cross loss scaler')
    # [Evaluation]
    parser.add_argument('--evalb_path', default='./EVALB_SPMRL/', type=str, help='path of evaluation script')
    # [Constants] -- these are numeric thresholds; the original declared
    # `type=str`, so CLI-supplied values broke later integer comparisons.
    parser.add_argument('--DATASET_MAX_SNT_LENGTH', default=200, type=int, help='when sentence length larger than it, drop it')
    parser.add_argument('--BATCH_MAX_SNT_LENGTH', default=80, type=int, help='when sentence max len bigger than it, split batch to sub-batch')

    args = parser.parse_args()
    return args
| 71 | 207 | 0.717046 |
f0b08a5ba41ac597a8633116014df698252a7fe5 | 4,089 | py | Python | recognize_functions.py | CRM-UAM/Romualdo-HackUPC2017 | 9880330b648605339ada0be75c75752131edaf0e | [
"MIT"
] | null | null | null | recognize_functions.py | CRM-UAM/Romualdo-HackUPC2017 | 9880330b648605339ada0be75c75752131edaf0e | [
"MIT"
] | null | null | null | recognize_functions.py | CRM-UAM/Romualdo-HackUPC2017 | 9880330b648605339ada0be75c75752131edaf0e | [
"MIT"
] | null | null | null | import numpy as np
import time
import cv2
import sys
from voz import *
VALOR_UMBRAL=50
UMBRAL=450000
class Coordenadas:
    """2-D pixel coordinate of a tracked face slot."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # Added for debuggability; instances are mutated by actualizarPosicion.
        return 'Coordenadas(x=%r, y=%r)' % (self.x, self.y)
class Persona:
    """A tracked person: optional name plus the face image used to match them."""

    def __init__(self, name, picture):
        self.name = name
        self.picture = picture

    def __repr__(self):
        # Picture is image data and not useful in a repr; show the name only.
        return 'Persona(name=%r)' % (self.name,)
def reconocer(controller):
    """Main webcam loop: detect faces, track their positions, show the feed.

    NOTE(review): this function references names not defined in this module
    (`peopleOnCamera`, `peopleAll`, `actualizarRostro`), calls `bloqueo` with
    2 arguments although it is defined with 3, and `bloqueo` always returns
    None so `antiguo` is never refreshed -- confirm against the rest of the
    project before relying on this loop.
    """
    # Tracked-coordinate workspace
    coordenadas = []
    # Previous frame (for motion detection)
    antiguo = None
    # Load the Haar cascade file
    rostroCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    cap = cv2.VideoCapture(0)
    cap.set(38, 1)
    cap.set(3,320)
    cap.set(4,240)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        # Our operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Gaussian blur to reduce noise
        gray = cv2.GaussianBlur(gray,(21,21),0)
        if (len(peopleOnCamera) == 0):
            # NOTE(review): bloqueo() returns None on every path, so this
            # branch can never update `antiguo`.
            if (bloqueo(antiguo, gray)):
                antiguo = gray
        rostros = rostroCascade.detectMultiScale(
            gray,
            scaleFactor = 1.3,
            minNeighbors = 10,
            minSize= (30,30),
            flags = cv2.CASCADE_SCALE_IMAGE
        )
        i=0
        n_rostros = len(rostros)
        # NOTE(review): actualizarRostro is undefined in this module, and
        # actualizarPersona is defined with 2 parameters but called with 4.
        actualizarRostro(rostros,peopleOnCamera,controller)
        actualizarPersona(rostros, peopleOnCamera, peopleAll, controller)
        for (x, y, w, h) in rostros:
            # Map this detection to a tracked slot; None means no free slot.
            i = actualizarPosicion(x, y, coordenadas, n_rostros)
            if (i == None):
                continue
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            cv2.imshow('Tu face'+str(i),frame[y:y+h, x:x+w])
        # Display the resulting frame
        cv2.imshow('Rostros',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    exit()
def actualizarPosicion(x, y, coordenadas, n_rostros):
    """Match a detection at (x, y) to a tracked coordinate slot.

    Updates a slot in place and returns its index when the detection is within
    VALOR_UMBRAL pixels on both axes. Otherwise a new slot is appended, unless
    the tracker already holds one slot per detected face, in which case None
    is returned.
    """
    for indice, punto in enumerate(coordenadas):
        if abs(x - punto.x) < VALOR_UMBRAL and abs(y - punto.y) < VALOR_UMBRAL:
            punto.x = x
            punto.y = y
            return indice
    if n_rostros == len(coordenadas):
        return None
    coordenadas.append(Coordenadas(x, y))
    return len(coordenadas) - 1
def nuevoRostro(rostros, controller):
    """Register detected faces that match no known person; notify controller.

    Fixes several crashes in the original: it iterated an undefined name
    (`rostro`), instantiated a misspelled class (`Persone`), and called
    ``set.pop()`` with an argument (a TypeError). The apparent intent is
    preserved: faces matching nobody in `peopleAll` become new, unnamed
    Persona entries.
    """
    nuevos = []
    for r in rostros:
        if any(mismaPersona(conocido.picture, r) for conocido in peopleAll):
            continue  # already known -- nothing to register
        nuevos.append(Persona(None, r))
    # The original appended the *list object* itself; extend is the intent.
    peopleAll.extend(nuevos)
    controller.someoneLooksAtMe(nuevos)
def actualizarPersona(rostros, controller):
    """Drop known people whose faces no longer match any detection; notify.

    The original crashed on ``auxiliar.pop(p_a)`` (set.pop takes no argument)
    and then re-appended the departed people to `peopleAll` right after
    removing them, so nobody ever actually left. This assumes the intent --
    implied by ``controller.someoneLeaves`` -- is removal; confirm with the
    caller, which currently passes 4 arguments to this 2-argument function.
    """
    ausentes = [
        conocido for conocido in peopleAll
        if not any(mismaPersona(conocido.picture, r) for r in rostros)
    ]
    for persona in ausentes:
        peopleAll.remove(persona)
    controller.someoneLeaves(ausentes)
def bloqueo(antiguo, frame, controller):
    """Notify the controller when the frame differs strongly from the last one.

    No-op on the very first call (no previous frame yet).
    """
    # `antiguo` is an image array after the first call; the original tested
    # `antiguo == None`, which compares element-wise on an ndarray and raises
    # "truth value is ambiguous". Identity check is the correct form.
    if antiguo is None:
        return
    # Absolute per-pixel difference against the previous frame.
    resta = cv2.absdiff(antiguo, frame)
    if resta.sum() > UMBRAL:
        controller.movementDetected()
def mismaPersona(firstPerson, secondPerson):
    """Return True when two face crops look like the same person.

    Thresholds the pixel-wise difference of the two images and sums the
    bounding-box areas of the differing regions; the faces are considered
    the same when those regions cover less than 30% of the image area.

    NOTE(review): assumes both crops are same-shaped single-channel images --
    confirm at the call sites.
    """
    # Absolute per-pixel difference (the original called a bare `absdiff`,
    # which is a NameError: only `cv2` is imported).
    diff = cv2.absdiff(firstPerson, secondPerson)
    # Binarize the difference; Otsu picks the threshold automatically.
    thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    # Regions where the two inputs differ.
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 2/4 return (contours, hierarchy); OpenCV 3 returns
    # (image, contours, hierarchy). Select contours without `imutils`,
    # which was referenced but never imported.
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    errorArea = 0
    for c in cnts:
        # Accumulate the bounding-box area of each differing region.
        (x, y, w, h) = cv2.boundingRect(c)
        errorArea += w * h
    # The original compared against 0.3 * findContours(...) -- i.e. against a
    # contour list, which is meaningless. Compare against 30% of the image
    # area instead.
    total_area = firstPerson.shape[0] * firstPerson.shape[1]
    return errorArea < 0.3 * total_area
| 23.635838 | 101 | 0.668623 |
ded6679cb9d07be8e26f8f76023d7060780fd2c2 | 7,696 | py | Python | viper/icon_viper/actions/list_malware/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | viper/icon_viper/actions/list_malware/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | viper/icon_viper/actions/list_malware/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    # Human-readable description of this action (generated SDK constant).
    DESCRIPTION = "List malware for this project"
class Input:
    # Key of the action's single input parameter, matching the JSON schema.
    PROJECT_NAME = "project_name"
class Output:
    # Key under which the action returns its malware list.
    MALWARE = "malware"
class ListMalwareInput(komand.Input):
    # JSON schema describing the single required input: the project name.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "project_name": {
      "type": "string",
      "title": "Project Name",
      "order": 1
    }
  },
  "required": [
    "project_name"
  ]
}
    """)

    def __init__(self):
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; generated SDK code, left as-is.
        super(self.__class__, self).__init__(self.schema)
class ListMalwareOutput(komand.Output):
    """Output schema for the 'list malware' action.

    A required ``malware`` array of Malware objects; the Analysis/Note/Tag
    sub-schemas are repeated inside the Malware definition because this file
    is machine-generated (see the header comment: DO NOT EDIT).
    """

    schema = json.loads("""
{
  "type": "object",
  "title": "Variables",
  "properties": {
    "malware": {
      "type": "array",
      "title": "Malware",
      "items": {
        "$ref": "#/definitions/Malware"
      },
      "order": 1
    }
  },
  "required": [
    "malware"
  ],
  "definitions": {
    "Analysis": {
      "type": "object",
      "title": "Analysis",
      "properties": {
        "cmd_line": {
          "type": "string",
          "title": "Command",
          "description": "Command for running analysis",
          "order": 2
        },
        "id": {
          "type": "integer",
          "title": "Analysis ID",
          "description": "ID of analysis",
          "order": 1
        },
        "results": {
          "type": "string",
          "title": "Result",
          "description": "Result of analysis",
          "order": 4
        },
        "stored_at": {
          "type": "string",
          "title": "Created At",
          "description": "Datetime when analysis was created",
          "order": 3
        }
      }
    },
    "Malware": {
      "type": "object",
      "title": "Malware",
      "properties": {
        "analysis_set": {
          "type": "array",
          "title": "Connected Analysis",
          "description": "Analysis connected with malware",
          "items": {
            "$ref": "#/definitions/Analysis"
          },
          "order": 15
        },
        "crc32": {
          "type": "string",
          "title": "CRC32",
          "description": "CRC32 hash",
          "order": 3
        },
        "created_at": {
          "type": "string",
          "title": "Created At",
          "description": "Date when the file was created",
          "order": 12
        },
        "id": {
          "type": "integer",
          "title": "ID",
          "description": "Malware ID",
          "order": 1
        },
        "md5": {
          "type": "string",
          "title": "MD5",
          "description": "MD5 hash",
          "order": 4
        },
        "mime": {
          "type": "string",
          "title": "MIME",
          "description": "MIME type",
          "order": 11
        },
        "name": {
          "type": "string",
          "title": "Name",
          "description": "Malware name",
          "order": 2
        },
        "note_set": {
          "type": "array",
          "title": "Malware Notes",
          "description": "Notes added to malware",
          "items": {
            "$ref": "#/definitions/Note"
          },
          "order": 17
        },
        "parent": {
          "type": "string",
          "title": "Parent File",
          "description": "Parent file name",
          "order": 13
        },
        "parent_id": {
          "type": "string",
          "title": "Parent ID",
          "description": "ID of parent file",
          "order": 14
        },
        "sha1": {
          "type": "string",
          "title": "SHA1",
          "description": "SHA1 hash",
          "order": 5
        },
        "sha256": {
          "type": "string",
          "title": "SHA256",
          "description": "SHA256 hash",
          "order": 6
        },
        "sha512": {
          "type": "string",
          "title": "SHA512",
          "description": "SHA512 hash",
          "order": 7
        },
        "size": {
          "type": "integer",
          "title": "Size In Bytes",
          "description": "Size of malware file in bytes",
          "order": 8
        },
        "ssdeep": {
          "type": "string",
          "title": "Ssdeep",
          "description": "Ssdeep",
          "order": 9
        },
        "tag_set": {
          "type": "array",
          "title": "Malware Tags",
          "description": "Tags added to malware",
          "items": {
            "$ref": "#/definitions/Tag"
          },
          "order": 16
        },
        "type": {
          "type": "string",
          "title": "Type",
          "description": "Malware type",
          "order": 10
        }
      },
      "definitions": {
        "Analysis": {
          "type": "object",
          "title": "Analysis",
          "properties": {
            "cmd_line": {
              "type": "string",
              "title": "Command",
              "description": "Command for running analysis",
              "order": 2
            },
            "id": {
              "type": "integer",
              "title": "Analysis ID",
              "description": "ID of analysis",
              "order": 1
            },
            "results": {
              "type": "string",
              "title": "Result",
              "description": "Result of analysis",
              "order": 4
            },
            "stored_at": {
              "type": "string",
              "title": "Created At",
              "description": "Datetime when analysis was created",
              "order": 3
            }
          }
        },
        "Note": {
          "type": "object",
          "title": "Note",
          "properties": {
            "body": {
              "type": "string",
              "title": "Description",
              "description": "Note description",
              "order": 3
            },
            "id": {
              "type": "integer",
              "title": "ID",
              "description": "Note ID",
              "order": 1
            },
            "title": {
              "type": "string",
              "title": "Title",
              "description": "Note title",
              "order": 2
            }
          }
        },
        "Tag": {
          "type": "object",
          "title": "Tag",
          "properties": {
            "id": {
              "type": "integer",
              "title": "ID",
              "description": "Tag ID",
              "order": 1
            },
            "tag": {
              "type": "string",
              "title": "Name",
              "description": "Tag name",
              "order": 2
            }
          }
        }
      }
    },
    "Note": {
      "type": "object",
      "title": "Note",
      "properties": {
        "body": {
          "type": "string",
          "title": "Description",
          "description": "Note description",
          "order": 3
        },
        "id": {
          "type": "integer",
          "title": "ID",
          "description": "Note ID",
          "order": 1
        },
        "title": {
          "type": "string",
          "title": "Title",
          "description": "Note title",
          "order": 2
        }
      }
    },
    "Tag": {
      "type": "object",
      "title": "Tag",
      "properties": {
        "id": {
          "type": "integer",
          "title": "ID",
          "description": "Tag ID",
          "order": 1
        },
        "tag": {
          "type": "string",
          "title": "Name",
          "description": "Tag name",
          "order": 2
        }
      }
    }
  }
}
    """)

    def __init__(self):
        # Register the schema with the komand base class.
        super(self.__class__, self).__init__(self.schema)
| 23.463415 | 66 | 0.383056 |
1109493ae6bf83b41e9a8d8e737191946936a99f | 7,170 | py | Python | core/model/metric/adm_kl.py | CharleyZhao123/LibFewShot | 8a6b0fc9b89900b04fe2f57982db8f8871322426 | [
"MIT"
] | 1 | 2021-09-20T15:39:28.000Z | 2021-09-20T15:39:28.000Z | core/model/metric/adm_kl.py | CharleyZhao123/LibFewShot | 8a6b0fc9b89900b04fe2f57982db8f8871322426 | [
"MIT"
] | null | null | null | core/model/metric/adm_kl.py | CharleyZhao123/LibFewShot | 8a6b0fc9b89900b04fe2f57982db8f8871322426 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@inproceedings{DBLP:conf/ijcai/LiWHSGL20,
author = {Wenbin Li and
Lei Wang and
Jing Huo and
Yinghuan Shi and
Yang Gao and
Jiebo Luo},
title = {Asymmetric Distribution Measure for Few-shot Learning},
booktitle = {Proceedings of the Twenty-Ninth International Joint Conference on
Artificial Intelligence, {IJCAI} 2020},
pages = {2957--2963},
year = {2020},
url = {https://doi.org/10.24963/ijcai.2020/409},
doi = {10.24963/ijcai.2020/409}
}
https://arxiv.org/abs/2002.00153
Adapted from https://github.com/WenbinLee/ADM.
"""
import torch
from torch import nn
from core.utils import accuracy
from .metric_model import MetricModel
class KLLayer(nn.Module):
    """Scores query images against support classes via KL divergence
    between local-descriptor distributions (the ADM measure).

    Shape comments below follow the paper's 5-way, 15-query convention
    (75 queries, 5 classes, 64-dim descriptors); actual sizes are driven by
    way_num / shot_num / query_num and the embedding's channel count.
    """

    def __init__(self, way_num, shot_num, query_num, n_k, device, CMS=False):
        super(KLLayer, self).__init__()
        self.way_num = way_num
        self.shot_num = shot_num
        self.query_num = query_num
        self.n_k = n_k
        self.device = device
        # CMS: also measure divergence against the *remaining* classes'
        # support descriptors (the ADM-KL-CMS variant).
        self.CMS = CMS

    def _cal_cov_matrix_batch(self, feat):  # feature: e * Batch * descriptor_num * 64
        """Mean and (regularized) covariance over the descriptor dimension."""
        e, _, n_local, c = feat.size()
        feature_mean = torch.mean(feat, 2, True)  # e * Batch * 1 * 64
        feat = feat - feature_mean
        cov_matrix = torch.matmul(feat.permute(0, 1, 3, 2), feat)  # ebc1 * eb1c = ebcc
        # Unbiased estimate: divide by (n - 1).
        cov_matrix = torch.div(cov_matrix, n_local - 1)
        # Tikhonov regularization keeps the covariance invertible.
        cov_matrix = cov_matrix + 0.01 * torch.eye(c).to(self.device)  # broadcast from the last dim
        return feature_mean, cov_matrix

    def _cal_cov_batch(self, feat):  # feature: e * 25 * 64 * 21 * 21
        """Like _cal_cov_matrix_batch, but starting from conv feature maps
        (flattens the h*w spatial positions into descriptors first)."""
        e, b, c, h, w = feat.size()
        feat = feat.view(e, b, c, -1).permute(0, 1, 3, 2)
        feat_mean = torch.mean(feat, 2, True)  # e * Batch * 1 * 64
        feat = feat - feat_mean
        cov_matrix = torch.matmul(feat.permute(0, 1, 3, 2), feat)
        cov_matrix = torch.div(cov_matrix, h * w - 1)
        cov_matrix = cov_matrix + 0.01 * torch.eye(c).to(self.device)
        return feat_mean, cov_matrix

    def _calc_kl_dist_batch(self, mean1, cov1, mean2, cov2):
        """Closed-form KL divergence KL(N1 || N2) between batched Gaussians.

        :param mean1: e * 75 * 1 * 64
        :param cov1: e * 75 * 64 * 64
        :param mean2: e * 5 * 1 * 64
        :param cov2: e * 5 * 64 * 64
        :return: e * 75 * 5 matrix of divergences
        """
        cov2_inverse = torch.inverse(cov2)  # e * 5 * 64 * 64
        mean_diff = -(mean1 - mean2.squeeze(2).unsqueeze(1))  # e * 75 * 5 * 64
        # Trace term: tr(cov2^-1 @ cov1).
        matrix_prod = torch.matmul(
            cov1.unsqueeze(2), cov2_inverse.unsqueeze(1)
        )  # e * 75 * 5 * 64 * 64
        trace_dist = torch.diagonal(matrix_prod, offset=0, dim1=-2, dim2=-1)  # e * 75 * 5 * 64
        trace_dist = torch.sum(trace_dist, dim=-1)  # e * 75 * 5
        # Mahalanobis term: (mu2 - mu1)^T cov2^-1 (mu2 - mu1).
        maha_prod = torch.matmul(
            mean_diff.unsqueeze(3), cov2_inverse.unsqueeze(1)
        )  # e * 75 * 5 * 1 * 64
        maha_prod = torch.matmul(maha_prod, mean_diff.unsqueeze(4))  # e * 75 * 5 * 1 * 1
        maha_prod = maha_prod.squeeze(4)
        maha_prod = maha_prod.squeeze(3)  # e * 75 * 5
        # Log-determinant ratio: log|cov2| - log|cov1| (broadcast to e*75*5).
        matrix_det = torch.logdet(cov2).unsqueeze(1) - torch.logdet(cov1).unsqueeze(2)
        # mean1.size(3) is the dimensionality k in the closed-form KL.
        kl_dist = trace_dist + maha_prod + matrix_det - mean1.size(3)
        return kl_dist / 2.0

    def _cal_support_remaining(self, S):  # S: e * 5 * 441 * 64
        """For each class, gather the descriptors of all *other* classes."""
        e, w, d, c = S.shape
        episode_indices = torch.tensor(
            [j for i in range(S.size(1)) for j in range(S.size(1)) if i != j]
        ).to(self.device)
        S_new = torch.index_select(S, 1, episode_indices)
        S_new = S_new.view([e, w, -1, c])
        return S_new

    # Calculate KL divergence Distance
    def _cal_adm_sim(self, query_feat, support_feat):
        """Similarity matrix between queries and support classes.

        :param query_feat: e * 75 * 64 * 21 * 21
        :param support_feat: e * 25 * 64 * 21 * 21
        :return: e * 75 * 5 similarity scores (higher = more similar)
        """
        # query_mean: e * 75 * 1 * 64 query_cov: e * 75 * 64 * 64
        e, b, c, h, w = query_feat.size()
        e, s, _, _, _ = support_feat.size()
        query_mean, query_cov = self._cal_cov_batch(query_feat)
        query_feat = query_feat.view(e, b, c, -1).permute(0, 1, 3, 2).contiguous()
        # Pool each class's shot_num * h * w support descriptors together.
        support_feat = support_feat.view(e, s, c, -1).permute(0, 1, 3, 2).contiguous()
        support_set = support_feat.view(e, self.way_num, self.shot_num * h * w, c)
        # s_mean: e * 5 * 1 * 64 s_cov: e * 5 * 64 * 64
        s_mean, s_cov = self._cal_cov_matrix_batch(support_set)
        # Similarity is the negated KL divergence (smaller KL => higher score).
        kl_dis = -self._calc_kl_dist_batch(query_mean, query_cov, s_mean, s_cov)  # e * 75 * 5
        if self.CMS:  # ADM_KL_CMS
            # Add the (positive) divergence to the remaining classes, so a
            # query is rewarded for being far from the other classes.
            support_set_remain = self._cal_support_remaining(support_set)
            s_remain_mean, s_remain_cov = self._cal_cov_matrix_batch(
                support_set_remain
            )  # s_remain_mean: e * 5 * 1 * 64 s_remain_cov: e * 5 * 64 * 64
            kl_dis2 = self._calc_kl_dist_batch(
                query_mean, query_cov, s_remain_mean, s_remain_cov
            )  # e * 75 * 5
            kl_dis = kl_dis + kl_dis2
        return kl_dis

    def forward(self, query_feat, support_feat):
        return self._cal_adm_sim(query_feat, support_feat)
class ADM_KL(MetricModel):
    """Asymmetric Distribution Measure (ADM) few-shot model with a
    KL-divergence metric.

    Li et al., "Asymmetric Distribution Measure for Few-shot Learning",
    IJCAI 2020 (https://arxiv.org/abs/2002.00153).
    """

    def __init__(self, n_k=3, CMS=False, **kwargs):
        """
        :param n_k: number of nearest descriptors (forwarded to KLLayer).
        :param CMS: enable the cross-class (CMS) variant of the measure.
        """
        super(ADM_KL, self).__init__(**kwargs)
        self.n_k = n_k
        # way/shot/query_num and device are set by the MetricModel base class.
        self.klLayer = KLLayer(self.way_num, self.shot_num, self.query_num, n_k, self.device, CMS)
        self.loss_func = nn.CrossEntropyLoss()

    def _score_batch(self, batch):
        """Shared pipeline for set_forward / set_forward_loss (their bodies
        were previously duplicated): embed the images, split them into
        episodes and score the queries with the KL layer.

        :param batch: (images, global_target) pair from the data loader.
        :return: (logits of shape [episode*way*query, way], query_target)
        """
        image, _global_target = batch
        image = image.to(self.device)
        episode_size = image.size(0) // (self.way_num * (self.shot_num + self.query_num))
        feat = self.emb_func(image)
        (
            support_feat,
            query_feat,
            _support_target,
            query_target,
        ) = self.split_by_episode(feat, mode=2)
        output = self.klLayer(query_feat, support_feat).view(
            episode_size * self.way_num * self.query_num, -1
        )
        return output, query_target

    def set_forward(self, batch):
        """Inference pass.

        :param batch: (images, global_target) pair.
        :return: (logits, accuracy)
        """
        output, query_target = self._score_batch(batch)
        acc = accuracy(output, query_target)
        return output, acc

    def set_forward_loss(self, batch):
        """Training pass.

        :param batch: (images, global_target) pair.
        :return: (logits, accuracy, cross-entropy loss)
        """
        output, query_target = self._score_batch(batch)
        loss = self.loss_func(output, query_target)
        acc = accuracy(output, query_target)
        return output, acc, loss
5404ba0c4099f6d8718375e33f1c69e4c0bc7eae | 2,980 | py | Python | 07_gashlycrumb/test.py | ddijk/tiny_python_projects | a9416039559b34bf8d36f1c043d6670f86a4583f | [
"MIT"
] | null | null | null | 07_gashlycrumb/test.py | ddijk/tiny_python_projects | a9416039559b34bf8d36f1c043d6670f86a4583f | [
"MIT"
] | null | null | null | 07_gashlycrumb/test.py | ddijk/tiny_python_projects | a9416039559b34bf8d36f1c043d6670f86a4583f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""tests for gashlycrumb.py"""
import os
import re
import random
import string
from subprocess import getstatusoutput
prg = './gashlycrumb.py'
# --------------------------------------------------
def file_flag():
"""Either -f or --file"""
return '-f' if random.randint(0, 1) else '--file'
# --------------------------------------------------
def test_exists():
"""exists"""
assert os.path.isfile(prg)
# --------------------------------------------------
def test_usage():
"""usage"""
for flag in ['-h', '--help']:
rv, out = getstatusoutput(f'{prg} {flag}')
assert rv == 0
assert re.match("usage", out, re.IGNORECASE)
# --------------------------------------------------
def test_bad_file():
"""Test for bad --file"""
bad = random_string()
letter = random.choice(string.ascii_lowercase)
rv, out = getstatusoutput(f'{prg} {letter} -f {bad}')
assert rv != 0
expected = f"No such file or directory: '{bad}'"
assert re.search(expected, out)
# --------------------------------------------------
def test_a():
"""Test for 'a'"""
rv, out = getstatusoutput(f'{prg} a')
assert rv == 0
expected = 'A is for Amy who fell down the stairs.'
assert out.strip() == expected
# --------------------------------------------------
def test_b_c():
"""Test for 'b c'"""
rv, out = getstatusoutput(f'{prg} b c')
assert rv == 0
expected = ('B is for Basil assaulted by bears.\n'
'C is for Clara who wasted away.')
assert out.strip() == expected
# --------------------------------------------------
def test_y():
"""Test for 'y'"""
rv, out = getstatusoutput(f'{prg} Y')
assert rv == 0
expected = 'Y is for Yorick whose head was bashed in.'
assert out.strip() == expected
# --------------------------------------------------
def test_o_alternate():
""" Test for 'o' from 'alternate.txt' """
rv, out = getstatusoutput(f'{prg} o P q -f alternate.txt')
assert rv == 0
expected = ('O is for Orville, who fell in a canyon.\n'
'P is for Paul, strangled by his banyan.\n'
'Q is for Quintanna, flayed in the night.')
assert out.strip() == expected
# --------------------------------------------------
def test_bad_letter():
"""Test for bad input"""
rv, out = getstatusoutput(f'{prg} 5 CH')
assert rv == 0
expected = ('I do not know "5".\n' 'I do not know "CH".')
assert out.strip() == expected
# --------------------------------------------------
def random_string():
"""generate a random string"""
k = random.randint(5, 10)
return ''.join(random.choices(string.ascii_letters + string.digits, k=k))
# --------------------------------------------------
def test_list_to_upper():
"""Test map to upper"""
mylist=['aap','noot', 'mies']
res = [x.upper() for x in mylist]
assert res == ['AAP','NOOT','MIES']
| 25.042017 | 77 | 0.474497 |
acfdadfb81f369c78c783f1b32400870685a6acf | 1,030 | py | Python | coremltools/test/sklearn_tests/test_feature_names.py | LaudateCorpus1/coremltools | 777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc | [
"BSD-3-Clause"
] | 2,740 | 2017-10-03T23:19:01.000Z | 2022-03-30T15:16:39.000Z | coremltools/test/sklearn_tests/test_feature_names.py | LaudateCorpus1/coremltools | 777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc | [
"BSD-3-Clause"
] | 1,057 | 2017-10-05T22:47:01.000Z | 2022-03-31T23:51:15.000Z | coremltools/test/sklearn_tests/test_feature_names.py | LaudateCorpus1/coremltools | 777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc | [
"BSD-3-Clause"
] | 510 | 2017-10-04T19:22:28.000Z | 2022-03-31T12:16:52.000Z | # Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import coremltools.models._feature_management as fm
import coremltools.models.datatypes as dt
import unittest
from coremltools._deps import _HAS_SKLEARN
@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
class FeatureManagementTests(unittest.TestCase):
def test_all_strings(self):
features = ["a", "b", "c"]
processed_features = [
("a", dt.Double()),
("b", dt.Double()),
("c", dt.Double()),
]
out = fm.process_or_validate_features(features)
self.assertEqual(out, processed_features)
self.assertTrue(fm.is_valid_feature_list(out))
def test_single_array(self):
self.assertEqual(
fm.process_or_validate_features("a", num_dimensions=10),
[("a", dt.Array(10))],
)
| 34.333333 | 82 | 0.666019 |
10a067b03b079d9e7f7278e2eeb4b87d4ca94834 | 6,560 | py | Python | rl/agents/cem.py | svdt/keras-rl | f5b560410daa1e8e86af6ac7bfb18f14c35c504b | [
"MIT"
] | null | null | null | rl/agents/cem.py | svdt/keras-rl | f5b560410daa1e8e86af6ac7bfb18f14c35c504b | [
"MIT"
] | null | null | null | rl/agents/cem.py | svdt/keras-rl | f5b560410daa1e8e86af6ac7bfb18f14c35c504b | [
"MIT"
] | null | null | null | from __future__ import division
from collections import deque
from copy import deepcopy
import numpy as np
import keras.backend as K
from keras.models import Model
from rl.core import Agent
from rl.util import *
class CEMAgent(Agent):
    """Cross-Entropy Method (CEM) agent.

    Maintains a diagonal Gaussian over the flattened network weights
    (``theta`` = concatenated means and standard deviations).  Weights are
    sampled from this distribution per episode; after every
    ``train_interval`` episodes the distribution is refit to the elite
    fraction of sampled weight vectors, ranked by episode reward.
    """

    def __init__(self, model, nb_actions, memory, batch_size=50, nb_steps_warmup=1000,
                 train_interval=50, elite_frac=0.05, memory_interval=1, theta_init=None,
                 noise_decay_const=0.0, noise_ampl=0.0, **kwargs):
        super(CEMAgent, self).__init__(**kwargs)

        # Parameters.
        self.nb_actions = nb_actions
        self.batch_size = batch_size
        self.elite_frac = elite_frac
        # Number of elite samples kept at each distribution update.
        self.num_best = int(self.batch_size * self.elite_frac)
        self.nb_steps_warmup = nb_steps_warmup
        self.train_interval = train_interval
        self.memory_interval = memory_interval

        # if using noisy CEM, the minimum standard deviation will be ampl * exp (- decay_const * step )
        self.noise_decay_const = noise_decay_const
        self.noise_ampl = noise_ampl

        # default initial mean & cov, override this by passing an theta_init argument
        self.init_mean = 0.0
        self.init_stdev = 1.0

        # Related objects.
        self.memory = memory
        self.model = model
        # Per-layer shapes/sizes used to flatten and un-flatten the weights.
        self.shapes = [w.shape for w in model.get_weights()]
        self.sizes = [w.size for w in model.get_weights()]
        self.num_weights = sum(self.sizes)

        # store the best result seen during training, as a tuple (reward, flat_weights)
        self.best_seen = (-np.inf, np.zeros(self.num_weights))

        # theta holds [means | stdevs] of the sampling distribution.
        self.theta = np.zeros(self.num_weights*2)
        self.update_theta(theta_init)

        # State.
        self.episode = 0
        self.compiled = False
        self.reset_states()

    def compile(self):
        # The optimizer/loss are irrelevant for CEM (no gradient updates);
        # compiling only makes the Keras model usable for prediction.
        self.model.compile(optimizer='sgd', loss='mse')
        self.compiled = True

    def load_weights(self, filepath):
        self.model.load_weights(filepath)

    def save_weights(self, filepath, overwrite=False):
        self.model.save_weights(filepath, overwrite=overwrite)

    def get_weights_flat(self, weights):
        """Flatten a list of per-layer weight arrays into one 1-D vector."""
        weights_flat = np.zeros(self.num_weights)

        pos = 0
        for i_layer, size in enumerate(self.sizes):
            weights_flat[pos:pos+size] = weights[i_layer].flatten()
            pos += size
        return weights_flat

    def get_weights_list(self, weights_flat):
        """Inverse of get_weights_flat: rebuild per-layer weight arrays."""
        weights = []
        pos = 0
        for i_layer, size in enumerate(self.sizes):
            arr = weights_flat[pos:pos+size].reshape(self.shapes[i_layer])
            weights.append(arr)
            pos += size
        return weights

    def reset_states(self):
        self.recent_observation = None
        self.recent_action = None

    def select_action(self, state, stochastic=False):
        """Pick an action; softmax-sample during training, argmax otherwise."""
        batch = np.array([state])
        if self.processor is not None:
            batch = self.processor.process_state_batch(batch)

        action = self.model.predict_on_batch(batch).flatten()
        if stochastic or self.training:
            # Treat the network output as logits and sample from its softmax.
            return np.random.choice(np.arange(self.nb_actions), p=np.exp(action) / np.sum(np.exp(action)))
        return np.argmax(action)

    def update_theta(self, theta):
        """Replace the sampling distribution, validating shape/values;
        with theta=None, reset to the default mean/stdev."""
        if (theta is not None):
            assert theta.shape == self.theta.shape, "Invalid theta, shape is {0} but should be {1}".format(theta.shape, self.theta.shape)
            assert (not np.isnan(theta).any()), "Invalid theta, NaN encountered"
            assert (theta[self.num_weights:] >= 0.).all(), "Invalid theta, standard deviations must be nonnegative"
            self.theta = theta
        else:
            means = np.ones(self.num_weights) * self.init_mean
            stdevs = np.ones(self.num_weights) * self.init_stdev
            self.theta = np.hstack((means, stdevs))

    def choose_weights(self):
        """Sample a fresh weight vector from theta and load it into the model."""
        mean = self.theta[:self.num_weights]
        std = self.theta[self.num_weights:]
        weights_flat = std * np.random.randn(self.num_weights) + mean

        sampled_weights = self.get_weights_list(weights_flat)
        self.model.set_weights(sampled_weights)

    def forward(self, observation):
        # Select an action.
        state = self.memory.get_recent_state(observation)
        action = self.select_action(state)

        # Book-keeping.
        self.recent_observation = observation
        self.recent_action = action

        return action

    @property
    def layers(self):
        return self.model.layers[:]

    def backward(self, reward, terminal):
        # Store most recent experience in memory.
        if self.step % self.memory_interval == 0:
            self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
                               training=self.training)

        metrics = [np.nan for _ in self.metrics_names]
        if not self.training:
            # We're done here. No need to update the experience memory since we only use the working
            # memory to obtain the state over the most recent observations.
            return metrics

        if terminal:
            params = self.get_weights_flat(self.model.get_weights())
            self.memory.finalize_episode(params)

            if self.step > self.nb_steps_warmup and self.episode % self.train_interval == 0:
                params, reward_totals = self.memory.sample(self.batch_size)
                # Indices of the elite (highest-reward) samples.
                best_idx = np.argsort(np.array(reward_totals))[-self.num_best:]
                best = np.vstack([params[i] for i in best_idx])

                # Track the single best weight vector seen so far.
                if reward_totals[best_idx[-1]] > self.best_seen[0]:
                    self.best_seen = (reward_totals[best_idx[-1]], params[best_idx[-1]])

                metrics = [np.mean(np.array(reward_totals)[best_idx])]
                if self.processor is not None:
                    metrics += self.processor.metrics

                # Refit the Gaussian to the elites; min_std keeps exploration
                # alive (decays exponentially with the global step).
                min_std = self.noise_ampl * np.exp(-self.step * self.noise_decay_const)

                mean = np.mean(best, axis=0)
                std = np.std(best, axis=0) + min_std
                new_theta = np.hstack((mean, std))
                self.update_theta(new_theta)

            # Sample fresh weights for the next episode.
            self.choose_weights()
            self.episode += 1
        return metrics

    def _on_train_end(self):
        # After training, load the best weight vector ever observed.
        self.model.set_weights(self.get_weights_list(self.best_seen[1]))

    @property
    def metrics_names(self):
        names = ['mean_best_reward']
        if self.processor is not None:
            names += self.processor.metrics_names[:]
        return names
| 37.062147 | 136 | 0.627591 |
503dd910ee3ae91b404de79df44bbf83367e2347 | 1,264 | py | Python | Week_7/translate_mRNA.py | actaylor05/learning_python | d8c72fdb7c07bac4176a4418f83d75013db2245a | [
"MIT"
] | null | null | null | Week_7/translate_mRNA.py | actaylor05/learning_python | d8c72fdb7c07bac4176a4418f83d75013db2245a | [
"MIT"
] | null | null | null | Week_7/translate_mRNA.py | actaylor05/learning_python | d8c72fdb7c07bac4176a4418f83d75013db2245a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import biotools as bt
# Use argparse
# Write a program that translates mRNA
# Assume the protein encoded is the longest ORF
# Use a dictionary for translation and store in biotools

# Command line: a single required --file argument pointing at a FASTA file
# (read_fasta also handles .gz, per the example below -- TODO confirm).
parser = argparse.ArgumentParser(
    description='Translates mRNA.')
parser.add_argument('--file', required=True, type=str,
                    metavar='<str>', help='FASTA file')
arg = parser.parse_args()

for name, seq in bt.read_fasta(arg.file):
    pro = []
    # Translate only the longest open reading frame of each transcript;
    # assumes bt.longest_orf returns a frame-aligned sequence -- TODO confirm.
    seq1 = bt.longest_orf(seq)
    for i in range(0, len(seq1), 3):
        codon = seq1[i:i+3]
        # bt.aa maps codons to one-letter amino acids; unknown codons
        # (e.g. containing N) become 'X'.
        if codon in bt.aa: pro.append(bt.aa[codon])
        else: pro.append('X')  # how to deal with Ns and such
    # Emit the protein in FASTA format.
    print(f'>{name}')
    print(''.join(pro))

"""
python3 translate_mRNA.py --file transcripts.fasta.gz

>CBG00001.1
MTFCENKNLPKPPSDRCQVVVISILSMILDFYLKYNPDKHWAHLFYGASPILEILVIFGMLANSVYGNKLAMFACVLDLVSGVFCLLTLPVISVAENATGVRLHLPYISTFHSQFSFQVSTPVDLFYVATFLGFVSTILILLFLILDALKFMKLRKLRNEDLEKEKKMNPIEKV*
>CBG00006.1
MNGVEKVNKYFDIKDKRDFLYHFGFGVDTLDIKAVFGDTKFVCTGGSPGRFKLYAEWFAKETSIPCSENLSRSDRFVIYKTGPVCWINHGMGTPSLSIMLVESFKLMHHAGVKNPTFIRLGTSGGVGVPPGTVVVSTGAMNAELGDTYVQVIAGKRIERPTQLDATLREALCAVGKEKNIPVETGKTMCADDFYEGQMRLDGYFCDYEEEDKYAFLRKLNSLGVRNIEMESTCFASFTCRAGFPSAIVCVTLLNRMDGDQVQIDKEKYIEYEERPFRLVTAYIRQQTGV*
etc.
"""
| 30.829268 | 290 | 0.802215 |
88c48bf92629ae84b1da10e99efc74471bd1b24d | 7,990 | py | Python | server.py | openHPI/Embedded-Smart-Home-2016 | f234f58bcde5f841e793d8c9a63388b0a333e143 | [
"MIT"
] | 1 | 2017-07-11T14:14:48.000Z | 2017-07-11T14:14:48.000Z | server.py | openHPI/Embedded-Smart-Home-2016 | f234f58bcde5f841e793d8c9a63388b0a333e143 | [
"MIT"
] | null | null | null | server.py | openHPI/Embedded-Smart-Home-2016 | f234f58bcde5f841e793d8c9a63388b0a333e143 | [
"MIT"
] | 1 | 2020-06-18T20:44:05.000Z | 2020-06-18T20:44:05.000Z | import RPi.GPIO as GPIO
from http.server import BaseHTTPRequestHandler, HTTPServer
import threading
import spidev
import time
from datetime import datetime
import os
import json
global sensor_display_thread
# HTTPRequestHandler class
class FirstHTTP_RequestHandler(BaseHTTPRequestHandler):
    """Serves the latest sensor readings as a JSON document."""

    def do_GET(self):
        """Answer every GET request with HTTP 200 and a JSON payload."""
        # Collect the current readings from the background sensor thread.
        payload = {
            "temp": sensor_display_thread.get_temp(),
            "tempout": sensor_display_thread.get_temp_out(),
            "humidity": sensor_display_thread.get_humidity(),
            "door": sensor_display_thread.get_door(),
        }
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(payload).encode("utf-8"))
class SensorDisplayThread(threading.Thread):
    """Background thread that reads temperature/humidity/door sensors via an
    MCP3008 ADC (SPI) and shows a summary on an HD44780-style 16x2 LCD
    driven in 4-bit mode over GPIO."""

    # Shared state, read by the HTTP handler via the getters below.
    spi = None;
    temp = 0
    temp_out = 0
    humidity = 0
    door = 0

    # Commands
    LCD_CLEARDISPLAY = 0x01
    LCD_RETURNHOME = 0x02
    LCD_ENTRYMODESET = 0x04
    LCD_DISPLAYCONTROL = 0x08
    LCD_CURSORSHIFT = 0x10
    LCD_FUNCTIONSET = 0x20
    LCD_SETCGRAMADDR = 0x40
    LCD_SETDDRAMADDR = 0x80

    # Entry flags
    LCD_ENTRYRIGHT = 0x00
    LCD_ENTRYLEFT = 0x02
    LCD_ENTRYSHIFTINCREMENT = 0x01
    LCD_ENTRYSHIFTDECREMENT = 0x00

    # Control flags
    LCD_DISPLAYON = 0x04
    LCD_DISPLAYOFF = 0x00
    LCD_CURSORON = 0x02
    LCD_CURSOROFF = 0x00
    LCD_BLINKON = 0x01
    LCD_BLINKOFF = 0x00

    # Move flags
    LCD_DISPLAYMOVE = 0x08
    LCD_CURSORMOVE = 0x00
    LCD_MOVERIGHT = 0x04
    LCD_MOVELEFT = 0x00

    # Function set flags
    LCD_8BITMODE = 0x10
    LCD_4BITMODE = 0x00
    LCD_2LINE = 0x08
    LCD_1LINE = 0x00
    LCD_5x10DOTS = 0x04
    LCD_5x8DOTS = 0x00

    # DDRAM address offset of each display row.
    LCD_ROW_OFFSETS = (0x00, 0x40, 0x14, 0x54)

    # GPIO Pins (BCM numbering)
    lcd_rs = 25
    lcd_en = 23
    lcd_d4 = 26
    lcd_d5 = 16
    lcd_d6 = 20
    lcd_d7 = 21
    lcd_backlight = 24

    # Display Settings
    lcd_columns = 16
    lcd_rows = 2

    def __init__(self):
        threading.Thread.__init__(self)

    def get_temp(self):
        return self.temp

    def get_temp_out(self):
        return self.temp_out

    def get_humidity(self):
        return self.humidity

    def get_door(self):
        return self.door

    # Function to read SPI data from MCP3008 chip
    # Channel must be an integer 0-7
    def read_channel(self, channel):
        adc = self.spi.xfer2([1, (8+channel) << 4, 0])
        # Combine the two low bits of byte 1 with byte 2 into a 10-bit value.
        data = ((adc[1] & 3) << 8) + adc[2]
        return data

    # Function to convert data to voltage level,
    # rounded to specified number of decimal places.
    def convert_volts(self, data, places):
        # 3.3 V reference, 10-bit ADC (0-1023).
        volts = (data * 3.3) / float(1023)
        volts = round(volts, places)
        return volts

    def volts_to_celsius(self, volts, places):
        # Sensor outputs 10 mV per Kelvin (LM335-style -- TODO confirm part).
        return round((volts * 100) - 273.15, places)

    def volts_to_humidity(self, volts, temp, places):
        # Piecewise-linear calibration, selected by temperature band;
        # constants presumably from the sensor datasheet -- TODO confirm.
        if temp <= 22.5:
            humidity = round((volts - 0.128) / 0.0286, places)
        elif temp <= 27.5:
            humidity = round((volts - 0.085) / 0.0294, places)
        else:
            humidity = round((volts - 0.038) / 0.0296, places)
        return humidity

    def initDisplay(self):
        for pin in (self.lcd_rs, self.lcd_en, self.lcd_d4, self.lcd_d5, self.lcd_d6, self.lcd_d7, self.lcd_backlight):
            GPIO.setup(pin, GPIO.OUT)

        # Initialize the display (0x33/0x32 switch it into 4-bit mode).
        self.write8(0x33)
        self.write8(0x32)
        self.write8(self.LCD_DISPLAYCONTROL | self.LCD_DISPLAYON | self.LCD_CURSOROFF | self.LCD_BLINKOFF)
        self.write8(self.LCD_FUNCTIONSET | self.LCD_4BITMODE | self.LCD_1LINE | self.LCD_2LINE | self.LCD_5x8DOTS)
        self.write8(self.LCD_ENTRYMODESET | self.LCD_ENTRYLEFT | self.LCD_ENTRYSHIFTDECREMENT)
        self.write8(self.LCD_CLEARDISPLAY)  # Clear the display
        time.sleep(0.5)

    def write8(self, value, char_mode=False):
        # Avoid writing to the controller too quickly.
        time.sleep(0.001)
        # Set character / data bit.
        GPIO.output(self.lcd_rs, char_mode)
        # Write the upper 4 bits.
        GPIO.output(self.lcd_d4, ((value >> 4) & 1) > 0)
        GPIO.output(self.lcd_d5, ((value >> 5) & 1) > 0)
        GPIO.output(self.lcd_d6, ((value >> 6) & 1) > 0)
        GPIO.output(self.lcd_d7, ((value >> 7) & 1) > 0)
        self.pulse_enable()
        # Write the lower 4 bits.
        GPIO.output(self.lcd_d4, (value & 1) > 0)
        GPIO.output(self.lcd_d5, ((value >> 1) & 1) > 0)
        GPIO.output(self.lcd_d6, ((value >> 2) & 1) > 0)
        GPIO.output(self.lcd_d7, ((value >> 3) & 1) > 0)
        self.pulse_enable()

    def pulse_enable(self):
        # Latch the current data bits with a low-high-low pulse on EN.
        GPIO.output(self.lcd_en, 0)
        time.sleep(0.001)
        GPIO.output(self.lcd_en, 1)
        time.sleep(0.001)
        GPIO.output(self.lcd_en, 0)
        time.sleep(0.001)

    def set_cursor(self, col, row):
        # Clamp to the last row if an out-of-range row is requested.
        if row > self.lcd_rows:
            row = self.lcd_rows-1
        self.write8(self.LCD_SETDDRAMADDR | (col + self.LCD_ROW_OFFSETS[row]))

    def message(self, text):
        """Write text to the display ('\\n' moves to the next row)."""
        line = 0
        print(text)
        for char in text:
            if char == '\n':
                line += 1
                # Start column depends on the configured text direction.
                col = 0 if (self.LCD_ENTRYLEFT | self.LCD_ENTRYSHIFTDECREMENT) & self.LCD_ENTRYLEFT > 0 else self.lcd_columns-1
                self.set_cursor(col, line)
            else:
                self.write8(ord(char), True)

    def run(self):
        GPIO.setmode(GPIO.BCM)
        self.initDisplay()
        while True:
            # GPIO pin 12 reflects the door switch state.
            # NOTE(review): setup as OUT but later read with GPIO.input --
            # looks like it should be GPIO.IN; confirm against the wiring.
            GPIO.setup(12, GPIO.OUT)

            # SPI bus (re-opened every iteration -- presumably harmless on
            # spidev, but opening once before the loop would suffice).
            self.spi = spidev.SpiDev()
            self.spi.open(0, 0)

            ### READ SENSOR DATA ###
            # Define sensor channels
            temp_in_channel = 1
            temp_out_channel = 2
            humid_in_channel = 0

            # Read the sensor data
            adc_temp_in = self.read_channel(temp_in_channel)
            volts_temp_in = self.convert_volts(adc_temp_in, 4)
            celcius_temp_in = self.volts_to_celsius(volts_temp_in, 1)

            adc_temp_out = self.read_channel(temp_out_channel)
            volts_temp_out = self.convert_volts(adc_temp_out, 4)
            celcius_temp_out = self.volts_to_celsius(volts_temp_out, 1)

            adc_humid_in = self.read_channel(humid_in_channel)
            volts_humid_in = self.convert_volts(adc_humid_in, 4)
            humidity_in = self.volts_to_humidity(volts_humid_in, celcius_temp_in, 1)

            # Save results for json and display
            self.temp = celcius_temp_in
            self.temp_out = celcius_temp_out
            self.humidity = humidity_in
            self.door = GPIO.input(12)

            ### WRITE INFORMATION ON DISPLAY ###
            # Current Date
            timestamp = datetime.now()
            str_time = timestamp.strftime('%d.%m.%y')

            GPIO.output(self.lcd_backlight, 1)
            # chr(223) is the degree symbol in the HD44780 character set.
            self.message(str(int(round(celcius_temp_in))) + chr(223) + 'C '
                         + str(int(round(humidity_in, 0))) + '% '
                         + str(int(round(celcius_temp_out))) + chr(223) + 'C\n '
                         + str_time)
            # Reset cursor position
            self.set_cursor(0, 0)

            # Wait before repeating loop
            time.sleep(10)
# Start the background thread that samples the sensors and drives the LCD.
sensor_display_thread = SensorDisplayThread()
sensor_display_thread.start()

# Serve the readings as JSON on port 8080, on all interfaces (blocks forever).
server_address = ('0.0.0.0', 8080)
httpd = HTTPServer(server_address, FirstHTTP_RequestHandler)
print('running server...')
httpd.serve_forever()
| 29.702602 | 127 | 0.581852 |
b20f88fb9795e1b2e76dec7261899de5e78f1709 | 732 | py | Python | setup.py | anijackich/sdamgia-api | efae4176ea07c0c3127d1044adf01dcc1e171b6c | [
"MIT"
] | 5 | 2021-04-08T01:18:05.000Z | 2022-01-16T09:57:01.000Z | setup.py | anijackich/sdamgia-api | efae4176ea07c0c3127d1044adf01dcc1e171b6c | [
"MIT"
] | 2 | 2021-04-21T13:48:04.000Z | 2021-09-05T17:28:50.000Z | setup.py | anijackich/sdamgia-api | efae4176ea07c0c3127d1044adf01dcc1e171b6c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup
# Packaging metadata for the sdamgia-api distribution (PyPI name differs
# from the importable package name 'sdamgia').
setup(
    name='sdamgia-api',
    version='0.1.8',
    author='anijack',
    author_email='anijackich@gmail.com',
    description='Python модуль для взаимодействия с образовательным порталом СДАМ ГИА',
    long_description=open('README.md', encoding="utf8").read(),
    long_description_content_type='text/markdown',
    url='https://github.com/anijackich/sdamgia-api',
    license='MIT',
    install_requires=['requests', 'beautifulsoup4', 'pyppeteer', 'grabzit', 'html2image'],
    packages = ['sdamgia'],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3'
    ]
)
| 31.826087 | 90 | 0.65847 |
246118c25308df88d92c1061cdda3967f8e56066 | 23 | py | Python | refcount/__init__.py | jmp75/pyrefcount | 89ba62977fe21d755077f43fefb01451fec3733a | [
"Net-SNMP",
"Xnet",
"MIT"
] | 1 | 2021-01-12T10:35:12.000Z | 2021-01-12T10:35:12.000Z | tests/__init__.py | csiro-hydroinformatics/pyrefcount | daf3a94ba33e181f2311395d345a6aa0ef001f99 | [
"Net-SNMP",
"Xnet",
"MIT"
] | 2 | 2019-01-02T00:53:16.000Z | 2019-01-06T22:00:09.000Z | tests/__init__.py | csiro-hydroinformatics/pyrefcount | daf3a94ba33e181f2311395d345a6aa0ef001f99 | [
"Net-SNMP",
"Xnet",
"MIT"
] | null | null | null | # required for python2
| 11.5 | 22 | 0.782609 |
1e765ba00bdd1470dbd63f254c8589b6f69e1c82 | 983 | py | Python | src/dataload/sources/ensembl/ensembl_interpro.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
] | null | null | null | src/dataload/sources/ensembl/ensembl_interpro.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
] | null | null | null | src/dataload/sources/ensembl/ensembl_interpro.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
] | null | null | null | from .ensembl_base import EnsemblParser
# Collection metadata consumed by the dataload framework: target MongoDB
# collection name and the type of the document _id.
__metadata__ = {
    '__collection__': 'ensembl_interpro',
    'id_type': 'ensembl_gene'
}
def load_genedoc(self=None):
    # Build the ensembl_gene -> InterPro mapping documents via EnsemblParser.
    # `self` defaults to None so this can be called either as a plain module
    # function or bound as a loader hook by the framework.
    ep = EnsemblParser()
    ensembl2interpro = ep.load_ensembl2interpro()
    return ensembl2interpro
def get_mapping(self=None):
    """Return the Elasticsearch mapping for the "interpro" field.

    Each InterPro annotation carries a lowercase-analyzed id plus two
    description strings that are stored but not indexed.
    """
    interpro_properties = {
        "id": {
            "type": "string",
            "analyzer": "string_lowercase",
            #"index_name": "interpro"
        },
        "desc": {
            "type": "string",
            "index": "no",
            "include_in_all": False,
        },
        "short_desc": {
            "type": "string",
            "index": "no",
            "include_in_all": False,
        },
    }
    return {
        "interpro": {
            "dynamic": False,
            #"path": "just_name",
            "properties": interpro_properties,
        },
    }
| 24.575 | 51 | 0.427263 |
f44d53808b8a76114bf06a3aa5cf1b9e0b0a4208 | 222 | py | Python | proto/run_codegen.py | QWERDF007/Serving | 436a1a1c26949fa0c072eb1ada7dd9288d885743 | [
"Apache-2.0"
] | null | null | null | proto/run_codegen.py | QWERDF007/Serving | 436a1a1c26949fa0c072eb1ada7dd9288d885743 | [
"Apache-2.0"
] | null | null | null | proto/run_codegen.py | QWERDF007/Serving | 436a1a1c26949fa0c072eb1ada7dd9288d885743 | [
"Apache-2.0"
] | null | null | null | """Runs protoc with the gRPC plugin to generate messages and gRPC stubs."""
from grpc_tools import protoc
# Equivalent to running:
#   python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. pipeline_service.proto
# The leading '' stands in for argv[0], which protoc.main ignores.
protoc.main((
    '',
    '-I.',
    '--python_out=.',
    '--grpc_python_out=.',
    'pipeline_service.proto',))
| 20.181818 | 75 | 0.630631 |
fb2ffba7722f5074aa77024778b6cadcc6b05a6d | 1,264 | py | Python | tensorflow/neural_network_decision_tree.py | ssehztirom/DNDT | 38f25cf6aa8531c50fee8dc6a755234d8ee53a89 | [
"Unlicense"
] | null | null | null | tensorflow/neural_network_decision_tree.py | ssehztirom/DNDT | 38f25cf6aa8531c50fee8dc6a755234d8ee53a89 | [
"Unlicense"
] | null | null | null | tensorflow/neural_network_decision_tree.py | ssehztirom/DNDT | 38f25cf6aa8531c50fee8dc6a755234d8ee53a89 | [
"Unlicense"
] | null | null | null | import tensorflow as tf
from functools import reduce
def tf_kron_prod(a, b):
    # Row-wise Kronecker product: for each sample i, the outer product of
    # a[i] and b[i], flattened to shape (N, dim_a * dim_b).
    res = tf.compat.v1.einsum('ij,ik->ijk', a, b)
    res = tf.compat.v1.reshape(res, [-1, tf.compat.v1.reduce_prod(res.shape[1:])])
    return res
def tf_bin(x, cut_points, temperature=0.1):
    # x is a N-by-1 matrix (column vector)
    # cut_points is a D-dim vector (D is the number of cut-points)
    # this function produces a N-by-(D+1) matrix, each row has only one element being one and the rest are all zeros
    D = cut_points.get_shape().as_list()[0]
    # W enumerates the D+1 bins as the slopes 1..D+1 of a set of affine
    # functions; the bin whose affine score is largest wins the softmax.
    W = tf.compat.v1.reshape(tf.compat.v1.linspace(1.0, D + 1.0, D + 1), [1, -1])
    cut_points = tf.sort(cut_points)  # make sure cut_points is monotonically increasing
    b = tf.compat.v1.cumsum(tf.compat.v1.concat([tf.compat.v1.constant(0.0, shape=[1]), -cut_points], 0))
    h = tf.compat.v1.matmul(x, W) + b
    # Soft binning: as temperature -> 0 the softmax approaches a hard
    # one-hot assignment of each sample to a single bin.
    res = tf.compat.v1.nn.softmax(h / temperature)
    return res
def nn_decision_tree(x, cut_points_list, leaf_score, temperature=0.1):
    # cut_points_list contains the cut_points for each dimension of feature
    # The Kronecker product of the per-feature soft bin memberships yields a
    # soft indicator over all leaves; leaf_score maps leaves to outputs.
    leaf = reduce(tf_kron_prod,
                  map(lambda z: tf_bin(x[:, z[0]:z[0] + 1], z[1], temperature), enumerate(cut_points_list)))
    return tf.compat.v1.matmul(leaf, leaf_score)
| 43.586207 | 116 | 0.675633 |
cbb7c2c818794826f986e01a5e53af0c20c50909 | 820 | py | Python | config.py | cyrustabatab/Flitter | 6f3569988ff3448c6d916216785817b57ed39783 | [
"MIT"
] | 1 | 2021-07-29T23:35:48.000Z | 2021-07-29T23:35:48.000Z | config.py | cyrustabatab/Flitter | 6f3569988ff3448c6d916216785817b57ed39783 | [
"MIT"
] | 1 | 2021-06-02T02:21:32.000Z | 2021-06-02T02:21:32.000Z | config.py | cyrustabatab/Flitter | 6f3569988ff3448c6d916216785817b57ed39783 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
    """Flask application settings, read from the environment with local
    development fallbacks."""

    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    # Fall back to a SQLite database stored next to this file.
    SQLALCHEMY_DATABASE_URI = (os.environ.get('DATABASE_URL') or
                               'sqlite:///' + os.path.join(basedir, 'app.db'))
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    # Outgoing e-mail configuration.
    MAIL_SERVER = os.environ.get('MAIL_SERVER')
    MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
    MAIL_USE_TLS = 'MAIL_USE_TLS' in os.environ
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    ADMINS = ['your-email@example.com']

    LANGUAGES = ['en', 'es']
    MS_TRANSLATOR_KEY = os.environ.get('MS_TRANSLATOR_KEY')
    POSTS_PER_PAGE = 25
| 37.272727 | 71 | 0.706098 |
5f6d35edfb5ca49c2a4e4393150affe531da6590 | 1,970 | py | Python | 1.py | ankitpandey2708/upvote-bot | 0d6482082de69ff5aa1dc4f90f7844d3d9f6e302 | [
"MIT"
] | 2 | 2021-01-15T15:52:16.000Z | 2021-01-20T23:28:18.000Z | 1.py | ankitpandey2708/upvote-bot | 0d6482082de69ff5aa1dc4f90f7844d3d9f6e302 | [
"MIT"
] | null | null | null | 1.py | ankitpandey2708/upvote-bot | 0d6482082de69ff5aa1dc4f90f7844d3d9f6e302 | [
"MIT"
] | 1 | 2021-01-20T23:28:25.000Z | 2021-01-20T23:28:25.000Z | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time, threading
from configparser import ConfigParser
# Reading configuration file
parser = ConfigParser()
parser.read('config.ini')
parameters = {} # Dictionary for storing the parsed parameters
for section_name in parser.sections(): # Parsing the configuration file and reading it into the dictionary
    for name, value in parser.items(section_name):
        parameters[name] = value
# Automating your browser.
# NOTE(review): assumes chromedriver.exe is in the working directory and that
# config.ini supplies email_id / pass_word / user_name keys — confirm.
driver = webdriver.Chrome('chromedriver.exe')
driver.get("https://www.quora.com")
time.sleep(3)
form = driver.find_element_by_class_name('regular_login')
email = form.find_element_by_name("email")
email.send_keys(parameters["email_id"])
password = form.find_element_by_name("password")
password.send_keys(parameters["pass_word"])
password.send_keys(Keys.RETURN)
time.sleep(3)
# Getting to other user's link ;)
answers_link = "https://www.quora.com/" + parameters["user_name"]
driver.get(answers_link)
# Retrieve the whole (infinitely scrolled) page: keep scrolling until the
# document height stops growing.
while 1:
    a = driver.execute_script("return document.body.scrollHeight;")
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(7)
    b = driver.execute_script("return document.body.scrollHeight;")
    if(a==b):
        break;
# Time to upvote the answers ;)
driver.execute_script("window.a = document.getElementsByClassName('u-hardware-accelerated');") # Retrieving all the upvote items in an array
driver.execute_script("for(var i=0; i<a.length; i+=2) { a[i].click(); }") # Clicking on each and every item one by one
print('All answers upvoted')
| 42.826087 | 161 | 0.640102 |
70d229302a19e9ce8c17a648a354d5b51dffb190 | 16,257 | py | Python | Python/Shape Constrained risk measures/Prototype code ( not cleaned )/quantile.py | MohamedMkaouar/Some_Projects | 8170126dc91f313638595f9f4b81e9ae8b308334 | [
"Apache-2.0"
] | null | null | null | Python/Shape Constrained risk measures/Prototype code ( not cleaned )/quantile.py | MohamedMkaouar/Some_Projects | 8170126dc91f313638595f9f4b81e9ae8b308334 | [
"Apache-2.0"
] | null | null | null | Python/Shape Constrained risk measures/Prototype code ( not cleaned )/quantile.py | MohamedMkaouar/Some_Projects | 8170126dc91f313638595f9f4b81e9ae8b308334 | [
"Apache-2.0"
] | 1 | 2021-02-02T17:09:04.000Z | 2021-02-02T17:09:04.000Z | import gc
gc.collect()
import numpy as np
import pandas as pd
import scipy as sp
import sklearn as skl
from sklearn.model_selection import cross_val_score
from math import *
import random as rd
import cvxpy as cp
import multiprocessing as mp
import matplotlib.pyplot as plt
import gc
import statsmodels.api as sm
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
import time
def maternkernel(x, y, gamma):
    """Matern-3/2 kernel: k(x, y) = (1 + a) * exp(-a),
    where a = sqrt(3) * ||x - y|| / gamma."""
    scaled = sqrt(3) * sp.linalg.norm(np.array(x) - np.array(y)) / gamma
    return (1 + scaled) * exp(-scaled)
def minmaxkernel(x, y, gamma):
    """Min kernel: sum_i min(x_i, y_i).

    Scalar inputs are treated as length-one vectors. `gamma` is accepted
    only for interface uniformity with the other kernels and is unused.
    """
    xv = np.atleast_1d(np.array(x))
    yv = np.atleast_1d(np.array(y))
    total = 0
    for xi, yi in zip(xv, yv):
        total = total + min(xi, yi)
    return total
def pinball(z,t):
    # Pinball (quantile) loss at level t, in a cvxpy-friendly form:
    # 0.5*|z| + (t - 0.5)*z equals t*z for z >= 0 and (t - 1)*z for z < 0.
    if t>1 or t<0:
        print("tau must be in [0,1] \n")
        # NOTE(review): the replacement tau read here is not re-validated.
        t=float(input("try an other tau"))
    return(0.5*cp.abs(z)+(t-0.5)*z)
def expectile(z,t):
    # Asymmetric squared (expectile) loss: (1-t)*z**2 on the non-positive
    # side, t*z**2 otherwise. z is expected to be a cvxpy expression —
    # is_nonpos() inspects its declared sign, not a numeric value.
    if t>1 or t<0:
        print("tau must be in [0,1] \n")
        # NOTE(review): the replacement tau read here is not re-validated.
        t=float(input("try an other tau"))
    if z.is_nonpos():
        return (1-t)*z**2
    else:
        return t*z**2
#testing the pinball loss function output
out=[]
for i in np.arange(-5,5,0.1):
out.append(pinball(i,0.5))
#linear kernel
def linearkernel(x, y, gamma):
    """Linear kernel with offset: x.T * y + gamma.

    For 1-d array inputs this is the elementwise product plus gamma,
    matching the original formulation (no dot-product reduction).
    """
    xv = np.array(x)
    yv = np.array(y)
    return xv.T * yv + gamma
#laplacian kernel
def LaplaceKernel(x, y, gamma):
    """Laplacian kernel: exp(-||x - y|| / gamma)."""
    distance = sp.linalg.norm(np.array(x) - np.array(y))
    return exp(-distance / gamma)
def SigmoidKernel(x,y,gamma):
    # Sigmoid (tanh) kernel: tanh(x.T*y / N2 + gamma).
    # NOTE(review): N2 is a free variable that is never defined at module
    # level, so calling this raises NameError; the function appears unused
    # in the rest of this file.
    x=np.array(x)
    y=np.array(y)
    return np.tanh((1/N2)*x.T*y+gamma)
#gaussian kernel
def gausskernel(x, y, gamma):
    """Gaussian-type kernel: exp(-||x - y||**2 * gamma**(-0.5)).

    Note the bandwidth enters as gamma ** (-0.5) rather than the more
    common 1 / (2 * gamma**2); preserved as-is from the original model.
    """
    sq_dist = sp.linalg.norm(np.array(x) - np.array(y)) ** 2
    return np.exp(-sq_dist * gamma ** (-0.5))
#gram matrix
def computeG(X, gamma):
    """Gram matrix of the Gaussian kernel over the sample list X."""
    n = len(X)
    G = np.zeros((n, n))
    for i, xi in enumerate(X):
        for j, xj in enumerate(X):
            G[i, j] = gausskernel(xi, xj, gamma)
    return G
def get_fq(x,q,N,M,A,points,gamma):
    # Evaluate the q-th fitted quantile function at x as a kernel expansion:
    # sum over the N training samples plus the M inducing grid points, with
    # coefficients taken from column q of A.
    # NOTE(review): `data` is read from module scope, not passed in.
    value1=0
    for n in range(N):
        value1+= A[n,q]*gausskernel(data[n],x,gamma)
    value2=0
    for m in range(N,M+N):
        value2+= A[m,q]*gausskernel(points[m-N],x,gamma)
    return(value1+value2)
def getperformance(X,Z,Y,An,Q,N,M,tau):
    # Aggregate pinball loss of the Q fitted quantile curves on the sample
    # (X, Y); Z is the grid of inducing points, An the coefficient matrix.
    # NOTE(review): the intercepts `b` and bandwidth `gamma` come from
    # module scope, and the divisor is the training size N, not len(Y).
    res=0
    for q in range(Q):
        for n in range(len(Y)):
            res+=pinball(Y[n]-(get_fq(X[n],q,N,M,An,Z,gamma)+(b[q]).value),tau[q])
    return((1/N)*res.value)
def create_folds(X,k):
    # Yield (train_indices, test_indices) pairs for k-fold CV over X.
    return(KFold(n_splits=k).split(X))
#function to extract a sub matrix
def extractSubMatrix(
        matrix,
        rowStartIdx, rowEndIdx,
        colStartIdx, colEndIdx):
    """Return the sub-matrix of rows [rowStartIdx, rowEndIdx) restricted to
    columns [colStartIdx, colEndIdx), as a list of new row lists."""
    return [row[colStartIdx:colEndIdx]
            for row in matrix[rowStartIdx:rowEndIdx]]
# df=pd.read_csv("C:/Users/Bechir/Documents/scrm/code/SyntheticData.csv",skiprows=1)
#
# df.columns=["stock0","loss"]
# minX0=min(df["stock0"])
# #minX1=min(df["stock1"])
# maxX0=max(df["stock0"])
# #maxX1=max(df["stock1"])
# delta= 6
# X0=np.arange(minX0,maxX0,delta)
# #X1=np.arange(minX1,maxX1,delta)
#y=df["loss"]
#we test the code on the engel data set , scaled using R and saved in a csv file
df = pd.read_csv("C:/Users/malex/Desktop/scrm/code/SyntheticData.csv")
df.columns=["stock0","loss between time 1&2"]
y=df["loss between time 1&2"]
data=[]
for i in range(len(df["stock0"])):
data.append(df["stock0"][i])
# X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.5, random_state=42)
# foldX=[]
# foldY=[]
# y_train=np.array(y_train)
# X_train=np.array(X_train)
# y=[]
# # data=np.array(data)
# # for i in range(len(X_train)):
# # data.append(X_train[i])
# # y.append(y_train[i])
# data=np.array(data)
# y=df["loss between time 1&2"]
# y=np.array(y)
# # for i in create_folds(data,2):
# # foldX.append(data[i].tolist())
# # foldY.append(y[i].tolist())
#
# foldX=[data[0:int(len(data)/2)],data[int(len(data)/2):int(len(data))]]
# foldY=[y[0:int(len(data)/2)],y[int(len(data)/2):int(len(data))]]
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.3, random_state=42)
data=X_train
y=y_train
foldX=[]
foldY=[]
y_train=np.array(y_train)
X_train=np.array(X_train)
y=[]
data=[]
for i in range(len(X_train)):
data.append(X_train[i])
y.append(y_train[i])
data=np.array(data)
y=np.array(y)
for i,j in create_folds(X_train,2):
foldX.append(data[i].tolist())
foldY.append(y[i].tolist())
distance=[]
for i in range(len(data)):
for j in range(len(data)):
distance.append(abs(data[i]-data[j]))
#go for 2 folds
data=[]
y=[]
data1=np.array(foldX[0])
y1=np.array(foldY[0])
datatest1=np.array(foldX[1])
ytest1=np.array(foldY[1])
data2=np.array(foldX[1])
y2=np.array(foldY[1])
datatest2=np.array(foldX[0])
ytest2=np.array(foldY[0])
#data3=np.array(foldX[0]+foldX[2])
# y3=np.array(foldY[0]+foldY[2])
# datatest3=np.array(foldX[1])
# ytest3=np.array(foldY[1])
DataX=[data1,data2]
DataY=[y1,y2]
TestX=[datatest1,datatest2]
TestY=[ytest1,ytest2]
lmdg_v=[20.25, 91.125, 410.0625, 1845.28125,5000, 8303.765625,20000,40000]
gamma_v=[np.median(distance)]
b_v=[(10**log(i))*max(np.abs(df['loss between time 1&2'])) for i in [exp(1),3,6,exp(2),10,20]]
perf=[]
performance=[]
lmdf=cp.Parameter()
values=[]
perf2=[]
X_test=np.array(X_test)
y_test=np.array(y_test)
start_time = time.time()
for gamma in gamma_v:
print("s=",gamma)
for lmdg in lmdg_v:
for lmdb in b_v:
lmd=lmdg
#lmdb=873.4562
#print("lmd=",lmd)
for i in range(2):
#print("i=",i)
data=DataX[i]
y=DataY[i]
start_time2 = time.time()
minX0=min(df["stock0"])
maxX0=max(df["stock0"])
# minX1=min(df["stock1"])
# maxX1=max(df["stock1"])
#delta net
delta= 6
points=[]
points=(np.arange(minX0,maxX0,delta)).tolist()
# for k in np.arange(minX0,maxX0+1,delta):
# for j in np.arange(minX1,maxX1+1,delta):
# points.append([k,j])
data2=data
data=[]
for k in range(len(data2)):
data.append(data2[k])
X=data+points
#pinball loss function
#computing the gram matrix
G=computeG(X,gamma)
Geps=G+(10**(-4)*np.ones((len(X),len(X))))
#computing the eta coefficient
eta=sqrt(2)*(1-exp(-sqrt(2*delta**2)/(gamma**2)))**(0.5)
#computing the W and U matrices
Q=5
I=Q-1
W=np.zeros((I,Q))
j=-1
for l in range(Q-1):
j=j+1
while j>=l:
W[l,j]=-1
W[l,j+1]=1
break
U=W
e=np.zeros((Q-1,Q-1))
l,j=0,-1
for l in range(Q-1):
j=j+1
while j>=l:
e[l,j]=1
break
eq=np.zeros((Q,Q))
l,j=0,-1
for l in range(Q):
j=j+1
while j>=l:
eq[l,j]=1
break
N=len(data)
#optimization problem
tau=[0.1,0.3,0.5,0.7,0.95]
l=0
q=0
M=len(points)
A=cp.Variable((M+N,Q))
b=cp.Variable(Q)
Gsqrt=sp.linalg.sqrtm(Geps)
hi=((Geps@(A@W.T))[N:N+M])
hj=(Geps@(A@W.T))
soc_constraint=[(1/eta)*(U@b)[l]+(1/(eta))*cp.min((hi@e[l]))>=cp.norm((Gsqrt@hj)@e[l],2) for l in range(Q-1)]
obj=0
Gn=np.array(extractSubMatrix(G,0,N,0,N+M))
y=np.array(y)
for q in range(Q):
for n in range(N):
obj+=pinball(y[n]-((Gn@A)[n,q]+b[q]),tau[q])
hl=(Gsqrt@A)
f1=0
for q in range(Q):
f1=f1+cp.norm(hl@eq[q],2)**2
bn=cp.norm(b,2)
prob = cp.Problem(cp.Minimize((1/N)*obj),soc_constraint+[bn<=lmdb]+[f1<=lmd])
prob.solve(solver="MOSEK")
end_time2=time.time()
#print("prob value =",obj.value)
perf.append(getperformance(TestX[i].tolist(),points,TestY[i],A.value,Q,N,M,tau))
values.append((lmd/1000,lmdb))
# print("prf value",np.mean(perf))
performance.append(np.mean(perf))
perf=[]
print(min(performance))
minperf.append(min(performance))
#function to evaluate the estimated quantile function for a given quantile level tau
p=[14.71,16.59,17.098,21.34]
#plotting the quantile function curves over a scatter plot of the data
plt.rc('legend',**{'fontsize':45})
font = {'family' : 'normal',
'weight' : 'normal',
'size' :40}
plt.rc('font', **font)
colors=["r+","g+","y+","m+","c+"]
seq=np.arange(min(df["stock0"]),max(df["stock0"]),0.1)
plt.plot(data,y,"ko")
for q in range(Q-1,0,-1):
fq=[]
for i in seq:
fq.append(get_fq(i,q,N,M,A.value,points,gamma)+b.value[q])
plt.plot(seq,fq,label='$\\tau={}$'.format(tau[q]))
#plt.legend(loc=[1.01, 0.4])
plt.ylabel('$\\Psi(S_{t_1},S_{t_2},\\Theta)$')
plt.xlabel('$S_{t_1}$')
plt.show()
seq0=np.arange(min(df["stock0"]),max(df["stock0"]),0.5)
seq1=np.arange(min(df["stock1"]),max(df["stock1"]),0.5)
seq=[]
for i in range(len(seq0)):
for j in range(len(seq1)):
seq.append((seq0[i],seq1[j],get_fq([seq0[i],seq1[j]],q,N,M,A.value,points,gamma)+b.value[q]))
seq2=[]
for i in range(len(seq0)):
for j in range(len(seq1)):
seq2.append((seq0[i],seq1[j],get_fq([seq0[i],seq1[j]],q,N,M,A.value,points,gamma)+b.value[q]))
q=3
seq3=[]
for i in range(len(seq0)):
for j in range(len(seq1)):
seq3.append((seq0[i],seq1[j],get_fq([seq0[i],seq1[j]],q,N,M,A.value,points,gamma)+b.value[q]))
for q in range(Q):
fq=[]
for i in seq:
fq.append(get_fq(i,q,N,M,A.value,points,gamma)+b.value[q])
plt.plot(seq,fq,label='$\\tau={}$'.format(tau[q]))
# plt.legend(loc=[1.01, 0.4])
plt.ylabel('$\\Psi(S_{t_1},S_{t_2},\\Theta)$')
plt.xlabel('$S_{t_1}$')
plt.show()
data=X_train
distance=[]
for i in range(len(data)):
for j in range(len(data)):
distance.append(abs(data[i]-data[j]))
gamma=np.median(distance)
perf= pd.read_csv('C:/Users/malex/Desktop/scrm/code/tmpdata/perf.csv')
perf.columns=["value"]
p=perf["value"]
val=[(1.000000000000000000e+00,5.094930106957710336e+01,8.686552035132566463e-01),
(1.000000000000000000e+00,2.513537744701431365e+01,8.686553896116917528e-01),
(1.000000000000000000e+00,5.094930106957709768e+02,8.686557901811989835e-01),
(1.000000000000000000e+00,6.393674450418941291e+01,8.686550717910765940e-01),
(1.000000000000000000e+00,1.022641289787253868e+03,8.686557156782594991e-01),
(1.000000000000000000e+01,5.094930106957710336e+01,6.089390793565628845e-01),
(1.000000000000000000e+01,2.513537744701431365e+01,6.089389622996191909e-01),
(1.000000000000000000e+01,5.094930106957709768e+02,6.089392698318070174e-01),
(1.000000000000000000e+01,6.393674450418941291e+01,6.089390613973637567e-01),
(1.000000000000000000e+01,1.022641289787253868e+03,6.089392166347401547e-01),
(1.000000000000000000e+02,5.094930106957710336e+01,4.227001950140902853e-01),
(1.000000000000000000e+02,2.513537744701431365e+01,4.226998144366899135e-01),
(1.000000000000000000e+02,5.094930106957709768e+02,4.226997865846292557e-01),
(1.000000000000000000e+02,6.393674450418941291e+01,4.227000855142818980e-01),
(1.000000000000000000e+02,1.022641289787253868e+03,4.227002193322486612e-01),
(1.000000000000000000e+03,5.094930106957710336e+01,5.732623290829506058e-01),
(1.000000000000000000e+03,2.513537744701431365e+01,5.733967394937002915e-01),
(1.000000000000000000e+03,5.094930106957709768e+02,5.731529054706561155e-01),
(1.000000000000000000e+03,6.393674450418941291e+01,5.732167298289747581e-01)]
start_time = time.time()
val2=[]
for i in range(len(performance)):
val2.append((values[i][0],values[i][1]/1000,performance[i]))
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import griddata
x, y, z = zip(*val2)
z = list(map(float, z))
grid_x, grid_y = np.mgrid[min(x):max(x):50j, min(y):max(y):50j]
grid_z = griddata((x, y), z, (grid_x, grid_y), method='cubic')
fig = plt.figure()
ay = fig.gca(projection='3d')
ay.scatter(df["stock0"], df["stock1"], y, c='r', marker='o')
ax = fig.gca(projection='3d')
im=ax.plot_surface(grid_x, grid_y, grid_z, cmap=plt.cm.Spectral,color='blue')
ax.contour3D(grid_x, grid_y, grid_z, 50, cmap='binary')
#ax.contour3D(grid_x, grid_y, grid_z, 50, cmap='binary')
#ax.set_zlabel("CV score")
# ax.zaxis.labelpad = 30
# ax.set_xlabel("$\\frac{\\tilde{\\lambda}_g}{1000}$")
# ax.xaxis.labelpad = 50
# ax.set_ylabel("$\\frac{\\tilde{\\lambda}_b}{1000}$")
# ax.yaxis.labelpad = 30
# ax.view_init(60, 35)
fig.colorbar(im)
x2, y2, z2 = zip(*seq2)
z2 = list(map(float, z2))
grid_x2, grid_y2 = np.mgrid[min(x2):max(x2):50j, min(y2):max(y2):50j]
grid_z2 = griddata((x2, y2), z2, (grid_x2, grid_y2), method='cubic')
az = fig.gca(projection='3d')
im2=az.plot_surface(grid_x2, grid_y2, grid_z2, cmap=plt.cm.coolwarm,color="red")
az.contour3D(grid_x2, grid_y2, grid_z2, 50, cmap='binary')
#ax.contour3D(grid_x, grid_y, grid_z, 50, cmap='binary')
fig.colorbar(im2)
x3, y3, z3 = zip(*seq3)
z3 = list(map(float, z3))
grid_x3, grid_y3 = np.mgrid[min(x3):max(x3):50j, min(y3):max(y3):50j]
grid_z3 = griddata((x3, y3), z3, (grid_x3, grid_y3), method='cubic')
aw = fig.gca(projection='3d')
im3=aw.plot_surface(grid_x3, grid_y3, grid_z3, cmap=plt.cm.binary,color="red")
az.contour3D(grid_x3, grid_y3, grid_z3, 50, cmap='binary')
#ax.contour3D(grid_x, grid_y, grid_z, 50, cmap='binary')
fig.colorbar(im3)
ax.set_zlabel("$\\Psi(S_{t_1},S_{t_2},\\Theta)$")
ax.zaxis.labelpad = 50
ax.set_xlabel("$S_{t_1}^1$")
ax.xaxis.labelpad = 50
ax.set_ylabel("$S_{t_1}^2$")
ax.yaxis.labelpad = 50
ax.view_init(60, 35)
plt.show()
im=plt.contour(grid_x,grid_y,grid_z,levels=100)
plt.colorbar(im)
#plt.plot(lmd, lmdb,"ro")
plt.xlabel("$\\frac{\\tilde{\\lambda}_g}{1000}$")
ax.xaxis.labelpad = 20
plt.ylabel("$\\frac{\\tilde{\\lambda}_b}{1000}$")
ax.yaxis.labelpad = 30
plt.show()
end_time = time.time()
np.savetxt('C:/Users/malex/Desktop/scrm/code/data1.csv', val,delimiter=';')
y.sort()
minCV=[8.06,12,15.91,18.4,19.38,20.36,24.73]
R=[0,5,10,15,20,25,75]
plt.plot(R,minCV)
plt.xlabel("Interest Rate %")
plt.ylabel("Minimum CV score")
plt.show()
dataSize=[150,250,350,450,550]
plt.plot(dataSize,Tme)
plt.xlabel('Size of data')
plt.ylabel('time to solve the optimization problem in seconds')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
| 32.128458 | 130 | 0.559697 |
a7ae097fd43fb1c90b71aa2b8abfedbbf7be9619 | 13,219 | py | Python | chess/board/board.py | certik/chess | dc806fccc0fb9acc57c40db56e620f2c55157425 | [
"MIT"
] | 1 | 2016-05-09T00:40:16.000Z | 2016-05-09T00:40:16.000Z | chess/board/board.py | certik/chess | dc806fccc0fb9acc57c40db56e620f2c55157425 | [
"MIT"
] | null | null | null | chess/board/board.py | certik/chess | dc806fccc0fb9acc57c40db56e620f2c55157425 | [
"MIT"
] | null | null | null | class InvalidMove(Exception):
pass
class Board(object):
    """8x8 chess board with move parsing and execution.

    Squares are addressed as (i, j): i is the file (0..7 -> a..h) and j is
    the rank (0..7 -> 1..8), so e4 is (4, 3). The position lives in a flat
    64-slot list; empty squares hold None. Every executed move is recorded
    in coordinate notation (e.g. "e2e4") in self._moves.
    """
    def __init__(self):
        self._board = [None]*64
        self.setup()
        self._moves = []
    def setup(self):
        """Place both armies on their initial squares and give white the move."""
        self[0, 0] = Rock(self)
        self[1, 0] = Knight(self)
        self[2, 0] = Bishop(self)
        self[3, 0] = Queen(self)
        self[4, 0] = King(self)
        self[5, 0] = Bishop(self)
        self[6, 0] = Knight(self)
        self[7, 0] = Rock(self)
        for i in range(8):
            self[i, 1] = Pawn(self)
            self[i, 6] = Pawn(self, black=True)
        self[0, 7] = Rock(self, black=True)
        self[1, 7] = Knight(self, black=True)
        self[2, 7] = Bishop(self, black=True)
        self[3, 7] = Queen(self, black=True)
        self[4, 7] = King(self, black=True)
        self[5, 7] = Bishop(self, black=True)
        self[6, 7] = Knight(self, black=True)
        self[7, 7] = Rock(self, black=True)
        self._white_to_move=True
    def __setitem__(self, key, value):
        """
        i, j = key
        i ... 0..7, it means a..h
        j ... 0..7, it means 1..8
        e.g. e4 is (4, 3)
        """
        i, j = key
        self._board[j*8+i] = value
    def __getitem__(self, key):
        """Return the piece at square key=(i, j), or None if empty."""
        i, j = key
        return self._board[j*8+i]
    def a2i(self, x):
        """Convert a file letter "a".."h" to a 0..7 index.

        NOTE(review): an unrecognized letter leaves ``i`` unbound and the
        return statement raises NameError.
        """
        if x == "a": i = 0
        if x == "b": i = 1
        if x == "c": i = 2
        if x == "d": i = 3
        if x == "e": i = 4
        if x == "f": i = 5
        if x == "g": i = 6
        if x == "h": i = 7
        return i
    def i2a(self, i):
        """Convert a 0..7 file index back to its letter "a".."h"."""
        return "abcdefgh"[i]
    def parse_move(self, move):
        """Parse a Short Algebraic Notation move (castling excluded) into
        (piece class, target (i, j), capture flag, check flag, helper),
        where helper is the origin-disambiguation hint, possibly ""."""
        def convert_field(field):
            # "e4" -> (4, 3)
            if len(field) == 2:
                x = field[0]
                y = field[1]
                i = self.a2i(x)
                j = int(y)-1
                return i, j
            else:
                raise InvalidMove(move)
        if move[0] == "R":
            piece = Rock
        elif move[0] == "N":
            piece = Knight
        elif move[0] == "B":
            piece = Bishop
        elif move[0] == "Q":
            piece = Queen
        elif move[0] == "K":
            piece = King
        else:
            piece = Pawn
        if piece != Pawn:
            move = move[1:]
        if move.find("x") != -1:
            capture = True
            move = move.replace("x", "")
        else:
            capture = False
        if move[-1] == "+":
            check = True
            move = move[:-1]
        else:
            check = False
        # Whatever precedes the final two characters disambiguates the
        # origin square (e.g. the "b" in "Rbd1").
        helper = move[:-2]
        move = move[-2:]
        field = convert_field(move)
        return piece, field, capture, check, helper
    def find_piece(self, piece, field):
        """
        Finds the piece "piece" that can go to the field "field".
        """
        candidates = []
        # first find all pieces of the type "piece" on the board:
        for i in range(8):
            for j in range(8):
                if isinstance(self[i, j], piece) and \
                        (self[i, j].white() == self._white_to_move):
                    candidates += [(i, j)]
        # try each of them:
        candidates = [x for x in candidates if self[x].can_move(x, field)]
        return candidates
    def use_helper(self, helper, candidates):
        """Filter candidate origin squares by a file letter or a rank digit."""
        if (helper != "") and (helper in "abcdefgh"):
            i = self.a2i(helper)
            return [x for x in candidates if x[0] == i]
        if (helper != "") and (helper in "12345678"):
            j = int(helper) - 1
            return [x for x in candidates if x[1] == j]
        return candidates
    def moves_from_list(self, moves):
        """Play a whole game given as a list of algebraic moves."""
        for move in moves:
            self.move_algebraic(move)
    def move_algebraic(self, move):
        """
        Do one move.
        "move" is given in the Short Algebraic notation.
        """
        if move == "O-O":
            # kingside castling
            if self._white_to_move:
                self.move_coordinate((4, 0), (6, 0))
                self.move_coordinate((7, 0), (5, 0), True)
            else:
                self.move_coordinate((4, 7), (6, 7))
                self.move_coordinate((7, 7), (5, 7), True)
        elif move == "O-O-O":
            # queenside castling
            if self._white_to_move:
                self.move_coordinate((4, 0), (2, 0))
                self.move_coordinate((0, 0), (3, 0), True)
            else:
                self.move_coordinate((4, 7), (2, 7))
                self.move_coordinate((0, 7), (3, 7), True)
        else:
            piece, field, capture, check, helper = self.parse_move(move)
            if capture:
                if self[field] is None:
                    if (piece == Pawn) and (field[1] in [2, 5]):
                        # this is probably en passant, so ok
                        pass
                    else:
                        raise InvalidMove(move)
            else:
                if self[field] is not None:
                    raise InvalidMove(move)
            possible_pieces = self.find_piece(piece, field)
            if len(possible_pieces) != 1:
                # Ambiguous origin: apply the notation's disambiguation hint.
                possible_pieces = self.use_helper(helper, possible_pieces)
                if len(possible_pieces) != 1:
                    raise InvalidMove(move)
            self.move_coordinate(possible_pieces[0], field)
    def move_coordinate(self, old, new, castling=False):
        """
        Do one move. "old" and "new" are coordinates.
        Example:
        >>> b.move_coordinate((0, 0), (4, 0))
        """
        p = self[old]
        if p is None:
            raise InvalidMove()
        if not castling:
            # The rook's extra move during castling skips the side-to-move
            # check (and does not flip the turn below).
            if not (self._white_to_move == (not p.black())):
                raise InvalidMove()
        self[old] = None
        self[new] = p
        # en passant:
        if isinstance(p, Pawn):
            if self._white_to_move:
                b = self[new[0], 4]
                if (new[1] == 5) and isinstance(b, Pawn) and b.black():
                    self[new[0], 4] = None
            else:
                b = self[new[0], 3]
                if (new[1] == 2) and isinstance(b, Pawn) and b.white():
                    self[new[0], 3] = None
        if not castling:
            self._white_to_move = not self._white_to_move
        move = "%s%d%s%d" % (self.i2a(old[0]), old[1]+1,
                self.i2a(new[0]), new[1]+1)
        self._moves.append(move)
    def to_ascii_art(self):
        """Render the board as an ASCII grid; black pieces are framed by '#'."""
        s = ""
        s += "+" + "---+"*8 + "\n"
        for j in reversed(range(8)):
            row = "|"
            for i in range(8):
                if self[i, j] is not None:
                    if self[i, j].black():
                        row += "#%s#|" % self[i, j].to_ascii_art()
                    else:
                        row += " %s |" % self[i, j].to_ascii_art()
                else:
                    row += "   |"
            s += row + "\n"
        s += "+" + "---+"*8 + "\n"
        return s
    def to_string(self):
        """Serialize the position as 64 characters, rank 8 first; spaces
        mark empty squares."""
        s = ""
        for j in reversed(range(8)):
            for i in range(8):
                if self[i, j] is None:
                    s += " "
                else:
                    s += self[i, j].to_string()
        return s
    def __str__(self):
        return self.to_ascii_art()
    def get_moves(self):
        """
        Return a list of moves in "e2e4" notation.
        """
        return self._moves
class Piece(object):
    """Base class for chess pieces; tracks the owning board and colour."""

    def __init__(self, board, black=False):
        self._board = board
        self._black = black

    def black(self):
        """True if this piece belongs to the black player."""
        return self._black

    def white(self):
        """True if this piece belongs to the white player."""
        return not self.black()
class Rock(Piece):
    """Rook ("Rock" in this codebase's naming, kept for compatibility)."""

    def to_ascii_art(self):
        return "R"

    def to_string(self):
        # Upper case denotes a black piece, lower case a white one.
        if self._black:
            return "R"
        else:
            return "r"

    def can_move(self, old, new):
        """Return True if a rook could slide from *old* to *new*.

        Checks rook geometry and that every square strictly between the
        two coordinates is empty; the destination square itself is not
        examined (capture legality is validated by the caller).
        """
        # (The unused dx/dy displacement locals from the original were removed.)
        def r(a, b):
            """
            Returns the integers between a, b, exclusive.
            Example:
            >>> r(3, 7)
            [4, 5, 6]
            >>> r(7, 3)
            [4, 5, 6]
            """
            a, b = sorted([a, b])
            return range(a+1, b)
        if old[1] == new[1]:
            # x-movement
            # check that no piece is between the old and new position
            for i in r(old[0], new[0]):
                if self._board[i, old[1]] is not None:
                    return False
            return True
        if old[0] == new[0]:
            # y-movement
            # check that no piece is between the old and new position
            for j in r(old[1], new[1]):
                if self._board[old[0], j] is not None:
                    return False
            return True
        return False
class Knight(Piece):
    """Knight piece."""

    def to_ascii_art(self):
        return "N"

    def to_string(self):
        # Upper case denotes a black piece, lower case a white one.
        return "N" if self._black else "n"

    def can_move(self, old, new):
        # A knight jump is (+-1, +-2) or (+-2, +-1), i.e. the squared
        # distance between the two squares is exactly 5.
        dx = old[0] - new[0]
        dy = old[1] - new[1]
        return dx * dx + dy * dy == 5
class Bishop(Piece):
    """Bishop piece."""

    def to_ascii_art(self):
        return "B"

    def to_string(self):
        # Upper case denotes a black piece, lower case a white one.
        return "B" if self._black else "b"

    def can_move(self, old, new):
        # Diagonal move: equal absolute horizontal and vertical displacement.
        # (Path emptiness is not checked here, mirroring the original code.)
        dx = old[0] - new[0]
        dy = old[1] - new[1]
        return abs(dx) == abs(dy)
class Queen(Piece):
    def to_ascii_art(self):
        return "Q"
    def to_string(self):
        # Upper case denotes a black piece, lower case a white one.
        if self._black:
            return "Q"
        else:
            return "q"
    def can_move(self, old, new):
        # A queen moves like a bishop or a rook; delegate to throwaway
        # instances of those pieces bound to the same board.
        return Bishop(self._board, self._black).can_move(old, new) or \
               Rock(self._board, self._black).can_move(old, new)
class King(Piece):
    """King piece."""

    def to_ascii_art(self):
        return "K"

    def to_string(self):
        # Upper case denotes a black piece, lower case a white one.
        return "K" if self._black else "k"

    def can_move(self, old, new):
        # One square in any direction: Chebyshev distance of at most 1.
        dx = old[0] - new[0]
        dy = old[1] - new[1]
        return max(abs(dx), abs(dy)) <= 1
class Pawn(Piece):
    def to_ascii_art(self):
        return "p"
    def to_string(self):
        # Upper case denotes a black piece, lower case a white one.
        if self._black:
            return "P"
        else:
            return "p"
    def can_move(self, old, new):
        """Pawn geometry: single/double push onto an empty square,
        diagonal capture, and en-passant capture.

        NOTE(review): the double step does not verify that the jumped-over
        square is empty, and captures do not check the target's colour —
        legality is effectively delegated to the (valid) game record.
        """
        dx = new[0]-old[0]
        dy = new[1]-old[1]
        if dx == 0:
            # Straight push: allowed only onto an empty square.
            if self._board[new] is None:
                if self.white():
                    return (dy == 1) or ((dy == 2) and (old[1] == 1))
                else:
                    return (dy == -1) or ((dy == -2) and (old[1] == 6))
        if dx in [-1, 1]:
            # Diagonal step: an ordinary capture if the target is occupied.
            if self._board[new] is not None:
                if self.white():
                    return dy == 1
                else:
                    return dy == -1
            else:
                # check for en passant:
                # the captured pawn sits on the rank beside the destination.
                if self.white():
                    b = self._board[new[0], 4]
                    if (new[1] == 5) and isinstance(b, Pawn) and b.black():
                        return True
                else:
                    b = self._board[new[0], 3]
                    if (new[1] == 2) and isinstance(b, Pawn) and b.white():
                        return True
        return False
def main():
    # Replay a full recorded game and print the final position.
    # NOTE: this file uses Python 2 print statements.
    moves = ['d4', 'Nf6', 'c4', 'c5', 'd5', 'b5', 'cxb5', 'a6', 'e3', 'Bb7', 'Nc3', 'axb5', 'Bxb5', 'Qa5', 'Bd2', 'Qb6', 'Nf3', 'Nxd5', 'Nxd5', 'Bxd5', 'a4', 'e6', 'Bc3', 'Be7', 'O-O', 'O-O', 'Ne5', 'd6', 'Nc4', 'Bxc4', 'Qg4', 'g6', 'Qxc4', 'Nc6', 'e4', 'Nd4', 'Bxd4', 'cxd4', 'Rfc1', 'Bg5', 'Rc2', 'Kg7', 'b4', 'Rfc8', 'Qxc8', 'Rxc8', 'Rxc8', 'd3', 'Bxd3', 'Qd4', 'Rd1', 'Qxb4', 'Bc2', 'Bf6', 'h3', 'Bd4', 'Rf1', 'Bc5', 'Ra8', 'h5', 'a5', 'Qb7', 'Re8', 'Qa6', 'Bd1', 'Qxa5', 'Bf3', 'h4', 'Re7', 'Qb4', 'Rc7', 'Qb6', 'Rc8', 'Qb7', 'Re8', 'Qb2', 'Bd1', 'Qd2', 'Bf3', 'Qd4', 'Ra8', 'Kf6', 'Ra2', 'Qe5', 'Rc1', 'Qf4', 'Rc4', 'Ke5', 'Re2', 'Qg5', 'Kf1', 'Qd8', 'Rcc2', 'Qa5', 'Ra2', 'Qc3', 'Rac2', 'Qd3', 'Kg1', 'Qd1+', 'Kh2', 'Qa1', 'Ra2', 'Qc3', 'Rec2', 'Qd4', 'Rd2', 'Qc3', 'Rdc2', 'Qe1', 'Re2', 'Qb1', 'Rac2', 'Bd4', 'Rcd2', 'Qb4', 'Rc2', 'Kf6', 'g3', 'hxg3+', 'Kxg3', 'Be5+', 'Kg2', 'Qb8', 'h4', 'Qh8', 'Kh3', 'Qh6', 'Re1', 'Qf4', 'Kg2', 'Qh2+', 'Kf1', 'Qxh4', 'Rd1', 'Qh8', 'Ke2', 'Qb8', 'Rcd2', 'Qb5+', 'Rd3', 'Qc4', 'Rg1', 'Bf4', 'Rgd1', 'Ke5', 'Ke1', 'Kf6', 'Rd4', 'Qc3+', 'Ke2', 'Qc2+', 'Kf1', 'Kg5', 'Be2', 'f5', 'f3', 'Qc3', 'R1d3', 'Qc7', 'Bd1', 'Qh7', 'exf5', 'Qh1+', 'Kf2', 'Qh2+', 'Kf1', 'gxf5', 'Bb3', 'Bg3', 'f4+', 'Kf6', 'Rd2', 'Qh1+', 'Ke2', 'Qg2+', 'Kd3', 'Qf3+', 'Kc2', 'Bxf4', 'Rd1', 'Qe2+', 'Kb1', 'Be5', 'R4d2', 'Qe4+', 'Rd3', 'd5', 'Bc2', 'Qb4+', 'Rb3', 'Qa5', 'Kc1', 'Qa1+', 'Kd2', 'Qd4+', 'Kc1', 'Qc5', 'Rf1', 'Bd4', 'Kd2', 'Qc4', 'Rff3', 'Be5', 'Kd1', 'Bd6', 'Rbc3', 'Qg4', 'Ke2', 'f4', 'Kf1', 'Bc5', 'Ke2', 'Bd6', 'Bb3', 'Be5', 'Rcd3', 'Bd6', 'Kf1', 'Bc5', 'Ke1', 'Ke5', 'Kd2', 'Be3+', 'Rdxe3+', 'fxe3+', 'Rxe3+', 'Kd6', 'Bc2', 'e5', 'Ke1', 'e4', 'Kd2', 'Ke5', 'Re2', 'Qf3', 'Bd1', 'd4', 'Kc1', 'Qc3+', 'Kb1', 'd3', 'Rh2', 'Kd4', 'Ka2', 'Qa5+', 'Kb2', 'Qc3+', 'Ka2', 'Qc1', 'Rh8', 'Qxd1', 'Ka3', 'Qb1', 'Rd8+', 'Ke3']
    b = Board()
    b.moves_from_list(moves)
    print b
    print '"' + b.to_string() + '"'
if __name__ == "__main__":
    main()
| 32.320293 | 1,844 | 0.439216 |
77915dab6b4de8797354be0beca009ce1c2fb57b | 5,462 | py | Python | imaginaire/discriminators/spade.py | MichaelDoron/imaginaire | 5f95b988453d391e972fa528152121d0dd3cb51a | [
"RSA-MD"
] | 1 | 2021-03-22T21:14:02.000Z | 2021-03-22T21:14:02.000Z | imaginaire/discriminators/spade.py | jrfrantz/imaginaire | 7c650977b29ea2dd12557d1fef447df9809db737 | [
"RSA-MD"
] | null | null | null | imaginaire/discriminators/spade.py | jrfrantz/imaginaire | 7c650977b29ea2dd12557d1fef447df9809db737 | [
"RSA-MD"
] | 1 | 2021-06-09T01:28:59.000Z | 2021-06-09T01:28:59.000Z | # Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
import torch
import torch.nn as nn
from imaginaire.discriminators.fpse import FPSEDiscriminator
from imaginaire.discriminators.multires_patch import NLayerPatchDiscriminator
from imaginaire.utils.data import (get_paired_input_image_channel_number,
get_paired_input_label_channel_number)
from imaginaire.utils.distributed import master_only_print as print
class Discriminator(nn.Module):
    r"""Multi-resolution patch discriminator.

    Wraps a feature-pyramid semantics-embedding (FPSE) discriminator plus a
    stack of ``num_discriminators`` N-layer patch discriminators, each applied
    to a bilinearly downsampled (x0.5 per level) concatenation of the semantic
    label map and the image.

    Args:
        dis_cfg (obj): Discriminator definition part of the yaml config file.
        data_cfg (obj): Data definition part of the yaml config file.
    """
    def __init__(self, dis_cfg, data_cfg):
        super(Discriminator, self).__init__()
        print('Multi-resolution patch discriminator initialization.')
        # We assume the first datum is the ground truth image.
        image_channels = get_paired_input_image_channel_number(data_cfg)
        # Calculate number of channels in the input label.
        # Video datasets report their label channels differently, hence the
        # ``video=True`` flag.
        if data_cfg.type == 'imaginaire.datasets.paired_videos':
            num_labels = get_paired_input_label_channel_number(
                data_cfg, video=True)
        else:
            num_labels = get_paired_input_label_channel_number(data_cfg)
        # Build the discriminator.
        # All hyper-parameters fall back to these defaults when absent from
        # the yaml config.
        kernel_size = getattr(dis_cfg, 'kernel_size', 3)
        num_filters = getattr(dis_cfg, 'num_filters', 128)
        max_num_filters = getattr(dis_cfg, 'max_num_filters', 512)
        num_discriminators = getattr(dis_cfg, 'num_discriminators', 2)
        num_layers = getattr(dis_cfg, 'num_layers', 5)
        activation_norm_type = getattr(dis_cfg, 'activation_norm_type', 'none')
        weight_norm_type = getattr(dis_cfg, 'weight_norm_type', 'spectral')
        print('\tBase filter number: %d' % num_filters)
        print('\tNumber of discriminators: %d' % num_discriminators)
        print('\tNumber of layers in a discriminator: %d' % num_layers)
        print('\tWeight norm type: %s' % weight_norm_type)
        # Each patch discriminator sees the label map and the image
        # concatenated along the channel dimension.
        num_input_channels = image_channels + num_labels
        self.discriminators = nn.ModuleList()
        for i in range(num_discriminators):
            net_discriminator = NLayerPatchDiscriminator(
                kernel_size,
                num_input_channels,
                num_filters,
                num_layers,
                max_num_filters,
                activation_norm_type,
                weight_norm_type)
            self.discriminators.append(net_discriminator)
        print('Done with the Multi-resolution patch '
              'discriminator initialization.')
        fpse_kernel_size = getattr(dis_cfg, 'fpse_kernel_size', 3)
        fpse_activation_norm_type = getattr(dis_cfg,
                                            'fpse_activation_norm_type',
                                            'none')
        self.fpse_discriminator = FPSEDiscriminator(
            image_channels,
            num_labels,
            num_filters,
            fpse_kernel_size,
            weight_norm_type,
            fpse_activation_norm_type)

    def _single_forward(self, input_label, input_image):
        # Compute discriminator outputs and intermediate features from input
        # images and semantic labels.
        input_x = torch.cat(
            (input_label, input_image), 1)
        features_list = []
        # The FPSE branch yields three predictions, which are prepended to
        # the multi-resolution patch outputs below.
        pred2, pred3, pred4 = self.fpse_discriminator(input_image, input_label)
        output_list = [pred2, pred3, pred4]
        input_downsampled = input_x
        for net_discriminator in self.discriminators:
            output, features = net_discriminator(input_downsampled)
            output_list.append(output)
            features_list.append(features)
            # Halve the spatial resolution before the next discriminator.
            input_downsampled = nn.functional.interpolate(
                input_downsampled, scale_factor=0.5, mode='bilinear',
                align_corners=True)
        return output_list, features_list

    def forward(self, data, net_G_output):
        r"""SPADE discriminator forward.

        Runs :meth:`_single_forward` once on the real image and once on the
        generated image, both conditioned on the same label map.

        Args:
            data (dict):
                - images (N x C1 x H x W tensor) : Ground truth images.
                - label (N x C2 x H x W tensor) : Semantic representations.
                - z (N x style_dims tensor): Gaussian random noise.
            net_G_output (dict):
                fake_images (N x C1 x H x W tensor) : Fake images.
        Returns:
            (dict):
                - real_outputs (list): list of output tensors produced by
                  individual patch discriminators for real images.
                - real_features (list): list of lists of features produced by
                  individual patch discriminators for real images.
                - fake_outputs (list): list of output tensors produced by
                  individual patch discriminators for fake images.
                - fake_features (list): list of lists of features produced by
                  individual patch discriminators for fake images.
        """
        output_x = dict()
        output_x['real_outputs'], output_x['real_features'] = \
            self._single_forward(data['label'], data['images'])
        output_x['fake_outputs'], output_x['fake_features'] = \
            self._single_forward(data['label'], net_G_output['fake_images'])
        return output_x
| 46.288136 | 79 | 0.641157 |
629cd774b27a038b90e11fcd8c357354c09fc13f | 1,781 | py | Python | setup.py | lyubov888L/trio-typing | f32f17b0f242daf2d42407f383ca581d64b6c299 | [
"Apache-2.0",
"MIT"
] | null | null | null | setup.py | lyubov888L/trio-typing | f32f17b0f242daf2d42407f383ca581d64b6c299 | [
"Apache-2.0",
"MIT"
] | null | null | null | setup.py | lyubov888L/trio-typing | f32f17b0f242daf2d42407f383ca581d64b6c299 | [
"Apache-2.0",
"MIT"
] | null | null | null | from setuptools import setup
import os
exec(open("trio_typing/_version.py", encoding="utf-8").read())
LONG_DESC = open("README.rst", encoding="utf-8").read()
stub_packages = ["async_generator-stubs", "outcome-stubs", "trio-stubs"]
setup(
name="trio-typing",
version=__version__,
description="Static type checking support for Trio and related projects",
url="https://github.com/python-trio/trio-typing",
long_description=LONG_DESC,
long_description_content_type="text/x-rst",
author="Joshua Oreman",
author_email="oremanj@gmail.com",
license="MIT -or- Apache License 2.0",
packages=["async_generator-stubs", "outcome-stubs", "trio-stubs", "trio_typing"],
include_package_data=True,
install_requires=[
"trio >= 0.16.0",
# mypy can't be installed on PyPy due to its dependency
# on typed-ast
"mypy >= 0.780; implementation_name == 'cpython'",
"typing_extensions >= 3.7.4",
"mypy_extensions >= 0.4.2",
],
keywords=["async", "trio", "mypy"],
classifiers=[
"License :: OSI Approved :: MIT License",
"License :: OSI Approved :: Apache Software License",
"Framework :: Trio",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: BSD",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
],
)
| 37.104167 | 85 | 0.623807 |
c63408b682b376bffeec716ddbae0d475d97ccd9 | 6,231 | py | Python | pybind/slxos/v16r_1_00b/vrf/address_family/ipv6/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/vrf/address_family/ipv6/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/vrf/address_family/ipv6/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import unicast
class ipv6(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-vrf - based on the path /vrf/address-family/ipv6. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # NOTE: auto-generated pyangbind binding; regenerate from the YANG model
  # rather than editing this class by hand.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__unicast',)

  _yang_name = 'ipv6'
  _rest_name = 'ipv6'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit kwarg wins, then the parent's
    # helper, otherwise disabled (False).
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Same resolution order for extension methods.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Child container 'unicast' (presence container in the YANG model).
    self.__unicast = YANGDynClass(base=unicast.unicast, is_container='container', presence=True, yang_name="unicast", rest_name="unicast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 unicast address Family', u'cli-full-no': None, u'cli-add-mode': None, u'cli-full-command': None, u'callpoint': u'vrfAfIpv6Ucast', u'cli-mode-name': u'vrf-$(vrf-name)-ipv6-unicast'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='container', is_config=True)

    load = kwargs.pop("load", None)
    # Optional copy-construction from another object with the same elements.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # YANG schema path of this container within the data tree.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'vrf', u'address-family', u'ipv6']

  def _rest_path(self):
    # REST path; falls back to the parent's path when no rest name is set.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'vrf', u'address-family', u'ipv6']

  def _get_unicast(self):
    """
    Getter method for unicast, mapped from YANG variable /vrf/address_family/ipv6/unicast (container)
    """
    return self.__unicast

  def _set_unicast(self, v, load=False):
    """
    Setter method for unicast, mapped from YANG variable /vrf/address_family/ipv6/unicast (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_unicast is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_unicast() directly.
    """
    # Unwrap a previously-wrapped dynamic type before revalidating.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicast.unicast, is_container='container', presence=True, yang_name="unicast", rest_name="unicast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 unicast address Family', u'cli-full-no': None, u'cli-add-mode': None, u'cli-full-command': None, u'callpoint': u'vrfAfIpv6Ucast', u'cli-mode-name': u'vrf-$(vrf-name)-ipv6-unicast'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """unicast must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=unicast.unicast, is_container='container', presence=True, yang_name="unicast", rest_name="unicast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 unicast address Family', u'cli-full-no': None, u'cli-add-mode': None, u'cli-full-command': None, u'callpoint': u'vrfAfIpv6Ucast', u'cli-mode-name': u'vrf-$(vrf-name)-ipv6-unicast'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='container', is_config=True)""",
        })
    self.__unicast = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_unicast(self):
    # Reset the child container to its default (empty presence container).
    self.__unicast = YANGDynClass(base=unicast.unicast, is_container='container', presence=True, yang_name="unicast", rest_name="unicast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv6 unicast address Family', u'cli-full-no': None, u'cli-add-mode': None, u'cli-full-command': None, u'callpoint': u'vrfAfIpv6Ucast', u'cli-mode-name': u'vrf-$(vrf-name)-ipv6-unicast'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='container', is_config=True)

  unicast = __builtin__.property(_get_unicast, _set_unicast)

  _pyangbind_elements = {'unicast': unicast, }
| 50.25 | 591 | 0.706789 |
fbf10e292458d1c4322ee11a330dfcd162ab2c01 | 15,010 | py | Python | batch/getting-started-sample/src/batch_runner.py | manalotoj/python-samples | cadd00ccdb2218a82220484bfc28eb952043d75c | [
"MIT"
] | null | null | null | batch/getting-started-sample/src/batch_runner.py | manalotoj/python-samples | cadd00ccdb2218a82220484bfc28eb952043d75c | [
"MIT"
] | null | null | null | batch/getting-started-sample/src/batch_runner.py | manalotoj/python-samples | cadd00ccdb2218a82220484bfc28eb952043d75c | [
"MIT"
] | null | null | null | from __future__ import print_function
import datetime
from datetime import timedelta
import io
import os
import sys
import time
import local_config
import uuid
try:
input = raw_input
except NameError:
pass
import azure.storage.blob as azureblob
from azure.storage.blob import ContainerPermissions
# from azure.storage.blob import ResourceTypes, AccountSasPermissions
# from azure.storage.blob import generate_account_sas, BlobServiceClient
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batch_auth
import azure.batch.models as batchmodels
sys.path.append('.')
sys.path.append('..')
# Update the Batch and Storage account credential strings in config.py with values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
def get_container_url(container_name, account_name="stbatchofcookies"):
    """
    Returns the (unauthenticated) URL of a blob storage container.

    :param str container_name: The name of the Azure Blob storage container.
    :param str account_name: Storage account hosting the container.  Defaults
        to the sample's hard-coded account for backward compatibility; pass
        ``local_config._STORAGE_ACCOUNT_NAME`` to keep it in sync with the
        rest of the configuration.
    :rtype: str
    """
    url = "https://{0}.blob.core.windows.net/{1}".format(account_name,
                                                         container_name)
    return url
def get_secure_container_url(container_name, sas_token, account_name="stbatchofcookies"):
    """
    Returns the SAS-authenticated URL of a blob storage container.

    :param str container_name: The name of the Azure Blob storage container.
    :param str sas_token: A SAS token granting access to the container.
    :param str account_name: Storage account hosting the container.  Defaults
        to the sample's hard-coded account for backward compatibility; pass
        ``local_config._STORAGE_ACCOUNT_NAME`` to keep it in sync with the
        rest of the configuration.
    :rtype: str
    """
    url = "https://{0}.blob.core.windows.net/{1}?{2}".format(account_name,
                                                             container_name,
                                                             sas_token)
    return url
def create_blob_client():
    """
    Creates a block-blob service client for the sample's storage account.

    Bug fix: the original authenticated with the *Batch* account credentials
    (``_BATCH_ACCOUNT_NAME``/``_BATCH_ACCOUNT_KEY``), which belong to the
    Batch service rather than to blob storage.  The blob client constructed
    in ``__main__`` uses the storage account credentials, so this helper now
    does the same.

    :rtype: `azure.storage.blob.BlockBlobService`
    """
    blob_service_client = azureblob.BlockBlobService(
        account_name=local_config._STORAGE_ACCOUNT_NAME,
        account_key=local_config._STORAGE_ACCOUNT_KEY)
    return blob_service_client
# [START create_sas_token]
def create_sas_token(client, containerName):
    """
    Generates a read/write/list SAS token for the given container.

    The token is valid from one hour in the past (to absorb clock skew
    between machines) until one hour in the future.

    :param client: A blob service client.
    :type client: `azure.storage.blob.BlockBlobService`
    :param str containerName: The container to authorize.
    :rtype: str
    :return: The generated shared access signature.
    """
    now = datetime.datetime.utcnow()
    permissions = ContainerPermissions(write=True, read=True, list=True)
    return client.generate_container_shared_access_signature(
        containerName,
        permissions,
        expiry=now + timedelta(hours=1),
        start=now + timedelta(hours=-1), )
# [END create_sas_token]
def query_yes_no(question, default="yes"):
    """
    Prompts the user for yes/no input, displaying the specified question text.
    :param str question: The text of the prompt for input.
    :param str default: The default if the user hits <ENTER>. Acceptable values
    are 'yes', 'no', and None.
    :rtype: str
    :return: 'yes' or 'no'
    """
    # Map each acceptable default to the prompt suffix shown to the user.
    prompts = {None: ' [y/n] ', 'yes': ' [Y/n] ', 'no': ' [y/N] '}
    if default not in prompts:
        raise ValueError("Invalid default answer: '{}'".format(default))
    prompt = prompts[default]
    answers = {'y': 'yes', 'n': 'no'}
    while True:
        choice = input(question + prompt).lower()
        if not choice and default:
            return default
        # Only the first character matters ('y', 'yes', 'yep' all count).
        first = choice[:1]
        if first in answers:
            return answers[first]
        print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def print_batch_exception(batch_exception):
    """
    Prints the contents of the specified Batch exception.
    :param batch_exception:
    """
    divider = '-------------------------------------------'
    print(divider)
    print('Exception encountered:')
    error = batch_exception.error
    # Only print details when the service attached a populated message.
    if error and error.message and error.message.value:
        print(error.message.value)
        if error.values:
            print()
            for detail in error.values:
                print('{}:\t{}'.format(detail.key, detail.value))
    print(divider)
def upload_file_to_container(block_blob_client, container_name, file_path):
    """
    Uploads a local file to an Azure Blob storage container.
    :param block_blob_client: A blob service client.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str container_name: The name of the Azure Blob storage container.
    :param str file_path: The local path to the file.
    :rtype: `azure.batch.models.ResourceFile`
    :return: A ResourceFile initialized with a SAS URL appropriate for Batch
    tasks.
    """
    # The blob takes the file's basename; two inputs with the same basename
    # would overwrite each other in the container.
    blob_name = os.path.basename(file_path)
    print('Uploading file {} to container [{}]...'.format(file_path,
                                                          container_name))
    block_blob_client.create_blob_from_path(container_name,
                                            blob_name,
                                            file_path)
    # Read-only SAS valid for 2 hours lets compute nodes download the blob
    # without needing the storage account key.
    sas_token = block_blob_client.generate_blob_shared_access_signature(
        container_name,
        blob_name,
        permission=azureblob.BlobPermissions.READ,
        expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
    sas_url = block_blob_client.make_blob_url(container_name,
                                              blob_name,
                                              sas_token=sas_token)
    # file_path here is where the node stores the file, relative to the task
    # working directory -- not the local source path.
    return batchmodels.ResourceFile(http_url=sas_url, file_path=blob_name)
def get_container_sas_token(block_blob_client,
                            container_name, blob_permissions):
    """
    Obtains a shared access signature granting the specified permissions to the
    container.
    :param block_blob_client: A blob service client.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str container_name: The name of the Azure Blob storage container.
    :param BlobPermissions blob_permissions:
    :rtype: str
    :return: A SAS token granting the specified permissions to the container.
    """
    # No start time is specified, so the signature becomes valid immediately;
    # it expires two hours from now.
    expiry = datetime.datetime.utcnow() + datetime.timedelta(hours=2)
    return block_blob_client.generate_container_shared_access_signature(
        container_name,
        permission=blob_permissions,
        expiry=expiry)
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of compute nodes with the specified OS settings.
    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.

    The VM size and node count come from ``local_config`` (_POOL_VM_SIZE,
    _POOL_NODE_COUNT); the nodes run Ubuntu 18.04 from the Azure Marketplace.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(
                publisher="Canonical",
                offer="UbuntuServer",
                sku="18.04-LTS",
                version="latest"
            ),
            # The node agent SKU must match the OS of the chosen image.
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size=local_config._POOL_VM_SIZE,
        target_dedicated_nodes=local_config._POOL_NODE_COUNT
    )
    batch_service_client.pool.add(new_pool)
def create_job(batch_service_client, job_id, pool_id):
    """
    Creates a job with the specified ID, associated with the specified pool.
    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID for the job.
    :param str pool_id: The ID for the pool.
    """
    print('Creating job [{}]...'.format(job_id))
    pool_info = batch.models.PoolInformation(pool_id=pool_id)
    new_job = batch.models.JobAddParameter(id=job_id, pool_info=pool_info)
    batch_service_client.job.add(new_job)
def add_tasks(batch_service_client, job_id, input_files, output_container):
    """
    Adds one task per input file to the job.  Each task prints its input
    file to stdout; stdout/stderr are uploaded to *output_container* when
    the task completes.

    Bug fix: the original command line used the Windows shell
    (``cmd /c type ...``), but the pool created by ``create_pool`` runs
    Ubuntu 18.04, so every task would have failed.  The command now uses
    ``cat`` under ``/bin/bash``.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID of the job to add tasks to.
    :param list input_files: ResourceFiles staged for the tasks.
    :param str output_container: SAS-authenticated container URL that
        receives each task's stdout/stderr files.
    """
    tasks = list()

    for idx, input_file in enumerate(input_files):
        # Linux pool (Ubuntu 18.04) -> use bash/cat, not cmd/type.
        command = "/bin/bash -c \"cat {0}\"".format(input_file.file_path)
        tasks.append(
            {
                "id": 'Task{}'.format(idx),
                "commandLine": command,
                "resourceFiles": [input_file],
                "environmentSettings": [],
                # Run as a non-admin pool-scope auto user.
                "userIdentity": {
                    "autoUser": {
                        "scope": "pool",
                        "elevationLevel": "nonadmin"
                    }
                }
                ,
                # Upload stdout.txt/stderr.txt (one level above the working
                # directory) to the output container when the task finishes.
                "outputFiles": [
                    {
                        "destination":
                            {"container":
                                 {"containerUrl": output_container}
                             },
                        "filePattern": "../std*.txt",
                        "uploadOptions": {"uploadCondition": "taskcompletion"}
                    }
                ]
            }
        )
    batch_service_client.task.add_collection(job_id, tasks)
def wait_for_tasks_to_complete(batch_service_client, job_id, timeout):
    """
    Returns when all tasks in the specified job reach the Completed state.
    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The id of the job whose tasks should be to monitored.
    :param timedelta timeout: The duration to wait for task completion. If all
    tasks in the specified job do not reach Completed state within this time
    period, an exception will be raised.
    """
    deadline = datetime.datetime.now() + timeout

    print("Monitoring all tasks for 'Completed' state, timeout in {}..."
          .format(timeout), end='')

    # Poll once per second, printing a progress dot each time.
    while datetime.datetime.now() < deadline:
        print('.', end='')
        sys.stdout.flush()
        pending = [t for t in batch_service_client.task.list(job_id)
                   if t.state != batchmodels.TaskState.completed]
        if not pending:
            print()
            return True
        time.sleep(1)

    print()
    raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within "
                       "timeout period of " + str(timeout))
def print_task_output(batch_service_client, job_id, encoding=None):
    """Prints the stdout.txt file for each task in the job.
    :param batch_service_client: The batch client to use.
    :type batch_service_client: `batchserviceclient.BatchServiceClient`
    :param str job_id: The id of the job with task output files to print.
    :param str encoding: Encoding used to decode each file;
        `_read_stream_as_string` falls back to utf-8 when this is None.
    """

    print('Printing task output...')

    tasks = batch_service_client.task.list(job_id)

    for task in tasks:
        # Look up which compute node ran the task (informational only).
        node_id = batch_service_client.task.get(
            job_id, task.id).node_info.node_id
        print("Task: {}".format(task.id))
        print("Node: {}".format(node_id))

        # Stream the task's stdout.txt file from the node and decode it.
        stream = batch_service_client.file.get_from_task(
            job_id, task.id, local_config._STANDARD_OUT_FILE_NAME)

        file_text = _read_stream_as_string(
            stream,
            encoding)
        print("Standard output:")
        print(file_text)
def _read_stream_as_string(stream, encoding):
"""Read stream as string
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
output = io.BytesIO()
try:
for data in stream:
output.write(data)
if encoding is None:
encoding = 'utf-8'
return output.getvalue().decode(encoding)
finally:
output.close()
raise RuntimeError('could not write data to stream or decode bytes')
if __name__ == '__main__':

    start_time = datetime.datetime.now().replace(microsecond=0)
    print('Sample start: {}'.format(start_time))
    print()

    # get container sas token
    # NOTE(review): create_blob_client() authenticates with the *Batch*
    # account credentials while blob_client below uses the storage account
    # credentials -- confirm which account hosts the 'output' container.
    blob_service_client = create_blob_client()
    sas_token = create_sas_token(blob_service_client, 'output')
    print(sas_token)

    # Create the blob client, for use in obtaining references to
    # blob storage containers and uploading files to containers.
    blob_client = azureblob.BlockBlobService(
        account_name=local_config._STORAGE_ACCOUNT_NAME,
        account_key=local_config._STORAGE_ACCOUNT_KEY)

    # Use the blob client to create the containers in Azure Storage if they
    # don't yet exist.
    input_container_name = 'input'
    blob_client.create_container(input_container_name, fail_on_exist=False)

    # The collection of data files that are to be processed by the tasks.
    # sys.path[0] is the directory containing this script.
    input_file_paths = [os.path.join(sys.path[0], 'taskdata0.txt'),
                        os.path.join(sys.path[0], 'taskdata1.txt'),
                        os.path.join(sys.path[0], 'taskdata2.txt')]

    # Upload the data files.
    input_files = [
        upload_file_to_container(blob_client, input_container_name, file_path)
        for file_path in input_file_paths]

    # Create a Batch service client. We'll now be interacting with the Batch
    # service in addition to Storage
    credentials = batch_auth.SharedKeyCredentials(local_config._BATCH_ACCOUNT_NAME,
                                                  local_config._BATCH_ACCOUNT_KEY)

    batch_client = batch.BatchServiceClient(
        credentials,
        batch_url=local_config._BATCH_ACCOUNT_URL)

    try:
        # Create the pool that will contain the compute nodes that will execute the
        # tasks.  (Disabled: the sample assumes the pool already exists.)
        #create_pool(batch_client, local_config._POOL_ID)

        # Suffix with a UUID so repeated runs don't collide on job id.
        jobId = local_config._JOB_ID + "-" + str(uuid.uuid4())
        # Create the job that will run the tasks.
        create_job(batch_client, jobId, local_config._POOL_ID)

        # Add the tasks to the job.
        add_tasks(batch_client, jobId, input_files, get_secure_container_url("output", sas_token))

        # Pause execution until tasks reach Completed state.
        wait_for_tasks_to_complete(batch_client,
                                   jobId,
                                   datetime.timedelta(minutes=30))

        print(" Success! All tasks reached the 'Completed' state within the "
              "specified timeout period.")

        # Print the stdout.txt and stderr.txt files for each task to the console
        print_task_output(batch_client, jobId)

    except batchmodels.BatchErrorException as err:
        print_batch_exception(err)
        raise

    # Clean up storage resources
    # NOTE(review): if the try block above raised, execution never reaches
    # this cleanup and the input container / job are leaked; a finally block
    # would be more robust.
    print('Deleting container [{}]...'.format(input_container_name))
    blob_client.delete_container(input_container_name)

    # Print out some timing info
    end_time = datetime.datetime.now().replace(microsecond=0)
    print()
    print('Sample end: {}'.format(end_time))
    print('Elapsed time: {}'.format(end_time - start_time))
    print()

    # Clean up Batch resources (if the user so chooses).
    #if query_yes_no('Delete job?') == 'yes':
    batch_client.job.delete(jobId)

    # if query_yes_no('Delete pool?') == 'yes':
    #     batch_client.pool.delete(local_config._POOL_ID)

    print()
    input('Press ENTER to exit...')
| 35.738095 | 205 | 0.649967 |
bd1d178b0f8666f454209c2fafd802128d42c88d | 3,998 | py | Python | Federal/Formatter/DateFormatter.py | Jaseibert/Federal | 86a69d4247a1926f5632eb9b7c68ee80f20081ff | [
"MIT"
] | 1 | 2019-03-21T21:56:32.000Z | 2019-03-21T21:56:32.000Z | Federal/Formatter/DateFormatter.py | Jaseibert/Federal | 86a69d4247a1926f5632eb9b7c68ee80f20081ff | [
"MIT"
] | null | null | null | Federal/Formatter/DateFormatter.py | Jaseibert/Federal | 86a69d4247a1926f5632eb9b7c68ee80f20081ff | [
"MIT"
] | null | null | null | import datetime as dt
import re
class DateFormatter(object):
    """Parse start/end query dates supplied in several common formats.

    Accepted string formats are m/d/Y, d/m/Y and Y/m/d with '/', '-' or '.'
    as the field delimiter (m/d is tried before d/m, so '01/02/2020' is
    January 2nd).

    Bug fix: ``date_formatter`` used to raise ValueError when the probed
    delimiter was absent from the string.  ``start_date``/``end_date`` probe
    '/' first and catch ValueError at the top level, so any '-' or '.'
    delimited date was rejected with an error message instead of being
    parsed.  ``date_formatter`` now returns None for a non-matching
    delimiter and the callers try each delimiter in turn.
    """

    # Delimiters tried, in order, when auto-detecting a date format.
    _DELIMITERS = ('/', '-', '.')

    def __init__(self, start=None, end=None):
        # Parsed boundaries of the query window (datetime or None).
        self.start = start
        self.end = end

    ############################################################################################
    # DateTime Formatting
    ############################################################################################

    def date_formatter(self, dates, delim):
        """Parse *dates* using *delim* as the field separator.

        Tries m/d/Y, then d/m/Y, then Y/m/d.

        :param dates: date string (coerced with str()).
        :param str delim: single-character delimiter to use.
        :return: the parsed datetime, or None when *dates* does not contain
            *delim* at all.
        :raises ValueError: when the delimiter is present but none of the
            supported orderings match.
        """
        if re.search(r"[{}]".format(re.escape(delim)), dates) is None:
            return None
        for fmt in ('%m{0}%d{0}%Y', '%d{0}%m{0}%Y', '%Y{0}%m{0}%d'):
            try:
                return dt.datetime.strptime(str(dates), fmt.format(delim))
            except ValueError:
                continue
        raise ValueError('Date cannot be formatted')

    def _parse_any(self, date):
        """Try every supported delimiter; return a datetime.

        :raises ValueError: when no delimiter/format combination matches.
        """
        for delim in self._DELIMITERS:
            parsed = self.date_formatter(date, delim)
            if parsed is not None:
                return parsed
        raise ValueError('Not a Valid Date. It must be (m/d/y), (d/m/y), or (y/m/d)..')

    ############################################################################################
    # StartDate Formatting
    ############################################################################################

    def start_date(self, date=None, year=None, month=None, day=None, full=False):
        """ This function defines the start date for the query.

        Priority: explicit *date* string, then *full* (1800-01-01), then
        year/month/day components.  On a parse failure a message is printed
        and None is returned (preserving the original error-handling style).
        """
        try:
            if date is not None:
                self.start = self._parse_any(date)
            elif full is not False:
                # "Full history" sentinel start used by the query layer.
                self.start = dt.datetime.strptime('1800-01-01', '%Y-%m-%d')
            else:
                self.start = dt.datetime(year, month, day)
            return self.start
        except ValueError:
            print("Check the date that you submitted. It must be (m/d/y), (d/m/y), or (y/m/d)..")

    ############################################################################################
    # EndDate Formatting
    ############################################################################################

    def end_date(self, date=None, year=None, month=None, day=None, full=False):
        """ This function defines the end date for the query.

        Priority: explicit *date* string, then *full* (now), then
        year/month/day components.  On a parse failure a message is printed
        and None is returned.
        """
        try:
            if date is not None:
                self.end = self._parse_any(date)
            elif full is not False:
                # "Full history" end is simply the current moment.
                self.end = dt.datetime.now()
            else:
                self.end = dt.datetime(year, month, day)
            return self.end
        except ValueError:
            print("Check the date that you submitted. It must be (m/d/y), (d/m/y), or (y/m/d)..")
| 44.921348 | 99 | 0.426463 |
daa865fff3df2b2dd67cb2dc14bcb6d89b969e19 | 4,567 | py | Python | leetcode_python/Greedy/patching-array.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Greedy/patching-array.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Greedy/patching-array.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | """
330. Patching Array
Hard
Given a sorted integer array nums and an integer n, add/patch elements to the array such that any number in the range [1, n] inclusive can be formed by the sum of some elements in the array.
Return the minimum number of patches required.
Example 1:
Input: nums = [1,3], n = 6
Output: 1
Explanation:
Combinations of nums are [1], [3], [1,3], which form possible sums of: 1, 3, 4.
Now if we add/patch 2 to nums, the combinations are: [1], [2], [3], [1,3], [2,3], [1,2,3].
Possible sums are 1, 2, 3, 4, 5, 6, which now covers the range [1, 6].
So we only need 1 patch.
Example 2:
Input: nums = [1,5,10], n = 20
Output: 2
Explanation: The two patches can be [2, 4].
Example 3:
Input: nums = [1,2,2], n = 5
Output: 0
Constraints:
1 <= nums.length <= 1000
1 <= nums[i] <= 104
nums is sorted in ascending order.
1 <= n <= 231 - 1
"""
# V0
# V1
# https://blog.csdn.net/zml66666/article/details/113185521
# V1'
# IDEA : GREEDY
# https://leetcode.com/problems/patching-array/discuss/1432712/Python-greedy
class Solution:
    def minPatches(self, nums, n):
        """Return the minimum number of patches so sums cover [1, n].

        Greedy: ``covered`` is the largest value such that every integer in
        [1, covered] is already formable.  If the next array element fits
        (<= covered + 1) it extends the range for free; otherwise patch with
        covered + 1, which doubles the covered range (+1).

        Fix: the original signature was annotated with ``List[int]`` without
        importing ``typing.List``, which raises NameError at class-definition
        time when this file is executed; the unused annotations are removed.
        """
        covered = 0   # every value in [1, covered] is representable
        patches = 0
        i = 0
        while covered < n:
            if i >= len(nums) or nums[i] > covered + 1:
                # Gap at covered + 1: patch with that value.
                covered += covered + 1
                patches += 1
            else:
                # nums[i] extends the contiguous range for free.
                covered += nums[i]
                i += 1
        return patches
# V1'
# IDEA : GREEDY WINDOW
# https://leetcode.com/problems/patching-array/discuss/266608/Python-greedy-window
class Solution:
    def minPatches(self, nums, n):
        """Greedy 'window' variant: grow the reachable prefix sum.

        ``reach`` is the largest value such that [1, reach] is representable.
        Consume nums[index] whenever it is small enough to keep the range
        contiguous (nums[index] <= reach + 1); otherwise patch with
        reach + 1 (reach becomes 2*reach + 1).

        Fix: removed the ``List[int]`` annotation, which raised NameError at
        class-definition time because ``typing.List`` was never imported.
        """
        reach = 0
        index = 0
        res = 0
        while reach < n:
            if index < len(nums) and nums[index] <= reach + 1:
                reach += nums[index]
                index += 1
            else:
                reach = reach + reach + 1
                res += 1
        return res
# V1''
# https://leetcode.com/problems/patching-array/discuss/78547/Greedy-solution-in-Python
# IDEA :
# I used a greedy algorithm. While traversing the given number list, treat each number as both a goal and a resource. At the ith number, first add patches so that every number in the range [1, nums[i]) can be represented; then add the ith number itself to the resources for later iterations.
# To reach the goal: if all the resources (the numbers smaller than the goal) sum to some value `sum`, then `sum + 1` is the smallest number we cannot represent. So we patch with `sum + 1`, after which every number up to `sum + sum + 1` is representable.
class Solution(object):
    def minPatches(self, nums, n):
        """Return the minimum number of patches so sums cover [1, n].

        ``covered`` is the sum of all usable numbers seen so far, meaning
        every value in [1, covered] is representable.  Before consuming an
        array element x, patch with covered + 1 until the covered range
        reaches x - 1, so that x itself extends a contiguous range.

        :type nums: List[int]
        :type n: int
        :rtype: int
        """
        patches = 0
        covered = 0
        for x in nums:
            if covered >= n:
                return patches
            # Close the gap (covered, x) before using x as a resource.
            while covered < x - 1:
                patches += 1
                covered = 2 * covered + 1   # patch with covered + 1
                if covered >= n:
                    return patches
            covered += x
        # Keep doubling until [1, n] is fully covered.
        while covered < n:
            patches += 1
            covered = 2 * covered + 1
        return patches
# V1''''
# https://leetcode.com/problems/patching-array/discuss/78514/Python-solution
class Solution(object):
def minPatches(self, nums, n):
i=0
count=0
s=1
while s<=n:
if i<len(nums) and s>=nums[i]:
s+=nums[i]
i+=1
else:
s<<=1
count+=1
return count
# V1''''
# https://leetcode.com/problems/patching-array/solution/
# JAVA
# public class Solution {
# public int minPatches(int[] nums, int n) {
# int patches = 0, i = 0;
# long miss = 1; // use long to avoid integer overflow error
# while (miss <= n) {
# if (i < nums.length && nums[i] <= miss) // miss is covered
# miss += nums[i++];
# else { // patch miss to the array
# miss += miss;
# patches++; // increase the answer
# }
# }
# return patches;
# }
# }
# V2 | 30.245033 | 312 | 0.556164 |
0eadece991a1bc60994e6457acbdfc19aa90a82f | 1,206 | py | Python | office365/entity_collection.py | andrewcchoi/Office365-REST-Python-Client | 43db12ae532c804c75a3a34f7b0d7d79e30fdac3 | [
"MIT"
] | null | null | null | office365/entity_collection.py | andrewcchoi/Office365-REST-Python-Client | 43db12ae532c804c75a3a34f7b0d7d79e30fdac3 | [
"MIT"
] | null | null | null | office365/entity_collection.py | andrewcchoi/Office365-REST-Python-Client | 43db12ae532c804c75a3a34f7b0d7d79e30fdac3 | [
"MIT"
] | null | null | null | from office365.runtime.client_object_collection import ClientObjectCollection
from office365.runtime.queries.create_entity_query import CreateEntityQuery
from office365.runtime.resource_path import ResourcePath
class EntityCollection(ClientObjectCollection):
def __getitem__(self, key):
"""
:param key: key is used to address a Entity resource by either an index in collection
or by resource id
:type key: int or str
:rtype: EntityCollection
"""
if type(key) == int:
return super(EntityCollection, self).__getitem__(key)
return self._item_type(self.context, ResourcePath(key, self.resource_path))
def add_from_json(self, params):
"""Creates a Entity resource from JSON
:type params: dict
:rtype: office365.entity.Entity
"""
entity_object = self._item_type(self.context)
self.add_child(entity_object)
qry = CreateEntityQuery(self, params, entity_object)
self.context.add_query(qry)
return entity_object
@property
def context(self):
"""
:rtype: office365.graph_client.GraphClient
"""
return self._context
| 31.736842 | 93 | 0.676617 |
aec54e7963ebd2fcf50ec9a861f2a6bc32b8310e | 817 | py | Python | rtcbproj/rtcb/tournament/models.py | RedTurtle/rtcb-backend | f097eae54a12ba4f3983869fef627ea1d55a37d1 | [
"Apache-2.0"
] | null | null | null | rtcbproj/rtcb/tournament/models.py | RedTurtle/rtcb-backend | f097eae54a12ba4f3983869fef627ea1d55a37d1 | [
"Apache-2.0"
] | 6 | 2020-02-11T22:47:25.000Z | 2021-06-09T17:29:29.000Z | rtcbproj/rtcb/tournament/models.py | RedTurtle/rtcb-backend | f097eae54a12ba4f3983869fef627ea1d55a37d1 | [
"Apache-2.0"
] | 1 | 2018-10-05T07:54:18.000Z | 2018-10-05T07:54:18.000Z | # -*- coding: utf-8 -*-
from django.db import models
from rtcb.team.models import Team
class Tournament(models.Model):
name = models.CharField(
verbose_name="Tournament name",
max_length=50,
)
teams = models.ManyToManyField(
Team,
verbose_name="Squadre",
related_name="tournaments",
related_query_name="tournament",
)
def __str__(self):
return self.name
class Round(models.Model):
""" Giornata del torneo
"""
title = models.CharField(
max_length=50,
null=True,
blank=True,
)
counter = models.PositiveIntegerField(
default=0
)
tournament = models.ForeignKey(
Tournament,
related_name='tournament',
null=True,
on_delete=models.CASCADE,
)
| 19 | 42 | 0.598531 |
8b63bf78c58576eac80f86f208b01673adcd65ee | 1,529 | py | Python | bitmovin_api_sdk/encoding/encodings/streams/inputs/inputs_api.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/encoding/encodings/streams/inputs/inputs_api.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/encoding/encodings/streams/inputs/inputs_api.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.encoding_stream_input import EncodingStreamInput
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
class InputsApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(InputsApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def list(self, encoding_id, stream_id, **kwargs):
# type: (string_types, string_types, dict) -> EncodingStreamInput
"""Stream Input Analysis Details
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param stream_id: Id of the stream.
:type stream_id: string_types, required
:return: List of input analysis details
:rtype: EncodingStreamInput
"""
return self.api_client.get(
'/encoding/encodings/{encoding_id}/streams/{stream_id}/inputs',
path_params={'encoding_id': encoding_id, 'stream_id': stream_id},
pagination_response=True,
type=EncodingStreamInput,
**kwargs
)
| 35.55814 | 80 | 0.691956 |
28a5825e9d493b78c71d8fb6f18ee5a77e3490c1 | 12,975 | py | Python | Data_setting & Analysis/Final_Presentation/airpy/data.py | UPRMG/Classification_Airbnb | 2113199fd37c798bdec49402cef9238821168f33 | [
"MIT"
] | 1 | 2018-04-21T03:25:03.000Z | 2018-04-21T03:25:03.000Z | Data_setting & Analysis/airpy/data.py | UPRMG/Classification_Airbnb | 2113199fd37c798bdec49402cef9238821168f33 | [
"MIT"
] | null | null | null | Data_setting & Analysis/airpy/data.py | UPRMG/Classification_Airbnb | 2113199fd37c798bdec49402cef9238821168f33 | [
"MIT"
] | null | null | null |
# coding: utf-8
# ### Import
# In[1]:
from bs4 import BeautifulSoup
import requests
import numpy as np
import pandas as pd
import xgboost
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn.metrics import *
from IPython.core.display import Image
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
from sklearn.tree import export_graphviz
import io
from sklearn.preprocessing import Imputer
import pydot
from sklearn import preprocessing
import lightgbm as lgb
from scipy.stats import mode
import re
from datetime import datetime
from lightgbm import plot_importance
import warnings
warnings.filterwarnings('ignore')
# ---
# ### Date read
# In[12]:
age_gender_bkts = pd.read_csv("age_gender_bkts.csv")
countries = pd.read_csv("countries.csv")
sessions = pd.read_csv("sessions.csv")
test_users = pd.read_csv("test_users.csv")
train_users_2 = pd.read_csv("train_users_2.csv")
sample_submission_NDF = pd.read_csv("sample_submission_NDF.csv")
merged_sessions = pd.read_csv("merged_sessions.csv")
# ---
# ### Date setting - Base1
# In[13]:
def pre_age_set_data(train_users_2, test_users):
check = pd.concat([train_users_2, test_users], ignore_index=True)
check["first_affiliate_tracked"] = check["first_affiliate_tracked"].replace(np.nan, "untracked")
check["date_account_created"] = pd.to_datetime(check["date_account_created"], format = "%Y-%m-%d")
check["timestamp_first_active"] = pd.to_datetime(check["timestamp_first_active"], format="%Y%m%d%H%M%S")
s_lag = check["timestamp_first_active"] - check["date_account_created"]
check["lag_days"] = s_lag.apply(lambda x : -1 * x.days)
check["lag_seconds"] = s_lag.apply(lambda x : x.seconds)
s_all_check = (check['age'] < 120) & (check['gender'] != '-unknown-')
check['faithless_sign'] = s_all_check.apply(lambda x : 0 if x == True else 1)
pre_age = check.drop("date_first_booking",axis = 1)
pre_age['date_account_created_y'] = pre_age["date_account_created"].apply(lambda x : x.year)
pre_age['date_account_created_m'] = pre_age["date_account_created"].apply(lambda x : x.month)
pre_age['date_account_created_d'] = pre_age["date_account_created"].apply(lambda x : x.day)
pre_age['timestamp_first_active_y'] = pre_age["timestamp_first_active"].apply(lambda x : x.year)
pre_age['timestamp_first_active_m'] = pre_age["timestamp_first_active"].apply(lambda x : x.month)
pre_age['timestamp_first_active_d'] = pre_age["timestamp_first_active"].apply(lambda x : x.day)
pre_age = pre_age.drop("date_account_created" , axis=1)
pre_age = pre_age.drop("timestamp_first_active" , axis=1)
return check, pre_age
# ---
# ### Date setting - Base2
# In[14]:
def pre_age_predict_data(pre_age):
pre_age['age'] = pre_age['age'].fillna(-1)
pre_age_sub = pre_age.filter(items = ['age', 'country_destination','id'])
pre_age_dum = pre_age.filter(items = ['affiliate_channel', 'affiliate_provider',
'first_affiliate_tracked', 'first_browser', 'first_device_type',
'language', 'signup_app', 'signup_flow',
'signup_method', 'date_account_created_y', 'date_account_created_m',
'date_account_created_d', 'timestamp_first_active_y',
'timestamp_first_active_m', 'timestamp_first_active_d',"lag_days","lag_seconds",
"faithless_sign"])
pre_age_dum[['date_account_created_y', 'date_account_created_m', 'date_account_created_d', 'timestamp_first_active_y','timestamp_first_active_m', 'timestamp_first_active_d']] = pre_age_dum[['date_account_created_y', 'date_account_created_m', 'date_account_created_d', 'timestamp_first_active_y', 'timestamp_first_active_m', 'timestamp_first_active_d']].astype(str)
pre_age_dum = pd.get_dummies(pre_age_dum)
pre_age_dum_con = pd.concat([pre_age_dum, pre_age_sub], axis=1)
pre_age_dum_con["age"] = pre_age_dum_con["age"].replace(-1, np.nan)
pre_age_mission = pre_age_dum_con[pre_age_dum_con["age"].isna()].reset_index()
pre_age_train = pre_age_dum_con[pre_age_dum_con["age"].notna()].reset_index()
pre_age_mission_test = pre_age_mission.drop("index", axis=1)
pre_age_train_test = pre_age_train.drop("index", axis=1)
pre_age_mission_test_drop = pre_age_mission_test.drop(['id', 'age', 'country_destination'], axis=1)
pre_age_train_test_drop = pre_age_train_test.drop(['id', 'age', 'country_destination'], axis=1)
return pre_age_mission_test, pre_age_train_test, pre_age_mission, pre_age_train, pre_age_mission_test_drop, pre_age_train_test_drop
# In[15]:
def pre_age_predict_data_cat(pre_age_train):
bins = [0, 15, 25, 35, 60, 9999]
labels = ["미성년자", "청년", "중년", "장년", "노년"]
cats = pd.cut(pre_age_train['age'], bins, labels=labels)
cats = pd.DataFrame(cats)
return cats
# ---
# ### Predict gender data setting - Only gender
# In[16]:
def add_gender(pre_age):
pred_gen_data = pd.read_csv("model_gen_lgb.csv")
pre_gen_sub = pre_age.filter(items = ['age', 'country_destination', 'id', 'gender'])
pre_gen_dum = pre_age.filter(items = ['affiliate_channel', 'affiliate_provider',
'first_affiliate_tracked', 'first_browser', 'first_device_type',
'language', 'signup_app', 'signup_flow',
'signup_method', 'date_account_created_y', 'date_account_created_m',
'date_account_created_d', 'timestamp_first_active_y',
'timestamp_first_active_m', 'timestamp_first_active_d',"lag_days","lag_seconds",
"faithless_sign"])
pre_gen_dum = pd.get_dummies(pre_gen_dum)
pre_gen_dum_con = pd.concat([pre_gen_dum, pre_gen_sub], axis=1)
pre_gen_dum_con["gender"] = pre_gen_dum_con["gender"].replace(['-unknown-', 'OTHER'], np.nan)
pre_gen_mission = pre_gen_dum_con[pre_gen_dum_con["gender"].isna()].reset_index()
pre_gen_train = pre_gen_dum_con[pre_gen_dum_con["gender"].notna()].reset_index()
pre_gen_mission_test = pre_gen_mission.drop("index", axis=1)
pre_gen_train_test = pre_gen_train.drop("index", axis=1)
pre_gen_mission_test_drop = pre_gen_mission_test.drop(['id', 'age', 'country_destination', "gender"], axis=1)
pre_gen_train_test_drop = pre_gen_train_test.drop(['id', 'age', 'country_destination', "gender"], axis=1)
pre_gen_mission_test_la = pd.concat([pre_gen_mission_test, pred_gen_data], axis=1)
pre_gen_mission_test_la = pre_gen_mission_test_la.drop("gender", axis=1)
pre_gen_mission_test_la = pre_gen_mission_test_la.rename(columns={"0": 'gender'})
last_gen_add = pd.concat([pre_gen_mission_test_la, pre_gen_train_test])
last_gen_add = last_gen_add.filter(items = ["id",'gender'])
return last_gen_add
# ---
# ### Holiday, Weekend, Day of week data setting - Only Holiday
# In[17]:
def holiday(train_users_2, test_users):
def get_holidays(year):
response = requests.get("https://www.timeanddate.com/calendar/custom.html?year="+str(year)+" &country=1&cols=3&df=1&hol=25")
dom = BeautifulSoup(response.content, "html.parser")
trs = dom.select("table.cht.lpad tr")
df = pd.DataFrame(columns=["date", "holiday"])
for tr in trs:
datestr = tr.select_one("td:nth-of-type(1)").text
date = datetime.strptime("{} {}".format(year, datestr), '%Y %b %d')
holiday = tr.select_one("td:nth-of-type(2)").text
df.loc[len(df)] = {"date" : date, "holiday": 1}
return df
holiday_ls = []
for year in range(2009, 2015):
df = get_holidays(year)
holiday_ls.append(df)
holiday_df = pd.concat(holiday_ls)
check = pd.concat([train_users_2, test_users], ignore_index=True)
check["timestamp_first_active"] = check["timestamp_first_active"].apply(lambda x : str(x)[:8])
pre_age_hol = check.filter(items=['id','timestamp_first_active'])
pre_age_hol['week'] = pd.to_datetime(check["timestamp_first_active"], format="%Y-%m-%d")
pre_age_hol["week"] = pre_age_hol['week'].dt.weekday
pre_age_hol["weekend"] = pre_age_hol["week"].apply(lambda x : 1 if x>=5 else 0)
pre_age_hol_dum = pd.get_dummies(pre_age_hol['week'])
hdfd = pd.concat([pre_age_hol,pre_age_hol_dum],axis=1)
hdfd = hdfd.drop("week",axis=1)
hdfd = hdfd.rename(columns={0:"mon",1:"tue",2:"wed",3:"thur",4:"fri",5:"sat",6:"sun"})
hdfd['timestamp_first_active'] = pd.to_datetime(hdfd["timestamp_first_active"])
add_hol = pd.merge(hdfd, holiday_df, left_on='timestamp_first_active', right_on="date", how="left")
add_hol = add_hol.drop(["timestamp_first_active",'date'],axis=1)
add_hol = add_hol.fillna(0)
return add_hol
# ---
# ### Predict age data setting - Merge (age+gender+holiday)
# In[8]:
# model_age_forest
# model_age_xg
# model_age_lgb
def predict_age_add(pre_age_mission_test, pre_age_train_test, last_gen_add, add_hol):
pred_age_data = pd.read_csv("model_age_lgb.csv")
pre_age_mission_test_la = pd.concat([pre_age_mission_test, pred_age_data], axis=1)
pre_age_mission_test_la = pre_age_mission_test_la.drop("age", axis=1)
# pre_age_mission_test_la["0"] = pre_age_mission_test_la["0"].replace({'age1':25,"age2":29,"age3":34,\
# "age4":40,"age5":55})
pre_age_mission_test_la["0"] = pre_age_mission_test_la["0"].replace({'미성년자':10,"청년":25,"중년":35, "장년":45,"노년":60})
pre_age_mission_test_la = pre_age_mission_test_la.rename(columns={"0": 'age'})
pre_age_train_test_la = pre_age_train_test.drop("age", axis=1)
pre_age_train_test_la['age'] = pre_age_train_test["age"]
last_age_add = pd.concat([pre_age_mission_test_la, pre_age_train_test_la])
train_set = train_users_2['id']
train_set = pd.DataFrame(train_set)
test_set = test_users['id']
test_set = pd.DataFrame(test_set)
last_gen_add_dum = pd.get_dummies(last_gen_add["gender"])
last_gen_add_dum = pd.concat([last_gen_add['id'], last_gen_add_dum], axis=1)
last_train_data = pd.merge(train_set, last_age_add, on="id", how="left")
last_train_data = pd.merge(last_train_data, last_gen_add_dum, on="id", how="left")
last_test_data = pd.merge(test_set, last_age_add, on="id", how="left")
last_test_data = pd.merge(last_test_data, last_gen_add_dum, on="id", how="left")
last_train_data = pd.merge(last_train_data, add_hol, on='id', how="left")
last_test_data = pd.merge(last_test_data, add_hol, on='id', how="left")
le = preprocessing.LabelEncoder()
y_label = le.fit_transform(last_train_data["country_destination"])
return last_train_data, last_test_data, y_label, le
# ---
# ### All data merge and make CSV - Last
# In[9]:
def last_data_setting(last_train_data, last_test_data):
merged_sessions = pd.read_csv("merged_sessions.csv")
merged_sessions_dum = merged_sessions.drop(['id','secs_elapsed','secs_sum','secs_mean'], axis=1)
merged_sessions_dum = pd.get_dummies(merged_sessions_dum)
ses_dum = pd.concat([merged_sessions_dum,merged_sessions[['id','secs_elapsed','secs_sum','secs_mean']]],axis=1)
last_train_data_add = pd.merge(last_train_data, ses_dum, on="id", how="left")
last_test_data_add = pd.merge(last_test_data, ses_dum, on="id", how="left")
## impute the missing value using median
impute_list = last_test_data_add.columns.tolist()
impute_list.remove("id")
impute_list.remove("country_destination")
imp = Imputer(missing_values='NaN', strategy='median', axis=0)
last_train_data_add[impute_list] = imp.fit_transform(last_train_data_add[impute_list])
last_test_data_add[impute_list] = imp.fit_transform(last_test_data_add[impute_list])
last_train_data_add.to_csv("last_train_data_add.csv", index=False)
last_test_data_add.to_csv("last_test_data_add.csv", index=False)
return last_train_data_add, last_test_data_add
# ---
| 38.731343 | 566 | 0.666821 |
430916cacfa1c56e26a1c03a6b41293f5e172a28 | 5,449 | py | Python | clients/kratos/python/ory_kratos_client/exceptions.py | extraymond/sdk | d4e9ffe7335648b7af3fb8d4363e7991d1ba36b5 | [
"Apache-2.0"
] | null | null | null | clients/kratos/python/ory_kratos_client/exceptions.py | extraymond/sdk | d4e9ffe7335648b7af3fb8d4363e7991d1ba36b5 | [
"Apache-2.0"
] | null | null | null | clients/kratos/python/ory_kratos_client/exceptions.py | extraymond/sdk | d4e9ffe7335648b7af3fb8d4363e7991d1ba36b5 | [
"Apache-2.0"
] | null | null | null | """
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.7.0-alpha.1
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
class OpenApiException(Exception):
"""The base exception class for all OpenAPIExceptions"""
class ApiTypeError(OpenApiException, TypeError):
def __init__(self, msg, path_to_item=None, valid_classes=None,
key_type=None):
""" Raises an exception for TypeErrors
Args:
msg (str): the exception message
Keyword Args:
path_to_item (list): a list of keys an indices to get to the
current_item
None if unset
valid_classes (tuple): the primitive classes that current item
should be an instance of
None if unset
key_type (bool): False if our value is a value in a dict
True if it is a key in a dict
False if our item is an item in a list
None if unset
"""
self.path_to_item = path_to_item
self.valid_classes = valid_classes
self.key_type = key_type
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiTypeError, self).__init__(full_msg)
class ApiValueError(OpenApiException, ValueError):
def __init__(self, msg, path_to_item=None):
"""
Args:
msg (str): the exception message
Keyword Args:
path_to_item (list) the path to the exception in the
received_data dict. None if unset
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiValueError, self).__init__(full_msg)
class ApiAttributeError(OpenApiException, AttributeError):
def __init__(self, msg, path_to_item=None):
"""
Raised when an attribute reference or assignment fails.
Args:
msg (str): the exception message
Keyword Args:
path_to_item (None/list) the path to the exception in the
received_data dict
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiAttributeError, self).__init__(full_msg)
class ApiKeyError(OpenApiException, KeyError):
def __init__(self, msg, path_to_item=None):
"""
Args:
msg (str): the exception message
Keyword Args:
path_to_item (None/list) the path to the exception in the
received_data dict
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiKeyError, self).__init__(full_msg)
class ApiException(OpenApiException):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""Custom error messages for exception"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(
self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
class NotFoundException(ApiException):
def __init__(self, status=None, reason=None, http_resp=None):
super(NotFoundException, self).__init__(status, reason, http_resp)
class UnauthorizedException(ApiException):
def __init__(self, status=None, reason=None, http_resp=None):
super(UnauthorizedException, self).__init__(status, reason, http_resp)
class ForbiddenException(ApiException):
def __init__(self, status=None, reason=None, http_resp=None):
super(ForbiddenException, self).__init__(status, reason, http_resp)
class ServiceException(ApiException):
def __init__(self, status=None, reason=None, http_resp=None):
super(ServiceException, self).__init__(status, reason, http_resp)
def render_path(path_to_item):
"""Returns a string representation of a path"""
result = ""
for pth in path_to_item:
if isinstance(pth, int):
result += "[{0}]".format(pth)
else:
result += "['{0}']".format(pth)
return result
| 33.84472 | 446 | 0.617361 |
f0402ad2bcf38d52e551f2ab8a4508747eac269f | 1,384 | py | Python | Exercicios em python/ex95.py | GabrielSantos25/Python | 208eec0144587aa4e0aa7fa00da29ffa0478eac8 | [
"MIT"
] | null | null | null | Exercicios em python/ex95.py | GabrielSantos25/Python | 208eec0144587aa4e0aa7fa00da29ffa0478eac8 | [
"MIT"
] | null | null | null | Exercicios em python/ex95.py | GabrielSantos25/Python | 208eec0144587aa4e0aa7fa00da29ffa0478eac8 | [
"MIT"
] | null | null | null | time = list()
jogador = dict()
partidas = list()
while True:
jogador.clear()
jogador['nome'] = str(input('Nome do jogador: '))
tot = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
partidas.clear()
for c in range(0, tot):
partidas.append(int(input(f'Quantos gols na partida {c+1}? ')))
jogador['gols'] = partidas[:]
jogador['total'] = sum(partidas)
time.append(jogador.copy())
while True:
resp = str(input('Quer continuar (S/N)? ')).upper()[0]
if resp in 'SN':
break
print('Erro! Responda apenas S ou N.')
if resp == 'N':
break
# cabeçalho {
print('-='*30)
print('Cod ', end='')
for i in jogador.keys():
print(f'{i:<15}',end='')
print()
# }
# tabela {
print('-'*40)
for k, v in enumerate(time):
print(f'{k:>3} ',end='')
for d in v.values():
print(f'{str(d):<15}', end='')
print()
print('-'*40)
# }
# Busca de dados {
while True:
busca = int(input('Mostrar dados de qual jogador? '))
if busca == 999:
break
if busca >= len(time):
print(f'Erro! Não existe jogador com codigo {busca}!')
else:
print(f' -- LEVANTAMENTO DO JOGADOR {time[busca]["nome"]}:')
for i, g in enumerate(time[busca]['gols']):
print(f' No jogo {i+1} fez {g} gols.')
print('-'*40)
# }
print('<< ENCERRADO >>') | 25.163636 | 71 | 0.540462 |
65194962c99b1598a22d23cf2c9a5a4aaa2cd730 | 2,585 | py | Python | Global/detection_models/antialiasing.py | abdullahselek/Bringing-Old-Photos-Back-to-Life | 41491171487a08121038e8c08f4bdc218d7d16e6 | [
"MIT"
] | 1 | 2022-03-17T05:08:25.000Z | 2022-03-17T05:08:25.000Z | Global/detection_models/antialiasing.py | abdullahselek/Bringing-Old-Photos-Back-to-Life | 41491171487a08121038e8c08f4bdc218d7d16e6 | [
"MIT"
] | null | null | null | Global/detection_models/antialiasing.py | abdullahselek/Bringing-Old-Photos-Back-to-Life | 41491171487a08121038e8c08f4bdc218d7d16e6 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn.parallel
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class Downsample(nn.Module):
# https://github.com/adobe/antialiased-cnns
def __init__(
self, pad_type="reflect", filt_size=3, stride=2, channels=None, pad_off=0
):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [
int(1.0 * (filt_size - 1) / 2),
int(np.ceil(1.0 * (filt_size - 1) / 2)),
int(1.0 * (filt_size - 1) / 2),
int(np.ceil(1.0 * (filt_size - 1) / 2)),
]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.0)
self.channels = channels
# print('Filter size [%i]'%filt_size)
if self.filt_size == 1:
a = np.array(
[
1.0,
]
)
elif self.filt_size == 2:
a = np.array([1.0, 1.0])
elif self.filt_size == 3:
a = np.array([1.0, 2.0, 1.0])
elif self.filt_size == 4:
a = np.array([1.0, 3.0, 3.0, 1.0])
elif self.filt_size == 5:
a = np.array([1.0, 4.0, 6.0, 4.0, 1.0])
elif self.filt_size == 6:
a = np.array([1.0, 5.0, 10.0, 10.0, 5.0, 1.0])
elif self.filt_size == 7:
a = np.array([1.0, 6.0, 15.0, 20.0, 15.0, 6.0, 1.0])
filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
self.register_buffer(
"filt", filt[None, None, :, :].repeat((self.channels, 1, 1, 1))
)
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if self.filt_size == 1:
if self.pad_off == 0:
return inp[:, :, :: self.stride, :: self.stride]
else:
return self.pad(inp)[:, :, :: self.stride, :: self.stride]
else:
return F.conv2d(
self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1]
)
def get_pad_layer(pad_type):
if pad_type in ["refl", "reflect"]:
PadLayer = nn.ReflectionPad2d
elif pad_type in ["repl", "replicate"]:
PadLayer = nn.ReplicationPad2d
elif pad_type == "zero":
PadLayer = nn.ZeroPad2d
else:
print("Pad type [%s] not recognized" % pad_type)
return PadLayer
| 31.91358 | 81 | 0.518762 |
c3c85cc789675966c20fdd88b5e867529058d1de | 5,454 | py | Python | raspberry/run.py | tienthegainz/OutOfStockSystem | 84919ceded5b2e267535a6584b14423d6c5d102e | [
"MIT"
] | 5 | 2021-04-07T09:58:18.000Z | 2021-11-14T02:06:16.000Z | raspberry/run.py | tienthegainz/OutOfStockSystem | 84919ceded5b2e267535a6584b14423d6c5d102e | [
"MIT"
] | 1 | 2022-03-12T01:02:05.000Z | 2022-03-12T01:02:05.000Z | raspberry/run.py | tienthegainz/OutOfStockSystem | 84919ceded5b2e267535a6584b14423d6c5d102e | [
"MIT"
] | null | null | null | import io
import time
import picamera
from base64 import b64encode
import requests
import numpy as np
import cv2
import sys
import signal
import socketio
socket_url = 'http://10.42.0.1'
post_url = 'http://10.42.0.1:5001'
camera_info = {
"id": 1,
"password": "123"
}
def signal_handler(sig, frame):
respond = requests.delete(
'{}/camera/active/{}'.format(post_url, camera_info['id']), json=camera_info, timeout=2)
print(respond)
print('Exit gracefully')
sys.exit(0)
if __name__ == '__main__':
# Handle signal
signal.signal(signal.SIGINT, signal_handler)
# Notify server about camera
respond = requests.post(
'{}/camera/active'.format(post_url), json=camera_info, timeout=2)
if respond.status_code != 200:
raise Exception('Request error {}'.format(respond))
print('Register camera with id: {} <== {}'.format(
camera_info['id'], respond.json()))
try:
socketIO = socketio.Client()
socketIO.connect('http://10.42.0.1:5001')
with picamera.PiCamera() as camera:
# config
camera.framerate = 15
print('Resolution: {}\nFramerate: {}\n'.format(
camera.resolution, camera.framerate))
time.sleep(1)
stream = io.BytesIO()
is_tracked = False
count = 0
states = []
multiTracker = None
# Notify server about camera
for _ in camera.capture_continuous(stream, 'jpeg', use_video_port=True):
if count % 100 == 0:
print('Detect the shelf')
# POST image to server for detecting
multiTracker = cv2.MultiTracker_create()
base64_image = b64encode(
stream.getvalue()).decode('utf-8')
respond = requests.post(
'{}/product/detect'.format(post_url), json={
'id': camera_info['id'],
"image": base64_image
}, timeout=4)
if respond.status_code != 200:
raise Exception('Request error {} - {}'.format(respond.status_code, respond.json()['msg']))
data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
image = cv2.imdecode(data, 1)
image = image[:, :, ::-1]
data = respond.json()
print(data)
if data['info'] != None:
states = data['info']
print('Init done: {}'.format(states))
is_tracked = True
# assign tracker
for state in states:
bbox = tuple(state['bbox'])
multiTracker.add(
cv2.TrackerKCF_create(), image, bbox)
else:
print('No object detected')
is_tracked = False
else:
try:
t = time.time()
base64_image = b64encode(
stream.getvalue()).decode('utf-8')
if is_tracked:
if count % 5 == 0:
data = np.frombuffer(
stream.getvalue(), dtype=np.uint8)
image = cv2.imdecode(data, 1)
image = image[:, :, ::-1]
success, boxes = multiTracker.update(image)
if success:
for i, bbox in enumerate(boxes):
states[i]['bbox'] = [
int(v) for v in bbox]
print(states)
socketIO.emit(
'camera', {
"id": camera_info['id'],
"password": camera_info['password'],
"image": base64_image,
"info": states,
"fire_check": (count % 120 == 0),
"track_start": (count % 100 == 1)
})
else:
socketIO.emit(
'camera', {
"id": camera_info['id'],
"password": camera_info['password'],
"image": base64_image,
"fire_check": (count % 120 == 0),
"track_start": (count % 100 == 1)
})
print('sending image after {}'.format(time.time()-t))
except BadNamespaceError:
print('The socket message has been lost')
count += 1
# Reset the stream for the next capture
stream.seek(0)
stream.truncate()
except Exception as e:
print(e)
respond = requests.delete(
'{}/camera/active/{}'.format(post_url, camera_info['id']), json=camera_info, timeout=2)
print(respond)
| 40.4 | 115 | 0.426659 |
ea08f91c68b65cc3a246a1a92f8fbf29663c17e6 | 14,040 | py | Python | umamusume-light/test.py | mEnow-ast/ui-theme-template | f0108295d4af0d741018fe12299173fac2d6aa08 | [
"MIT"
] | null | null | null | umamusume-light/test.py | mEnow-ast/ui-theme-template | f0108295d4af0d741018fe12299173fac2d6aa08 | [
"MIT"
] | null | null | null | umamusume-light/test.py | mEnow-ast/ui-theme-template | f0108295d4af0d741018fe12299173fac2d6aa08 | [
"MIT"
] | null | null | null | import os
import glob
import sys
import datetime
import tkinter
import openpyxl as op
import subprocess as sp
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
def is_empty(cell):
    """Return True when *cell* holds no value or only whitespace.

    cell: an object exposing a ``value`` attribute (e.g. an openpyxl cell).
    """
    value = cell.value
    if value is None:
        return True
    # A value made only of whitespace counts as empty after stripping.
    return not str(value).strip()
def cast_cereal(date):
    """Convert a string whose first 8 characters are YYYYMMDD into an
    Excel serial date number (1900 date system).

    date: date string starting with the day in ``YYYYMMDD`` form; any
        trailing characters (e.g. a label suffix) are ignored.

    NOTE(review): if the result is off by one against a given workbook,
    check Excel's "use 1904 date system" option — this helper assumes
    the 1900 system.
    """
    day = datetime.datetime.strptime(date[:8], '%Y%m%d')
    # Equivalent to (day - 1899-12-31).days + 1: shifting the epoch one
    # day earlier folds the "+1" into the subtraction.
    epoch = datetime.datetime(1899, 12, 30)
    return (day - epoch).days
def merge_month_exl(folder_path, file_path):
    """Merge the daily "起算日確認_*" workbooks under *folder_path* into one report.

    folder_path : folder containing the daily check workbooks
    file_path   : path of the merged workbook to write

    Returns "complete" on success, or "ZIP" when a workbook cannot be
    opened (presumably protected or corrupt — the caller shows an error).

    If the dates written to Excel do not match, do one of the following:
    1) Excel: "File" -> "Options" -> "Advanced" -> uncheck "Use 1904 date system"
    2) change "serial = dt.days + 1" to "serial = dt.days"
    """
    # Load the report template.
    lb = op.load_workbook(r"temp.xlsx")
    print(lb)
    ls = lb.worksheets[0]
    leng = 4  # data starts on row 4; the running item number is leng - 3
    # Collect the daily workbooks in the folder.
    exl_dir = glob.glob(folder_path+"\起算日確認_*")
    # Prefix stripped from each path so the remainder starts with the date.
    paht_head = folder_path + r"\起算日確認_"
    # Copy every data row of every daily workbook into the summary sheet.
    for i,sheet in enumerate(exl_dir) :
        try :
            wb = op.load_workbook(sheet)
        except Exception as a:
            # Workbook could not be opened; abort with the "ZIP" flag.
            print(a)
            return "ZIP"
        ws = wb.worksheets[0]
        strs = exl_dir[i].replace(paht_head, "")  # file name starting with the date
        if "翌日" in strs :  # "next day" workbook: shift the serial by one day
            serial = cast_cereal(strs) +1
            for y,rows in enumerate(ws.iter_rows(min_row=2)) :
                if all(is_empty(c) for c in rows):  # stop at the first fully blank row
                    break
                ls.cell(row=leng, column=1).value = leng - 3  # item number
                ls.cell(row=leng, column=2).value = ws.cell(row=y+2, column=1).value  # base-station ID
                ls.cell(row=leng, column=3).value = ws.cell(row=y+2, column=2).value  # base-station name
                ls.cell(row=leng, column=4).value = serial  # date as Excel serial
                ls.cell(row=leng, column=5).value = "翌日チェック"  # work type label
                leng += 1  # advance to the next output row
        else :  # "same day" workbook
            serial = cast_cereal(strs)
            for y,rows in enumerate(ws.iter_rows(min_row=2)) :
                if all(is_empty(c) for c in rows):  # stop at the first fully blank row
                    break
                ls.cell(row=leng, column=1).value = leng - 3  # item number
                ls.cell(row=leng, column=2).value = ws.cell(row=y+2, column=1).value  # base-station ID
                ls.cell(row=leng, column=3).value = ws.cell(row=y+2, column=2).value  # base-station name
                ls.cell(row=leng, column=4).value = serial  # date as Excel serial
                ls.cell(row=leng, column=5).value = "当日チェック"  # work type label
                leng += 1  # advance to the next output row
    # Write the purchase-order number derived from the latest file's month.
    orderday = exl_dir[-1].replace(paht_head, "")
    if "2021" in orderday :
        month = int(orderday[4:6])
        order_id = 6 + month * 2
        orders = "注文書番号 :M000538228-" + str(order_id-1) + "、M000538228-" + str(order_id)
        ls.cell(row=1,column=4).value = orders
    else :
        month = int(orderday[4:6])
        order_id = 30 + month * 2
        orders = "注文書番号 :M000538228-" + str(order_id-1) + "、M000538228-" + str(order_id)
        ls.cell(row=1,column=4).value = orders
    # Write the report title (year/month) into the header cell.
    orderday = orderday[0:8]
    dt_now = datetime.datetime.strptime(orderday, '%Y%m%d')
    today = dt_now.strftime('%Y年 %m月') + "分 三技協・作業報告書(別紙5)"
    ls.cell(row=1,column=2).value = today
    # Save the merged workbook.
    lb.save(file_path)
    # Result flag.
    return "complete"
def ask_folder():
    """Handler for the folder "参照" (browse) button.

    Opens a directory chooser and stores the selection in the
    module-level ``folder_path`` StringVar; echoes it for debugging.
    """
    selected = filedialog.askdirectory()
    folder_path.set(selected)
    print(selected)
def ask_file():
    """Handler for the file "選択" (select) button.

    Opens a save-as dialog restricted to .xlsx files and stores the
    chosen path in the module-level ``file_path`` StringVar.
    """
    selected = filedialog.asksaveasfilename(filetypes=[("excel", "*.xlsx")], defaultextension=".xlsx")
    file_path.set(selected)
    print(selected)
def close_window():
    """
    Close the main popup window (destroys the module-level ``main_win``).
    """
    main_win.destroy()
def app():
    """Handler for the "実行" (run) button.

    Reads the folder and output-file entries, normalises the output
    path, runs :func:`merge_month_exl`, and reports the outcome in a
    dialog.  Closes the window on success.
    """
    input_dir = folder_path.get()
    # Excel file to save the merged result into.
    file_name = file_path.get()
    # Do nothing unless both fields are filled in.
    if not input_dir or not file_name:
        return
    # When "output next to the selected folder" is checked, prefix the
    # chosen folder unless the user already typed a path containing it.
    if same_check.get():
        if input_dir not in file_name:
            output_file = input_dir + r"/" + file_name
        else:
            output_file = file_name
    else:
        if r":" in file_name:
            output_file = file_name
        else:
            # No drive letter and no folder to prefix: we cannot build an
            # absolute path, so report the error and abort.
            messagebox.showinfo("エラー", "予期せぬエラーが発生しました。\nパスが正しいかどうか確認してください。")
            # Fix: always return here.  The old code only returned when the
            # dialog result was "ok", leaving output_file unbound otherwise.
            return
    # Append the extension if it is missing.  Fix: use endswith rather than
    # a substring test, so names like "report.xlsx.bak" are still extended.
    if not output_file.endswith(".xlsx"):
        output_file += ".xlsx"
    # Run the merge and pop up a dialog according to the outcome.
    result = merge_month_exl(input_dir, output_file)
    if result == "complete":
        check = messagebox.askquestion("完了", "完了しました。\n作成したフォルダを表示しますか?")
        if check == "yes":
            dirname = os.path.dirname(output_file)  # folder that was written
            sp.Popen(['explorer', dirname.replace("/", "\\")], shell=True)
            close_window()
        else:
            close_window()
    elif result == "ZIP":
        messagebox.showinfo("エラー", "予期せぬエラーが発生しました。\nファイルの権限が撤廃されているか確認してください。")
        return
if __name__ == "__main__" :
    """
    主ウィンドウの作成・表示
    """
    # Build and show the main window.
    main_win = tkinter.Tk()
    main_win.title("月末集計")
    main_win.geometry("500x150")
    # Main frame holding all widgets.
    main_frm = ttk.Frame(main_win)
    main_frm.grid(column=0, row=0, sticky=tkinter.NSEW, padx=5, pady=10)
    # Shared state read by the button callbacks (ask_folder/ask_file/app).
    folder_path = tkinter.StringVar()
    file_path = tkinter.StringVar()
    same_check = tkinter.BooleanVar()
    # Widgets: input-folder row.
    folder_label = ttk.Label(main_frm, text="フォルダ指定")
    folder_box = ttk.Entry(main_frm, textvariable=folder_path)
    folder_btn = ttk.Button(main_frm, text="参照", command=ask_folder)
    # Widgets: output-file row.
    file_label = ttk.Label(main_frm, text="ファイル名")
    file_box = ttk.Entry(main_frm, textvariable=file_path)
    file_btn = ttk.Button(main_frm, text="選択", command=ask_file)
    # Widget: "output next to the selected folder" checkbox.
    same_folder = ttk.Checkbutton(main_frm, var=same_check,
                                  text="選択したフォルダと同じ階層に出力する(ファイル名のみ入力する)")
    # Widget: run button.
    app_btn = ttk.Button(main_frm, text="実行", command=app)
    # Lay out the widgets on the grid.
    folder_label.grid(column=0, row=0, pady=10)
    folder_box.grid(column=1, row=0, sticky=tkinter.EW, padx=5)
    folder_btn.grid(column=2, row=0)
    file_label.grid(column=0, row=1, pady=10)
    file_box.grid(column=1, row=1, sticky=tkinter.EW, padx=5)
    file_btn.grid(column=2, row=1)
    same_folder.grid(column=1, row=2)
    app_btn.grid(column=1, row=3)
    # Make the entry column stretch with the window.
    main_win.columnconfigure(0, weight=1)
    main_win.rowconfigure(0, weight=1)
    main_frm.columnconfigure(1, weight=1)
    # Start the Tk event loop.
main_win.mainloop()import os
import glob
import sys
import datetime
import tkinter
import openpyxl as op
import subprocess as sp
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
def is_empty(cell):
    """Return True when *cell* has no value or only whitespace.

    A cell counts as empty if its ``value`` is None, or if the string
    form of the value strips down to the empty string.
    """
    value = cell.value
    if value is None:
        return True
    return not str(value).strip()
def cast_cereal(date):
    """Convert a ``YYYYMMDD...`` string to an Excel serial day number.

    Only the first eight characters are parsed; anything after them
    (e.g. a suffix in the file name) is ignored.
    """
    parsed = datetime.datetime.strptime(date[:8], '%Y%m%d')
    epoch = datetime.datetime(1899, 12, 31)
    return (parsed - epoch).days + 1
def merge_month_exl(folder_path, file_path):
    """Merge the daily "起算日確認_*" workbooks under *folder_path* into one report.

    folder_path : folder containing the daily check workbooks
    file_path   : path of the merged workbook to write

    Returns "complete" on success, or "ZIP" when a workbook cannot be
    opened (presumably protected or corrupt — the caller shows an error).

    If the dates written to Excel do not match, do one of the following:
    1) Excel: "File" -> "Options" -> "Advanced" -> uncheck "Use 1904 date system"
    2) change "serial = dt.days + 1" to "serial = dt.days"
    """
    # Load the report template.
    lb = op.load_workbook(r"temp.xlsx")
    print(lb)
    ls = lb.worksheets[0]
    leng = 4  # data starts on row 4; the running item number is leng - 3
    # Collect the daily workbooks in the folder.
    exl_dir = glob.glob(folder_path+"\起算日確認_*")
    # Prefix stripped from each path so the remainder starts with the date.
    paht_head = folder_path + r"\起算日確認_"
    # Copy every data row of every daily workbook into the summary sheet.
    for i,sheet in enumerate(exl_dir) :
        try :
            wb = op.load_workbook(sheet)
        except Exception as a:
            # Workbook could not be opened; abort with the "ZIP" flag.
            print(a)
            return "ZIP"
        ws = wb.worksheets[0]
        strs = exl_dir[i].replace(paht_head, "")  # file name starting with the date
        if "翌日" in strs :  # "next day" workbook: shift the serial by one day
            serial = cast_cereal(strs) +1
            for y,rows in enumerate(ws.iter_rows(min_row=2)) :
                if all(is_empty(c) for c in rows):  # stop at the first fully blank row
                    break
                ls.cell(row=leng, column=1).value = leng - 3  # item number
                ls.cell(row=leng, column=2).value = ws.cell(row=y+2, column=1).value  # base-station ID
                ls.cell(row=leng, column=3).value = ws.cell(row=y+2, column=2).value  # base-station name
                ls.cell(row=leng, column=4).value = serial  # date as Excel serial
                ls.cell(row=leng, column=5).value = "翌日チェック"  # work type label
                leng += 1  # advance to the next output row
        else :  # "same day" workbook
            serial = cast_cereal(strs)
            for y,rows in enumerate(ws.iter_rows(min_row=2)) :
                if all(is_empty(c) for c in rows):  # stop at the first fully blank row
                    break
                ls.cell(row=leng, column=1).value = leng - 3  # item number
                ls.cell(row=leng, column=2).value = ws.cell(row=y+2, column=1).value  # base-station ID
                ls.cell(row=leng, column=3).value = ws.cell(row=y+2, column=2).value  # base-station name
                ls.cell(row=leng, column=4).value = serial  # date as Excel serial
                ls.cell(row=leng, column=5).value = "当日チェック"  # work type label
                leng += 1  # advance to the next output row
    # Write the purchase-order number derived from the latest file's month.
    orderday = exl_dir[-1].replace(paht_head, "")
    if "2021" in orderday :
        month = int(orderday[4:6])
        order_id = 6 + month * 2
        orders = "注文書番号 :M000538228-" + str(order_id-1) + "、M000538228-" + str(order_id)
        ls.cell(row=1,column=4).value = orders
    else :
        month = int(orderday[4:6])
        order_id = 30 + month * 2
        orders = "注文書番号 :M000538228-" + str(order_id-1) + "、M000538228-" + str(order_id)
        ls.cell(row=1,column=4).value = orders
    # Write the report title (year/month) into the header cell.
    orderday = orderday[0:8]
    dt_now = datetime.datetime.strptime(orderday, '%Y%m%d')
    today = dt_now.strftime('%Y年 %m月') + "分 三技協・作業報告書(別紙5)"
    ls.cell(row=1,column=2).value = today
    # Save the merged workbook.
    lb.save(file_path)
    # Result flag.
    return "complete"
def ask_folder():
    """Handler for the folder "参照" (browse) button.

    Opens a directory chooser and stores the selection in the
    module-level ``folder_path`` StringVar; echoes it for debugging.
    """
    selected = filedialog.askdirectory()
    folder_path.set(selected)
    print(selected)
def ask_file():
    """Handler for the file "選択" (select) button.

    Opens a save-as dialog restricted to .xlsx files and stores the
    chosen path in the module-level ``file_path`` StringVar.
    """
    selected = filedialog.asksaveasfilename(filetypes=[("excel", "*.xlsx")], defaultextension=".xlsx")
    file_path.set(selected)
    print(selected)
def close_window():
    """
    Close the main popup window (destroys the module-level ``main_win``).
    """
    main_win.destroy()
def app():
    """Handler for the "実行" (run) button.

    Reads the folder and output-file entries, normalises the output
    path, runs :func:`merge_month_exl`, and reports the outcome in a
    dialog.  Closes the window on success.
    """
    input_dir = folder_path.get()
    # Excel file to save the merged result into.
    file_name = file_path.get()
    # Do nothing unless both fields are filled in.
    if not input_dir or not file_name:
        return
    # When "output next to the selected folder" is checked, prefix the
    # chosen folder unless the user already typed a path containing it.
    if same_check.get():
        if input_dir not in file_name:
            output_file = input_dir + r"/" + file_name
        else:
            output_file = file_name
    else:
        if r":" in file_name:
            output_file = file_name
        else:
            # No drive letter and no folder to prefix: we cannot build an
            # absolute path, so report the error and abort.
            messagebox.showinfo("エラー", "予期せぬエラーが発生しました。\nパスが正しいかどうか確認してください。")
            # Fix: always return here.  The old code only returned when the
            # dialog result was "ok", leaving output_file unbound otherwise.
            return
    # Append the extension if it is missing.  Fix: use endswith rather than
    # a substring test, so names like "report.xlsx.bak" are still extended.
    if not output_file.endswith(".xlsx"):
        output_file += ".xlsx"
    # Run the merge and pop up a dialog according to the outcome.
    result = merge_month_exl(input_dir, output_file)
    if result == "complete":
        check = messagebox.askquestion("完了", "完了しました。\n作成したフォルダを表示しますか?")
        if check == "yes":
            dirname = os.path.dirname(output_file)  # folder that was written
            sp.Popen(['explorer', dirname.replace("/", "\\")], shell=True)
            close_window()
        else:
            close_window()
    elif result == "ZIP":
        messagebox.showinfo("エラー", "予期せぬエラーが発生しました。\nファイルの権限が撤廃されているか確認してください。")
        return
if __name__ == "__main__" :
    """
    主ウィンドウの作成・表示
    """
    # Build and show the main window.
    main_win = tkinter.Tk()
    main_win.title("月末集計")
    main_win.geometry("500x150")
    # Main frame holding all widgets.
    main_frm = ttk.Frame(main_win)
    main_frm.grid(column=0, row=0, sticky=tkinter.NSEW, padx=5, pady=10)
    # Shared state read by the button callbacks (ask_folder/ask_file/app).
    folder_path = tkinter.StringVar()
    file_path = tkinter.StringVar()
    same_check = tkinter.BooleanVar()
    # Widgets: input-folder row.
    folder_label = ttk.Label(main_frm, text="フォルダ指定")
    folder_box = ttk.Entry(main_frm, textvariable=folder_path)
    folder_btn = ttk.Button(main_frm, text="参照", command=ask_folder)
    # Widgets: output-file row.
    file_label = ttk.Label(main_frm, text="ファイル名")
    file_box = ttk.Entry(main_frm, textvariable=file_path)
    file_btn = ttk.Button(main_frm, text="選択", command=ask_file)
    # Widget: "output next to the selected folder" checkbox.
    same_folder = ttk.Checkbutton(main_frm, var=same_check,
                                  text="選択したフォルダと同じ階層に出力する(ファイル名のみ入力する)")
    # Widget: run button.
    app_btn = ttk.Button(main_frm, text="実行", command=app)
    # Lay out the widgets on the grid.
    folder_label.grid(column=0, row=0, pady=10)
    folder_box.grid(column=1, row=0, sticky=tkinter.EW, padx=5)
    folder_btn.grid(column=2, row=0)
    file_label.grid(column=0, row=1, pady=10)
    file_box.grid(column=1, row=1, sticky=tkinter.EW, padx=5)
    file_btn.grid(column=2, row=1)
    same_folder.grid(column=1, row=2)
    app_btn.grid(column=1, row=3)
    # Make the entry column stretch with the window.
    main_win.columnconfigure(0, weight=1)
    main_win.rowconfigure(0, weight=1)
    main_frm.columnconfigure(1, weight=1)
    # Start the Tk event loop.
    main_win.mainloop()
| 29.620253 | 99 | 0.580627 |
74214a88bbb703bed01570d71d58a5245c7b6246 | 10,822 | py | Python | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/system/test_java_keystore.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/system/test_java_keystore.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/system/test_java_keystore.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# Copyright (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
from ansible_collections.community.general.tests.unit.modules.utils import ModuleTestCase, set_module_args
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.tests.unit.compat.mock import Mock
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec
class TestCreateJavaKeystore(ModuleTestCase):
    """Test the creation of a Java keystore via ``create_jks``."""

    def setUp(self):
        """Patch file creation, command execution and SELinux helpers."""
        super(TestCreateJavaKeystore, self).setUp()
        # Keep the real os.path.exists so the mock below can fall back to
        # it for every path other than the keystore under test.
        orig_exists = os.path.exists
        self.spec = ArgumentSpec()
        self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file',
                                      side_effect=lambda path, content: path)
        self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands')
        self.mock_os_path_exists = patch('os.path.exists',
                                         side_effect=lambda path: True if path == '/path/to/keystore.jks' else orig_exists(path))
        self.mock_selinux_context = patch('ansible.module_utils.basic.AnsibleModule.selinux_context',
                                          side_effect=lambda path: ['unconfined_u', 'object_r', 'user_home_t', 's0'])
        self.mock_is_special_selinux_path = patch('ansible.module_utils.basic.AnsibleModule.is_special_selinux_path',
                                                  side_effect=lambda path: (False, None))
        self.run_commands = self.mock_run_commands.start()
        self.create_file = self.mock_create_file.start()
        self.selinux_context = self.mock_selinux_context.start()
        self.is_special_selinux_path = self.mock_is_special_selinux_path.start()
        self.os_path_exists = self.mock_os_path_exists.start()

    def tearDown(self):
        """Stop every patch started in setUp."""
        super(TestCreateJavaKeystore, self).tearDown()
        self.mock_create_file.stop()
        self.mock_run_commands.stop()
        self.mock_selinux_context.stop()
        self.mock_is_special_selinux_path.stop()
        self.mock_os_path_exists.stop()

    def test_create_jks_success(self):
        """When all commands succeed, exit_json reports the keytool import."""
        set_module_args(dict(
            certificate='cert-foo',
            private_key='private-foo',
            dest='/path/to/keystore.jks',
            name='foo',
            password='changeit'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        module.exit_json = Mock()
        with patch('os.remove', return_value=True):
            # Every command invocation succeeds (rc 0, empty output).
            self.run_commands.side_effect = lambda args, kwargs: (0, '', '')
            create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit")
            module.exit_json.assert_called_once_with(
                changed=True,
                cmd="keytool -importkeystore "
                    "-destkeystore '/path/to/keystore.jks' "
                    "-srckeystore '/tmp/keystore.p12' -srcstoretype pkcs12 -alias 'test' "
                    "-deststorepass 'changeit' -srcstorepass 'changeit' -noprompt",
                msg='',
                rc=0,
                stdout_lines=''
            )

    def test_create_jks_fail_export_pkcs12(self):
        """A failing openssl export must call fail_json with the openssl cmd."""
        set_module_args(dict(
            certificate='cert-foo',
            private_key='private-foo',
            dest='/path/to/keystore.jks',
            name='foo',
            password='changeit'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        module.fail_json = Mock()
        with patch('os.remove', return_value=True):
            # First command (openssl export) fails, second would succeed.
            self.run_commands.side_effect = [(1, '', ''), (0, '', '')]
            create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit")
            module.fail_json.assert_called_once_with(
                cmd="openssl pkcs12 -export -name 'test' "
                    "-in '/tmp/foo.crt' -inkey '/tmp/foo.key' "
                    "-out '/tmp/keystore.p12' "
                    "-passout 'pass:changeit'",
                msg='',
                rc=1
            )

    def test_create_jks_fail_import_key(self):
        """A failing keytool import must call fail_json with the keytool cmd."""
        set_module_args(dict(
            certificate='cert-foo',
            private_key='private-foo',
            dest='/path/to/keystore.jks',
            name='foo',
            password='changeit'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        module.fail_json = Mock()
        with patch('os.remove', return_value=True):
            # openssl export succeeds, keytool import fails.
            self.run_commands.side_effect = [(0, '', ''), (1, '', '')]
            create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit")
            module.fail_json.assert_called_once_with(
                cmd="keytool -importkeystore "
                    "-destkeystore '/path/to/keystore.jks' "
                    "-srckeystore '/tmp/keystore.p12' -srcstoretype pkcs12 -alias 'test' "
                    "-deststorepass 'changeit' -srcstorepass 'changeit' -noprompt",
                msg='',
                rc=1
            )
class TestCertChanged(ModuleTestCase):
    """Test the fingerprint-comparison logic of ``cert_changed``."""

    def setUp(self):
        """Patch file creation and command execution."""
        super(TestCertChanged, self).setUp()
        self.spec = ArgumentSpec()
        self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file',
                                      side_effect=lambda path, content: path)
        self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands')
        self.run_commands = self.mock_run_commands.start()
        self.create_file = self.mock_create_file.start()

    def tearDown(self):
        """Stop every patch started in setUp."""
        super(TestCertChanged, self).tearDown()
        self.mock_create_file.stop()
        self.mock_run_commands.stop()

    def test_cert_unchanged_same_fingerprint(self):
        """Matching openssl and keytool fingerprints -> no change."""
        set_module_args(dict(
            certificate='cert-foo',
            private_key='private-foo',
            dest='/path/to/keystore.jks',
            name='foo',
            password='changeit'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        with patch('os.remove', return_value=True):
            # First result: openssl cert fingerprint; second: keytool listing.
            self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')]
            result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
            self.assertFalse(result, 'Fingerprint is identical')

    def test_cert_changed_fingerprint_mismatch(self):
        """Differing fingerprints -> cert_changed returns True."""
        set_module_args(dict(
            certificate='cert-foo',
            private_key='private-foo',
            dest='/path/to/keystore.jks',
            name='foo',
            password='changeit'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        with patch('os.remove', return_value=True):
            self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')]
            result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
            self.assertTrue(result, 'Fingerprint mismatch')

    def test_cert_changed_alias_does_not_exist(self):
        """A missing alias in the keystore counts as a change."""
        set_module_args(dict(
            certificate='cert-foo',
            private_key='private-foo',
            dest='/path/to/keystore.jks',
            name='foo',
            password='changeit'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        with patch('os.remove', return_value=True):
            self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''),
                                             (1, 'keytool error: java.lang.Exception: Alias <foo> does not exist', '')]
            result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
            self.assertTrue(result, 'Certificate does not exist')

    def test_cert_changed_fail_read_cert(self):
        """A failing openssl read must call fail_json with the openssl cmd."""
        set_module_args(dict(
            certificate='cert-foo',
            private_key='private-foo',
            dest='/path/to/keystore.jks',
            name='foo',
            password='changeit'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        module.fail_json = Mock()
        with patch('os.remove', return_value=True):
            self.run_commands.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')]
            cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
            module.fail_json.assert_called_once_with(
                cmd="openssl x509 -noout -in /tmp/foo.crt -fingerprint -sha256",
                msg='',
                err='Oops',
                rc=1
            )

    def test_cert_changed_fail_read_keystore(self):
        """A failing keytool listing must call fail_json with the keytool cmd."""
        set_module_args(dict(
            certificate='cert-foo',
            private_key='private-foo',
            dest='/path/to/keystore.jks',
            name='foo',
            password='changeit'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        module.fail_json = Mock(return_value=True)
        with patch('os.remove', return_value=True):
            self.run_commands.side_effect = [(0, 'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')]
            cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
            module.fail_json.assert_called_with(
                cmd="keytool -list -alias 'foo' -keystore '/path/to/keystore.jks' -storepass 'changeit' -v",
                msg='',
                err='Oops',
                rc=1
            )
| 40.837736 | 129 | 0.59416 |
ab4ed578b9ce98b7df1651532183e5f31b3d9a48 | 907 | py | Python | visdrone/slice_visdrone.py | fcakyon/sahi-benchmark | 8ffa43afb23adb914f2ba8b4bb45dd9ce1284c42 | [
"MIT"
] | 7 | 2021-12-20T05:22:56.000Z | 2022-03-28T01:57:10.000Z | visdrone/slice_visdrone.py | fcakyon/sahi-benchmark | 8ffa43afb23adb914f2ba8b4bb45dd9ce1284c42 | [
"MIT"
] | 1 | 2022-03-19T14:52:38.000Z | 2022-03-21T13:19:05.000Z | visdrone/slice_visdrone.py | fcakyon/sahi-benchmark | 8ffa43afb23adb914f2ba8b4bb45dd9ce1284c42 | [
"MIT"
] | 3 | 2022-02-23T12:26:18.000Z | 2022-03-27T14:06:58.000Z | import fire
from sahi.scripts.slice_coco import slice
from tqdm import tqdm
SLICE_SIZE_LIST = [480, 640]  # square slice edge lengths (pixels) to export
OVERLAP_RATIO_LIST = [0, 0.25]  # fractional overlap between adjacent slices
IGNORE_NEGATIVE_SAMPLES = False  # NOTE(review): defined but never used in this module — confirm intent
def slice_visdrone(image_dir: str, dataset_json_path: str, output_dir: str):
    """Slice the VisDrone COCO dataset once per (slice size, overlap) pair.

    Iterates over every combination of SLICE_SIZE_LIST and
    OVERLAP_RATIO_LIST, logging progress via tqdm and delegating the
    actual slicing to sahi's ``slice`` script.
    """
    combos = [(s, o) for s in SLICE_SIZE_LIST for o in OVERLAP_RATIO_LIST]
    for run_index, (slice_size, overlap_ratio) in enumerate(combos, start=1):
        tqdm.write(
            f"{run_index} of {len(combos)}: slicing for slice_size={slice_size}, overlap_ratio={overlap_ratio}"
        )
        slice(
            image_dir=image_dir,
            dataset_json_path=dataset_json_path,
            output_dir=output_dir,
            slice_size=slice_size,
            overlap_ratio=overlap_ratio,
        )
if __name__ == "__main__":
    # Expose the function as a CLI; fire maps its arguments to flags.
    fire.Fire(slice_visdrone)
| 30.233333 | 115 | 0.646086 |
89e20f73ec594c6a38cb1ee7af1df5c87110e9e7 | 10,478 | py | Python | webdataset/writer.py | YaboSu/webdataset | d88dd6e71c32f97352f56d831c3df4db2929a987 | [
"BSD-3-Clause"
] | null | null | null | webdataset/writer.py | YaboSu/webdataset | d88dd6e71c32f97352f56d831c3df4db2929a987 | [
"BSD-3-Clause"
] | null | null | null | webdataset/writer.py | YaboSu/webdataset | d88dd6e71c32f97352f56d831c3df4db2929a987 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2017-2019 NVIDIA CORPORATION. All rights reserved.
# This file is part of the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
#
__all__ = "TarWriter ShardWriter".split()
import io
import pickle
import re
import tarfile
import time
import numpy as np
import PIL
import simplejson
from . import gopen
def imageencoder(image, format="PNG"):  # skipcq: PYL-W0622
    """Compress an image with PIL and return the encoded byte string.

    Float arrays must lie (approximately) within [0, 1] and are rescaled
    to uint8; uint8 arrays and PIL images are encoded as-is.

    :param image: ndarray or PIL image representing an image
    :param format: compression format (PNG, JPEG, PPM)
    """
    if isinstance(image, np.ndarray):
        if image.dtype in [np.dtype("f"), np.dtype("d")]:
            if not (np.amin(image) > -0.001 and np.amax(image) < 1.001):
                raise ValueError(
                    f"image values out of range {np.amin(image)} {np.amax(image)}"
                )
            image = np.clip(image, 0.0, 1.0)
            image = np.array(image * 255.0, "uint8")
        image = PIL.Image.fromarray(image)
    # Normalize a few format aliases to names PIL understands.
    alias = format.upper()
    if alias == "JPG":
        format = "JPEG"
    elif alias in ("IMG", "IMAGE"):
        format = "PPM"
    opts = {"quality": 100} if format == "JPEG" else {}
    with io.BytesIO() as result:
        image.save(result, format=format, **opts)
        return result.getvalue()
def bytestr(data):
    """Return *data* as bytes; non-bytes values are ASCII-encoded."""
    if isinstance(data, bytes):
        return data
    text = data if isinstance(data, str) else str(data)
    return text.encode("ascii")
def torch_save_object(data):
    """Serialize *data* with ``torch.save`` and return the raw bytes."""
    import io
    import torch

    buffer = io.BytesIO()
    torch.save(data, buffer)
    return buffer.getvalue()
def make_handlers():
    """Build the default mapping from field extension to encoder callable.

    Returns a dict whose keys are sample-field extensions and whose
    values take a Python object and return bytes (or a buffer object).
    """
    handlers = {}
    # Class/index style fields are written as ASCII decimal strings.
    for extension in ["cls", "cls2", "class", "count", "index", "inx", "id"]:
        handlers[extension] = lambda x: str(x).encode("ascii")
    # Free-form text fields are UTF-8 encoded.
    for extension in ["txt", "text", "transcript"]:
        handlers[extension] = lambda x: x.encode("utf-8")
    # Image extensions: the helper f freezes the current extension value
    # as a default-like parameter so each lambda encodes with its own format.
    for extension in ["png", "jpg", "jpeg", "img", "image", "pbm", "pgm", "ppm"]:

        def f(extension_):
            handlers[extension] = lambda data: imageencoder(data, extension_)

        f(extension)
    # Python objects via pickle.
    for extension in ["pyd", "pickle"]:
        handlers[extension] = pickle.dumps
    # PyTorch objects via torch.save.
    for extension in ["pth"]:
        handlers[extension] = torch_save_object
    # JSON-serializable objects, UTF-8 encoded.
    for extension in ["json", "jsn"]:
        handlers[extension] = lambda x: simplejson.dumps(x).encode("utf-8")
    # Tensor binary format: single arrays are wrapped in a list first.
    for extension in ["ten", "tb"]:
        from . import tenbin

        def f(x):  # skipcq: PYL-E0102
            if isinstance(x, list):
                return memoryview(tenbin.encode_buffer(x))
            else:
                return memoryview(tenbin.encode_buffer([x]))

        handlers[extension] = f
    # msgpack support is optional; skip silently when it is not installed.
    try:
        import msgpack

        for extension in ["mp", "msgpack", "msg"]:
            handlers[extension] = msgpack.packb
    except ImportError:
        pass
    return handlers
default_handlers = {"default": make_handlers()}  # registry of named handler sets (only "default" built in)
def encode_based_on_extension1(data, tname, handlers):
    """Encode a single sample field selected by its key *tname*.

    Metadata keys (leading underscore) must already be strings and pass
    through unchanged.  bytes pass through, str is UTF-8 encoded, and
    any other value is dispatched to the handler registered for the
    key's final extension (text after the last dot, lowercased).
    """
    if tname[0] == "_":
        # Metadata values are stored verbatim and must be plain strings.
        if not isinstance(data, str):
            raise ValueError("the values of metadata must be of string type")
        return data
    extension = re.sub(r".*\.", "", tname).lower()
    if isinstance(data, bytes):
        return data
    if isinstance(data, str):
        return data.encode("utf-8")
    encode = handlers.get(extension)
    if encode is None:
        raise ValueError(f"no handler found for {extension}")
    return encode(data)
def encode_based_on_extension(sample, handlers):
    """Encode every field of *sample* (key -> value) via its key's extension."""
    encoded = {}
    for key, value in sample.items():
        encoded[key] = encode_based_on_extension1(value, key, handlers)
    return encoded
def make_encoder(spec):
    """Build a sample-encoding callable from *spec*.

    *spec* may be False/None (identity), a callable (used directly), a
    dict of extension handlers, or the name of a registered handler set
    (True is shorthand for "default").
    """
    if spec is False or spec is None:
        encoder = lambda x: x
    elif callable(spec):
        encoder = spec
    elif isinstance(spec, dict):
        encoder = lambda sample: encode_based_on_extension(sample, spec)
    elif isinstance(spec, str) or spec is True:
        name = "default" if spec is True else spec
        handlers = default_handlers.get(name)
        if handlers is None:
            raise ValueError(f"no handler found for {name}")
        encoder = lambda sample: encode_based_on_extension(sample, handlers)
    else:
        raise ValueError(f"{spec}: unknown decoder spec")
    if not callable(encoder):
        raise ValueError(f"{spec} did not yield a callable encoder")
    return encoder
class TarWriter:
    """A class for writing dictionaries to tar files.

    :param fileobj: fileobj: file name for tar file (.tgz/.tar) or open file descriptor
    :param encoder: sample encoding (Default value = True)
    :param compress:  (Default value = None)

    `True` will use an encoder that behaves similar to the automatic
    decoder for `Dataset`. `False` disables encoding and expects byte strings
    (except for metadata, which must be strings). The `encoder` argument can
    also be a `callable`, or a dictionary mapping extensions to encoders.

    The following code will add two file to the tar archive: `a/b.png` and
    `a/b.output.png`.

    ```Python
    tarwriter = TarWriter(stream)
    image = imread("b.jpg")
    image2 = imread("b.out.jpg")
    sample = {"__key__": "a/b", "png": image, "output.png": image2}
    tarwriter.write(sample)
    ```
    """

    def __init__(
        self,
        fileobj,
        user="bigdata",
        group="bigdata",
        mode=0o0444,
        compress=None,
        encoder=True,
        keep_meta=False,
    ):
        """Open (or adopt) the output stream and set up the tar archive.

        When *fileobj* is a string it is opened with gopen and owned (and
        later closed) by this writer; gzip compression is inferred from a
        "gz" suffix unless *compress* says otherwise.
        """
        if isinstance(fileobj, str):
            if compress is False:
                tarmode = "w|"
            elif compress is True:
                tarmode = "w|gz"
            else:
                # Infer compression from the file name suffix.
                tarmode = "w|gz" if fileobj.endswith("gz") else "w|"
            fileobj = gopen.gopen(fileobj, "wb")
            self.own_fileobj = fileobj
        else:
            tarmode = "w|gz" if compress is True else "w|"
            # Caller owns the stream; we must not close it on our own.
            self.own_fileobj = None
        self.encoder = make_encoder(encoder)
        self.keep_meta = keep_meta
        self.stream = fileobj
        self.tarstream = tarfile.open(fileobj=fileobj, mode=tarmode)
        # Ownership/permission metadata applied to every tar entry.
        self.user = user
        self.group = group
        self.mode = mode
        self.compress = compress

    def __enter__(self):
        """Enter a context manager; returns the writer itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close the archive on context exit."""
        self.close()

    def close(self):
        """Close the tar file."""
        self.tarstream.close()
        if self.own_fileobj is not None:
            # Only close the stream if this writer opened it.
            self.own_fileobj.close()
            self.own_fileobj = None

    def dwrite(self, key, **kw):
        """Convenience function for `write`.

        Takes key as the first argument and key-value pairs for the rest.
        Replaces "_" with ".".
        """
        obj = dict(__key__=key)
        obj.update({k.replace("_", "."): v for k, v in kw.items()})
        self.write(obj)

    def write(self, obj):
        """Write a dictionary to the tar file.

        :param obj: dictionary of objects to be stored
        :returns: size of the entry

        Each non-metadata field becomes a tar member named
        "<__key__>.<field>"; fields are written in sorted key order.
        """
        total = 0
        obj = self.encoder(obj)
        if "__key__" not in obj:
            raise ValueError(f"object must contain a __key__")
        # Validate up front that every payload field encoded to bytes.
        for k, v in list(obj.items()):
            if k[0] == "_":
                continue
            if not isinstance(v, (bytes, bytearray, memoryview)):
                raise ValueError(
                    f"{k} doesn't map to a bytes after encoding ({type(v)})"
                )
        key = obj["__key__"]
        for k in sorted(obj.keys()):
            if k == "__key__":
                continue
            if not self.keep_meta and k[0] == "_":
                # Metadata fields are skipped unless keep_meta is set.
                continue
            v = obj[k]
            if isinstance(v, str):
                v = v.encode("utf-8")
            now = time.time()
            ti = tarfile.TarInfo(key + "." + k)
            ti.size = len(v)
            ti.mtime = now
            ti.mode = self.mode
            ti.uname = self.user
            ti.gname = self.group
            if not isinstance(v, (bytes, bytearray, memoryview)):
                raise ValueError(f"converter didn't yield bytes: {k}, {type(v)}")
            stream = io.BytesIO(v)
            self.tarstream.addfile(ti, stream)
            total += ti.size
        return total
class ShardWriter:
    """Like TarWriter but splits into multiple shards.

    :param pattern: output file pattern
    :param maxcount: maximum number of records per shard (Default value = 100000)
    :param maxsize: maximum size of each shard (Default value = 3e9)
    :param kw: other options passed to TarWriter
    """

    def __init__(self, pattern, maxcount=100000, maxsize=3e9, post=None, **kw):
        """Set up counters and open the first shard.

        *post*, when callable, is invoked with each finished shard's
        file name after it is closed.
        """
        self.verbose = 1
        self.kw = kw
        self.maxcount = maxcount
        self.maxsize = maxsize
        self.post = post
        # Per-shard and global bookkeeping.
        self.tarstream = None
        self.shard = 0
        self.pattern = pattern
        self.total = 0
        self.count = 0
        self.size = 0
        self.fname = None
        self.next_stream()

    def next_stream(self):
        """Finish the current shard (if any) and open the next one."""
        self.finish()
        self.fname = self.pattern % self.shard
        if self.verbose:
            print(
                "# writing",
                self.fname,
                self.count,
                "%.1f GB" % (self.size / 1e9),
                self.total,
            )
        self.shard += 1
        stream = open(self.fname, "wb")
        self.tarstream = TarWriter(stream, **self.kw)
        # Reset the per-shard counters.
        self.count = 0
        self.size = 0

    def write(self, obj):
        """Write one sample, rolling over to a new shard when limits are hit."""
        if (
            self.tarstream is None
            or self.count >= self.maxcount
            or self.size >= self.maxsize
        ):
            self.next_stream()
        size = self.tarstream.write(obj)
        self.count += 1
        self.total += 1
        self.size += size

    def finish(self):
        """Close the current shard and run the post-processing hook."""
        if self.tarstream is not None:
            self.tarstream.close()
            assert self.fname is not None
            if callable(self.post):
                self.post(self.fname)
            self.tarstream = None

    def close(self):
        """Finish the last shard and drop internal state."""
        self.finish()
        del self.tarstream
        del self.shard
        del self.count
        del self.size

    def __enter__(self):
        """Enter a context manager; returns the writer itself."""
        return self

    def __exit__(self, *args, **kw):
        """Close all shards on context exit."""
        self.close()
ab9879b0cf31ac9ac5dc9ee8f5821a6bf54bcc27 | 1,807 | py | Python | floris/tools/optimization/scipy/optimization.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
] | 91 | 2019-06-04T08:56:29.000Z | 2022-03-13T17:39:22.000Z | floris/tools/optimization/scipy/optimization.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
] | 224 | 2019-04-08T22:03:45.000Z | 2022-03-31T17:56:09.000Z | floris/tools/optimization/scipy/optimization.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
] | 97 | 2019-04-23T20:48:20.000Z | 2022-03-29T08:17:02.000Z | # Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import numpy as np
import matplotlib.pyplot as plt
try:
from mpi4py.futures import MPIPoolExecutor
except ImportError:
pass
class Optimization:
    """
    Base class for the `~.tools.optimization.scipy` optimizers.

    Holds the FLORIS interface shared by the concrete optimization
    classes, plus small shared utilities (normalization helpers and the
    turbine count property).
    """

    def __init__(self, fi):
        """
        Store the FLORIS interface used by the optimization.

        Args:
            fi (:py:class:`~.tools.floris_interface.FlorisInterface`):
                Interface used to interact with the Floris object.
        """
        self.fi = fi

    # Private helpers

    def _reinitialize(self):
        # Hook for subclasses to rebuild internal state; no-op here.
        pass

    def _norm(self, val, x1, x2):
        # Map val from the interval [x1, x2] onto [0, 1].
        span = x2 - x1
        return (val - x1) / span

    def _unnorm(self, val, x1, x2):
        # Inverse of _norm; accepts scalars or sequences (returns ndarray).
        return np.array(val) * (x2 - x1) + x1

    # Properties

    @property
    def nturbs(self):
        """
        Number of turbines in the :py:class:`~.farm.Farm` object.

        Returns:
            int
        """
        self._nturbs = len(self.fi.floris.farm.turbine_map.turbines)
        return self._nturbs
| 26.970149 | 79 | 0.663531 |
b726465e9a830590f76761910fc4947eb2684487 | 814 | py | Python | standard/regular.py | RootCluster/rc-cluster-python | f8cb2bb3f345cb5b964116e5da49aca3de3868b1 | [
"Apache-2.0"
] | null | null | null | standard/regular.py | RootCluster/rc-cluster-python | f8cb2bb3f345cb5b964116e5da49aca3de3868b1 | [
"Apache-2.0"
] | null | null | null | standard/regular.py | RootCluster/rc-cluster-python | f8cb2bb3f345cb5b964116e5da49aca3de3868b1 | [
"Apache-2.0"
] | null | null | null | # python 中标准库应用比较广泛的是
# 1. 文章处理的 re
# 2. 日期类型的 time,datetime
# 3. 数字和数学类型的 math,random
# 4. 文件和目录访问的 pathlib,os.path
# 5. 数据压缩和归档的 tarfile
# 6. 通用操作系统的 os,logging,argparse
# 7. 多线程的 threading,queue
# 8. Internet 数据护理的 base64,json,urllib
# 9. 结构化标记处理工具的 HTML,XML
# 10. 开发工具 unitest
# 11. 调试工具 timeit
# 12. 软件包发布的 venv
# 13. 运行服务的 __main__
import re
p = re.compile('a')
print(p.match('a'))
# 正则表达式相关教程 https://github.com/ziishaned/learn-regex/blob/master/translations/README-cn.md
# . :句号匹配任意单个字符除了换行符
# [ ] :字符种类。匹配方括号内的任意字符
# [^ ] :否定的字符种类。匹配除了方括号里的任意字符
# * :匹配>=0个重复的在*号之前的字符
# + :匹配>=1个重复的+号前的字符
# ? :标记?之前的字符为可选
# {n,m} :匹配num个大括号之前的字符或字符集 (n <= num <= m)
# (xyz) :字符集,匹配与 xyz 完全相等的字符串.
# | :或运算符,匹配符号前或后的字符
# \ :转义字符,用于匹配一些保留的字符 [ ] ( ) { } . * + ? ^ $ \ |
# ^ :从开始行开始匹配
# $ :从末端开始匹配
# ^$ :表示空行
# .*? :贪婪模式
| 22.611111 | 90 | 0.654791 |
b074cba0ae13a91852c624c89b20691e8c8e6972 | 12,829 | py | Python | loaders/old_champs.py | guineawheek/ftcdata | f6515da93c7a788b00b3e88d4c507c2140d7e385 | [
"MIT"
] | 1 | 2019-05-21T08:10:41.000Z | 2019-05-21T08:10:41.000Z | loaders/old_champs.py | guineawheek/ftcdata | f6515da93c7a788b00b3e88d4c507c2140d7e385 | [
"MIT"
] | null | null | null | loaders/old_champs.py | guineawheek/ftcdata | f6515da93c7a788b00b3e88d4c507c2140d7e385 | [
"MIT"
] | null | null | null | # need beautifulsoup
from bs4 import BeautifulSoup
import asyncio
import uvloop
import datetime
import logging
import pprint
from models import *
from helpers import OPRHelper, AwardHelper, ResultsPageHelper
from db.orm import orm
class OldChamps:
    """
    Importer for pre-2015 FTC World Championship data.

    Each ``load_<year>`` classmethod parses archived HTML results pages
    (and a plain-text awards file) from ``data/old_champs/``, builds ORM
    model instances, and persists them via :meth:`finalize`.
    """

    @classmethod
    def mk_champs(cls, year, start_date, end_date):
        """
        Generate the three World Championship Event records (Franklin
        division, Edison division, and Finals) for the given season year
        and date range. Assumes a single championship ("1champs").

        Args:
            year (int): first calendar year of the season (e.g. 2009 for 0910).
            start_date (str): division play start date, "YYYY-MM-DD".
            end_date (str): finals date, "YYYY-MM-DD".

        Returns:
            tuple: (franklin, edison, finals) Event instances.
        """
        seasons = ["Quad Quandary", "Face Off", "Hot Shot", "Get Over It", "Bowled Over", "Ring It Up", "Block Party",
                   "Cascade Effect", "RES-Q", "Velocity Vortex", "Relic Recovery", "Rover Ruckus"]
        # seasons[0] corresponds to 2007, hence the offset below
        season_name = seasons[year-2007]
        start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
        end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
        # two-digit year pair, e.g. 2009 -> "0910"
        season = f"{year % 100:02}{(year + 1) % 100:02}"
        # fyear = f"{year}-{(year+1)%1000:02d}"
        # Venue moved over the years: Atlanta (2009), Edward Jones Dome
        # (2010-2012), Union Station (2013+).
        if year == 2009:
            city, state_prov, country = "Atlanta", "Georgia", "USA"
            venue = "Georgia Dome"
            address = "1 Georgia Dome Dr, Atlanta, GA 30313"
        elif year < 2013:
            city, state_prov, country = "St. Louis", "Missouri", "USA"
            venue = "Edward Jones Dome"
            address = "701 Convention Plaza, St. Louis, MO 63101"
        else:
            city, state_prov, country = "St. Louis", "Missouri", "USA"
            venue = "Union Station"
            address = "1820 Market Street, St. Louis, MO 63103"
        # fields common to all three Event records
        shared = {
            "year": year,
            "city": city,
            "state_prov": state_prov,
            "country": country,
            "end_date": end_date,
            "event_type": EventType.WORLD_CHAMPIONSHIP,
            "venue": venue,
            "address": address,
            "data_sources": ["USFIRST Website Archives"]
        }
        finals = Event(key=f"{season}cmp0",
                       name=f"FTC {season_name} World Championship - Finals",
                       playoff_type=PlayoffType.BO3_FINALS,
                       division_keys=[f"{season}cmp1", f"{season}cmp2"],
                       start_date=end_date,
                       **shared)
        # NOTE(review): Franklin is keyed cmp2 and Edison cmp1 here, but
        # load_2009/load_2011 load Franklin *matches* under cmp1 and Edison
        # under cmp2 (load_2010/load_2012 agree with this mapping). Confirm
        # which division/key pairing is correct for each year.
        franklin = Event(key=f"{season}cmp{2}",
                         name=f"FTC {season_name} World Championship - Franklin Division",
                         playoff_type=PlayoffType.STANDARD,
                         parent_event_key=f"{season}cmp0",
                         start_date=start_date,
                         **shared)
        edison = Event(key=f"{season}cmp{1}",
                       name=f"FTC {season_name} World Championship - Edison Division",
                       playoff_type=PlayoffType.STANDARD,
                       parent_event_key=f"{season}cmp0",
                       start_date=start_date,
                       **shared)
        return (franklin, edison, finals)

    @classmethod
    def load_awards_file(cls, awards_data, year, event_key):
        """
        Parse a plain-text awards file into Award instances.

        Each non-empty line has the form ``award name|winner1,winner2,...``
        where a winner entry is a team number, optionally suffixed with
        ``:extra`` (sub-award name for Judge's awards, recipient name for
        Compass / Volunteer of the Year).

        Returns:
            list of Award
        """
        ret = []
        for line in awards_data.split('\n'):
            if not line:
                continue
            aname, winners = line.split('|')
            # "oops!" sentinel will fail the == comparisons below and fall
            # into the generic else branch; presumably never hit in practice
            atype = AwardType.to_type.get(aname.lower(), "oops!")
            for i, team_data in enumerate(winners.split(','), 1):
                recipient = None
                place = i
                if atype == AwardType.JUDGES:
                    # every Judge's sub-award counts as a 1st-place award
                    team, sub_name = team_data.split(':')
                    award_name = f"Judge's \"{sub_name}\" Award"
                    place = 1
                elif atype == AwardType.VOL_OF_YEAR or atype == AwardType.COMPASS:
                    # these awards carry an individual recipient name
                    team, recipient = team_data.split(':')
                    award_name = AwardType.get_names(atype, year=year)
                else:
                    team = team_data
                    award_name = AwardType.get_names(atype, year=year)
                award = Award(name=award_name, award_type=atype, event_key=event_key, team_key='ftc'+team, recipient_name=recipient, award_place=place)
                award.name += " Winner" if award.award_place == 1 else " Finalist"
                ret.append(award)
        return ret

    @classmethod
    async def upsert_all(cls, args):
        """
        Upsert a nested collection of ORM models in one connection.

        ``args`` is a list of iterables; each element is either a Model or
        a further iterable of Models (one level deep only).
        """
        async with orm.pool.acquire() as conn:
            for a in args:
                for ent in a:
                    if isinstance(ent, orm.Model):
                        await ent.upsert(conn=conn)
                    else:
                        for ent2 in ent:
                            await ent2.upsert(conn=conn)

    @classmethod
    async def load_2009(cls):
        """Load the 2009-2010 (Hot Shot) championship from a single archived page."""
        year = 2009
        with open("data/old_champs/2009-2010/2009CMPresultsandrankings.html") as f:
            data = f.read()
        with open("data/old_champs/2009-2010/awards2") as f:
            awards_data = f.read()
        soup = BeautifulSoup(data, 'lxml')
        tables = list(soup.find_all("table"))
        # table order on the archived page: finals, franklin, edison,
        # franklin rankings, edison rankings
        finals = ResultsPageHelper.load_matches(tables[0], "0910cmp0")
        franklin = ResultsPageHelper.load_matches(tables[1], "0910cmp1")
        edison = ResultsPageHelper.load_matches(tables[2], "0910cmp2")
        # has_hs=False: this year's ranking tables lack the highest-score column
        franklin_rank = ResultsPageHelper.load_rankings(tables[3], franklin, has_hs=False)
        edison_rank = ResultsPageHelper.load_rankings(tables[4], edison, has_hs=False)
        events = cls.mk_champs(year, "2010-04-14", "2010-04-17")
        awards = cls.load_awards_file(awards_data, year, events[-1].key)
        await cls.finalize([finals, franklin, edison, franklin_rank, edison_rank, events, awards], events, year)

    @classmethod
    async def load_2010(cls):
        """Load the 2010-2011 (Get Over It!) championship."""
        year = 2010
        with open("data/old_champs/2010-2011/2010-2011-ftc-world-championship-get-over-it!-results.html") as f:
            data = f.read()
        with open("data/old_champs/2010-2011/awards") as f:
            awards_data = f.read()
        soup = BeautifulSoup(data, 'lxml')
        tables = list(soup.find_all("table"))
        finals = ResultsPageHelper.load_matches(tables[0], "1011cmp0")
        edison = ResultsPageHelper.load_matches(tables[1], "1011cmp1")
        franklin = ResultsPageHelper.load_matches(tables[2], "1011cmp2")
        edison_rank = ResultsPageHelper.load_rankings(tables[3], edison)
        franklin_rank = ResultsPageHelper.load_rankings(tables[4], franklin)
        events = cls.mk_champs(year, "2011-04-27", "2011-04-30")
        awards = cls.load_awards_file(awards_data, year, events[-1].key)
        await cls.finalize([finals, franklin, edison, franklin_rank, edison_rank, events, awards], events, year)

    @classmethod
    async def load_2011(cls):
        """Load the 2011-2012 (Bowled Over) championship."""
        year = 2011
        with open("data/old_champs/2011-2012/2011-2012FTCCMPResults") as f:
            data = f.read()
        with open("data/old_champs/2011-2012/awards") as f:
            awards_data = f.read()
        soup = BeautifulSoup(data, 'lxml')
        tables = list(soup.find_all("table"))
        # table indices are specific to this archive page's layout
        finals = ResultsPageHelper.load_matches(tables[3], "1112cmp0")
        franklin = ResultsPageHelper.load_matches(tables[15], "1112cmp1")
        edison = ResultsPageHelper.load_matches(tables[14], "1112cmp2")
        franklin_rank = ResultsPageHelper.load_rankings(tables[13], franklin)
        edison_rank = ResultsPageHelper.load_rankings(tables[12], edison)
        events = cls.mk_champs(year, "2012-04-25", "2012-04-28")
        awards = cls.load_awards_file(awards_data, year, events[-1].key)
        await cls.finalize([finals, franklin, edison, franklin_rank, edison_rank, events, awards], events, year)

    @classmethod
    async def load_2012(cls):
        """Load the 2012-2013 (Ring It Up) championship from per-division files."""
        year = 2012
        with open("data/old_champs/2012-2013/Match_Results_World Championship_Edison.html") as f:
            edison = ResultsPageHelper.load_matches(BeautifulSoup(f.read(), 'lxml').find("table"), "1213cmp1")
        with open("data/old_champs/2012-2013/Match_Results_World Championship_Franklin.html") as f:
            franklin = ResultsPageHelper.load_matches(BeautifulSoup(f.read(), 'lxml').find("table"), "1213cmp2")
        with open("data/old_champs/2012-2013/finals.html") as f:
            finals = ResultsPageHelper.load_matches(BeautifulSoup(f.read(), 'lxml').find("table"), "1213cmp0")
        with open("data/old_champs/2012-2013/Rankings_World Championship_Edison.html") as f:
            edison_rank = ResultsPageHelper.load_rankings(BeautifulSoup(f.read(), 'lxml').find("table"), edison)
        with open("data/old_champs/2012-2013/Rankings_World Championship_Franklin.html") as f:
            franklin_rank = ResultsPageHelper.load_rankings(BeautifulSoup(f.read(), 'lxml').find("table"), franklin)
        with open("data/old_champs/2012-2013/awards") as f:
            awards = cls.load_awards_file(f.read(), year, '1213cmp0')
        events = cls.mk_champs(year, "2013-04-24", "2013-04-27")
        await cls.finalize([finals, franklin, edison, franklin_rank, edison_rank, events, awards], events, year)

    @classmethod
    async def load_2013(cls):
        """Load only events/awards for 2013-2014; match data comes from TYA."""
        year = 2013
        # this is mostly to overwrite tya's names, and to includes awards data (which tya doesn't)
        events = cls.mk_champs(year, "2014-04-24", "2014-04-26")
        for e in events:
            e.data_sources.append("The Yellow Alliance")
        with open("data/old_champs/2013-2014/awards") as f:
            awards = cls.load_awards_file(f.read(), year, '1314cmp0')
        await cls.finalize([events, awards], events, 2013)

    @classmethod
    async def load_2014(cls):
        """Load the 2014-2015 (Cascade Effect) championship from detail pages."""
        year = 2014
        # edison
        with open("data/old_champs/2014-2015/MatchResultsDetails_World_Championship_Edison_T.html") as f:
            edison = ResultsPageHelper.load_match_details(BeautifulSoup(f.read(), 'lxml').find("table"), "1415cmp2")
        with open("data/old_champs/2014-2015/MatchResultsDetails_World_Championship_Edison_Elim.html") as f:
            edison.extend(ResultsPageHelper.load_match_details(BeautifulSoup(f.read(), 'lxml').find("table"), "1415cmp2"))
        # franklin
        with open("data/old_champs/2014-2015/MatchResultsDetails_World_Championship_Franklin_T.html") as f:
            franklin = ResultsPageHelper.load_match_details(BeautifulSoup(f.read(), 'lxml').find("table"), "1415cmp1")
        with open("data/old_champs/2014-2015/MatchResultsDetails_World_Championship_Franklin_Elim.html") as f:
            franklin.extend(ResultsPageHelper.load_match_details(BeautifulSoup(f.read(), 'lxml').find("table"), "1415cmp1"))
        # finals
        with open("data/old_champs/2014-2015/MatchResultsDetails_World_Championship_Finals.html") as f:
            finals = ResultsPageHelper.load_match_details(BeautifulSoup(f.read(), 'lxml').find("table"), "1415cmp0")
        # rankings
        with open("data/old_champs/2014-2015/Rankings_World_Championship_Edison.html") as f:
            edison_rank = ResultsPageHelper.load_rankings(BeautifulSoup(f.read(), 'lxml').find("table"), edison)
        with open("data/old_champs/2014-2015/Rankings_World_Championship_Franklin.html") as f:
            franklin_rank = ResultsPageHelper.load_rankings(BeautifulSoup(f.read(), 'lxml').find("table"), franklin)
        with open("data/old_champs/2014-2015/awards") as f:
            awards = cls.load_awards_file(f.read(), year, '1415cmp0')
        events = cls.mk_champs(year, "2015-04-22", "2015-04-25")
        await cls.finalize([finals, franklin, edison, franklin_rank, edison_rank, events, awards], events, year)

    @classmethod
    async def finalize(cls, objects, events, year):
        """
        Persist all parsed objects, then derive OPRs, winner/finalist
        awards, and EventParticipant records for the year.
        """
        await cls.upsert_all(objects)
        logging.info(f"finalize({year}): Calculating OPRs....")
        await asyncio.gather(*[OPRHelper.update_oprs(event.key) for event in events])
        logging.info(f"finalize({year}): Generating winning/finalist awards...")
        await asyncio.gather(*[AwardHelper.generate_winners_finalists(e, fail_silent=True) for e in events])
        logging.info(f"finalize({year}): generating EventParticipants...")
        await EventParticipant.generate(year)

    @classmethod
    def read_table(cls, table):
        """Return a BeautifulSoup <table> as a list of rows of cell text."""
        return [[td.get_text() for td in tr.find_all("td")] for tr in table.find_all("tr")]

    @classmethod
    async def load(cls):
        """Run every implemented ``load_<year>`` loader from 2009 up to MAX_YEAR-1."""
        MAX_YEAR = 2015
        for i in range(2009, MAX_YEAR):
            if hasattr(cls, f'load_{i}'):
                await getattr(cls, f'load_{i}')()
                print("...", i)
async def main():
    """Connect to Postgres, ensure the schema exists, and import all old championship data."""
    print("Initializing database connection...")
    # Unix-domain-socket connection to a local PostgreSQL instance
    await orm.connect(host="/run/postgresql/.s.PGSQL.5432", database="ftcdata", max_size=50)
    await orm.Model.create_all_tables()
    print("Loading old championship data...")
    await OldChamps.load()
    await orm.close()
if __name__ == "__main__":
    # uvloop replaces the default asyncio event loop for performance
    uvloop.install()
    asyncio.get_event_loop().run_until_complete(main())
| 48.048689 | 151 | 0.623353 |
daa9473741cfb814e9b6000523664bec506762d4 | 13,207 | py | Python | seqtables/core/utils/insilica_sequences.py | costas821/seqtables | e4632898a912050fcd769d90e359fd6bee6d412b | [
"MIT"
] | 7 | 2017-07-24T18:06:23.000Z | 2021-06-03T18:34:54.000Z | seqtables/core/utils/insilica_sequences.py | cchrysostomou/seqtables | e4632898a912050fcd769d90e359fd6bee6d412b | [
"MIT"
] | 6 | 2018-07-10T18:28:40.000Z | 2021-06-01T23:15:45.000Z | seqtables/core/utils/insilica_sequences.py | costas821/seqtables | e4632898a912050fcd769d90e359fd6bee6d412b | [
"MIT"
] | 2 | 2017-01-13T19:03:16.000Z | 2018-02-28T21:02:09.000Z | import numpy as np
from seqtables.core.utils.seq_table_util import degen_to_base
"""
Methods for generating a set of fake sequences
"""
def generate_sequence(seq_len=100, chars='ACTG', p_bases=[0.25, 0.25, 0.25, 0.25]):
    """
    Draw a random DNA sequence of ``seq_len`` characters.

    Args:
        seq_len (int): total number of characters in the sequence (default 100)
        chars (str or list): alphabet to draw from, one base per element
        p_bases (list of float): selection probability for each base

    Returns:
        str: the generated sequence
    """
    assert len(chars) == len(p_bases)
    alphabet = chars if isinstance(chars, list) else list(chars)
    draws = np.random.choice(alphabet, (seq_len,), p=p_bases)
    return ''.join(draws.tolist())
def generate_library(
    scaffold_seq, num_seqs, error_prone_rate=0, no_error_prone_pos=[], ss_pos=[],
    site_saturation={}, default_site_saturation='N', return_as='seq'
):
    """
    Create a fake library of DNA sequences using a scaffold sequence
    Args:
        scaffold_seq (str): Sequence to create a library from (starting wildtype sequence)
        num_seqs (int): Number of sequences to generate
        error_prone_rate (float): Error prone rate (will assume a poisson distribution)
        no_error_prone_pos (list of ints): Columns/base positions that should NOT undergo mutation
        ss_pos (list_of_ints): Columns/base positions that SHOULD be site saturated
        site_saturation (dict): Each key should be an integer corresponding to a base position defined in ss_pos. Each value should be either
            1) A character corresponding to degenerate bases that are allowed at that position
            2) A list of two-element tuples corresponding to (letter, probability of selection)
        default_site_saturation (char): Letter defining the default degenerate base distribution to use for a SS position
        return_as (allowed values = 'let', 'seq'): Return values as an nparray of characters, or return as a np array of full length sequences
    Returns:
        list of seqs
    ..note:: Order of operations
        If defining both an error prone event and a site saturation event at the same position, site saturation will occur first, then an error prone
    ..note:: base positions
        Function assumes that bases start at 1 and not 0 (i.e. not python indexing)
    """
    # convert positions to indices
    no_error_prone_pos = [b - 1 for b in no_error_prone_pos]
    # make sure all site saturated positions are included
    ss_pos = sorted(ss_pos + list(site_saturation.keys()))
    # generate sequences: tile the scaffold into a (num_seqs x seq_len) table
    # of single-byte characters
    seq_as_array = np.array([scaffold_seq]).astype('S').view('S1')
    seq_list = np.tile(seq_as_array, num_seqs).reshape(num_seqs, -1)
    # fill in the default degenerate code for any SS position the caller did
    # not configure explicitly (the .copy() here appears unnecessary — the
    # comprehension never mutates site_saturation)
    site_saturation = {p: site_saturation[p] if p in site_saturation.copy() else default_site_saturation for p in ss_pos}
    # invert the module-level degeneracy table; presumably degen_to_base maps
    # a base string to its degenerate code — TODO confirm against
    # seq_table_util.degen_to_base
    degen_to_base_rev = {v: b for b, v in degen_to_base.items()}
    for l in ['A', 'C', 'T', 'G']:
        # plain bases map to themselves
        degen_to_base_rev[l] = l
    # perform site-saturation mutagenesis
    for p in ss_pos:
        # positions are 1-based; convert to a column index
        ind = p - 1
        # determine how to saturate bases at this position
        if isinstance(site_saturation[p], str):
            # degenerate-code form: uniform choice over the allowed bases
            assert site_saturation[p] in list(degen_to_base_rev.keys())
            allowed_lets = degen_to_base_rev[site_saturation[p]]
            probs = [1.0 / len(allowed_lets)] * len(allowed_lets)
            lets = list(allowed_lets)
        elif isinstance(site_saturation[p], list):
            # explicit (letter, weight) form: normalize weights to sum to 1
            lets = [l[0] for l in site_saturation[p]]
            probs = np.array([l[1] for l in site_saturation[p]])
            probs = (probs * 1.0) / probs.sum()
        else:
            raise Exception('Error: invalid format for site_saturation')
        # randomly choose bases
        seq_list[:, ind] = np.random.choice(lets, (num_seqs,), p=probs).astype('S1')
    # error-prone mutagenesis happens after site saturation (see note above);
    # mutates seq_list in place
    generate_error_prone(seq_list, error_prone_rate, no_error_prone_pos)
    if return_as == 'seq':
        # return full length sequence as an array
        return seq_list.view('S' + str(seq_list.shape[1])).squeeze()
    elif return_as == 'let':
        # maintain view as a table of seq/pos
        return seq_list
    else:
        raise Exception('Invalid option for return_as parameter. only allow "seq" or "let"')
def generate_error_prone(seq_list, error_prone_rate=[], no_error_prone_pos=[], return_as_sequences=False):
    """
    Apply error-prone (random point) mutagenesis to a table of sequences.

    Args:
        seq_list (np.ndarray): either an (n,) array of full-length byte
            sequences or an (n x b) table of single bytes ('S1')
        error_prone_rate (float): per-base probability of substitution
        no_error_prone_pos (list of ints): column indices protected from mutation
        return_as_sequences (bool): if True, collapse back to full-length
            byte strings; otherwise return the (n x b) character table

    Returns:
        np.ndarray: the mutated sequence data (mutation is in place when the
        input is already an 'S1' table)
    """
    assert isinstance(seq_list, np.ndarray), 'Invalid dtype for seq_list'
    num_seqs = seq_list.shape[0]
    if seq_list.dtype != 'S1':
        # expand full-length sequences into one column per base
        seq_list = np.array(seq_list, dtype='S').view('S1').reshape(num_seqs, -1)
    num_lets = seq_list.shape[1]
    # columns eligible for mutation (everything not explicitly protected)
    mutable_cols = sorted(set(range(num_lets)) - set(no_error_prone_pos))
    # flatten the eligible region so a single random mask covers it
    flat_bases = seq_list[:, mutable_cols].ravel()
    hit_mask = np.random.choice(
        [False, True], flat_bases.shape,
        p=[1.0 - error_prone_rate, error_prone_rate]
    ).ravel()
    # replacements are drawn uniformly from ACTG (a "mutation" may re-draw
    # the original base, matching the original implementation)
    replacements = np.random.choice(list('ACTG'), (hit_mask.sum(),)).astype('S1')
    flat_bases[hit_mask] = replacements
    # write the mutated region back into the table
    seq_list[:, mutable_cols] = flat_bases.reshape(num_seqs, len(mutable_cols))
    if return_as_sequences:
        return seq_list.view('S{0}'.format(num_lets)).squeeze()
    return seq_list
def add_quality_scores(
    sequence_list, read_type='r1', min_quality=0, max_quality=40,
    starting_mean_quality=36, ending_mean_quality=15, stdV=5, phred_adjust=33, bulk_size=None
):
    """
    Adds quality scores with a moving mean as a function of distance from start of sequencing

    Args:
        sequence_list (np array (n x 1 matrix or n x b)): rows (n) represent number of sequences, columns (b) represent number of bases in sequence
        read_type ('r1' or 'r2'): Is the sequence an r1 read or an r2 read (i.e. does sequence start at 5' or 3')
        min_quality (int): minimum allowed quality in a read
        max_quality (int): maximum allowed quality in a read
        starting_mean_quality (float): mean value of the read quality at the first base
        ending_mean_quality (float): mean value of the read quality at the last base
        stdV (float OR function): standard deviation of the quality score; a float means
            uniform std across positions, a function takes base positions and returns floats
        phred_adjust (int): character to associate with a quality score of 0
        bulk_size (int): number of random draws generated per batch, to bound peak memory

    Returns:
        np array (base quality scores for each read)

    Fixes vs. the original implementation:
    - random draws are kept as floats until the final rounding; the old
      ``randn(...).astype(np.uint8)`` wrapped negative draws to ~255 and
      truncated the rest, destroying the intended normal distribution
    - bulk chunks are joined with ``np.concatenate``; ``np.stack`` crashed
      whenever bulk_size did not evenly divide the total number of bases
    """
    # guess format of sequences provided
    if isinstance(sequence_list, list):
        sequence_list = np.array(sequence_list)
    if len(sequence_list.shape) == 1 or sequence_list.shape[1] == 1:
        # a flat list/column of full-length sequences
        return_as = 'seqs'
        max_seq_len = np.apply_along_axis(arr=sequence_list.reshape(-1, 1), func1d=lambda x: len(x[0]), axis=1).max()
    else:
        # a (seqs x bases) character table
        return_as = 'let'
        max_seq_len = sequence_list.shape[1]

    num_seqs = sequence_list.shape[0]
    total = int(num_seqs * max_seq_len)
    if bulk_size is None:
        bulk_size = total
    # Draw standard-normal noise in bulks to bound peak memory; keep floats
    # until the very end so mean/std scaling behaves as intended.
    chunks = [
        np.random.randn(min(bulk_size, total - ind))
        for ind in range(0, total, bulk_size)
    ]
    # concatenate (not stack): the final bulk may be smaller than the others
    qualities = np.concatenate(chunks).reshape(num_seqs, max_seq_len)

    # Mean quality follows a * log(pos + 1) + b so that the curve equals
    # ending_mean_quality at position 0 and starting_mean_quality at the far
    # end (the curve is flipped below for r1 reads).
    b, a = ending_mean_quality, (ending_mean_quality - starting_mean_quality) / np.log(1.0 / (1.0 + max_seq_len))
    mean_qualities = a * np.log(np.arange(0, max_seq_len) + 1.0) + b
    if read_type == 'r1':
        # r1 starts high and degrades toward the 3' end
        mean_qualities = mean_qualities[::-1]
    elif read_type == 'r2':
        mean_qualities = mean_qualities
    else:
        raise Exception('Invalid read type: ' + read_type)
    if callable(stdV):
        # standard deviation as a function of base position
        std_vals = np.asarray(stdV(np.arange(0, max_seq_len)), dtype=float)
    else:
        # uniform standard deviation
        std_vals = np.full(max_seq_len, stdV, dtype=float)
    # scale the unit-normal noise per position, then clamp to the allowed range
    qualities = qualities * std_vals.reshape(1, -1) + mean_qualities.reshape(1, -1)
    qualities = np.clip(qualities, min_quality, max_quality)
    qualities = qualities.round().astype(np.uint8)
    if return_as == 'let':
        # keep the (seqs x bases) character layout
        return (qualities + phred_adjust).view('S1')
    else:
        # collapse each row into one fixed-width quality string
        return (qualities + phred_adjust).view('S' + str(max_seq_len)).squeeze()
def randomly_add_indels(
    sequence_list, qual_list=None, insertion_rate=0.001, deletion_rate=0.01, expected_cons_ins=1, max_ins=10,
    avg_ins_qual=20, ins_qual_std=3
):
    """
    Randomly introduce insertions and deletions into a set of sequences.

    Args:
        sequence_list: iterable of sequences (converted to a byte array)
        qual_list: optional matching iterable of quality strings
        insertion_rate (float): per-base probability of an insertion event
        deletion_rate (float): per-base probability of deleting a base
        expected_cons_ins (int): Poisson mean of consecutive bases per insertion event
        max_ins (int): NOTE(review): currently unused — verify intent
        avg_ins_qual (int): mean quality assigned to inserted bases (< 45)
        ins_qual_std (int): std of the quality assigned to inserted bases

    Returns:
        tuple: ``(new_seqs,)`` when qual_list is None (a 1-tuple — note the
        asymmetry with the 2-tuple return below), otherwise
        ``(new_seqs, new_quals)``
    """
    # one single-byte cell per base; '-' marks deleted positions
    lets = np.array(sequence_list).astype('S').view('S1')
    num_seqs = len(sequence_list)
    # pad cells (empty bytes from unequal-length sequences) become '-' too
    lets[lets == ''.encode()] = '-'
    delete_these_pos = np.random.choice([False, True], lets.shape, p=[1.0 - deletion_rate, deletion_rate]).ravel()
    lets[delete_these_pos] = '-'
    if not(qual_list is None):
        quals = np.array(qual_list).astype('S').view('S1')
        # add EMPTY qualitys for deleted bases (ascii value = 32)
        quals[delete_these_pos] = ' '
    if insertion_rate:
        assert avg_ins_qual < 45, 'Error average ins quality should be less than 45'
        # choose which positions receive an insertion *after* the base
        ins_these_pos = np.random.choice([False, True], lets.shape, p=[1.0 - insertion_rate, insertion_rate]).ravel()
        total_ins = ins_these_pos.sum()
        # Poisson-distributed run length per insertion event, minimum 1
        num_ins_per_pos = np.random.poisson(expected_cons_ins, total_ins)
        num_ins_per_pos[num_ins_per_pos == 0] = 1
        # widest cell needed: original base plus the longest insertion run
        max_ins_observed = num_ins_per_pos.max() + 1
        total_letters_to_create = num_ins_per_pos.sum()
        new_lets = np.random.choice(list('ACTG'), total_letters_to_create).astype('U{0}'.format(max_ins_observed))
        new_quals = (np.round(np.random.normal(avg_ins_qual, ins_qual_std, total_letters_to_create)).astype(np.uint8))
        # NOTE(review): this check is dead — values are already unsigned
        # after the uint8 cast (negative draws would have wrapped above);
        # with avg_ins_qual≈20 and std≈3 negative draws are ~never expected
        new_quals[new_quals < 0] = 0
        new_quals[new_quals > 45] = 45
        # convert raw scores to phred characters, widened to match the cells
        new_quals = (new_quals + 33).view('S1').astype('U{0}'.format(max_ins_observed))
        lets = lets.astype('U{0}'.format(max_ins_observed))
        # split the flat insertion pool into one run per insertion position
        lets_inserted_at_pos = np.split(new_lets, num_ins_per_pos.cumsum()[:-1])
        quals_inserted_at_pos = np.split(new_quals, num_ins_per_pos.cumsum()[:-1])
        # append each run to the base at its insertion position
        lets[ins_these_pos] = np.array(
            [
                a + ''.join(b) for a, b in zip(
                    lets[ins_these_pos], lets_inserted_at_pos
                )
            ]
        )
        if not(qual_list is None):
            quals = quals.astype('U{0}'.format(max_ins_observed))
            quals[ins_these_pos] = np.array(
                [
                    a + ''.join(b) for a, b in zip(
                        quals[ins_these_pos], quals_inserted_at_pos
                    )
                ]
            )
    # collapse each row back to a string, dropping deletion markers
    new_seqs = np.apply_along_axis(
        arr=lets.reshape(num_seqs, -1),
        axis=1,
        func1d=lambda x: ''.join(list(x)).replace('-', '')
    )
    if qual_list is None:
        return new_seqs,
    else:
        # drop the space placeholders that marked deleted bases
        new_quals = np.apply_along_axis(
            arr=quals.reshape(num_seqs, -1),
            axis=1,
            func1d=lambda x: ''.join(list(x)).replace(' ', '')
        )
        return new_seqs, new_quals
| 41.926984 | 167 | 0.655183 |
17023db295f81fe53bd301f8e5ef2a9556a107da | 11,355 | py | Python | validators/bar/marker/_colorbar.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | validators/bar/marker/_colorbar.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | validators/bar/marker/_colorbar.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
    """
    Validator for the ``colorbar`` compound property of ``bar.marker``.

    Auto-generated plotly.py-style validator: it delegates to
    ``CompoundValidator`` with the target data class name (``ColorBar``)
    and the human-readable documentation of every colorbar sub-property.
    """

    def __init__(self, plotly_name="colorbar", parent_name="bar.marker", **kwargs):
        # ``data_class_str`` and ``data_docs`` may be overridden by the
        # caller via kwargs; otherwise the generated defaults below are used.
        super(ColorbarValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "ColorBar"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            bgcolor
                Sets the color of padded area.
            bordercolor
                Sets the axis line color.
            borderwidth
                Sets the width (in px) or the border enclosing
                this color bar.
            dtick
                Sets the step in-between ticks on this axis.
                Use with `tick0`. Must be a positive number, or
                special strings available to "log" and "date"
                axes. If the axis `type` is "log", then ticks
                are set every 10^(n*dtick) where n is the tick
                number. For example, to set a tick mark at 1,
                10, 100, 1000, ... set dtick to 1. To set tick
                marks at 1, 100, 10000, ... set dtick to 2. To
                set tick marks at 1, 5, 25, 125, 625, 3125, ...
                set dtick to log_10(5), or 0.69897000433. "log"
                has several special values; "L<f>", where `f`
                is a positive number, gives ticks linearly
                spaced in value (but not position). For example
                `tick0` = 0.1, `dtick` = "L0.5" will put ticks
                at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
                plus small digits between, use "D1" (all
                digits) or "D2" (only 2 and 5). `tick0` is
                ignored for "D1" and "D2". If the axis `type`
                is "date", then you must convert the time to
                milliseconds. For example, to set the interval
                between ticks to one day, set `dtick` to
                86400000.0. "date" also has special values
                "M<n>" gives ticks spaced by a number of
                months. `n` must be a positive integer. To set
                ticks on the 15th of every third month, set
                `tick0` to "2000-01-15" and `dtick` to "M3". To
                set ticks every 4 years, set `dtick` to "M48"
            exponentformat
                Determines a formatting rule for the tick
                exponents. For example, consider the number
                1,000,000,000. If "none", it appears as
                1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
                "power", 1x10^9 (with 9 in a super script). If
                "SI", 1G. If "B", 1B.
            len
                Sets the length of the color bar This measure
                excludes the padding of both ends. That is, the
                color bar length is this length minus the
                padding on both ends.
            lenmode
                Determines whether this color bar's length
                (i.e. the measure in the color variation
                direction) is set in units of plot "fraction"
                or in *pixels. Use `len` to set the value.
            minexponent
                Hide SI prefix for 10^n if |n| is below this
                number. This only has an effect when
                `tickformat` is "SI" or "B".
            nticks
                Specifies the maximum number of ticks for the
                particular axis. The actual number of ticks
                will be chosen automatically to be less than or
                equal to `nticks`. Has an effect only if
                `tickmode` is set to "auto".
            outlinecolor
                Sets the axis line color.
            outlinewidth
                Sets the width (in px) of the axis line.
            separatethousands
                If "true", even 4-digit integers are separated
            showexponent
                If "all", all exponents are shown besides their
                significands. If "first", only the exponent of
                the first tick is shown. If "last", only the
                exponent of the last tick is shown. If "none",
                no exponents appear.
            showticklabels
                Determines whether or not the tick labels are
                drawn.
            showtickprefix
                If "all", all tick labels are displayed with a
                prefix. If "first", only the first tick is
                displayed with a prefix. If "last", only the
                last tick is displayed with a suffix. If
                "none", tick prefixes are hidden.
            showticksuffix
                Same as `showtickprefix` but for tick suffixes.
            thickness
                Sets the thickness of the color bar This
                measure excludes the size of the padding, ticks
                and labels.
            thicknessmode
                Determines whether this color bar's thickness
                (i.e. the measure in the constant color
                direction) is set in units of plot "fraction"
                or in "pixels". Use `thickness` to set the
                value.
            tick0
                Sets the placement of the first tick on this
                axis. Use with `dtick`. If the axis `type` is
                "log", then you must take the log of your
                starting tick (e.g. to set the starting tick to
                100, set the `tick0` to 2) except when
                `dtick`=*L<f>* (see `dtick` for more info). If
                the axis `type` is "date", it should be a date
                string, like date data. If the axis `type` is
                "category", it should be a number, using the
                scale where each category is assigned a serial
                number from zero in the order it appears.
            tickangle
                Sets the angle of the tick labels with respect
                to the horizontal. For example, a `tickangle`
                of -90 draws the tick labels vertically.
            tickcolor
                Sets the tick color.
            tickfont
                Sets the color bar's tick label font
            tickformat
                Sets the tick label formatting rule using d3
                formatting mini-languages which are very
                similar to those in Python. For numbers, see:
                https://github.com/d3/d3-3.x-api-
                reference/blob/master/Formatting.md#d3_format
                And for dates see:
                https://github.com/d3/d3-time-
                format#locale_format We add one item to d3's
                date formatter: "%{n}f" for fractional seconds
                with n digits. For example, *2016-10-13
                09:15:23.456* with tickformat "%H~%M~%S.%2f"
                would display "09~15~23.46"
            tickformatstops
                A tuple of :class:`new_plotly.graph_objects.bar.mar
                ker.colorbar.Tickformatstop` instances or dicts
                with compatible properties
            tickformatstopdefaults
                When used in a template (as layout.template.dat
                a.bar.marker.colorbar.tickformatstopdefaults),
                sets the default property values to use for
                elements of bar.marker.colorbar.tickformatstops
            ticklabeloverflow
                Determines how we handle tick labels that would
                overflow either the graph div or the domain of
                the axis. The default value for inside tick
                labels is *hide past domain*. In other cases
                the default is *hide past div*.
            ticklabelposition
                Determines where tick labels are drawn.
            ticklen
                Sets the tick length (in px).
            tickmode
                Sets the tick mode for this axis. If "auto",
                the number of ticks is set via `nticks`. If
                "linear", the placement of the ticks is
                determined by a starting position `tick0` and a
                tick step `dtick` ("linear" is the default
                value if `tick0` and `dtick` are provided). If
                "array", the placement of the ticks is set via
                `tickvals` and the tick text is `ticktext`.
                ("array" is the default value if `tickvals` is
                provided).
            tickprefix
                Sets a tick label prefix.
            ticks
                Determines whether ticks are drawn or not. If
                "", this axis' ticks are not drawn. If
                "outside" ("inside"), this axis' are drawn
                outside (inside) the axis lines.
            ticksuffix
                Sets a tick label suffix.
            ticktext
                Sets the text displayed at the ticks position
                via `tickvals`. Only has an effect if
                `tickmode` is set to "array". Used with
                `tickvals`.
            ticktextsrc
                Sets the source reference on Chart Studio Cloud
                for ticktext .
            tickvals
                Sets the values at which ticks on this axis
                appear. Only has an effect if `tickmode` is set
                to "array". Used with `ticktext`.
            tickvalssrc
                Sets the source reference on Chart Studio Cloud
                for tickvals .
            tickwidth
                Sets the tick width (in px).
            title
                :class:`new_plotly.graph_objects.bar.marker.colorba
                r.Title` instance or dict with compatible
                properties
            titlefont
                Deprecated: Please use
                bar.marker.colorbar.title.font instead. Sets
                this color bar's title font. Note that the
                title's font used to be set by the now
                deprecated `titlefont` attribute.
            titleside
                Deprecated: Please use
                bar.marker.colorbar.title.side instead.
                Determines the location of color bar's title
                with respect to the color bar. Note that the
                title's location used to be set by the now
                deprecated `titleside` attribute.
            x
                Sets the x position of the color bar (in plot
                fraction).
            xanchor
                Sets this color bar's horizontal position
                anchor. This anchor binds the `x` position to
                the "left", "center" or "right" of the color
                bar.
            xpad
                Sets the amount of padding (in px) along the x
                direction.
            y
                Sets the y position of the color bar (in plot
                fraction).
            yanchor
                Sets this color bar's vertical position anchor
                This anchor binds the `y` position to the
                "top", "middle" or "bottom" of the color bar.
            ypad
                Sets the amount of padding (in px) along the y
                direction.
            """,
            ),
            **kwargs
        )
| 47.51046 | 83 | 0.527257 |
c2ebd7dabdf04469f6872320522c3ef7fabe7c1a | 2,955 | py | Python | robustgp_experiments/init_z/jug-plot-search-uci.py | kiminh/RobustGP | cfab7a9e7f56755bc7a6241f13a6f6ac29562107 | [
"Apache-2.0"
] | 1 | 2021-01-11T18:38:25.000Z | 2021-01-11T18:38:25.000Z | robustgp_experiments/init_z/jug-plot-search-uci.py | kiminh/RobustGP | cfab7a9e7f56755bc7a6241f13a6f6ac29562107 | [
"Apache-2.0"
] | null | null | null | robustgp_experiments/init_z/jug-plot-search-uci.py | kiminh/RobustGP | cfab7a9e7f56755bc7a6241f13a6f6ac29562107 | [
"Apache-2.0"
] | null | null | null | import jug.task
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
jug.init("jug_search_uci.py", "jug_search_uci.jugdata")
from jug_search_uci import (
dataset_names, sparse_task_results, get_settings, baseline_results, baseline_exps, sparse_exps
)
plot_all_datasets = False
plot_normalised = True
# Can comment this out to run all datasets
if not plot_all_datasets:
# dataset_names = ["Wilson_energy", "Wilson_concrete", "Wilson_airfoil", "Wilson_wine"]
# dataset_names = [n for n in dataset_names if n not in
# ["Wilson_pendulum", "Pendulum_noisy", "Wilson_wine"]]
dataset_names = [n for n in dataset_names if n not in
["Wilson_pendulum", "Pendulum_noisy", "Wilson_wine", "kin40k"]]
# dataset_names = ["Naval", "Naval_noisy"]
# dataset_names = ["Wilson_stock", "Wilson_energy", "Wilson_concrete", "Wilson_airfoil"]
# Get values from tasks
sparse_results_raw = {}
sparse_results_normalised = {}
baseline_lmls = {}
for dataset_name in dataset_names:
if (type(baseline_results[dataset_name]) is float) or not baseline_results[dataset_name].can_load():
continue
baseline_lmls[dataset_name] = jug.task.value(baseline_results[dataset_name])
experiment_storage_path, Ms, common_run_settings, dataset_custom_settings = get_settings(dataset_name)
sparse_task_values = [jug.task.value(result) for result in sparse_task_results[dataset_name]]
sparse_results_raw[dataset_name] = pd.DataFrame.from_records(
sparse_task_values, columns=['elbo', 'upper', 'rmse', 'nlpp'], index=Ms
)
noise_model_lml = len(baseline_exps[dataset_name].X_train) * (-0.5 * np.log(2 * np.pi) - 0.5)
sparse_results_normalised[dataset_name] = sparse_results_raw[dataset_name].copy()
sparse_results_normalised[dataset_name].elbo -= baseline_lmls[dataset_name]
sparse_results_normalised[dataset_name].elbo /= baseline_lmls[dataset_name] - noise_model_lml
sparse_results_normalised[dataset_name].upper -= baseline_lmls[dataset_name]
sparse_results_normalised[dataset_name].index /= baseline_exps[dataset_name].X_train.shape[0]
baseline_exps[dataset_name].load()
print(f"{dataset_name:30} lik variance: {baseline_exps[dataset_name].model.likelihood.variance.numpy():.8f}"
f" lml: {baseline_lmls[dataset_name]}")
sparse_results = sparse_results_normalised if plot_normalised else sparse_results_raw
_, ax = plt.subplots()
for dataset_name in sparse_results.keys():
# ax.axhline(baseline_lmls[dataset_name])
l, = ax.plot(sparse_results[dataset_name].index, sparse_results[dataset_name].elbo,
label=f"{dataset_name} ({len(sparse_exps[dataset_name][0].X_train)})")
# ax.plot(sparse_results[dataset_name].index, sparse_results[dataset_name].upper,
# color=l.get_color(), linestyle=':')
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fontsize='x-small', ncol=5)
plt.show()
| 49.25 | 112 | 0.741794 |
9beedb2934daed3b8fffe5c95aa39ad0bc499bbc | 1,018 | py | Python | nbaPlayerStats.py | ws2516/sportsbookProjects | 92e541d5c8bcbdae837d9f791366305d94bd1b46 | [
"CC0-1.0"
] | null | null | null | nbaPlayerStats.py | ws2516/sportsbookProjects | 92e541d5c8bcbdae837d9f791366305d94bd1b46 | [
"CC0-1.0"
] | null | null | null | nbaPlayerStats.py | ws2516/sportsbookProjects | 92e541d5c8bcbdae837d9f791366305d94bd1b46 | [
"CC0-1.0"
] | null | null | null | '''
Redditor: u/NInjas101
Ask: I want to be able to track a players points rebounds assists over time and come up
with last 3 game average, last 5 game average etc
'''
import requests
import datetime
DaysBack = 21
NumGameAverage = 5
tod = datetime.datetime.now()
d = datetime.timedelta(days = DaysBack) # 3 weeks should be enough
a = tod - d
date = str(a).split(' ')[0]
playerName = input('Player Name? ')
idUrl = 'https://www.balldontlie.io/api/v1/players?search=' + playerName
respID = requests.get(idUrl)
dataID = respID.json()['data'][0]['id']
statsUrl = 'https://www.balldontlie.io/api/v1/stats?start_date='+ date +'&player_ids[]=' + str(dataID)
respStat = requests.get(statsUrl)
data = respStat.json()['data']
assists, mins = [], []
for i in range(0,NumGameAverage):
assists += [data[i]['ast']]
mins += [data[i]['min']]
print(playerName, ' scored ', assists, ' assists in the past ', NumGameAverage, 'games.')
print(playerName, ' played ', mins, ' minutes in the past ', NumGameAverage, 'games.')
| 27.513514 | 102 | 0.687623 |
de1e8001d413b38346f572eecd15a838954d2756 | 94 | py | Python | practice/AtCoder-Beginners-Selection/ABC086A.py | miyuush/AtCoder | 9481f15b69b99f56334a623f5a63dbb5e6359522 | [
"MIT"
] | null | null | null | practice/AtCoder-Beginners-Selection/ABC086A.py | miyuush/AtCoder | 9481f15b69b99f56334a623f5a63dbb5e6359522 | [
"MIT"
] | null | null | null | practice/AtCoder-Beginners-Selection/ABC086A.py | miyuush/AtCoder | 9481f15b69b99f56334a623f5a63dbb5e6359522 | [
"MIT"
] | null | null | null | a, b = map(int, input().split())
if (a * b) % 2 == 0:
print('Even')
else:
print('Odd') | 18.8 | 32 | 0.478723 |
9e0ab0516e90c8c66c8936d7ba75a7c1d3bf6523 | 5,644 | py | Python | sweetviz/type_detection.py | DavidTitoInfantas/sweetviz | 531b17998cdecb9dc99e74a96aa938113ac46645 | [
"MIT"
] | 1,996 | 2020-06-03T19:55:02.000Z | 2022-03-30T11:53:48.000Z | sweetviz/type_detection.py | DavidTitoInfantas/sweetviz | 531b17998cdecb9dc99e74a96aa938113ac46645 | [
"MIT"
] | 103 | 2020-06-03T20:50:57.000Z | 2022-03-13T14:16:47.000Z | sweetviz/type_detection.py | DavidTitoInfantas/sweetviz | 531b17998cdecb9dc99e74a96aa938113ac46645 | [
"MIT"
] | 206 | 2020-06-05T13:25:14.000Z | 2022-03-14T18:13:48.000Z | import pandas as pd
from sweetviz.sv_types import FeatureType
from sweetviz.from_profiling_pandas import is_boolean, is_numeric, is_categorical, could_be_numeric
def determine_feature_type(series: pd.Series, counts: dict,
                           must_be_this_type: FeatureType, which_dataframe: str) -> object:
    """Infer (and optionally coerce) the sweetviz FeatureType of a column.

    :param series: the column being typed.
    :param counts: precomputed statistics; must contain the keys
        "value_counts_without_nan" and "distinct_count_without_nan".
    :param must_be_this_type: FeatureType.TYPE_UNKNOWN to auto-detect, or a
        specific type to coerce the detected type into (only a limited set of
        coercions is supported, see the COERCE table below).
    :param which_dataframe: label ("source"/"compare") used in error messages.
    :returns: the resolved FeatureType.
    :raises TypeError: for mixed-typed columns or unsupported coercions.
    """
    # Replace infinite values with NaNs to avoid issues with histograms
    # TODO: INFINITE VALUE HANDLING/WARNING
    # series.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan,
    #                inplace=True)
    if counts["value_counts_without_nan"].index.inferred_type.startswith("mixed"):
        # BUGFIX: error message previously read "This is is not currently supported".
        raise TypeError(f"\n\nColumn [{series.name}] has a 'mixed' inferred_type (as determined by Pandas).\n"
                        f"This is not currently supported; column types should not contain mixed data.\n"
                        f"e.g. only floats or strings, but not a combination.\n\n"
                        f"POSSIBLE RESOLUTIONS:\n"
                        f"BEST -> Make sure series [{series.name}] only contains a certain type of data (numerical OR string).\n"
                        f"OR -> Convert series [{series.name}] to a string (if makes sense) so it will be picked up as CATEGORICAL or TEXT.\n"
                        f"     One way to do this is:\n"
                        f"     df['{series.name}'] = df['{series.name}'].astype(str)\n"
                        f"OR -> Convert series [{series.name}] to a numerical value (if makes sense):\n"
                        f"     One way to do this is:\n"
                        f"     df['{series.name}'] = pd.to_numeric(df['{series.name}'], errors='coerce')\n"
                        f"     # (errors='coerce' will transform string values to NaN, that can then be replaced if desired;"
                        f" consult Pandas manual pages for more details)\n"
                        )
    try:
        # TODO: must_be_this_type ENFORCING
        if counts["distinct_count_without_nan"] == 0:
            # Empty
            var_type = FeatureType.TYPE_ALL_NAN
            # var_type = FeatureType.TYPE_UNSUPPORTED
        elif is_boolean(series, counts):
            var_type = FeatureType.TYPE_BOOL
        elif is_numeric(series, counts):
            var_type = FeatureType.TYPE_NUM
        elif is_categorical(series, counts):
            var_type = FeatureType.TYPE_CAT
        else:
            var_type = FeatureType.TYPE_TEXT
    except TypeError:
        var_type = FeatureType.TYPE_UNSUPPORTED

    # COERCE: only supporting the following for now:
    # TEXT -> CAT
    # CAT/BOOL -> TEXT
    # CAT/BOOL -> NUM
    # NUM -> CAT
    # NUM -> TEXT
    if must_be_this_type != FeatureType.TYPE_UNKNOWN and \
            must_be_this_type != var_type and \
            must_be_this_type != FeatureType.TYPE_ALL_NAN and \
            var_type != FeatureType.TYPE_ALL_NAN:
        if var_type == FeatureType.TYPE_TEXT and must_be_this_type == FeatureType.TYPE_CAT:
            var_type = FeatureType.TYPE_CAT
        elif (var_type == FeatureType.TYPE_CAT or var_type == FeatureType.TYPE_BOOL) and \
                must_be_this_type == FeatureType.TYPE_TEXT:
            var_type = FeatureType.TYPE_TEXT
        elif (var_type == FeatureType.TYPE_CAT or var_type == FeatureType.TYPE_BOOL) and \
                must_be_this_type == FeatureType.TYPE_NUM:
            # Trickiest: Coerce into numerical
            if could_be_numeric(series):
                var_type = FeatureType.TYPE_NUM
            else:
                raise TypeError(f"\n\nCannot force series '{series.name}' in {which_dataframe} to be converted from its {var_type} to\n"
                                f"DESIRED type {must_be_this_type}. Check documentation for the possible coercion possibilities.\n"
                                f"POSSIBLE RESOLUTIONS:\n"
                                f" -> Use the feat_cfg parameter (see docs on git) to force the column to be a specific type (may or may not help depending on the type)\n"
                                f" -> Modify the source data to be more explicitly of a single specific type\n"
                                f" -> This could also be caused by a feature type mismatch between source and compare dataframes:\n"
                                f"    In that case, make sure the source and compared dataframes are compatible.\n")
        elif var_type == FeatureType.TYPE_NUM and must_be_this_type == FeatureType.TYPE_CAT:
            var_type = FeatureType.TYPE_CAT
        elif var_type == FeatureType.TYPE_BOOL and must_be_this_type == FeatureType.TYPE_CAT:
            var_type = FeatureType.TYPE_CAT
        elif var_type == FeatureType.TYPE_NUM and must_be_this_type == FeatureType.TYPE_TEXT:
            var_type = FeatureType.TYPE_TEXT
        else:
            raise TypeError(f"\n\nCannot convert series '{series.name}' in {which_dataframe} from its {var_type}\n"
                            f"to the desired type {must_be_this_type}.\nCheck documentation for the possible coercion possibilities.\n"
                            f"POSSIBLE RESOLUTIONS:\n"
                            f" -> Use the feat_cfg parameter (see docs on git) to force the column to be a specific type (may or may not help depending on the type)\n"
                            f" -> Modify the source data to be more explicitly of a single specific type\n"
                            f" -> This could also be caused by a feature type mismatch between source and compare dataframes:\n"
                            f"    In that case, make sure the source and compared dataframes are compatible.\n")
    return var_type
| 64.136364 | 171 | 0.60932 |
16fd21360f533fbd353d41bc8d65c487821576be | 97 | py | Python | test.py | ndeshmukh80/active-qa | 7b7b6ce8a00d518b7f848ae44ef2b28e90db2043 | [
"Apache-2.0"
] | null | null | null | test.py | ndeshmukh80/active-qa | 7b7b6ce8a00d518b7f848ae44ef2b28e90db2043 | [
"Apache-2.0"
] | null | null | null | test.py | ndeshmukh80/active-qa | 7b7b6ce8a00d518b7f848ae44ef2b28e90db2043 | [
"Apache-2.0"
] | null | null | null | #This is python test code to check if git file can be executable on collab
print("Hello Collab")
| 32.333333 | 74 | 0.773196 |
6b6f4359f556f47e563d275416e400d443d14840 | 7,984 | py | Python | libsemver/semver_core.py | nirenjan/pysemver | 5de92bf1f21f4ead636c6a5b2fd7a3e6cfd577a5 | [
"MIT"
] | null | null | null | libsemver/semver_core.py | nirenjan/pysemver | 5de92bf1f21f4ead636c6a5b2fd7a3e6cfd577a5 | [
"MIT"
] | null | null | null | libsemver/semver_core.py | nirenjan/pysemver | 5de92bf1f21f4ead636c6a5b2fd7a3e6cfd577a5 | [
"MIT"
] | null | null | null | """
SemVerCore top level class
"""
class SemVerCore(object):
    """
    Core Semantic Version value object.

    Stores a major.minor.patch triple plus optional prerelease identifiers
    and build metadata, and implements full semver precedence via the rich
    comparison operators (build metadata is ignored for ordering, as the
    semver specification requires).
    """

    def __init__(self, major, minor, patch, prerelease=None, build_meta=None):
        """
        Initialize a SemVerCore instance.

        :param major: non-negative int major version
        :param minor: non-negative int minor version
        :param patch: non-negative int patch version
        :param prerelease: optional list of str/int prerelease identifiers
        :param build_meta: optional list of build-metadata identifiers
        """
        assert isinstance(major, int) and major >= 0
        self._major = major

        assert isinstance(minor, int) and minor >= 0
        self._minor = minor

        assert isinstance(patch, int) and patch >= 0
        self._patch = patch

        # Route through the property setters so validation/normalization
        # (int conversion of numeric identifiers, empty list -> None) applies.
        self._prerelease = None
        self.prerelease = prerelease

        self._build_meta = None
        self.build_metadata = build_meta

    def __str__(self):
        """
        Return the canonical string form, e.g. ``1.2.3-alpha.1+build.5``.
        """
        version_str = '%d.%d.%d' % (self._major, self._minor, self._patch)

        if self._prerelease is not None:
            version_str += '-' + '.'.join([str(e) for e in self._prerelease])

        if self._build_meta is not None:
            version_str += '+' + '.'.join([str(e) for e in self._build_meta])

        return version_str

    @staticmethod
    def _compare_semvers(rel1, rel2):
        """
        Compare two SemVerCore objects.

        Returns a negative, zero or positive integer when ``rel1`` orders
        before, equal to, or after ``rel2`` respectively.
        """
        def py3cmp(obj1, obj2):
            """
            Local replacement for the Python 2 builtin ``cmp``.
            """
            return (obj1 > obj2) - (obj1 < obj2)

        # Numeric core first: major, then minor, then patch.
        if rel1.major != rel2.major:
            return py3cmp(rel1.major, rel2.major)
        if rel1.minor != rel2.minor:
            return py3cmp(rel1.minor, rel2.minor)
        if rel1.patch != rel2.patch:
            return py3cmp(rel1.patch, rel2.patch)

        # Prereleases have a lower precedence than the normal version.
        # BUGFIX: previously two versions that *both* lacked a prerelease
        # fell through to zip(None, None) and raised TypeError; now they
        # correctly compare equal.
        if rel1.prerelease is None:
            return 0 if rel2.prerelease is None else 1
        if rel2.prerelease is None:
            return -1

        # Both have prerelease identifier lists: compare element-wise.
        for el_r1, el_r2 in zip(rel1.prerelease, rel2.prerelease):
            if type(el_r1) == type(el_r2):
                elem_cmp = py3cmp(el_r1, el_r2)
                if elem_cmp != 0:
                    return elem_cmp
            else:
                # The types can only be int or str; numeric identifiers
                # always order below alphanumeric ones.
                return -1 if isinstance(el_r1, int) else 1

        # The shared prefix is identical; the longer identifier list has
        # the higher precedence.
        # BUGFIX: previously computed len(rel1) - len(rel2), but SemVerCore
        # defines no __len__, so that raised TypeError.
        return len(rel1.prerelease) - len(rel2.prerelease)

    # NOTE: the comparison below uses sign tests (< 0, == 0, ...) instead of
    # membership in [-1, 0, 1] because _compare_semvers may return any
    # magnitude (e.g. a prerelease-length difference of 2).
    def __lt__(self, other):
        """
        Return True if `self` < `other`, per semver precedence rules.
        """
        return SemVerCore._compare_semvers(self, other) < 0

    def __le__(self, other):
        """
        Return True if `self` <= `other`, per semver precedence rules.
        """
        return SemVerCore._compare_semvers(self, other) <= 0

    def __gt__(self, other):
        """
        Return True if `self` > `other`, per semver precedence rules.
        """
        return SemVerCore._compare_semvers(self, other) > 0

    def __ge__(self, other):
        """
        Return True if `self` >= `other`, per semver precedence rules.
        """
        return SemVerCore._compare_semvers(self, other) >= 0

    def __eq__(self, other):
        """
        Return True if `self` == `other`, per semver precedence rules.
        """
        return SemVerCore._compare_semvers(self, other) == 0

    def __ne__(self, other):
        """
        Return True if `self` != `other`, per semver precedence rules.
        """
        return SemVerCore._compare_semvers(self, other) != 0

    @staticmethod
    def _validated_component(value, name):
        """
        Coerce *value* to a non-negative int or raise TypeError.

        BUGFIX: int('abc') raises ValueError (not TypeError), so the old
        setters let ValueError escape instead of the documented TypeError;
        both failure modes are now normalized to TypeError.
        """
        try:
            result = int(value)
        except (TypeError, ValueError):
            raise TypeError('%s number must be an integer' % name)
        if result < 0:
            raise TypeError('%s number must be a positive integer' % name)
        return result

    @property
    def major(self):
        """
        Return the major version from the SemVerCore object
        """
        return self._major

    @major.setter
    def major(self, value):
        """
        Set the major number in the SemVerCore object
        """
        self._major = self._validated_component(value, 'major')

    @property
    def minor(self):
        """
        Return the minor version from the SemVerCore object
        """
        return self._minor

    @minor.setter
    def minor(self, value):
        """
        Set the minor number in the SemVerCore object
        """
        self._minor = self._validated_component(value, 'minor')

    @property
    def patch(self):
        """
        Return the patch version from the SemVerCore object
        """
        return self._patch

    @patch.setter
    def patch(self, value):
        """
        Set the patch number in the SemVerCore object
        """
        self._patch = self._validated_component(value, 'patch')

    @property
    def prerelease(self):
        """
        Return the prerelease information from the SemVerCore object
        """
        return self._prerelease

    @prerelease.setter
    def prerelease(self, value):
        """
        Set the prerelease information in the SemVerCore object

        value must be a list of strings and/or integers. If an element
        is a string representation of an integer, it is converted to the
        integer representation in base 10. An empty list is stored as None.
        """
        assert value is None or isinstance(value, list)

        if value is None:
            self._prerelease = None
            return

        parsed = []
        for elem in value:
            try:
                elem = int(elem)
            except ValueError:
                # Non-numeric identifier stays a string; dots are
                # separators and may not appear within an identifier.
                assert '.' not in elem
            parsed.append(elem)

        self._prerelease = parsed if parsed else None

    @property
    def build_metadata(self):
        """
        Return the build metatdata information from the semver object
        """
        return self._build_meta

    @build_metadata.setter
    def build_metadata(self, value):
        """
        Set the build metadata in the SemVerCore object

        value must be a list. Elements of the list are converted to
        their corresponding string representations. An empty list is
        stored as None.
        """
        assert value is None or isinstance(value, list)

        if value is not None and len(value) != 0:
            _build_meta = [str(e) for e in value]
            # Dots separate identifiers and may not appear within one.
            assert all(['.' not in e for e in _build_meta])
        else:
            _build_meta = None

        self._build_meta = _build_meta
| 26.613333 | 79 | 0.555361 |
0da3d1fb849bdbb4f693403c66071ba0604d6246 | 19,211 | py | Python | hpopt/sklearn.py | knowledge-learning/hp-optimization | 321486db068004b1b4b9b867876c7fbdb829076e | [
"MIT"
] | 4 | 2019-10-28T15:33:07.000Z | 2021-08-30T20:46:26.000Z | hpopt/sklearn.py | knowledge-learning/hp-optimization | 321486db068004b1b4b9b867876c7fbdb829076e | [
"MIT"
] | null | null | null | hpopt/sklearn.py | knowledge-learning/hp-optimization | 321486db068004b1b4b9b867876c7fbdb829076e | [
"MIT"
] | 2 | 2019-10-28T15:34:02.000Z | 2020-04-16T08:29:17.000Z | # coding: utf-8
import numpy as np
from scipy import sparse as sp
from collections import Counter
import spacy
import tqdm
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
# classifiers
## bayes
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import BernoulliNB
## linear
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Perceptron
## svm
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
## trees
from sklearn.tree import DecisionTreeClassifier
## knn
from sklearn.neighbors import KNeighborsClassifier
## discriminant
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
## neural networks
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
## ensembles
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.ensemble import RandomForestRegressor
# from sklearn.ensemble import ExtraTreesClassifier
# from sklearn.ensemble import AdaBoostClassifier
# from sklearn.ensemble import BaggingClassifier
# data preprocesing
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import quantile_transform
from sklearn.preprocessing import robust_scale
from sklearn.impute import SimpleImputer
# feature preprocessing
from sklearn.decomposition import FastICA
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.cluster import FeatureAgglomeration
from sklearn.decomposition import KernelPCA
from sklearn.preprocessing import PolynomialFeatures
from sklearn.kernel_approximation import Nystroem
# from sklearn.feature_selection import SelectPercentile
# Production rules for the pipeline-search grammar explored by PGE.
# Each key is a non-terminal; its value expands either to a sequence of
# symbols ('A B C'), to one of several '|'-separated alternatives, or to a
# numeric terminal: f(lo,hi) samples a float, i(lo,hi) an integer.
grammar = {
    'Pipeline'     : 'DataPrep FeatPrep Class',
    'DataPrep'     : 'Encoding Rescaling Imputation Balancing',
    'Encoding'     : 'none | onehot',
    'Rescaling'    : 'none | minmax | standard | quantile',
    'Imputation'   : 'none | mean | median | most_frequent',
    'Balancing'    : 'none | weight',

    'FeatPrep'     : 'none | Decomp | FeatSel',
    'Decomp'       : 'FastICA | PCA | TruncSVD | KernelPCA',
    'FastICA'      : 'f(0.01,0.5)',
    'PCA'          : 'f(0.01,0.5)',
    'TruncSVD'     : 'f(0.01,0.5)',
    'KernelPCA'    : 'KPCAn KPCAk',
    'KPCAn'        : 'f(0.01,0.5)',
    'KPCAk'        : 'linear | poly | rbf | sigmoid | cosine',
    'FeatSel'      : 'FeatAgg | Poly | Nystrom',
    'FeatAgg'      : 'f(0.01,0.5)',
    'Poly'         : 'i(2,3)',
    'Nystrom'      : 'f(0.01,0.5)',

    'Class'        : 'Bayes | Linear | SVC | Tree | KNN | Discriminant | MLP',
    'Bayes'        : 'gaussNB | mNB | cNB | nNB',
    'Linear'       : 'SGD | Ridge | PA | LR | Lasso | Perceptron',
    'SGD'          : 'hinge | log | modified_huber | squared_hinge | perceptron',
    'Ridge'        : 'f(0.01, 10)',
    'PA'           : 'f(0.01, 10)',
    'LR'           : 'LRloss LRreg',
    'LRloss'       : 'l1 | l2',
    'LRreg'        : 'f(0.01, 10)',
    'Lasso'        : 'f(0.01, 10)',
    'Perceptron'   : 'l1 | l2 | elasticnet',
    'SVC'          : 'LinearSVC | KernelSVC',
    'LinearSVC'    : 'LinearSVCp LinearSVCr',
    'LinearSVCp'   : 'l1 | l2',
    'LinearSVCr'   : 'f(0.01,10)',
    'KernelSVC'    : 'KernelSVCk KernelSVCr',
    'KernelSVCk'   : 'rbf | poly | sigmoid',
    'KernelSVCr'   : 'f(0.01,10)',
    'Tree'         : 'gini | entropy',
    'KNN'          : 'i(1,10)',
    'Discriminant' : 'qda | lda',
    'MLP'          : 'MLPn MLPl MLPa',
    'MLPn'         : 'i(10,100)',
    'MLPl'         : 'i(1,5)',
    'MLPa'         : 'identity | logistic | tanh | relu',
}
from sklearn.model_selection import train_test_split
from .ge import Grammar, PGE
from .utils import InvalidPipeline
class SklearnGrammar(Grammar):
    """Grammar that maps PGE individuals to full scikit-learn pipelines
    (data prep -> feature prep -> classifier) and scores them on held-out
    accuracy. Each ``_xxx`` method consumes tokens from the individual
    ``ind`` in a fixed order, so statement order here is significant."""

    def __init__(self, X, y):
        """Store the dataset the sampled pipelines are evaluated on."""
        super().__init__()
        self.X = X
        self.y = y

    def grammar(self):
        """Return the module-level production rules."""
        return grammar

    def evaluate(self, ind, cmplx=1.0):
        """Build the pipeline encoded by *ind*, fit it on a 70% split and
        return the accuracy on the remaining 30%.

        When ``cmplx < 1.0`` the evaluation runs on a random subsample of
        that fraction of the data (cheaper but noisier fitness).
        """
        # 'Pipeline' : 'DataPrep FeatPrep Class',
        X, y = self.X, self.y

        if cmplx < 1.0:
            X, _, y, _ = train_test_split(X, y, train_size=cmplx)

        X, balance = self._data_prep(ind, X)
        X = self._feat_prep(ind, X)
        Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3)
        classifier = self._classifier(ind, balance)

        try:
            classifier.fit(Xtrain, ytrain)
        except TypeError as e:
            # BUGFIX: the condition used to read
            #   `'sparse' or 'must be non-negative' in str(e)`
            # which is always truthy, so *every* TypeError was converted to
            # InvalidPipeline. Only the two known incompatibilities are
            # converted now; anything else propagates.
            if 'sparse' in str(e) or 'must be non-negative' in str(e):
                raise InvalidPipeline()
            raise

        return classifier.score(Xtest, ytest)

    def train(self, ind, X, y):
        """Rebuild the preprocessing for *ind* and fit its classifier on
        all of (X, y); returns the fitted classifier. The caller must
        reset *ind* first so tokens are consumed from the start."""
        X, balance = self._data_prep(ind, X)
        X = self._feat_prep(ind, X)
        classifier = self._classifier(ind, balance)
        classifier.fit(X, y)
        return classifier

    def process(self, ind, X):
        """Apply only the preprocessing steps of *ind* to X (no classifier)."""
        X, _ = self._data_prep(ind, X)
        X = self._feat_prep(ind, X)
        return X

    def _data_prep(self, ind, X):
        """Run encoding, rescaling and imputation; also sample the class
        balancing mode. Returns (X, balance)."""
        # 'DataPrep' : 'Encoding Rescaling Imputation Balancing',
        X = self._encoding(ind, X)
        X = self._rescaling(ind, X)
        X = self._imputation(ind, X)
        balance = 'balanced' if ind.choose('none', 'weight') == 'weight' else None
        return X, balance

    def _encoding(self, ind, X):
        # 'Encoding' : 'none | onehot',
        if ind.choose('none', 'onehot') == 'onehot':
            try:
                # BUGFIX: the check used `np.all(X.astype(int) != X)`, which
                # rejected exactly the integer-valued data its own error
                # message asks for. One-hot requires every value to survive
                # the int cast unchanged.
                if not np.all(X.astype(int) == X):
                    raise InvalidPipeline('Integer values required for onehot')
                X = OneHotEncoder(categories='auto').fit_transform(X)
            except TypeError as e:
                if 'dense data is required' in str(e):
                    raise InvalidPipeline(str(e))
                else:
                    raise
        return X

    def _rescaling(self, ind, X):
        # 'Rescaling' : 'none | minmax | standard | quantile',
        # NOTE(review): the grammar label 'standard' actually maps to
        # robust_scale here (not StandardScaler) -- confirm intent.
        scaling = ind.choose(None, minmax_scale, robust_scale, quantile_transform)
        if scaling:
            if hasattr(X, 'toarray'):
                X = X.toarray()
            X = scaling(X)
        return X

    def _imputation(self, ind, X):
        # 'Imputation' : 'none | mean | median | most_frequent',
        method = ind.choose('none', 'mean', 'median', 'most_frequent')
        if method != 'none':
            X = SimpleImputer(strategy=method).fit_transform(X)
        return X

    def _feat_prep(self, ind, X):
        # 'FeatPrep' : 'none | Decomp | FeatSel',
        method = ind.choose(None, self._decompose, self._feat_sel)
        if method:
            X = method(ind, X)
        return X

    def _decompose(self, ind, X):
        # 'Decomp' : 'FastICA | PCA | TruncSVD | KernelPCA',
        method = ind.choose(self._fastica, self._pca, self._truncsvd, self._kpca)
        return method(ind, X)

    def _ncomp(self, ind, X):
        """Map the grammar's f(0.01,0.5) terminal to a component count
        (fraction of min(X.shape), never below 2)."""
        return max(2, int(ind.nextfloat() * min(X.shape)))

    def _fastica(self, ind, X):
        # 'FastICA' : 'f(0.01,0.5)',
        if hasattr(X, 'toarray'):
            X = X.toarray()
        return FastICA(n_components=self._ncomp(ind, X)).fit_transform(X)

    def _pca(self, ind, X):
        # 'PCA' : 'f(0.01,0.5)',
        if hasattr(X, 'toarray'):
            X = X.toarray()
        return PCA(n_components=self._ncomp(ind, X)).fit_transform(X)

    def _truncsvd(self, ind, X):
        # 'TruncSVD' : 'f(0.01,0.5)',
        return TruncatedSVD(n_components=self._ncomp(ind, X)).fit_transform(X)

    def _kpca(self, ind, X):
        # 'KernelPCA' : 'KPCAn KPCAk',
        # 'KPCAn' : 'f(0.01,0.5)',
        # 'KPCAk' : 'linear | poly | rbf | sigmoid | cosine',
        return KernelPCA(n_components=self._ncomp(ind, X),
                         kernel=ind.choose('linear', 'poly', 'rbf', 'sigmoid', 'cosine')).fit_transform(X)

    def _feat_sel(self, ind, X):
        # 'FeatSel' : 'FeatAgg | Poly | Nystrom',
        method = ind.choose(self._featagg, self._poly, self._nystrom)
        return method(ind, X)

    def _featagg(self, ind, X):
        # 'FeatAgg' : 'f(0.01,0.5)',
        if hasattr(X, 'toarray'):
            X = X.toarray()
        return FeatureAgglomeration(n_clusters=self._ncomp(ind, X)).fit_transform(X)

    def _poly(self, ind, X):
        # 'Poly' : 'i(2,3)',
        return PolynomialFeatures(degree=ind.nextint()).fit_transform(X)

    def _nystrom(self, ind, X):
        # 'Nystrom' : 'f(0.01,0.5)',
        return Nystroem(n_components=self._ncomp(ind, X)).fit_transform(X)

    def _classifier(self, ind, balance):
        """Sample and build the classifier; *balance* is the class_weight
        value ('balanced' or None) chosen during data prep."""
        # 'Class' : 'Bayes | Linear | SVC | Tree | KNN | Discriminant | MLP',
        return ind.choose(self._bayes,
                          self._linear,
                          self._svc,
                          self._tree,
                          self._knn,
                          self._discr,
                          self._mlp)(ind, balance)

    def _bayes(self, ind, balance):
        # 'Bayes' : 'gaussNB | mNB | cNB | nNB',
        return ind.choose(GaussianNB, MultinomialNB, ComplementNB, BernoulliNB)()

    def _linear(self, ind, balance):
        # 'Linear' : 'SGD | Ridge | PA | LR | Lasso | Perceptron',
        return ind.choose(self._sgd, self._ridge, self._pa, self._lr, self._lasso, self._perceptron)(ind, balance)

    def _sgd(self, ind, balance):
        # 'SGD' : 'hinge | log | modified_huber | squared_hinge | perceptron',
        loss = ind.choose('hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron')
        return SGDClassifier(loss=loss,
                             class_weight=balance)

    def _ridge(self, ind, balance):
        # 'Ridge' : 'f(0.01, 10)',
        return RidgeClassifier(alpha=ind.nextfloat(),
                               class_weight=balance)

    def _pa(self, ind, balance):
        # 'PA' : 'f(0.01, 10)',
        return PassiveAggressiveClassifier(C=ind.nextfloat(),
                                           class_weight=balance)

    def _lr(self, ind, balance):
        # 'LR' : 'LRloss LRreg',
        # 'LRloss' : 'l1 | l2',
        # 'LRreg' : 'f(0.01, 10)',
        return LogisticRegression(penalty=ind.choose('l1', 'l2'),
                                  C=ind.nextfloat(),
                                  solver='saga',
                                  class_weight=balance)

    def _lasso(self, ind, balance):
        # 'Lasso' : 'f(0.01, 10)',
        # NOTE(review): Lasso is a regressor; class_weight/balance is not
        # applicable and is intentionally ignored here.
        return Lasso(alpha=ind.nextfloat())

    def _perceptron(self, ind, balance):
        # 'Perceptron' : 'l1 | l2 | elasticnet',
        return Perceptron(penalty=ind.choose('l1', 'l2', 'elasticnet'))

    def _svc(self, ind, balance):
        # 'SVC' : 'LinearSVC | KernelSVC',
        return ind.choose(self._linearsvc, self._kernelsvc)(ind, balance)

    def _linearsvc(self, ind, balance):
        # 'LinearSVC' : 'LinearSVCp LinearSVCr',
        # 'LinearSVCp' : 'l1 | l2',
        # 'LinearSVCr' : 'f(0.01,10)',
        return LinearSVC(penalty=ind.choose('l1', 'l2'),
                         C=ind.nextfloat(),
                         dual=False,
                         class_weight=balance)

    def _kernelsvc(self, ind, balance):
        # 'KernelSVC' : 'KernelSVCk KernelSVCr',
        # 'KernelSVCk' : 'rbf | poly | sigmoid',
        # 'KernelSVCr' : 'f(0.01,10)',
        return SVC(kernel=ind.choose('rbf', 'poly', 'sigmoid'),
                   C=ind.nextfloat(),
                   class_weight=balance,
                   gamma='auto')

    def _tree(self, ind, balance):
        # 'Tree' : 'gini | entropy',
        return DecisionTreeClassifier(criterion=ind.choose('gini', 'entropy'),
                                      class_weight=balance)

    def _knn(self, ind, balance):
        # 'KNN' : 'i(1,10)',
        return KNeighborsClassifier(n_neighbors=ind.nextint())

    def _discr(self, ind, balance):
        # 'Discriminant' : 'qda | lda',
        return ind.choose(QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis)()

    def _mlp(self, ind, balance):
        # 'MLP' : 'MLPn MLPl MLPa',
        # 'MLPn' : 'i(10,100)',
        # 'MLPl' : 'i(1,5)',
        # 'MLPa' : 'identity | logistic | tanh | relu',
        neurons = ind.nextint()
        layers = ind.nextint()
        activation = ind.choose('identity', 'logistic', 'tanh', 'relu')
        return MLPClassifier(hidden_layer_sizes=[neurons] * layers, activation=activation)
class SklearnNLPGrammar(SklearnGrammar):
    """Extension of SklearnGrammar for text classification: adds a text
    preprocessing stage (stop-word cleaning, spacy-based POS/TAG/DEP count
    features, and vectorization) before the standard pipeline stages."""

    def __init__(self, X, y, *args, **kwargs):
        """X is an iterable of raw text documents; each is parsed once with
        spacy up front so semantic features can be reused across evaluations."""
        super().__init__(X=X, y=y, *args, **kwargs)
        print("Loading spacy...", end="", flush=True)
        self.nlp = spacy.load('en')
        print("done")
        print("Preprocessing sentences...", flush=True)
        self.sentences = [self.nlp(s) for s in tqdm.tqdm(X)]

    def grammar(self):
        """Return the base grammar overlaid with the text-specific rules
        (and 'Encoding' restricted to 'none', since input is text)."""
        g = {}
        g.update(grammar)
        g.update({
            'Pipeline' : 'TextPrep DataPrep FeatPrep Class',
            'TextPrep' : 'Clean Semantic Vect',
            'Encoding' : 'none',
            'Clean' : 'Stopwords',
            'Stopwords' :'yes | no',
            'Semantic' : 'Pos Tag Dep',
            'Pos' : 'yes | no',
            'Tag' : 'yes | no',
            'Dep' : 'yes | no',
            'Vect' : 'CV | TF | TFIDF',
            'CV' : 'i(1,3)',
            'TF' : 'i(1,3)',
            'TFIDF' : 'i(1,3)',
        })
        return g

    def evaluate(self, ind):
        """Build the text pipeline for *ind*, fit on an 80% split and
        return accuracy on the remaining 20%. Densifies sparse matrices on
        the fly for estimators that reject sparse input."""
        # 'Pipeline' : 'TextPrep DataPrep FeatPrep Class',
        X, y = self.X, self.y
        X = self._text_prep(ind, X)
        X, balance = self._data_prep(ind, X)
        X = self._feat_prep(ind, X)
        Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2)
        classifier = self._classifier(ind, balance)
        try:
            classifier.fit(Xtrain, ytrain)
        except TypeError as e:
            # Retry with dense arrays when the estimator cannot take sparse.
            if 'sparse' in str(e) and hasattr(Xtrain, 'toarray'):
                Xtrain = Xtrain.toarray()
                Xtest = Xtest.toarray()
                classifier.fit(Xtrain, ytrain)
            else:
                raise e
        except ValueError as e:
            if 'must be non-negative' in str(e):
                raise InvalidPipeline()
            raise e
        return classifier.score(Xtest, ytest)

    def _encoding(self, ind, X):
        # One-hot encoding makes no sense on vectorized text: no-op override.
        return X

    def _text_prep(self, ind, X):
        """Clean, extract semantic count features, vectorize, and stack the
        semantic features (if any) next to the term matrix."""
        # 'TextPrep' : 'Clean Vect Semantic',
        # NOTE(review): grammar rule order is 'Clean Semantic Vect' -- this
        # comment is stale, but the token consumption below matches the rule.
        sw = self._clean(ind)
        F = self._semantic(ind, X)
        X = self._vect(ind, X, sw)
        if F is None:
            return X
        if isinstance(X, np.ndarray):
            return np.hstack((X, F))
        else:
            return sp.hstack((X, F))

    def _clean(self, ind):
        # preprocessing: sample whether to filter English stop words
        if ind.nextbool():
            return stopwords.words('english')
        return set()

    def _semantic(self, ind, X):
        """Sample which spacy annotations to use (POS/TAG/DEP) and return a
        sparse count-feature matrix, or None when all three are off."""
        use_pos = ind.nextbool()
        use_tag = ind.nextbool()
        use_dep = ind.nextbool()
        if not any((use_pos, use_tag, use_dep)):
            return None
        features = []
        for sentence in self.sentences:
            counter = Counter()
            for token in sentence:
                if use_pos:
                    counter[token.pos_] += 1
                if use_tag:
                    counter[token.tag_] += 1
                if use_dep:
                    counter[token.dep_] += 1
            features.append(counter)
        # Vectorizer kept on self so the vocabulary survives for reuse.
        self.dv = DictVectorizer()
        return self.dv.fit_transform(features)

    def _vect(self, ind, X, sw):
        """Sample the vectorizer family and its n-gram order, then fit it."""
        vect = ind.choose(self._cv, self._tf, self._tfidf)
        ngram = ind.nextint()
        v = vect(ngram, sw)
        return v.fit_transform(X)

    def _cv(self, ngram, sw):
        # Raw term counts.
        return CountVectorizer(stop_words=sw, ngram_range=(1, ngram))

    def _tf(self, ngram, sw):
        # Term frequency only (idf disabled).
        return TfidfVectorizer(stop_words=sw, ngram_range=(1, ngram), use_idf=False)

    def _tfidf(self, ngram, sw):
        # Full tf-idf weighting.
        return TfidfVectorizer(stop_words=sw, ngram_range=(1, ngram), use_idf=True)
class SklearnClassifier(BaseEstimator, ClassifierMixin):
    """Scikit-learn compatible AutoML classifier: searches pipeline space
    with PGE over SklearnGrammar, then trains the best pipeline found."""

    def __init__(self, incremental=False, popsize=100, select=0.2, learning=0.05, iters=100, fitness_evaluations=1, timeout=None, verbose=False, global_timeout=None):
        # sklearn convention: store constructor arguments verbatim, under
        # the same names, so get_params()/set_params() work.
        self.incremental = incremental
        self.popsize = popsize
        self.select = select
        self.learning = learning
        self.iters = iters
        self.fitness_evaluations = fitness_evaluations
        self.timeout = timeout
        self.verbose = verbose
        self.global_timeout = global_timeout

    def fit(self, X, y):
        """Run the grammatical-evolution search on (X, y) and fit the best
        pipeline found on the full dataset."""
        self.grammar_ = SklearnGrammar(X, y)
        optimizer = PGE(
            self.grammar_,
            incremental=self.incremental,
            popsize=self.popsize,
            selected=self.select,
            learning=self.learning,
            timeout=self.timeout,
            verbose=self.verbose,
            fitness_evaluations=self.fitness_evaluations,
            global_timeout=self.global_timeout,
        )
        self.best_ = optimizer.run(self.iters)
        self.best_sample_ = self.best_.sample()
        self.best_fitness_ = optimizer.current_fn
        self.best_.reset()
        self.classifier_ = self.grammar_.train(self.best_, X, y)

    def predict(self, X):
        """Apply the discovered preprocessing to X and classify it."""
        self.best_.reset()
        transformed = self.grammar_.process(self.best_, X)
        return self.classifier_.predict(transformed)
class SklearnNLPClassifier(BaseEstimator, ClassifierMixin):
    """Scikit-learn compatible AutoML text classifier: searches with PGE
    over SklearnNLPGrammar, then trains the best pipeline found."""

    def __init__(self, incremental=False, popsize=100, select=0.2, learning=0.05, iters=100, timeout=None, fitness_evaluations=1, verbose=False):
        # sklearn convention: store constructor arguments verbatim, under
        # the same names, so get_params()/set_params() work.
        self.incremental = incremental
        self.popsize = popsize
        self.select = select
        self.learning = learning
        self.iters = iters
        self.timeout = timeout
        self.fitness_evaluations = fitness_evaluations
        self.verbose = verbose

    def fit(self, X, y):
        """Run the grammatical-evolution search on the text corpus (X, y)
        and fit the best pipeline found on the full dataset."""
        self.grammar_ = SklearnNLPGrammar(X, y)
        optimizer = PGE(
            self.grammar_,
            incremental=self.incremental,
            popsize=self.popsize,
            selected=self.select,
            learning=self.learning,
            timeout=self.timeout,
            verbose=self.verbose,
            fitness_evaluations=self.fitness_evaluations,
        )
        self.best_ = optimizer.run(self.iters)
        self.best_sample_ = self.best_.sample()
        self.best_.reset()
        self.classifier_ = self.grammar_.train(self.best_, X, y)

    def predict(self, X):
        """Apply the discovered text preprocessing to X and classify it."""
        self.best_.reset()
        transformed = self.grammar_.process(self.best_, X)
        return self.classifier_.predict(transformed)
| 34.244207 | 255 | 0.57618 |
9950762383dd8e7fabb3496cf2089d4fdce6f685 | 495 | py | Python | users/urls.py | omiguelperez/basic-django-gram | 60e304671bc3dd7bdb2dc0b4fe808b9f8ea46ad5 | [
"MIT"
] | null | null | null | users/urls.py | omiguelperez/basic-django-gram | 60e304671bc3dd7bdb2dc0b4fe808b9f8ea46ad5 | [
"MIT"
] | null | null | null | users/urls.py | omiguelperez/basic-django-gram | 60e304671bc3dd7bdb2dc0b4fe808b9f8ea46ad5 | [
"MIT"
] | null | null | null | """Users urls."""
from django.urls import path
from django.views.generic import TemplateView
from users import views
# URL routes for the users app: authentication flows plus profile pages.
urlpatterns = [
    path('login/', views.LoginView.as_view(), name='login'),
    path('signup/', views.SignupView.as_view(), name='signup'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
    # The current user's own profile editor.
    path('me/profile/', views.UpdateProfileView.as_view(), name='update_profile'),
    # Public profile page looked up by username.
    path('profile/<str:username>/', views.UserDetailView.as_view(), name='profile'),
]
| 33 | 84 | 0.69899 |
f119203f0459f52516d0040714e2e6d8996aed19 | 694 | py | Python | pyro/contrib/timeseries/__init__.py | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 | [
"Apache-2.0"
] | 2 | 2020-06-05T20:40:50.000Z | 2020-09-05T15:39:48.000Z | pyro/contrib/timeseries/__init__.py | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 | [
"Apache-2.0"
] | 1 | 2020-05-12T16:26:21.000Z | 2020-05-12T17:23:13.000Z | pyro/contrib/timeseries/__init__.py | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 | [
"Apache-2.0"
] | 1 | 2020-06-04T18:25:38.000Z | 2020-06-04T18:25:38.000Z | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
"""
The :mod:`pyro.contrib.timeseries` module provides a collection of Bayesian time series
models useful for forecasting applications.
"""
from pyro.contrib.timeseries.base import TimeSeriesModel
from pyro.contrib.timeseries.gp import IndependentMaternGP, LinearlyCoupledMaternGP, DependentMaternGP
from pyro.contrib.timeseries.lgssm import GenericLGSSM
from pyro.contrib.timeseries.lgssmgp import GenericLGSSMWithGPNoiseModel
# Public API of pyro.contrib.timeseries, kept in alphabetical order.
__all__ = [
    "DependentMaternGP",
    "GenericLGSSM",
    "GenericLGSSMWithGPNoiseModel",
    "IndependentMaternGP",
    "LinearlyCoupledMaternGP",
    "TimeSeriesModel",
]
| 33.047619 | 102 | 0.79683 |
a9968c649d379a489ed2ab31add1f7db4f0988d0 | 5,020 | py | Python | tests/python/pants_test/java/test_util.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 1 | 2021-05-05T18:58:28.000Z | 2021-05-05T18:58:28.000Z | tests/python/pants_test/java/test_util.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/java/test_util.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 3 | 2020-06-30T08:28:13.000Z | 2021-07-28T09:35:57.000Z | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import unittest
from contextlib import contextmanager
from unittest.mock import Mock, patch
from pants.java.executor import Executor
from pants.java.jar.manifest import Manifest
from pants.java.util import execute_java, safe_classpath
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_mkdir, safe_mkdtemp, touch
class ExecuteJavaTest(unittest.TestCase):
    """Tests for ``pants.java.util.execute_java`` using a mocked Executor.

    :API: public
    """

    EXECUTOR_ERROR = Executor.Error()
    TEST_MAIN = "foo.bar.main"
    TEST_CLASSPATH = ["A.jar", "B.jar"]
    SAFE_CLASSPATH = ["C.jar"]
    SYNTHETIC_JAR_DIR = "somewhere"

    def setUp(self):
        """Wire a mock executor whose runner reports a 0 exit code.

        :API: public
        """
        self.executor = Mock(spec=Executor)
        self.runner = Mock(spec=Executor.Runner)
        self.executor.runner = Mock(return_value=self.runner)
        self.runner.run = Mock(return_value=0)

    @contextmanager
    def mock_safe_classpath_helper(self, create_synthetic_jar=True):
        """Patch ``safe_classpath`` and, after the with-block, verify the
        executor was invoked exactly once with the expected classpath.

        :API: public
        """
        with patch("pants.java.util.safe_classpath") as mock_safe_classpath:
            mock_safe_classpath.side_effect = fake_safe_classpath
            yield mock_safe_classpath
        self.runner.run.assert_called_once_with(stdin=None, cwd=None)
        if create_synthetic_jar:
            # Classpath must be routed through the synthetic jar.
            self.executor.runner.assert_called_once_with(
                self.SAFE_CLASSPATH, self.TEST_MAIN, args=None, jvm_options=None
            )
            mock_safe_classpath.assert_called_once_with(self.TEST_CLASSPATH, self.SYNTHETIC_JAR_DIR)
        else:
            # Original classpath used directly; safe_classpath never called.
            self.executor.runner.assert_called_once_with(
                self.TEST_CLASSPATH, self.TEST_MAIN, args=None, jvm_options=None
            )
            mock_safe_classpath.assert_not_called()

    def test_execute_java_no_error(self):
        """Successful run returns the runner's exit code (0).

        :API: public
        """
        with self.mock_safe_classpath_helper():
            self.assertEqual(
                0,
                execute_java(
                    self.TEST_CLASSPATH,
                    self.TEST_MAIN,
                    executor=self.executor,
                    synthetic_jar_dir=self.SYNTHETIC_JAR_DIR,
                ),
            )

    def test_execute_java_executor_error(self):
        """Executor errors propagate out of execute_java.

        :API: public
        """
        with self.mock_safe_classpath_helper():
            self.runner.run.side_effect = self.EXECUTOR_ERROR
            with self.assertRaises(type(self.EXECUTOR_ERROR)):
                execute_java(
                    self.TEST_CLASSPATH,
                    self.TEST_MAIN,
                    executor=self.executor,
                    synthetic_jar_dir=self.SYNTHETIC_JAR_DIR,
                )

    def test_execute_java_no_synthentic_jar(self):
        """With create_synthetic_jar=False the raw classpath is used.

        :API: public
        """
        with self.mock_safe_classpath_helper(create_synthetic_jar=False):
            self.assertEqual(
                0,
                execute_java(
                    self.TEST_CLASSPATH,
                    self.TEST_MAIN,
                    executor=self.executor,
                    create_synthetic_jar=False,
                ),
            )
def fake_safe_classpath(classpath, synthetic_jar_dir):
    # Stand-in for pants.java.util.safe_classpath used as a mock
    # side_effect: always returns the canned synthetic classpath.
    return ExecuteJavaTest.SAFE_CLASSPATH
class SafeClasspathTest(unittest.TestCase):
    def test_safe_classpath(self):
        """For directory structure like:
        ./
        ./libs/A.jar
        ./libs/resources/
        ./synthetic_jar_dir
        Verify a synthetic jar with the following classpath in manifest is created:
        Class-Path: ../libs/A.jar:../libs/resources/
        """
        RESOURCES = "resources"
        LIB_DIR = "libs"
        JAR_FILE = "A.jar"
        SYNTHENTIC_JAR_DIR = "synthetic_jar_dir"
        basedir = safe_mkdtemp()
        lib_dir = os.path.join(basedir, LIB_DIR)
        synthetic_jar_dir = os.path.join(basedir, SYNTHENTIC_JAR_DIR)
        resource_dir = os.path.join(lib_dir, RESOURCES)
        jar_file = os.path.join(lib_dir, JAR_FILE)
        # Build the fixture tree described in the docstring.
        for dir in (lib_dir, resource_dir, synthetic_jar_dir):
            safe_mkdir(dir)
        touch(jar_file)
        classpath = [jar_file, resource_dir]
        safe_cp = safe_classpath(classpath, synthetic_jar_dir)
        # Exactly one synthetic jar, created inside synthetic_jar_dir.
        self.assertEqual(1, len(safe_cp))
        safe_jar = safe_cp[0]
        self.assertTrue(os.path.exists(safe_jar))
        self.assertEqual(synthetic_jar_dir, os.path.dirname(safe_jar))
        with open_zip(safe_jar) as synthetic_jar:
            # The jar contains only a manifest.
            self.assertEqual([Manifest.PATH], synthetic_jar.namelist())
            # manifest should contain the relative path of both jar and resource directory
            expected = "{}: ../{}/{} ../{}/{}/\n".format(
                Manifest.CLASS_PATH, LIB_DIR, JAR_FILE, LIB_DIR, RESOURCES
            ).encode()
            # Strip manifest 72-byte line-wrap continuations before comparing.
            self.assertEqual(expected, synthetic_jar.read(Manifest.PATH).replace(b"\n ", b""))
12ec4aefe40f5d5c698a9c3358324a2db5272ea2 | 3,801 | py | Python | configs/repdet/repdet_repvgg_a2_repdilatedencoder_yolof_1x_coco.py | karthiksharma98/mmdetection | 295145d41a74598db98a037224f0f82c074f3fff | [
"Apache-2.0"
] | null | null | null | configs/repdet/repdet_repvgg_a2_repdilatedencoder_yolof_1x_coco.py | karthiksharma98/mmdetection | 295145d41a74598db98a037224f0f82c074f3fff | [
"Apache-2.0"
] | null | null | null | configs/repdet/repdet_repvgg_a2_repdilatedencoder_yolof_1x_coco.py | karthiksharma98/mmdetection | 295145d41a74598db98a037224f0f82c074f3fff | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_poly.py', '../_base_/default_runtime.py'
]
model = dict(
type='RepDet',
pretrained='/raid/sjx/mmdetection/pretrained_models/RepVGG/RepVGG-A2-train.pth',
backbone=dict(
type='RepVGG',
arch='A2',
out_stages=[4],
activation='ReLU',
last_channel=1024,
deploy=False),
neck=dict(
type='RepDilatedEncoder',
in_channels=1024,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4,
deploy=False),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner',
pos_ignore_thr=0.15,
neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(
type='SGD',
lr=0.03,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(
norm_decay_mult=0.,
custom_keys={'backbone': dict(lr_mult=0.334, decay_mult=1.0)}))
lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667)
# data
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
find_unused_parameters=True
runner = dict(type='EpochBasedRunner', max_epochs=12) | 31.675 | 84 | 0.599316 |
0b5e353f75269c2e282b4d5f2134dcb4d12a771f | 1,484 | py | Python | tests/test_utils.py | obi1kenobi/csvs-to-sqlite | dc4177bfa6e4e7b46daf40ab60e060d8b3a9e3cb | [
"Apache-2.0"
] | 684 | 2017-11-14T00:51:13.000Z | 2022-03-29T15:16:00.000Z | tests/test_utils.py | obi1kenobi/csvs-to-sqlite | dc4177bfa6e4e7b46daf40ab60e060d8b3a9e3cb | [
"Apache-2.0"
] | 73 | 2017-11-14T16:42:16.000Z | 2022-03-27T19:23:14.000Z | tests/test_utils.py | obi1kenobi/csvs-to-sqlite | dc4177bfa6e4e7b46daf40ab60e060d8b3a9e3cb | [
"Apache-2.0"
] | 58 | 2017-11-22T17:08:27.000Z | 2022-03-04T03:22:36.000Z | from csvs_to_sqlite import utils
import pytest
import sqlite3
import pandas as pd
# Minimal schema installed into an in-memory sqlite database by the
# table_exists tests below.
TEST_TABLES = """
CREATE TABLE foo (
id integer primary key,
value text
);
"""
# 'foo' exists in TEST_TABLES; 'bar' does not.
@pytest.mark.parametrize("table,expected", [("foo", True), ("bar", False)])
def test_table_exists(table, expected):
    conn = sqlite3.connect(":memory:")
    conn.executescript(TEST_TABLES)
    assert expected == utils.table_exists(conn, table)
def test_get_create_table_sql():
    # Column types in the CREATE TABLE statement should be inferred from
    # the DataFrame dtypes (int64 -> INTEGER, object -> TEXT).
    df = pd.DataFrame([{"number": 1, "letter": "a"}])
    sql, columns = utils.get_create_table_sql("hello", df)
    assert (
        'CREATE TABLE "hello" (\n'
        '"index" INTEGER,\n'
        ' "number" INTEGER,\n'
        ' "letter" TEXT\n'
        ")"
    ) == sql
    # Column order is not guaranteed, so compare as a set.
    assert {"index", "letter", "number"} == set(columns)
def test_refactor_dataframes():
    # Repeated 'name' values should be extracted into a 'People' lookup
    # table and replaced by foreign-key ids inside the dataframe.
    df = pd.DataFrame(
        [
            {"name": "Terry", "score": 0.5},
            {"name": "Terry", "score": 0.8},
            {"name": "Owen", "score": 0.7},
        ]
    )
    conn = sqlite3.connect(":memory:")
    output = utils.refactor_dataframes(
        conn, [df], {"name": ("People", "first_name")}, False
    )
    assert 1 == len(output)
    dataframe = output[0]
    # There should be a 'People' table in sqlite
    assert [(1, "Terry"), (2, "Owen")] == conn.execute(
        "select id, first_name from People"
    ).fetchall()
    # The dataframe's 'name' column now holds the People ids.
    assert (
        " name score\n" "0 1 0.5\n" "1 1 0.8\n" "2 2 0.7"
    ) == str(dataframe)
| 26.981818 | 81 | 0.561995 |
dc37113f6f345f17fb68d7ccf246fd4c34d3e835 | 825 | py | Python | functions/search_functions.py | fvergaracl/gonsa2_1 | 0cb705ca7ba717f0316f4764bbec9bbcb08fcc34 | [
"MIT"
] | 1 | 2020-07-19T03:17:12.000Z | 2020-07-19T03:17:12.000Z | functions/search_functions.py | fvergaracl/gonsa2_1 | 0cb705ca7ba717f0316f4764bbec9bbcb08fcc34 | [
"MIT"
] | 1 | 2021-06-02T00:49:57.000Z | 2021-06-02T00:49:57.000Z | functions/search_functions.py | fvergaracl/gonsa3 | 0cb705ca7ba717f0316f4764bbec9bbcb08fcc34 | [
"MIT"
] | null | null | null | import requests
from googlesearch import search
def googlesearch(query):
    """Run *query* through Google search and return the result URLs.

    Collects up to 50 results (10 per page, results 0-49) with a
    2-second pause between HTTP requests to avoid rate limiting, printing
    each URL as it arrives.

    :param query: search string to submit.
    :return: list of result URLs (previously the list was built but
        discarded -- the missing ``return`` is the bug fixed here).
    """
    my_results_list = []
    for url in search(query,        # The query you want to run
                      tld='com',    # The top level domain
                      lang='es',    # The language
                      num=10,       # Number of results per page
                      start=0,      # First result to retrieve
                      stop=50,      # Last result to retrieve
                      pause=2.0,    # Lapse between HTTP requests
                      ):
        my_results_list.append(url)
        print(url)
    return my_results_list
def duckduckgo(query):
    """Query the DuckDuckGo Instant Answer API.

    :param query: search string (NOTE(review): not URL-encoded; consider
        ``urllib.parse.quote_plus`` for queries containing '&' or spaces).
    :return: ``(True, response_text)`` on HTTP 200, otherwise
        ``(False, '')``.  Previously any status other than 200/404 fell
        through and returned ``None``, breaking callers that unpack the
        two-tuple; every non-200 status is now treated as a failure.
    """
    url = 'http://api.duckduckgo.com/?q=%s&format=json' % query
    response = requests.get(url)
    if response.status_code == 200:
        return True, response.text
    return False, ''
bda1ec4fac519c53ab7646709d86b18d25a107f7 | 5,777 | py | Python | src/ploomber/jupyter/dag.py | MarcoJHB/ploomber | 4849ef6915572f7934392443b4faf138172b9596 | [
"Apache-2.0"
] | 2,141 | 2020-02-14T02:34:34.000Z | 2022-03-31T22:43:20.000Z | src/ploomber/jupyter/dag.py | MarcoJHB/ploomber | 4849ef6915572f7934392443b4faf138172b9596 | [
"Apache-2.0"
] | 660 | 2020-02-06T16:15:57.000Z | 2022-03-31T22:55:01.000Z | src/ploomber/jupyter/dag.py | MarcoJHB/ploomber | 4849ef6915572f7934392443b4faf138172b9596 | [
"Apache-2.0"
] | 122 | 2020-02-14T18:53:05.000Z | 2022-03-27T22:33:24.000Z | from pathlib import Path, PurePosixPath
import datetime
import nbformat
from ploomber.tasks import PythonCallable
class JupyterTaskResource:
    """Presents a single pipeline task as a Jupyter notebook resource."""

    def __init__(self, task, interactive, parent):
        self.task = task
        self.interactive = interactive
        # API path of this resource: "<parent>/<task name>"
        self.path = f'{parent}/{task.name}'

    def to_model(self, content=False):
        """Build the Jupyter contents-API model dict for this task.

        When ``content`` is True the notebook representation is included,
        otherwise the 'content'/'format' fields are left empty.
        """
        source_file = remove_line_number(self.task.source.loc)
        mtime = source_file.stat().st_mtime
        model = {
            'name': self.task.name,
            'type': 'notebook',
            'content': self.interactive.to_nb() if content else None,
            'path': self.path,
            'writable': True,
            'created': datetime.datetime.now(),
            'last_modified': datetime.datetime.fromtimestamp(mtime),
            'mimetype': None,
            'format': 'json' if content else None,
        }
        return model

    def __repr__(self):
        cls_name = type(self).__name__
        return '{}(name={!r}, path={})'.format(cls_name, self.task.name,
                                               self.path)
class JupyterDirectoryResource:
    """A virtual Jupyter directory grouping the task resources that come
    from one source file."""

    def __init__(self, name, path):
        self.task_resources = {}
        self.name = name
        self.path = path

    def __setitem__(self, key, value):
        self.task_resources[key] = value

    def get(self, key):
        return self.task_resources.get(key)

    def __iter__(self):
        # iterate over task names, like a dict
        return iter(self.task_resources)

    def __len__(self):
        return len(self.task_resources)

    def to_model(self, content=False):
        """Directory model whose timestamps span those of its children."""
        children = [resource.to_model(content=False)
                    for resource in self.task_resources.values()]
        model = {
            'name': self.name,
            'path': self.path,
            'type': 'directory',
            'created': min(child['created'] for child in children),
            'last_modified': max(child['last_modified'] for child in children),
            'format': 'json',
            'mimetype': None,
            'content': children,
            'writable': True,
        }
        return model

    def __repr__(self):
        return '{}(name={!r}, path={}, task_resources={!r})'.format(
            type(self).__name__, self.name, self.path, self.task_resources)
def as_jupyter_path(path):
    """Convert a platform-dependent path into a Jupyter API path.

    Jupyter API paths are '/'-delimited (even on Windows) relative paths
    without leading or trailing slashes, so the given path is made
    relative to the current working directory and rendered in POSIX form.

    Notes
    -----
    https://jupyter-notebook.readthedocs.io/en/stable/extending/contents.html#api-paths
    """
    cwd = Path('.').resolve()
    relative = Path(path).relative_to(cwd)
    return relative.as_posix().strip('/')
def remove_line_number(path):
    """Strip a trailing ``:line`` suffix from a path, returning a ``Path``.

    E.g. ``'pkg/mod.py:10'`` becomes ``Path('pkg/mod.py')``; a path with
    no line suffix is returned unchanged (as a ``Path`` object).  Only the
    final component is inspected, so Windows drive letters are untouched.
    """
    p = Path(path)
    filename, _, _ = p.name.partition(':')
    return p.with_name(filename)
class JupyterDAGManager:
    """
    Exposes PythonCallable tasks in a dag as Jupyter notebooks
    """

    def __init__(self, dag):
        # Maps Jupyter API paths to resources.  For every PythonCallable
        # task two entries are registered: a virtual "<file> (functions)"
        # directory and, under it, one notebook resource for the task.
        self.resources = dict()

        for t in dag.values():
            if isinstance(t, PythonCallable):
                loc = remove_line_number(t.source.loc)
                # virtual directory is named after the task's source file
                name = loc.name + ' (functions)'
                key = str(PurePosixPath(as_jupyter_path(loc)).with_name(name))

                if key not in self.resources:
                    self.resources[key] = JupyterDirectoryResource(name=name,
                                                                   path=key)

                task_resource = JupyterTaskResource(
                    task=t, interactive=t._interactive_developer(), parent=key)

                # register the task both inside its directory and at its
                # own full path for direct lookup
                self.resources[key][t.name] = task_resource
                self.resources[task_resource.path] = task_resource

        # group resources by their parent directory for get_by_parent()
        pairs = ((str(PurePosixPath(path).parent), res)
                 for path, res in self.resources.items())

        self.resources_by_root = dict()

        for parent, resource in pairs:
            if parent not in self.resources_by_root:
                self.resources_by_root[parent] = []

            self.resources_by_root[parent].append(resource)

    def __contains__(self, key):
        return key.strip('/') in self.resources

    def __getitem__(self, key):
        return self.resources[key]

    def __iter__(self):
        for resource in self.resources:
            yield resource

    def _get(self, path):
        # Jupyter API paths carry no leading/trailing slashes.
        path = path.strip('/')
        return self.resources.get(path)

    def get(self, path, content):
        """Get model located at path
        """
        resource = self._get(path)

        if resource:
            return resource.to_model(content)

    def get_by_parent(self, parent):
        # Return child models of the given directory, or [] if unknown.
        parent = parent.strip('/')

        # jupyter represents the current folder with an empty string
        if parent == '':
            parent = '.'

        if parent in self.resources_by_root:
            return [m.to_model() for m in self.resources_by_root[parent]]
        else:
            return []

    def overwrite(self, model, path):
        """Overwrite a model back to the original function
        """
        resource = self._get(path)
        resource.interactive.overwrite(nbformat.from_dict(model['content']))

        # Return a content-less model acknowledging the save.
        return {
            'name': resource.task.name,
            'type': 'notebook',
            'path': path,
            'writable': True,
            'created': datetime.datetime.now(),
            'last_modified': datetime.datetime.now(),
            'content': None,
            'mimetype': 'text/x-python',
            'format': None,
        }

    def __repr__(self):
        return f'{type(self).__name__}({list(self.resources)})'
| 30.405263 | 87 | 0.574347 |
e57320cfe4433494f035c8b781882222e00473bf | 57,063 | py | Python | test/ext/declarative/test_inheritance.py | hmhmj/sqlalchemy-study | eaf3a54c4cea9d623623c40b8c68d1649d79836c | [
"MIT"
] | 1 | 2018-04-02T18:41:52.000Z | 2018-04-02T18:41:52.000Z | test/ext/declarative/test_inheritance.py | hmhmj/sqlalchemy-study | eaf3a54c4cea9d623623c40b8c68d1649d79836c | [
"MIT"
] | null | null | null | test/ext/declarative/test_inheritance.py | hmhmj/sqlalchemy-study | eaf3a54c4cea9d623623c40b8c68d1649d79836c | [
"MIT"
] | 3 | 2017-09-26T13:59:24.000Z | 2020-12-04T17:51:54.000Z |
from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message, is_, is_true, is_false
from sqlalchemy.ext import declarative as decl
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import relationship, create_session, class_mapper, \
configure_mappers, clear_mappers, \
polymorphic_union, deferred, Session
from sqlalchemy.ext.declarative import declared_attr, AbstractConcreteBase, \
ConcreteBase, has_inherited_table
from sqlalchemy.testing import fixtures, mock
from test.orm.test_events import _RemoveListeners
# Rebound to a fresh declarative base by DeclarativeTestBase.setup().
Base = None
class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults):
    """Fixture base: fresh declarative Base per test, full cleanup after."""

    def setup(self):
        global Base
        Base = decl.declarative_base(testing.db)

    def teardown(self):
        # Close sessions and clear mappers before dropping tables so no
        # mapper/session state leaks into the next test.
        Session.close_all()
        clear_mappers()
        Base.metadata.drop_all()
class DeclarativeInheritanceTest(DeclarativeTestBase):
    def test_we_must_copy_mapper_args(self):
        # Configuring a subclass must not mutate the parent's declared
        # __mapper_args__ dict (e.g. by inserting 'inherits').
        class Person(Base):

            __tablename__ = 'people'
            id = Column(Integer, primary_key=True)
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator,
                               'polymorphic_identity': 'person'}

        class Engineer(Person):
            primary_language = Column(String(50))

        assert 'inherits' not in Person.__mapper_args__
        assert class_mapper(Engineer).polymorphic_identity is None
        assert class_mapper(Engineer).polymorphic_on is Person.__table__.c.type
    def test_we_must_only_copy_column_mapper_args(self):
        # String references in __mapper_args__ (version_id_col,
        # include_properties, etc.) must survive declarative processing.
        class Person(Base):

            __tablename__ = 'people'
            id = Column(Integer, primary_key=True)
            a = Column(Integer)
            b = Column(Integer)
            c = Column(Integer)
            d = Column(Integer)
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator,
                               'polymorphic_identity': 'person',
                               'version_id_col': 'a',
                               'column_prefix': 'bar',
                               'include_properties': ['id', 'a', 'b'],
                               }
        assert class_mapper(Person).version_id_col == 'a'
        assert class_mapper(Person).include_properties == set(['id', 'a', 'b'])
    def test_custom_join_condition(self):
        # An explicit inherit_condition should be honored when no implicit
        # FK join between the parent and child tables exists.
        class Foo(Base):

            __tablename__ = 'foo'
            id = Column('id', Integer, primary_key=True)

        class Bar(Foo):

            __tablename__ = 'bar'
            bar_id = Column('id', Integer, primary_key=True)
            foo_id = Column('foo_id', Integer)
            __mapper_args__ = {'inherit_condition': foo_id == Foo.id}

        # compile succeeds because inherit_condition is honored
        configure_mappers()
    def test_joined(self):
        # Joined-table inheritance: Engineer/Manager each join their own
        # table to 'people' through a shared primary key.
        class Company(Base, fixtures.ComparableEntity):

            __tablename__ = 'companies'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column('name', String(50))
            employees = relationship('Person')

        class Person(Base, fixtures.ComparableEntity):

            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            company_id = Column('company_id', Integer,
                                ForeignKey('companies.id'))
            name = Column('name', String(50))
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}

        class Engineer(Person):

            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)
            primary_language = Column('primary_language', String(50))

        class Manager(Person):

            __tablename__ = 'managers'
            __mapper_args__ = {'polymorphic_identity': 'manager'}
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)
            golf_swing = Column('golf_swing', String(50))

        Base.metadata.create_all()
        sess = create_session()
        c1 = Company(
            name='MegaCorp, Inc.',
            employees=[
                Engineer(name='dilbert', primary_language='java'),
                Engineer(name='wally', primary_language='c++'),
                Manager(name='dogbert', golf_swing='fore!')])
        c2 = Company(name='Elbonia, Inc.',
                     employees=[Engineer(name='vlad',
                                         primary_language='cobol')])
        sess.add(c1)
        sess.add(c2)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
                                       any(Engineer.primary_language
                                           == 'cobol')).first(), c2)

        # ensure that the Manager mapper was compiled with the Manager id
        # column as higher priority. this ensures that "Manager.id"
        # is appropriately treated as the "id" column in the "manager"
        # table (reversed from 0.6's behavior.)

        eq_(
            Manager.id.property.columns,
            [Manager.__table__.c.id, Person.__table__.c.id]
        )

        # assert that the "id" column is available without a second
        # load. as of 0.7, the ColumnProperty tests all columns
        # in its list to see which is present in the row.

        sess.expunge_all()

        def go():
            assert sess.query(Manager).filter(Manager.name == 'dogbert'
                                              ).one().id
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            assert sess.query(Person).filter(Manager.name == 'dogbert'
                                             ).one().id
        self.assert_sql_count(testing.db, go, 1)
    def test_add_subcol_after_the_fact(self):
        # A column assigned onto a mapped subclass after class creation
        # should still be incorporated into its mapping.
        class Person(Base, fixtures.ComparableEntity):

            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column('name', String(50))
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}

        class Engineer(Person):

            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)

        Engineer.primary_language = Column('primary_language',
                                           String(50))
        Base.metadata.create_all()
        sess = create_session()
        e1 = Engineer(primary_language='java', name='dilbert')
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).first(),
            Engineer(primary_language='java', name='dilbert'))
    def test_add_parentcol_after_the_fact(self):
        # A column added to the base class after subclasses exist should
        # propagate to the already-defined subclass mapping.
        class Person(Base, fixtures.ComparableEntity):

            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}

        class Engineer(Person):

            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column(String(50))
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)

        Person.name = Column('name', String(50))
        Base.metadata.create_all()
        sess = create_session()
        e1 = Engineer(primary_language='java', name='dilbert')
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).first(),
            Engineer(primary_language='java', name='dilbert'))
    def test_add_sub_parentcol_after_the_fact(self):
        # Same as above but with a two-level subclass chain: the late
        # column on Person must reach the grandchild Admin as well.
        class Person(Base, fixtures.ComparableEntity):

            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}

        class Engineer(Person):

            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column(String(50))
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)

        class Admin(Engineer):

            __tablename__ = 'admins'
            __mapper_args__ = {'polymorphic_identity': 'admin'}
            workstation = Column(String(50))
            id = Column('id', Integer, ForeignKey('engineers.id'),
                        primary_key=True)

        Person.name = Column('name', String(50))
        Base.metadata.create_all()
        sess = create_session()
        e1 = Admin(primary_language='java', name='dilbert',
                   workstation='foo')
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).first(),
            Admin(primary_language='java', name='dilbert', workstation='foo'))
    def test_subclass_mixin(self):
        # A plain (non-mapped) mixin in the bases must not disturb the
        # inheritance relationship between Engineer and Person.
        class Person(Base, fixtures.ComparableEntity):

            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True)
            name = Column('name', String(50))
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}

        class MyMixin(object):
            pass

        class Engineer(MyMixin, Person):

            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)
            primary_language = Column('primary_language', String(50))

        assert class_mapper(Engineer).inherits is class_mapper(Person)
    def test_with_undefined_foreignkey(self):
        # A ForeignKey naming a table that is defined later should not
        # raise at class-definition time; resolution is deferred.
        class Parent(Base):

            __tablename__ = 'parent'
            id = Column('id', Integer, primary_key=True)
            tp = Column('type', String(50))
            __mapper_args__ = dict(polymorphic_on=tp)

        class Child1(Parent):

            __tablename__ = 'child1'
            id = Column('id', Integer, ForeignKey('parent.id'),
                        primary_key=True)
            related_child2 = Column('c2', Integer,
                                    ForeignKey('child2.id'))
            __mapper_args__ = dict(polymorphic_identity='child1')

        # no exception is raised by the ForeignKey to "child2" even
        # though child2 doesn't exist yet

        class Child2(Parent):

            __tablename__ = 'child2'
            id = Column('id', Integer, ForeignKey('parent.id'),
                        primary_key=True)
            related_child1 = Column('c1', Integer)
            __mapper_args__ = dict(polymorphic_identity='child2')

        sa.orm.configure_mappers()  # no exceptions here
    def test_foreign_keys_with_col(self):
        """Test that foreign keys that reference a literal 'id' subclass
        'id' attribute behave intuitively.

        See [ticket:1892].

        """

        class Booking(Base):
            __tablename__ = 'booking'
            id = Column(Integer, primary_key=True)

        class PlanBooking(Booking):
            __tablename__ = 'plan_booking'
            id = Column(Integer, ForeignKey(Booking.id),
                        primary_key=True)

        # referencing PlanBooking.id gives us the column
        # on plan_booking, not booking
        class FeatureBooking(Booking):
            __tablename__ = 'feature_booking'
            id = Column(Integer, ForeignKey(Booking.id),
                        primary_key=True)
            plan_booking_id = Column(Integer,
                                     ForeignKey(PlanBooking.id))

            plan_booking = relationship(PlanBooking,
                                        backref='feature_bookings')

        assert FeatureBooking.__table__.c.plan_booking_id.\
            references(PlanBooking.__table__.c.id)

        assert FeatureBooking.__table__.c.id.\
            references(Booking.__table__.c.id)
    def test_single_colsonbase(self):
        """test single inheritance where all the columns are on the base
        class."""

        class Company(Base, fixtures.ComparableEntity):

            __tablename__ = 'companies'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column('name', String(50))
            employees = relationship('Person')

        class Person(Base, fixtures.ComparableEntity):

            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            company_id = Column('company_id', Integer,
                                ForeignKey('companies.id'))
            name = Column('name', String(50))
            discriminator = Column('type', String(50))
            primary_language = Column('primary_language', String(50))
            golf_swing = Column('golf_swing', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}

        # Subclasses add no columns of their own -- single-table
        # inheritance discriminated purely by identity.
        class Engineer(Person):
            __mapper_args__ = {'polymorphic_identity': 'engineer'}

        class Manager(Person):
            __mapper_args__ = {'polymorphic_identity': 'manager'}

        Base.metadata.create_all()
        sess = create_session()
        c1 = Company(
            name='MegaCorp, Inc.',
            employees=[
                Engineer(name='dilbert', primary_language='java'),
                Engineer(name='wally', primary_language='c++'),
                Manager(name='dogbert', golf_swing='fore!')])
        c2 = Company(name='Elbonia, Inc.',
                     employees=[Engineer(name='vlad',
                                         primary_language='cobol')])
        sess.add(c1)
        sess.add(c2)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).filter(Engineer.primary_language
                                      == 'cobol').first(),
            Engineer(name='vlad'))
        eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
                                       any(Engineer.primary_language
                                           == 'cobol')).first(), c2)
    def test_single_colsonsub(self):
        """test single inheritance where the columns are local to their
        class.

        this is a newer usage.

        """

        class Company(Base, fixtures.ComparableEntity):

            __tablename__ = 'companies'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column('name', String(50))
            employees = relationship('Person')

        class Person(Base, fixtures.ComparableEntity):

            __tablename__ = 'people'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            company_id = Column(Integer, ForeignKey('companies.id'))
            name = Column(String(50))
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}

        class Engineer(Person):

            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column(String(50))

        class Manager(Person):

            __mapper_args__ = {'polymorphic_identity': 'manager'}
            golf_swing = Column(String(50))

        # we have here a situation that is somewhat unique. the Person
        # class is mapped to the "people" table, but it was mapped when
        # the table did not include the "primary_language" or
        # "golf_swing" columns. declarative will also manipulate the
        # exclude_properties collection so that sibling classes don't
        # cross-pollinate.

        assert Person.__table__.c.company_id is not None
        assert Person.__table__.c.golf_swing is not None
        assert Person.__table__.c.primary_language is not None
        assert Engineer.primary_language is not None
        assert Manager.golf_swing is not None
        assert not hasattr(Person, 'primary_language')
        assert not hasattr(Person, 'golf_swing')
        assert not hasattr(Engineer, 'golf_swing')
        assert not hasattr(Manager, 'primary_language')
        Base.metadata.create_all()
        sess = create_session()
        e1 = Engineer(name='dilbert', primary_language='java')
        e2 = Engineer(name='wally', primary_language='c++')
        m1 = Manager(name='dogbert', golf_swing='fore!')
        c1 = Company(name='MegaCorp, Inc.', employees=[e1, e2, m1])
        e3 = Engineer(name='vlad', primary_language='cobol')
        c2 = Company(name='Elbonia, Inc.', employees=[e3])
        sess.add(c1)
        sess.add(c2)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).filter(Engineer.primary_language
                                      == 'cobol').first(),
            Engineer(name='vlad'))
        eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
                                       any(Engineer.primary_language
                                           == 'cobol')).first(), c2)
        eq_(sess.query(Engineer).filter_by(primary_language='cobol'
                                           ).one(),
            Engineer(name='vlad', primary_language='cobol'))
def test_single_cols_on_sub_base_of_joined(self):
    """test [ticket:3895]"""

    class Person(Base):
        __tablename__ = "person"

        id = Column(Integer, primary_key=True)
        type = Column(String)

        __mapper_args__ = {
            "polymorphic_on": type,
        }

    class Contractor(Person):
        # single-table subclass declaring its own column
        contractor_field = Column(String)

        __mapper_args__ = {
            "polymorphic_identity": "contractor",
        }

    class Employee(Person):
        # joined-table subclass of the same base
        __tablename__ = "employee"

        id = Column(Integer, ForeignKey(Person.id), primary_key=True)

    class Engineer(Employee):
        __mapper_args__ = {
            "polymorphic_identity": "engineer",
        }

    configure_mappers()

    # the column declared on Contractor must remain local to it and
    # not leak to the base or to the joined-inheritance siblings
    is_false(hasattr(Person, 'contractor_field'))
    is_true(hasattr(Contractor, 'contractor_field'))
    is_false(hasattr(Employee, 'contractor_field'))
    is_false(hasattr(Engineer, 'contractor_field'))
def test_single_cols_on_sub_to_joined(self):
    """test [ticket:3797]"""

    class BaseUser(Base):
        __tablename__ = 'root'

        id = Column(Integer, primary_key=True)
        row_type = Column(String)

        __mapper_args__ = {
            'polymorphic_on': row_type,
            'polymorphic_identity': 'baseuser'
        }

    class User(BaseUser):
        # joined-table subclass
        __tablename__ = 'user'

        __mapper_args__ = {
            'polymorphic_identity': 'user'
        }

        baseuser_id = Column(
            Integer, ForeignKey('root.id'), primary_key=True)

    class Bat(Base):
        __tablename__ = 'bat'
        id = Column(Integer, primary_key=True)

    class Thing(Base):
        __tablename__ = 'thing'

        id = Column(Integer, primary_key=True)

        owner_id = Column(Integer, ForeignKey('user.baseuser_id'))
        owner = relationship('User')

    class SubUser(User):
        # single-table subclass of the joined-table User; its FK
        # column should land on the "user" table
        __mapper_args__ = {
            'polymorphic_identity': 'subuser'
        }

        sub_user_custom_thing = Column(Integer, ForeignKey('bat.id'))

    # the "user" table carries both its own FK and the one declared
    # on the single-inheritance SubUser
    eq_(
        User.__table__.foreign_keys,
        User.baseuser_id.foreign_keys.union(
            SubUser.sub_user_custom_thing.foreign_keys))

    # the Thing.owner relationship still joins on the correct column
    is_true(Thing.owner.property.primaryjoin.compare(
        Thing.owner_id == User.baseuser_id))
def test_single_constraint_on_sub(self):
    """test the somewhat unusual case of [ticket:3341]"""

    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        primary_language = Column(String(50))

        # constraints declared on the single-inheritance subclass;
        # they should be collected onto the base "people" table
        __hack_args_one__ = sa.UniqueConstraint(
            Person.name, primary_language)
        __hack_args_two__ = sa.CheckConstraint(
            Person.name != primary_language)

    uq = [c for c in Person.__table__.constraints
          if isinstance(c, sa.UniqueConstraint)][0]
    ck = [c for c in Person.__table__.constraints
          if isinstance(c, sa.CheckConstraint)][0]
    eq_(
        list(uq.columns),
        [Person.__table__.c.name, Person.__table__.c.primary_language]
    )
    eq_(
        list(ck.columns),
        [Person.__table__.c.name, Person.__table__.c.primary_language]
    )
@testing.skip_if(lambda: testing.against('oracle'),
                 "Test has an empty insert in it at the moment")
def test_columns_single_inheritance_conflict_resolution(self):
    """Test that a declared_attr can return the existing column and it will
    be ignored.  this allows conditional columns to be added.

    See [ticket:2472].

    """
    class Person(Base):
        __tablename__ = 'person'
        id = Column(Integer, primary_key=True)

    class Engineer(Person):

        """single table inheritance"""

        # return the column already present on the table, if any;
        # otherwise create it.  both siblings share the same column.
        @declared_attr
        def target_id(cls):
            return cls.__table__.c.get(
                'target_id',
                Column(Integer, ForeignKey('other.id')))

        @declared_attr
        def target(cls):
            return relationship("Other")

    class Manager(Person):

        """single table inheritance"""

        @declared_attr
        def target_id(cls):
            return cls.__table__.c.get(
                'target_id',
                Column(Integer, ForeignKey('other.id')))

        @declared_attr
        def target(cls):
            return relationship("Other")

    class Other(Base):
        __tablename__ = 'other'
        id = Column(Integer, primary_key=True)

    # both subclasses map onto the single shared column
    is_(
        Engineer.target_id.property.columns[0],
        Person.__table__.c.target_id
    )
    is_(
        Manager.target_id.property.columns[0],
        Person.__table__.c.target_id
    )

    # do a brief round trip on this
    Base.metadata.create_all()
    session = Session()
    o1, o2 = Other(), Other()
    session.add_all([
        Engineer(target=o1),
        Manager(target=o2),
        Manager(target=o1)
    ])
    session.commit()
    eq_(session.query(Engineer).first().target, o1)
def test_joined_from_single(self):
    """test a joined-table subclass (Engineer) alongside a
    single-table subclass (Manager) of the same base."""

    class Company(Base, fixtures.ComparableEntity):
        __tablename__ = 'companies'
        id = Column('id', Integer, primary_key=True,
                    test_needs_autoincrement=True)
        name = Column('name', String(50))
        employees = relationship('Person')

    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        company_id = Column(Integer, ForeignKey('companies.id'))
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Manager(Person):
        # single table inheritance; golf_swing goes to "people"
        __mapper_args__ = {'polymorphic_identity': 'manager'}
        golf_swing = Column(String(50))

    class Engineer(Person):
        # joined table inheritance; primary_language stays on
        # its own "engineers" table
        __tablename__ = 'engineers'
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        id = Column(Integer, ForeignKey('people.id'),
                    primary_key=True)
        primary_language = Column(String(50))

    assert Person.__table__.c.golf_swing is not None
    assert 'primary_language' not in Person.__table__.c
    assert Engineer.__table__.c.primary_language is not None
    assert Engineer.primary_language is not None
    assert Manager.golf_swing is not None
    assert not hasattr(Person, 'primary_language')
    assert not hasattr(Person, 'golf_swing')
    assert not hasattr(Engineer, 'golf_swing')
    assert not hasattr(Manager, 'primary_language')
    Base.metadata.create_all()
    sess = create_session()
    e1 = Engineer(name='dilbert', primary_language='java')
    e2 = Engineer(name='wally', primary_language='c++')
    m1 = Manager(name='dogbert', golf_swing='fore!')
    c1 = Company(name='MegaCorp, Inc.', employees=[e1, e2, m1])
    e3 = Engineer(name='vlad', primary_language='cobol')
    c2 = Company(name='Elbonia, Inc.', employees=[e3])
    sess.add(c1)
    sess.add(c2)
    sess.flush()
    sess.expunge_all()
    eq_(sess.query(Person).with_polymorphic(Engineer).
        filter(Engineer.primary_language
        == 'cobol').first(), Engineer(name='vlad'))
    eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
        any(Engineer.primary_language
        == 'cobol')).first(), c2)
    eq_(sess.query(Engineer).filter_by(primary_language='cobol'
        ).one(),
        Engineer(name='vlad', primary_language='cobol'))
def test_single_from_joined_colsonsub(self):
    """test a single-table subclass (Boss) of a joined-table subclass
    (Manager); its column should land on the "manager" table."""

    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Manager(Person):
        __tablename__ = 'manager'
        __mapper_args__ = {'polymorphic_identity': 'manager'}
        id = Column(Integer, ForeignKey('people.id'), primary_key=True)
        golf_swing = Column(String(50))

    class Boss(Manager):
        boss_name = Column(String(50))

    is_(
        Boss.__mapper__.column_attrs['boss_name'].columns[0],
        Manager.__table__.c.boss_name
    )
def test_polymorphic_on_converted_from_inst(self):
    """a polymorphic_on passed via a declared_attr __mapper_args__
    is resolved to the mapped Column for subclasses as well."""

    class A(Base):
        __tablename__ = 'A'
        id = Column(Integer, primary_key=True)
        discriminator = Column(String)

        @declared_attr
        def __mapper_args__(cls):
            # called per-class; cls.discriminator is the
            # instrumented attribute at this point
            return {
                'polymorphic_identity': cls.__name__,
                'polymorphic_on': cls.discriminator
            }

    class B(A):
        pass

    is_(B.__mapper__.polymorphic_on, A.__table__.c.discriminator)
def test_add_deferred(self):
    """a deferred() column attribute can be attached to a mapped
    class after the fact and is loaded lazily."""

    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column('id', Integer, primary_key=True,
                    test_needs_autoincrement=True)

    # attach the deferred column post-hoc
    Person.name = deferred(Column(String(10)))
    Base.metadata.create_all()
    sess = create_session()
    p = Person(name='ratbert')
    sess.add(p)
    sess.flush()
    sess.expunge_all()
    eq_(sess.query(Person).all(), [Person(name='ratbert')])
    sess.expunge_all()
    person = sess.query(Person).filter(Person.name == 'ratbert'
                                       ).one()
    # deferred: not loaded into __dict__ until accessed
    assert 'name' not in person.__dict__
def test_single_fksonsub(self):
    """test single inheritance with a foreign key-holding column on
    a subclass.

    """

    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        # FK column declared on the subclass; moved to "people"
        # but exposed only on Engineer
        primary_language_id = Column(Integer,
                                     ForeignKey('languages.id'))
        primary_language = relationship('Language')

    class Language(Base, fixtures.ComparableEntity):
        __tablename__ = 'languages'
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        name = Column(String(50))

    assert not hasattr(Person, 'primary_language_id')
    Base.metadata.create_all()
    sess = create_session()
    java, cpp, cobol = Language(name='java'), Language(name='cpp'), \
        Language(name='cobol')
    e1 = Engineer(name='dilbert', primary_language=java)
    e2 = Engineer(name='wally', primary_language=cpp)
    e3 = Engineer(name='vlad', primary_language=cobol)
    sess.add_all([e1, e2, e3])
    sess.flush()
    sess.expunge_all()
    # the relationship is usable from both the base and sub entity
    eq_(sess.query(Person).filter(Engineer.primary_language.has(
        Language.name
        == 'cobol')).first(),
        Engineer(name='vlad', primary_language=Language(name='cobol')))
    eq_(sess.query(Engineer).filter(Engineer.primary_language.has(
        Language.name
        == 'cobol')).one(),
        Engineer(name='vlad', primary_language=Language(name='cobol')))
    eq_(sess.query(Person).join(Engineer.primary_language).order_by(
        Language.name).all(),
        [Engineer(name='vlad',
                  primary_language=Language(name='cobol')),
         Engineer(name='wally', primary_language=Language(name='cpp'
                                                          )),
         Engineer(name='dilbert', primary_language=Language(name='java'))])
def test_single_three_levels(self):
    """three-level single-table inheritance: each class sees its own
    and its ancestors' columns, but not its siblings'."""

    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        primary_language = Column(String(50))

    class JuniorEngineer(Engineer):
        __mapper_args__ = \
            {'polymorphic_identity': 'junior_engineer'}
        nerf_gun = Column(String(50))

    class Manager(Person):
        __mapper_args__ = {'polymorphic_identity': 'manager'}
        golf_swing = Column(String(50))

    assert JuniorEngineer.nerf_gun
    assert JuniorEngineer.primary_language
    assert JuniorEngineer.name
    assert Manager.golf_swing
    assert Engineer.primary_language
    assert not hasattr(Engineer, 'golf_swing')
    assert not hasattr(Engineer, 'nerf_gun')
    assert not hasattr(Manager, 'nerf_gun')
    assert not hasattr(Manager, 'primary_language')
def test_single_detects_conflict(self):
    """declaring a column that collides with a sibling's or the
    parent's column in single-table inheritance raises."""

    class Person(Base):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        primary_language = Column(String(50))

    # test sibling col conflict
    def go():
        class Manager(Person):
            __mapper_args__ = {'polymorphic_identity': 'manager'}
            golf_swing = Column(String(50))
            primary_language = Column(String(50))

    assert_raises(sa.exc.ArgumentError, go)

    # test parent col conflict
    def go():
        class Salesman(Person):
            __mapper_args__ = {'polymorphic_identity': 'manager'}
            name = Column(String(50))

    assert_raises(sa.exc.ArgumentError, go)
def test_single_no_special_cols(self):
    """a single-table subclass may not declare a primary key
    column of its own."""

    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column('id', Integer, primary_key=True)
        name = Column('name', String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    def go():
        class Engineer(Person):
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column('primary_language',
                                      String(50))
            foo_bar = Column(Integer, primary_key=True)

    assert_raises_message(sa.exc.ArgumentError,
                          'place primary key', go)
def test_single_no_table_args(self):
    """a single-table subclass may not declare __table_args__;
    it has no table of its own."""

    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column('id', Integer, primary_key=True)
        name = Column('name', String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    def go():
        class Engineer(Person):
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column('primary_language',
                                      String(50))

            # this should be on the Person class, as this is single
            # table inheritance, which is why we test that this
            # throws an exception!
            __table_args__ = {'mysql_engine': 'InnoDB'}

    assert_raises_message(sa.exc.ArgumentError,
                          'place __table_args__', go)
@testing.emits_warning("This declarative")
def test_dupe_name_in_hierarchy(self):
    """re-using the same class name within a hierarchy warns but
    still sets up inheritance correctly."""

    class A(Base):
        __tablename__ = "a"
        id = Column(Integer, primary_key=True)
    a_1 = A

    class A(a_1):
        __tablename__ = 'b'
        id = Column(Integer(), ForeignKey(a_1.id), primary_key=True)

    assert A.__mapper__.inherits is a_1.__mapper__
class OverlapColPrecedenceTest(DeclarativeTestBase):

    """test #1892 cases when declarative does column precedence."""

    def _run_test(self, Engineer, e_id, p_id):
        # the joined-inheritance 'id' attribute maps both columns,
        # with the subclass ("engineer") column listed first
        p_table = Base.metadata.tables['person']
        e_table = Base.metadata.tables['engineer']
        assert Engineer.id.property.columns[0] is e_table.c[e_id]
        assert Engineer.id.property.columns[1] is p_table.c[p_id]

    def test_basic(self):
        class Person(Base):
            __tablename__ = 'person'
            id = Column(Integer, primary_key=True)

        class Engineer(Person):
            __tablename__ = 'engineer'
            id = Column(Integer, ForeignKey('person.id'), primary_key=True)

        self._run_test(Engineer, "id", "id")

    def test_alt_name_base(self):
        # base column has an explicit alternate name
        class Person(Base):
            __tablename__ = 'person'
            id = Column("pid", Integer, primary_key=True)

        class Engineer(Person):
            __tablename__ = 'engineer'
            id = Column(Integer, ForeignKey('person.pid'), primary_key=True)

        self._run_test(Engineer, "id", "pid")

    def test_alt_name_sub(self):
        # subclass column has an explicit alternate name
        class Person(Base):
            __tablename__ = 'person'
            id = Column(Integer, primary_key=True)

        class Engineer(Person):
            __tablename__ = 'engineer'
            id = Column("eid", Integer, ForeignKey('person.id'),
                        primary_key=True)

        self._run_test(Engineer, "eid", "id")

    def test_alt_name_both(self):
        # both columns have explicit alternate names
        class Person(Base):
            __tablename__ = 'person'
            id = Column("pid", Integer, primary_key=True)

        class Engineer(Person):
            __tablename__ = 'engineer'
            id = Column("eid", Integer, ForeignKey('person.pid'),
                        primary_key=True)

        self._run_test(Engineer, "eid", "pid")
class ConcreteInhTest(_RemoveListeners, DeclarativeTestBase):

    """tests for concrete-table inheritance with declarative,
    including the ConcreteBase / AbstractConcreteBase helpers."""

    def _roundtrip(self, Employee, Manager, Engineer, Boss,
                   polymorphic=True, explicit_type=False):
        # shared fixture: persist one of each subclass and verify
        # querying behavior, polymorphically or per-class
        Base.metadata.create_all()
        sess = create_session()
        e1 = Engineer(name='dilbert', primary_language='java')
        e2 = Engineer(name='wally', primary_language='c++')
        m1 = Manager(name='dogbert', golf_swing='fore!')
        e3 = Engineer(name='vlad', primary_language='cobol')
        b1 = Boss(name="pointy haired")

        if polymorphic:
            for obj in [e1, e2, m1, e3, b1]:
                if explicit_type:
                    # classes define their own 'type' property
                    eq_(obj.type, obj.__mapper__.polymorphic_identity)
                else:
                    # the polymorphic-union 'type' column is not
                    # available at the instance level
                    assert_raises_message(
                        AttributeError,
                        "does not implement attribute .?'type' "
                        "at the instance level.",
                        getattr, obj, "type"
                    )
        else:
            assert "type" not in Engineer.__dict__
            assert "type" not in Manager.__dict__
            assert "type" not in Boss.__dict__

        sess.add_all([e1, e2, m1, e3, b1])
        sess.flush()
        sess.expunge_all()
        if polymorphic:
            eq_(sess.query(Employee).order_by(Employee.name).all(),
                [Engineer(name='dilbert'), Manager(name='dogbert'),
                 Boss(name='pointy haired'),
                 Engineer(name='vlad'), Engineer(name='wally')])
        else:
            eq_(sess.query(Engineer).order_by(Engineer.name).all(),
                [Engineer(name='dilbert'), Engineer(name='vlad'),
                 Engineer(name='wally')])
            eq_(sess.query(Manager).all(), [Manager(name='dogbert')])
            eq_(sess.query(Boss).all(), [Boss(name='pointy haired')])

    def test_explicit(self):
        # hand-built polymorphic_union over explicit Table objects
        engineers = Table(
            'engineers', Base.metadata,
            Column('id',
                   Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('primary_language', String(50)))
        managers = Table('managers', Base.metadata,
                         Column('id', Integer, primary_key=True,
                                test_needs_autoincrement=True),
                         Column('name', String(50)),
                         Column('golf_swing', String(50))
                         )
        boss = Table('boss', Base.metadata,
                     Column('id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
                     Column('name', String(50)),
                     Column('golf_swing', String(50))
                     )
        punion = polymorphic_union({
            'engineer': engineers,
            'manager': managers,
            'boss': boss}, 'type', 'punion')

        class Employee(Base, fixtures.ComparableEntity):
            __table__ = punion
            __mapper_args__ = {'polymorphic_on': punion.c.type}

        class Engineer(Employee):
            __table__ = engineers
            __mapper_args__ = {'polymorphic_identity': 'engineer',
                               'concrete': True}

        class Manager(Employee):
            __table__ = managers
            __mapper_args__ = {'polymorphic_identity': 'manager',
                               'concrete': True}

        class Boss(Manager):
            __table__ = boss
            __mapper_args__ = {'polymorphic_identity': 'boss',
                               'concrete': True}

        self._roundtrip(Employee, Manager, Engineer, Boss)

    def test_concrete_inline_non_polymorphic(self):
        """test the example from the declarative docs."""

        class Employee(Base, fixtures.ComparableEntity):
            __tablename__ = 'people'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column(String(50))

        class Engineer(Employee):
            __tablename__ = 'engineers'
            __mapper_args__ = {'concrete': True}
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            primary_language = Column(String(50))
            name = Column(String(50))

        class Manager(Employee):
            __tablename__ = 'manager'
            __mapper_args__ = {'concrete': True}
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            golf_swing = Column(String(50))
            name = Column(String(50))

        class Boss(Manager):
            __tablename__ = 'boss'
            __mapper_args__ = {'concrete': True}
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            golf_swing = Column(String(50))
            name = Column(String(50))

        self._roundtrip(Employee, Manager, Engineer, Boss, polymorphic=False)

    def test_abstract_concrete_extension(self):
        # AbstractConcreteBase: the base itself has no table
        class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity):
            pass

        class Manager(Employee):
            __tablename__ = 'manager'
            employee_id = Column(Integer, primary_key=True,
                                 test_needs_autoincrement=True)
            name = Column(String(50))
            golf_swing = Column(String(40))
            __mapper_args__ = {
                'polymorphic_identity': 'manager',
                'concrete': True}

        class Boss(Manager):
            __tablename__ = 'boss'
            employee_id = Column(Integer, primary_key=True,
                                 test_needs_autoincrement=True)
            name = Column(String(50))
            golf_swing = Column(String(40))
            __mapper_args__ = {
                'polymorphic_identity': 'boss',
                'concrete': True}

        class Engineer(Employee):
            __tablename__ = 'engineer'
            employee_id = Column(Integer, primary_key=True,
                                 test_needs_autoincrement=True)
            name = Column(String(50))
            primary_language = Column(String(40))
            __mapper_args__ = {'polymorphic_identity': 'engineer',
                               'concrete': True}

        self._roundtrip(Employee, Manager, Engineer, Boss)

    def test_concrete_extension(self):
        # ConcreteBase: the base class has a mapped table of its own
        class Employee(ConcreteBase, Base, fixtures.ComparableEntity):
            __tablename__ = 'employee'
            employee_id = Column(Integer, primary_key=True,
                                 test_needs_autoincrement=True)
            name = Column(String(50))
            __mapper_args__ = {
                'polymorphic_identity': 'employee',
                'concrete': True}

        class Manager(Employee):
            __tablename__ = 'manager'
            employee_id = Column(Integer, primary_key=True,
                                 test_needs_autoincrement=True)
            name = Column(String(50))
            golf_swing = Column(String(40))
            __mapper_args__ = {
                'polymorphic_identity': 'manager',
                'concrete': True}

        class Boss(Manager):
            __tablename__ = 'boss'
            employee_id = Column(Integer, primary_key=True,
                                 test_needs_autoincrement=True)
            name = Column(String(50))
            golf_swing = Column(String(40))
            __mapper_args__ = {
                'polymorphic_identity': 'boss',
                'concrete': True}

        class Engineer(Employee):
            __tablename__ = 'engineer'
            employee_id = Column(Integer, primary_key=True,
                                 test_needs_autoincrement=True)
            name = Column(String(50))
            primary_language = Column(String(40))
            __mapper_args__ = {'polymorphic_identity': 'engineer',
                               'concrete': True}

        self._roundtrip(Employee, Manager, Engineer, Boss)

    def test_has_inherited_table_doesnt_consider_base(self):
        class A(Base):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)

        assert not has_inherited_table(A)

        class B(A):
            __tablename__ = 'b'
            id = Column(Integer, ForeignKey('a.id'), primary_key=True)

        assert has_inherited_table(B)

    def test_has_inherited_table_in_mapper_args(self):
        class Test(Base):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
            type = Column(String(20))

            @declared_attr
            def __mapper_args__(cls):
                # only the base class carries polymorphic_on
                if not has_inherited_table(cls):
                    ret = {
                        'polymorphic_identity': 'default',
                        'polymorphic_on': cls.type,
                    }
                else:
                    ret = {'polymorphic_identity': cls.__name__}
                return ret

        class PolyTest(Test):
            __tablename__ = 'poly_test'
            id = Column(Integer, ForeignKey(Test.id), primary_key=True)

        configure_mappers()
        assert Test.__mapper__.polymorphic_on is Test.__table__.c.type
        assert PolyTest.__mapper__.polymorphic_on is Test.__table__.c.type

    def test_ok_to_override_type_from_abstract(self):
        # classes may provide their own 'type' property over the
        # polymorphic-union column
        class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity):
            pass

        class Manager(Employee):
            __tablename__ = 'manager'
            employee_id = Column(Integer, primary_key=True,
                                 test_needs_autoincrement=True)
            name = Column(String(50))
            golf_swing = Column(String(40))

            @property
            def type(self):
                return "manager"

            __mapper_args__ = {
                'polymorphic_identity': "manager",
                'concrete': True}

        class Boss(Manager):
            __tablename__ = 'boss'
            employee_id = Column(Integer, primary_key=True,
                                 test_needs_autoincrement=True)
            name = Column(String(50))
            golf_swing = Column(String(40))

            @property
            def type(self):
                return "boss"

            __mapper_args__ = {
                'polymorphic_identity': "boss",
                'concrete': True}

        class Engineer(Employee):
            __tablename__ = 'engineer'
            employee_id = Column(Integer, primary_key=True,
                                 test_needs_autoincrement=True)
            name = Column(String(50))
            primary_language = Column(String(40))

            @property
            def type(self):
                return "engineer"

            __mapper_args__ = {'polymorphic_identity': "engineer",
                               'concrete': True}

        self._roundtrip(Employee, Manager, Engineer, Boss, explicit_type=True)
class ConcreteExtensionConfigTest(
        _RemoveListeners, testing.AssertsCompiledSQL, DeclarativeTestBase):

    """configuration-level tests for ConcreteBase /
    AbstractConcreteBase, including compiled-SQL assertions against
    the generated "pjoin" polymorphic union."""

    __dialect__ = 'default'

    def test_classreg_setup(self):
        class A(Base, fixtures.ComparableEntity):
            __tablename__ = 'a'
            id = Column(Integer,
                        primary_key=True, test_needs_autoincrement=True)
            data = Column(String(50))
            collection = relationship("BC", primaryjoin="BC.a_id == A.id",
                                      collection_class=set)

        class BC(AbstractConcreteBase, Base, fixtures.ComparableEntity):
            pass

        class B(BC):
            __tablename__ = 'b'
            id = Column(Integer,
                        primary_key=True, test_needs_autoincrement=True)
            a_id = Column(Integer, ForeignKey('a.id'))
            data = Column(String(50))
            b_data = Column(String(50))
            __mapper_args__ = {
                "polymorphic_identity": "b",
                "concrete": True
            }

        class C(BC):
            __tablename__ = 'c'
            id = Column(Integer,
                        primary_key=True, test_needs_autoincrement=True)
            a_id = Column(Integer, ForeignKey('a.id'))
            data = Column(String(50))
            c_data = Column(String(50))
            __mapper_args__ = {
                "polymorphic_identity": "c",
                "concrete": True
            }

        Base.metadata.create_all()
        sess = Session()
        sess.add_all([
            A(data='a1', collection=set([
                B(data='a1b1', b_data='a1b1'),
                C(data='a1b2', c_data='a1c1'),
                B(data='a1b2', b_data='a1b2'),
                C(data='a1c2', c_data='a1c2'),
            ])),
            A(data='a2', collection=set([
                B(data='a2b1', b_data='a2b1'),
                C(data='a2c1', c_data='a2c1'),
                B(data='a2b2', b_data='a2b2'),
                C(data='a2c2', c_data='a2c2'),
            ]))
        ])
        sess.commit()
        sess.expunge_all()

        eq_(
            sess.query(A).filter_by(data='a2').all(),
            [
                A(data='a2', collection=set([
                    B(data='a2b1', b_data='a2b1'),
                    B(data='a2b2', b_data='a2b2'),
                    C(data='a2c1', c_data='a2c1'),
                    C(data='a2c2', c_data='a2c2'),
                ]))
            ]
        )

        # the relationship joins against the generated pjoin subquery
        self.assert_compile(
            sess.query(A).join(A.collection),
            "SELECT a.id AS a_id, a.data AS a_data FROM a JOIN "
            "(SELECT c.id AS id, c.a_id AS a_id, c.data AS data, "
            "c.c_data AS c_data, CAST(NULL AS VARCHAR(50)) AS b_data, "
            "'c' AS type FROM c UNION ALL SELECT b.id AS id, b.a_id AS a_id, "
            "b.data AS data, CAST(NULL AS VARCHAR(50)) AS c_data, "
            "b.b_data AS b_data, 'b' AS type FROM b) AS pjoin "
            "ON pjoin.a_id = a.id"
        )

    def test_prop_on_base(self):
        """test [ticket:2670] """

        counter = mock.Mock()

        class Something(Base):
            __tablename__ = 'something'
            id = Column(Integer, primary_key=True)

        class AbstractConcreteAbstraction(AbstractConcreteBase, Base):
            id = Column(Integer, primary_key=True)
            x = Column(Integer)
            y = Column(Integer)

            @declared_attr
            def something_id(cls):
                return Column(ForeignKey(Something.id))

            @declared_attr
            def something(cls):
                counter(cls, "something")
                return relationship("Something")

            @declared_attr
            def something_else(cls):
                counter(cls, "something_else")
                return relationship("Something")

        class ConcreteConcreteAbstraction(AbstractConcreteAbstraction):
            __tablename__ = 'cca'
            __mapper_args__ = {
                'polymorphic_identity': 'ccb',
                'concrete': True}

        # concrete is mapped, the abstract base is not (yet)
        assert ConcreteConcreteAbstraction.__mapper__
        assert not hasattr(AbstractConcreteAbstraction, '__mapper__')

        session = Session()
        self.assert_compile(
            session.query(ConcreteConcreteAbstraction).filter(
                ConcreteConcreteAbstraction.something.has(id=1)),
            "SELECT cca.id AS cca_id, cca.x AS cca_x, cca.y AS cca_y, "
            "cca.something_id AS cca_something_id FROM cca WHERE EXISTS "
            "(SELECT 1 FROM something WHERE something.id = cca.something_id "
            "AND something.id = :id_1)"
        )

        # now it is
        assert AbstractConcreteAbstraction.__mapper__

        self.assert_compile(
            session.query(ConcreteConcreteAbstraction).filter(
                ConcreteConcreteAbstraction.something_else.has(id=1)),
            "SELECT cca.id AS cca_id, cca.x AS cca_x, cca.y AS cca_y, "
            "cca.something_id AS cca_something_id FROM cca WHERE EXISTS "
            "(SELECT 1 FROM something WHERE something.id = cca.something_id "
            "AND something.id = :id_1)"
        )

        self.assert_compile(
            session.query(AbstractConcreteAbstraction).filter(
                AbstractConcreteAbstraction.something.has(id=1)),
            "SELECT pjoin.id AS pjoin_id, pjoin.x AS pjoin_x, "
            "pjoin.y AS pjoin_y, pjoin.something_id AS pjoin_something_id, "
            "pjoin.type AS pjoin_type FROM "
            "(SELECT cca.id AS id, cca.x AS x, cca.y AS y, "
            "cca.something_id AS something_id, 'ccb' AS type FROM cca) "
            "AS pjoin WHERE EXISTS (SELECT 1 FROM something "
            "WHERE something.id = pjoin.something_id AND something.id = :id_1)"
        )

        self.assert_compile(
            session.query(AbstractConcreteAbstraction).filter(
                AbstractConcreteAbstraction.something_else.has(id=1)),
            "SELECT pjoin.id AS pjoin_id, pjoin.x AS pjoin_x, "
            "pjoin.y AS pjoin_y, pjoin.something_id AS pjoin_something_id, "
            "pjoin.type AS pjoin_type FROM "
            "(SELECT cca.id AS id, cca.x AS x, cca.y AS y, "
            "cca.something_id AS something_id, 'ccb' AS type FROM cca) "
            "AS pjoin WHERE EXISTS (SELECT 1 FROM something "
            "WHERE something.id = pjoin.something_id AND something.id = :id_1)"
        )

    def test_abstract_in_hierarchy(self):
        class Document(Base, AbstractConcreteBase):
            doctype = Column(String)

        class ContactDocument(Document):
            # intermediate __abstract__ class contributes a column
            # but is not mapped itself
            __abstract__ = True

            send_method = Column(String)

        class ActualDocument(ContactDocument):
            __tablename__ = 'actual_documents'
            __mapper_args__ = {
                'concrete': True,
                'polymorphic_identity': 'actual'}

            id = Column(Integer, primary_key=True)

        configure_mappers()
        session = Session()
        self.assert_compile(
            session.query(Document),
            "SELECT pjoin.doctype AS pjoin_doctype, "
            "pjoin.send_method AS pjoin_send_method, "
            "pjoin.id AS pjoin_id, pjoin.type AS pjoin_type "
            "FROM (SELECT actual_documents.doctype AS doctype, "
            "actual_documents.send_method AS send_method, "
            "actual_documents.id AS id, 'actual' AS type "
            "FROM actual_documents) AS pjoin"
        )

    def test_column_attr_names(self):
        """test #3480"""

        class Document(Base, AbstractConcreteBase):
            # attribute name differs from the column name
            documentType = Column('documenttype', String)

        class Offer(Document):
            __tablename__ = 'offers'

            id = Column(Integer, primary_key=True)
            __mapper_args__ = {
                'polymorphic_identity': 'offer'
            }

        configure_mappers()
        session = Session()
        self.assert_compile(
            session.query(Document),
            "SELECT pjoin.documenttype AS pjoin_documenttype, "
            "pjoin.id AS pjoin_id, pjoin.type AS pjoin_type FROM "
            "(SELECT offers.documenttype AS documenttype, offers.id AS id, "
            "'offer' AS type FROM offers) AS pjoin"
        )

        self.assert_compile(
            session.query(Document.documentType),
            "SELECT pjoin.documenttype AS pjoin_documenttype FROM "
            "(SELECT offers.documenttype AS documenttype, offers.id AS id, "
            "'offer' AS type FROM offers) AS pjoin"
        )
| 36.322724 | 79 | 0.563377 |
3cc6813b07d8afc326da551cd081a424a145fa6b | 5,544 | py | Python | model.py | bhneo/SimSiam-TF | cd2b44a1b1049fb08c2a4fada499863f06d47b16 | [
"MIT"
] | null | null | null | model.py | bhneo/SimSiam-TF | cd2b44a1b1049fb08c2a4fada499863f06d47b16 | [
"MIT"
] | null | null | null | model.py | bhneo/SimSiam-TF | cd2b44a1b1049fb08c2a4fada499863f06d47b16 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Add
from tensorflow.keras import Sequential
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.initializers import Constant
from resnet import ResNet18, ResNet20
from layer import _conv2d
from layer import _batchnorm
from layer import _dense
# Backbone name -> constructor.  ResNet18/ResNet20 come from the local
# `resnet` module; ResNet50 from tf.keras.applications.
MODEL_DICT = {
    'resnet18' : ResNet18,
    'resnet20' : ResNet20,
    'resnet50' : tf.keras.applications.ResNet50,}

# Backbone name -> the module that owns its layer factories; SimSiam
# monkey-patches `Conv2D`/`BatchNormalization`/`Dense` on this module
# before instantiating the backbone.
# NOTE(review): `tf.python.keras.applications.resnet` is a private TF
# path and may break across TF versions — confirm against the pinned
# TF release.
FAMILY_DICT = {
    'resnet18' : tf.python.keras.applications.resnet,
    'resnet20' : tf.python.keras.applications.resnet,
    'resnet50' : tf.python.keras.applications.resnet,}
def set_lincls(args, backbone):
    """Attach a linear-evaluation head to a pretrained backbone.

    Args:
        args: namespace with ``use_bias``, ``weight_decay``, ``freeze``,
            ``pred_layer`` (name of the backbone layer whose output feeds
            the classifier) and ``classes``.
        backbone: the pretrained Keras backbone model.

    Returns:
        A Keras ``Model`` named ``'lincls'`` mapping the backbone input
        to class predictions.
    """
    # Optionally freeze the backbone so only the linear head trains.
    if args.freeze:
        backbone.trainable = False

    dense_kwargs = {
        "use_bias": args.use_bias,
        "kernel_regularizer": l2(args.weight_decay),
    }
    features = backbone.get_layer(name=args.pred_layer).output
    logits = _dense(**dense_kwargs)(args.classes, name='predictions')(features)
    return Model(backbone.input, logits, name='lincls')
class SimSiam(Model):
    """SimSiam: twin-encoder self-supervised model.

    The encoder is a backbone + projection MLP + prediction MLP and outputs
    ``[z, p]`` (projector and predictor activations).  Training minimises a
    symmetrised similarity loss between two augmented views of the same
    batch, with a stop-gradient on the projector branch as in the SimSiam
    paper (controlled by ``args.stop_gradient``).
    """

    def __init__(self, args, logger, num_workers=1, **kwargs):
        """Build the encoder.

        args: parsed experiment options (backbone name, dims, BN switches...).
        logger: used to report snapshot restoration.
        num_workers: >1 switches BatchNorm to its synchronized variant.
        """
        super(SimSiam, self).__init__(**kwargs)
        self.args = args
        self._num_workers = num_workers
        # Multi-worker training needs synchronized BN statistics.
        norm = 'bn' if self._num_workers == 1 else 'syncbn'
        DEFAULT_ARGS = {
            "filters": self.args.filters,
            "classes": self.args.classes,
            "use_bias": self.args.use_bias,
            "kernel_regularizer": l2(self.args.weight_decay)}
        # Patch the Keras resnet module so the stock builders use the
        # customized Conv2D/BN/Dense factories from layer.py.
        FAMILY_DICT[self.args.backbone].Conv2D = _conv2d(**DEFAULT_ARGS)
        FAMILY_DICT[self.args.backbone].BatchNormalization = _batchnorm(norm=norm)
        FAMILY_DICT[self.args.backbone].Dense = _dense(**DEFAULT_ARGS)
        DEFAULT_ARGS.update({'norm': norm})
        # Only the custom resnet18/resnet20 builders accept the extra kwargs.
        backbone = MODEL_DICT[self.args.backbone](
            include_top=False,
            weights=None,
            input_shape=(self.args.img_size, self.args.img_size, 3),
            pooling='avg',
            **DEFAULT_ARGS if self.args.backbone == 'resnet18' or self.args.backbone == 'resnet20' else {})
        DEFAULT_ARGS.pop('norm')
        DEFAULT_ARGS.pop('filters')
        DEFAULT_ARGS.pop('classes')

        x = backbone.output
        outputs = []
        # Projection MLP: 3 dense layers for imagenet, 2 otherwise (the last
        # layer is always named proj_fc3 regardless of depth).
        num_mlp = 3 if self.args.dataset == 'imagenet' else 2
        for i in range(num_mlp-1):
            x = _dense(**DEFAULT_ARGS)(self.args.proj_dim, name=f'proj_fc{i+1}')(x)
            if self.args.proj_bn_hidden:
                x = _batchnorm(norm=norm)(epsilon=1.001e-5, name=f'proj_bn{i+1}')(x)
            x = Activation('relu', name=f'proj_relu{i+1}')(x)
        x = _dense(**DEFAULT_ARGS)(self.args.proj_dim, name='proj_fc3')(x)
        if self.args.proj_bn_output:
            x = _batchnorm(norm=norm)(epsilon=1.001e-5, name='proj_bn3')(x)
        outputs.append(x)  # z: projector output

        # Prediction MLP: bottleneck of pred_dim, expanded back to proj_dim.
        x = _dense(**DEFAULT_ARGS)(self.args.pred_dim, name='pred_fc1')(x)
        if self.args.pred_bn_hidden:
            x = _batchnorm(norm=norm)(epsilon=1.001e-5, name='pred_bn1')(x)
        x = Activation('relu', name='pred_relu1')(x)
        x = _dense(**DEFAULT_ARGS)(self.args.proj_dim, name='pred_fc2')(x)
        if self.args.pred_bn_output:
            x = _batchnorm(norm=norm)(epsilon=1.001e-5, name='pred_bn2')(x)
        outputs.append(x)  # p: predictor output

        self.encoder = Model(backbone.input, outputs, name='encoder')

        # Restore pretext-task weights when resuming.
        if self.args.snapshot and self.args.task == "pretext":
            self.load_weights(self.args.snapshot)
            logger.info('Load weights at {}'.format(self.args.snapshot))

    def compile(
        self,
        optimizer,
        loss,
        run_eagerly=None):
        """Store the similarity loss; the base class keeps the optimizer."""
        super(SimSiam, self).compile(
            optimizer=optimizer, run_eagerly=run_eagerly)
        self._loss = loss

    def call(self, inputs, training=None, mask=None):
        """Run both augmented views through the encoder.

        Bug fix: the original encoded only the first view, discarded the
        result and implicitly returned None, which broke direct invocation
        (``model(inputs)`` / ``predict``).  Now both views are encoded and
        their (z, p) pairs are returned.
        """
        img1, img2 = inputs
        z1, p1 = self.encoder(img1, training=training)
        z2, p2 = self.encoder(img2, training=training)
        return (z1, p1), (z2, p2)

    def train_step(self, data):
        """One optimization step over a pair of augmented batches."""
        img1, img2 = data
        with tf.GradientTape() as tape:
            z1, p1 = self.encoder(img1, training=True)
            z2, p2 = self.encoder(img2, training=True)
            # Symmetrised loss; stop-gradient on z as in the SimSiam paper.
            if self.args.stop_gradient:
                loss_simsiam = (self._loss(p1, tf.stop_gradient(z2)) + self._loss(p2, tf.stop_gradient(z1))) / 2
            else:
                loss_simsiam = (self._loss(p1, z2) + self._loss(p2, z1)) / 2
            loss_simsiam = tf.reduce_mean(loss_simsiam)
            loss_decay = sum(self.encoder.losses)
            loss = loss_simsiam + loss_decay
            # Scale so per-replica gradients sum to the global gradient.
            total_loss = loss / self._num_workers

        trainable_vars = self.encoder.trainable_variables
        grads = tape.gradient(total_loss, trainable_vars)
        self.optimizer.apply_gradients(zip(grads, trainable_vars))

        # Std of the l2-normalised embeddings: a collapse indicator
        # (values near 0 mean the representations have collapsed).
        proj_std = tf.reduce_mean(tf.math.reduce_std(tf.math.l2_normalize(tf.concat((z1, z2), axis=0), axis=-1), axis=0))
        pred_std = tf.reduce_mean(tf.math.reduce_std(tf.math.l2_normalize(tf.concat((p1, p2), axis=0), axis=-1), axis=0))
        results = {
            'loss': loss,
            'loss_simsiam': loss_simsiam,
            'weight_decay': loss_decay,
            'proj_std': proj_std,
            'pred_std': pred_std}
        return results
b0c854002da5a93e1559ebd718de9b3f6bcdf1fc | 2,737 | py | Python | zilencer/management/commands/render_messages.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 2 | 2021-09-01T17:44:28.000Z | 2021-09-01T18:09:51.000Z | zilencer/management/commands/render_messages.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 1 | 2021-03-24T12:50:52.000Z | 2021-03-24T13:11:42.000Z | zilencer/management/commands/render_messages.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 1 | 2021-07-22T10:14:08.000Z | 2021-07-22T10:14:08.000Z | import os
from typing import Any, Iterator
import orjson
from django.core.management.base import BaseCommand, CommandParser
from django.db.models import QuerySet
from zerver.lib.message import render_markdown
from zerver.models import Message
def queryset_iterator(queryset: QuerySet, chunksize: int = 5000) -> Iterator[Any]:
    """Lazily yield every row of *queryset* in ascending id order.

    Rows are fetched ``chunksize`` at a time; after each chunk the queryset
    is narrowed to ids greater than the last one seen, keeping memory use
    bounded for very large tables.
    """
    queryset = queryset.order_by("id")
    while queryset.exists():
        last_seen = None
        for record in queryset[:chunksize]:
            last_seen = record.id
            yield record
        queryset = queryset.filter(id__gt=last_seen)
class Command(BaseCommand):
    help = """
Render messages to a file.
Usage: ./manage.py render_messages <destination> [--amount=10000]
"""
    def add_arguments(self, parser: CommandParser) -> None:
        parser.add_argument("destination", help="Destination file path")
        parser.add_argument("--amount", default=100000, help="Number of messages to render")
        parser.add_argument("--latest_id", default=0, help="Last message id to render")
    def handle(self, *args: Any, **options: Any) -> None:
        # Render `amount` messages up to and including `latest_id` (defaults
        # to the newest message in the database) into a JSON array file.
        dest_dir = os.path.realpath(os.path.dirname(options["destination"]))
        amount = int(options["amount"])
        latest = int(options["latest_id"]) or Message.objects.latest("id").id
        self.stdout.write(f"Latest message id: {latest}")
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        with open(options["destination"], "wb") as result:
            # The JSON array is written by hand so each message can be
            # streamed out as it is rendered instead of building the whole
            # list in memory.
            result.write(b"[")
            messages = Message.objects.filter(id__gt=latest - amount, id__lte=latest).order_by("id")
            for message in queryset_iterator(messages):
                content = message.content
                # In order to ensure that the output of this tool is
                # consistent across the time, even if messages are
                # edited, we always render the original content
                # version, extracting it from the edit history if
                # necessary.
                if message.edit_history:
                    history = orjson.loads(message.edit_history)
                    history = sorted(history, key=lambda i: i["timestamp"])
                    for entry in history:
                        if "prev_content" in entry:
                            content = entry["prev_content"]
                            break
                result.write(
                    orjson.dumps(
                        {
                            "id": message.id,
                            "content": render_markdown(message, content),
                        }
                    )
                )
                # NOTE(review): the separator is keyed on the id `latest`; if
                # no message with exactly that id survives the filter, the
                # output ends with ",]" and is not valid JSON — confirm
                # against how callers build `latest_id`.
                if message.id != latest:
                    result.write(b",")
            result.write(b"]")
| 40.25 | 100 | 0.572525 |
cad874f266f1346308ac23099e3b70165e905fc3 | 16,961 | py | Python | network.py | Rick-McCoy/MIDIWavenet | 229031df46c9b3791f6cd8fd4ff3d284083ee882 | [
"MIT"
] | 3 | 2019-03-19T09:44:43.000Z | 2020-05-12T12:57:34.000Z | network.py | Rick-McCoy/MIDIWavenet | 229031df46c9b3791f6cd8fd4ff3d284083ee882 | [
"MIT"
] | null | null | null | network.py | Rick-McCoy/MIDIWavenet | 229031df46c9b3791f6cd8fd4ff3d284083ee882 | [
"MIT"
] | null | null | null | """Wavenet, the raw network itself.
Contains various components of it as well as the network.
No hardcoded values, all passed arguments can be changed without issues."""
import queue
from math import sqrt
from torch import nn, cat, zeros
from utils import causal_pad
class DilatedConv1d(nn.Module):
    """Bias-free 1D causal convolution with a dilated and an undilated path.

    ``conv`` applies the dilated kernel (used while training); ``sample_conv``
    applies the same kernel without dilation (used for fast incremental
    sampling, where past activations arrive pre-gathered).  Both layers share
    one weight tensor, so training updates affect sampling too.
    """

    def __init__(self, in_channels, out_channels, kernel_size, dilation):
        super(DilatedConv1d, self).__init__()
        common = dict(kernel_size=kernel_size, bias=False)
        self.conv = nn.Conv1d(in_channels, out_channels, dilation=dilation, **common)
        self.sample_conv = nn.Conv1d(in_channels, out_channels, **common)
        # Tie the weights so the two paths never diverge.
        self.sample_conv.weight = self.conv.weight

    def forward(self, x, sample=False):
        """Convolve *x*; ``sample=True`` selects the undilated path."""
        if sample:
            return self.sample_conv(x)
        return self.conv(x)
class ResidualBlock(nn.Module):
    """One gated residual unit of the WaveNet stack.

    Computes ``tanh(conv_f(x) + cond_f) * sigmoid(conv_g(x) + cond_g)``,
    then a 1x1 convolution back to the residual width (added to the input)
    and a 1x1 convolution into the skip accumulator.  The condition
    projections are ``nn.Linear`` layers applied once per sequence (global
    conditioning), broadcast over time.

    For sampling, the block keeps one FIFO queue per extra kernel tap and a
    cached pair of condition projections (see ``set_condition``), so each
    generated step costs O(kernel_size) instead of re-running the dilation.
    """

    def __init__(
            self,
            residual_channels,
            dilation_channels,
            skip_channels,
            condition_channels,
            kernel_size,
            dilation
    ):
        super(ResidualBlock, self).__init__()
        self.dilation = dilation
        self.filter_conv = DilatedConv1d(
            residual_channels, dilation_channels, kernel_size, dilation)
        self.gate_conv = DilatedConv1d(
            residual_channels, dilation_channels, kernel_size, dilation)
        self.filter_linear = nn.Linear(condition_channels, dilation_channels)
        self.gate_linear = nn.Linear(condition_channels, dilation_channels)
        self.residual_conv = nn.Conv1d(dilation_channels, residual_channels, 1)
        self.skip_conv = nn.Conv1d(dilation_channels, skip_channels, 1)
        # One queue of past activations per kernel tap beyond the first.
        self.queues = [
            queue.Queue(dilation + 1) for _ in range(kernel_size - 1)
        ]
        self.output_length = 1
        # Cached condition projections for sampling mode (set_condition).
        self.conditional_filter = 0
        self.conditional_gate = 0

    def forward(self, x, condition=0, res_sum=0, sample=False):
        """Gated activation; returns (residual output, updated skip sum)."""
        filt = self.filter_conv(x, sample)
        gate = self.gate_conv(x, sample)
        if sample:
            # Sampling reuses the projections cached by set_condition.
            cond_filt = self.conditional_filter
            cond_gate = self.conditional_gate
        else:
            cond_filt = self.filter_linear(condition).unsqueeze(dim=-1)
            cond_gate = self.gate_linear(condition).unsqueeze(dim=-1)
        gated = (filt + cond_filt).tanh() * (gate + cond_gate).sigmoid()
        # Residual add against the tail of x that aligns with the conv output.
        output = self.residual_conv(gated) + x[..., -gated.shape[2]:]
        res_sum = res_sum + self.skip_conv(gated)[..., -self.output_length:]
        return output, res_sum

    def set_condition(self, condition):
        """Cache the condition projections for reuse while sampling.
        Condition is reused during entire duration of sampling,
        thus meaningless to recalculate filter & gate every time."""
        self.conditional_filter = self.filter_linear(condition).unsqueeze(dim=-1)
        self.conditional_gate = self.gate_linear(condition).unsqueeze(dim=-1)
class ResidualStack(nn.Module):
    """Stack of ResidualBlocks: Has no layers of its own.
    Handles residual summation and sampling.
    Arguments
    -----------
    layer_size : int
        Size of layer, determines exponential part of receptive field.
    stack_size : int
        Size of stack, determines linear part of receptive field.
        Repeats layer stack_size times.
    residual_channels : int
        Number of channels of input sequence & residual sum.
    dilation_channels : int
        Number of channels of intermediate layers within ResidualBlock.
    skip_channels : int
        Number of channels of accumulated output.
    condition_channels : int
        Number of channels of global condition.
    kernel_size : int
        Size of kernel of dilated convolutions.
    Parameters
    -----------
    dilations : list
        List of dilations for each ResidualBlock.
    res_blocks : torch.nn.ModuleList
        ModuleList of ResidualBlocks.
    norm : float
        sqrt(number of blocks); divides the skip sum in forward().
    Methods
    -----------
    forward
        Arguments
        -----------
        target : torch.Tensor
            Input sequence.
        condition : torch.Tensor
            Global condition.
        output_length : int
            Length of output tensor.
        Returns
        ----------
        res_sum : torch.Tensor
            Residual sum of all ResidualBlocks.
    """
    def __init__(
        self,
        layer_size,
        stack_size,
        residual_channels,
        dilation_channels,
        skip_channels,
        condition_channels,
        kernel_size
    ):
        super(ResidualStack, self).__init__()
        # Dilations 1, 2, 4, ..., 2^(layer_size-1), repeated stack_size times.
        self.dilations = [2 ** i for i in range(layer_size)] * stack_size
        self.res_blocks = nn.ModuleList([
            ResidualBlock(
                residual_channels,
                dilation_channels,
                skip_channels,
                condition_channels,
                kernel_size,
                dilation
            ) for dilation in self.dilations
        ])
        self.norm = sqrt(len(self.res_blocks))
    def forward(self, target, condition, output_length):
        # Broadcastable zero so the first block can add its skip output.
        res_sum = zeros((1, 1, 1), device=target.device)
        for res_block in self.res_blocks:
            res_block.output_length = output_length
            target, res_sum = res_block(target, condition, res_sum)
        return res_sum / self.norm
    def sample_forward(self, target):
        """Sampling function, operates at O(stack_size * layer_size * kernel_size).
        Samples one time step at a time.
        All queues of each individual ResidualBlock needs to be filled first before sampling.
        Arguments
        -----------
        target : torch.Tensor
            Input sequence, to be extended by sampling.
        Returns
        -----------
        res_sum : torch.Tensor
            Residual sum of all ResidualBlocks."""
        res_sum = 0
        for res_block in self.res_blocks:
            res_block.output_length = 1
            # Pop one delayed activation per kernel tap, push the fresh one
            # back so the queue stays `dilation` steps deep.
            tops = [target] + [que.get() for que in res_block.queues]
            for que, top in zip(res_block.queues, tops[:-1]):
                que.put(top)
            target = cat(tops[::-1], dim=-1)
            target, res_sum = res_block(target, res_sum=res_sum, sample=True)
        # NOTE(review): unlike forward(), the result is NOT divided by
        # self.norm here — confirm whether sampling should match the
        # training-time scaling.
        return res_sum
    def fill_queues(self, target, condition):
        """Prepares ResidualBlock for sampling mode.
        Calls set_condition and fills queues for each res_block.
        Arguments
        -----------
        target : torch.Tensor
            Input sequence for filling queues.
        condition : torch.Tensor
            Global condition for set_condition.
        Returns
        ----------
        Does not return anything."""
        for res_block in self.res_blocks:
            res_block.output_length = 1
            for i, que in enumerate(res_block.queues):
                with que.mutex:
                    que.queue.clear()
                # Seed the queue with the last `dilation` activations.
                # NOTE(review): assumes `target` is at least one receptive
                # field long; the `-dilation * i + j - 1` offset looks tuned
                # for kernel_size == 2 — confirm for larger kernels.
                for j in range(-res_block.dilation, 0):
                    que.put(target[..., -res_block.dilation * i + j - 1].unsqueeze(dim=-1))
            res_block.set_condition(condition)
            target, _ = res_block(target, sample=True)
class PostProcess(nn.Module):
    """Final head: ReLU -> 1x1 conv -> ReLU -> 1x1 conv.

    Maps the accumulated skip connections to per-timestep logits over the
    output channels.  The ReLU is in-place, so the incoming skip-sum tensor
    is modified by forward().
    """

    def __init__(self, skip_channels, end_channels, channels):
        super(PostProcess, self).__init__()
        self.conv1 = nn.Conv1d(skip_channels, end_channels, 1)
        self.conv2 = nn.Conv1d(end_channels, channels, 1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, target):
        """Return logits of shape (batch, channels, time)."""
        hidden = self.conv1(self.relu(target))
        return self.conv2(self.relu(hidden))
class Wavenet(nn.Module):
    """Base module of Wavenet.
    Supports sampling via Fast Wavenet.
    Forward function calculates loss, for output call get_output.
    Arguments
    -----------
    layer_size : int
        Size of layer, determines exponential part of receptive field.
    stack_size : int
        Size of stack, determines linear part of receptive field.
    channels : int
        Number of channels of input sequence & output.
    embedding_channels : int
        Number of channels of output of embedding.
    residual_channels : int
        Number of channels of residual sum.
    dilation_channels : int
        Number of channels of intermediate layers within ResidualBlock.
    skip_channels : int
        Number of channels of accumulated outputs.
    end_channels : int
        Number of channels of intermediate layer in PostProcess.
    condition_channels : int
        Number of channels of global condition.
    kernel_size : int
        Size of kernel of dilated convolutions.
    Parameters
    ------------
    receptive_field : int
        Receptive field of res_stacks.
    embedding : torch.nn.Embedding
        Embedding layer for token to embedding vector conversion.
    causal : DilatedConv1d
        Causal convolution layer for temporal alignment.
    res_stacks : ResidualStack
        ResidualStack for dilated convolutions.
    post : PostProcess
        PostProcess for final convolution layers.
    loss : torch.nn.CrossEntropyLoss
        CrossEntropyLoss for loss calculation.
    Methods
    ----------
    forward
        Arguments
        -----------
        target : torch.Tensor
            Input sequence.
        condition : torch.Tensor
            Global condition.
        output_length : int
            Length of output.
        Returns
        -----------
        loss : torch.Tensor
            CrossEntropyLoss of output.
    """
    def __init__(
        self,
        layer_size,
        stack_size,
        channels,
        embedding_channels,
        residual_channels,
        dilation_channels,
        skip_channels,
        end_channels,
        condition_channels,
        kernel_size
    ):
        super(Wavenet, self).__init__()
        # Sum of dilations: stack_size repetitions of 1 + 2 + ... + 2^(L-1),
        # each scaled by (kernel_size - 1).
        self.receptive_field = (2 ** layer_size - 1) * stack_size * (kernel_size - 1)
        self.embedding = nn.Embedding(channels, embedding_channels)
        self.causal = DilatedConv1d(
            embedding_channels,
            residual_channels,
            kernel_size=2,
            dilation=1
        )
        self.res_stacks = ResidualStack(
            layer_size,
            stack_size,
            residual_channels,
            dilation_channels,
            skip_channels,
            condition_channels,
            kernel_size
        )
        self.post = PostProcess(skip_channels, end_channels, channels)
        self.loss = nn.CrossEntropyLoss()
    def get_output(self, target, condition, output_length):
        """Returns raw output from Wavenet.
        Specify output length.
        Arguments
        -----------
        target : torch.Tensor
            Input sequence.
        condition : torch.Tensor
            Global condition.
        output_length : int
            Length of output.
        Returns
        ----------
        output : torch.Tensor
            Output of Wavenet."""
        # Drop the final token: position t is predicted only from tokens < t
        # (teacher forcing; causal_pad restores the lost length).
        target = target[..., :-1]
        output = self.embedding(target).transpose(1, 2)
        output = causal_pad(output)
        output = self.causal(output)
        output = self.res_stacks(output, condition, output_length)
        output = self.post(output)
        return output
    def forward(self, target, condition, output_length):
        output = self.get_output(target, condition, output_length)
        # Compare logits against the last `output_length` ground-truth tokens
        # (CrossEntropyLoss consumes class logits along dim 1).
        loss = self.loss(output, target[:, -output_length:])
        return loss
    def sample_output(self, target):
        """Output function used for sampling purposes.
        Global condition must be primed with fill_queues.
        Arguments
        -----------
        target : torch.Tensor
            Input snippet.
        Returns
        -----------
        output : torch.Tensor
            Output time step."""
        output = self.embedding(target).transpose(1, 2)
        output = causal_pad(output)
        # [..., 1:] discards the padded position; only the newest step is fed
        # through the queues in sample_forward.
        output = self.causal(output)[..., 1:]
        output = self.res_stacks.sample_forward(output)
        output = self.post(output)
        return output
    def fill_queues(self, target, condition):
        """Global condition & queue primer function.
        Fills queues of ResidualStack with input sequence.
        Global condition is reused throughout task, improving speed.
        Arguments
        -----------
        target : torch.Tensor
            Input sequence.
        condition : torch.Tensor
            Global condition.
        Returns
        -----------
        Does not return anything."""
        target = self.embedding(target).transpose(1, 2)
        target = causal_pad(target)
        target = self.causal(target)
        self.res_stacks.fill_queues(target, condition)
| 28.602024 | 93 | 0.592418 |
7f5cdf8adf6390830e80b01eeafc4210eece9135 | 2,346 | py | Python | py3status/modules/docker.py | kevna/py3status | 8861944cd2facacd8d697958fe81513be3640fcb | [
"BSD-3-Clause"
] | null | null | null | py3status/modules/docker.py | kevna/py3status | 8861944cd2facacd8d697958fe81513be3640fcb | [
"BSD-3-Clause"
] | null | null | null | py3status/modules/docker.py | kevna/py3status | 8861944cd2facacd8d697958fe81513be3640fcb | [
"BSD-3-Clause"
] | null | null | null | r"""
Display properties of running docker containers.
Configuration parameters:
containers: filter the list by container or image name (default [])
delimiter: separator between containers (default ' ')
format: format to display each container (default '{name} {image}')
Format placeholders:
{name} instance name (--name= on run, or randomly generated)
{short_id} shortened unique container id
{id} full container id
{status} running state of container
{image} name of the image running in the container
{gateway} the gateway IP to interact with the container
Requires:
docker: python client for the docker container engine
Examples:
```
docker {
containers = ["mysql:*"]
delimiter = "|"
format = "{name}({image})- {gateway}"
}
```
@author la kevna (Aaron Moore)
SAMPLE OUTPUT
{'full_text': u'vigorous_ride mysql:latest'}
"""
import docker
from fnmatch import fnmatch
class Py3status:
    """py3status module listing running Docker containers."""

    # available configuration parameters
    containers = []
    delimiter = " "
    format = "{name} {image}"

    def post_config_hook(self):
        # Connect to the Docker daemon via the standard environment
        # variables (DOCKER_HOST etc.).
        self.client = docker.from_env()

    def _match_containers(self):
        """Yield running containers whose image or name matches a glob.

        With an empty `containers` config, every running container matches.
        """
        running = self.client.containers.list()
        if not self.containers:
            yield from running
            return
        for cont in running:
            image_name = cont.attrs["Config"]["Image"]
            image_hit = any(fnmatch(image_name, glob) for glob in self.containers)
            name_hit = any(fnmatch(cont.name, glob) for glob in self.containers)
            if image_hit or name_hit:
                yield cont

    def _print(self, container):
        """Render one container through the configured format string."""
        attrs = container.attrs
        return self.format.format(
            name=container.name,
            short_id=container.short_id,
            id=container.id,
            status=container.status,
            image=attrs["Config"]["Image"],
            gateway=attrs["NetworkSettings"]["Gateway"]
        )

    def docker(self):
        """
        docker response
        """
        rendered = (self._print(c) for c in self._match_containers())
        return {"full_text": self.delimiter.join(rendered)}
# Allow running this module directly for a quick visual check in py3status.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test
    module_test(Py3status)
| 26.066667 | 84 | 0.620205 |
2051eba2f4e2d06a77dde793b3a1d3d5b724fb13 | 4,927 | py | Python | chances/methods.py | bjtho08/chances | ec713de1e11f7c2d9997507b07658df9f3369555 | [
"MIT"
] | 5 | 2018-10-09T11:00:39.000Z | 2021-07-19T06:21:29.000Z | chances/methods.py | bjtho08/chances | ec713de1e11f7c2d9997507b07658df9f3369555 | [
"MIT"
] | 13 | 2019-02-09T10:53:06.000Z | 2019-09-26T21:37:05.000Z | chances/methods.py | bjtho08/chances | ec713de1e11f7c2d9997507b07658df9f3369555 | [
"MIT"
] | 2 | 2019-03-07T10:05:01.000Z | 2019-09-25T12:29:07.000Z | import numpy as np
import random
from .sobol.sobol_seq import i4_sobol_generate
from .hypercube.hycusampling import halton, korobov_design_matrix
from .hypercube.hycusampling import improved_lhd_matrix, lhd_matrix
from .lhs_sudoku import sudoku
from .quantum import cached_generator, randint
class Randomizer:
    '''Create various random sequences.

    Each public method draws ``n`` distinct samples from ``range(max_value)``
    (or the corresponding low-discrepancy / external generator) and returns
    them as a list.
    '''
    def __init__(self, max_value, n):
        '''
        max_value : int
            maximum value in the sequence
        n : int
            number of samples to be drawn
        '''
        self.len = max_value
        self.n = n
    def uniform_mersenne(self):
        '''Regular uniform / pseudorandom sequence (Mersenne Twister).'''
        return random.sample(range(self.len), k=self.n)
    def uniform_crypto(self):
        '''Cryptographically sound pseudorandom sequence'''
        try:
            from secrets import randbelow
        except ImportError:
            print('Python3.6 is required >> mersenne returned instead')
            r = Randomizer(self.len, self.n)
            return r.uniform_mersenne()
        out = []
        # Track membership in a set: the original `num not in out` list scan
        # made this loop quadratic in n.
        seen = set()
        while len(out) < self.n:
            num = randbelow(self.len)
            if num not in seen:
                seen.add(num)
                out.append(num)
        return out
    def latin_sudoku(self, dims=2, sudoku_boxes=1):
        '''Latin Hypercube with Sudoku-style Constraint.
        M. D. McKay, R. J. Beckman, W. J. Conover, 1979.
        dims :: number of dimensions
        sudoku_boxes :: number of boxes to use as constraint
        '''
        if self.len % sudoku_boxes != 0:
            raise ValueError('Index len must be divisible by sudoku_boxes')
        n = self.len // sudoku_boxes
        out = sudoku.sample(dims, sudoku_boxes, n)
        # Keep only the first coordinate of the first design.
        out = [i[0] for i in out[0]]
        return out[:self.n]
    def latin_improved(self):
        '''Improved latin hypercube design (first column).'''
        out = [i[0] for i in improved_lhd_matrix(self.len, 1)]
        return out[:self.n]
    def latin_matrix(self):
        '''Plain latin hypercube design (first column).'''
        out = [i[0] for i in lhd_matrix(self.len, 1)]
        return out[:self.n]
    def sobol(self):
        '''Creates an index based on Sobol Sequence'''
        org = [i[0] for i in i4_sobol_generate(1, self.len)]
        return self._match_index(org)[:self.n]
    def halton(self):
        '''Creates an index based on Halton Sequence'''
        org = [i[0] for i in halton(self.len, 1, 5)]
        return self._match_index(org)[:self.n]
    def korobov_matrix(self):
        '''Returns a 1-d array of integers in the Korobov Design Matrix'''
        out = [i for i in korobov_design_matrix(self.len, 2)[:, 1]]
        return out[:self.n]
    def ambience(self):
        '''An ambient sound based TRNG using RANDOM.ORG API'''
        import time
        import random
        from .random_org.random_org import random_org
        if self.len > 10000:
            print("Due to API limitations, ambience method is for 10^4 range.")
        out = []
        # Keep polling until at least n distinct values were collected;
        # sleep briefly between API calls to respect rate limits.
        while len(out) < self.n:
            temp = random_org(self.n, 0, self.len)
            out = list(set(out + temp))
            time.sleep(.1)
        random.shuffle(out)
        # Bug fix: previously sliced to self.len, returning up to max_value
        # items instead of the n requested, unlike every other method.
        return out[:self.n]
    def quantum(self):
        '''Quantum Random Number Generator
        NOTE: this method can only return 1024 random numbers.
        DESCRIPTION
        ===========
        The random numbers are generated in real-time in ANU lab by measuring
        the quantum fluctuations of the vacuum. The vacuum is described very
        differently in the quantum mechanical context than in the classical
        context. Traditionally, a vacuum is considered as a space that is
        empty of matter or photons. Quantum mechanically, however, that same
        space resembles a sea of virtual particles appearing and disappearing
        all the time. This result is due to the fact that the vacuum still
        possesses a zero-point energy. Consequently, the electromagnetic
        field of the vacuum exhibits random fluctuations in phase and amplitude
        at all frequencies. By carefully measuring these fluctuations, we are
        able to generate ultra-high bandwidth random numbers.
        EXAMPLE
        =======
        test = quantum_random(200, minimum=10, maximum=20)
        randhist(test)
        PARAMETERS
        ==========
        n = number of integer values to return
        minimum = min value for integers
        maximum = max value for integers
        '''
        out = []
        gen = cached_generator()
        for i in range(self.n):
            out.append(int(randint(min=0, max=self.len, generator=gen)))
        return out
    def _match_index(self, org):
        '''Helper to match sequence with index and
        reorganize index accordingly.'''
        # Pair each value with its position, sort by value, and return the
        # positions in that order (an argsort of `org`).
        temp = np.array(list(zip(org, list(range(self.len)))))
        out = temp[temp[:, 0].argsort()][:, 1].astype(int).tolist()
        return out
| 26.632432 | 79 | 0.603816 |
82f054b4ceb245bd1cfb715f83913fdcf8476439 | 3,405 | py | Python | gui/qt/address_dialog.py | lazyboozer/electrum-desire | 42d204d9e7deef17b18bf9d7f43ce5c45cda5fc8 | [
"MIT"
] | null | null | null | gui/qt/address_dialog.py | lazyboozer/electrum-desire | 42d204d9e7deef17b18bf9d7f43ce5c45cda5fc8 | [
"MIT"
] | null | null | null | gui/qt/address_dialog.py | lazyboozer/electrum-desire | 42d204d9e7deef17b18bf9d7f43ce5c45cda5fc8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from electrum_desire.i18n import _
import PyQt4
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from util import *
from history_list import HistoryList
from qrtextedit import ShowQRTextEdit
class AddressDialog(WindowModalDialog):
    """Read-only dialog showing an address, its public keys / redeem script
    (when the wallet can provide them) and its transaction history."""
    def __init__(self, parent, address):
        WindowModalDialog.__init__(self, parent, _("Address"))
        self.address = address
        self.parent = parent
        self.config = parent.config
        self.wallet = parent.wallet
        self.app = parent.app
        # NOTE(review): `saved` is never read in this class — presumably
        # consumed by the parent window's dialog bookkeeping; confirm.
        self.saved = True
        self.setMinimumWidth(700)
        vbox = QVBoxLayout()
        self.setLayout(vbox)
        # Address line with copy and QR-code buttons.
        vbox.addWidget(QLabel(_("Address:")))
        self.addr_e = ButtonsLineEdit(self.address)
        self.addr_e.addCopyButton(self.app)
        self.addr_e.addButton(":icons/qrcode.png", self.show_qr, _("Show QR Code"))
        self.addr_e.setReadOnly(True)
        vbox.addWidget(self.addr_e)
        # Public keys are optional: some wallet types cannot derive them,
        # so failures are deliberately swallowed (best-effort display).
        try:
            pubkeys = self.wallet.get_public_keys(address)
        except BaseException as e:
            pubkeys = None
        if pubkeys:
            vbox.addWidget(QLabel(_("Public keys") + ':'))
            for pubkey in pubkeys:
                pubkey_e = ButtonsLineEdit(pubkey)
                pubkey_e.addCopyButton(self.app)
                vbox.addWidget(pubkey_e)
        # Same best-effort treatment for the redeem script (multisig only).
        try:
            redeem_script = self.wallet.pubkeys_to_redeem_script(pubkeys)
        except BaseException as e:
            redeem_script = None
        if redeem_script:
            vbox.addWidget(QLabel(_("Redeem Script") + ':'))
            redeem_e = ShowQRTextEdit(text=redeem_script)
            redeem_e.addCopyButton(self.app)
            vbox.addWidget(redeem_e)
        # History list restricted to this address via get_domain.
        vbox.addWidget(QLabel(_("History")))
        self.hw = HistoryList(self.parent)
        self.hw.get_domain = self.get_domain
        vbox.addWidget(self.hw)
        vbox.addLayout(Buttons(CloseButton(self)))
        self.format_amount = self.parent.format_amount
        self.hw.update()
    def get_domain(self):
        # Limits the HistoryList to transactions touching this address.
        return [self.address]
    def show_qr(self):
        # Delegates to the main window's QR dialog; shows the error inline
        # if rendering fails.
        text = self.address
        try:
            self.parent.show_qrcode(text, 'Address')
        except Exception as e:
            self.show_message(str(e))
| 35.103093 | 83 | 0.674302 |
3b8d8433264893f28bdace38626ec433bbfe86bf | 3,687 | py | Python | expertai/nlapi/common/model/token.py | DavidBakerEffendi/nlapi-python | a12fe8abc710824f2b8abe2154d0f193276a4ac7 | [
"Apache-2.0"
] | 36 | 2020-07-06T07:21:12.000Z | 2022-03-28T01:34:12.000Z | expertai/nlapi/common/model/token.py | DavidBakerEffendi/nlapi-python | a12fe8abc710824f2b8abe2154d0f193276a4ac7 | [
"Apache-2.0"
] | 14 | 2020-09-16T17:53:01.000Z | 2022-03-17T14:48:01.000Z | expertai/nlapi/common/model/token.py | DavidBakerEffendi/nlapi-python | a12fe8abc710824f2b8abe2154d0f193276a4ac7 | [
"Apache-2.0"
] | 14 | 2020-10-23T14:55:42.000Z | 2021-11-08T19:23:23.000Z | # Copyright (c) 2020 original authors
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from expertai.nlapi.common.errors import ETypeError, EValueError
from expertai.nlapi.common.model.atom import Atom
from expertai.nlapi.common.model.dependency import Dependency
from expertai.nlapi.common.model.position import Position
from expertai.nlapi.common.model.vsyncon import VSyncon
class Token(Position):
    def __init__(
        self,
        start,
        end,
        syncon,
        pos,
        lemma,
        paragraph,
        sentence,
        phrase,
        dependency=None,
        atoms=None,
        morphology=None,
        vsyn=None,
        type=None,
        type_=None,
    ):
        """Initialise the Token object.

        To minimise the `abuse` of the Python `type` keyword, the
        initialisation method also accepts `type_`. The former argument
        is used when the initialisation is nested inside other
        data-model classes. In these cases the __init__ receives a
        dictionary containing `type` not `type_`, because that's how the
        response the server sends is defined. Otherwise the object
        can be directly initialised using the second alternative.
        Again, to mitigate this name clash with the reserved keyword
        the property was suffixed with the underscore.

        Raises ETypeError/EValueError when `atoms` or `vsyn` have the
        wrong shape.
        """
        super().__init__(start=start, end=end)
        self._syncon = syncon
        self._pos = pos
        self._lemma = lemma
        self._atoms = []
        self._vsyn = None
        self._dependency = None
        if dependency:
            self._dependency = Dependency(**dependency)
        self._morphology = morphology
        self._paragraph = paragraph
        self._sentence = sentence
        self._phrase = phrase
        self._type = type or type_
        # Bug fix: the default used to be a shared mutable list (`atoms=[]`);
        # None now stands for "no atoms" and is normalised here.
        if atoms is None:
            atoms = []
        if not isinstance(atoms, list):
            raise ETypeError(list, atoms)
        for atom in atoms:
            if not isinstance(atom, dict):
                raise ETypeError(dict, atom)
            if not atom:
                raise EValueError(atom, "Token.atom")
            self._atoms.append(Atom(**atom))
        if vsyn:
            if not isinstance(vsyn, dict):
                raise ETypeError(dict, vsyn)
            self._vsyn = VSyncon(**vsyn)
@property
def syncon(self):
return self._syncon
@property
def pos(self):
return self._pos
@property
def lemma(self):
return self._lemma
@property
def dependency(self):
return self._dependency
@property
def morphology(self):
return self._morphology
@property
def paragraph(self):
return self._paragraph
@property
def sentence(self):
return self._sentence
@property
def phrase(self):
return self._phrase
@property
def atoms(self):
return self._atoms
@property
def vsyn(self):
return self._vsyn
@property
def type_(self):
return self._type
def __str__(self):
return "{},{},{}".format(self.start, self.end, self.lemma)
| 27.311111 | 74 | 0.632221 |
9418291929df9c455865794f10ae530d107cca0d | 17,647 | py | Python | tests/test_utils.py | marcoffee/typed-argument-parser | 1dac0c92a707dd7399a69f42213508cd2746b104 | [
"MIT"
] | null | null | null | tests/test_utils.py | marcoffee/typed-argument-parser | 1dac0c92a707dd7399a69f42213508cd2746b104 | [
"MIT"
] | null | null | null | tests/test_utils.py | marcoffee/typed-argument-parser | 1dac0c92a707dd7399a69f42213508cd2746b104 | [
"MIT"
] | null | null | null | from collections import OrderedDict
import json
import os
import platform
import subprocess
from tempfile import TemporaryDirectory
from typing import Any, Callable, List, Dict, Set, Tuple, Union
import unittest
from unittest import TestCase
from typing_extensions import Literal
from tap.utils import (
has_git,
get_class_column,
get_class_variables,
get_git_root,
get_git_url,
has_uncommitted_changes,
type_to_str,
get_literals,
TupleTypeEnforcer,
_nested_replace_type,
define_python_object_encoder,
UnpicklableObject,
as_python_object
)
class GitTests(TestCase):
    """Tests of the git helper functions against a throwaway repository.

    setUp creates a fresh repo (with one commit and an `origin` remote)
    inside a temporary directory and chdirs into it; tearDown restores
    the previous working directory and removes the repo.
    """

    def setUp(self) -> None:
        self.temp_dir = TemporaryDirectory()
        self.prev_dir = os.getcwd()
        os.chdir(self.temp_dir.name)
        subprocess.check_output(['git', 'init'])
        self.url = 'https://github.com/test_account/test_repo'
        subprocess.check_output(['git', 'remote', 'add', 'origin', f'{self.url}.git'])
        # NOTE(review): `touch` is a POSIX tool — presumably these tests are
        # not expected to run on bare Windows despite the tearDown handling.
        subprocess.check_output(['touch', 'README.md'])
        subprocess.check_output(['git', 'add', 'README.md'])
        subprocess.check_output(['git', 'commit', '-m', 'Initial commit'])

    def tearDown(self) -> None:
        os.chdir(self.prev_dir)
        # Add permissions to temporary directory to enable cleanup in Windows
        for root, dirs, files in os.walk(self.temp_dir.name):
            for name in dirs + files:
                os.chmod(os.path.join(root, name), 0o777)
        self.temp_dir.cleanup()

    def test_has_git_true(self) -> None:
        self.assertTrue(has_git())

    def test_has_git_false(self) -> None:
        # Outside any repository, has_git() must report False.
        with TemporaryDirectory() as temp_dir_no_git:
            os.chdir(temp_dir_no_git)
            self.assertFalse(has_git())
        os.chdir(self.temp_dir.name)

    def test_get_git_root(self) -> None:
        # Ideally should be self.temp_dir.name == get_git_root() but the OS may add a prefix like /private
        self.assertTrue(get_git_root().endswith(self.temp_dir.name.replace('\\', '/')))

    def test_get_git_root_subdir(self) -> None:
        subdir = os.path.join(self.temp_dir.name, 'subdir')
        os.makedirs(subdir)
        os.chdir(subdir)
        # Ideally should be self.temp_dir.name == get_git_root() but the OS may add a prefix like /private
        self.assertTrue(get_git_root().endswith(self.temp_dir.name.replace('\\', '/')))
        os.chdir(self.temp_dir.name)

    def test_get_git_url_https(self) -> None:
        self.assertEqual(get_git_url(commit_hash=False), self.url)

    def test_get_git_url_https_hash(self) -> None:
        url = f'{self.url}/tree/'
        self.assertEqual(get_git_url(commit_hash=True)[:len(url)], url)

    def test_get_git_url_ssh(self) -> None:
        # SSH-style remotes must be normalised to the https URL.
        subprocess.run(['git', 'remote', 'set-url', 'origin', 'git@github.com:test_account/test_repo.git'])
        self.assertEqual(get_git_url(commit_hash=False), self.url)

    def test_get_git_url_ssh_hash(self) -> None:
        subprocess.run(['git', 'remote', 'set-url', 'origin', 'git@github.com:test_account/test_repo.git'])
        url = f'{self.url}/tree/'
        self.assertEqual(get_git_url(commit_hash=True)[:len(url)], url)

    def test_get_git_url_https_enterprise(self) -> None:
        # GitHub Enterprise hosts (non-github.com domains) must also work.
        true_url = 'https://github.tap.com/test_account/test_repo'
        subprocess.run(['git', 'remote', 'set-url', 'origin', f'{true_url}.git'])
        self.assertEqual(get_git_url(commit_hash=False), true_url)

    def test_get_git_url_https_hash_enterprise(self) -> None:
        true_url = 'https://github.tap.com/test_account/test_repo'
        subprocess.run(['git', 'remote', 'set-url', 'origin', f'{true_url}.git'])
        url = f'{true_url}/tree/'
        self.assertEqual(get_git_url(commit_hash=True)[:len(url)], url)

    def test_get_git_url_ssh_enterprise(self) -> None:
        true_url = 'https://github.tap.com/test_account/test_repo'
        subprocess.run(['git', 'remote', 'set-url', 'origin', 'git@github.tap.com:test_account/test_repo.git'])
        self.assertEqual(get_git_url(commit_hash=False), true_url)

    def test_get_git_url_ssh_hash_enterprise(self) -> None:
        true_url = 'https://github.tap.com/test_account/test_repo'
        subprocess.run(['git', 'remote', 'set-url', 'origin', 'git@github.tap.com:test_account/test_repo.git'])
        url = f'{true_url}/tree/'
        self.assertEqual(get_git_url(commit_hash=True)[:len(url)], url)

    def test_has_uncommitted_changes_false(self) -> None:
        self.assertFalse(has_uncommitted_changes())

    def test_has_uncommited_changes_true(self) -> None:
        # An untracked file counts as an uncommitted change.
        subprocess.run(['touch', 'main.py'])
        self.assertTrue(has_uncommitted_changes())
class TypeToStrTests(TestCase):
    """Checks that type_to_str renders typing constructs as canonical strings."""

    def test_type_to_str(self) -> None:
        # (type, expected rendering) pairs covering builtins, Any,
        # Callable, the common generic containers, and Union.
        cases = [
            (str, 'str'),
            (int, 'int'),
            (float, 'float'),
            (bool, 'bool'),
            (Any, 'Any'),
            (Callable[[str], str], 'Callable[[str], str]'),
            (Callable[[str, int], Tuple[float, bool]],
             'Callable[[str, int], Tuple[float, bool]]'),
            (List[int], 'List[int]'),
            (List[str], 'List[str]'),
            (List[float], 'List[float]'),
            (List[bool], 'List[bool]'),
            (Set[int], 'Set[int]'),
            (Dict[str, int], 'Dict[str, int]'),
            (Union[List[int], Dict[float, bool]],
             'Union[List[int], Dict[float, bool]]'),
        ]
        for tp, expected in cases:
            self.assertEqual(type_to_str(tp), expected)
class ClassColumnTests(TestCase):
    """Tests of get_class_column (the source column of class variables).

    NOTE(review): these nested classes are indentation-sensitive — the
    expected value 12 assumes standard 4-space indentation at this
    nesting depth (TestCase method body -> nested class body). Confirm
    against the upstream file if results differ.
    """

    def test_column_simple(self):
        class SimpleColumn:
            arg = 2

        self.assertEqual(get_class_column(SimpleColumn), 12)

    def test_column_comment(self):
        class CommentColumn:
            """hello
            there
            hi
            """
            arg = 2

        self.assertEqual(get_class_column(CommentColumn), 12)

    def test_column_space(self):
        class SpaceColumn:

            arg = 2

        self.assertEqual(get_class_column(SpaceColumn), 12)

    def test_column_method(self):
        class FuncColumn:
            def func(self):
                pass

        self.assertEqual(get_class_column(FuncColumn), 12)
class ClassVariableTests(TestCase):
    """Tests of get_class_variables: extraction of class variables and
    their attached comments (inline `#` comments plus trailing
    docstring-style strings).

    NOTE(review): the bodies of the nested classes — in particular the
    multiline string literals — are indentation- and blank-line-
    sensitive; they were reconstructed here to match the asserted
    expected comments. Confirm against the upstream file.
    """

    def test_no_variables(self):
        class NoVariables:
            pass

        self.assertEqual(get_class_variables(NoVariables), OrderedDict())

    def test_one_variable(self):
        class OneVariable:
            arg = 2

        class_variables = OrderedDict()
        class_variables['arg'] = {'comment': ''}
        self.assertEqual(get_class_variables(OneVariable), class_variables)

    def test_multiple_variable(self):
        class MultiVariable:
            arg_1 = 2
            arg_2 = 3

        class_variables = OrderedDict()
        class_variables['arg_1'] = {'comment': ''}
        class_variables['arg_2'] = {'comment': ''}
        self.assertEqual(get_class_variables(MultiVariable), class_variables)

    def test_typed_variables(self):
        class TypedVariable:
            arg_1: str
            arg_2: int = 3

        class_variables = OrderedDict()
        class_variables['arg_1'] = {'comment': ''}
        class_variables['arg_2'] = {'comment': ''}
        self.assertEqual(get_class_variables(TypedVariable), class_variables)

    def test_separated_variables(self):
        # Methods and stray comments between variables must be skipped.
        class SeparatedVariable:
            """Comment
            """
            arg_1: str

            # Hello
            def func(self):
                pass

            arg_2: int = 3
            """More comment"""

        class_variables = OrderedDict()
        class_variables['arg_1'] = {'comment': ''}
        class_variables['arg_2'] = {'comment': 'More comment'}
        self.assertEqual(get_class_variables(SeparatedVariable), class_variables)

    def test_commented_variables(self):
        class CommentedVariable:
            """Comment
            """
            arg_1: str  # Arg 1 comment

            # Hello
            def func(self):
                pass

            arg_2: int = 3  # Arg 2 comment
            arg_3 : Dict[str, int]  # noqa E203,E262 Poorly formatted comment
            """More comment"""

        class_variables = OrderedDict()
        class_variables['arg_1'] = {'comment': 'Arg 1 comment'}
        class_variables['arg_2'] = {'comment': 'Arg 2 comment'}
        class_variables['arg_3'] = {'comment': 'noqa E203,E262 Poorly formatted comment More comment'}
        self.assertEqual(get_class_variables(CommentedVariable), class_variables)

    def test_bad_spacing_multiline(self):
        # The inline comment and the trailing docstring are concatenated;
        # relative indentation inside the docstring must be preserved.
        class TrickyMultiline:
            """ This is really difficult
            so
            so very difficult
            """
            foo: str = 'my'  # Header line
            """ Footer
            T
             A
             P

             multi
             line!!
            """

        class_variables = OrderedDict()
        comment = 'Header line Footer\nT\n A\n P\n\n multi\n line!!'
        class_variables['foo'] = {'comment': comment}
        self.assertEqual(get_class_variables(TrickyMultiline), class_variables)

    def test_single_quote_multiline(self):
        class SingleQuoteMultiline:
            bar: int = 0
            '''biz baz'''

        class_variables = OrderedDict()
        class_variables['bar'] = {'comment': 'biz baz'}
        self.assertEqual(get_class_variables(SingleQuoteMultiline), class_variables)

    def test_functions_with_docs_multiline(self):
        # Variables local to a method (and its docstrings) must be ignored.
        class FunctionsWithDocs:
            i: int = 0

            def f(self):
                """Function"""
                a: str = 'hello'  # noqa F841
                """with docs"""

        class_variables = OrderedDict()
        class_variables['i'] = {'comment': ''}
        self.assertEqual(get_class_variables(FunctionsWithDocs), class_variables)
class GetLiteralsTests(TestCase):
    """Tests of get_literals: extracting the allowed values of a Literal
    type together with a parser that maps the string form back to the
    original (possibly non-string) literal.
    """

    def test_get_literals_string(self) -> None:
        literal_f, shapes = get_literals(Literal['square', 'triangle', 'circle'], 'shape')
        self.assertEqual(shapes, ['square', 'triangle', 'circle'])
        self.assertEqual(literal_f('square'), 'square')
        self.assertEqual(literal_f('triangle'), 'triangle')
        self.assertEqual(literal_f('circle'), 'circle')

    def test_get_literals_primitives(self) -> None:
        # Non-string literals round-trip through their str() representation.
        literals = [True, 'one', 2, 3.14]
        literal_f, prims = get_literals(Literal[True, 'one', 2, 3.14], 'number')
        self.assertEqual(prims, literals)
        self.assertEqual([literal_f(str(p)) for p in prims], literals)

    def test_get_literals_uniqueness(self) -> None:
        # 2 and '2' share the same string form, so they must be rejected.
        with self.assertRaises(ValueError):
            get_literals(Literal['two', 2, '2'], 'number')

    def test_get_literals_empty(self) -> None:
        literal_f, prims = get_literals(Literal, 'hi')
        self.assertEqual(prims, [])
class TupleTypeEnforcerTests(TestCase):
    """Tests of TupleTypeEnforcer, which converts successive string
    arguments to a fixed sequence of types (one type consumed per call,
    optionally looping).
    """

    def test_tuple_type_enforcer_zero_types(self):
        # With no types left, a call must fail rather than guess.
        enforcer = TupleTypeEnforcer(types=[])
        with self.assertRaises(IndexError):
            enforcer('hi')

    def test_tuple_type_enforcer_one_type_str(self):
        enforcer = TupleTypeEnforcer(types=[str])
        self.assertEqual(enforcer('hi'), 'hi')

    def test_tuple_type_enforcer_one_type_int(self):
        enforcer = TupleTypeEnforcer(types=[int])
        self.assertEqual(enforcer('123'), 123)

    def test_tuple_type_enforcer_one_type_float(self):
        enforcer = TupleTypeEnforcer(types=[float])
        self.assertEqual(enforcer('3.14159'), 3.14159)

    def test_tuple_type_enforcer_one_type_bool(self):
        # Bool parsing is case-insensitive, accepts prefixes of true/false,
        # and also accepts '1'/'0'. A fresh enforcer per call since each
        # call consumes one type.
        enforcer = TupleTypeEnforcer(types=[bool])
        self.assertEqual(enforcer('True'), True)

        enforcer = TupleTypeEnforcer(types=[bool])
        self.assertEqual(enforcer('true'), True)

        enforcer = TupleTypeEnforcer(types=[bool])
        self.assertEqual(enforcer('False'), False)

        enforcer = TupleTypeEnforcer(types=[bool])
        self.assertEqual(enforcer('false'), False)

        enforcer = TupleTypeEnforcer(types=[bool])
        self.assertEqual(enforcer('tRu'), True)

        enforcer = TupleTypeEnforcer(types=[bool])
        self.assertEqual(enforcer('faL'), False)

        enforcer = TupleTypeEnforcer(types=[bool])
        self.assertEqual(enforcer('1'), True)

        enforcer = TupleTypeEnforcer(types=[bool])
        self.assertEqual(enforcer('0'), False)

    def test_tuple_type_enforcer_multi_types_same(self):
        enforcer = TupleTypeEnforcer(types=[str, str])
        args = ['hi', 'bye']
        output = [enforcer(arg) for arg in args]
        self.assertEqual(output, args)

        enforcer = TupleTypeEnforcer(types=[int, int, int])
        args = [123, 456, -789]
        output = [enforcer(str(arg)) for arg in args]
        self.assertEqual(output, args)

        enforcer = TupleTypeEnforcer(types=[float, float, float, float])
        args = [1.23, 4.56, -7.89, 3.14159]
        output = [enforcer(str(arg)) for arg in args]
        self.assertEqual(output, args)

        enforcer = TupleTypeEnforcer(types=[bool, bool, bool, bool, bool])
        args = ['True', 'False', '1', '0', 'tru']
        true_output = [True, False, True, False, True]
        output = [enforcer(str(arg)) for arg in args]
        self.assertEqual(output, true_output)

    def test_tuple_type_enforcer_multi_types_different(self):
        enforcer = TupleTypeEnforcer(types=[str, int, float, bool])
        args = ['hello', 77, 0.2, 'tru']
        true_output = ['hello', 77, 0.2, True]
        output = [enforcer(str(arg)) for arg in args]
        self.assertEqual(output, true_output)

    def test_tuple_type_enforcer_infinite(self):
        # loop=True reuses the type list for variable-length tuples.
        enforcer = TupleTypeEnforcer(types=[int], loop=True)
        args = [1, 2, -5, 20]
        output = [enforcer(str(arg)) for arg in args]
        self.assertEqual(output, args)
class NestedReplaceTypeTests(TestCase):
    """Tests of _nested_replace_type: recursively replacing one container
    type with another throughout an arbitrarily nested structure, without
    mutating the input.
    """

    def test_nested_replace_type_notype(self):
        # No instances of the target type: result equals the input.
        obj = ['123', 4, 5, ('hello', 4.4)]
        replaced_obj = _nested_replace_type(obj, bool, int)
        self.assertEqual(obj, replaced_obj)

    def test_nested_replace_type_unnested(self):
        obj = ['123', 4, 5, ('hello', 4.4), True, False, 'hi there']
        replaced_obj = _nested_replace_type(obj, tuple, list)
        correct_obj = ['123', 4, 5, ['hello', 4.4], True, False, 'hi there']
        self.assertNotEqual(obj, replaced_obj)
        self.assertEqual(correct_obj, replaced_obj)

    def test_nested_replace_type_nested(self):
        # Tuples at every nesting level (including inside dict values)
        # must become lists.
        obj = ['123', [4, (1, 2, (3, 4))], 5, ('hello', (4,), 4.4), {'1': [2, 3, [{'2': (3, 10)}, ' hi ']]}]
        replaced_obj = _nested_replace_type(obj, tuple, list)
        correct_obj = ['123', [4, [1, 2, [3, 4]]], 5, ['hello', [4], 4.4], {'1': [2, 3, [{'2': [3, 10]}, ' hi ']]}]
        self.assertNotEqual(obj, replaced_obj)
        self.assertEqual(correct_obj, replaced_obj)
class Person:
    """Minimal value object used to exercise pickling of custom classes."""

    def __init__(self, name: str) -> None:
        self.name = name

    def __eq__(self, other: Any) -> bool:
        # Equal iff the other object is also a Person with the same name.
        if not isinstance(other, Person):
            return False
        return other.name == self.name
class PythonObjectEncoderTests(TestCase):
    """Round-trip tests of the custom JSON encoder/decoder pair that
    handles non-JSON-native Python objects (tuples, sets, arbitrary
    picklable classes) via define_python_object_encoder and
    as_python_object.
    """

    def test_python_object_encoder_simple_types(self):
        obj = [1, 2, 'hi', 'bye', 7.3, [1, 2, 'blarg'], True, False, None]
        dumps = json.dumps(obj, indent=4, sort_keys=True, cls=define_python_object_encoder())
        recreated_obj = json.loads(dumps, object_hook=as_python_object)
        self.assertEqual(recreated_obj, obj)

    def test_python_object_encoder_tuple(self):
        # Tuples are not JSON-native; they must survive the round trip
        # as tuples, not lists.
        obj = [1, 2, 'hi', 'bye', 7.3, (1, 2, 'blarg'), [('hi', 'bye'), 2], {'hi': {'bye': (3, 4)}}, True, False, None]
        dumps = json.dumps(obj, indent=4, sort_keys=True, cls=define_python_object_encoder())
        recreated_obj = json.loads(dumps, object_hook=as_python_object)
        self.assertEqual(recreated_obj, obj)

    def test_python_object_encoder_set(self):
        obj = [1, 2, 'hi', 'bye', 7.3, {1, 2, 'blarg'}, [{'hi', 'bye'}, 2], {'hi': {'bye': {3, 4}}}, True, False, None]
        dumps = json.dumps(obj, indent=4, sort_keys=True, cls=define_python_object_encoder())
        recreated_obj = json.loads(dumps, object_hook=as_python_object)
        self.assertEqual(recreated_obj, obj)

    def test_python_object_encoder_complex(self):
        # Arbitrary picklable user classes (Person) mixed with containers.
        obj = [1, 2, 'hi', 'bye', 7.3, {1, 2, 'blarg'}, [('hi', 'bye'), 2], {'hi': {'bye': {3, 4}}}, True, False, None,
              (Person('tappy'), Person('tapper'))]
        dumps = json.dumps(obj, indent=4, sort_keys=True, cls=define_python_object_encoder())
        recreated_obj = json.loads(dumps, object_hook=as_python_object)
        self.assertEqual(recreated_obj, obj)

    def test_python_object_encoder_unpicklable(self):
        class CannotPickleThis:
            """Da na na na. Can't pickle this. """
            def __init__(self):
                self.x = 1

        obj = [1, CannotPickleThis()]
        expected_obj = [1, UnpicklableObject()]
        # Strict mode: unpicklable members raise.
        with self.assertRaises(ValueError):
            dumps = json.dumps(obj, indent=4, sort_keys=True, cls=define_python_object_encoder())

        # Lenient mode: unpicklable members become UnpicklableObject markers.
        dumps = json.dumps(obj, indent=4, sort_keys=True, cls=define_python_object_encoder(True))
        recreated_obj = json.loads(dumps, object_hook=as_python_object)
        self.assertEqual(recreated_obj, expected_obj)
if __name__ == '__main__':
    # Allow running this test module directly: `python test_utils.py`.
    unittest.main()
| 38.363043 | 119 | 0.628209 |
6a9cf3a0698042c30b148c5f909efb2e9e1c5e21 | 1,127 | py | Python | one_point_stats/utils/__init__.py | piyanatk/one_point_stats | b3d4ca0ea0ffc373b95ea62098bee009dd634755 | [
"MIT"
] | null | null | null | one_point_stats/utils/__init__.py | piyanatk/one_point_stats | b3d4ca0ea0ffc373b95ea62098bee009dd634755 | [
"MIT"
] | null | null | null | one_point_stats/utils/__init__.py | piyanatk/one_point_stats | b3d4ca0ea0ffc373b95ea62098bee009dd634755 | [
"MIT"
] | null | null | null | from .conversion import *
from .data import *
from .io import *
from .settings import *
from .fitting import *
from .interpolate import *
from .reproject import *
from .smooth import *
# Public API of the utils subpackage. Every name listed here must be
# provided by one of the star-imported submodules above; keep the two
# lists in sync when adding helpers.
__all__ = ['beam_area', 'd2dms', 'dist2psf_pix', 'gaussian_fwhm2std',
           'gaussian_std2fwhm', 'h2hms24', 'h2hms_signed', 'impix2dist',
           'impix2uvdist', 'jybeam2k', 'jysr2k', 'k2jybeam', 'k2jysr',
           'lst2gha', 'uvdist2impix',
           'bin_freqs', 'crop_arr', 'gen_radius_array', 'gen_radial_mask',
           'is_empty_list', 'radial_profile', 'LazyProperty',
           'check_dir', 'set2df',
           'MWA_LOC', 'MWA_FIELD_EOR0', 'MWA_FIELD_EOR1', 'MWA_FIELD_EOR2',
           'MWA_FREQ_EOR_ALL_40KHZ', 'MWA_FREQ_EOR_ALL_80KHZ',
           'MWA_FREQ_EOR_HI_40KHZ', 'MWA_FREQ_EOR_HI_80KHZ',
           'MWA_FREQ_EOR_LOW_40KHZ', 'MWA_FREQ_EOR_LOW_80KHZ',
           'HERA_ANT_DICT', 'F21', 'get_channel_indexes_per_bin',
           'fit_gaussian1d', 'fit_multi_gaussian1d', 'box_degrade',
           'healpix2sine', 'cube2healpix', 'gaussian_smooth', 'smooth_healpix',
           'sum_gaussian_smoothed_maps']
| 45.08 | 79 | 0.65661 |
94be1bbd8af2e54faa801d5427dc0efed02110b5 | 4,501 | py | Python | article/plots/lem_Te_salhi_example.py | pytaunay/physics-of-cathodes | 4e0bee6b9224d7808dfd09e1b7ea23b297197e94 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | article/plots/lem_Te_salhi_example.py | pytaunay/physics-of-cathodes | 4e0bee6b9224d7808dfd09e1b7ea23b297197e94 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | article/plots/lem_Te_salhi_example.py | pytaunay/physics-of-cathodes | 4e0bee6b9224d7808dfd09e1b7ea23b297197e94 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2021 Pierre-Yves Camille Regis Taunay
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
File: lem_Te_salhi_example.py
Author: Pierre-Yves Taunay
Date: March, 2021
Description: generate Fig. 12a and 12b in Part 1 of Physics of Thermionic Orificed Hollow Cathodes.
We only consider Salhi's cathode for this example.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
### Path to HDF5 file
path_to_results = '../../results/salhi_xe.h5'

### Generate a dataframe out of results for the following parameters:
# Discharge current = 1-20 A
# Mass flow rate = 0.5 eqA (6 sccm)
# Neutral gas temperature = 2000 - 4000 K
# Sheath voltage = 1-10 V
key_root = 'Xe/simulations/results/'
key_end = ['r20210304172101','r20210304172637','r20210304173212']
Tgvec = [2000,3000,4000]

# Create a list for each dataframe
dlist = []
for TgK, ke in zip(Tgvec,key_end):
    # Create the key
    # 'Xe/simulations/results/<temperature>/insert/r<UTC time results were written>'
    key = key_root + str(TgK) + '/insert/' + ke

    # Read the dataframe
    d = pd.read_hdf(path_to_results,key=key)
    dlist.append(d)

# Append everything to the first dataframe
# NOTE(review): DataFrame.append is deprecated in pandas >= 1.4;
# pd.concat(dlist) is the modern equivalent.
for d in dlist[1:]:
    dlist[0] = dlist[0].append(d)

# Aggregate dataframe
dfall = dlist[0].copy()

fig, ax = plt.subplots(1,2)

### Find the minimum and maximum bounds for each discharge current
Idvec = np.unique(dfall['dischargeCurrent'])
mdvec = np.unique(dfall['massFlowRate_sccm'])
dc = np.unique(dfall['insertDiameter'])

min_lem = np.zeros_like(Idvec)
max_lem = np.zeros_like(Idvec)
min_te = np.zeros_like(Idvec)
max_te = np.zeros_like(Idvec)

# Plot the bounds and fill the area for each mass flow rate (there is only one here)
for idx, md in enumerate(mdvec):
    dfx = dfall[dfall['massFlowRate_sccm'] == md]

    # Change color and style depending on the mass flow
    if idx == 0:
        style = 'k--'
        color = (0.7,0.7,0.7,0.5)
    else:
        style = 'k-.'
        color = (0.1,0.1,0.1,0.5)

    # Populate vectors to plot: min/max over all sheath voltages and gas
    # temperatures at each discharge current.
    for kk, Id in enumerate(Idvec):
        dfxx = dfx[dfx['dischargeCurrent'] == Id]
        min_te[kk] = np.nanmin(dfxx['insertElectronTemperature'])
        max_te[kk] = np.nanmax(dfxx['insertElectronTemperature'])
        min_lem[kk] = np.nanmin(dfxx['emissionLength'])/dc
        max_lem[kk] = np.nanmax(dfxx['emissionLength'])/dc

    # Plot
    ax[0].fill_between(Idvec,min_lem,max_lem,color=color)
    ax[0].plot(Idvec,min_lem,style)
    ax[0].plot(Idvec,max_lem,style)
    ax[1].fill_between(Idvec,min_te,max_te,color=color)
    ax[1].plot(Idvec,min_te,style)
    ax[1].plot(Idvec,max_te,style)

### Experimental data
# Columns: discharge current (A), value, error bar.
xp_lem = np.array([
    [5.0,0.344379987000234,0.0813386106947],
    [9.0,0.31421574796719,0.0742309185405],
    [12.0,0.282429429616658,0.0668072441952],
    [15,0.439504506861141,0.127464586651],
])
ax[0].errorbar(xp_lem[:,0], xp_lem[:,1], yerr=xp_lem[:,2], fmt='ks')

xp_te = np.array([
    [3,1.00,0.1,],
    [5,1.11301642931159,0.5],
    [9,1.08529116781942,0.5],
    [10,0.98,0.12,],
    [15,0.966499173349343,0.5],
    [20,1.07,0.15],
])
ax[1].errorbar(xp_te[:,0], xp_te[:,1], yerr=xp_te[:,2], fmt='ks')

### Labels
ax[0].set_xlabel('Discharge current (A)')
ax[0].set_ylabel('Emission length / insert diameter')
ax[0].set_ylim([0,1])
ax[0].set_xlim([1,25])
ax[1].set_xlabel('Discharge current (A)')
ax[1].set_ylabel('Insert electron temperature (eV)')
ax[1].set_ylim([0,3])
ax[1].set_xlim([1,25])

plt.show()
| 31.921986 | 99 | 0.699178 |
b324ae1af713f839bd8c9003731f6228d2f22788 | 10,482 | py | Python | catkin_ws/src/line_detector/src/line_detector_node.py | kjoelovelife/Knight_car | 5d5909ce495094fdc46bb87be2e28be8ba718854 | [
"CC-BY-2.0"
] | 7 | 2018-10-28T05:49:18.000Z | 2020-05-12T06:31:44.000Z | catkin_ws/src/line_detector/src/line_detector_node.py | kjoelovelife/Knight_car | 5d5909ce495094fdc46bb87be2e28be8ba718854 | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src/line_detector/src/line_detector_node.py | kjoelovelife/Knight_car | 5d5909ce495094fdc46bb87be2e28be8ba718854 | [
"CC-BY-2.0"
] | 4 | 2019-08-17T03:36:07.000Z | 2020-09-29T02:19:46.000Z | #!/usr/bin/env python
from anti_instagram.AntiInstagram import *
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import (AntiInstagramTransform, BoolStamped, Segment,
SegmentList, Vector2D)
from duckietown_utils.instantiate_utils import instantiate
from duckietown_utils.jpg import image_cv_from_jpg
from geometry_msgs.msg import Point
from sensor_msgs.msg import CompressedImage, Image
from visualization_msgs.msg import Marker
from line_detector.line_detector_plot import *
from line_detector.timekeeper import TimeKeeper
import cv2
import numpy as np
import rospy
import threading
import time
class LineDetectorNode(object):
    """ROS node that detects white/yellow/red lane markings in camera frames.

    Subscribes to a compressed image topic, runs a pluggable line
    detector on a resized/cropped/color-corrected copy of each frame,
    and publishes the detected segments in normalized image coordinates
    (plus debug images when the ~verbose parameter is set).
    """

    def __init__(self):
        self.node_name = "LineDetectorNode"

        # Thread lock: only one frame is processed at a time; frames that
        # arrive while busy are dropped (see processImage).
        self.thread_lock = threading.Lock()

        # Constructor of line detector
        self.bridge = CvBridge()

        self.active = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))

        # Re-read parameters every 2 seconds so ~verbose can be toggled live.
        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)

    def updateParams(self, _event):
        """Refresh ROS parameters; lazily build the detector and debug publishers."""
        old_verbose = self.verbose
        self.verbose = rospy.get_param('~verbose', True)
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        if self.detector is None:
            # ~detector is a [class name, constructor kwargs] pair.
            c = rospy.get_param('~detector')
            assert isinstance(c, list) and len(c) == 2, c
            # if str(self.detector_config) != str(c):
            self.loginfo('new detector config: %s' % str(c))
            self.detector = instantiate(c[0], c[1])
            # self.detector_config = c

        if self.verbose and self.pub_edge is None:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)

    def cbSwitch(self, switch_msg):
        # Enable/disable frame processing from the FSM switch topic.
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        self.stats.received()

        if not self.active:
            return
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.setDaemon(True)
        thread.start()
        # Returns rightaway

    def cbTransform(self, transform_msg):
        # Anti-Instagram publishes a 6-vector: shift (first 3) + scale (last 3).
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        # True once every intermittent_interval processed frames.
        return self.intermittent_counter % self.intermittent_interval == 1

    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        # Non-blocking acquire: drop the frame if the previous one is
        # still being processed.
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):
        """Full pipeline for one frame: decode, resize/crop, color-correct,
        detect lines, publish segments (and debug images when verbose)."""
        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals
        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        # (undo the top crop, then scale pixel coords into [0, 1]).
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
        if len(white.lines) > 0:
            lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
        if len(yellow.lines) > 0:
            lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
        if len(red.lines) > 0:
            lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))

        self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (len(white.lines),
                len(yellow.lines), len(red.lines)))
        #self.loginfo("self.verbose %d" % self.verbose)

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below
        if self.verbose:
            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            # if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())

    def onShutdown(self):
        self.loginfo("Shutdown.")

    def toSegmentMsg(self, lines, normals, color):
        """Convert (N, 4) line endpoints + (N, 2) normals into Segment messages."""
        segmentMsgList = []
        for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y
            segmentMsgList.append(segment)
        return segmentMsgList
class Stats():
    """Frame counters (received/processed/skipped) with rate reporting.

    reset() starts a new timing window; info() summarises rates over the
    current window.
    """

    def __init__(self):
        self.nresets = 0
        self.reset()

    def reset(self):
        """Zero all counters and restart the timing window."""
        self.nresets += 1
        self.t0 = time.time()

        self.nreceived = 0
        self.nskipped = 0
        self.nprocessed = 0

    def received(self):
        # Log only for the very first frame ever seen by the node.
        if self.nreceived == 0 and self.nresets == 1:
            rospy.loginfo('line_detector_node received first image.')
        self.nreceived += 1

    def skipped(self):
        self.nskipped += 1

    def processed(self):
        if self.nprocessed == 0 and self.nresets == 1:
            rospy.loginfo('line_detector_node processing first image.')
        self.nprocessed += 1

    def info(self):
        """Return a one-line summary of counts and rates for this window."""
        # BUGFIX: guard against a zero-length window (coarse clocks or an
        # immediate call after reset), which made fps() raise
        # ZeroDivisionError. The floor is far below any real elapsed time.
        delta = max(time.time() - self.t0, 1e-9)

        if self.nreceived:
            skipped_perc = (100.0 * self.nskipped / self.nreceived)
        else:
            skipped_perc = 0

        def fps(x):
            return '%.1f fps' % (x / delta)

        m = ('In the last %.1f s: received %d (%s) processed %d (%s) skipped %d (%s) (%1.f%%)' %
             (delta, self.nreceived, fps(self.nreceived),
              self.nprocessed, fps(self.nprocessed),
              self.nskipped, fps(self.nskipped), skipped_perc))
        return m
if __name__ == '__main__':
    # Standard ROS node entry point: initialize, register the shutdown
    # hook, then spin until rospy is shut down.
    rospy.init_node('line_detector',anonymous=False)
    line_detector_node = LineDetectorNode()
    rospy.on_shutdown(line_detector_node.onShutdown)
    rospy.spin()
| 33.382166 | 122 | 0.623545 |
3e03557f60352afec59748ad92df639073644856 | 2,396 | py | Python | polycircles/test/test_exceptions.py | JMSchietekat/polycircles | 26f46bb77c234ac0aec756131f599f1651a559da | [
"MIT"
] | 9 | 2016-07-04T08:57:57.000Z | 2021-04-30T16:02:12.000Z | polycircles/test/test_exceptions.py | JMSchietekat/polycircles | 26f46bb77c234ac0aec756131f599f1651a559da | [
"MIT"
] | 11 | 2016-06-30T19:36:24.000Z | 2021-12-04T21:20:23.000Z | polycircles/test/test_exceptions.py | JMSchietekat/polycircles | 26f46bb77c234ac0aec756131f599f1651a559da | [
"MIT"
] | 7 | 2015-11-15T02:38:38.000Z | 2021-12-04T09:16:49.000Z | import unittest
from polycircles import polycircles
from nose.tools import raises
class TestExceptions(unittest.TestCase):
"""Tests that the right exceptions are raised for erroneous inputs."""
@raises(AssertionError)
def test_less_than_3_vertices_no_1(self):
polycircle = polycircles.Polycircle(latitude=30,
longitude=30,
radius=100,
number_of_vertices=2)
@raises(AssertionError)
def test_less_than_3_vertices_no_2(self):
polycircle = polycircles.Polycircle(latitude=30,
longitude=30,
radius=100,
number_of_vertices=-3)
@raises(AssertionError)
def test_less_than_3_vertices_no_3(self):
polycircle = polycircles.Polycircle(latitude=30,
longitude=30,
radius=100,
number_of_vertices=0)
@raises(AssertionError)
def test_erroneous_latitude_1(self):
polycircle = polycircles.Polycircle(latitude=-100,
longitude=30,
radius=100)
@raises(AssertionError)
def test_erroneous_latitude_2(self):
polycircle = polycircles.Polycircle(latitude=100,
longitude=30,
radius=100)
@raises(AssertionError)
def test_erroneous_latitude_3(self):
polycircle = polycircles.Polycircle(latitude=200,
longitude=30,
radius=100)
@raises(AssertionError)
def test_erroneous_longitude_1(self):
polycircle = polycircles.Polycircle(latitude=30,
longitude=-200,
radius=100)
@raises(AssertionError)
def test_erroneous_longitude_2(self):
polycircle = polycircles.Polycircle(latitude=30,
longitude=200,
radius=100)
if __name__ == '__main__':
    # Bug fix: `unittest.main` takes `verbosity`, not `verbose` — the old
    # keyword raised a TypeError as soon as the module was run directly.
    unittest.main(verbosity=2)
4a6585716152091240a687bf3aa6d557cb0efc83 | 928 | py | Python | gamechangerml/scripts/using_existing_models.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | null | null | null | gamechangerml/scripts/using_existing_models.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | 76 | 2021-07-24T02:33:16.000Z | 2022-03-20T22:40:46.000Z | gamechangerml/scripts/using_existing_models.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | null | null | null | from gamechangerml.src.search.semantic.models import D2V
from gamechangerml.src.text_handling.entity import Phrase_Detector
from gamechangerml.src.text_handling.process import preprocess
from gamechangerml import REPO_PATH
import os
model_dir = os.path.join(
REPO_PATH,
"gamechangerml/src/modelzoo/semantic/models"
)
model_name = "2020072720_model.d2v"
phrase_detector = Phrase_Detector("id")
phrase_detector.load(model_dir)
model = D2V("id")
model.load(f"{model_dir}/{model_name}")
tokens = preprocess(
"National Park",
min_len=1,
phrase_detector=phrase_detector,
remove_stopwords=True,
)
print(model.infer(tokens))
tokens = preprocess(
"National Parks",
min_len=1,
phrase_detector=phrase_detector,
remove_stopwords=True,
)
print(model.infer(tokens))
tokens = preprocess(
"taxes", min_len=1, phrase_detector=phrase_detector, remove_stopwords=True
)
print(model.infer(tokens))
| 23.2 | 78 | 0.769397 |
9366ee3309d307e9d0a810a1418c3dcbae1b5bd2 | 478 | py | Python | python/taichi/tools/__init__.py | nasnoisaac/taichi | 11f8777d878e7afe336b1e0e3e1f2fd00013693d | [
"MIT"
] | null | null | null | python/taichi/tools/__init__.py | nasnoisaac/taichi | 11f8777d878e7afe336b1e0e3e1f2fd00013693d | [
"MIT"
] | null | null | null | python/taichi/tools/__init__.py | nasnoisaac/taichi | 11f8777d878e7afe336b1e0e3e1f2fd00013693d | [
"MIT"
] | null | null | null | from .image import imdisplay, imread, imresize, imshow, imwrite
from .np2ply import PLYWriter
from .util import *
# Don't import taichi_logo here which will cause circular import.
# If you need it, just import from taichi.tools.patterns
from .video import VideoManager
# Public API of taichi.tools. The first seven names come from the explicit
# imports above; 'dump_dot', 'dot_to_pdf', 'get_kernel_stats' and
# 'set_gdb_trigger' are presumably re-exported by the `from .util import *`
# star import — confirm against taichi/tools/util.py.
__all__ = [
    'PLYWriter',
    'VideoManager',
    'imdisplay',
    'imread',
    'imresize',
    'imshow',
    'imwrite',
    'dump_dot',
    'dot_to_pdf',
    'get_kernel_stats',
    'set_gdb_trigger',
]
| 22.761905 | 65 | 0.6841 |
273053706eb091d00c356012f2acc55b6e7dceab | 11,744 | py | Python | scripts/tests/test_encoder_decoder.py | davisliang/gluon-nlp | 18a736dbb55c80c2de82d73b923c3cd3d9d53591 | [
"Apache-2.0"
] | 7 | 2019-12-05T02:49:07.000Z | 2020-08-17T01:11:59.000Z | scripts/tests/test_encoder_decoder.py | davisliang/gluon-nlp | 18a736dbb55c80c2de82d73b923c3cd3d9d53591 | [
"Apache-2.0"
] | null | null | null | scripts/tests/test_encoder_decoder.py | davisliang/gluon-nlp | 18a736dbb55c80c2de82d73b923c3cd3d9d53591 | [
"Apache-2.0"
] | 3 | 2021-03-12T04:41:00.000Z | 2021-03-12T04:41:24.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import mxnet as mx
from mxnet.test_utils import assert_almost_equal
from ..machine_translation.gnmt import *
from gluonnlp.model.transformer import *
from gluonnlp.model.transformer import TransformerDecoder
def test_gnmt_encoder():
    """Check GNMTEncoder across cell types and layer/residual configs:
    output shape is (batch, seq, hidden), one state per layer, and output
    positions past each sequence's valid length are zero-masked.
    """
    ctx = mx.Context.default_ctx
    for cell_type in ["lstm", "gru", "relu_rnn", "tanh_rnn"]:
        for num_layers, num_bi_layers in [(2, 1), (3, 0)]:
            for use_residual in [False, True]:
                encoder = GNMTEncoder(cell_type=cell_type, num_layers=num_layers,
                                      num_bi_layers=num_bi_layers, hidden_size=8,
                                      dropout=0.0, use_residual=use_residual,
                                      prefix='gnmt_encoder_')
                encoder.initialize(ctx=ctx)
                encoder.hybridize()
                for batch_size in [4]:
                    for seq_length in [5, 10]:
                        inputs_nd = mx.nd.random.normal(0, 1, shape=(batch_size, seq_length, 4), ctx=ctx)
                        valid_length_nd = mx.nd.array(np.random.randint(1, seq_length,
                                                                        size=(batch_size,)), ctx=ctx)
                        encoder_outputs, _ = encoder(inputs_nd, valid_length=valid_length_nd)
                        valid_length_npy = valid_length_nd.asnumpy()
                        rnn_output = encoder_outputs[0].asnumpy()
                        # Every timestep at or beyond the valid length must be zeroed.
                        for i in range(batch_size):
                            if valid_length_npy[i] < seq_length - 1:
                                padded_out = rnn_output[i, int(valid_length_npy[i]):, :]
                                assert_almost_equal(padded_out, np.zeros_like(padded_out), 1E-6, 1E-6)
                        assert(encoder_outputs[0].shape == (batch_size, seq_length, 8))
                        assert(len(encoder_outputs[1]) == num_layers)
def test_gnmt_encoder_decoder():
    """End-to-end GNMT encoder/decoder: decode_seq output has shape
    (batch, tgt_seq, hidden), positions past the target valid length are
    zero-masked, and — when attention is requested — attention weights are
    zero over padded source positions and sum to 1 over valid ones.
    """
    ctx = mx.Context.default_ctx
    num_hidden = 8
    encoder = GNMTEncoder(cell_type="lstm", num_layers=3, num_bi_layers=1, hidden_size=num_hidden,
                          dropout=0.0, use_residual=True, prefix='gnmt_encoder_')
    encoder.initialize(ctx=ctx)
    encoder.hybridize()
    for output_attention in [True, False]:
        for use_residual in [True, False]:
            decoder = GNMTDecoder(cell_type="lstm", num_layers=3, hidden_size=num_hidden, dropout=0.0,
                                  output_attention=output_attention, use_residual=use_residual, prefix='gnmt_decoder_')
            decoder.initialize(ctx=ctx)
            decoder.hybridize()
            for batch_size in [4]:
                for src_seq_length, tgt_seq_length in [(5, 10), (10, 5)]:
                    src_seq_nd = mx.nd.random.normal(0, 1, shape=(batch_size, src_seq_length, 4), ctx=ctx)
                    tgt_seq_nd = mx.nd.random.normal(0, 1, shape=(batch_size, tgt_seq_length, 4), ctx=ctx)
                    src_valid_length_nd = mx.nd.array(np.random.randint(1, src_seq_length, size=(batch_size,)), ctx=ctx)
                    tgt_valid_length_nd = mx.nd.array(np.random.randint(1, tgt_seq_length, size=(batch_size,)), ctx=ctx)
                    src_valid_length_npy = src_valid_length_nd.asnumpy()
                    tgt_valid_length_npy = tgt_valid_length_nd.asnumpy()
                    encoder_outputs, _ = encoder(src_seq_nd, valid_length=src_valid_length_nd)
                    decoder_states = decoder.init_state_from_encoder(encoder_outputs, src_valid_length_nd)
                    # Test multi step forwarding
                    output, new_states, additional_outputs = decoder.decode_seq(tgt_seq_nd,
                                                                                decoder_states,
                                                                                tgt_valid_length_nd)
                    assert(output.shape == (batch_size, tgt_seq_length, num_hidden))
                    output_npy = output.asnumpy()
                    for i in range(batch_size):
                        tgt_v_len = int(tgt_valid_length_npy[i])
                        if tgt_v_len < tgt_seq_length - 1:
                            assert((output_npy[i, tgt_v_len:, :] == 0).all())
                    if output_attention:
                        assert(len(additional_outputs) == 1)
                        attention_out = additional_outputs[0].asnumpy()
                        assert(attention_out.shape == (batch_size, tgt_seq_length, src_seq_length))
                        for i in range(batch_size):
                            mem_v_len = int(src_valid_length_npy[i])
                            if mem_v_len < src_seq_length - 1:
                                # Attention onto padded source positions must be zero...
                                assert((attention_out[i, :, mem_v_len:] == 0).all())
                            if mem_v_len > 0:
                                # ...and each row must normalize to 1 over valid positions.
                                assert_almost_equal(attention_out[i, :, :].sum(axis=-1),
                                                    np.ones(attention_out.shape[1]))
                    else:
                        assert(len(additional_outputs) == 0)
def test_transformer_encoder():
    """Check TransformerEncoder: output shape (batch, seq, units=16),
    zero-masking past valid lengths, and — when output_attention is on —
    one attention tensor per layer, masked and normalized per head.
    """
    ctx = mx.Context.default_ctx
    for num_layers in range(1, 3):
        for output_attention in [True, False]:
            for use_residual in [False, True]:
                encoder = TransformerEncoder(num_layers=num_layers, max_length=10,
                                             units=16, hidden_size=32, num_heads=8,
                                             dropout=0.0, use_residual=use_residual,
                                             output_attention=output_attention, prefix='transformer_encoder_')
                encoder.initialize(ctx=ctx)
                encoder.hybridize()
                for batch_size in [4]:
                    for seq_length in [5, 10]:
                        inputs_nd = mx.nd.random.normal(0, 1, shape=(batch_size, seq_length, 16), ctx=ctx)
                        valid_length_nd = mx.nd.array(np.random.randint(1, seq_length,
                                                                        size=(batch_size,)), ctx=ctx)
                        encoder_outputs, additional_outputs = encoder(inputs_nd, valid_length=valid_length_nd)
                        valid_length_npy = valid_length_nd.asnumpy()
                        encoder_outputs = encoder_outputs.asnumpy()
                        # Positions at or beyond the valid length must be zeroed.
                        for i in range(batch_size):
                            if valid_length_npy[i] < seq_length - 1:
                                padded_out = encoder_outputs[i, int(valid_length_npy[i]):, :]
                                assert_almost_equal(padded_out, np.zeros_like(padded_out), 1E-6, 1E-6)
                        assert(encoder_outputs.shape == (batch_size, seq_length, 16))
                        if output_attention:
                            assert(len(additional_outputs) == num_layers)
                            # First layer's self-attention: (batch, heads=8, query, key).
                            attention_out = additional_outputs[0][0].asnumpy()
                            assert(attention_out.shape == (batch_size, 8, seq_length, seq_length))
                            for i in range(batch_size):
                                mem_v_len = int(valid_length_npy[i])
                                if mem_v_len < seq_length - 1:
                                    assert((attention_out[i, :, :, mem_v_len:] == 0).all())
                                if mem_v_len > 0:
                                    assert_almost_equal(attention_out[i, :, :, :].sum(axis=-1),
                                                        np.ones(attention_out.shape[1:3]))
                        else:
                            assert(len(additional_outputs) == 0)
def test_transformer_encoder_decoder():
    """End-to-end Transformer encoder/decoder: decode_seq output shape,
    zero-masking past the target valid length, and — when attention is
    requested — encoder-decoder attention masked over padded source
    positions and normalized over valid ones.
    """
    ctx = mx.Context.default_ctx
    units = 16
    encoder = TransformerEncoder(num_layers=3, units=units, hidden_size=32, num_heads=8, max_length=10,
                                 dropout=0.0, use_residual=True, prefix='transformer_encoder_')
    encoder.initialize(ctx=ctx)
    encoder.hybridize()
    for output_attention in [True, False]:
        for use_residual in [True, False]:
            decoder = TransformerDecoder(num_layers=3, units=units, hidden_size=32, num_heads=8, max_length=10, dropout=0.0,
                                         output_attention=output_attention, use_residual=use_residual, prefix='transformer_decoder_')
            decoder.initialize(ctx=ctx)
            decoder.hybridize()
            for batch_size in [4]:
                for src_seq_length, tgt_seq_length in [(5, 10), (10, 5)]:
                    src_seq_nd = mx.nd.random.normal(0, 1, shape=(batch_size, src_seq_length, units), ctx=ctx)
                    tgt_seq_nd = mx.nd.random.normal(0, 1, shape=(batch_size, tgt_seq_length, units), ctx=ctx)
                    src_valid_length_nd = mx.nd.array(np.random.randint(1, src_seq_length, size=(batch_size,)), ctx=ctx)
                    tgt_valid_length_nd = mx.nd.array(np.random.randint(1, tgt_seq_length, size=(batch_size,)), ctx=ctx)
                    src_valid_length_npy = src_valid_length_nd.asnumpy()
                    tgt_valid_length_npy = tgt_valid_length_nd.asnumpy()
                    encoder_outputs, _ = encoder(src_seq_nd, valid_length=src_valid_length_nd)
                    decoder_states = decoder.init_state_from_encoder(encoder_outputs, src_valid_length_nd)
                    # Test multi step forwarding
                    output, new_states, additional_outputs = decoder.decode_seq(tgt_seq_nd,
                                                                                decoder_states,
                                                                                tgt_valid_length_nd)
                    assert(output.shape == (batch_size, tgt_seq_length, units))
                    output_npy = output.asnumpy()
                    for i in range(batch_size):
                        tgt_v_len = int(tgt_valid_length_npy[i])
                        if tgt_v_len < tgt_seq_length - 1:
                            assert((output_npy[i, tgt_v_len:, :] == 0).all())
                    if output_attention:
                        assert(len(additional_outputs) == 3)
                        # Index [0][1]: first layer's encoder-decoder attention,
                        # shaped (batch, heads=8, tgt_seq, src_seq).
                        attention_out = additional_outputs[0][1].asnumpy()
                        assert(attention_out.shape == (batch_size, 8, tgt_seq_length, src_seq_length))
                        for i in range(batch_size):
                            mem_v_len = int(src_valid_length_npy[i])
                            if mem_v_len < src_seq_length - 1:
                                assert((attention_out[i, :, :, mem_v_len:] == 0).all())
                            if mem_v_len > 0:
                                assert_almost_equal(attention_out[i, :, :, :].sum(axis=-1),
                                                    np.ones(attention_out.shape[1:3]))
                    else:
                        assert(len(additional_outputs) == 0)
| 62.802139 | 133 | 0.539595 |
48d52bb257c5a09265819f87a74756d32f5d7459 | 1,869 | py | Python | monolith/modules/gitlab_repository_search.py | ronfury/monolith | 7b8c872c9058317497f37411c846b12ab08470b3 | [
"MIT"
] | 13 | 2020-10-24T05:47:52.000Z | 2021-10-17T18:40:12.000Z | monolith/modules/gitlab_repository_search.py | ronfury/monolith | 7b8c872c9058317497f37411c846b12ab08470b3 | [
"MIT"
] | null | null | null | monolith/modules/gitlab_repository_search.py | ronfury/monolith | 7b8c872c9058317497f37411c846b12ab08470b3 | [
"MIT"
] | 10 | 2020-11-10T23:26:15.000Z | 2022-02-04T11:05:32.000Z | from .monomodule import MonoModule
import requests
import datetime
import lxml.html
import urllib.parse
description = '''This module searches gitlab repository.
Set search keyword as a Query.
https://gitlab.com/explore/projects
'''
class CustomModule(MonoModule):
    """Monolith module that searches GitLab's public project explorer
    (https://gitlab.com/explore/projects) for a keyword and reports
    matching repositories.
    """

    def set(self):
        """Register module metadata and default query settings."""
        self.name = 'gitlab'
        self.module_description = description
        self.default_query['module'] = self.name
        self.default_query['module_description'] = self.module_description
        self.default_query['params'] = []
        self.default_query['expire_date'] = 180
        self.default_query['enable'] = True
        self.default_query['channel'] = ''
        self.extra_interval['hours'] = 6

    def search(self):
        """Query GitLab's explore page and store links of matching repos."""
        word = self.query['query']
        if word.find(' ') > 0:
            # Bug fix: str.replace returns a new string; the original code
            # discarded the result, so multi-word queries were never split
            # into individually quoted phrases.
            word = word.replace(' ', '\" \"')
        word = urllib.parse.quote('\"' + word + '\"')
        url = 'https://gitlab.com/explore/projects?utf8=%E2%9C%93&name=' + word + '&sort=latest_activity_desc'
        result = requests.get(url, timeout=10)
        statuscode = result.status_code
        root = lxml.html.fromstring(result.text)
        if statuscode == 200:
            codes = [{'repo': a.get('href'), 'repo:link': 'https://gitlab.com' + a.get('href')}
                     for a in root.xpath('//div/a[@class="project"]')]
            self.setResultData(codes, filter='DROP', filter_target=['repo'], exclude_target=['repo'])
        else:
            self.setStatus('NG', comment='Status Code is {}'.format(str(statuscode)))

    def createMessage(self):
        """Build the notification lines for newly found repositories."""
        result = self.getCurrentResult()
        if len(result) != 0:
            message = ['I found repos about `{}`'.format(self.query['name'])]
            message += ['https://gitlab.com' + x['repo'] for x in result]
        else:
            message = []
        return message
| 38.142857 | 144 | 0.59176 |
1e5f360fd2a4400616b8433a230bfe1baf9e49e9 | 5,047 | py | Python | src/runners/alert_queries_runner.py | mikeurbanski1/SnowAlert | 85608343ac80bfcad69267e65eae5a21b9ad454d | [
"Apache-2.0"
] | null | null | null | src/runners/alert_queries_runner.py | mikeurbanski1/SnowAlert | 85608343ac80bfcad69267e65eae5a21b9ad454d | [
"Apache-2.0"
] | 1 | 2021-02-24T09:38:10.000Z | 2021-02-24T09:38:10.000Z | src/runners/alert_queries_runner.py | isabella232/SnowAlert | 85608343ac80bfcad69267e65eae5a21b9ad454d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import datetime
import os
from multiprocessing import Pool
from typing import Any, Dict
from runners.config import (
POOLSIZE,
RUN_ID,
QUERY_METADATA_TABLE,
RUN_METADATA_TABLE,
ALERT_QUERY_POSTFIX,
CLOUDWATCH_METRICS,
)
from runners.helpers import db, log
# Alert window: look back SA_ALERT_CUTOFF_MINUTES (default -90, i.e. 90
# minutes) from SA_ALERT_TO_TIME (default CURRENT_TIMESTAMP). Note the env
# override arrives as a string while the default is an int; either way the
# value is interpolated verbatim into the DATEADD SQL fragment below.
ALERT_CUTOFF_MINUTES = os.environ.get('SA_ALERT_CUTOFF_MINUTES', -90)
ALERTS_TO_TIME = os.environ.get('SA_ALERT_TO_TIME', 'CURRENT_TIMESTAMP')
ALERTS_FROM_TIME = f"DATEADD(minute, {ALERT_CUTOFF_MINUTES}, {ALERTS_TO_TIME})"
RUN_ALERT_QUERY = f"""
CREATE TRANSIENT TABLE results.RUN_{RUN_ID}_{{query_name}} AS
SELECT OBJECT_CONSTRUCT(
'ALERT_ID', UUID_STRING(),
'QUERY_NAME', '{{query_name}}',
'QUERY_ID', IFNULL(QUERY_ID::VARIANT, PARSE_JSON('null')),
'ENVIRONMENT', IFNULL(ENVIRONMENT::VARIANT, PARSE_JSON('null')),
'SOURCES', IFNULL(SOURCES::VARIANT, PARSE_JSON('null')),
'ACTOR', IFNULL(ACTOR::VARIANT, PARSE_JSON('null')),
'OBJECT', IFNULL(OBJECT::VARIANT, PARSE_JSON('null')),
'ACTION', IFNULL(ACTION::VARIANT, PARSE_JSON('null')),
'TITLE', IFNULL(TITLE::VARIANT, PARSE_JSON('null')),
'EVENT_TIME', IFNULL(EVENT_TIME::VARIANT, PARSE_JSON('null')),
'ALERT_TIME', IFNULL(ALERT_TIME::VARIANT, PARSE_JSON('null')),
'DESCRIPTION', IFNULL(DESCRIPTION::VARIANT, PARSE_JSON('null')),
'DETECTOR', IFNULL(DETECTOR::VARIANT, PARSE_JSON('null')),
'EVENT_DATA', IFNULL(EVENT_DATA::VARIANT, PARSE_JSON('null')),
'SEVERITY', IFNULL(SEVERITY::VARIANT, PARSE_JSON('null')),
'HANDLERS', IFNULL(OBJECT_CONSTRUCT(*):HANDLERS::VARIANT, PARSE_JSON('null'))
) AS alert
, alert_time
, event_time
, 1 AS counter
FROM rules.{{query_name}}
WHERE event_time BETWEEN {{from_time_sql}} AND {{to_time_sql}}
"""
MERGE_ALERTS = f"""MERGE INTO results.alerts AS alerts USING (
SELECT ANY_VALUE(alert) AS alert
, SUM(counter) AS counter
, MIN(alert_time) AS alert_time
, MIN(event_time) AS event_time
FROM results.{{new_alerts_table}}
GROUP BY alert:OBJECT, alert:DESCRIPTION
) AS new_alerts
ON (
alerts.alert:OBJECT = new_alerts.alert:OBJECT
AND alerts.alert:DESCRIPTION = new_alerts.alert:DESCRIPTION
AND alerts.alert:EVENT_TIME > {{from_time_sql}}
)
WHEN MATCHED
THEN UPDATE SET counter = alerts.counter + new_alerts.counter
WHEN NOT MATCHED
THEN INSERT (alert, counter, alert_time, event_time)
VALUES (
new_alerts.alert,
new_alerts.counter,
new_alerts.alert_time,
new_alerts.event_time
)
;
"""
def merge_alerts(query_name, from_time_sql):
    """Merge this run's staging table for `query_name` into results.alerts.

    Returns the (created, updated) row-count pair reported by the MERGE.
    """
    log.info(f"{query_name} processing...")
    merge_sql = MERGE_ALERTS.format(
        query_name=query_name,
        from_time_sql=from_time_sql,
        new_alerts_table=f"RUN_{RUN_ID}_{query_name}",
    )
    rows = db.execute(merge_sql, fix_errors=False).fetchall()
    created_count, updated_count = rows[0]
    log.info(f"{query_name} created {created_count}, updated {updated_count} rows.")
    return created_count, updated_count
def create_alerts(rule_name: str) -> Dict[str, Any]:
    """Run one alert rule: materialize its staging table, merge it into
    results.alerts, and record per-query run metadata.

    Returns the metadata dict (with INSERTED/UPDATED row counts). Failures
    are recorded into the metadata rather than raised, so a worker-pool
    caller always gets a result back.
    """
    metadata: Dict[str, Any] = {
        'QUERY_NAME': rule_name,
        'RUN_ID': RUN_ID,
        'ATTEMPTS': 1,
        'START_TIME': datetime.datetime.utcnow(),
        'ROW_COUNT': {'INSERTED': 0, 'UPDATED': 0},
    }

    try:
        db.execute(
            RUN_ALERT_QUERY.format(
                query_name=rule_name,
                from_time_sql=ALERTS_FROM_TIME,
                to_time_sql=ALERTS_TO_TIME,
            ),
            fix_errors=False,
        )
        insert_count, update_count = merge_alerts(rule_name, ALERTS_FROM_TIME)
        metadata['ROW_COUNT'] = {'INSERTED': insert_count, 'UPDATED': update_count}
        # The staging table is only dropped on success; on failure it is
        # left behind (presumably for debugging — confirm intent).
        db.execute(f"DROP TABLE results.RUN_{RUN_ID}_{rule_name}")

    except Exception as e:
        db.record_metadata(metadata, table=QUERY_METADATA_TABLE, e=e)
        return metadata

    db.record_metadata(metadata, table=QUERY_METADATA_TABLE)
    log.info(f"{rule_name} done.")
    return metadata
def main(rule_name=None):
    """Run all alert-query rules (or just `rule_name` if given), record the
    aggregate run metadata, and optionally emit a CloudWatch heartbeat.
    """
    RUN_METADATA = {
        'RUN_ID': RUN_ID,
        'RUN_TYPE': 'ALERT QUERY',
        'START_TIME': datetime.datetime.utcnow(),
    }
    if rule_name:
        metadata = [create_alerts(rule_name)]
    else:
        # Fan the rules out over a worker pool; create_alerts records its
        # own failures, so the map returns one metadata dict per rule.
        rules = list(db.load_rules(ALERT_QUERY_POSTFIX))
        metadata = Pool(POOLSIZE).map(create_alerts, rules)

    RUN_METADATA['ROW_COUNT'] = {
        'INSERTED': sum(q['ROW_COUNT']['INSERTED'] for q in metadata),
        'UPDATED': sum(q['ROW_COUNT']['UPDATED'] for q in metadata),
    }
    db.record_metadata(RUN_METADATA, table=RUN_METADATA_TABLE)

    try:
        if CLOUDWATCH_METRICS:
            log.metric(
                'Run',
                'SnowAlert',
                [{'Name': 'Component', 'Value': 'Alert Query Runner'}],
                1,
            )
    except Exception as e:
        # Metrics are best-effort; a CloudWatch failure must not fail the run.
        log.error("Cloudwatch metric logging failed: ", e)
if __name__ == '__main__':
main()
| 30.96319 | 86 | 0.650287 |
dea559b05f6d50572e9d07276a075b70f84db063 | 19 | py | Python | Kartik.py | sahilvalvi/WP-Hack | 22755454fb9d8db70b795ccf4a7e092c83ce0ad0 | [
"Unlicense"
] | null | null | null | Kartik.py | sahilvalvi/WP-Hack | 22755454fb9d8db70b795ccf4a7e092c83ce0ad0 | [
"Unlicense"
] | null | null | null | Kartik.py | sahilvalvi/WP-Hack | 22755454fb9d8db70b795ccf4a7e092c83ce0ad0 | [
"Unlicense"
] | null | null | null | 1:Kartik
Hdjdbddh
| 4.75 | 8 | 0.789474 |
4104d35a8576f1d833a3afd9b05a2c9b1253a6d2 | 3,726 | py | Python | graphs/perception/perception_2nodes/launch/trace_rectify_resize_fpga_streamlined.launch.py | dirksavage88/acceleration_examples | 97140d08d84e53d7c7cc04340dfefe2c4a954117 | [
"Apache-2.0"
] | null | null | null | graphs/perception/perception_2nodes/launch/trace_rectify_resize_fpga_streamlined.launch.py | dirksavage88/acceleration_examples | 97140d08d84e53d7c7cc04340dfefe2c4a954117 | [
"Apache-2.0"
] | null | null | null | graphs/perception/perception_2nodes/launch/trace_rectify_resize_fpga_streamlined.launch.py | dirksavage88/acceleration_examples | 97140d08d84e53d7c7cc04340dfefe2c4a954117 | [
"Apache-2.0"
] | null | null | null | # ____ ____
# / /\/ /
# /___/ \ / Copyright (c) 2021, Xilinx®.
# \ \ \/ Author: Víctor Mayoral Vilches <victorma@xilinx.com>
# \ \
# / /
# /___/ /\
# \ \ / \
# \___\/\___\
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from launch import LaunchDescription
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackageShare
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
from tracetools_launch.action import Trace
from tracetools_trace.tools.names import DEFAULT_EVENTS_ROS
from tracetools_trace.tools.names import DEFAULT_EVENTS_KERNEL
from tracetools_trace.tools.names import DEFAULT_CONTEXT
def generate_launch_description():
    """Launch the streamlined FPGA rectify+resize perception node together
    with an LTTng trace session capturing ros2_image_pipeline userspace
    events plus the default ROS tracepoints.
    """
    # Trace
    trace = Trace(
        session_name="trace_rectify_resize_fpga_streamlined",
        events_ust=[
            "ros2_image_pipeline:*",
        ]
        + DEFAULT_EVENTS_ROS,
        context_fields={
            'kernel': [],
            'userspace': ['vpid', 'vtid', 'procname'],
        },
        # events_kernel=DEFAULT_EVENTS_KERNEL,
    )

    # NOTE(review): the ComposableNodeContainer variant below is kept,
    # commented out, as the alternative to the multi-threaded executor node.
    # perception_container = ComposableNodeContainer(
    #     name="perception_container",
    #     namespace="",
    #     package="rclcpp_components",
    #     executable="component_container",
    #     composable_node_descriptions=[
    #         ComposableNode(
    #             package="image_proc",
    #             plugin="image_proc::RectifyNodeFPGAStreamlined",
    #             name="rectify_node_fpga",
    #             remappings=[
    #                 ("image", "/camera/image_raw"),
    #                 ("camera_info", "/camera/camera_info"),
    #             ],
    #         ),
    #         ComposableNode(
    #             namespace="resize",
    #             package="image_proc",
    #             plugin="image_proc::ResizeNodeFPGAStreamlined",
    #             name="resize_node_fpga",
    #             remappings=[
    #                 ("camera_info", "/camera/camera_info"),
    #                 # ("image", "/image_rect"),
    #                 ("image", "/camera/image_raw"),
    #                 ("resize", "resize"),
    #             ],
    #             parameters=[
    #                 {
    #                     "scale_height": 2.0,
    #                     "scale_width": 2.0,
    #                 }
    #             ],
    #         ),
    #     ],
    #     output="screen",
    # )

    # Use a multi-threaded executor instead
    perception_node = Node(
        package="image_pipeline_examples",
        executable="rectify_resize_fpga_streamlined_node",
        name="rectify_resize_fpga_streamlined_node",
        remappings=[
            ("image", "/camera/image_raw"),
            ("camera_info", "/camera/camera_info"),
            ("resize", "resize"),
        ],
        parameters=[
            {
                "scale_height": 2.0,
                "scale_width": 2.0,
            }
        ],
    )

    return LaunchDescription([
        # LTTng tracing
        trace,
        # image pipeline
        # perception_container
        perception_node
    ])
5c55663aa794698dda73c317321fcad807dcc61e | 13,648 | py | Python | python_modules/libraries/dagstermill/dagstermill/manager.py | devarajnadiger/dagster | fae430f1d9463c23a427efa06dc9d0deb76429bb | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagstermill/dagstermill/manager.py | devarajnadiger/dagster | fae430f1d9463c23a427efa06dc9d0deb76429bb | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagstermill/dagstermill/manager.py | devarajnadiger/dagster | fae430f1d9463c23a427efa06dc9d0deb76429bb | [
"Apache-2.0"
] | 1 | 2021-11-30T21:40:46.000Z | 2021-11-30T21:40:46.000Z | import os
import pickle
import uuid
from dagster import (
AssetMaterialization,
ExpectationResult,
Failure,
Materialization,
ModeDefinition,
PipelineDefinition,
SolidDefinition,
TypeCheck,
check,
seven,
)
from dagster.core.definitions.dependency import NodeHandle
from dagster.core.definitions.events import RetryRequested
from dagster.core.definitions.pipeline_base import InMemoryPipeline
from dagster.core.definitions.reconstructable import ReconstructablePipeline
from dagster.core.definitions.resource import ScopedResourcesBuilder
from dagster.core.execution.api import scoped_pipeline_context
from dagster.core.execution.plan.plan import ExecutionPlan
from dagster.core.execution.resources_init import (
get_required_resource_keys_to_init,
resource_initialization_event_generator,
)
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from dagster.core.system_config.objects import ResolvedRunConfig
from dagster.core.utils import make_new_run_id
from dagster.loggers import colored_console_logger
from dagster.serdes import unpack_value
from dagster.utils import EventGenerationManager
from .context import DagstermillExecutionContext, DagstermillRuntimeExecutionContext
from .errors import DagstermillError
from .serialize import PICKLE_PROTOCOL, read_value, write_value
class DagstermillResourceEventGenerationManager(EventGenerationManager):
    """Utility class to explicitly manage setup/teardown of resource events.

    Overrides the default `generate_teardown_events` method so that teardown
    is deferred until explicitly called by the dagstermill Manager.
    """

    def generate_teardown_events(self):
        # Yield nothing here: the base manager must not tear resources down
        # on its own; `teardown()` below does it explicitly instead.
        return iter(())

    def teardown(self):
        # Explicitly drain the base class's teardown-event generator.
        return list(
            super(DagstermillResourceEventGenerationManager, self).generate_teardown_events()
        )
class Manager:
def __init__(self):
    # All attributes are populated later by reconstitute_pipeline_context
    # / get_context; a fresh Manager starts empty.
    self.pipeline = None  # pipeline being executed (reconstructable or in-memory)
    self.solid_def = None  # SolidDefinition for the notebook solid
    self.in_pipeline = False  # True once reconstituted inside a real run
    self.marshal_dir = None  # scratch dir for (de)serializing values
    self.context = None  # the dagstermill execution context, once built
    self.resource_manager = None  # set by _setup_resources for explicit teardown
def _setup_resources(
    self,
    resource_defs,
    resource_configs,
    log_manager,
    execution_plan,
    pipeline_run,
    resource_keys_to_init,
    instance,
    emit_persistent_events,
    pipeline_def_for_backwards_compat,
):
    """
    Drop-in replacement for
    `dagster.core.execution.resources_init.resource_initialization_manager`. It uses a
    `DagstermillResourceEventGenerationManager` and explicitly calls `teardown` on it
    """
    generator = resource_initialization_event_generator(
        resource_defs=resource_defs,
        resource_configs=resource_configs,
        log_manager=log_manager,
        execution_plan=execution_plan,
        pipeline_run=pipeline_run,
        resource_keys_to_init=resource_keys_to_init,
        instance=instance,
        emit_persistent_events=emit_persistent_events,
        pipeline_def_for_backwards_compat=pipeline_def_for_backwards_compat,
    )
    # Keep a handle on the manager so the dagstermill Manager can tear
    # resources down explicitly later (teardown is otherwise deferred).
    self.resource_manager = DagstermillResourceEventGenerationManager(
        generator, ScopedResourcesBuilder
    )
    return self.resource_manager
def reconstitute_pipeline_context(
    self,
    output_log_path=None,
    marshal_dir=None,
    run_config=None,
    executable_dict=None,
    pipeline_run_dict=None,
    solid_handle_kwargs=None,
    instance_ref_dict=None,
):
    """Reconstitutes a context for dagstermill-managed execution.

    You'll see this function called to reconstruct a pipeline context within the ``injected
    parameters`` cell of a dagstermill output notebook. Users should not call this function
    interactively except when debugging output notebooks.

    Use :func:`dagstermill.get_context` in the ``parameters`` cell of your notebook to define a
    context for interactive exploration and development. This call will be replaced by one to
    :func:`dagstermill.reconstitute_pipeline_context` when the notebook is executed by
    dagstermill.
    """
    check.opt_str_param(output_log_path, "output_log_path")
    check.opt_str_param(marshal_dir, "marshal_dir")
    run_config = check.opt_dict_param(run_config, "run_config", key_type=str)
    check.dict_param(pipeline_run_dict, "pipeline_run_dict")
    check.dict_param(executable_dict, "executable_dict")
    check.dict_param(solid_handle_kwargs, "solid_handle_kwargs")
    check.dict_param(instance_ref_dict, "instance_ref_dict")

    pipeline = ReconstructablePipeline.from_dict(executable_dict)
    pipeline_def = pipeline.get_definition()

    # Rehydrate the instance from its serialized ref; wrap failures so the
    # notebook user sees a dagstermill error rather than a raw traceback.
    try:
        instance_ref = unpack_value(instance_ref_dict)
        instance = DagsterInstance.from_ref(instance_ref)
    except Exception as err:  # pylint: disable=broad-except
        raise DagstermillError(
            "Error when attempting to resolve DagsterInstance from serialized InstanceRef"
        ) from err

    pipeline_run = unpack_value(pipeline_run_dict)

    solid_handle = NodeHandle.from_dict(solid_handle_kwargs)
    solid = pipeline_def.get_solid(solid_handle)
    solid_def = solid.definition

    self.marshal_dir = marshal_dir
    self.in_pipeline = True
    self.solid_def = solid_def
    self.pipeline = pipeline

    resolved_run_config = ResolvedRunConfig.build(
        pipeline_def, run_config, mode=pipeline_run.mode
    )

    execution_plan = ExecutionPlan.build(
        self.pipeline,
        resolved_run_config,
        step_keys_to_execute=pipeline_run.step_keys_to_execute,
    )

    with scoped_pipeline_context(
        execution_plan,
        pipeline,
        run_config,
        pipeline_run,
        instance,
        scoped_resources_builder_cm=self._setup_resources,
        # Set this flag even though we're not in test for clearer error reporting
        raise_on_error=True,
    ) as pipeline_context:
        self.context = DagstermillRuntimeExecutionContext(
            pipeline_context=pipeline_context,
            pipeline_def=pipeline_def,
            solid_config=run_config.get("solids", {}).get(solid.name, {}).get("config"),
            resource_keys_to_init=get_required_resource_keys_to_init(
                execution_plan,
                pipeline_def,
                resolved_run_config,
                pipeline_context.intermediate_storage_def,
            ),
            solid_name=solid.name,
            solid_handle=solid_handle,
        )

    return self.context
    def get_context(self, solid_config=None, mode_def=None, run_config=None):
        """Get a dagstermill execution context for interactive exploration and development.
        Args:
            solid_config (Optional[Any]): If specified, this value will be made available on the
                context as its ``solid_config`` property.
            mode_def (Optional[:class:`dagster.ModeDefinition`]): If specified, defines the mode to
                use to construct the context. Specify this if you would like a context constructed
                with specific ``resource_defs`` or ``logger_defs``. By default, an ephemeral mode
                with a console logger will be constructed.
            run_config(Optional[dict]): The config dict with which to construct
                the context.
        Returns:
            :py:class:`~dagstermill.DagstermillExecutionContext`
        """
        check.opt_inst_param(mode_def, "mode_def", ModeDefinition)
        run_config = check.opt_dict_param(run_config, "run_config", key_type=str)
        # If we are running non-interactively, and there is already a context reconstituted, return
        # that context rather than overwriting it.
        if self.context is not None and isinstance(
            self.context, DagstermillRuntimeExecutionContext
        ):
            return self.context
        # No mode supplied: fall back to an ephemeral mode whose only logger is
        # the dagstermill colored console logger, and enable it in the run config.
        if not mode_def:
            mode_def = ModeDefinition(logger_defs={"dagstermill": colored_console_logger})
            run_config["loggers"] = {"dagstermill": {}}
        # Build a throwaway no-op solid + pipeline pair purely so a context can
        # be constructed outside of a real pipeline execution.
        solid_def = SolidDefinition(
            name="this_solid",
            input_defs=[],
            compute_fn=lambda *args, **kwargs: None,
            output_defs=[],
            description="Ephemeral solid constructed by dagstermill.get_context()",
            required_resource_keys=mode_def.resource_key_set,
        )
        pipeline_def = PipelineDefinition(
            [solid_def], mode_defs=[mode_def], name="ephemeral_dagstermill_pipeline"
        )
        run_id = make_new_run_id()
        # construct stubbed PipelineRun for notebook exploration...
        # The actual pipeline run during pipeline execution will be serialized and reconstituted
        # in the `reconstitute_pipeline_context` call
        pipeline_run = PipelineRun(
            pipeline_name=pipeline_def.name,
            run_id=run_id,
            run_config=run_config,
            mode=mode_def.name,
            step_keys_to_execute=None,
            status=PipelineRunStatus.NOT_STARTED,
            tags=None,
        )
        # Mark the manager as running interactively (not inside a real pipeline).
        self.in_pipeline = False
        self.solid_def = solid_def
        self.pipeline = pipeline_def
        resolved_run_config = ResolvedRunConfig.build(pipeline_def, run_config, mode=mode_def.name)
        pipeline = InMemoryPipeline(pipeline_def)
        execution_plan = ExecutionPlan.build(pipeline, resolved_run_config)
        # Enter a scoped pipeline context against an ephemeral instance; cache
        # the resulting context on self for reuse by subsequent notebook calls.
        with scoped_pipeline_context(
            execution_plan,
            pipeline,
            run_config,
            pipeline_run,
            DagsterInstance.ephemeral(),
            scoped_resources_builder_cm=self._setup_resources,
        ) as pipeline_context:
            self.context = DagstermillExecutionContext(
                pipeline_context=pipeline_context,
                pipeline_def=pipeline_def,
                solid_config=solid_config,
                resource_keys_to_init=get_required_resource_keys_to_init(
                    execution_plan,
                    pipeline_def,
                    resolved_run_config,
                    pipeline_context.intermediate_storage_def,
                ),
                solid_name=solid_def.name,
                solid_handle=NodeHandle(solid_def.name, parent=None),
            )
        return self.context
def yield_result(self, value, output_name="result"):
"""Yield a result directly from notebook code.
When called interactively or in development, returns its input.
Args:
value (Any): The value to yield.
output_name (Optional[str]): The name of the result to yield (default: ``'result'``).
"""
if not self.in_pipeline:
return value
# deferred import for perf
import scrapbook
if not self.solid_def.has_output(output_name):
raise DagstermillError(
f"Solid {self.solid_def.name} does not have output named {output_name}."
f"Expected one of {[str(output_def.name) for output_def in self.solid_def.output_defs]}"
)
dagster_type = self.solid_def.output_def_named(output_name).dagster_type
# https://github.com/dagster-io/dagster/issues/2648
# dagstermill temporary file creation should use a more systematic and robust scheme
out_file = os.path.join(
self.marshal_dir, f"{self.context.solid_handle}-output-{output_name}"
)
scrapbook.glue(output_name, write_value(dagster_type, value, out_file))
def yield_event(self, dagster_event):
"""Yield a dagster event directly from notebook code.
When called interactively or in development, returns its input.
Args:
dagster_event (Union[:class:`dagster.AssetMaterialization`, :class:`dagster.ExpectationResult`, :class:`dagster.TypeCheck`, :class:`dagster.Failure`, :class:`dagster.RetryRequested`]):
An event to yield back to Dagster.
"""
valid_types = (
Materialization,
AssetMaterialization,
ExpectationResult,
TypeCheck,
Failure,
RetryRequested,
)
if not isinstance(dagster_event, valid_types):
raise DagstermillError(
f"Received invalid type {dagster_event} in yield_event. Expected a Dagster event type, one of {valid_types}."
)
if not self.in_pipeline:
return dagster_event
# deferred import for perf
import scrapbook
event_id = "event-{event_uuid}".format(event_uuid=str(uuid.uuid4()))
out_file_path = os.path.join(self.marshal_dir, event_id)
with open(out_file_path, "wb") as fd:
fd.write(pickle.dumps(dagster_event, PICKLE_PROTOCOL))
scrapbook.glue(event_id, out_file_path)
def teardown_resources(self):
if self.resource_manager is not None:
self.resource_manager.teardown()
def load_parameter(self, input_name, input_value):
input_def = self.solid_def.input_def_named(input_name)
return read_value(input_def.dagster_type, seven.json.loads(input_value))
# Module-level singleton Manager shared by the dagstermill notebook entrypoints.
MANAGER_FOR_NOTEBOOK_INSTANCE = Manager()
| 38.337079 | 196 | 0.667424 |
7afea4ed18834346098f81e1f09750268f242682 | 5,288 | py | Python | pysrc/bytewax/parse.py | yutiansut/bytewax | 54dba26cbab2afd24007865f69083a92c05cbdc1 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-03-24T15:53:50.000Z | 2022-03-24T15:53:50.000Z | pysrc/bytewax/parse.py | yutiansut/bytewax | 54dba26cbab2afd24007865f69083a92c05cbdc1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | pysrc/bytewax/parse.py | yutiansut/bytewax | 54dba26cbab2afd24007865f69083a92c05cbdc1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """Helpers to read execution arguments from the environment or command
line.
"""
import os
from argparse import ArgumentParser
from typing import Any, Dict, Iterable, List, Optional, Tuple
def __skip_doctest_on_win_gha():
    """Skip the calling doctest when running on Windows under GitHub Actions.
    Imports are deferred so pytest is not an import-time dependency of this
    module.
    """
    # NOTE(review): this checks GITHUB_ACTION (singular); GitHub Actions also
    # sets GITHUB_ACTIONS="true" -- confirm which variable is intended here.
    import os
    import pytest
    if os.name == "nt" and os.environ.get("GITHUB_ACTION"):
        pytest.skip("Hangs in Windows GitHub Actions")
def cluster_args(args: Optional[Iterable[str]] = None) -> Dict[str, Any]:
    """Parse command line arguments to generate arguments for
    `bytewax.run_cluster()`.
    See documentation for `bytewax.run_cluster()` for semantics of
    these variables.
    >>> __skip_doctest_on_win_gha()
    >>> from bytewax import Dataflow, run_cluster
    >>> flow = Dataflow()
    >>> flow.capture()
    >>> args = "-w2 -n2".split()
    >>> out = run_cluster(flow, enumerate(range(3)), **cluster_args(args))
    >>> sorted(out)
    [(0, 0), (1, 1), (2, 2)]
    Args:
        args: List of arguments to parse. Defaults to `sys.argv`.
    Returns:
        kwargs to pass to `bytewax.run_cluster()`.
    """
    # args=None makes argparse read sys.argv; the annotation is now Optional
    # to match that default.
    p = ArgumentParser()
    p.add_argument(
        "-w",
        dest="worker_count_per_proc",
        type=int,
        help="Number of worker threads per process",
        default=1,
    )
    p.add_argument(
        "-n",
        dest="proc_count",
        type=int,
        help="Number of processes to start",
        default=1,
    )
    out = p.parse_args(args)
    # Only forward the kwargs that run_cluster() actually accepts.
    kwargs = {
        "proc_count": out.proc_count,
        "worker_count_per_proc": out.worker_count_per_proc,
    }
    return kwargs
def proc_env(env: Dict[str, str] = os.environ) -> Dict[str, Any]:
    """Build `bytewax.cluster_main()` kwargs from environment variables, for
    manually launched clusters (e.g. on Kubernetes).
    See `bytewax.cluster_main()` for the semantics of each value.
    Required variables:
    * `BYTEWAX_WORKERS_PER_PROCESS`
    Plus one of:
    * `BYTEWAX_ADDRESSES` - `;` separated list of "host:port"
      addresses.
    * `BYTEWAX_HOSTFILE_PATH` - Path to a file containing a list of
      cluster addresses.
    Plus one of:
    * `BYTEWAX_PROCESS_ID`
    * `BYTEWAX_POD_NAME` and `BYTEWAX_STATEFULSET_NAME` -
      E.g. `cluster_name-0` and `cluster_name`; the process ID is
      derived from the pod's ordinal suffix.
    >>> __skip_doctest_on_win_gha()
    >>> from bytewax import Dataflow, cluster_main
    >>> flow = Dataflow()
    >>> flow.capture()
    >>> ib = lambda i, n: enumerate(range(3))
    >>> ob = lambda i, n: print
    >>> env = {
    ...     "BYTEWAX_ADDRESSES": "localhost:2101",
    ...     "BYTEWAX_PROCESS_ID": "0",
    ...     "BYTEWAX_WORKERS_PER_PROCESS": "2",
    ... }
    >>> cluster_main(flow, ib, ob, **proc_env(env))  # doctest: +ELLIPSIS
    (0, 0)
    ...
    (2, 2)
    Args:
        env: Environment variables. Defaults to `os.environ`.
    Returns:
        kwargs to pass to `bytewax.cluster_main()`.
    """
    # Addresses: prefer the inline `;`-separated list, otherwise read one
    # address per line from the hostfile, dropping blank lines.
    if "BYTEWAX_ADDRESSES" in env:
        addresses = env["BYTEWAX_ADDRESSES"].split(";")
    else:
        with open(env["BYTEWAX_HOSTFILE_PATH"]) as hostfile:
            addresses = [line.strip() for line in hostfile if line.strip() != ""]
    # Process ID: explicit, or derived by stripping the StatefulSet prefix
    # from the pod name (leaving the ordinal).
    if "BYTEWAX_PROCESS_ID" in env:
        proc_id = int(env["BYTEWAX_PROCESS_ID"])
    else:
        prefix = env["BYTEWAX_STATEFULSET_NAME"] + "-"
        proc_id = int(env["BYTEWAX_POD_NAME"].replace(prefix, ""))
    return {
        "worker_count_per_proc": int(env["BYTEWAX_WORKERS_PER_PROCESS"]),
        "addresses": addresses,
        "proc_id": proc_id,
    }
def proc_args(args: Iterable[str] = None) -> Dict[str, Any]:
    """Build `bytewax.cluster_main()` kwargs from command line arguments, for
    manually launched clusters.
    See `bytewax.cluster_main()` for the semantics of each value.
    >>> __skip_doctest_on_win_gha()
    >>> from bytewax import Dataflow, cluster_main
    >>> flow = Dataflow()
    >>> flow.capture()
    >>> ib = lambda i, n: enumerate(range(3))
    >>> ob = lambda i, n: print
    >>> args = "-w2 -p0 -a localhost:2101".split()
    >>> cluster_main(flow, ib, ob, **proc_args(args))  # doctest: +ELLIPSIS
    (0, 0)
    ...
    (2, 2)
    Args:
        args: List of arguments to parse. Defaults to `sys.argv`.
    Returns:
        kwargs to pass to `bytewax.cluster_main()`.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "-w",
        dest="worker_count_per_proc",
        type=int,
        help="Number of worker threads per process",
        default=1,
    )
    parser.add_argument(
        "-p",
        dest="proc_id",
        type=int,
        required=True,
        help="Index of this process in cluster, starts from 0",
    )
    # action="append" collects every -a occurrence into a list.
    parser.add_argument(
        "-a",
        dest="addresses",
        action="append",
        required=True,
        help="Add the hostname:port address of every (including this) process in cluster",
    )
    ns = parser.parse_args(args)
    return {
        "worker_count_per_proc": ns.worker_count_per_proc,
        "addresses": ns.addresses,
        "proc_id": ns.proc_id,
    }
| 26.049261 | 90 | 0.604387 |
6e18e8c86f228d9e9eed6d66721d556b73b51817 | 10,064 | py | Python | eupheme/mime.py | Parnassos/Eupheme | 015ab4452bdcb324568d2f786b77c4a6986d4b99 | [
"BSD-3-Clause"
] | null | null | null | eupheme/mime.py | Parnassos/Eupheme | 015ab4452bdcb324568d2f786b77c4a6986d4b99 | [
"BSD-3-Clause"
] | null | null | null | eupheme/mime.py | Parnassos/Eupheme | 015ab4452bdcb324568d2f786b77c4a6986d4b99 | [
"BSD-3-Clause"
] | null | null | null | import re
import codecs
import eupheme.negotiation as negotiation
# A token as described in RFC2045 section 5.1. Consists of any ASCII character
# except non-printable ones, spaces as well as ( ) < > @ , ; \ " [ ] ? =
# Shared token validator used by MimeParameters, MimeType and CharacterSet.
RE_TOKEN = re.compile(r'^[^\x00-\x20\x80-\xff()<>@,;:\\"/\[\]?=]+$')
class MimeParameters:
    """Parses, stores and re-encodes MIME type parameters (RFC2045 sec. 5.1)."""
    # The rough structure for a type parameter; c.f. to RFC2045 section 5.1.
    RE_PARAMETER = re.compile('^(?P<key>\w+)=(?P<value>.*)$', re.DOTALL)
    # A quoted string as specified in RFC822 section 3.3.
    RE_QUOTED_STRING = re.compile(
        r'^"('
        # qtext: any ASCII character excepting <"> and "\".
        # All carriage returns (\r) must be followed by a line feed (\n).
        r'[^\\\"\r\x80-\xff]|' '\r\n|'
        # quoted-pair: any ASCII character prepended with a backslash.
        r'\\[\x00-\x7f]'
        r')*"$'
    )
    def __init__(self, encoded=None, **parameters):
        """
        Creates a MimeParameters instance, parsed from the parameters in
        the 'encoded' argument. Further parameters can be specified as keyword
        arguments.
        """
        # Copy over the keyword arguments
        self.values = {}
        for key, value in parameters.items():
            self[key] = value
        if encoded is None:
            return  # Do not bother parsing if we have no encoded parameters.
        # Parse additional parameters specified in the mimetype.
        # NOTE: splitting on ';' does not account for semicolons inside quoted
        # strings; such values will fail to parse below.
        for parameter in encoded.split(';'):
            parameter = parameter.strip()
            if not parameter:
                continue  # Skip empty parameters
            match = self.RE_PARAMETER.match(parameter)
            if match is None:
                raise ValueError('Invalid parameter: {0}'.format(parameter))
            key = match.group('key')
            value = match.group('value')
            # Value can either be a token or a quoted string
            if RE_TOKEN.match(value):
                self[key] = value
            elif self.RE_QUOTED_STRING.match(value):
                # We got a quoted string, unquote
                self[key] = re.sub(r'\\(.)', r'\1', value[1:-1])
            else:
                raise ValueError('Invalid parameter value: {0}'.format(value))
    def __str__(self):
        """Encodes the MimeParameters back into a string."""
        encoded = []
        for key, value in self.values.items():
            if not RE_TOKEN.match(value):
                # The token cannot be encoded directly, escape any character
                # that needs quoting in a quoted-string (quotes, backslashes
                # and carriage returns not followed by a newline).
                # Fixed: the lookahead was written '(!?\n)' (optional '!' then
                # a literal newline), which escaped CRLF and missed bare CRs;
                # '(?!\n)' is the intended negative lookahead.
                value = '"{0}"'.format(
                    re.sub(r'("|\\|\r(?!\n))', r'\\\1', value))
            encoded.append('{0}={1}'.format(key, value))
        return '; '.join(encoded)
    def __getitem__(self, key):
        """Returns the value for the parameter 'key', if it exists."""
        return self.values[key]
    def __setitem__(self, key, value):
        """Sets the value for the parameter 'key' to 'value'."""
        self.values[key] = str(value)
    def __contains__(self, key):
        """Returns a boolean indicating the existence of parameter 'key'."""
        return key in self.values
    def __len__(self):
        """
        Returns the number of parameters, excluding the (reserved) quality
        parameter if it exists.
        """
        return len(self.values)-1 if 'q' in self else len(self.values)
    def __le__(self, other):
        """
        Returns a boolean indicating whether all parameters (excluding the
        quality parameter) are contained in the instance 'other'.
        """
        for key, value in self.values.items():
            if key != 'q' and (key not in other or other[key] != self[key]):
                return False
        return True
class MimeType(negotiation.Negotiable):
    """A MIME media type (type/subtype plus parameters) that supports content
    negotiation via containment checks and a strictness ordering."""
    # The rough structure for a MIME type; c.f. RFC2045 section 5.1.
    RE_MIMETYPE = re.compile(r'^(?P<type>.+)/'
                             r'(?P<subtype>.+?)'
                             r'(?P<parameters>;.*)?$')
    # Media types as registered with IANA, refer to
    # http://www.iana.org/assignments/media-types
    MEDIA_TYPES = [
        'application',
        'audio',
        'example',
        'image',
        'message',
        'model',
        'multipart',
        'text',
        'video'
    ]
    def __init__(self, type_, subtype, **parameters):
        """
        Instantiates a mimetype with type 'type_' and subtype 'subtype'.
        Additional type parameters can be passed as keyword arguments.
        Raises ValueError for invalid tokens, unknown media types, or a
        type wildcard combined with a concrete subtype.
        """
        if not RE_TOKEN.match(type_):
            raise ValueError('Invalid type token: {0}'.format(type_))
        if not RE_TOKEN.match(subtype):
            raise ValueError('Invalid subtype token: {0}'.format(subtype))
        # The media type needs to be a known one or indicated as an extension.
        # There's a sundry of subtypes in the wild, thus we allow any subtype.
        if not self.media_type_valid(type_):
            raise ValueError('Invalid media type: "{0}"'.format(type_))
        self.type = type_
        self.subtype = subtype
        self.parameters = MimeParameters(**parameters)
        # Types such as '*/html' are not allowed.
        if self.type == '*' and self.subtype != '*':
            raise ValueError('Type wildcard without subtype wildcard')
    @classmethod
    def parse(cls, encoded):
        """Parses a string into a MimeType instance."""
        match = cls.RE_MIMETYPE.match(encoded)
        if match is None:
            raise ValueError('Could not parse mimetype: {0}'
                             .format(encoded))
        mimetype = cls(match.group('type'), match.group('subtype'))
        # Parameters (everything after the first ';') are parsed separately.
        mimetype.parameters = MimeParameters(match.group('parameters'))
        return mimetype
    def media_type_valid(self, value):
        """
        Checks whether a media type is either one of the types registered with
        IANA, or an extension field prefixed with an 'x-'.
        """
        return (value == '*' or
                value in self.MEDIA_TYPES or
                value.lower().startswith("x-"))
    def __str__(self):
        """Returns a string representation of the mime type."""
        if not len(self.parameters):
            return "{0}/{1}".format(self.type, self.subtype)
        return "{0}/{1}; {2}".format(self.type,
                                     self.subtype,
                                     str(self.parameters))
    def __contains__(self, other):
        """
        Returns a boolean indicating whether the mimetype 'other' is satisfied
        by the present mimetype.
        """
        # The */* media range will satisfy all content types.
        if self.type == '*':
            return True
        # If the type is explicitly given, they must match.
        if self.type != other.type:
            return False
        # The TYPE/* range will satisfy all subtypes of TYPE.
        if self.subtype == '*':
            return True
        # If the subtype is explicitly given, they must match.
        if self.subtype != other.subtype:
            return False
        # If type and subtype match, the parameters of the contained type
        # should be a superset of the containing type. This is not mentioned
        # explicitly in RFC2616, but it appears to be true in the example of
        # section 14.1.
        return self.parameters <= other.parameters
    def __gt__(self, other):
        """
        Returns a boolean indicating whether the mimetype 'other' is stricter
        than the present mimetype.
        """
        # If the other type has wildcards and we don't, we are stricter.
        if other.type == '*' and self.type != '*':
            return True
        if other.subtype == '*' and self.subtype != '*':
            return True
        # If we have more parameters, we are stricter too. Further ordering in
        # case of the same number of parameters depends on the subtypes being
        # ordered and is therefore outside the scope of this implementation.
        return len(self.parameters) > len(other.parameters)
    def __hash__(self):
        """Hashes on the canonical string form (type/subtype; params)."""
        return hash(str(self))
    def __eq__(self, other):
        """Two mimetypes are equal iff their string encodings are equal."""
        return str(self) == str(other)
class CharacterSet(negotiation.Negotiable):
    """Represents a character set."""
    # Rough format as in RFC2616 section 14.2.
    RE_CHARSET = re.compile(r'^(?P<name>.+?)(?P<params>;.*)?$')
    def __init__(self, name, quality='1'):
        """Instantiates a charset with name 'name' and quality 'quality'."""
        # codecs.lookup raises LookupError for unknown charset names and
        # resolves aliases to a canonical codec.
        self.codec = codecs.lookup(name)
        # The quality value is stored as the reserved 'q' parameter.
        self.parameters = MimeParameters(q=quality)
    @classmethod
    def parse(cls, encoded):
        """
        Parses an encoded character set and its parameters and returns the
        result. The expected format is as in RFC2616 section 14.2.
        """
        match = cls.RE_CHARSET.match(encoded)
        if match is None:
            raise ValueError('Invalid character set: {0}'.format(encoded))
        # The charset set name must be a valid token; c.f. RFC2616 section 3.4
        if RE_TOKEN.match(match.group('name')) is None:
            raise ValueError('Invalid token: {0}'.format(encoded))
        charset = cls(match.group('name'))
        if match.group('params') is not None:
            charset.parameters = MimeParameters(match.group('params'))
        return charset
    def __contains__(self, other):
        """Checks whether this character set is satisfied by 'other'."""
        # Compare codecs rather than their names to account for aliases. For
        # example, iso-ir-6 is an alias of ASCII.
        return self.codec == other.codec
    def __lt__(self, other):
        """Imposes an alphabetical ordering on canonical codec names."""
        # Since there is no way of telling whether one codec satisfies another,
        # we simply order them alphabetically.
        return self.codec.name < other.codec.name
| 34.823529 | 79 | 0.583863 |
6554c0d3c209041bf608d39dc34d63bbf617cc33 | 21,072 | py | Python | synapse/handlers/profile.py | buffless-matt/synapse | dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df | [
"Apache-2.0"
] | null | null | null | synapse/handlers/profile.py | buffless-matt/synapse | dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df | [
"Apache-2.0"
] | 2 | 2022-03-01T08:22:45.000Z | 2022-03-11T08:13:55.000Z | synapse/handlers/profile.py | buffless-matt/synapse | dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df | [
"Apache-2.0"
] | 1 | 2022-03-31T09:03:27.000Z | 2022-03-31T09:03:27.000Z | # Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from typing import TYPE_CHECKING, Optional
from synapse.api.errors import (
AuthError,
Codes,
HttpResponseException,
RequestSendFailed,
StoreError,
SynapseError,
)
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import (
JsonDict,
Requester,
UserID,
create_requester,
get_domain_from_id,
)
from synapse.util.caches.descriptors import cached
from synapse.util.stringutils import parse_and_validate_mxc_uri
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
# Length limits enforced by set_displayname / set_avatar_url below.
MAX_DISPLAYNAME_LEN = 256
MAX_AVATAR_URL_LEN = 1000
class ProfileHandler:
"""Handles fetching and updating user profile information.
ProfileHandler can be instantiated directly on workers and will
delegate to master when necessary.
"""
PROFILE_UPDATE_MS = 60 * 1000
PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000
    def __init__(self, hs: "HomeServer"):
        """Wire up stores, federation and config for profile handling."""
        self.store = hs.get_datastores().main
        self.clock = hs.get_clock()
        self.hs = hs
        self.federation = hs.get_federation_client()
        # Answer inbound federation queries of type "profile" (see
        # on_profile_query below).
        hs.get_federation_registry().register_query_handler(
            "profile", self.on_profile_query
        )
        self.user_directory_handler = hs.get_user_directory_handler()
        self.request_ratelimiter = hs.get_request_ratelimiter()
        self.max_avatar_size = hs.config.server.max_avatar_size
        self.allowed_avatar_mimetypes = hs.config.server.allowed_avatar_mimetypes
        self.server_name = hs.config.server.server_name
        self._third_party_rules = hs.get_third_party_event_rules()
        # Only the worker designated for background tasks periodically
        # refreshes the remote profile cache, to avoid duplicated work.
        if hs.config.worker.run_background_tasks:
            self.clock.looping_call(
                self._update_remote_profile_cache, self.PROFILE_UPDATE_MS
            )
    async def get_profile(self, user_id: str) -> JsonDict:
        """Fetch a user's displayname and avatar URL.
        Local users are read from the profile store; remote users are
        queried over federation.
        Raises:
            SynapseError: 404 if a local profile is missing; 502 if the
                federation fetch fails.
        """
        target_user = UserID.from_string(user_id)
        if self.hs.is_mine(target_user):
            try:
                displayname = await self.store.get_profile_displayname(
                    target_user.localpart
                )
                avatar_url = await self.store.get_profile_avatar_url(
                    target_user.localpart
                )
            except StoreError as e:
                if e.code == 404:
                    raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
                raise
            return {"displayname": displayname, "avatar_url": avatar_url}
        else:
            try:
                result = await self.federation.make_query(
                    destination=target_user.domain,
                    query_type="profile",
                    args={"user_id": user_id},
                    ignore_backoff=True,
                )
                return result
            except RequestSendFailed as e:
                raise SynapseError(502, "Failed to fetch profile") from e
            except HttpResponseException as e:
                # Map remote client-error codes (other than 404) to a 502,
                # since only a limited set of codes is allowed in the c2s API.
                if e.code < 500 and e.code != 404:
                    # Other codes are not allowed in c2s API
                    logger.info(
                        "Server replied with wrong response: %s %s", e.code, e.msg
                    )
                    raise SynapseError(502, "Failed to fetch profile")
                raise e.to_synapse_error()
    async def get_profile_from_cache(self, user_id: str) -> JsonDict:
        """Get the profile information from our local cache. If the user is
        ours then the profile information will always be correct. Otherwise,
        it may be out of date/missing.
        """
        target_user = UserID.from_string(user_id)
        if self.hs.is_mine(target_user):
            try:
                displayname = await self.store.get_profile_displayname(
                    target_user.localpart
                )
                avatar_url = await self.store.get_profile_avatar_url(
                    target_user.localpart
                )
            except StoreError as e:
                if e.code == 404:
                    raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
                raise
            return {"displayname": displayname, "avatar_url": avatar_url}
        else:
            # Remote users: best-effort read from the remote profile cache;
            # an empty dict is returned when there is no cached entry.
            profile = await self.store.get_from_remote_profile_cache(user_id)
            return profile or {}
    async def get_displayname(self, target_user: UserID) -> Optional[str]:
        """Fetch a user's displayname, locally or over federation.
        Raises:
            SynapseError: 404 if a local profile is missing; 502 if the
                federation request fails.
        """
        if self.hs.is_mine(target_user):
            try:
                displayname = await self.store.get_profile_displayname(
                    target_user.localpart
                )
            except StoreError as e:
                if e.code == 404:
                    raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
                raise
            return displayname
        else:
            try:
                # Ask only for the displayname field over federation.
                result = await self.federation.make_query(
                    destination=target_user.domain,
                    query_type="profile",
                    args={"user_id": target_user.to_string(), "field": "displayname"},
                    ignore_backoff=True,
                )
            except RequestSendFailed as e:
                raise SynapseError(502, "Failed to fetch profile") from e
            except HttpResponseException as e:
                raise e.to_synapse_error()
            return result.get("displayname")
    async def set_displayname(
        self,
        target_user: UserID,
        requester: Requester,
        new_displayname: str,
        by_admin: bool = False,
        deactivation: bool = False,
    ) -> None:
        """Set the displayname of a user
        Args:
            target_user: the user whose displayname is to be changed.
            requester: The user attempting to make this change.
            new_displayname: The displayname to give this user.
            by_admin: Whether this change was made by an administrator.
            deactivation: Whether this change was made while deactivating the user.
        """
        if not self.hs.is_mine(target_user):
            raise SynapseError(400, "User is not hosted on this homeserver")
        if not by_admin and target_user != requester.user:
            raise AuthError(400, "Cannot set another user's displayname")
        # When changes are disabled by config, only allow setting the initial
        # displayname (i.e. when none is set yet), unless an admin acts.
        if not by_admin and not self.hs.config.registration.enable_set_displayname:
            profile = await self.store.get_profileinfo(target_user.localpart)
            if profile.display_name:
                raise SynapseError(
                    400,
                    "Changing display name is disabled on this server",
                    Codes.FORBIDDEN,
                )
        if not isinstance(new_displayname, str):
            raise SynapseError(
                400, "'displayname' must be a string", errcode=Codes.INVALID_PARAM
            )
        if len(new_displayname) > MAX_DISPLAYNAME_LEN:
            raise SynapseError(
                400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,)
            )
        # An empty string clears the displayname (stored as None).
        displayname_to_set: Optional[str] = new_displayname
        if new_displayname == "":
            displayname_to_set = None
        # If the admin changes the display name of a user, the requesting user cannot send
        # the join event to update the displayname in the rooms.
        # This must be done by the target user himself.
        if by_admin:
            requester = create_requester(
                target_user,
                authenticated_entity=requester.authenticated_entity,
            )
        await self.store.set_profile_displayname(
            target_user.localpart, displayname_to_set
        )
        # Propagate the change to the user directory and any module callbacks.
        profile = await self.store.get_profileinfo(target_user.localpart)
        await self.user_directory_handler.handle_local_profile_change(
            target_user.to_string(), profile
        )
        await self._third_party_rules.on_profile_update(
            target_user.to_string(), profile, by_admin, deactivation
        )
        await self._update_join_states(requester, target_user)
    async def get_avatar_url(self, target_user: UserID) -> Optional[str]:
        """Fetch a user's avatar URL, locally or over federation.
        Raises:
            SynapseError: 404 if a local profile is missing; 502 if the
                federation request fails.
        """
        if self.hs.is_mine(target_user):
            try:
                avatar_url = await self.store.get_profile_avatar_url(
                    target_user.localpart
                )
            except StoreError as e:
                if e.code == 404:
                    raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
                raise
            return avatar_url
        else:
            try:
                # Ask only for the avatar_url field over federation.
                result = await self.federation.make_query(
                    destination=target_user.domain,
                    query_type="profile",
                    args={"user_id": target_user.to_string(), "field": "avatar_url"},
                    ignore_backoff=True,
                )
            except RequestSendFailed as e:
                raise SynapseError(502, "Failed to fetch profile") from e
            except HttpResponseException as e:
                raise e.to_synapse_error()
            return result.get("avatar_url")
    async def set_avatar_url(
        self,
        target_user: UserID,
        requester: Requester,
        new_avatar_url: str,
        by_admin: bool = False,
        deactivation: bool = False,
    ) -> None:
        """Set a new avatar URL for a user.
        Args:
            target_user: the user whose avatar URL is to be changed.
            requester: The user attempting to make this change.
            new_avatar_url: The avatar URL to give this user.
            by_admin: Whether this change was made by an administrator.
            deactivation: Whether this change was made while deactivating the user.
        """
        if not self.hs.is_mine(target_user):
            raise SynapseError(400, "User is not hosted on this homeserver")
        if not by_admin and target_user != requester.user:
            raise AuthError(400, "Cannot set another user's avatar_url")
        # When changes are disabled by config, only allow setting the initial
        # avatar (i.e. when none is set yet), unless an admin acts.
        if not by_admin and not self.hs.config.registration.enable_set_avatar_url:
            profile = await self.store.get_profileinfo(target_user.localpart)
            if profile.avatar_url:
                raise SynapseError(
                    400, "Changing avatar is disabled on this server", Codes.FORBIDDEN
                )
        if not isinstance(new_avatar_url, str):
            raise SynapseError(
                400, "'avatar_url' must be a string", errcode=Codes.INVALID_PARAM
            )
        if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
            raise SynapseError(
                400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,)
            )
        # Enforce configured size/mimetype limits on the referenced media.
        if not await self.check_avatar_size_and_mime_type(new_avatar_url):
            raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN)
        # An empty string clears the avatar (stored as None).
        avatar_url_to_set: Optional[str] = new_avatar_url
        if new_avatar_url == "":
            avatar_url_to_set = None
        # Same like set_displayname
        if by_admin:
            requester = create_requester(
                target_user, authenticated_entity=requester.authenticated_entity
            )
        await self.store.set_profile_avatar_url(
            target_user.localpart, avatar_url_to_set
        )
        # Propagate the change to the user directory and any module callbacks.
        profile = await self.store.get_profileinfo(target_user.localpart)
        await self.user_directory_handler.handle_local_profile_change(
            target_user.to_string(), profile
        )
        await self._third_party_rules.on_profile_update(
            target_user.to_string(), profile, by_admin, deactivation
        )
        await self._update_join_states(requester, target_user)
@cached()
async def check_avatar_size_and_mime_type(self, mxc: str) -> bool:
"""Check that the size and content type of the avatar at the given MXC URI are
within the configured limits.
Args:
mxc: The MXC URI at which the avatar can be found.
Returns:
A boolean indicating whether the file can be allowed to be set as an avatar.
"""
if not self.max_avatar_size and not self.allowed_avatar_mimetypes:
return True
server_name, _, media_id = parse_and_validate_mxc_uri(mxc)
if server_name == self.server_name:
media_info = await self.store.get_local_media(media_id)
else:
media_info = await self.store.get_cached_remote_media(server_name, media_id)
if media_info is None:
# Both configuration options need to access the file's metadata, and
# retrieving remote avatars just for this becomes a bit of a faff, especially
# if e.g. the file is too big. It's also generally safe to assume most files
# used as avatar are uploaded locally, or if the upload didn't happen as part
# of a PUT request on /avatar_url that the file was at least previewed by the
# user locally (and therefore downloaded to the remote media cache).
logger.warning("Forbidding avatar change to %s: avatar not on server", mxc)
return False
if self.max_avatar_size:
# Ensure avatar does not exceed max allowed avatar size
if media_info["media_length"] > self.max_avatar_size:
logger.warning(
"Forbidding avatar change to %s: %d bytes is above the allowed size "
"limit",
mxc,
media_info["media_length"],
)
return False
if self.allowed_avatar_mimetypes:
# Ensure the avatar's file type is allowed
if (
self.allowed_avatar_mimetypes
and media_info["media_type"] not in self.allowed_avatar_mimetypes
):
logger.warning(
"Forbidding avatar change to %s: mimetype %s not allowed",
mxc,
media_info["media_type"],
)
return False
return True
    async def on_profile_query(self, args: JsonDict) -> JsonDict:
        """Handles federation profile query requests.
        Args:
            args: The federation query args; expects "user_id" and an
                optional "field" of "displayname" or "avatar_url".
        Returns:
            The requested field(s) for a local user.
        """
        if not self.hs.config.federation.allow_profile_lookup_over_federation:
            raise SynapseError(
                403,
                "Profile lookup over federation is disabled on this homeserver",
                Codes.FORBIDDEN,
            )
        user = UserID.from_string(args["user_id"])
        if not self.hs.is_mine(user):
            raise SynapseError(400, "User is not hosted on this homeserver")
        just_field = args.get("field", None)
        response = {}
        try:
            # When no specific field is requested, return both.
            if just_field is None or just_field == "displayname":
                response["displayname"] = await self.store.get_profile_displayname(
                    user.localpart
                )
            if just_field is None or just_field == "avatar_url":
                response["avatar_url"] = await self.store.get_profile_avatar_url(
                    user.localpart
                )
        except StoreError as e:
            if e.code == 404:
                raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
            raise
        return response
    async def _update_join_states(
        self, requester: Requester, target_user: UserID
    ) -> None:
        """Broadcast a (local) user's profile change by re-sending a join
        membership event to every room they are in."""
        if not self.hs.is_mine(target_user):
            return
        await self.request_ratelimiter.ratelimit(requester)
        # Do not actually update the room state for shadow-banned users.
        if requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(random.randint(1, 10))
            return
        room_ids = await self.store.get_rooms_for_user(target_user.to_string())
        for room_id in room_ids:
            handler = self.hs.get_room_member_handler()
            try:
                # Assume the target_user isn't a guest,
                # because we don't let guests set profile or avatar data.
                await handler.update_membership(
                    requester,
                    target_user,
                    room_id,
                    "join", # We treat a profile update like a join.
                    ratelimit=False, # Try to hide that these events aren't atomic.
                )
            except Exception as e:
                # Best effort: failing to update one room should not block the
                # profile change or updates to the remaining rooms.
                logger.warning(
                    "Failed to update join event for room %s - %s", room_id, str(e)
                )
async def check_profile_query_allowed(
    self, target_user: UserID, requester: Optional[UserID] = None
) -> None:
    """Checks whether a profile query is allowed. If the
    'require_auth_for_profile_requests' config flag is set to True and a
    'requester' is provided, the query is only allowed if the two users
    share a room.

    Args:
        target_user: The owner of the queried profile.
        requester: The user querying for the profile.

    Raises:
        SynapseError(403): The two users share no room, or one user couldn't
        be found to be in any room the server is in, and therefore the query
        is denied.
    """
    # Implementation of MSC1301: don't allow looking up profiles if the
    # requester isn't in the same room as the target. We expect requester to
    # be None when this function is called outside of a profile query, e.g.
    # when building a membership event. In this case, we must allow the
    # lookup.
    if (
        not self.hs.config.server.limit_profile_requests_to_users_who_share_rooms
        or not requester
    ):
        return

    # Always allow the user to query their own profile.
    if target_user.to_string() == requester.to_string():
        return

    try:
        requester_rooms = await self.store.get_rooms_for_user(requester.to_string())
        target_user_rooms = await self.store.get_rooms_for_user(
            target_user.to_string()
        )

        # Check if the room lists have no elements in common.
        if requester_rooms.isdisjoint(target_user_rooms):
            raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN)
    except StoreError as e:
        if e.code == 404:
            # This likely means that one of the users doesn't exist,
            # so we act as if we couldn't find the profile.
            raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN)
        raise
@wrap_as_background_process("Update remote profile")
async def _update_remote_profile_cache(self) -> None:
    """Called periodically to check profiles of remote users we haven't
    checked in a while.
    """
    entries = await self.store.get_remote_profile_cache_entries_that_expire(
        last_checked=self.clock.time_msec() - self.PROFILE_UPDATE_EVERY_MS
    )

    for user_id, displayname, avatar_url in entries:
        # Drop cache entries that no local user is subscribed to anymore.
        is_subscribed = await self.store.is_subscribed_remote_profile_for_user(
            user_id
        )
        if not is_subscribed:
            await self.store.maybe_delete_remote_profile_cache(user_id)
            continue

        try:
            profile = await self.federation.make_query(
                destination=get_domain_from_id(user_id),
                query_type="profile",
                args={"user_id": user_id},
                ignore_backoff=True,
            )
        except Exception:
            logger.exception("Failed to get avatar_url")
            # On failure, keep the stale values but still refresh the
            # last_check timestamp so we don't retry this user immediately.
            await self.store.update_remote_profile_cache(
                user_id, displayname, avatar_url
            )
            continue

        # Ignore non-string (malformed or absent) fields from the remote.
        new_name = profile.get("displayname")
        if not isinstance(new_name, str):
            new_name = None

        new_avatar = profile.get("avatar_url")
        if not isinstance(new_avatar, str):
            new_avatar = None

        # We always hit update to update the last_check timestamp
        await self.store.update_remote_profile_cache(user_id, new_name, new_avatar)
| 38.452555 | 90 | 0.600228 |
543059a0ec7cd558f8986b264b822d4ec7709ed4 | 2,145 | py | Python | gpplot/style.py | gpp-rnd/gpplot | 627a2feb398fe8de5539ee6d0ae3150079578a7a | [
"MIT"
] | 2 | 2020-06-19T19:35:14.000Z | 2020-07-22T17:24:02.000Z | gpplot/style.py | gpp-rnd/gpplot | 627a2feb398fe8de5539ee6d0ae3150079578a7a | [
"MIT"
] | 1 | 2020-08-23T21:47:57.000Z | 2020-08-23T21:47:57.000Z | gpplot/style.py | gpp-rnd/gpplot | 627a2feb398fe8de5539ee6d0ae3150079578a7a | [
"MIT"
] | null | null | null | """style module. Contains functions to standardize styles for matplotlib-based plots"""
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
def discrete_palette(palette='Set2', n=8):
    """Default discrete palette.

    Parameters
    ----------
    palette: str, optional
        Name of the seaborn/matplotlib palette to use
    n: int, optional
        Number of colors to include in the palette

    Returns
    -------
    seaborn color palette
    """
    return sns.color_palette(palette, n)
def diverging_cmap(cmap='RdBu_r'):
    """Default diverging colormap.

    Parameters
    ----------
    cmap: str, optional
        Name of the diverging colormap

    Returns
    -------
    str
        The colormap name, for use as a matplotlib ``cmap`` argument
    """
    return cmap
def sequential_cmap(cmap='viridis'):
    """Default sequential colormap.

    Parameters
    ----------
    cmap: str, optional
        Name of the sequential colormap

    Returns
    -------
    str
        The colormap name, for use as a matplotlib ``cmap`` argument
    """
    return cmap
def set_aesthetics(style='ticks', context='notebook', font='Arial',
                   font_scale=1, palette=None, rc=None):
    """Set aesthetics for plotting, using seaborn.set_style and matplotlib.rcParams

    Parameters
    ----------
    style: str, optional
        One of darkgrid, whitegrid, dark, white, ticks
    context: str, optional
        One of paper, notebook, talk, poster
    font: str, optional
        Font family
    font_scale: int, optional
        Scaling factor to scale the size of font elements
    palette: str or seaborn.color_palette, optional
        Discrete color palette to use in plots, defaults to gpplot.discrete_palette
    rc: dict, optional
        Mappings to pass to matplotlib.rcParams
    """
    if palette is None:
        palette = discrete_palette()
    sns.set(style=style, context=context, font=font,
            palette=palette,
            font_scale=font_scale)
    # fonttype 42 embeds fonts as TrueType so PDF text remains editable.
    mpl.rc('pdf', fonttype=42)
    if rc is not None:
        # Bug fix: iterating a dict directly yields only keys, so the old
        # ``for key, value in rc`` could not unpack the mapping; use .items().
        for key, value in rc.items():
            mpl.rcParams[key] = value
def savefig(path, fig=None, bbox_inches='tight', transparent=True, **kwargs):
    """Save a figure with sensible defaults.

    Parameters
    ----------
    path: str
        Location to save figure
    fig: matplotlib.figure.Figure, optional
        Figure to be saved; defaults to the current figure
    bbox_inches: str, optional
        Bounding box of figure
    transparent: bool, optional
        Whether to include a background for the plot
    **kwargs
        Other keyword arguments are passed through to matplotlib.pyplot.savefig
    """
    target = plt.gcf() if fig is None else fig
    target.savefig(path, bbox_inches=bbox_inches, transparent=transparent, **kwargs)
| 29.791667 | 87 | 0.665734 |
667785310a7968737e3c9a743979877d5fec08c5 | 1,048 | py | Python | sendgrid/helpers/mail/group_id.py | modernwarfareuplink/sendgrid-python | b360223622418216f89a98278cfa1cde3e2a9ceb | [
"MIT"
] | 1,268 | 2015-01-07T01:26:41.000Z | 2022-03-31T12:40:59.000Z | sendgrid/helpers/mail/group_id.py | modernwarfareuplink/sendgrid-python | b360223622418216f89a98278cfa1cde3e2a9ceb | [
"MIT"
] | 881 | 2015-01-08T23:14:57.000Z | 2022-03-30T15:10:21.000Z | sendgrid/helpers/mail/group_id.py | modernwarfareuplink/sendgrid-python | b360223622418216f89a98278cfa1cde3e2a9ceb | [
"MIT"
class GroupId(object):
    """The unsubscribe group ID to associate with this email."""

    def __init__(self, group_id=None):
        """Create a GroupId object.

        :param group_id: The unsubscribe group to associate with this email.
        :type group_id: integer, optional
        """
        # None means "no unsubscribe group set yet".
        self._group_id = group_id if group_id is not None else None

    @property
    def group_id(self):
        """The unsubscribe group to associate with this email.

        :rtype: integer
        """
        return self._group_id

    @group_id.setter
    def group_id(self, value):
        """Set the unsubscribe group to associate with this email.

        :param value: The unsubscribe group to associate with this email.
        :type value: integer
        """
        self._group_id = value

    def get(self):
        """Get a JSON-ready representation of this GroupId.

        :returns: This GroupId, ready for use in a request body.
        :rtype: integer
        """
        return self.group_id
| 26.2 | 76 | 0.604008 |
556747fbf93023c2764ce2504210ac942a3a6f29 | 1,027 | py | Python | src/asphalt/serialization/serializers/pickle.py | Asphalt-framework/asphalt-serialization | 07fbf1e2295e6e27c9b396f2b88943166759cf32 | [
"Apache-2.0"
] | null | null | null | src/asphalt/serialization/serializers/pickle.py | Asphalt-framework/asphalt-serialization | 07fbf1e2295e6e27c9b396f2b88943166759cf32 | [
"Apache-2.0"
] | null | null | null | src/asphalt/serialization/serializers/pickle.py | Asphalt-framework/asphalt-serialization | 07fbf1e2295e6e27c9b396f2b88943166759cf32 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
import pickle
from typing import Any
from ..api import Serializer
class PickleSerializer(Serializer):
    """
    Serializes objects using the standard library :mod:`pickle` module.

    .. warning:: This serializer is insecure because it allows execution of arbitrary
        code when deserializing. Avoid using this if at all possible.

    :param protocol: pickle protocol level to use (defaults to the highest possible)
    :raises ValueError: if ``protocol`` is outside the supported range
    """

    __slots__ = "protocol"

    def __init__(self, protocol: int = pickle.HIGHEST_PROTOCOL):
        # Validate with a real exception instead of ``assert`` so the check
        # is not stripped when Python runs with -O.
        if not 0 <= protocol <= pickle.HIGHEST_PROTOCOL:
            raise ValueError(
                f'"protocol" must be between 0 and {pickle.HIGHEST_PROTOCOL}'
            )
        self.protocol: int = protocol

    def serialize(self, obj: Any) -> bytes:
        """Pickle ``obj`` and return the resulting bytes."""
        return pickle.dumps(obj, protocol=self.protocol)

    def deserialize(self, payload: bytes) -> Any:
        """Unpickle ``payload`` (trusted input only) into an object."""
        return pickle.loads(payload)

    @property
    def mimetype(self) -> str:
        """MIME type identifying pickled payloads."""
        return "application/python-pickle"
| 27.756757 | 85 | 0.685492 |
de09f558b7eb400207a486f01f9d13a7f8bb39d3 | 1,797 | py | Python | 1_open3d/27_non_blocking_visualization.py | lyffly/Python-3DPointCloud-and-RGBD | b3b973ec96045a34d3540522a115c5a77f4de136 | [
"Apache-2.0"
] | 16 | 2019-10-28T01:17:09.000Z | 2022-01-20T08:26:06.000Z | 1_open3d/27_non_blocking_visualization.py | lyffly/Python-3DPointCloud-and-RGBD | b3b973ec96045a34d3540522a115c5a77f4de136 | [
"Apache-2.0"
] | null | null | null | 1_open3d/27_non_blocking_visualization.py | lyffly/Python-3DPointCloud-and-RGBD | b3b973ec96045a34d3540522a115c5a77f4de136 | [
"Apache-2.0"
] | 3 | 2020-04-07T08:48:28.000Z | 2021-09-22T14:47:42.000Z | # coding = utf-8
# coding by liuyunfei
# origin code from open3d samples(github)
import numpy as np
import open3d as op3
import matplotlib.pyplot as plt
import copy
import time
from trajectory_io import *
import os
import sys
if __name__ == "__main__":
    # Show detailed Open3D log output while the demo runs.
    op3.utility.set_verbosity_level(op3.utility.VerbosityLevel.Debug)

    source_raw = op3.io.read_point_cloud("demodata/ICP/cloud_bin_0.pcd")
    target_raw = op3.io.read_point_cloud("demodata/ICP/cloud_bin_1.pcd")
    # Downsample both clouds so each ICP iteration is fast enough to animate.
    source = source_raw.voxel_down_sample(voxel_size = 0.02)
    target = target_raw.voxel_down_sample(voxel_size = 0.02)

    # Rough initial guess for the source->target transform (4x4 homogeneous).
    trans = [[0.862,0.011,-0.507,0.0],
    [-0.139,0.967,-0.215,0.7],
    [0.487,0.255,0.835,-1.4],
    [0.0,0.0,0.0,1.0]]
    source.transform(trans)

    # Negate Y and Z so the clouds display upright in the viewer.
    flip_tranform = [[1,0,0,0],
    [0,-1,0,0],
    [0,0,-1,0],
    [0,0,0,1]]
    source.transform(flip_tranform)
    target.transform(flip_tranform)

    # Non-blocking visualizer: we drive the render loop ourselves below.
    vis =op3.visualization.Visualizer()
    vis.create_window(width=1280,height=720)
    vis.add_geometry(source)
    vis.add_geometry(target)
    threshold =0.05
    icp_iteration =200
    save_image = False

    # Give the window time to appear before starting the animation.
    time.sleep(6)

    for i in range(icp_iteration):
        # Run a single point-to-plane ICP step per frame (max_iteration=1)
        # so the alignment converges visibly over successive frames.
        reg_p2l = op3.registration.registration_icp(
            source,
            target,
            threshold,
            np.identity(4),
            op3.registration.TransformationEstimationPointToPlane(),
            op3.registration.ICPConvergenceCriteria(max_iteration=1)
        )
        source.transform(reg_p2l.transformation)

        # Refresh the scene with the newly transformed source cloud.
        vis.update_geometry()
        vis.poll_events()
        vis.update_renderer()
        time.sleep(0.05)
        if save_image:
            vis.capture_screen_image("temp_%04d.jpg" %i)

    vis.destroy_window()
| 26.820896 | 72 | 0.631608 |
42e81b691c3ee3b3db9a896e42ffc1a11976046d | 2,127 | py | Python | saleor/product/utils/__init__.py | bennetritters/saleor | f4b0beaf586f15f260d838ddaba45bee5f133e0f | [
"CC-BY-4.0"
] | 1 | 2020-12-19T14:19:00.000Z | 2020-12-19T14:19:00.000Z | saleor/product/utils/__init__.py | bennetritters/saleor | f4b0beaf586f15f260d838ddaba45bee5f133e0f | [
"CC-BY-4.0"
] | 1 | 2020-11-10T13:57:37.000Z | 2020-11-10T13:57:37.000Z | saleor/product/utils/__init__.py | bennetritters/saleor | f4b0beaf586f15f260d838ddaba45bee5f133e0f | [
"CC-BY-4.0"
] | 1 | 2020-11-11T18:53:55.000Z | 2020-11-11T18:53:55.000Z | from typing import TYPE_CHECKING, List, Union
from urllib.parse import urlencode
from django.conf import settings
from django.db import transaction
from ...core.taxes import TaxedMoney, zero_taxed_money
from ..tasks import update_products_minimal_variant_prices_task
if TYPE_CHECKING:
# flake8: noqa
from datetime import date, datetime
from django.db.models.query import QuerySet
from ..models import Category, Product, ProductVariant
def calculate_revenue_for_variant(
    variant: "ProductVariant", start_date: Union["date", "datetime"]
) -> TaxedMoney:
    """Calculate total revenue generated by a product variant.

    Only order lines created on or after ``start_date`` are counted.
    """
    total = zero_taxed_money()
    for line in variant.order_lines.all():
        if line.order.created < start_date:
            continue
        quantity = line.quantity
        total += TaxedMoney(
            line.unit_price_net * quantity,
            line.unit_price_gross * quantity,
        )
    return total
@transaction.atomic
def delete_categories(categories_ids: List[str]):
    """Delete categories and perform all necessary actions.

    Set products of deleted categories as unpublished, delete categories
    and update products minimal variant prices.
    """
    from ..models import Category, Product

    categories = Category.objects.select_for_update().filter(pk__in=categories_ids)
    # Bug fix: ``prefetch_related`` returns a *new* queryset; the previous
    # code discarded the result, making the call a no-op.
    categories = categories.prefetch_related("products")

    products = Product.objects.none()
    for category in categories:
        products = products | collect_categories_tree_products(category)

    products.update(is_published=False, publication_date=None)
    product_ids = list(products.values_list("id", flat=True))
    categories.delete()
    update_products_minimal_variant_prices_task.delay(product_ids=product_ids)
def collect_categories_tree_products(category: "Category") -> "QuerySet[Product]":
    """Collect products from all levels in category tree.

    Returns the union of the category's own products with the products of
    every descendant category.
    """
    collected = category.products.all()
    for child in category.get_descendants():
        collected = collected | child.products.all()
    return collected
| 34.868852 | 83 | 0.748002 |
5ca8779f07cbbfd460bdf0263592e0ec83d0c273 | 3,811 | py | Python | config/api_router.py | jaseemkm/care | 51c081121f5dcb4db3a721f563862b5a05b16cc8 | [
"MIT"
] | null | null | null | config/api_router.py | jaseemkm/care | 51c081121f5dcb4db3a721f563862b5a05b16cc8 | [
"MIT"
] | null | null | null | config/api_router.py | jaseemkm/care | 51c081121f5dcb4db3a721f563862b5a05b16cc8 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls import include, url
from rest_framework.routers import DefaultRouter, SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter
from care.facility.api.viewsets.ambulance import AmbulanceCreateViewSet, AmbulanceViewSet
from care.facility.api.viewsets.facility import FacilityViewSet, AllFacilityViewSet
from care.facility.api.viewsets.facility_capacity import FacilityCapacityViewSet
from care.facility.api.viewsets.hospital_doctor import HospitalDoctorViewSet
from care.facility.api.viewsets.patient import FacilityPatientStatsHistoryViewSet, PatientSearchViewSet, PatientViewSet
from care.facility.api.viewsets.patient_consultation import DailyRoundsViewSet, PatientConsultationViewSet
from care.facility.api.viewsets.patient_sample import PatientSampleViewSet
from care.facility.api.viewsets.inventory import (
FacilityInventoryItemViewSet,
FacilityInventoryLogViewSet,
FacilityInventorySummaryViewSet,
FacilityInventoryMinQuantityViewSet,
)
from care.facility.api.viewsets.patient_search import PatientScopedSearchViewSet
from care.users.api.viewsets.lsg import DistrictViewSet, LocalBodyViewSet, StateViewSet
from care.users.api.viewsets.users import UserViewSet
from care.facility.summarisation.facility_capacity import FacilityCapacitySummaryViewSet
from care.facility.summarisation.patient_summary import PatientSummaryViewSet
from care.facility.summarisation.tests_summary import TestsSummaryViewSet
if settings.DEBUG:
    # DefaultRouter adds a browsable API root view, handy during development.
    router = DefaultRouter()
else:
    router = SimpleRouter()

# Top-level resources.
router.register("users", UserViewSet)
router.register("facility", FacilityViewSet)
router.register("getallfacilities", AllFacilityViewSet)
router.register("ambulance/create", AmbulanceCreateViewSet)
router.register("ambulance", AmbulanceViewSet)
router.register("patient/search", PatientSearchViewSet)
router.register("patient", PatientViewSet)
router.register("consultation", PatientConsultationViewSet)

# Local Body / LSG Viewsets
router.register("state", StateViewSet)
router.register("district", DistrictViewSet)
router.register("local_body", LocalBodyViewSet)

# Patient Sample
router.register("test_sample", PatientSampleViewSet)

# Patient Search
router.register("patient_search", PatientScopedSearchViewSet)

# Summarisation
router.register("facility_summary", FacilityCapacitySummaryViewSet, basename="summary-facility")
router.register("patient_summary", PatientSummaryViewSet, basename="summary-patient")
router.register("tests_summary", TestsSummaryViewSet, basename="summary-tests")

router.register("items", FacilityInventoryItemViewSet)

# Facility-scoped sub-resources, e.g. /facility/{facility_pk}/capacity/.
# Ref: https://github.com/alanjds/drf-nested-routers
facility_nested_router = NestedSimpleRouter(router, r"facility", lookup="facility")
facility_nested_router.register(r"hospital_doctor", HospitalDoctorViewSet)
facility_nested_router.register(r"capacity", FacilityCapacityViewSet)
facility_nested_router.register(r"patient_stats", FacilityPatientStatsHistoryViewSet)
facility_nested_router.register(r"inventory", FacilityInventoryLogViewSet)
facility_nested_router.register(r"inventorysummary", FacilityInventorySummaryViewSet)
facility_nested_router.register(r"min_quantity", FacilityInventoryMinQuantityViewSet)

# Patient-scoped sub-resources.
patient_nested_router = NestedSimpleRouter(router, r"patient", lookup="patient")
patient_nested_router.register(r"test_sample", PatientSampleViewSet)

# Consultation-scoped sub-resources.
consultation_nested_router = NestedSimpleRouter(router, r"consultation", lookup="consultation")
consultation_nested_router.register(r"daily_rounds", DailyRoundsViewSet)

app_name = "api"
urlpatterns = [
    url(r"^", include(router.urls)),
    url(r"^", include(facility_nested_router.urls)),
    url(r"^", include(patient_nested_router.urls)),
    url(r"^", include(consultation_nested_router.urls)),
]
| 44.313953 | 119 | 0.844398 |
cb3128ff6c12794e5062572c1365d7706620abc4 | 144 | py | Python | smart_cctv/myauth/apps.py | Ming-desu/smart-cctv | b4214ef362bcdd02802f7287a6fd6237e4b02535 | [
"MIT"
] | null | null | null | smart_cctv/myauth/apps.py | Ming-desu/smart-cctv | b4214ef362bcdd02802f7287a6fd6237e4b02535 | [
"MIT"
] | null | null | null | smart_cctv/myauth/apps.py | Ming-desu/smart-cctv | b4214ef362bcdd02802f7287a6fd6237e4b02535 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class MyauthConfig(AppConfig):
    """Django application configuration for the ``myauth`` app."""

    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'myauth'
| 20.571429 | 56 | 0.756944 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.