content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
# Generated by Django 2.0.1 on 2018-01-23 11:13
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.1): adjust two foreign keys on the Page model."""

    dependencies = [
        ('pages', '0013_auto_20170829_0515'),
    ]

    operations = [
        migrations.AlterField(
            model_name='page',
            name='ad_section',
            # Optional FK: deleting the AdSection nulls the page's reference
            # (SET_NULL) instead of cascading.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pages_page_related', to='ads.AdSection', verbose_name='Ads'),
        ),
        migrations.AlterField(
            model_name='page',
            name='module',
            # Required FK: PROTECT blocks deletion of a PageModule still in use.
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='pages_page_related', to='pages.PageModule', verbose_name='Module'),
        ),
    ]
|
nilq/baby-python
|
python
|
from metaflow import resources
from metaflow.api import FlowSpec, step
class ResourcesFlow(FlowSpec):
    """Two-step flow exercising per-step @resources memory requests."""

    @resources(memory=1_000)
    @step
    def one(self):
        # Produces artifact `a`, read by step `two`.
        self.a = 111

    @resources(memory=2_000)
    @step
    def two(self):
        # Consumes `a` set in `one`; presumably runs after it — the step
        # transitions are not visible here, TODO confirm.
        self.b = self.a * 2
class ResourcesFlow2(ResourcesFlow):
    """Subclass that inherits ResourcesFlow's steps and decorators unchanged."""

    pass
|
nilq/baby-python
|
python
|
import struct
from slmkiii.template.input.button import Button
class PadHit(Button):
    """Pad-hit input settings parsed from an SL MkIII template record.

    Extends Button with velocity limits and the velocity-range method,
    read from byte offsets 28-30 of the raw record.
    """

    def __init__(self, data=None):
        super(PadHit, self).__init__(data)
        # self.data(n) is presumably the base class's byte accessor at
        # offset n — TODO confirm against Button.
        self.max_velocity = self.data(28)
        self.min_velocity = self.data(29)
        self.range_method = self.data(30)

    def from_dict(self, data):
        """Rebuild the raw record from a dict as produced by export_dict()."""
        super(PadHit, self).from_dict(data, extend=True)
        # '>H' writes a two-byte zero placeholder ahead of the three
        # one-byte settings (big-endian).
        self._data += struct.pack(
            '>HBBB',
            0,
            data['max_velocity'],
            data['min_velocity'],
            data['range_method'],
        )
        # Pad the record to its fixed length. NOTE(review): '\0' padding
        # assumes self._data is a Python 2 str; bytes would need b'\0'.
        self._data = self._data.ljust(self.length, '\0')

    def export_dict(self):
        """Return the base dict extended with this pad's velocity/range settings."""
        data = super(PadHit, self).export_dict()
        data.update({
            'max_velocity': self.max_velocity,
            'min_velocity': self.min_velocity,
            'range_method': self.range_method,
            'range_method_name': self.range_method_name,
        })
        return data

    @property
    def range_method_name(self):
        """Human-readable name for the range-method byte at offset 30."""
        method_names = {
            0: 'None',
            1: 'Clip',
            2: 'Scale',
        }
        return method_names[self.data(30)]
|
nilq/baby-python
|
python
|
"""Implementation of the MCTS algorithm for Tic Tac Toe Game."""
from typing import List
from typing import Optional
from typing import Tuple
import numpy as np
import numpy.typing as npt
from mctspy.games.common import TwoPlayersAbstractGameState
from mctspy.tree.nodes import TwoPlayersGameMonteCarloTreeSearchNode
from mctspy.tree.search import MonteCarloTreeSearch
class Move:
    """A tic-tac-toe move: target cell coordinates plus the mark to place."""

    def __init__(self, x_coordinate: int, y_coordinate: int, value: float) -> None:
        """Record the target cell and the player's mark value."""
        self.x_coordinate, self.y_coordinate, self.value = (
            x_coordinate,
            y_coordinate,
            value,
        )

    def __repr__(self) -> str:
        """Compact debug form, e.g. ``x:0 y:2 v:1.0``."""
        return f"x:{self.x_coordinate} y:{self.y_coordinate} v:{self.value}"
class TicTacToeGameState(TwoPlayersAbstractGameState):  # type: ignore[misc]
    """Tic-tac-toe position on an n x n board, usable as an MCTS game state.

    The board is a square numpy array holding 1 (player x), -1 (player o)
    or 0 (empty cell).
    """

    # Mark values stored in the board for the two players.
    x = 1
    o = -1

    def __init__(self, state: npt.NDArray[np.float64], next_to_move: float = 1) -> None:
        """Wrap a square 2-D board; ``next_to_move`` is the mark about to play."""
        if len(state.shape) != 2 or state.shape[0] != state.shape[1]:
            raise ValueError("Only 2D square boards allowed")
        self.board = state
        self.board_size: int = state.shape[0]
        self.next_to_move = next_to_move

    @property
    def game_result(self) -> Optional[float]:
        """Returns game result.

        This property should return:
            1 if player #1 wins
            -1 if player #2 wins
            0 if there is a draw
            None if result is unknown

        Returns
        -------
        float or None
        """
        # A completed row/column/diagonal of one mark sums to +-board_size.
        rowsum = np.sum(self.board, 0)
        colsum = np.sum(self.board, 1)
        diag_sum_tl = self.board.trace()
        diag_sum_tr = self.board[::-1].trace()
        player_one_wins = any(rowsum == self.board_size)
        # uses fact that python booleans are considered numeric type
        player_one_wins += any(colsum == self.board_size)  # type: ignore[assignment]
        player_one_wins += diag_sum_tl == self.board_size
        player_one_wins += diag_sum_tr == self.board_size
        if player_one_wins:
            return self.x
        player_two_wins = any(rowsum == -self.board_size)
        # uses fact that python booleans are considered numeric type
        player_two_wins += any(colsum == -self.board_size)  # type: ignore[assignment]
        player_two_wins += diag_sum_tl == -self.board_size
        player_two_wins += diag_sum_tr == -self.board_size
        if player_two_wins:
            return self.o
        # No winner and no empty cell left: draw.
        if np.all(self.board != 0):
            return 0.0
        # if not over - no result
        return None

    def is_game_over(self) -> bool:
        """Returns boolean indicating if the game is over.

        Equivalent to checking that the ``game_result`` property is not None.

        Returns
        -------
        boolean
        """
        return self.game_result is not None

    def is_move_legal(self, move: Move) -> bool:
        """Checks if move is legal (right player, on the board, cell empty)."""
        # check if correct player moves
        if move.value != self.next_to_move:
            return False
        # check if inside the board on x-axis
        x_in_range = 0 <= move.x_coordinate < self.board_size
        if not x_in_range:
            return False
        # check if inside the board on y-axis
        y_in_range = 0 <= move.y_coordinate < self.board_size
        if not y_in_range:
            return False
        # finally check if board field not occupied yet
        return bool(self.board[move.x_coordinate, move.y_coordinate] == 0)

    def move(self, move: Move) -> "TicTacToeGameState":
        """Consumes action and returns resulting TwoPlayersAbstractGameState.

        Raises ValueError for an illegal move; the current state is left
        untouched (the move is played on a copy of the board).

        Returns
        -------
        TwoPlayersAbstractGameState
        """
        if not self.is_move_legal(move):
            raise ValueError(f"move {move} on board {self.board} is not legal")
        new_board = np.copy(self.board)  # type: ignore[no-untyped-call]
        new_board[move.x_coordinate, move.y_coordinate] = move.value
        # Alternate the player to move.
        if self.next_to_move == TicTacToeGameState.x:
            next_to_move = TicTacToeGameState.o
        else:
            next_to_move = TicTacToeGameState.x
        return TicTacToeGameState(new_board, next_to_move)

    def get_legal_actions(self) -> List[Move]:
        """Returns list of legal action at current game state.

        Every empty cell is a legal placement for the player to move.

        Returns
        -------
        list of AbstractGameAction
        """
        indices = np.where(self.board == 0)
        return [
            Move(coords[0], coords[1], self.next_to_move)
            for coords in list(zip(indices[0], indices[1]))
        ]
def from_mcts_grid_format(grid: List[List[float]]) -> List[List[int]]:
    """Convert an MCTS-side float grid back to a grid of ints."""
    return [list(map(int, row)) for row in grid]
def to_mcts_grid_format(grid: List[List[int]]) -> List[List[float]]:
    """Convert an int grid to the float grid the MCTS code expects."""
    return [list(map(float, row)) for row in grid]
def mcts_move(grid: List[List[int]], mark: int) -> Tuple[int, int]:
    """Computes best move.

    Runs 10,000 MCTS simulations from the given position and returns the
    (row, col) of the cell chosen for the player playing ``mark``.
    """
    board = to_mcts_grid_format(grid)
    current_player = float(mark)
    state = np.array(board)
    initial_board_state = TicTacToeGameState(state=state, next_to_move=current_player)
    root = TwoPlayersGameMonteCarloTreeSearchNode(state=initial_board_state)
    mcts = MonteCarloTreeSearch(root)
    best_node = mcts.best_action(10000)
    # The chosen cell is where the best child's board differs from its
    # parent's board by the current player's mark.
    board_diff = best_node.state.board - best_node.parent.state.board
    x_coords, y_coords = np.where(board_diff == current_player)
    # NOTE(review): these are numpy integer scalars, not builtin ints —
    # confirm callers accept them.
    chosen_cell = (x_coords[0], y_coords[0])
    return chosen_cell
|
nilq/baby-python
|
python
|
from django.contrib import admin
# bug fix: the original imported `include` from xml.etree.ElementInclude and
# `Catalog` from gettext (bad IDE auto-imports); URL inclusion needs
# django.urls.include with the app's URLconf module path.
from django.urls import include, re_path

# Project URL configuration: admin site plus the catalog app's own URLconf.
urlpatterns = [
    re_path(r'^admin/', admin.site.urls),
    re_path(r'^catalog/', include('catalog.urls')),
]
|
nilq/baby-python
|
python
|
from phq.kafka.consumer import _latest_distinct_messages
from phq.kafka import Message
def test_latest_distinct_messages():
    """De-duplication keeps, per id, the latest original Message object."""
    # 'abc' appears at indices 0 and 4, 'xyz' at 2 and 3.
    ids = ['abc', 'def', 'xyz', 'xyz', 'abc']
    messages = [Message(id=msg_id, payload={}) for msg_id in ids]

    distinct_messages = _latest_distinct_messages(messages)

    # One entry per distinct id, each being the *last* occurrence (identity,
    # not equality), ordered by last appearance.
    assert len(distinct_messages) == 3
    assert distinct_messages[0] is messages[1]
    assert distinct_messages[1] is messages[3]
    assert distinct_messages[2] is messages[4]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Checks that gyp fails on static_library targets which have several files with
the same basename.
"""
import TestGyp
test = TestGyp.TestGyp()
# Expect gyp to fail (status=1) on 'double-static.gyp', which defines a
# static_library with duplicate source basenames; stderr=None presumably
# leaves stderr unchecked — TODO confirm TestGyp semantics.
test.run_gyp('double-static.gyp', chdir='src', status=1, stderr=None)
test.pass_test()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
pySnarlNetLib
author: Łukasz Bołdys
licence: MIT
Copyright (c) 2009 Łukasz Bołdys
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import socket
# Library version as a tuple; joined with "." for the CLI --version string.
__version__ = (0, 1, 1)
__author__ = "Łukasz Bołdys"
class SnarlNet(object):
    """Minimal client for the Snarl network notification protocol (SNP 1.0).

    Each call formats a "type=SNP#?..." request line and ships it to the
    Snarl host over its own TCP connection.
    """

    # Class-level defaults; mutable per-instance state is initialised in
    # __init__ (the original shared one addedClasses list across instances).
    lastAppName = ""
    lastClassName = ""
    addedClasses = []  # shadowed by a per-instance list in __init__
    lastTimeout = 10
    ip = "127.0.0.1"  # if no ip provided then use localhost
    port = 9887  # if no port provided then use default snarl net port

    def __init__(self, *args, **argv):
        """
        Create object of class SnarlNet

        IP and port can be passed as 'ip' and 'port' parameters
        Ie. snarl = SnarlNet(ip="192.168.1.4", port=9887)

        When no parameters are passed then ip='127.0.0.1' and port=9887 are used
        """
        # Kept for backward compatibility only; __send now opens a fresh
        # socket per message (a closed socket cannot be reconnected).
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Per-instance mutable state (fixes the shared class-level list).
        self.addedClasses = []
        # bug fix: dict.has_key() is Python-2-only; use `in`.
        if "ip" in argv:
            self.ip = argv["ip"]
        if "port" in argv:
            self.port = argv["port"]

    def __send(self, sendStr):
        # One TCP connection per request: the original reused a single
        # socket, so only the first send could ever succeed. sendall()
        # guarantees the whole payload is written; encoding keeps this
        # working on Python 3.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((self.ip, self.port))
            sock.sendall(sendStr.encode("utf-8"))
        finally:
            sock.close()

    def register(self, appName):
        """
        Register application by appName
        """
        sendStr = "type=SNP#?version=1.0#?action=register#?app=%s\r\n" % (appName,)
        self.__send(sendStr)
        self.lastAppName = appName

    def unregister(self, appName=""):
        """
        Unregister application by appName. If appName is empty then tries to
        unregister application by self.lastAppName (last registered application).
        If self.lastAppName is empty then do nothing
        """
        if appName == "":
            # bug fix: the original read a bare `lastAppName` (NameError).
            if self.lastAppName == "":
                sys.stderr.write("No application to unregister")
                return
            appName = self.lastAppName
        sendStr = "type=SNP#?version=1.0#?action=unregister#?app=%s\r\n" % (appName,)
        self.__send(sendStr)
        self.lastAppName = ""

    def notify(self, title, text, **argv):
        """
        Send message with given title and text.

        If no appName or appClass is provided then uses
        self.lastAppName and/or self.lastClassName
        """
        appName = self.lastAppName
        className = self.lastClassName
        timeout = self.lastTimeout
        if "timeout" in argv:
            # bug fix: the original assigned `timeout = timeout` (a no-op),
            # silently ignoring a caller-supplied timeout.
            timeout = argv["timeout"]
        if "appName" in argv and argv["appName"] != "":
            appName = argv["appName"]
        if "className" in argv and argv["className"] != "":
            className = argv["className"]
        if appName == "":
            appName = "pySnarlNetLib"
        if className == "":
            className = "pySnarlNetLibClass"
        sendStr = "type=SNP#?version=1.0#?action=notification#?app=%s#?class=%s#?title=%s#?text=%s#?timeout=%d\r\n" % (appName, className, title, text, timeout)
        self.__send(sendStr)
        self.lastAppName = appName
        self.lastClassName = className
        self.lastTimeout = timeout

    def addclass(self, className, classTitle="", **argv):
        """
        Add class with provided name (className).

        If no classTitle is provided then sets classTitle to className.
        If no appName is provided then use self.lastAppName.
        If self.lastAppName is empty then do nothing
        """
        className = str(className)
        if className in self.addedClasses:
            sys.stderr.write("Class already added")
            return
        if className == "":
            sys.stderr.write("className can not be empty")
            return
        appName = self.lastAppName
        if classTitle == "":
            classTitle = className
        # bug fix: the original called argv.has_key["appName"] — square
        # brackets instead of parentheses, a guaranteed TypeError.
        if "appName" in argv:
            appName = argv["appName"]
        if appName == "":
            sys.stderr.write("No application to add class to")
            return
        sendStr = "type=SNP#?version=1.0#?action=add_class#?app=%s#?class=%s#?title=%s\r\n" % (appName, className, classTitle)
        self.__send(sendStr)
        self.lastAppName = appName
        self.lastClassName = className
        self.addedClasses.append(className)
if __name__ == '__main__':
    from optparse import OptionParser

    # Command-line front end: map -a ACTION (plus options/positionals)
    # onto the corresponding SnarlNet method.
    parser = OptionParser(usage="%prog -a ACTION [options] args", version="%prog " + ".".join([str(x) for x in __version__]))
    parser.add_option("-i", "--ipaddr", dest="host",
                      help="IP address of the machine with snarl installed (default: %default)",
                      type="string", default="127.0.0.1")
    parser.add_option("-p", "--port", dest="port",
                      help="Port on with Snarl is listening (default: %default)",
                      type="int", default=9887)
    parser.add_option("-n", "--appname", dest="appName", help="Application name",
                      type="string")
    parser.add_option("-c", "--classname", dest="className", help="Class name",
                      type="string")
    parser.add_option("-a", "--action", dest="action", choices=["register", "unregister", "addclass", "notify"],
                      help="Action to take (register, unregister, addclass, notify)", type="choice")
    parser.add_option("-t", "--timeout", dest="timeout", type="int",
                      help="How long snarl should display message", default=10)
    (options, args) = parser.parse_args()
    snarl = SnarlNet(ip=options.host, port=options.port)
    if not options.action:
        # bug fix: the original printed usage and then fell through,
        # exiting 0 without doing anything; fail explicitly instead.
        parser.print_usage()
        sys.exit(1)
    if options.action == "register":
        if options.appName is not None:
            appName = options.appName
        elif len(args) > 0:
            appName = args[0]
        else:
            parser.error("You need to provide application name")
        snarl.register(appName)
    elif options.action == "unregister":
        if options.appName is not None:
            appName = options.appName
        elif len(args) > 0:
            appName = args[0]
        else:
            parser.error("You need to provide application name")
        snarl.unregister(appName)
    elif options.action == "addclass":
        # App/class names may come from options or positional args.
        if options.appName is not None and options.className is not None:
            appName = options.appName
            className = options.className
        elif options.appName is not None and options.className is None:
            appName = options.appName
            if len(args) == 1:
                className = args[0]
            else:
                parser.error("You need to provide class name")
        elif options.appName is None and options.className is not None:
            className = options.className
            if len(args) == 1:
                appName = args[0]
            else:
                parser.error("You need to provide application name")
        else:
            if len(args) > 1:
                appName = args[0]
                className = args[1]
            else:
                # bug fix: the original called parser.error() here
                # unconditionally, even when both positionals were given.
                parser.error("You need to provide application name and class name")
        # bug fix: the original passed options.classTitle, which is not a
        # defined option (AttributeError); an empty classTitle makes
        # addclass default it to the class name.
        snarl.addclass(className, classTitle="", appName=appName)
    elif options.action == "notify":
        appName = ""
        className = ""
        if options.appName is not None:
            appName = options.appName
        if options.className is not None:
            className = options.className
        if len(args) > 0:
            title = args[0]
            text = " ".join(args[1:])
        else:
            parser.error("You need to provide at least a title")
        snarl.notify(title, text, appName=appName, className=className)
|
nilq/baby-python
|
python
|
from setuptools import setup
# read the contents of your README file
from os import path
# Load the README so PyPI can render the full project description.
this_directory = path.abspath(path.dirname(__file__))
readme_path = path.join(this_directory, "README.md")
with open(readme_path, encoding="utf-8") as readme_file:
    long_description = readme_file.read()

setup(
    name="rubrix",
    # other arguments omitted
    description="Open-source tool for tracking, exploring and labelling data for AI projects.",
    long_description=long_description,
    author="recognai",
    author_email="contact@recogn.ai",
    maintainer="recognai",
    maintainer_email="contact@recogn.ai",
    url="https://recogn.ai",
    license="Apache-2.0",
    keywords="data-science natural-language-processing artificial-intelligence knowledged-graph developers-tools human-in-the-loop mlops",
    long_description_content_type="text/markdown",
    use_scm_version=True,
)
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.7 on 2021-01-19 13:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.7): alter Project.owner FK to the user model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('payment_system', '0027_auto_20201216_1852'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='owner',
            # Deleting the owning user cascades to their projects.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owned_projects', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
Author : kyclark
Date : 2018-11-02
Purpose: Rock the Casbah
"""
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import sys
# --------------------------------------------------
def get_args():
    """Build and parse the command-line arguments.

    Returns the parsed namespace with `file` (required positional input
    path) and `outfile` (optional output path, default None).
    """
    parser = argparse.ArgumentParser(
        description='Argparse Python script',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument(
        'file', metavar='str', help='A positional argument')

    # bug fix: the option string contained a stray backtick ('--outfile`'),
    # which produced an unusable option and attribute name.
    parser.add_argument(
        '-o',
        '--outfile',
        help='Save to outfile',
        metavar='str',
        type=str,
        default=None)

    return parser.parse_args()
# --------------------------------------------------
def warn(msg):
    """Write *msg* (plus a newline) to STDERR without terminating."""
    sys.stderr.write(msg + '\n')
# --------------------------------------------------
def die(msg='Something bad happened'):
    """warn() and exit with error (status 1)."""
    warn(msg)
    sys.exit(1)
# --------------------------------------------------
def main():
    """Load the four-column count table, print summary stats, and plot a
    histogram of the non-zero counts."""
    args = get_args()
    data = pd.read_csv(args.file, names=['term', 'desc', 'domain', 'count'])
    # bug fix: the column is named 'count' (see names= above); indexing
    # with 'counts' raised a KeyError.
    counts = data['count']
    print(data.describe())
    # Only the non-zero counts are informative in the histogram.
    plt.hist(counts[counts > 0])
    plt.show()
# --------------------------------------------------
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
class Solution:
    def longestCommonPrefix(self, strs):
        """
        :type strs: List[str]
        :rtype: str

        Walk the strings column by column; stop at the first column whose
        characters disagree (zip also stops at the shortest string).
        """
        prefix_chars = []
        for column in zip(*strs):
            if len(set(column)) == 1:
                prefix_chars.append(column[0])
            else:
                break
        return ''.join(prefix_chars)
if __name__ == '__main__':
    # Ad-hoc manual check. Note the first assignment is immediately
    # overwritten, so only the ["dog", "racecar", "car"] case (expected "")
    # actually runs.
    strs = ["flower", "flow", "flight"]
    strs = ["dog", "racecar", "car"]
    # strs = ["caa", "a", "acb"]
    print(Solution().longestCommonPrefix(strs))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: uber/cadence/api/v1/tasklist.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database used to register the generated descriptors below.
_sym_db = _symbol_database.Default()


from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2


# File descriptor for uber/cadence/api/v1/tasklist.proto. serialized_pb is the
# compiled FileDescriptorProto emitted by protoc — do not edit by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='uber/cadence/api/v1/tasklist.proto',
  package='uber.cadence.api.v1',
  syntax='proto3',
  serialized_options=b'\n\027com.uber.cadence.api.v1B\010ApiProtoP\001Z/github.com/uber/cadence/.gen/proto/api/v1;apiv1',
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\"uber/cadence/api/v1/tasklist.proto\x12\x13uber.cadence.api.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\"I\n\x08TaskList\x12\x0c\n\x04name\x18\x01 \x01(\t\x12/\n\x04kind\x18\x02 \x01(\x0e\x32!.uber.cadence.api.v1.TaskListKind\"N\n\x10TaskListMetadata\x12:\n\x14max_tasks_per_second\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\"A\n\x19TaskListPartitionMetadata\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x17\n\x0fowner_host_name\x18\x02 \x01(\t\"\xa5\x01\n\x0eTaskListStatus\x12\x1a\n\x12\x62\x61\x63klog_count_hint\x18\x01 \x01(\x03\x12\x12\n\nread_level\x18\x02 \x01(\x03\x12\x11\n\tack_level\x18\x03 \x01(\x03\x12\x17\n\x0frate_per_second\x18\x04 \x01(\x01\x12\x37\n\rtask_id_block\x18\x05 \x01(\x0b\x32 .uber.cadence.api.v1.TaskIDBlock\"/\n\x0bTaskIDBlock\x12\x10\n\x08start_id\x18\x01 \x01(\x03\x12\x0e\n\x06\x65nd_id\x18\x02 \x01(\x03\"m\n\nPollerInfo\x12\x34\n\x10last_access_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x10\n\x08identity\x18\x02 \x01(\t\x12\x17\n\x0frate_per_second\x18\x03 \x01(\x01\"\x92\x01\n\x19StickyExecutionAttributes\x12\x37\n\x10worker_task_list\x18\x01 \x01(\x0b\x32\x1d.uber.cadence.api.v1.TaskList\x12<\n\x19schedule_to_start_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration*`\n\x0cTaskListKind\x12\x1a\n\x16TASK_LIST_KIND_INVALID\x10\x00\x12\x19\n\x15TASK_LIST_KIND_NORMAL\x10\x01\x12\x19\n\x15TASK_LIST_KIND_STICKY\x10\x02*d\n\x0cTaskListType\x12\x1a\n\x16TASK_LIST_TYPE_INVALID\x10\x00\x12\x1b\n\x17TASK_LIST_TYPE_DECISION\x10\x01\x12\x1b\n\x17TASK_LIST_TYPE_ACTIVITY\x10\x02\x42V\n\x17\x63om.uber.cadence.api.v1B\x08\x41piProtoP\x01Z/github.com/uber/cadence/.gen/proto/api/v1;apiv1b\x06proto3'
  ,
  dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,])
# Generated enum descriptor for uber.cadence.api.v1.TaskListKind — do not edit.
_TASKLISTKIND = _descriptor.EnumDescriptor(
  name='TaskListKind',
  full_name='uber.cadence.api.v1.TaskListKind',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='TASK_LIST_KIND_INVALID', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TASK_LIST_KIND_NORMAL', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TASK_LIST_KIND_STICKY', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=855,
  serialized_end=951,
)
_sym_db.RegisterEnumDescriptor(_TASKLISTKIND)

TaskListKind = enum_type_wrapper.EnumTypeWrapper(_TASKLISTKIND)
# Generated enum descriptor for uber.cadence.api.v1.TaskListType — do not edit.
_TASKLISTTYPE = _descriptor.EnumDescriptor(
  name='TaskListType',
  full_name='uber.cadence.api.v1.TaskListType',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='TASK_LIST_TYPE_INVALID', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TASK_LIST_TYPE_DECISION', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TASK_LIST_TYPE_ACTIVITY', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=953,
  serialized_end=1053,
)
_sym_db.RegisterEnumDescriptor(_TASKLISTTYPE)

TaskListType = enum_type_wrapper.EnumTypeWrapper(_TASKLISTTYPE)
# Module-level aliases for the enum values (standard protoc output).
TASK_LIST_KIND_INVALID = 0
TASK_LIST_KIND_NORMAL = 1
TASK_LIST_KIND_STICKY = 2
TASK_LIST_TYPE_INVALID = 0
TASK_LIST_TYPE_DECISION = 1
TASK_LIST_TYPE_ACTIVITY = 2
# Generated message descriptor for uber.cadence.api.v1.TaskList — do not edit.
_TASKLIST = _descriptor.Descriptor(
  name='TaskList',
  full_name='uber.cadence.api.v1.TaskList',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='uber.cadence.api.v1.TaskList.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='kind', full_name='uber.cadence.api.v1.TaskList.kind', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=156,
  serialized_end=229,
)

# Generated message descriptor for uber.cadence.api.v1.TaskListMetadata — do not edit.
_TASKLISTMETADATA = _descriptor.Descriptor(
  name='TaskListMetadata',
  full_name='uber.cadence.api.v1.TaskListMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='max_tasks_per_second', full_name='uber.cadence.api.v1.TaskListMetadata.max_tasks_per_second', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=231,
  serialized_end=309,
)
# Generated message descriptor for uber.cadence.api.v1.TaskListPartitionMetadata — do not edit.
_TASKLISTPARTITIONMETADATA = _descriptor.Descriptor(
  name='TaskListPartitionMetadata',
  full_name='uber.cadence.api.v1.TaskListPartitionMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='uber.cadence.api.v1.TaskListPartitionMetadata.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='owner_host_name', full_name='uber.cadence.api.v1.TaskListPartitionMetadata.owner_host_name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=311,
  serialized_end=376,
)

# Generated message descriptor for uber.cadence.api.v1.TaskListStatus — do not edit.
_TASKLISTSTATUS = _descriptor.Descriptor(
  name='TaskListStatus',
  full_name='uber.cadence.api.v1.TaskListStatus',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='backlog_count_hint', full_name='uber.cadence.api.v1.TaskListStatus.backlog_count_hint', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='read_level', full_name='uber.cadence.api.v1.TaskListStatus.read_level', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='ack_level', full_name='uber.cadence.api.v1.TaskListStatus.ack_level', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='rate_per_second', full_name='uber.cadence.api.v1.TaskListStatus.rate_per_second', index=3,
      number=4, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='task_id_block', full_name='uber.cadence.api.v1.TaskListStatus.task_id_block', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=379,
  serialized_end=544,
)
# Generated message descriptor for uber.cadence.api.v1.TaskIDBlock — do not edit.
_TASKIDBLOCK = _descriptor.Descriptor(
  name='TaskIDBlock',
  full_name='uber.cadence.api.v1.TaskIDBlock',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='start_id', full_name='uber.cadence.api.v1.TaskIDBlock.start_id', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='end_id', full_name='uber.cadence.api.v1.TaskIDBlock.end_id', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=546,
  serialized_end=593,
)

# Generated message descriptor for uber.cadence.api.v1.PollerInfo — do not edit.
_POLLERINFO = _descriptor.Descriptor(
  name='PollerInfo',
  full_name='uber.cadence.api.v1.PollerInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='last_access_time', full_name='uber.cadence.api.v1.PollerInfo.last_access_time', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='identity', full_name='uber.cadence.api.v1.PollerInfo.identity', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='rate_per_second', full_name='uber.cadence.api.v1.PollerInfo.rate_per_second', index=2,
      number=3, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=595,
  serialized_end=704,
)
# Generated message descriptor for uber.cadence.api.v1.StickyExecutionAttributes — do not edit.
_STICKYEXECUTIONATTRIBUTES = _descriptor.Descriptor(
  name='StickyExecutionAttributes',
  full_name='uber.cadence.api.v1.StickyExecutionAttributes',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='worker_task_list', full_name='uber.cadence.api.v1.StickyExecutionAttributes.worker_task_list', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='schedule_to_start_timeout', full_name='uber.cadence.api.v1.StickyExecutionAttributes.schedule_to_start_timeout', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=707,
  serialized_end=853,
)
_TASKLIST.fields_by_name['kind'].enum_type = _TASKLISTKIND
_TASKLISTMETADATA.fields_by_name['max_tasks_per_second'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_TASKLISTSTATUS.fields_by_name['task_id_block'].message_type = _TASKIDBLOCK
_POLLERINFO.fields_by_name['last_access_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_STICKYEXECUTIONATTRIBUTES.fields_by_name['worker_task_list'].message_type = _TASKLIST
_STICKYEXECUTIONATTRIBUTES.fields_by_name['schedule_to_start_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
DESCRIPTOR.message_types_by_name['TaskList'] = _TASKLIST
DESCRIPTOR.message_types_by_name['TaskListMetadata'] = _TASKLISTMETADATA
DESCRIPTOR.message_types_by_name['TaskListPartitionMetadata'] = _TASKLISTPARTITIONMETADATA
DESCRIPTOR.message_types_by_name['TaskListStatus'] = _TASKLISTSTATUS
DESCRIPTOR.message_types_by_name['TaskIDBlock'] = _TASKIDBLOCK
DESCRIPTOR.message_types_by_name['PollerInfo'] = _POLLERINFO
DESCRIPTOR.message_types_by_name['StickyExecutionAttributes'] = _STICKYEXECUTIONATTRIBUTES
DESCRIPTOR.enum_types_by_name['TaskListKind'] = _TASKLISTKIND
DESCRIPTOR.enum_types_by_name['TaskListType'] = _TASKLISTTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TaskList = _reflection.GeneratedProtocolMessageType('TaskList', (_message.Message,), {
'DESCRIPTOR' : _TASKLIST,
'__module__' : 'uber.cadence.api.v1.tasklist_pb2'
# @@protoc_insertion_point(class_scope:uber.cadence.api.v1.TaskList)
})
_sym_db.RegisterMessage(TaskList)
TaskListMetadata = _reflection.GeneratedProtocolMessageType('TaskListMetadata', (_message.Message,), {
'DESCRIPTOR' : _TASKLISTMETADATA,
'__module__' : 'uber.cadence.api.v1.tasklist_pb2'
# @@protoc_insertion_point(class_scope:uber.cadence.api.v1.TaskListMetadata)
})
_sym_db.RegisterMessage(TaskListMetadata)
TaskListPartitionMetadata = _reflection.GeneratedProtocolMessageType('TaskListPartitionMetadata', (_message.Message,), {
'DESCRIPTOR' : _TASKLISTPARTITIONMETADATA,
'__module__' : 'uber.cadence.api.v1.tasklist_pb2'
# @@protoc_insertion_point(class_scope:uber.cadence.api.v1.TaskListPartitionMetadata)
})
_sym_db.RegisterMessage(TaskListPartitionMetadata)
TaskListStatus = _reflection.GeneratedProtocolMessageType('TaskListStatus', (_message.Message,), {
'DESCRIPTOR' : _TASKLISTSTATUS,
'__module__' : 'uber.cadence.api.v1.tasklist_pb2'
# @@protoc_insertion_point(class_scope:uber.cadence.api.v1.TaskListStatus)
})
_sym_db.RegisterMessage(TaskListStatus)
TaskIDBlock = _reflection.GeneratedProtocolMessageType('TaskIDBlock', (_message.Message,), {
'DESCRIPTOR' : _TASKIDBLOCK,
'__module__' : 'uber.cadence.api.v1.tasklist_pb2'
# @@protoc_insertion_point(class_scope:uber.cadence.api.v1.TaskIDBlock)
})
_sym_db.RegisterMessage(TaskIDBlock)
PollerInfo = _reflection.GeneratedProtocolMessageType('PollerInfo', (_message.Message,), {
'DESCRIPTOR' : _POLLERINFO,
'__module__' : 'uber.cadence.api.v1.tasklist_pb2'
# @@protoc_insertion_point(class_scope:uber.cadence.api.v1.PollerInfo)
})
_sym_db.RegisterMessage(PollerInfo)
StickyExecutionAttributes = _reflection.GeneratedProtocolMessageType('StickyExecutionAttributes', (_message.Message,), {
'DESCRIPTOR' : _STICKYEXECUTIONATTRIBUTES,
'__module__' : 'uber.cadence.api.v1.tasklist_pb2'
# @@protoc_insertion_point(class_scope:uber.cadence.api.v1.StickyExecutionAttributes)
})
_sym_db.RegisterMessage(StickyExecutionAttributes)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
import string


def print_rangoli(n):
    """Print an alphabet rangoli of size *n* and return it as a string.

    A rangoli of size ``n`` uses the first ``n`` lowercase letters; the
    centre row reads ``...c-b-a-b-c...`` and rows are centred with dashes
    to a width of ``4*n - 3``.

    Args:
        n: size of the rangoli (number of distinct letters), n >= 1.

    Returns:
        str: the full rangoli, rows joined with newlines (also printed).
    """
    alpha = string.ascii_lowercase
    rows = []
    for i in range(n):
        # Half-row from the i-th letter outward, e.g. i=0, n=3 -> "a-b-c".
        half = "-".join(alpha[i:n])
        # Mirror it around the first letter and centre with dashes.
        rows.append((half[::-1] + half[1:]).center(4 * n - 3, "-"))
    # Top half is the rows reversed (excluding the centre row at index 0).
    art = "\n".join(rows[:0:-1] + rows)
    print(art)
    return art


if __name__ == '__main__':
    n = int(input())
    print_rangoli(n)
|
nilq/baby-python
|
python
|
from pydantic import BaseModel
class PartOfSpeech(BaseModel):
    """Pydantic model carrying a single part-of-speech label."""
    # The raw part-of-speech tag string (e.g. "NOUN") -- presumably a
    # tagger-specific tagset; confirm against the producing pipeline.
    tag: str
|
nilq/baby-python
|
python
|
# coding:utf-8
import os
import json
import numpy as np
import torch.utils.data as data
from detectron2.structures import (
Boxes,
PolygonMasks,
BoxMode
)
# Mapping of dataset name -> image directory and COCO-style annotation
# file; both paths are joined onto ``MaskLoader``'s ``root`` argument.
DATASETS = {
    "coco_2017_train": {
        "img_dir": "coco/train2017",
        "ann_file": "coco/annotations/instances_train2017.json"
    },
    "coco_2017_val": {
        "img_dir": "coco/val2017",
        "ann_file": "coco/annotations/instances_val2017.json"
    }
}
class MaskLoader(data.Dataset):
    """
    Dataloader for Local Mask.

    Arguments:
        root (string): filepath to dataset folder.
        dataset (string): key into ``DATASETS`` (eg. 'coco_2017_train').
        size (int): side length of the square mask crop used for train/val.
        transform (callable, optional): transformation to perform on the
            input mask.  NOTE(review): stored but never applied in
            ``__getitem__`` -- confirm whether that is intentional.
    """

    def __init__(self, root="datasets", dataset="coco_2017_train", size=28, transform=False):
        self.root = root
        self.dataset = dataset
        self.transform = transform

        if isinstance(size, int):
            self.size = size
        else:
            raise TypeError(
                "size must be an int, got {}".format(type(size).__name__))

        if dataset not in DATASETS:
            raise KeyError(
                "unknown dataset {!r}; expected one of {}".format(
                    dataset, sorted(DATASETS)))
        data_info = DATASETS[dataset]
        img_dir, ann_file = data_info['img_dir'], data_info['ann_file']
        img_dir = os.path.join(self.root, img_dir)  # actually we do not use it.
        ann_file = os.path.join(self.root, ann_file)

        with open(ann_file, 'r') as f:
            anns = json.load(f)['annotations']

        # Keep only non-crowd instances: crowd regions are RLE-encoded and
        # cannot be turned into PolygonMasks below.
        self.coco = [ann for ann in anns if ann.get('iscrowd', 0) == 0]
        print("Removed {} crowd annotations. {} annotations left.".format(
            len(anns) - len(self.coco), len(self.coco)))

    def __len__(self):
        """Return the number of usable (non-crowd) annotations."""
        return len(self.coco)

    def __getitem__(self, index):
        """Return the ``size`` x ``size`` float mask crop for annotation *index*."""
        ann = self.coco[index]

        # bbox transform: COCO boxes are xmin, ymin, w, h -> x1, y1, x2, y2.
        bbox = np.array([ann["bbox"]])
        bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
        bbox = Boxes(bbox)

        # mask transform: rasterise the polygon inside its box at fixed size.
        mask = PolygonMasks([ann["segmentation"]])
        mask = mask.crop_and_resize(bbox.tensor, self.size).float()

        return mask
|
nilq/baby-python
|
python
|
#Need to prebuild in maya first
#RenderScript.py
#MayaPythonScript : RenderScript
#A script that can use python to automativly render the scene
import maya.cmds as cmds
import maya.cmds as mc
import maya.app.general.createImageFormats as createImageFormats
from mtoa.cmds.arnoldRender import arnoldRender
#Function : getCameraCharacter()
#Usage : use to get the Camera of the Character
#There is only one Camera in the Scene:
# ->characterCamera
#Return : the Camera Get
def getCameraCharacter():
    """Return the character camera of the scene.

    The scene is expected to contain a single camera (the character
    camera), so the first entry of ``cmds.listCameras()`` is returned.

    Returns:
        The name of the first camera listed in the scene.
    """
    # Previously listCameras() was called twice with the first result
    # discarded; one call is enough.
    cameras = cmds.listCameras()
    # NOTE(review): assumes at least one camera exists -- an empty scene
    # would raise IndexError here; confirm that is acceptable.
    return cameras[0]
#Function : renderSequence
#Usage : frome the startFrame to the endFrame , we render it with a advanced setting
#use the render to render the camera want
#cmds.render(cameraWant)
#Input : renderfn(The render Tool) . renderfn_args(The flag use to render)
#the parameter frameNum is look like 00,01,02 to record the Index
def renderSequenceWithMayaSoft(startFrame , endFrame , frameNum ,renderfn = mc.render, renderfn_args = None):
    """Render frames [startFrame, endFrame) with the Maya Software renderer.

    Args:
        startFrame: first frame to render (inclusive).
        endFrame: end of the frame range (exclusive, matching range()).
        frameNum: sequence index embedded in the saved image file name.
        renderfn: render callable; defaults to ``mc.render``.
        renderfn_args: single argument forwarded to ``renderfn``.
    """
    #save the state (current time is restored after the loop)
    now = mc.currentTime(q = True)
    for x in range(startFrame, endFrame):
        #for render information debug
        #print 'RenderScript : Do Render :' + str( x )
        mc.currentTime(x)
        #Launch render process
        renderfn(renderfn_args)
        # Save the Picture in RenderView
        savePicInRenderView(frameNum, x)
    #restore state
    mc.currentTime(now)
# How to use : RenderScript.renderSequenceWithArnold(0,2,12)
# The function is the same as mayaSoftRender , but it use the arnold
def renderSequenceWithArnold(startFrame, endFrame, frameNum,
                             renderfn=arnoldRender,
                             renderfn_args=None):
    """Render frames [startFrame, endFrame) with the Arnold renderer.

    Args:
        startFrame: first frame to render (inclusive).
        endFrame: end of the frame range (exclusive, matching range()).
        frameNum: sequence index embedded in the saved image file name.
        renderfn: render callable; defaults to Arnold's ``arnoldRender``.
        renderfn_args: positional arguments forwarded to ``renderfn``.
            Defaults to ``[695, 449, True, True, 'camera1',
            ' -layer defaultRenderLayer']``.
    """
    # Build the default argument list per call instead of using a mutable
    # default argument (which would be shared across all calls).
    if renderfn_args is None:
        renderfn_args = [695, 449, True, True, 'camera1',
                         ' -layer defaultRenderLayer']
    # Save the current time so the scene state can be restored afterwards.
    now = mc.currentTime(q=True)
    for frame in range(startFrame, endFrame):
        mc.currentTime(frame)
        # Launch the render for this frame; unpack instead of indexing
        # each of the six arguments individually.
        renderfn(*renderfn_args)
        # Save the rendered picture from the RenderView.
        savePicInRenderView(frameNum, frame)
    # Restore the original current-time state.
    mc.currentTime(now)
# The function use to save the RenderView frame when being render
def savePicInRenderView(frameIndex,x):
    """Save the current RenderView image as a zero-padded PNG.

    The file is written to the hard-coded folder
    E:/mayaStore/images/imageSequence/ as
    ``CharacterImage_<frameIndex>_<x>.png`` (both numbers zero-padded
    to two digits).  NOTE(review): the destination folder must already
    exist -- confirm.
    """
    # save the image to a exist folder
    editor = 'renderView'
    formatManager = createImageFormats.ImageFormats()
    # Force PNG output regardless of the scene's render globals.
    formatManager.pushRenderGlobalsForDesc("PNG")
    # The name of the Image is CharacterImage'+str(x)+.jpg ,example CharacterImage1.jpg\
    cmds.renderWindowEditor(editor, e=True, writeImage='E:/mayaStore/images/imageSequence/CharacterImage_'
        + str(frameIndex).zfill(2) + '_' + str(x).zfill(2) + '.png')
    # Restore the render globals that pushRenderGlobalsForDesc replaced.
    formatManager.popRenderGlobals()
#Test Function
#renderSequence(0,24,renderfn_args = getCameraCharacter())
|
nilq/baby-python
|
python
|
import torch
from torch import nn
from torch.nn import functional as F
def normalization(feautures):
    """Min-max normalize each sample of a (B, 1, H, W) map into [0, 1].

    Each batch element is scaled independently; a constant (flat) map is
    mapped to all zeros via a small epsilon denominator.
    """
    batch, _, height, width = feautures.size()
    flat = feautures.squeeze(1).view(batch, -1)
    lo = flat.min(dim=1, keepdim=True)[0]
    hi = flat.max(dim=1, keepdim=True)[0]
    span = hi - lo
    # Guard against division by zero for constant maps.
    span[span == 0] = 1e-5
    scaled = (flat - lo) / span
    return scaled.view(batch, 1, height, width)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution, no bias; padding equals dilation so the spatial
    size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Pointwise (1x1) convolution without bias."""
    return nn.Conv2d(in_planes, out_planes, 1, stride=stride, bias=False)
class FABlock(nn.Module):
    """Feature-attention block: channel attention followed by spatial
    (position) attention, folded back into the input through a residual
    connection scaled by a learnable ``gamma``.

    ``gamma`` is initialised to zero, so the block starts out as an
    identity mapping and learns how much attention to inject.

    Args:
        in_channels: number of channels of the input feature map.
        norm_layer: normalisation layer constructor; defaults to
            ``nn.BatchNorm2d``.
        reduction: channel reduction ratio of the bottleneck MLP.
    """
    def __init__(self, in_channels, norm_layer=None, reduction=8):
        super(FABlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # 1x1 conv producing the single-channel spatial weighting used to
        # pool the input into a per-channel descriptor.
        self.conv1 = conv1x1(in_channels, 1)
        # Bottleneck MLP (squeeze-excitation style) over channel descriptors.
        self.channel_fc = nn.Sequential(
            nn.Linear(in_channels, in_channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(in_channels // reduction, in_channels, bias=False)
        )
        self.conv2 = conv1x1(in_channels, in_channels)
        self.conv3 = conv1x1(in_channels, 1)
        self.conv4 = conv3x3(1, 1)
        self.bn4 = norm_layer(1)
        # Residual scale; zero at init => block initially acts as identity.
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        """Apply channel then position attention to ``x``.

        Args:
            x: input feature map of shape (B, C, H, W).

        Returns:
            ``(out, attention_outs)``: the attended map with the same shape
            as ``x``, and the per-sample min-max-normalised attention
            residual of shape (B, 1, H, W).
        """
        B, C, H, W = x.size()
        # channel attention
        y = self.conv1(x).view(B, 1, -1)
        y = F.softmax(y, dim=-1)
        y = y.permute(0, 2, 1).contiguous()
        # Spatially-weighted average of x -> (B, C) channel descriptor.
        y = torch.matmul(x.view(B, C, -1), y).view(B, -1)
        y = self.channel_fc(y)
        # Per-channel gates broadcast back to the input shape.
        y = torch.sigmoid(y).unsqueeze(2).unsqueeze(3).expand_as(x)
        x_y = self.conv2(x)
        x_y = x_y * y
        # position attention
        x_y_z = self.conv3(x_y)
        z = self.conv4(x_y_z)
        z = self.bn4(z)
        z = torch.sigmoid(z)
        x_y_z = x_y_z * z
        out = self.gamma*x_y_z + x
        attention_outs = normalization(self.gamma*x_y_z)
        return out, attention_outs
|
nilq/baby-python
|
python
|
from .nucleus_sampling import top_k_top_p_filtering
from .transformer_decoder import TransformerDecoder
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from odoo import http
# class ControleEquipement(http.Controller):
# @http.route('/controle_equipement/controle_equipement/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/controle_equipement/controle_equipement/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('controle_equipement.listing', {
# 'root': '/controle_equipement/controle_equipement',
# 'objects': http.request.env['controle_equipement.controle_equipement'].search([]),
# })
# @http.route('/controle_equipement/controle_equipement/objects/<model("controle_equipement.controle_equipement"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('controle_equipement.object', {
# 'object': obj
# })
|
nilq/baby-python
|
python
|
import consts

# Load all quotes from the quotes file.  A quote line starts with "* ";
# everything after that prefix (minus the trailing newline) is the quote.
quotes = []
with open(consts.quotes_file, "r") as fp:
    for line in fp:
        if line.startswith('*'):
            # rstrip('\n') instead of slicing off the last character so the
            # final line is not truncated when the file lacks a trailing
            # newline.
            quotes.append(line[2:].rstrip('\n'))
|
nilq/baby-python
|
python
|
# Hangman game, version 2 ("Jogo da Forca" versao 2)
import tkinter as tk
import applic
# Create the root window, hand it to the game's Application controller,
# and enter the Tk event loop (blocks until the window is closed).
window = tk.Tk()
applic.Application(window)
window.mainloop()
|
nilq/baby-python
|
python
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from omegaconf import DictConfig
from nemo.collections.asr.data import audio_to_text, audio_to_text_dali
def get_char_dataset(config: dict, augmentor: Optional['AudioAugmentor'] = None) -> audio_to_text.AudioToCharDataset:
    """
    Instantiates a Character Encoding based AudioToCharDataset.

    Args:
        config: Config of the AudioToCharDataset.  Required keys:
            'manifest_filepath', 'labels', 'sample_rate' (a missing one
            raises KeyError); all other options fall back to defaults.
        augmentor: Optional AudioAugmentor object for augmentations on audio data.

    Returns:
        An instance of AudioToCharDataset.
    """
    dataset = audio_to_text.AudioToCharDataset(
        manifest_filepath=config['manifest_filepath'],
        labels=config['labels'],
        sample_rate=config['sample_rate'],
        int_values=config.get('int_values', False),
        augmentor=augmentor,
        max_duration=config.get('max_duration', None),
        min_duration=config.get('min_duration', None),
        max_utts=config.get('max_utts', 0),
        blank_index=config.get('blank_index', -1),
        unk_index=config.get('unk_index', -1),
        normalize=config.get('normalize_transcripts', False),
        trim=config.get('trim_silence', False),
        load_audio=config.get('load_audio', True),
        parser=config.get('parser', 'en'),
        add_misc=config.get('add_misc', False),
    )
    return dataset
def get_effective_dataset(config: dict, augmentor: Optional['AudioAugmentor'] = None) -> audio_to_text.AudioToCharEffectiveDataset:
    """
    Instantiates a Character Encoding based AudioToCharEffectiveDataset.

    Args:
        config: Config of the AudioToCharEffectiveDataset.  In addition to
            the AudioToCharDataset options, supports 'buffer_size'
            (default 3000) and 'batch_size' (default 128).
        augmentor: Optional AudioAugmentor object for augmentations on audio data.

    Returns:
        An instance of AudioToCharEffectiveDataset.
    """
    dataset = audio_to_text.AudioToCharEffectiveDataset(
        manifest_filepath=config['manifest_filepath'],
        labels=config['labels'],
        sample_rate=config['sample_rate'],
        int_values=config.get('int_values', False),
        augmentor=augmentor,
        max_duration=config.get('max_duration', None),
        min_duration=config.get('min_duration', None),
        max_utts=config.get('max_utts', 0),
        blank_index=config.get('blank_index', -1),
        unk_index=config.get('unk_index', -1),
        normalize=config.get('normalize_transcripts', False),
        trim=config.get('trim_silence', False),
        load_audio=config.get('load_audio', True),
        parser=config.get('parser', 'en'),
        add_misc=config.get('add_misc', False),
        buffer_size=config.get('buffer_size', 3000),
        batch_size=config.get('batch_size', 128),
    )
    return dataset
def get_rolling_buffer_dataset(config: dict, augmentor: Optional['AudioAugmentor'] = None) -> audio_to_text.AudioToCharRollingBufferDataset:
    """
    Instantiates a Character Encoding based AudioToCharRollingBufferDataset.

    Args:
        config: Config of the AudioToCharRollingBufferDataset.  In addition
            to the AudioToCharDataset options, supports 'buffer_size'
            (default 2000) and 'batch_size' (default 128).
        augmentor: Optional AudioAugmentor object for augmentations on audio data.

    Returns:
        An instance of AudioToCharRollingBufferDataset.
    """
    dataset = audio_to_text.AudioToCharRollingBufferDataset(
        manifest_filepath=config['manifest_filepath'],
        labels=config['labels'],
        sample_rate=config['sample_rate'],
        int_values=config.get('int_values', False),
        augmentor=augmentor,
        max_duration=config.get('max_duration', None),
        min_duration=config.get('min_duration', None),
        max_utts=config.get('max_utts', 0),
        blank_index=config.get('blank_index', -1),
        unk_index=config.get('unk_index', -1),
        normalize=config.get('normalize_transcripts', False),
        trim=config.get('trim_silence', False),
        load_audio=config.get('load_audio', True),
        parser=config.get('parser', 'en'),
        add_misc=config.get('add_misc', False),
        buffer_size=config.get('buffer_size', 2000),
        batch_size=config.get('batch_size', 128),
    )
    return dataset
def get_bpe_dataset(
    config: dict, tokenizer: 'TokenizerSpec', augmentor: Optional['AudioAugmentor'] = None
) -> audio_to_text.AudioToBPEDataset:
    """
    Instantiates a Byte Pair Encoding / Word Piece Encoding based AudioToBPEDataset.

    Args:
        config: Config of the AudioToBPEDataset.  Required keys:
            'manifest_filepath' and 'sample_rate'.
        tokenizer: An instance of a TokenizerSpec object.
        augmentor: Optional AudioAugmentor object for augmentations on audio data.

    Returns:
        An instance of AudioToBPEDataset.
    """
    dataset = audio_to_text.AudioToBPEDataset(
        manifest_filepath=config['manifest_filepath'],
        tokenizer=tokenizer,
        sample_rate=config['sample_rate'],
        int_values=config.get('int_values', False),
        augmentor=augmentor,
        max_duration=config.get('max_duration', None),
        min_duration=config.get('min_duration', None),
        max_utts=config.get('max_utts', 0),
        trim=config.get('trim_silence', False),
        load_audio=config.get('load_audio', True),
        add_misc=config.get('add_misc', False),
        use_start_end_token=config.get('use_start_end_token', True),
    )
    return dataset
def get_tarred_char_dataset(
    config: dict, shuffle_n: int, global_rank: int, world_size: int, augmentor: Optional['AudioAugmentor'] = None
) -> audio_to_text.TarredAudioToCharDataset:
    """
    Instantiates a Character Encoding based TarredAudioToCharDataset.

    Args:
        config: Config of the TarredAudioToCharDataset.  Required keys:
            'tarred_audio_filepaths', 'manifest_filepath', 'labels',
            'sample_rate'.
        shuffle_n: How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
        global_rank: Global rank of this device.
        world_size: Global world size in the training method.
        augmentor: Optional AudioAugmentor object for augmentations on audio data.

    Returns:
        An instance of TarredAudioToCharDataset.
    """
    dataset = audio_to_text.TarredAudioToCharDataset(
        audio_tar_filepaths=config['tarred_audio_filepaths'],
        manifest_filepath=config['manifest_filepath'],
        labels=config['labels'],
        sample_rate=config['sample_rate'],
        int_values=config.get('int_values', False),
        augmentor=augmentor,
        shuffle_n=shuffle_n,
        max_duration=config.get('max_duration', None),
        min_duration=config.get('min_duration', None),
        max_utts=config.get('max_utts', 0),
        blank_index=config.get('blank_index', -1),
        unk_index=config.get('unk_index', -1),
        normalize=config.get('normalize_transcripts', False),
        trim=config.get('trim_silence', False),
        parser=config.get('parser', 'en'),
        add_misc=config.get('add_misc', False),
        shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
        global_rank=global_rank,
        world_size=world_size,
    )
    return dataset
def get_tarred_bpe_dataset(
    config: dict,
    tokenizer: 'TokenizerSpec',
    shuffle_n: int,
    global_rank: int,
    world_size: int,
    augmentor: Optional['AudioAugmentor'] = None,
) -> audio_to_text.TarredAudioToBPEDataset:
    """
    Instantiates a Byte Pair Encoding / Word Piece Encoding based TarredAudioToBPEDataset.

    Args:
        config: Config of the TarredAudioToBPEDataset.  Required keys:
            'tarred_audio_filepaths', 'manifest_filepath', 'sample_rate'.
        tokenizer: An instance of a TokenizerSpec object.
        shuffle_n: How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
        global_rank: Global rank of this device.
        world_size: Global world size in the training method.
        augmentor: Optional AudioAugmentor object for augmentations on audio data.

    Returns:
        An instance of TarredAudioToBPEDataset.
    """
    dataset = audio_to_text.TarredAudioToBPEDataset(
        audio_tar_filepaths=config['tarred_audio_filepaths'],
        manifest_filepath=config['manifest_filepath'],
        tokenizer=tokenizer,
        sample_rate=config['sample_rate'],
        int_values=config.get('int_values', False),
        augmentor=augmentor,
        shuffle_n=shuffle_n,
        max_duration=config.get('max_duration', None),
        min_duration=config.get('min_duration', None),
        max_utts=config.get('max_utts', 0),
        trim=config.get('trim_silence', False),
        add_misc=config.get('add_misc', False),
        use_start_end_token=config.get('use_start_end_token', True),
        shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
        global_rank=global_rank,
        world_size=world_size,
    )
    return dataset
def get_dali_char_dataset(
    config: dict,
    shuffle: bool,
    device_id: int,
    global_rank: int,
    world_size: int,
    preprocessor_cfg: Optional[DictConfig] = None,
) -> audio_to_text_dali.AudioToCharDALIDataset:
    """
    Instantiates a Character Encoding based AudioToCharDALIDataset.

    Args:
        config: Config of the AudioToCharDALIDataset.  Required keys:
            'manifest_filepath', 'batch_size', 'labels', 'sample_rate'.
        shuffle: Bool flag whether to shuffle the dataset.
        device_id: Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
        global_rank: Global rank of this device.
        world_size: Global world size in the training method.
        preprocessor_cfg: Optional preprocessor config forwarded to the
            DALI pipeline.

    Returns:
        An instance of AudioToCharDALIDataset.
    """
    # Run the DALI pipeline on GPU whenever CUDA is available.
    device = 'gpu' if torch.cuda.is_available() else 'cpu'
    dataset = audio_to_text_dali.AudioToCharDALIDataset(
        manifest_filepath=config['manifest_filepath'],
        device=device,
        batch_size=config['batch_size'],
        labels=config['labels'],
        sample_rate=config['sample_rate'],
        max_duration=config.get('max_duration', None),
        min_duration=config.get('min_duration', None),
        blank_index=config.get('blank_index', -1),
        unk_index=config.get('unk_index', -1),
        normalize=config.get('normalize_transcripts', False),
        trim=config.get('trim_silence', False),
        parser=config.get('parser', 'en'),
        shuffle=shuffle,
        device_id=device_id,
        global_rank=global_rank,
        world_size=world_size,
        preprocessor_cfg=preprocessor_cfg,
    )
    return dataset
|
nilq/baby-python
|
python
|
import pydocspec
from pydocspec import visitors
def dump(root: pydocspec.TreeRoot) -> None:
    """Walk every root module of *root* with a fresh PrintVisitor.

    A new visitor is created per module, matching the original behaviour
    of not sharing visitor state between modules.
    """
    for module in root.root_modules:
        module.walk(visitors.PrintVisitor())
# pydocspec_processes = {
# 90: dump
# }
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import mock
from click.testing import CliRunner
from elasticsearch_loader import cli
def invoke(content, *args, **kwargs):
    """Write *content* to sample.csv in an isolated filesystem and run the
    given click command there, returning the click Result.

    On Python 2 the text is encoded to UTF-8 first so non-ASCII fixtures
    can be written with a plain file handle.
    """
    if sys.version_info[0] == 2:
        content = content.encode('utf-8')
    runner = CliRunner()
    with runner.isolated_filesystem():
        with open('sample.csv', 'w') as f:
            f.write(content)
        return runner.invoke(*args, **kwargs)
@mock.patch('elasticsearch_loader.single_bulk_to_es')
def test_should_iterate_over_csv(bulk):
    """The csv subcommand should parse comma-separated rows (including a
    non-ASCII value) into dicts and pass them to single_bulk_to_es."""
    content = """id,first,last\nMOZA,Moshe,Zada\nMICHO,Michelle,Obama\na,b,c\nf,g,א"""
    result = invoke(content, cli, ['--index=index', '--type=type', 'csv', 'sample.csv'], catch_exceptions=False)
    assert result.exit_code == 0
    # bulk.call_args[0][0] holds the parsed rows; drop padding None entries.
    assert [x for x in bulk.call_args[0][0] if x is not None] == [{'first': 'Moshe', 'id': 'MOZA', 'last': 'Zada'},
                                                                 {'first': 'Michelle', 'id': 'MICHO', 'last': 'Obama'},
                                                                 {'first': 'b', 'id': 'a', 'last': 'c'},
                                                                 {'first': 'g', 'id': 'f', 'last': 'א'}]
@mock.patch('elasticsearch_loader.single_bulk_to_es')
def test_should_iterate_over_tsv(bulk):
    """Same as the csv test but with tab-separated input and an explicit
    --delimiter=\\t flag."""
    content = """id	first	last\nMOZA	Moshe	Zada\nMICHO	Michelle	Obama\na	b	c\nf	g	א"""
    result = invoke(content, cli, ['--index=index', '--type=type', 'csv', '--delimiter=\\t', 'sample.csv'], catch_exceptions=False)
    assert result.exit_code == 0
    # bulk.call_args[0][0] holds the parsed rows; drop padding None entries.
    assert [x for x in bulk.call_args[0][0] if x is not None] == [{'first': 'Moshe', 'id': 'MOZA', 'last': 'Zada'},
                                                                 {'first': 'Michelle', 'id': 'MICHO', 'last': 'Obama'},
                                                                 {'first': 'b', 'id': 'a', 'last': 'c'},
                                                                 {'first': 'g', 'id': 'f', 'last': 'א'}]
|
nilq/baby-python
|
python
|
from dataclasses import dataclass
import os
from typing import Optional
@dataclass(frozen=True)
class ENV:
    """Immutable snapshot of deployment configuration read from
    environment variables.

    NOTE: defaults are evaluated once, at class-definition (import) time;
    later changes to ``os.environ`` are not picked up by new instances.
    Fields annotated ``Optional[bool]`` actually hold the raw string value
    of the variable (or None) -- callers must interpret truthiness.
    """
    workspace_name: Optional[str] = os.environ.get('WORKSPACE_NAME')
    subscription_id: Optional[str] = os.environ.get('SUBSCRIPTION_ID')
    resource_group: Optional[str] = os.environ.get('RESOURCE_GROUP')
    vm_priority: Optional[str] = os.environ.get('AML_CLUSTER_PRIORITY', 'lowpriority')
    vm_priority_scoring: Optional[str] = os.environ.get('AML_CLUSTER_PRIORITY_SCORING', 'lowpriority')
    vm_size: Optional[str] = os.environ.get('AML_COMPUTE_CLUSTER_CPU_SKU')
    vm_size_scoring: Optional[str] = os.environ.get('AML_COMPUTE_CLUSTER_CPU_SKU_SCORING')
    min_nodes: Optional[int] = int(os.environ.get('AML_CLUSTER_MIN_NODES', 0))
    min_nodes_scoring: Optional[int] = int(os.environ.get('AML_CLUSTER_MIN_NODES_SCORING', 0))
    max_nodes: Optional[int] = int(os.environ.get('AML_CLUSTER_MAX_NODES', 4))
    max_nodes_scoring: Optional[int] = int(os.environ.get('AML_CLUSTER_MAX_NODES_SCORING', 4))
    source_train_directory: Optional[str] = os.environ.get('SOURCE_TRAIN_DIRECTORY', 'diabetes')
    aml_conda_train_dependent_files: Optional[str] = os.environ.get('AML_CONDA_TRAIN_DEPENDENT_FILES', 'conda_dependencies.yml')
    aml_env_name: Optional[str] = os.environ.get('AML_ENV_NAME')
    aml_env_scoring_name: Optional[str] = os.environ.get('AML_ENV_SCORING_NAME')
    aml_env_scorecopy_name: Optional[str] = os.environ.get('AML_ENV_SCORECOPY_NAME')
    # NOTE(review): env values are strings, not bools -- annotation kept
    # for interface compatibility.
    rebuild_env: Optional[bool] = os.environ.get('AML_REBUILD_ENVIRONMENT')
    model_name: Optional[str] = os.environ.get('MODEL_NAME')
    model_name_scoring: Optional[str] = os.environ.get('MODEL_NAME_SCORING')
    model_version: Optional[str] = os.environ.get('MODEL_VERSION')
    model_version_scoring: Optional[str] = os.environ.get('MODEL_VERSION_SCORING')
    dataset_name: Optional[str] = os.environ.get('DATASET_NAME')
    build_id: Optional[str] = os.environ.get('BUILD_BUILDID')
    pipeline_name: Optional[str] = os.environ.get('TRAINING_PIPELINE_NAME')
    compute_name: Optional[str] = os.environ.get('AML_COMPUTE_CLUSTER_NAME')
    datastore_name: Optional[str] = os.environ.get('DATASTORE_NAME')
    dataset_version: Optional[str] = os.environ.get('DATASET_VERSION')
    train_script_path: Optional[str] = os.environ.get('TRAIN_SCRIPT_PATH')
    eval_script_path: Optional[str] = os.environ.get('EVAL_SCRIPT_PATH')
    register_script_path: Optional[str] = os.environ.get('REGISTER_SCRIPT_PATH')
    allow_run_cancel: Optional[str] = os.environ.get('ALLOW_RUN_CANCEL')
    run_evaluation: Optional[str] = os.environ.get('RUN_EVALUATION')
    experiment_name: Optional[str] = os.environ.get('EXPERIMENT_NAME')
    build_uri: Optional[str] = os.environ.get('BUILD_URI')
    scoring_datastore_access_key: Optional[str] = os.environ.get('SCORING_DATASTORE_ACCESS_KEY')
    scoring_datastore_input_container: Optional[str] = os.environ.get('SCORING_DATASTORE_INPUT_CONTAINER')
    scoring_datastore_output_container: Optional[str] = os.environ.get('SCORING_DATASTORE_OUTPUT_CONTAINER')
    scoring_datastore_storage_name: Optional[str] = os.environ.get('SCORING_DATASTORE_STORAGE_NAME')
    scoring_datastore_input_filename: Optional[str] = os.environ.get('SCORING_DATASTORE_INPUT_FILENAME')
    scoring_datastore_output_filename: Optional[str] = os.environ.get('SCORING_DATASTORE_OUTPUT_FILENAME')
    scoring_dataset_name: Optional[str] = os.environ.get('SCORING_DATASET_NAME')
    scoring_pipeline_name: Optional[str] = os.environ.get('SCORING_PIPELINE_NAME')
    use_gpu_for_scoring: Optional[str] = os.environ.get('USE_GPU_FOR_SCORING')
    rebuild_scoring_env: Optional[str] = os.environ.get('AML_REBUILD_SCORING_ENV')
    batchscore_script_path: Optional[str] = os.environ.get('BATCHSCORE_SCRIPT_PATH')
    batch_scorecopy_script_path: Optional[str] = os.environ.get('BATCH_SCORECOPY_SCRIPT_PATH')
    aml_conda_score_file: Optional[str] = os.environ.get('AML_CONDA_SCORE_FILE')
    aml_conda_scorecopy_file: Optional[str] = os.environ.get('AML_CONDA_SCORECOPY_FILE')
    compute_scoring_name: Optional[str] = os.environ.get('AML_COMPUTE_CLUSTER_SCORING')
    pipeline_id: Optional[str] = os.environ.get('SCORING_PIPELINE_ID')
    # Removed a duplicate trailing re-declaration of
    # scoring_datastore_access_key (it silently shadowed the field declared
    # above with the identical value).
|
nilq/baby-python
|
python
|
# Learn more: https://github.com/Ensembl/ols-client
import os
from setuptools import setup, find_packages
# Read the long description, licence text and version string needed by
# setup() below; all paths are resolved relative to this setup.py.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
    readme = f.read()
with open(os.path.join(os.path.dirname(__file__), 'LICENSE')) as f:
    license_ct = f.read()
with open(os.path.join(os.path.dirname(__file__), 'VERSION')) as f:
    version = f.read()
def import_requirements():
    """Return the lines of requirements.txt (whitespace-stripped) as a list.

    The file is resolved relative to this setup.py so the function works
    regardless of the current working directory.
    """
    req_path = os.path.join(os.path.dirname(__file__), 'requirements.txt')
    with open(req_path) as handle:
        # strip() removes trailing newlines and surrounding whitespace;
        # blank lines survive as empty strings, as before.
        return [line.strip() for line in handle.readlines()]
# Package metadata; install_requires comes straight from requirements.txt
# via import_requirements() above.
setup(
    name='production_services',
    version=version,
    description='Ensembl Production Database Application',
    long_description=readme,
    author='Marc Chakiachvili,James Allen,Luca Da Rin Fioretto,Vinay Kaikala',
    author_email='mchakiachvili@ebi.ac.uk,jallen@ebi.ac.uk,ldrf@ebi.ac.uk,vkaikala@ebi.ac.uk',
    maintainer='Ensembl Production Team',
    maintainer_email='ensembl-production@ebi.ac.uk',
    url='https://github.com/Ensembl/production_services',
    license='APACHE 2.0',
    # Ship every package except the test and docs trees.
    packages=find_packages(exclude=('tests', 'docs')),
    install_requires=import_requirements(),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.6",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ]
)
|
nilq/baby-python
|
python
|
from typing import Optional

from vidispine.base import EntityBase
from vidispine.errors import InvalidInput
from vidispine.typing import BaseJson
class Search(EntityBase):
    """Search

    Search Vidispine objects.

    :vidispine_docs:`Vidispine doc reference <collection>`

    """
    entity = 'search'

    def __call__(self, *args, **kwargs) -> BaseJson:
        """Browses items and collections

        :param metadata: Optional metadata (search document) supplied
            to perform a shared search query.
        :param params: Optional query parameters.
        :param matrix_params: Optional matrix parameters.

        :return: JSON response from the request.
        :rtype: vidispine.typing.BaseJson.

        """
        return self._search(*args, **kwargs)

    def _query(
        self,
        suffix: Optional[str] = None,
        metadata: Optional[dict] = None,
        params: Optional[dict] = None,
        matrix_params: Optional[dict] = None,
    ) -> BaseJson:
        """Shared implementation behind every search endpoint.

        GET when no search document is supplied, PUT with the document
        otherwise.  An explicitly supplied but empty document is an error.
        """
        if params is None:
            params = {}
        if suffix is None:
            endpoint = self._build_url(matrix_params=matrix_params)
        else:
            endpoint = self._build_url(suffix, matrix_params=matrix_params)
        if metadata is None:
            return self.client.get(endpoint, params=params)
        if not metadata:
            raise InvalidInput('Please supply metadata.')
        return self.client.put(endpoint, json=metadata, params=params)

    def _search(
        self,
        metadata: Optional[dict] = None,
        params: Optional[dict] = None,
        matrix_params: Optional[dict] = None
    ) -> BaseJson:
        # Delegate to the shared implementation; previously this dispatched
        # to two near-duplicate private methods.
        return self._query(None, metadata, params, matrix_params)

    def _search_with_search_doc(
        self,
        metadata: dict,
        params: Optional[dict] = None,
        matrix_params: Optional[dict] = None
    ) -> BaseJson:
        # Kept for backward compatibility with existing callers/tests.
        # An empty (or None) document is rejected, as before.
        if not metadata:
            raise InvalidInput('Please supply metadata.')
        return self._query(None, metadata, params, matrix_params)

    def _search_without_search_doc(
        self,
        params: Optional[dict] = None,
        matrix_params: Optional[dict] = None
    ) -> BaseJson:
        # Kept for backward compatibility with existing callers/tests.
        return self._query(None, None, params, matrix_params)

    def shape(
        self,
        metadata: Optional[dict] = None,
        params: Optional[dict] = None,
        matrix_params: Optional[dict] = None
    ) -> BaseJson:
        """Searches shapes

        :param metadata: Optional metadata (shape document) supplied
            to perform a search query.
        :param params: Optional query parameters.
        :param matrix_params: Optional matrix parameters.

        :return: JSON response from the request.
        :rtype: vidispine.typing.BaseJson.

        """
        return self._query('shape', metadata, params, matrix_params)

    def _search_shapes_without_search_doc(
        self,
        params: Optional[dict] = None,
        matrix_params: Optional[dict] = None
    ) -> BaseJson:
        # Kept for backward compatibility with existing callers/tests.
        return self._query('shape', None, params, matrix_params)

    def _search_shapes_with_search_doc(
        self,
        metadata: dict,
        params: Optional[dict] = None,
        matrix_params: Optional[dict] = None
    ) -> BaseJson:
        # Kept for backward compatibility with existing callers/tests.
        if not metadata:
            raise InvalidInput('Please supply metadata.')
        return self._query('shape', metadata, params, matrix_params)
|
nilq/baby-python
|
python
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import copy
from datetime import datetime
from functools import partial
import os
from code import Code
import json_parse
# The template for the header file of the generated FeatureProvider.
# The %(...)s placeholders are filled in by FeatureCompiler.Write().
HEADER_FILE_TEMPLATE = """
// Copyright %(year)s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// GENERATED FROM THE FEATURES FILE:
// %(source_files)s
// DO NOT EDIT.
#ifndef %(header_guard)s
#define %(header_guard)s
#include "extensions/common/features/base_feature_provider.h"
namespace extensions {
class %(provider_class)s : public BaseFeatureProvider {
public:
%(provider_class)s();
~%(provider_class)s() override;
private:
DISALLOW_COPY_AND_ASSIGN(%(provider_class)s);
};
} // namespace extensions
#endif // %(header_guard)s
"""

# The beginning of the .cc file for the generated FeatureProvider.
# Continued by FeatureCompiler.Render() and closed with CC_FILE_END.
CC_FILE_BEGIN = """
// Copyright %(year)s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// GENERATED FROM THE FEATURES FILE:
// %(source_files)s
// DO NOT EDIT.
#include "%(header_file_path)s"
#include "extensions/common/features/api_feature.h"
#include "extensions/common/features/behavior_feature.h"
#include "extensions/common/features/complex_feature.h"
#include "extensions/common/features/manifest_feature.h"
#include "extensions/common/features/permission_feature.h"
namespace extensions {
"""

# The end of the .cc file for the generated FeatureProvider.
CC_FILE_END = """
%(provider_class)s::~%(provider_class)s() {}
} // namespace extensions
"""
# A "grammar" for what is and isn't allowed in the features.json files. This
# grammar has to list all possible keys and the requirements for each. The
# format of each entry is:
#   'key': {
#     allowed_type_1: optional_properties,
#     allowed_type_2: optional_properties,
#   }
# |allowed_types| are the types of values that can be used for a given key. The
# possible values are list, unicode, bool, and int.
# |optional_properties| provide more restrictions on the given type. The options
# are:
#   'subtype': Only applicable for lists. If provided, this enforces that each
#              entry in the list is of the specified type.
#   'enum_map': A map of strings to C++ enums. When the compiler sees the given
#               enum string, it will replace it with the C++ version in the
#               compiled code. For instance, if a feature specifies
#               'channel': 'stable', the generated C++ will assign
#               version_info::Channel::STABLE to channel. The keys in this map
#               also serve as a list all of possible values.
#   'allow_all': Only applicable for lists. If present, this will check for
#                a value of "all" for a list value, and will replace it with
#                the collection of all possible values. For instance, if a
#                feature specifies 'contexts': 'all', the generated C++ will
#                assign the list of Feature::BLESSED_EXTENSION_CONTEXT,
#                Feature::BLESSED_WEB_PAGE_CONTEXT et al for contexts. If not
#                specified, defaults to false.
#   'values': A list of all possible allowed values for a given key.
# If a type definition does not have any restrictions (beyond the type itself),
# an empty definition ({}) is used.
# NOTE(review): this module is Python 2 -- `unicode` below is the py2 builtin
# and would need to become `str` under Python 3.
FEATURE_GRAMMAR = (
  {
    'blacklist': {
      list: {'subtype': unicode}
    },
    'channel': {
      unicode: {
        'enum_map': {
          'trunk': 'version_info::Channel::UNKNOWN',
          'canary': 'version_info::Channel::CANARY',
          'dev': 'version_info::Channel::DEV',
          'beta': 'version_info::Channel::BETA',
          'stable': 'version_info::Channel::STABLE',
        }
      }
    },
    'command_line_switch': {
      unicode: {}
    },
    'component_extensions_auto_granted': {
      bool: {}
    },
    'contexts': {
      list: {
        'enum_map': {
          'blessed_extension': 'Feature::BLESSED_EXTENSION_CONTEXT',
          'blessed_web_page': 'Feature::BLESSED_WEB_PAGE_CONTEXT',
          'content_script': 'Feature::CONTENT_SCRIPT_CONTEXT',
          'extension_service_worker': 'Feature::SERVICE_WORKER_CONTEXT',
          'web_page': 'Feature::WEB_PAGE_CONTEXT',
          'webui': 'Feature::WEBUI_CONTEXT',
          'unblessed_extension': 'Feature::UNBLESSED_EXTENSION_CONTEXT',
        },
        'allow_all': True
      },
    },
    'default_parent': {
      bool: {'values': [True]}
    },
    'dependencies': {
      list: {'subtype': unicode}
    },
    'extension_types': {
      list: {
        'enum_map': {
          'extension': 'Manifest::TYPE_EXTENSION',
          'hosted_app': 'Manifest::TYPE_HOSTED_APP',
          'legacy_packaged_app': 'Manifest::TYPE_LEGACY_PACKAGED_APP',
          'platform_app': 'Manifest::TYPE_PLATFORM_APP',
          'shared_module': 'Manifest::TYPE_SHARED_MODULE',
          'theme': 'Manifest::TYPE_THEME',
        },
        'allow_all': True
      },
    },
    'location': {
      unicode: {
        'enum_map': {
          'component': 'SimpleFeature::COMPONENT_LOCATION',
          'external_component': 'SimpleFeature::EXTERNAL_COMPONENT_LOCATION',
          'policy': 'SimpleFeature::POLICY_LOCATION',
        }
      }
    },
    'internal': {
      bool: {'values': [True]}
    },
    'matches': {
      list: {'subtype': unicode}
    },
    'max_manifest_version': {
      int: {'values': [1]}
    },
    'min_manifest_version': {
      int: {'values': [2]}
    },
    'noparent': {
      bool: {'values': [True]}
    },
    'platforms': {
      list: {
        'enum_map': {
          'chromeos': 'Feature::CHROMEOS_PLATFORM',
          'linux': 'Feature::LINUX_PLATFORM',
          'mac': 'Feature::MACOSX_PLATFORM',
          'win': 'Feature::WIN_PLATFORM',
        }
      }
    },
    'session_types': {
      list: {
        'enum_map': {
          'regular': 'FeatureSessionType::REGULAR',
          'kiosk': 'FeatureSessionType::KIOSK',
        }
      }
    },
    'whitelist': {
      list: {'subtype': unicode}
    },
  })

# The feature classes this compiler can generate; the chosen one is passed on
# the command line as |feature_class| (validated in __main__).
FEATURE_CLASSES = ['APIFeature', 'BehaviorFeature',
                   'ManifestFeature', 'PermissionFeature']
def HasProperty(property_name, value):
  """Returns True if the feature dict |value| defines |property_name|."""
  return any(existing == property_name for existing in value)
def HasAtLeastOneProperty(property_names, value):
  """Returns True if |value| defines any of the keys in |property_names|."""
  for name in property_names:
    if name in value:
      return True
  return False
def DoesNotHaveProperty(property_name, value):
  """Returns True if |value| does not define |property_name|."""
  return not (property_name in value)
# Per-feature-class validation rules. Each entry maps a feature class name to
# a list of (predicate, error message) pairs; the 'all' rules are applied to
# every class in addition to its own (see Feature.Validate).
VALIDATION = ({
  'all': [
    (partial(HasAtLeastOneProperty, ['channel', 'dependencies']),
     'Features must specify either a channel or dependencies'),
  ],
  'APIFeature': [
    (partial(HasProperty, 'contexts'),
     'APIFeatures must specify at least one context')
  ],
  'ManifestFeature': [
    (partial(HasProperty, 'extension_types'),
     'ManifestFeatures must specify at least one extension type'),
    (partial(DoesNotHaveProperty, 'contexts'),
     'ManifestFeatures do not support contexts.'),
  ],
  'BehaviorFeature': [],
  'PermissionFeature': [
    (partial(HasProperty, 'extension_types'),
     'PermissionFeatures must specify at least one extension type'),
    (partial(DoesNotHaveProperty, 'contexts'),
     'PermissionFeatures do not support contexts.'),
  ],
})

# These keys are used to find the parents of different features, but are not
# compiled into the features themselves.
IGNORED_KEYS = ['default_parent']

# By default, if an error is encountered, assert to stop the compilation. This
# can be disabled for testing.
ENABLE_ASSERTIONS = True

# JSON parsing returns all strings of characters as unicode types. For testing,
# we can enable converting all string types to unicode to avoid writing u''
# everywhere.
STRINGS_TO_UNICODE = False
class Feature(object):
  """A representation of a single simple feature that can handle all parsing,
  validation, and code generation.
  """
  def __init__(self, name):
    # Fully-qualified feature name, e.g. 'app.window'.
    self.name = name
    # True once SetParent() has seeded values from a parent feature.
    self.has_parent = False
    # Accumulated parse/validation error strings (see _AddError).
    self.errors = []
    # Map of feature key -> already-C++-formatted value string.
    self.feature_values = {}

  def _GetType(self, value):
    """Returns the type of the given value. This can be different than type() if
    STRINGS_TO_UNICODE is enabled.
    """
    t = type(value)
    if not STRINGS_TO_UNICODE:
      return t
    if t is str:
      return unicode
    return t

  def _AddError(self, error):
    """Adds an error to the feature. If ENABLE_ASSERTIONS is active, this will
    also assert to stop the compilation process (since errors should never be
    found in production).
    """
    self.errors.append(error)
    if ENABLE_ASSERTIONS:
      assert False, error

  def _AddKeyError(self, key, error):
    """Adds an error relating to a particular key in the feature.
    """
    self._AddError('Error parsing feature "%s" at key "%s": %s' %
                       (self.name, key, error))

  def _GetCheckedValue(self, key, expected_type, expected_values,
                       enum_map, value):
    """Returns a string to be used in the generated C++ code for a given key's
    python value, or None if the value is invalid. For example, if the python
    value is True, this returns 'true', for a string foo, this returns "foo",
    and for an enum, this looks up the C++ definition in the enum map.

    key: The key being parsed.
    expected_type: The expected type for this value, or None if any type is
                   allowed.
    expected_values: The list of allowed values for this value, or None if any
                     value is allowed.
    enum_map: The map from python value -> cpp value for all allowed values,
              or None if no special mapping should be made.
    value: The value to check.
    """
    valid = True
    if expected_values and value not in expected_values:
      self._AddKeyError(key, 'Illegal value: "%s"' % value)
      valid = False
    t = self._GetType(value)
    if expected_type and t is not expected_type:
      self._AddKeyError(key, 'Illegal value: "%s"' % value)
      valid = False
    if not valid:
      return None
    if enum_map:
      return enum_map[value]
    # Render the python value as a C++ literal.
    if t in [str, unicode]:
      return '"%s"' % str(value)
    if t is int:
      return str(value)
    if t is bool:
      return 'true' if value else 'false'
    assert False, 'Unsupported type: %s' % value

  def _ParseKey(self, key, value, grammar):
    """Parses the specific key according to the grammar rule for that key if it
    is present in the json value.

    key: The key to parse.
    value: The full value for this feature.
    grammar: The rule for the specific key.
    """
    if key not in value:
      return
    v = value[key]
    is_all = False
    # 'all' expands to every value of the enum map, when allow_all is set.
    if v == 'all' and list in grammar and 'allow_all' in grammar[list]:
      v = []
      is_all = True
    value_type = self._GetType(v)
    if value_type not in grammar:
      self._AddKeyError(key, 'Illegal value: "%s"' % v)
      return
    expected = grammar[value_type]
    expected_values = None
    enum_map = None
    if 'values' in expected:
      expected_values = expected['values']
    elif 'enum_map' in expected:
      enum_map = expected['enum_map']
      expected_values = enum_map.keys()
    if is_all:
      v = copy.deepcopy(expected_values)
    expected_type = None
    if value_type is list and 'subtype' in expected:
      expected_type = expected['subtype']
    cpp_value = None
    # If this value is a list, iterate over each entry and validate. Otherwise,
    # validate the single value.
    if value_type is list:
      cpp_value = []
      for sub_value in v:
        cpp_sub_value = self._GetCheckedValue(key, expected_type,
                                              expected_values, enum_map,
                                              sub_value)
        if cpp_sub_value:
          cpp_value.append(cpp_sub_value)
      if cpp_value:
        cpp_value = '{' + ','.join(cpp_value) + '}'
    else:
      cpp_value = self._GetCheckedValue(key, expected_type, expected_values,
                                        enum_map, v)
    if cpp_value:
      self.feature_values[key] = cpp_value
    elif key in self.feature_values:
      # If the key is empty and this feature inherited a value from its parent,
      # remove the inherited value.
      del self.feature_values[key]

  def SetParent(self, parent):
    """Sets the parent of this feature, and inherits all properties from that
    parent.
    """
    assert not self.feature_values, 'Parents must be set before parsing'
    self.feature_values = copy.deepcopy(parent.feature_values)
    self.has_parent = True

  def Parse(self, parsed_json):
    """Parses the feature from the given json value."""
    # Flag unknown keys first, then parse each known key per its grammar.
    for key in parsed_json.keys():
      if key not in FEATURE_GRAMMAR:
        self._AddKeyError(key, 'Unrecognized key')
    for key, key_grammar in FEATURE_GRAMMAR.iteritems():
      self._ParseKey(key, parsed_json, key_grammar)

  def Validate(self, feature_class):
    """Runs the VALIDATION rules for |feature_class| plus the 'all' rules."""
    for validator, error in (VALIDATION[feature_class] + VALIDATION['all']):
      if not validator(self.feature_values):
        self._AddError(error)

  def GetCode(self, feature_class):
    """Returns the Code object for generating this feature."""
    c = Code()
    c.Append('%s* feature = new %s();' % (feature_class, feature_class))
    c.Append('feature->set_name("%s");' % self.name)
    for key in sorted(self.feature_values.keys()):
      if key in IGNORED_KEYS:
        continue;
      c.Append('feature->set_%s(%s);' % (key, self.feature_values[key]))
    return c
class FeatureCompiler(object):
  """A compiler to load, parse, and generate C++ code for a number of
  features.json files."""

  def __init__(self, chrome_root, source_files, feature_class,
               provider_class, out_root, out_base_filename):
    # See __main__'s ArgumentParser for documentation on these properties.
    self._chrome_root = chrome_root
    self._source_files = source_files
    self._feature_class = feature_class
    self._provider_class = provider_class
    self._out_root = out_root
    self._out_base_filename = out_base_filename
    # The json value for the feature files.
    self._json = {}
    # The parsed features, keyed by feature name. A value is either a Feature
    # or, for complex features, a list of Features.
    self._features = {}

  def _Load(self):
    """Loads and parses the source from each input file and puts the result in
    self._json."""
    for f in self._source_files:
      abs_source_file = os.path.join(self._chrome_root, f)
      try:
        with open(abs_source_file, 'r') as f:
          f_json = json_parse.Parse(f.read())
      except:
        # Deliberately broad: annotate which file failed, then re-raise the
        # original exception unchanged.
        print('FAILED: Exception encountered while loading "%s"' %
              abs_source_file)
        raise
      dupes = set(f_json) & set(self._json)
      assert not dupes, 'Duplicate keys found: %s' % list(dupes)
      self._json.update(f_json)

  def _FindParent(self, feature_name, feature_value):
    """Checks to see if a feature has a parent. If it does, returns the
    parent."""
    no_parent = False
    if type(feature_value) is list:
      # A complex feature: all children must agree on 'noparent'.
      no_parent_values = ['noparent' in v for v in feature_value]
      no_parent = all(no_parent_values)
      assert no_parent or not any(no_parent_values), (
          '"%s:" All child features must contain the same noparent value' %
          feature_name)
    else:
      no_parent = 'noparent' in feature_value
    sep = feature_name.rfind('.')
    # BUG FIX: was `sep is -1`, which tests object identity rather than value
    # equality and only worked because CPython interns small integers (it is a
    # SyntaxWarning on modern Pythons). Use `==`.
    if sep == -1 or no_parent:
      return None
    parent_name = feature_name[:sep]
    while sep != -1 and parent_name not in self._features:
      # This recursion allows for a feature to have a parent that isn't a direct
      # ancestor. For instance, we could have feature 'alpha', and feature
      # 'alpha.child.child', where 'alpha.child.child' inherits from 'alpha'.
      # TODO(devlin): Is this useful? Or logical?
      sep = feature_name.rfind('.', 0, sep)
      parent_name = feature_name[:sep]
    if sep == -1:
      # TODO(devlin): It'd be kind of nice to be able to assert that the
      # deduced parent name is in our features, but some dotted features don't
      # have parents and also don't have noparent, e.g. system.cpu. We should
      # probably just noparent them so that we can assert this.
      #   raise KeyError('Could not find parent "%s" for feature "%s".' %
      #                      (parent_name, feature_name))
      return None
    parent_value = self._features[parent_name]
    parent = parent_value
    if type(parent_value) is list:
      # Complex parent: pick the child marked 'default_parent'.
      for p in parent_value:
        if 'default_parent' in p.feature_values:
          parent = p
          break
      assert parent, 'No default parent found for %s' % parent_name
    return parent

  def _CompileFeature(self, feature_name, feature_value):
    """Parses a single feature."""
    if 'nocompile' in feature_value:
      assert feature_value['nocompile'], (
          'nocompile should only be true; otherwise omit this key.')
      return

    def parse_and_validate(name, value, parent):
      # Parse one simple feature, inheriting from |parent| when present.
      try:
        feature = Feature(name)
        if parent:
          feature.SetParent(parent)
        feature.Parse(value)
        feature.Validate(self._feature_class)
        return feature
      except:
        # Annotate which feature failed, then re-raise.
        print('Failure to parse feature "%s"' % feature_name)
        raise

    parent = self._FindParent(feature_name, feature_value)
    # Handle complex features, which are lists of simple features.
    if type(feature_value) is list:
      feature_list = []
      # This doesn't handle nested complex features. I think that's probably for
      # the best.
      for v in feature_value:
        feature_list.append(parse_and_validate(feature_name, v, parent))
      self._features[feature_name] = feature_list
      return

    self._features[feature_name] = parse_and_validate(
        feature_name, feature_value, parent)

  def Compile(self):
    """Parses all features after loading the input files."""
    self._Load()  # fixed: dropped stray trailing semicolon
    # Iterate over in sorted order so that parents come first.
    for k in sorted(self._json.keys()):
      self._CompileFeature(k, self._json[k])

  def Render(self):
    """Returns the Code object for the body of the .cc file, which handles the
    initialization of all features."""
    c = Code()
    c.Append('%s::%s() {' % (self._provider_class, self._provider_class))
    c.Sblock()
    for k in sorted(self._features.keys()):
      c.Sblock('{')
      feature = self._features[k]
      if type(feature) is list:
        # Complex feature: build each simple feature, then wrap them.
        c.Append('std::vector<Feature*> features;')
        for f in feature:
          c.Sblock('{')
          c.Concat(f.GetCode(self._feature_class))
          c.Append('features.push_back(feature);')
          c.Eblock('}')
        c.Append('ComplexFeature* feature(new ComplexFeature(&features));')
        c.Append('feature->set_name("%s");' % k)
      else:
        c.Concat(feature.GetCode(self._feature_class))
      c.Append('AddFeature("%s", feature);' % k)
      c.Eblock('}')
    c.Eblock('}')
    return c

  def Write(self):
    """Writes the output."""
    header_file_path = self._out_base_filename + '.h'
    cc_file_path = self._out_base_filename + '.cc'
    substitutions = ({
      'header_file_path': header_file_path,
      'header_guard': (header_file_path.replace('/', '_').
                           replace('.', '_').upper()),
      'provider_class': self._provider_class,
      'source_files': str(self._source_files),
      'year': str(datetime.now().year)
    })
    if not os.path.exists(self._out_root):
      os.makedirs(self._out_root)
    # Write the .h file.
    with open(os.path.join(self._out_root, header_file_path), 'w') as f:
      header_file = Code()
      header_file.Append(HEADER_FILE_TEMPLATE)
      header_file.Substitute(substitutions)
      f.write(header_file.Render().strip())
    # Write the .cc file.
    with open(os.path.join(self._out_root, cc_file_path), 'w') as f:
      cc_file = Code()
      cc_file.Append(CC_FILE_BEGIN)
      cc_file.Substitute(substitutions)
      cc_file.Concat(self.Render())
      cc_end = Code()
      cc_end.Append(CC_FILE_END)
      cc_end.Substitute(substitutions)
      cc_file.Concat(cc_end)
      f.write(cc_file.Render().strip())
if __name__ == '__main__':
  # Command-line driver: compile one or more features.json files into a
  # C++ FeatureProvider (.h/.cc pair) rooted at out_root/out_base_filename.
  parser = argparse.ArgumentParser(description='Compile json feature files')
  parser.add_argument('chrome_root', type=str,
                      help='The root directory of the chrome checkout')
  parser.add_argument(
      'feature_class', type=str,
      help='The name of the class to use in feature generation ' +
               '(e.g. APIFeature, PermissionFeature)')
  parser.add_argument('provider_class', type=str,
                      help='The name of the class for the feature provider')
  parser.add_argument('out_root', type=str,
                      help='The root directory to generate the C++ files into')
  parser.add_argument(
      'out_base_filename', type=str,
      help='The base filename for the C++ files (.h and .cc will be appended)')
  parser.add_argument('source_files', type=str, nargs='+',
                      help='The source features.json files')
  args = parser.parse_args()
  # Reject unknown feature classes early, before touching any files.
  if args.feature_class not in FEATURE_CLASSES:
    raise NameError('Unknown feature class: %s' % args.feature_class)
  c = FeatureCompiler(args.chrome_root, args.source_files, args.feature_class,
                      args.provider_class, args.out_root,
                      args.out_base_filename)
  c.Compile()
  c.Write()
|
nilq/baby-python
|
python
|
from flask_jsondash import settings
def test_settings_have_url_keys_specified():
    """Every chart family config must declare both asset-url keys."""
    required_keys = ('js_url', 'css_url')
    for config in settings.CHARTS_CONFIG.values():
        for key in required_keys:
            assert key in config
def test_settings_have_urls_list_or_none():
    """Asset-url entries are always lists (possibly empty)."""
    for config in settings.CHARTS_CONFIG.values():
        for key in ('js_url', 'css_url'):
            assert isinstance(config[key], list)
def test_all_enabled_by_default():
    """Every chart family ships enabled out of the box."""
    assert all(config['enabled'] for config in settings.CHARTS_CONFIG.values())
def test_valid_helplink():
    """When a family provides a help_link, it must be an http(s) URL."""
    configs_with_links = (
        cfg for cfg in settings.CHARTS_CONFIG.values() if 'help_link' in cfg
    )
    for cfg in configs_with_links:
        assert cfg['help_link'].startswith('http')
def test_families_with_dependencies_are_valid_in_config():
    """Every declared dependency must itself be a configured chart family."""
    known_families = set(settings.CHARTS_CONFIG)
    for config in settings.CHARTS_CONFIG.values():
        deps = config['dependencies']
        if not deps:
            continue
        for dep in deps:
            assert dep in known_families
|
nilq/baby-python
|
python
|
# Phone number constant (E.164-style, country code +91).
# NOTE(review): the trailing space inside the literal looks unintentional --
# confirm no downstream string matching depends on it before stripping.
number ="+919769352682 "
|
nilq/baby-python
|
python
|
import asyncio
import statistics
import time
from typing import Optional
import pytest
import pytest_asyncio
from janus import Queue as JanusQueue
from utils import create_kafka_event_from_dict, create_kafka_message_from_dict
from eventbus.config import (
ConsumerConfig,
HttpSinkConfig,
HttpSinkMethod,
UseProducersConfig,
)
from eventbus.consumer import EventConsumer, KafkaConsumer
from eventbus.event import EventProcessStatus, KafkaEvent
@pytest.fixture
def consumer_conf():
    """Baseline ConsumerConfig shared by the consumer tests.

    include/exclude are regexes: events titled ``test.*`` are consumed,
    except ``test.exclude``; everything else is skipped.
    """
    consumer_conf = ConsumerConfig(
        kafka_topics=["topic1"],
        kafka_config={
            "bootstrap.servers": "127.0.0.1:9093",
            "group.id": "test-group-1",
        },
        use_producers=UseProducersConfig(producer_ids=["p1", "p2"]),
        include_events=[r"test\..*"],
        exclude_events=[r"test\.exclude"],
        sink=HttpSinkConfig(
            url="/", method=HttpSinkMethod.POST, timeout=0.2, max_retry_times=3
        ),
        concurrent_per_partition=1,
    )
    yield consumer_conf
class MockInternalConsumer:
    """In-memory stand-in for the Kafka client consumer used in tests.

    Messages are fed in with put() and handed back from poll(); commits are
    recorded in ``committed_data`` so tests can assert on them. In benchmark
    mode the message offset is repurposed to carry the enqueue timestamp
    (microseconds) so that commit() can record commit latency instead.
    """

    def __init__(self):
        self.queue = JanusQueue(maxsize=100000)
        self.committed_data = []  # commit records (messages or latencies)
        self.benchmark = False    # when True, measure commit latency
        self.closed = False       # set by close(); makes poll() raise

    def put(self, item, block: bool = True, timeout: Optional[float] = None):
        """Enqueue a message to be returned by a later poll()."""
        return self.queue.sync_q.put(item, block, timeout)

    def poll(self, timeout):
        """Return the next message, or None if none arrives within timeout."""
        if self.closed:
            raise RuntimeError
        try:
            msg = self.queue.sync_q.get(block=True, timeout=timeout)
            if self.benchmark:
                # Stamp the enqueue time (microseconds) into the offset.
                msg._offset = int(time.time() * 1000000)
            return msg
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; an empty queue (queue.Empty) still
            # yields None, mimicking a real consumer's poll timeout.
            return None

    def commit(self, message=None, offsets=None, asynchronous=True):
        """Record the commit; in benchmark mode record the latency instead."""
        if self.benchmark:
            self.committed_data.append(time.time() - (message.offset() / 1000000))
        else:
            self.committed_data.append(message)

    def store_offsets(self, message=None, offsets=None):
        """Mirror the real consumer's store_offsets by delegating to commit()."""
        self.commit(message, offsets)

    def close(self):
        """Mark the consumer closed; later poll() calls raise RuntimeError."""
        self.closed = True
@pytest_asyncio.fixture
async def event_consumer(mocker, consumer_conf):
    """EventConsumer wired to a MockInternalConsumer and a no-op HTTP sink.

    The sink is patched to report DONE immediately for every event, so tests
    exercise only the fetch -> send -> commit pipeline.
    """
    async def mock_send_event(self, event: KafkaEvent):
        # await asyncio.sleep(0.01)
        return event, EventProcessStatus.DONE

    mocker.patch("eventbus.sink.HttpSink.send_event", mock_send_event)

    consumer = KafkaConsumer("t1", consumer_conf)
    mock_consumer = MockInternalConsumer()
    consumer._internal_consumer = mock_consumer
    # commit_spy = mocker.spy(consumer._internal_consumer, "commit")

    event_consumer = EventConsumer("t1", consumer_conf)
    event_consumer._consumer = consumer
    event_consumer._send_queue: JanusQueue = JanusQueue(maxsize=100)
    event_consumer._commit_queue = JanusQueue(maxsize=100)

    yield event_consumer
@pytest.mark.asyncio
async def test_send_events(consumer_conf):
    """fetch_events decodes Kafka messages into events on the send queue,
    silently dropping messages that cannot be decoded."""
    send_queue = JanusQueue(maxsize=100)

    consumer = KafkaConsumer("t1", consumer_conf)
    mock_consumer = MockInternalConsumer()
    consumer._internal_consumer = mock_consumer

    asyncio.create_task(
        consumer.fetch_events(send_queue)
    )  # trigger fetch events thread

    # single message flows through
    test_msg_1 = create_kafka_message_from_dict({"title": "test.e1"})
    mock_consumer.put(test_msg_1)
    event = await send_queue.async_q.get()
    assert event.title == "test.e1"
    assert send_queue.async_q.empty() == True

    # multiple messages preserve order
    test_msg_2 = create_kafka_message_from_dict({"title": "test.e2"})
    test_msg_3 = create_kafka_message_from_dict({"title": "test.e3"})
    mock_consumer.put(test_msg_2)
    mock_consumer.put(test_msg_3)
    event = await send_queue.async_q.get()
    assert event.title == "test.e2"
    event = await send_queue.async_q.get()
    assert event.title == "test.e3"
    assert send_queue.async_q.empty() == True

    # a message without a title must not reach the queue
    test_msg_4 = create_kafka_message_from_dict({"published": "xxx"})
    mock_consumer.put(test_msg_4)
    assert send_queue.async_q.empty() == True

    await consumer.close()
@pytest.mark.asyncio
async def test_commit_events(mocker, consumer_conf):
    """commit_events stores offsets once per event pulled off the commit
    queue."""
    commit_queue = JanusQueue(maxsize=100)

    consumer = KafkaConsumer("t1", consumer_conf)
    consumer._internal_consumer = MockInternalConsumer()
    store_spy = mocker.spy(consumer._internal_consumer, "store_offsets")

    asyncio.create_task(
        consumer.commit_events(commit_queue)
    )  # trigger commit events thread

    test_event_1 = create_kafka_event_from_dict({"title": "test.e1"})
    test_event_2 = create_kafka_event_from_dict({"title": "test.e2"})
    commit_queue.sync_q.put((test_event_1, EventProcessStatus.DONE))
    commit_queue.sync_q.put((test_event_2, EventProcessStatus.DONE))

    # give the background task time to drain the queue
    await asyncio.sleep(0.1)
    await consumer.close()

    assert store_spy.call_count == 2
@pytest.mark.asyncio
async def test_event_consumer(event_consumer):
    """End-to-end: events fed into the internal consumer flow through the
    sink and are committed exactly once, in order; nothing is committed
    after cancel()."""
    mock_consumer = event_consumer._consumer._internal_consumer

    # let's do this two times to check if the coordinator are able to rerun
    asyncio.create_task(event_consumer.run())

    # check the whole pipeline, if can get all events in commit method
    test_events_amount = 10
    for i in range(test_events_amount):
        mock_consumer.put(
            create_kafka_message_from_dict({"title": f"test.e{i+1}", "offset": i + 1})
        )
    await asyncio.sleep(0.1)
    await event_consumer.cancel()
    assert len(mock_consumer.committed_data) == test_events_amount

    # check how it acts when new events come after the coordinator cancelled
    mock_consumer.put(
        create_kafka_message_from_dict({"title": f"test.ne", "offset": -1})
    )
    await asyncio.sleep(0.1)
    assert len(mock_consumer.committed_data) == test_events_amount

    # check the order of received commits
    assert [m.offset() for m in mock_consumer.committed_data] == [
        i for i in range(1, 11)
    ]
@pytest.mark.asyncio
async def test_event_consumer_abnormal_cases(event_consumer):
    """Placeholder for abnormal-path coverage.

    TODO: exercise failure modes (sink errors, consumer crashes, retries).
    """
    pass
@pytest.mark.asyncio
@pytest.mark.benchmark
async def test_event_consumer_benchmark(event_consumer):
    """Throughput/latency benchmark (opt-in via the `benchmark` marker).

    The mock consumer's benchmark mode stamps each message offset with its
    enqueue time, so committed_data ends up holding per-event commit
    latencies in seconds.
    """
    import cProfile
    import io
    import pstats
    from pstats import SortKey

    mock_consumer = event_consumer._consumer._internal_consumer
    mock_consumer.benchmark = True

    start_time = time.time()
    test_events_amount = 10000
    for i in range(test_events_amount):
        partition = i % 10  # spread events over 10 partitions
        mock_consumer.put(
            create_kafka_message_from_dict(
                {"title": f"test.e{i+1}", "partition": partition},
                faster=True,
            )
        )
    print("\nput events cost: ", time.time() - start_time)

    # https://towardsdatascience.com/how-to-profile-your-code-in-python-e70c834fad89
    pr = cProfile.Profile()
    pr.enable()

    # let's do this two times to check if the coordinator are able to rerun
    asyncio.create_task(event_consumer.run())
    # while True:
    #     await asyncio.sleep(0.1)
    #     if coordinator._send_queue.async_q.empty():
    #         break
    await asyncio.sleep(10)
    await event_consumer.cancel()
    await asyncio.sleep(1)

    # committed_data holds commit latencies (seconds) in benchmark mode
    print("\n---\n")
    print("Length: ", len(mock_consumer.committed_data))
    print("Max: ", max(mock_consumer.committed_data))
    print("Median: ", statistics.median(mock_consumer.committed_data))
    print("Mean: ", statistics.mean(mock_consumer.committed_data))
    print("Min: ", min(mock_consumer.committed_data))
    print("\n---\n")

    pr.disable()
    si = io.StringIO()
    ps = pstats.Stats(pr, stream=si).sort_stats(SortKey.CUMULATIVE)
    ps.print_stats(15)
    print(si.getvalue())

    assert len(mock_consumer.committed_data) == test_events_amount
@pytest.mark.asyncio
async def test_event_consumer_skip_events(event_consumer):
    """Events matching no include pattern (or matching an exclude pattern)
    are skipped but their offsets are still committed."""
    mock_consumer = event_consumer._consumer._internal_consumer
    asyncio.create_task(event_consumer.run())

    mock_consumer.put(
        create_kafka_message_from_dict({"title": f"test.e1", "offset": 1})
    )
    mock_consumer.put(
        create_kafka_message_from_dict({"title": f"test.e2", "offset": 2})
    )
    mock_consumer.put(
        create_kafka_message_from_dict({"title": f"test.exclude", "offset": 3})
    )
    for i in range(4, 310):
        mock_consumer.put(
            create_kafka_message_from_dict({"title": f"skip.e{i+1}", "offset": i + 1})
        )
    await asyncio.sleep(0.5)
    await event_consumer.cancel()

    # NOTE(review): skipped events appear to be committed in batches of ~100
    # (hence offsets 104/205/306 below) -- confirm against the consumer's
    # skip-commit batching logic.
    assert len(mock_consumer.committed_data) == 5

    # check the order of received commits
    assert [m.offset() for m in mock_consumer.committed_data] == [1, 2, 104, 205, 306]
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
import numba
import multiprocessing as mp
import itertools as it
import analyzer as ana
import concurrent.futures as fut
def calculate_pvalues(df, blabel, tlabel, mlabel, n, f=np.mean, **kwargs):
    """
    Calculates the p value of the sample.

    Params:
    df     --- (pandas.DataFrame) data read from csv
    blabel --- (str) grouping column
    tlabel --- (str) total column
    mlabel --- (str) measurement column
    n      --- (int) # of bootstraps
    f      --- (function) statistic to apply (default: np.mean)

    kwargs:
    s     --- (boolean) whether to save matrix to csv (default: False)
    fname --- (str) csv file name
    ctrl  --- (str) control

    Returns:
    p_vals --- (pandas.DataFrame) of pairwise p values

    NOTE(review): the statistic `f` is never forwarded to the workers
    (calculate_deltas_process always uses the default np.mean) and the name
    is shadowed by the futures loop below -- confirm intent.
    """
    s = kwargs.pop('s', False)
    fname = kwargs.pop('fname', None)
    ctrl = kwargs.pop('ctrl', None)

    matrix = df.set_index(blabel) # set index

    # get genotypes (index coerced to str so lookups are consistent)
    matrix.index = matrix.index.map(str)
    genotypes = list(matrix.index.unique())

    p_vals = ana.make_empty_dataframe(len(genotypes),\
                len(genotypes), genotypes, genotypes) # empty pandas dataframe

    # 8/1/2017: the old threading/queue implementation was replaced with a
    # process pool (bootstrapping is CPU-bound).
    cores = 4 # core number set to 4 for debugging purposes
    # cores = mp.cpu_count() # number of available cores

    # for loop to iterate through all pairwise comparisons (not permutation)
    print('#{} cores detected for this machine.'.format(cores))
    print('#Starting {} processes for bootstrapping...'.format(cores))
    with fut.ProcessPoolExecutor(max_workers=cores) as executor:
        # if no control is given, perform all pairwise comparisons
        if ctrl is None:
            fs = [executor.submit(calculate_deltas_process, matrix, tlabel, mlabel,
                    pair[0], pair[1], n) for pair in it.combinations(genotypes, 2)]
        # control given
        else:
            genotypes.remove(ctrl)
            fs = [executor.submit(calculate_deltas_process, matrix, tlabel, mlabel,
                    ctrl, genotype, n) for genotype in genotypes]

        # save to matrix as each worker finishes
        for f in fut.as_completed(fs):
            gene_1, gene_2, delta_obs, deltas_bootstrapped = f.result()
            p_vals[gene_1][gene_2] = ana.calculate_pvalue(delta_obs, deltas_bootstrapped)

    print('#Bootstrapping complete.\n')

    # a p value of exactly 0 only means "below bootstrap resolution";
    # report 1/n instead.
    p_vals.replace(0, 1/n, inplace=True)

    print('#P-value matrix:')
    print(p_vals)
    print()

    # save matrix to csv
    if s:
        print('#Saving p-value matrix\n')
        ana.save_matrix(p_vals, fname)

    return p_vals.astype(float)
def calculate_deltas_process(matrix, tlabel, mlabel, gene_1, gene_2, n):
    """
    Worker used by the process pool: bootstraps one genotype pair.

    Params:
    matrix --- (pandas.DataFrame) with index correctly set
    tlabel --- (str) total column
    mlabel --- (str) measurement column
    gene_1, gene_2 --- (String) genotypes to be compared
    n --- (int) # of bootstraps

    Returns: (tuple) gene_1, gene_2, delta_obs, deltas_bootstrapped
    """
    def extract_columns(genotype):
        # rows for one genotype -> (totals, measurements) arrays
        subset = matrix[matrix.index == genotype]
        return np.array(subset[tlabel]), np.array(subset[mlabel])

    ts_1, ms_1 = extract_columns(gene_1)
    ts_2, ms_2 = extract_columns(gene_2)

    delta_obs, deltas_bootstrapped = calculate_deltas(ts_1, ms_1, ts_2, ms_2, n)

    return gene_1, gene_2, delta_obs, deltas_bootstrapped
def calculate_deltas(ts_1, ms_1, ts_2, ms_2, n, f=np.mean):
    """
    Calculates the observed delta and the bootstrapped null deltas.

    Params:
        ts_1 --- (np.array) total samples 1
        ms_1 --- (np.array) measurements 1
        ts_2 --- (np.array) total samples 2
        ms_2 --- (np.array) measurements 2
        n    --- (int) # of bootstraps
        f    --- (function) statistic to apply (default: np.mean)

    Returns: (tuple) delta_obs, deltas_bootstrapped
    """
    # observed difference of the per-plate proportion statistic
    observed = f(ms_2 / ts_2) - f(ms_1 / ts_1)
    return observed, bootstrap_deltas(ts_1, ms_1, ts_2, ms_2, n, f)
def bootstrap_deltas(ts_1, ms_1, ts_2, ms_2, n, f=np.mean):
    """
    Calculates bootstrapped deltas under the null hypothesis that both
    genotypes share one pooled success probability.

    Params:
        ts_1 --- (np.array) total samples 1
        ms_1 --- (np.array) measurements 1
        ts_2 --- (np.array) total samples 2
        ms_2 --- (np.array) measurements 2
        n    --- (int) # of bootstraps
        f    --- (function) statistic applied across plates (default: np.mean);
                 must accept an ``axis`` keyword (np.mean and np.median do)

    Returns:
        deltas --- (np.array) of length n

    Fix: the previous implementation accepted ``f`` but silently ignored it,
    always using np.mean.  It also carried ~100 lines of dead commented-out
    prototypes, removed here.
    """
    # pooled probability with the genotype labels removed (the null model)
    p = (np.sum(ms_1) + np.sum(ms_2)) / (np.sum(ts_1) + np.sum(ts_2))

    # each row is one plate, each column one bootstrap replicate
    null_1 = np.empty((len(ts_1), n))
    for i in np.arange(len(ts_1)):
        null_1[i, :] = np.random.binomial(ts_1[i], p, n) / ts_1[i]

    null_2 = np.empty((len(ts_2), n))
    for i in np.arange(len(ts_2)):
        null_2[i, :] = np.random.binomial(ts_2[i], p, n) / ts_2[i]

    # statistic across plates for every bootstrap, then the difference
    return f(null_2, axis=0) - f(null_1, axis=0)
if __name__ == '__main__':
    import argparse
    import os

    # defaults: number of bootstraps and the statistic to apply
    n = 10**4
    stat = 'mean'
    fs = {'mean': np.mean,
          'median': np.median}

    parser = argparse.ArgumentParser(description='Run analysis of binary data.')

    # begin command line arguments
    parser.add_argument('csv_data',
                        help='Path to the csv data file.',
                        type=str)
    parser.add_argument('title',
                        help='Title of analysis. (without file \
                        extension)',
                        type=str)
    parser.add_argument('-b',
                        help='Number of bootstraps. \
                        (default: {0})'.format(n),
                        type=int,
                        # fix: default was hard-coded to 100, contradicting
                        # the advertised default in the help text above
                        default=n)
    parser.add_argument('-i',
                        help='Column to group measurements by. \
                        (defaults to first column)',
                        type=str,
                        default=None)
    parser.add_argument('-c',
                        help='Control genotype. \
                        (performs one-vs-all analysis if given)',
                        type=str,
                        default=None)
    parser.add_argument('-t',
                        help='Column for total sample size. \
                        (defaults to second column)',
                        type=str,
                        default=None)
    parser.add_argument('-m',
                        help='Column for measurements. \
                        (defaults to third column)',
                        type=str,  # fix: type was missing for this option
                        default=None)
    parser.add_argument('-s',
                        help='Statistic to apply. \
                        (default: {})'.format(stat),
                        type=str,
                        choices=fs.keys(),
                        default=stat)  # consistency: reuse the `stat` constant
    parser.add_argument('--save',
                        help='Save matrices to csv.',
                        action='store_true')
    # end command line arguments

    args = parser.parse_args()

    csv_path = args.csv_data
    title = args.title
    n = args.b
    blabel = args.i
    ctrl = args.c
    tlabel = args.t
    mlabel = args.m
    f = fs[args.s]
    s = args.save

    df = pd.read_csv(csv_path)  # read csv data

    # infer grouping, total, and measurement columns when not given
    if blabel is None:
        print('##No grouping column given...', end='')
        blabel = df.keys()[0]
        print('Inferred as \'{}\' from data.\n'.format(blabel))
    if tlabel is None:
        print('##No total column given...', end='')
        tlabel = df.keys()[1]
        print('Inferred as \'{}\' from data.\n'.format(tlabel))
    if mlabel is None:
        print('##No measurement column given...', end='')
        mlabel = df.keys()[2]
        print('Inferred as \'{}\' from data.\n'.format(mlabel))

    # work inside a directory named after the analysis title
    path = './{}'.format(title)
    if os.path.exists(path):
        os.chdir(path)
    else:
        os.mkdir(path)
        os.chdir(path)

    p_vals = calculate_pvalues(df, blabel, tlabel, mlabel, n, f=f, ctrl=ctrl, s=s, fname='p')
    q_vals = ana.calculate_qvalues(p_vals, s=s, fname='q')
|
nilq/baby-python
|
python
|
"""Create openapi schema from the given API."""
import typing as t
import inspect
import re
from http import HTTPStatus
from functools import partial
from apispec import APISpec, utils
from apispec.ext.marshmallow import MarshmallowPlugin
from http_router.routes import DynamicRoute, Route
from asgi_tools.response import CAST_RESPONSE
from muffin import Response
from muffin.typing import JSONType
from . import LIMIT_PARAM, OFFSET_PARAM, openapi
try:
from apispec import yaml_utils
except ImportError:
yaml_utils = None
DEFAULT_METHODS = 'get',
HTTP_METHODS = ['GET', 'POST', 'PUT', 'PATH', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE', 'CONNECT']
RE_URL = re.compile(r'<(?:[^:<>]+:)?([^<>]+)>')
SKIP_PATH = {'/openapi.json', '/swagger', '/redoc'}
def render_openapi(api, request):
    """Prepare openapi specs for the given API.

    Builds an APISpec from the API's options, registers the security
    scheme(s) declared by the authorization handler, then adds a path
    entry for every route (except the built-in docs routes).
    """
    # Setup Specs
    options = dict(api.openapi_options)
    # Default server URL is the request URL reduced to the API prefix.
    options.setdefault('servers', [{
        'url': str(request.url.with_query('').with_path(api.prefix))
    }])

    spec = APISpec(
        options['info'].pop('title', f"{ api.app.cfg.name.title() } API"),
        options['info'].pop('version', '1.0.0'),
        options.pop('openapi_version', '3.0.0'),
        **options, plugins=[MarshmallowPlugin()])
    # Ad-hoc attribute used by OpenAPIMixin.openapi to dedupe tags per class.
    spec.tags = {}

    # Setup Authorization
    if api.authorize:
        # Security schemes are declared in the authorize callback's
        # YAML docstring section.
        _, _, schema = parse_docs(api.authorize)
        spec.options['security'] = []
        for key, value in schema.items():
            spec.components.security_scheme(key, value)
            spec.options['security'].append({key: []})

    # Setup Paths
    routes = api.router.routes()
    for route in routes:
        if route.path in SKIP_PATH:
            continue
        spec.path(route.path, **route_to_spec(route, spec))

    return spec.to_dict()
def parse_docs(cb: t.Callable) -> t.Tuple[str, str, t.Dict]:
    """Split a callback's docstring into (summary, description, YAML schema).

    The YAML schema is anything after a ``---`` marker; the summary is the
    first paragraph of the remaining text.
    """
    if yaml_utils is None:
        # PyYAML extra is not installed; nothing can be parsed.
        return '', '', {}

    raw = cb.__doc__ or ''
    schema = yaml_utils.load_yaml_from_docstring(raw)
    text = utils.dedent(utils.trim_docstring(raw.split('---')[0]))
    summary, _, description = text.partition('\n\n')
    return summary, description.strip(), schema
def merge_dicts(source: t.Dict, merge: t.Dict) -> t.Dict:
    """Merge ``merge`` into ``source`` without mutating either.

    Nested dicts are merged recursively, lists are concatenated, and any
    other clash is resolved in favour of ``merge``.
    """
    result = dict(source)
    for key, value in merge.items():
        if key in source:
            current = source[key]
            if isinstance(current, dict) and isinstance(value, dict):
                value = merge_dicts(current, value)
            elif isinstance(current, list) and isinstance(value, list):
                value = current + value
        result[key] = value
    return result
def route_to_spec(route: Route, spec: APISpec) -> t.Dict:
    """Convert the given route to openapi path parameters and operations."""
    results: t.Dict = {'parameters': [], 'operations': {}}

    # Dynamic URL params become path parameters.
    if isinstance(route, DynamicRoute):
        for param in route.params:
            results['parameters'].append({'in': 'path', 'name': param})

    # Unwrap functools.partial so we inspect the real handler.
    target = t.cast(t.Callable, route.target)
    if isinstance(target, partial):
        target = target.func

    # Endpoints (OpenAPIMixin) render their own operations.
    if hasattr(target, 'openapi'):
        results['operations'] = target.openapi(route, spec)  # type: ignore
        return results

    # Plain handlers: derive docs from the docstring and return annotation,
    # then overlay any YAML schema from the docstring.
    summary, desc, schema = parse_docs(target)
    responses = return_type_to_response(target)
    for method in route_to_methods(route):
        results['operations'][method] = {
            'summary': summary,
            'description': desc,
            'responses': responses
        }

    results['operations'] = merge_dicts(results['operations'], schema)
    return results
def route_to_methods(route: Route) -> t.List[str]:
    """Return the route's HTTP methods, lowercased and in canonical order."""
    # Preserve HTTP_METHODS order; fall back to the defaults when the
    # route declares no methods of its own.
    selected = [m for m in HTTP_METHODS if m in (route.methods or [])]
    if not selected:
        selected = list(DEFAULT_METHODS)
    return [m.lower() for m in selected]
def return_type_to_response(fn: t.Callable) -> t.Dict:
    """Generate responses specs based on the given function's return type.

    Only produces an entry when the (possibly CAST-mapped) return
    annotation is a Response subclass with a content type.
    """
    responses: t.Dict[int, t.Dict] = {}
    return_type = fn.__annotations__.get('return')
    # Map plain python return types (dict, str, ...) to response classes.
    return_type = CAST_RESPONSE.get(return_type, return_type)  # type: ignore
    if return_type is None:
        return responses

    if inspect.isclass(return_type) and issubclass(return_type, Response) and \
            return_type.content_type:
        responses[return_type.status_code] = {
            'description': HTTPStatus(return_type.status_code).description,
            'content': {
                return_type.content_type: {
                }
            }
        }
    return responses
class OpenAPIMixin:
    """Render an endpoint to openapi specs."""

    if t.TYPE_CHECKING:
        from .endpoint import RESTOptions

        meta: RESTOptions

    @classmethod
    def openapi(cls, route: Route, spec: APISpec) -> t.Dict:
        """Get openapi specs for the endpoint.

        Registers the endpoint's tag and marshmallow schema once per
        class, then builds one operation per HTTP method, enriched with
        list parameters (sorting/filters/pagination) and per-method docs.
        """
        if cls.meta.name is None:
            return {}

        operations: t.Dict = {}
        summary, desc, schema = parse_docs(cls)
        # Register the tag/schema only the first time this class is seen.
        if cls not in spec.tags:
            spec.tags[cls] = cls.meta.name
            spec.tag({'name': cls.meta.name, 'description': summary})
            spec.components.schema(cls.meta.Schema.__name__, schema=cls.meta.Schema)

        schema_ref = {'$ref': f"#/components/schemas/{ cls.meta.Schema.__name__ }"}
        for method in route_to_methods(route):
            operations[method] = {'tags': [spec.tags[cls]]}
            # A "resource" route addresses a single object by its id param.
            is_resource_route = isinstance(route, DynamicRoute) and \
                route.params.get(cls.meta.name_id)

            # Collection GET: expose sorting/filter/pagination parameters.
            if method == 'get' and not is_resource_route:
                operations[method]['parameters'] = []
                if cls.meta.sorting:
                    operations[method]['parameters'].append(cls.meta.sorting.openapi)

                if cls.meta.filters:
                    operations[method]['parameters'].append(cls.meta.filters.openapi)

                if cls.meta.limit:
                    operations[method]['parameters'].append({
                        'name': LIMIT_PARAM, 'in': 'query',
                        'schema': {'type': 'integer', 'minimum': 1, 'maximum': cls.meta.limit},
                        'description': 'The number of items to return',
                    })
                    operations[method]['parameters'].append({
                        'name': OFFSET_PARAM, 'in': 'query',
                        'schema': {'type': 'integer', 'minimum': 0},
                        'description': 'The offset of items to return',
                    })

            # Update from the method
            meth = getattr(cls, method, None)
            # Custom sub-routes carry the real handler name in '__meth__'.
            if isinstance(route.target, partial) and '__meth__' in route.target.keywords:
                meth = getattr(cls, route.target.keywords['__meth__'], None)

            elif method in {'post', 'put'}:
                # Body-carrying methods accept the endpoint schema as JSON.
                operations[method]['requestBody'] = {
                    'required': True, 'content': {'application/json': {'schema': schema_ref}}
                }

            if meth:
                operations[method]['summary'], operations[method]['description'], mschema = openapi.parse_docs(meth)  # noqa
                return_type = meth.__annotations__.get('return')
                # JSONType annotation implies a JSON response of the schema.
                if return_type == 'JSONType' or return_type == JSONType:
                    responses = {200: {'description': 'Request is successfull', 'content': {
                        'application/json': {'schema': schema_ref}
                    }}}
                else:
                    responses = return_type_to_response(meth)

                operations[method]['responses'] = responses
                # Method docstring YAML overrides the generated operation.
                operations[method] = merge_dicts(operations[method], mschema)

        return merge_dicts(operations, schema)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import versioneer
# Package metadata for hiwenet; version handling is delegated to versioneer.
setup(name='hiwenet',
      version=versioneer.get_version(),
      cmdclass=versioneer.get_cmdclass(),
      description='Histogram-weighted Networks for Feature Extraction and Advance Analysis in Neuroscience',
      long_description='Histogram-weighted Networks for Feature Extraction and Advance Analysis in Neuroscience; hiwenet',
      author='Pradeep Reddy Raamana',
      author_email='raamana@gmail.com',
      url='https://github.com/raamana/hiwenet',
      # ship every package except test packages
      packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
      install_requires=['numpy', 'pyradigm', 'nibabel', 'networkx', 'medpy'],
      classifiers=[
          'Intended Audience :: Science/Research',
          'Programming Language :: Python',
          'Topic :: Software Development',
          'Topic :: Scientific/Engineering',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: POSIX',
          'Operating System :: Unix',
          'Operating System :: MacOS',
          'Programming Language :: Python :: 3.6',
      ],
      # `hiwenet` console command maps to hiwenet.__main__:main
      entry_points={
          "console_scripts": [
              "hiwenet=hiwenet.__main__:main",
          ]
      }
      )
|
nilq/baby-python
|
python
|
import os
import time
def main():
    """Remove the neutron-avs-agent pmon config file, then idle forever.

    The process stays alive (sleeping) so its supervisor does not keep
    restarting it after the one-shot cleanup.
    """
    try:
        os.remove("/etc/pmon.d/neutron-avs-agent.conf")
    except OSError:
        # Fix: was a bare `except:` that swallowed every exception
        # (including KeyboardInterrupt/SystemExit).  Only filesystem
        # errors -- e.g. the file already being gone -- are expected
        # and safe to ignore here.
        pass

    while True:
        time.sleep(100)


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from paste import constants
from paste.models import Snippet
class SnippetSerializer(serializers.ModelSerializer):
    """Snippet model serializer."""

    class Meta:
        model = Snippet
        fields = '__all__'
        # Owner is set server-side, never taken from the client payload.
        read_only_fields = ['owner']

    def create(self, validated_data: dict) -> Snippet:
        """Check that if current user is anonymous they are not trying to
        create a private snippet, then create new instance.
        """
        # When `private` is omitted, fall back to the project-wide default.
        if (self.context['request'].user.is_anonymous
                and validated_data.get('private', constants.DEFAULT_PRIVATE)):
            raise serializers.ValidationError(
                'anonymous users cannot create private snippets')
        return super().create(validated_data)
|
nilq/baby-python
|
python
|
""" Seeking Alpha View """
__docformat__ = "numpy"
import argparse
from typing import List
import pandas as pd
from datetime import datetime
from gamestonk_terminal.helper_funcs import (
check_positive,
parse_known_args_and_warn,
valid_date,
)
from gamestonk_terminal.discovery import seeking_alpha_model
def earnings_release_dates_view(other_args: List[str]):
    """Prints a data frame with earnings release dates

    Parameters
    ----------
    other_args : List[str]
        argparse other args - ["-p", "20", "-n", "5"]
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="up_earnings",
        description="""Upcoming earnings release dates. [Source: Seeking Alpha]""",
    )
    parser.add_argument(
        "-p",
        "--pages",
        action="store",
        dest="n_pages",
        type=check_positive,
        default=10,
        help="Number of pages to read upcoming earnings from in Seeking Alpha website.",
    )
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_positive,
        default=3,
        help="Number of upcoming earnings release dates to print",
    )

    # Returns None (and warns) when the arguments cannot be parsed.
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return

    df_earnings = seeking_alpha_model.get_next_earnings(ns_parser.n_pages)

    # Do not truncate ticker/name columns when printing.
    pd.set_option("display.max_colwidth", None)
    # One section per release date, up to n_num dates.
    for n_days, earning_date in enumerate(df_earnings.index.unique()):
        if n_days > (ns_parser.n_num - 1):
            break
        print(f"Earning Release on {earning_date.date()}")
        print("----------------------------------------------")
        print(
            df_earnings[earning_date == df_earnings.index][
                ["Ticker", "Name"]
            ].to_string(index=False, header=False)
        )
        print("")
def _print_article_list(articles, limit: int):
    """Print published date, id, title and url for up to `limit` articles.

    Shared helper: this loop was previously duplicated verbatim in both
    `latest_news_view` and `trending_news_view`.
    """
    for idx, article in enumerate(articles):
        print(
            article["publishedAt"].replace("T", " ").replace("Z", ""),
            "-",
            article["id"],
            "-",
            article["title"],
        )
        print(article["url"])
        print("")
        if idx >= limit - 1:
            break


def _print_article(article_id: int):
    """Fetch a single article by id and print its metadata and content."""
    article = seeking_alpha_model.get_article_data(article_id)
    print(
        article["publishedAt"][: article["publishedAt"].rfind(":") - 3].replace(
            "T", " "
        ),
        " ",
        article["title"],
    )
    print(article["url"])
    print("")
    print(article["content"])


def latest_news_view(other_args: List[str]):
    """Prints the latest news article list

    Parameters
    ----------
    other_args : List[str]
        argparse other args - ["-i", "123123", "-n", "5"]
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="latest",
        description="""Latest news articles. [Source: Seeking Alpha]""",
    )
    parser.add_argument(
        "-i",
        "--id",
        action="store",
        dest="n_id",
        type=check_positive,
        default=-1,
        help="article ID number",
    )
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_positive,
        default=10,
        help="number of articles being printed",
    )
    parser.add_argument(
        "-d",
        "--date",
        action="store",
        dest="n_date",
        type=valid_date,
        default=datetime.now().strftime("%Y-%m-%d"),
        help="starting date",
    )

    # A bare leading value is treated as an article id.
    if other_args:
        if "-" not in other_args[0]:
            other_args.insert(0, "-i")

    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return

    if ns_parser.n_id == -1:
        # User wants to see all latest news
        articles = seeking_alpha_model.get_article_list(
            ns_parser.n_date, ns_parser.n_num
        )
        _print_article_list(articles, ns_parser.n_num)
    else:
        # User wants to access specific article
        _print_article(ns_parser.n_id)


def trending_news_view(other_args: List[str]):
    """Prints the trending news article list

    Parameters
    ----------
    other_args : List[str]
        argparse other args - ["-i", "123123", "-n", "5"]
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="trending",
        description="""Trending news articles. [Source: Seeking Alpha]""",
    )
    parser.add_argument(
        "-i",
        "--id",
        action="store",
        dest="n_id",
        type=check_positive,
        default=-1,
        help="article ID number",
    )
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_positive,
        default=10,
        help="number of articles being printed",
    )

    # A bare leading value is treated as an article id.
    if other_args:
        if "-" not in other_args[0]:
            other_args.insert(0, "-i")

    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return

    if ns_parser.n_id == -1:
        # User wants to see all trending articles
        articles = seeking_alpha_model.get_trending_list(ns_parser.n_num)
        _print_article_list(articles, ns_parser.n_num)
    else:
        # User wants to access specific article
        _print_article(ns_parser.n_id)
|
nilq/baby-python
|
python
|
import os
import ntpath
from preprocessing.segmentation import segment
from preprocessing.augment import augment
from CNN.recognize_character import recognize
from Unicode.seqgen import sequenceGen
from Unicode.printdoc import unicode_to_kn
def segmentation_call(image):
    """Segment a handwriting image into character images.

    Parameters:
        image --- path to the input image file.

    Returns:
        (rootdir, segdir) --- the processing root directory and the
        directory holding the segmented character images.
    """
    # Fixes: the base name was computed twice, and the output directory
    # variable was named `dir`, shadowing the builtin.
    base = os.path.splitext(ntpath.basename(image))[0]
    rootdir = 'web_app/hwrkannada/hwrapp/static/hwrapp/images/Processed_' + base
    if not os.path.exists(rootdir):
        os.makedirs(rootdir)

    # Kept as '/'-concatenation (not os.path.join) to preserve the exact
    # path strings produced previously.
    segdir = rootdir + '/Segmented_' + base

    # call the segmentation script on the image
    segment(image)

    return rootdir, segdir
def augmentation_call(image, rootdir):
    """Augment every segmented image under `rootdir`.

    Returns the directory the augmented images are written to.
    """
    base = os.path.splitext(ntpath.basename(image))[0]
    augdir = rootdir + '/Augmented_' + base

    # augment each of the segmented images
    augment(rootdir, augdir)

    return augdir
def prediction_call(augdir):
    """Recognize the characters under `augdir` and return the Kannada text."""
    # recognize all images in the directory
    predictions = recognize(os.path.join(os.getcwd(), augdir))

    # Unicode sequence from predictions, then rendered Kannada text.
    sequence = sequenceGen(predictions)
    return unicode_to_kn(sequence)
|
nilq/baby-python
|
python
|
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from authors.apps.authentication.models import User
class ReadStats(models.Model):
    """
    Users read statistics
    """
    # Exactly one stats row per user; removed together with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE, db_index=True)
    # Counters default to zero for freshly created users.
    reads = models.PositiveIntegerField(default=0)
    views = models.PositiveIntegerField(default=0)


@receiver(post_save, sender=User)
def create_user_stats(sender, instance, created, **kwargs):
    """
    Creates the user statistics on save of the user
    model
    """
    # Only on first save (creation), never on subsequent updates.
    if created:
        ReadStats.objects.create(user=instance)
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
from flask import Flask
from flask_cors import CORS
from api.v1 import api_v1
# Serve the pre-built frontend as static files from ./frontend.
app = Flask(__name__, static_url_path='', static_folder='frontend')
# Allow cross-origin requests to the API from any origin.
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
app.register_blueprint(api_v1, url_prefix='/api/v1')
# Swagger UI: show the operations list expanded by default.
app.config.SWAGGER_UI_DOC_EXPANSION = 'list'
# Global matplotlib style for plots generated by the API.
plt.style.use('ggplot')


@app.route('/')
def default():
    """Serve the frontend entry point."""
    return app.send_static_file('index.html')

# Alternative dev setup: proxy to a live frontend dev server instead.
# import requests
# @app.route('/', defaults={'path': ''})
# @app.route('/<path:path>')
# def frontend_proxy(path):
#     return requests.get('http://localhost:8080/{}'.format(path)).content

if __name__ == '__main__':
    app.run()
|
nilq/baby-python
|
python
|
from datetime import datetime
from django.utils import timezone
import factory
from .. import models
from faker.generator import random
random.seed(0xDEADBEEF)
class BundleFactory(factory.django.DjangoModelFactory):
    """Factory producing `models.Bundle` instances for tests."""

    class Meta:
        model = models.Bundle

    # NOTE(review): ids are filled with fake person names -- presumably any
    # short unique string is acceptable for these fields; confirm.
    easydita_id = factory.Faker('first_name')
    easydita_resource_id = factory.Faker('last_name')
    time_queued = factory.LazyFunction(timezone.now)
nilq/baby-python
|
python
|
from argparse import ArgumentParser
from irun.compiler import compile_node, construct
from irun.parser import parse
def compile_irun(source):
    """Compile irun source text and return the constructed query.

    Parses the source, compiles the resulting tree into an RQL context,
    then constructs the final output from that context.
    """
    tree = parse(source)
    rql_context = compile_node(tree)
    return construct(rql_context)
def main(argv=None):
    """CLI entry point: compile irun code given via -c/--cli or -f/--file."""
    parser = ArgumentParser()
    parser.add_argument("-c", "--cli", help="input from command line")
    parser.add_argument("-f", "--file", help="input from file")
    options = parser.parse_args(argv)

    def read_source():
        # -c takes precedence over -f when both are supplied
        if options.cli:
            return options.cli
        if options.file:
            with open(options.file) as stream:
                return stream.read()
        raise ValueError("run.py expects either -c/--cli or -f/--file to operate")

    print(compile_irun(read_source()))


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import torch
from torch.autograd import Variable
import render_pytorch
import image
import camera
import material
import light
import shape
import numpy as np
# Differentiable-rendering regression test: recover the vertices of a single
# (partially clipped) triangle by gradient descent against a rendered target.
resolution = [256, 256]

# Camera on the -z axis looking at the origin.
position = Variable(torch.from_numpy(np.array([0, 0, -5], dtype=np.float32)))
look_at = Variable(torch.from_numpy(np.array([0, 0, 0], dtype=np.float32)))
up = Variable(torch.from_numpy(np.array([0, 1, 0], dtype=np.float32)))
fov = Variable(torch.from_numpy(np.array([45.0], dtype=np.float32)))
clip_near = Variable(torch.from_numpy(np.array([0.01], dtype=np.float32)))
clip_far = Variable(torch.from_numpy(np.array([10000.0], dtype=np.float32)))
cam = camera.Camera(position = position,
                    look_at = look_at,
                    up = up,
                    cam_to_world = None,
                    fov = fov,
                    clip_near = clip_near,
                    clip_far = clip_far,
                    resolution = resolution)

# Single grey diffuse material shared by the geometry.
mat_grey=material.Material(\
    diffuse_reflectance=torch.from_numpy(np.array([0.5,0.5,0.5],dtype=np.float32)))
materials=[mat_grey]

# Target triangle; its third vertex sits behind the light quad (z = -7).
vertices=Variable(torch.from_numpy(\
    np.array([[-1.3,1.0,0.0], [1.0,1.0,0.0], [-0.5,-2.0,-7.0]],dtype=np.float32)))
indices=torch.from_numpy(np.array([[0,1,2]],dtype=np.int32))
shape_triangle=shape.Shape(vertices,indices,None,None,0)

# Area light: a two-triangle quad at z = -7.
light_vertices=Variable(torch.from_numpy(\
    np.array([[-1,-1,-7],[1,-1,-7],[-1,1,-7],[1,1,-7]],dtype=np.float32)))
light_indices=torch.from_numpy(\
    np.array([[0,1,2],[1,3,2]],dtype=np.int32))
shape_light=shape.Shape(light_vertices,light_indices,None,None,0)
shapes=[shape_triangle,shape_light]

light_intensity=torch.from_numpy(\
    np.array([20,20,20],dtype=np.float32))
# NOTE(review): this rebinds the name `light`, shadowing the imported
# `light` module -- any later use of the module here would fail.
light=light.Light(1,light_intensity)
lights=[light]

# Render the ground-truth target at high sample count (256 spp).
args=render_pytorch.RenderFunction.serialize_scene(\
    cam,materials,shapes,lights,resolution,256,1)

# To apply our Function, we use Function.apply method. We alias this as 'render'.
render = render_pytorch.RenderFunction.apply
img = render(0, *args)
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle_clipped/target.exr')
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle_clipped/target.png')
target = Variable(torch.from_numpy(image.imread('test/results/test_single_triangle_clipped/target.exr')))

# Perturb the vertices to get the optimization's starting point.
shape_triangle.vertices = Variable(torch.from_numpy(\
    np.array([[-1.0,1.5,0.3], [0.9,1.2,-0.3], [0.0,-3.0,-6.5]],dtype=np.float32)),
    requires_grad=True)
args=render_pytorch.RenderFunction.serialize_scene(cam,materials,shapes,lights,resolution,256,1)
img = render(1, *args)
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle_clipped/init.png')
diff = torch.abs(target - img)
image.imwrite(diff.data.numpy(), 'test/results/test_single_triangle_clipped/init_diff.png')

# Optimize the vertex positions with Adam against an L2 image loss.
optimizer = torch.optim.Adam([shape_triangle.vertices], lr=2e-2)
for t in range(200):
    optimizer.zero_grad()
    # Forward pass: render the image (only 4 spp during optimization).
    args=render_pytorch.RenderFunction.serialize_scene(\
        cam,materials,shapes,lights,resolution,4,1)
    img = render(t+1, *args)
    image.imwrite(img.data.numpy(), 'test/results/test_single_triangle_clipped/iter_{}.png'.format(t))
    loss = (img - target).pow(2).sum()
    print('loss:', loss.item())

    loss.backward()
    print('grad:', shape_triangle.vertices.grad)

    optimizer.step()
    print('vertices:', shape_triangle.vertices)

# Final high-quality render plus difference image for inspection.
args=render_pytorch.RenderFunction.serialize_scene(\
    cam,materials,shapes,lights,resolution,256,1)
img = render(202, *args)
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle_clipped/final.exr')
image.imwrite(img.data.numpy(), 'test/results/test_single_triangle_clipped/final.png')
image.imwrite(np.abs(target.data.numpy() - img.data.numpy()), 'test/results/test_single_triangle_clipped/final_diff.png')

# Assemble the per-iteration frames into a video.
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
    "test/results/test_single_triangle_clipped/iter_%d.png", "-vb", "20M",
    "test/results/test_single_triangle_clipped/out.mp4"])
|
nilq/baby-python
|
python
|
# Advent of Code 2020 day 2 (part 2): count passwords where the rule
# character appears at exactly one of the two 1-based positions.
f=open("./CoA/2020/data/02a.txt","r")
valid=0
for line in f:
    # first position: digits before the '-'
    first=int(line[:line.index("-")])
    print(first)
    # second position: digits between '-' and the space
    second=int(line[line.index("-")+1:line.index(" ")])
    print(second)
    # the required character sits between the space and the ':'
    rule = line[line.index(" ")+1:line.index(":")]
    print(rule)
    # the password follows ": "
    code = line[line.index(":")+2:]
    print(code)
    # exactly-one-match check, written as two mutually exclusive cases
    if code[first-1]==rule and code[second-1]!=rule:
        valid+=1
        print("found 1st "+code[first-1]+code[second-1] )
    elif code[second-1]==rule and code[first-1]!=rule:
        #elif code[second-1]==rule: # WRONG!! because it does not exclude both positions matching
        valid+=1
        print("found 2nd "+code[first-1]+code[second-1] )
print(valid)
f.close()
|
nilq/baby-python
|
python
|
## An implementation of the credential scheme based on an algebraic
## MAC proposed by Chase, Meiklejohn and Zaverucha in Algebraic MACs and Keyed-Verification
## Anonymous Credentials", at ACM CCS 2014. The credentials scheme
## is based on the GGM based aMAC. (see section 4.2, pages 8-9)
from amacs import *
from genzkp import ZKEnv, ZKProof, ConstGen, Gen, Sec, ConstPub, Pub
from petlib.bn import Bn
def cred_setup():
    """ Generates the parameters of the algebraic MAC scheme"""
    # params unpack elsewhere as (Group, g, h, order).
    params = setup_ggm()
    return params
def cred_CredKeyge(params, n):
    """ Generates keys and parameters for the credential issuer

    Returns ((Cx0, iparams), (sk, x0_bar)) -- the public part and the
    issuer secrets, where Cx0 commits to sk[0] with randomness x0_bar.
    """
    _, g, h, o = params
    sk, iparams = keyGen_ggm(params, n)
    x0_bar = o.random()
    # Pedersen-style commitment to the secret x0 = sk[0].
    Cx0 = sk[0] * g + x0_bar * h
    return (Cx0, iparams), (sk, x0_bar)
def cred_UserKeyge(params):
    """ Generates keys and parameters for credential user """
    G, g, h, o = params
    priv = o.random()
    pub = priv * g  # This is just an EC El-Gamal key
    return (priv, pub)
def secret_proof(params, n):
    """ Builds a proof of correct El-Gamal encryption for a number of secret attributes.

    The statement covers: knowledge of the El-Gamal private key for pub,
    and, per attribute i, that (sKi, Ci) = (ri*g, ri*pub + attri*g).
    """
    G, _, _, _ = params

    # Construct the proof
    zk = ZKProof(G)

    # Some constants and secrets
    pub, g, h = zk.get(ConstGen, ["pub", "g", "h"])
    priv = zk.get(Sec, "priv")

    ## The El-Gamal ciphertexts and secrets
    ris = zk.get_array(Sec, "ri", n)
    attrs = zk.get_array(Sec, "attri", n)
    sKis = zk.get_array(ConstGen, "sKi", n)
    Cis = zk.get_array(ConstGen, "Ci", n)

    # The proof obligations
    zk.add_proof(pub, priv * g)
    for (Ci, sKi, ri, attr) in zip(Cis, sKis, ris, attrs):
        zk.add_proof(sKi, ri * g)
        zk.add_proof(Ci, ri * pub + attr * g)

    return zk
def cred_secret_issue_user(params, keypair, attrib):
    """ Encodes a number of secret attributes to be issued.

    Each attribute is El-Gamal encrypted under the user's own public key,
    together with a ZK proof that the ciphertexts are well formed.
    Returns (pub, (sKis, Cis), sig).
    """
    # We simply encrypt all parameters and make a proof we know
    # the decryption.
    G, g, h, o = params
    priv, pub = keypair

    ris = []
    sKis = []
    Cis = []
    for i, attr in enumerate(attrib):
        ri = o.random()
        ris += [ri]
        # (sKi, Ci) = (ri*g, ri*pub + attr*g) -- El-Gamal of attr*g
        sKis += [ri * g]
        Cis += [ri * pub + attr * g]

    zk = secret_proof(params, len(attrib))

    ## Run the proof
    env = ZKEnv(zk)
    env.g, env.h = g, h
    env.pub = pub
    env.priv = priv
    env.ri = ris
    env.attri = attrib
    env.sKi = sKis
    env.Ci = Cis

    ## Extract the proof
    sig = zk.build_proof(env.get())

    return (pub, (sKis, Cis), sig)
def _check_enc(params, keypair, EGenc, attrib):
    """Debug helper: assert each ciphertext decrypts to attr * g."""
    G, g, h, o = params
    priv, pub = keypair
    for (a, b, atr) in zip(EGenc[0], EGenc[1], attrib):
        # El-Gamal decryption: b - priv*a recovers the encoded message.
        assert (b - (priv * a)) == (atr * g)
def cred_secret_issue_user_check(params, pub, EGenc, sig):
    """ Check the encrypted attributes of a user are well formed.

    Verifies the user's ZK proof over the El-Gamal ciphertexts; raises
    on failure, returns True on success.
    """
    G, g, h, o = params
    (sKis, Cis) = EGenc

    ## First check the inputs (EG ciphertexts) are well formed.
    assert len(sKis) == len(Cis)
    zk = secret_proof(params, len(Cis))

    ## Run the proof
    env = ZKEnv(zk)
    env.g, env.h = g, h
    env.pub = pub
    env.sKi = sKis
    env.Ci = Cis

    ## Verify the proof
    if not zk.verify_proof(env.get(), sig):
        raise Exception("Proof of knowledge of plaintexts failed.")

    return True
def cred_secret_issue_proof(params, num_privs, num_pubs):
    """ The proof that the mixed public / private credential issuing is correct """
    G, _, _, _ = params
    n = num_privs + num_pubs

    # Construct the proof
    zk = ZKProof(G)

    ## The variables
    bCx0 = zk.get(Gen, "bCx_0")
    u, g, h, Cx0, pub = zk.get(ConstGen, ["u", "g", "h", "Cx_0", "pub"])
    b, x0, x0_bar, bx0, bx0_bar = zk.get(Sec, ["b", "x_0", "x_0_bar", "bx_0", "bx_0_bar"])
    xis = zk.get_array(Sec, "xi", n, 1)
    bxis = zk.get_array(Sec, "bxi", n, 1)
    Xis = zk.get_array(ConstGen, "Xi", n, 1)
    bXis = zk.get_array(Gen, "bXi", n, 1)

    ## Proof of knowing the secret of MAC
    zk.add_proof(Cx0, x0 * g + x0_bar * h)
    zk.add_proof(bCx0, b * Cx0)
    zk.add_proof(bCx0, bx0 * g + bx0_bar * h)
    zk.add_proof(u, b * g)

    ## Proof of correct Xi's
    for (xi, Xi, bXi, bxi) in zip(xis, Xis, bXis, bxis):
        zk.add_proof(Xi, xi * h)
        zk.add_proof(bXi, b * Xi)
        zk.add_proof(bXi, bxi * h)

    # Proof of correct Credential Ciphertext
    mis = zk.get_array(ConstPub, "mi", num_pubs)
    CredA, CredB = zk.get(ConstGen, ["CredA", "CredB"])
    EGa = zk.get_array(ConstGen, "EGai", num_privs)
    EGb = zk.get_array(ConstGen, "EGbi", num_privs)
    r_prime = zk.get(Sec, "r_prime")

    # Rebuild (A, B) from public messages and encrypted attributes;
    # the issuer's ciphertext must match this homomorphic combination.
    A = r_prime * g
    B = r_prime * pub + bx0 * g
    for mi, bxi in zip(mis, bxis[:num_pubs]):
        B = B + bxi * (mi * g)

    bxis_sec = bxis[num_pubs:num_pubs + num_privs]
    for eg_a, eg_b, bxi in zip(EGa, EGb, bxis_sec):
        A = A + bxi * eg_a
        B = B + bxi * eg_b

    zk.add_proof(CredA, A)
    zk.add_proof(CredB, B)

    return zk
def cred_secret_issue(params, pub, EGenc, publics, secrets, messages):
    """ Encode a mixture of secret (EGenc) and public (messages) attributes.

    params:   the group parameters (G, g, h, o).
    pub:      the user's ElGamal public key.
    EGenc:    (sKis, Cis) ciphertexts of the user's secret attributes.
    publics:  issuer public values (Cx0, iparams).
    secrets:  issuer secrets (sk, x0_bar).
    messages: the public attribute values.

    Returns (u, (EG_a, EG_b), sig): the blinded MAC base u, the encrypted
    credential, and a proof of correct issuing.
    """
    # Parse variables
    G, g, h, o = params
    sk, x0_bar = secrets
    Cx0, iparams = publics
    (sKis, Cis) = EGenc

    assert len(sKis) == len(Cis)
    assert len(iparams) == len(messages) + len(Cis)

    # Get a blinding b
    b = o.random()
    u = b * g

    # Blind all issuer secrets by b (mod the group order).
    bx0_bar = b.mod_mul(x0_bar, o)
    bsk = []
    for xi in sk:
        bsk += [b.mod_mul(xi, o)]
    bCx0 = b * Cx0
    bXi = []
    for Xi in iparams:
        bXi += [b * Xi]

    # Split the blinded keys: [x0] + keys for public attrs + keys for secret attrs.
    bsk0 = bsk[0]
    open_bsk = bsk[1:len(messages)+1]
    sec_bsk = bsk[len(messages)+1:len(messages)+1+len(Cis)]
    assert [bsk0] + open_bsk + sec_bsk == bsk

    # First build a proto-credential in clear using all public attribs
    r_prime = o.random()
    EG_a = r_prime * g
    EG_b = r_prime * pub + bsk0 * g

    for mi, bxi in zip(messages, open_bsk):
        EG_b = EG_b + (bxi.mod_mul(mi,o) * g)

    # Fold in the user's ciphertexts for the secret attributes
    # (homomorphic ElGamal operations).
    for (eg_ai, eg_bi, bxi) in zip(sKis, Cis, sec_bsk):
        EG_a = EG_a + bxi * eg_ai
        EG_b = EG_b + bxi * eg_bi

    # Now build an epic proof for all this.
    zk = cred_secret_issue_proof(params, len(Cis), len(messages))

    env = ZKEnv(zk)
    env.pub = pub
    env.g, env.h = g, h
    env.u = u
    env.b = b

    # These relate to the proof of x0 ...
    env.x_0 = sk[0]
    env.bx_0 = bsk0
    env.x_0_bar = x0_bar
    env.bx_0_bar = b.mod_mul(x0_bar, o)  # NOTE: same value as bx0_bar above
    env.Cx_0 = Cx0
    env.bCx_0 = bCx0

    # These relate to the knowledge of Xi, xi ...
    env.xi = sk[1:]
    env.Xi = iparams
    env.bxi = bsk[1:]
    env.bXi = bXi

    # These relate to the knowledge of the plaintext ...
    env.r_prime = r_prime
    env.mi = messages
    env.CredA = EG_a
    env.CredB = EG_b
    env.EGai = sKis
    env.EGbi = Cis

    ## Extract the proof
    sig = zk.build_proof(env.get())
    if __debug__:
        assert zk.verify_proof(env.get(), sig, strict=False)

    return u, (EG_a, EG_b), sig
def _internal_ckeck(keypair, u, EncE, secrets, all_attribs):
    """ Check the invariant that the ciphertexts are the encrypted attributes.

    Debug-only helper (name kept for existing callers despite the typo):
    decrypts the issued credential with the user's private key and checks
    it equals H(sk, attributes) * u.
    """
    ## First do decryption
    priv, pub = keypair
    (a, b) = EncE
    Cred = b - (priv * a)

    sk, _ = secrets
    # Hx is the keyed hash of the attributes used by the MAC (defined elsewhere).
    v = Hx(sk, all_attribs)
    assert Cred == v * u
def cred_secret_issue_user_decrypt(params, keypair, u, EncE, publics, messages, EGab, sig):
    """ Decrypts the private / public credential and checks the proof of its correct generation.

    keypair:  the user's (priv, pub) ElGamal key pair.
    u, EncE:  the blinded base and encrypted credential from cred_secret_issue.
    publics:  issuer public values (Cx0, iparams).
    messages: the public attribute values used at issuing.
    EGab:     (sKis, Cis) the user's own attribute ciphertexts.
    sig:      the issuer's proof of correct issuing.

    Returns the credential MAC (u, uprime); raises Exception if the
    issuer's proof does not verify.
    """
    G, g, h, _ = params
    Cx0, iparams = publics
    priv, pub = keypair

    ## ElGamal decryption of the credential tag.
    (EG_a, EG_b) = EncE
    uprime = EG_b - (priv * EG_a)

    sKis, Cis = EGab

    # Now verify the issuer's proof for all this.
    zk = cred_secret_issue_proof(params, len(Cis), len(messages))

    env = ZKEnv(zk)
    env.g, env.h = g, h
    env.u = u
    env.Cx_0 = Cx0
    env.pub = pub
    env.Xi = iparams
    env.mi = messages

    env.CredA = EG_a
    env.CredB = EG_b
    env.EGai = sKis
    env.EGbi = Cis

    ## Extract the proof
    if not zk.verify_proof(env.get(), sig):
        raise Exception("Decryption of credential failed.")

    return (u, uprime)
def cred_issue_proof(params, n):
    """ The proof of public credential generation.

    n is the number of (all public) attributes.  Returns the ZKProof
    object shared by the prover (cred_issue) and verifier (cred_issue_check).
    """
    G, _, _, _ = params

    # Contruct the proof
    zk = ZKProof(G)

    ## The variables
    u, up, g, h, Cx0 = zk.get(ConstGen, ["u", "up", "g", "h", "Cx0"])
    x0, x0_bar = zk.get(Sec, ["x0", "x0_bar"])
    xis = zk.get_array(Sec, "xi", n)
    mis = zk.get_array(ConstPub, "mi", n)
    Xis = zk.get_array(ConstGen, "Xi", n)

    ## Proof of correct MAC: up = (x0 + sum_i xi*mi) * u
    Prod = x0 * u
    for (xi, mi) in zip(xis, mis):
        Prod = Prod + xi*(mi * u)
    zk.add_proof(up, Prod)

    ## Proof of knowing the secret of MAC
    zk.add_proof(Cx0, x0 * g + x0_bar * h)

    ## Proof of correct Xi's
    for (xi, Xi) in zip(xis, Xis):
        zk.add_proof(Xi, xi * h)

    return zk
def cred_issue(params, publics, secrets, messages):
    """ Issue a credential over fully public attributes.

    publics:  issuer public values (Cx0, iparams).
    secrets:  issuer secrets (sk, x0_bar).
    messages: the public attribute values to certify.

    Returns ((u, uprime), sig): the MAC and a proof of correct issuing.
    """
    # Parse variables
    G, g, h, _ = params
    sk, x0_bar = secrets
    Cx0, iparams = publics

    # MAC the attributes directly (no blinding needed, all are public).
    (u, uprime) = mac_ggm(params, sk, messages)

    # Build the proof and associate real variables
    n = len(messages)
    zk = cred_issue_proof(params, n)

    env = ZKEnv(zk)
    env.g, env.h = g, h
    env.u, env.up = u, uprime
    env.x0 = sk[0]
    env.x0_bar = x0_bar
    env.Cx0 = Cx0
    env.xi = sk[1:]
    env.mi = messages
    env.Xi = iparams

    ## Extract the proof
    sig = zk.build_proof(env.get())
    if __debug__:
        assert zk.verify_proof(env.get(), sig, strict=False)

    ## Return the credential (MAC) and proof of correctness
    return (u, uprime), sig
def cred_issue_check(params, publics, mac, sig, messages):
    """ Verify the issuer's proof that a public-attribute credential is correct.

    Returns True if `sig` proves `mac` is a valid MAC on `messages`
    under the issuer parameters `publics`.
    """
    # Parse public variables
    G, g, h, _ = params
    Cx0, iparams = publics
    (u, uprime) = mac

    # Build the proof and assign public variables
    n = len(messages)
    zk = cred_issue_proof(params, n)

    env = ZKEnv(zk)
    env.g, env.h = g, h
    env.u, env.up = u, uprime
    env.Cx0 = Cx0
    env.mi = messages
    env.Xi = iparams

    # Return the result of the verification
    return zk.verify_proof(env.get(), sig)
def cred_show_proof(params, n):
    """ The statement of the credential "show" proof for n attributes.

    Proves knowledge of the attributes mi and blinding factors zi behind
    the commitments Cmi, and that V was correctly formed from them.
    Extended by callers (see test_creds_custom_show) to add extra
    statements about the committed attributes.
    """
    G, _, _, _ = params

    # Contruct the proof
    zk = ZKProof(G)

    ## The variables
    u, g, h = zk.get(ConstGen, ["u", "g", "h"])
    V = zk.get(ConstGen, "V")
    minus_one = zk.get(ConstPub, "minus1")
    r = zk.get(Sec, "r")
    zis = zk.get_array(Sec, "zi", n)
    mis = zk.get_array(Sec, "mi", n)
    Xis = zk.get_array(ConstGen, "Xi", n)
    Cmis = zk.get_array(ConstGen, "Cmi", n)

    # Define the relations to prove: V = -r*g + sum_i zi*Xi
    Vp = r * (minus_one * g)
    for zi, Xi in zip(zis, Xis):
        Vp = Vp + (zi * Xi)
    zk.add_proof(V, Vp)

    # ... and each commitment opens as Cmi = mi*u + zi*h
    for (Cmi, mi, zi) in zip(Cmis, mis, zis):
        zk.add_proof(Cmi, mi*u + zi*h)

    return zk
def cred_show(params, publics, mac, sig, messages, cred_show_proof=cred_show_proof, xenv=None, export_zi=False):
    """ Show (present) a credential without revealing the attributes.

    mac, sig:        the credential MAC and its issuing proof.
    messages:        all attribute values (public and private).
    cred_show_proof: statement builder; replaceable to add custom statements.
    xenv:            optional callable to bind extra proof variables.
    export_zi:       if True also return the blinding factors zis.

    Returns (cred, sig) -- or (cred, sig, zis) -- where
    cred = (u, Cmis, Cup) are the re-randomized base, attribute
    commitments and blinded tag.
    """
    ## Parse and re-randomize
    G, g, h, o = params
    Cx0, iparams = publics

    ## WARNING: this step not in paper description of protocol
    # Checked correctness with Sarah Meiklejohn.
    u, uprime = rerandomize_sig_ggm(params, mac)

    n = len(messages)

    ## Blinding variables for the proof
    r = o.random()
    zis = [o.random() for _ in range(n)]

    Cup = uprime + r * g
    Cmis = [mi * u + zi * h for (mi, zi) in zip(messages, zis)]

    cred = (u, Cmis, Cup)

    # V lets the verifier cancel the blinding using its secret key.
    V = r * ( (-1) * g)
    for zi, Xi in zip(zis, iparams):
        V = V + zi * Xi

    # Define the proof, and instanciate it with variables
    zk = cred_show_proof(params, n)

    env = ZKEnv(zk)
    env.u = u
    env.g, env.h = g, h
    env.V = V
    env.r = r
    env.minus1 = -Bn(1)

    env.zi = zis
    env.mi = messages
    env.Xi = iparams
    env.Cmi = Cmis

    # Allow the caller to bind extra variables for custom statements.
    if xenv:
        xenv(env)

    sig = zk.build_proof(env.get())

    ## Just a sanity check
    if __debug__:
        assert zk.verify_proof(env.get(), sig, strict=False)

    if export_zi:
        return cred, sig, zis
    else:
        return cred, sig
def cred_show_check(params, publics, secrets, creds, sig, cred_show_proof=cred_show_proof, xenv=None):
    """ Verify a credential presentation (the "show" protocol).

    params:          the group parameters (G, g, h, o).
    publics:         issuer public values (Cx0, iparams).
    secrets:         issuer secrets (sk, x0_bar); sk is needed to
                     recompute V from the commitments.
    creds:           (u, Cmis, Cup) as produced by cred_show.
    sig:             the user's show proof.
    cred_show_proof: must match the statement builder used by the prover.
    xenv:            optional callable binding extra proof variables;
                     must mirror the prover's xenv.

    Returns True iff the proof verifies.
    """
    # NOTE: the default for xenv used to be the mutable `{}`; it is now
    # None (falsy, like {}) which is consistent with cred_show and avoids
    # the shared-mutable-default pitfall.  `xenv` is invoked when truthy,
    # so it must be a callable when supplied.
    # Parse the inputs
    G, g, h, _ = params
    sk, _ = secrets
    Cx0, iparams = publics
    (u, Cmis, Cup) = creds

    n = len(iparams)

    ## Recompute a V using the issuer secret key: this cancels the
    ## user's blinding iff the MAC inside the commitments is valid.
    V = sk[0] * u + (- Cup)
    for xi, Cmi in zip(sk[1:], Cmis):
        V = V + xi * Cmi

    # Define the proof, and instanciate it with variables
    zk = cred_show_proof(params, n)

    env = ZKEnv(zk)
    env.u = u
    env.g, env.h = g, h
    env.V = V
    env.minus1 = -Bn(1)

    env.Xi = iparams
    env.Cmi = Cmis

    if xenv:
        xenv(env)

    # Return the result of the verification
    return zk.verify_proof(env.get(), sig)
def time_it_all(repetitions = 1000):
    """ Benchmark every step of the credential protocols.

    Runs each operation `repetitions` times and prints the mean wall-clock
    time per call in milliseconds.
    """
    import time
    # time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter() is the recommended monotonic high-resolution timer.
    print("Timings of operations (%s repetitions)" % repetitions)

    # Baseline: loop overhead only.
    t0 = time.perf_counter()
    for _ in range(repetitions):
        i = 0
    T = time.perf_counter() - t0
    print("%.3f ms\tIdle" % (1000 * T/repetitions))

    t0 = time.perf_counter()
    for _ in range(repetitions):
        ## Setup from credential issuer.
        params = cred_setup()
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential Group Setup" % (1000 * T/repetitions))

    G, _, _, o = params

    ## Attriutes we want to encode
    public_attr = [o.random(), o.random()]
    private_attr = [o.random(), o.random()]
    n = len(public_attr) + len(private_attr)

    t0 = time.perf_counter()
    for _ in range(repetitions):
        ipub, isec = cred_CredKeyge(params, n)
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential Key generation" % (1000 * T/repetitions))

    ## User generates keys and encrypts some secret attributes
    #  the secret attributes are [10, 20]
    t0 = time.perf_counter()
    for _ in range(repetitions):
        keypair = cred_UserKeyge(params)
    T = time.perf_counter() - t0
    print("%.3f ms\tUser Key generation" % (1000 * T/repetitions))

    t0 = time.perf_counter()
    for _ in range(repetitions):
        pub, EGenc, sig = cred_secret_issue_user(params, keypair, private_attr)
    T = time.perf_counter() - t0
    print("%.3f ms\tUser Key generation (proof)" % (1000 * T/repetitions))

    if __debug__:
        _check_enc(params, keypair, EGenc, private_attr)

    ## The issuer checks the secret attributes and encrypts a amac
    #  It also includes some public attributes, namely [30, 40].
    t0 = time.perf_counter()
    for _ in range(repetitions):
        if not cred_secret_issue_user_check(params, pub, EGenc, sig):
            raise Exception("User key generation invalid")
    T = time.perf_counter() - t0
    print("%.3f ms\tUser Key generation (verification)" % (1000 * T/repetitions))

    t0 = time.perf_counter()
    for _ in range(repetitions):
        u, EncE, sig = cred_secret_issue(params, pub, EGenc, ipub, isec, public_attr)
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential issuing" % (1000 * T/repetitions))

    if __debug__:
        _internal_ckeck(keypair, u, EncE, isec, public_attr + private_attr)

    ## The user decrypts the amac
    t0 = time.perf_counter()
    for _ in range(repetitions):
        mac = cred_secret_issue_user_decrypt(params, keypair, u, EncE, ipub, public_attr, EGenc, sig)
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential decryption & verification" % (1000 * T/repetitions))

    ## The show protocol using the decrypted amac
    #  The proof just proves knowledge of the attributes, but any other
    #  ZK statement is also possible by augmenting the proof.
    t0 = time.perf_counter()
    for _ in range(repetitions):
        (creds, sig) = cred_show(params, ipub, mac, sig, public_attr + private_attr)
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential Show (proof)" % (1000 * T/repetitions))

    t0 = time.perf_counter()
    for _ in range(repetitions):
        if not cred_show_check(params, ipub, isec, creds, sig):
            raise Exception("Credential show failed.")
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential Show (verification)" % (1000 * T/repetitions))
def test_creds():
    """End-to-end test of the all-public-attribute credential flow."""
    ## Setup from credential issuer.
    params = cred_setup()
    ipub, isec = cred_CredKeyge(params, 2)

    ## Credential issuing and checking
    mac, sig = cred_issue(params, ipub, isec, [10, 20])
    assert cred_issue_check(params, ipub, mac, sig, [10, 20])

    ## The show protocol
    (creds, sig) = cred_show(params, ipub, mac, sig, [10, 20])
    assert cred_show_check(params, ipub, isec, creds, sig)
def test_creds_custom_show():
    ## Test attaching custom proofs to the show prototcol
    #  for the credential scheme. This should work with both
    #  all public and partly secret attributes.

    ## Setup from credential issuer. Can also setup with secrets (see test_secret_creds)
    params = cred_setup()
    ipub, isec = cred_CredKeyge(params, 2)

    ## Credential issuing and checking
    mac, sig = cred_issue(params, ipub, isec, [10, 20])
    assert cred_issue_check(params, ipub, mac, sig, [10, 20])

    ## Custom proofs require two things:
    #  - cred_show_proof_custom: a custom "cred_show_proof" with additional statements
    #       to prove on the Commitements Cmi = mi * u + zi * h
    #  - xenv: a custom function that instanciates the values of the proof, either
    #       public secret or constant.

    # Example: Prove that the second attribute is double the first
    def cred_show_proof_custom(params, n):
        # Start from the standard show statement and extend it.
        zk = cred_show_proof(params, n)

        u, g, h = zk.get(ConstGen, ["u", "g", "h"])
        zis = zk.get_array(Sec, "zi", n)
        mis = zk.get_array(Sec, "mi", n)
        Cmis = zk.get_array(ConstGen, "Cmi", n)
        twou = zk.get(ConstGen, "twou")

        # Statement that proves Cmi1 = (2 * m0) * u + z1 * h
        zk.add_proof(Cmis[1], mis[0]*twou + zis[1]*h)

        return zk

    def xenv(env):
        # Ensure the constant 2u is correct, both ends.
        env.twou = 2 * env.u

    ## The show protocol -- note the use of "cred_show_proof_custom" and "xenv"
    (creds, sig) = cred_show(params, ipub, mac, sig, [10, 20], cred_show_proof_custom, xenv)
    assert cred_show_check(params, ipub, isec, creds, sig, cred_show_proof_custom, xenv)
def test_secret_creds():
    """End-to-end test of the mixed public/private attribute flow."""
    ## Setup from credential issuer.
    params = cred_setup()

    ## Attriutes we want to encode
    public_attr = [30, 40]
    private_attr = [10, 20]
    n = len(public_attr) + len(private_attr)

    ipub, isec = cred_CredKeyge(params, n)

    ## User generates keys and encrypts some secret attributes
    #  the secret attributes are [10, 20]
    keypair = cred_UserKeyge(params)
    pub, EGenc, sig = cred_secret_issue_user(params, keypair, private_attr)

    if __debug__:
        _check_enc(params, keypair, EGenc, private_attr)

    ## The issuer checks the secret attributes and encrypts a amac
    #  It also includes some public attributes, namely [30, 40].
    assert cred_secret_issue_user_check(params, pub, EGenc, sig)
    u, EncE, sig = cred_secret_issue(params, pub, EGenc, ipub, isec, public_attr)

    if __debug__:
        _internal_ckeck(keypair, u, EncE, isec, public_attr + private_attr)

    ## The user decrypts the amac
    mac = cred_secret_issue_user_decrypt(params, keypair, u, EncE, ipub, public_attr, EGenc, sig)

    ## The show protocol using the decrypted amac
    #  The proof just proves knowledge of the attributes, but any other
    #  ZK statement is also possible by augmenting the proof.
    (creds, sig) = cred_show(params, ipub, mac, sig, public_attr + private_attr)
    assert cred_show_check(params, ipub, isec, creds, sig)
if __name__ == "__main__":
    # Benchmark all protocol steps, then pretty-print the statements of
    # each zero-knowledge proof used by the scheme.
    time_it_all(repetitions=100)

    params = cred_setup()

    print("Proof of secret attributes")
    zk1 = secret_proof(params, 2)
    print(zk1.render_proof_statement())

    print("Proof of secret issuing")
    zk2 = cred_secret_issue_proof(params, 2, 2)
    print(zk2.render_proof_statement())

    print("Proof of public issuing")
    zk3 = cred_issue_proof(params, 2)
    print(zk3.render_proof_statement())

    print("Proof of credential show")
    zk4 = cred_show_proof(params, 4)
    print(zk4.render_proof_statement())
|
nilq/baby-python
|
python
|
import pygame
import math
from Tower import *
pygame.init()
class T_SuperTower(Tower):
    """A level-5 tower variant with long range and high damage.

    sc:     the pygame surface the tower draws onto.
    Images: animation frames; frame 0 is used as the initial sprite.
    """

    def __init__(self, sc, Images):
        # Sprite / animation state.
        self.L1 = Images
        self.image = self.L1[0]

        # Combat stats.
        self.level = 5
        self.range = 100
        self.damage = 100
        self.cooldown = 0
        self.reset = 120
        self.target = 0

        # Position of the tower and its projectile.
        self.x = 0
        self.y = 0
        self.bulletx = 0
        self.bullety = 0
        self.angle = 0

        # Rendering.
        self.screen = sc
        self.color = (255, 0, 0)
|
nilq/baby-python
|
python
|
import os
import tempfile
class Config:
    """Static configuration for training / predicting the trading agent."""

    # --- Run mode -----------------------------------------------------
    IS_TRAIN = True          # True to train, False to predict
    TICKER = 'EURUSD'
    num_of_rows_read = 1000  # 0 reads the whole dataset

    # --- MySQL source (used only when IS_MYSQL is True) ---------------
    IS_MYSQL = False
    MYSQL_USER = 'Write your user name'
    MYSQL_PASSWORD = 'Write your password'
    MYSQL_HOST = 'Write the IP address of the MySQL'
    MYSQL_DATABASE = 'Write the name of the database where your dataset can be found'
    MYSQL_PORT = 0           # your mysql port number
    MYSQL_HOST_PORT = f'{MYSQL_HOST}:{MYSQL_PORT}'

    # --- Environment parameters ---------------------------------------
    env_name = 'trading-v0'
    number_of_actions = 3        # Short (0), Flat (1), Long (2)
    # Number of features; change this unless your dataset has 27 features.
    observation_dimension = 27
    gamma = 0.9
    decay = 0.9
    execution_penalty = 0.0001   # 0.001
    timestep_penalty = 0.0001

    # --- Adaptive learning-rate schedule ------------------------------
    # Episode numbers at which the learning rate switches ...
    first_lr_change = 500
    sec_lr_change = 60000
    third_lr_change = 80000
    # ... and the corresponding learning-rate values.
    first_lr = 1e-4
    sec_lr = 1e-3
    third_lr = 1e-3

    # --- Training parameters ------------------------------------------
    NO_OF_EPISODES = 10000
    LOG_FREQ = 10
    LOGDIR = '/tensorboard/'     # Log path for the tensorboard
    MODEL_DIR = 'model/'         # Path for saving models

    # --- File-name extensions -----------------------------------------
    csv_file = '.csv'
    input_predict_extension = '_input_predict' + csv_file
    simnet = 'simnet/'
    simnet_path_extension = '_simnet.csv'
    actions_path_extension = '_actions.csv'

    # --- Path sources -------------------------------------------------
    INPUT_PREDICT_DATA_PATH = os.path.join('datasets', 'input_predict/')
    TRAINING_DATA_PATH = os.path.join('datasets', 'training/')
    PLOT_PATH = 'plot/'
    OUTPUT_PREDICT_PATH = os.path.join('datasets', 'output_predict/')
|
nilq/baby-python
|
python
|
from typing import Any
class TonException(Exception):
    """Exception for errors returned by the TON client.

    When the error is a dict (as returned by the core library) it is
    flattened into a readable message of the form
    ``[<code>] <message> (Core: <core_version>)``; any other error value
    is passed through unchanged.
    """

    def __init__(self, error: Any):
        # isinstance (rather than `type(error) is dict`) also accepts
        # dict subclasses such as OrderedDict.
        if isinstance(error, dict):
            error = f"[{error.get('code')}] {error.get('message')} " \
                    f"(Core: {error.get('data', {}).get('core_version')})"
        super().__init__(error)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import Flask
'''
Created on Nov 22, 2016
@author: jmartan
'''
import os,signal
import requests
import argparse
import uni_func
import atexit
import unicornhat
def update_widget(codec_ip, username, password, widget_id, value, unset=False):
    """Set a widget value on a Cisco codec touch panel via its XML API.

    codec_ip:  address of the codec; commands are POSTed to /putxml.
    username/password: codec API credentials (HTTP Basic auth).
    widget_id: id of the UI extension widget to update.
    value:     the value to set.
    unset:     if True, send an UnsetValue command first (see below).
    """
    # "unset" is needed in a situation when you try to repeatedly set the same value of the widget
    # and in the mean time someone changes the widget on the touch panel. Probably a bug.
    widget_unset_xml = '''
<Command>
  <UserInterface>
    <Extensions>
      <Widget>
        <UnsetValue>
          <WidgetId>{}</WidgetId>
        </UnsetValue>
      </Widget>
    </Extensions>
  </UserInterface>
</Command>
'''.format(widget_id)

    widget_set_xml = '''
<Command>
  <UserInterface>
    <Extensions>
      <Widget>
        <SetValue>
          <WidgetId>{}</WidgetId>
          <Value>{}</Value>
        </SetValue>
      </Widget>
    </Extensions>
  </UserInterface>
</Command>
'''.format(widget_id, value)

    # print('about to send: {}'.format(widget_xml))
    print('sending XML command to codec {}, id: {}, value: {}'.format(codec_ip, widget_id, value))
    headers = {'content-type':'text/xml'}
    if unset:
        res = requests.post('http://'+codec_ip+'/putxml', data=widget_unset_xml, headers=headers, auth=(username, password), timeout=1)
        print('unset result: {}'.format(res))
    res = requests.post('http://'+codec_ip+'/putxml', data=widget_set_xml, headers=headers, auth=(username, password), timeout=1)
    print('set result: {}'.format(res))
# run the application
# run the application
if __name__ == '__main__':
    # Parse "widget_id=value" pairs plus codec connection details.
    parser = argparse.ArgumentParser(description='Set widget values.')
    parser.add_argument('widget_value', metavar='N', nargs='+',
                        help='"widget_id=value" list')
    parser.add_argument('-c', dest='codec_ip', required=True,
                        help='codec ip address')
    parser.add_argument('-u', dest='username', required=True,
                        help='codec API username')
    parser.add_argument('-p', dest='password', required=True,
                        help='codec API password')
    in_args = parser.parse_args()
    print("args: {}".format(in_args))

    # do not switch the LEDs off
    atexit.unregister(unicornhat._clean_shutdown)

    # The three color widgets are accumulated and applied together after
    # the loop; any other widget is forwarded to the codec immediately.
    color_widgets = ['red', 'green', 'blue']
    red, green, blue = (0, 0, 0)
    update_color_widgets = False
    for arg in in_args.widget_value:
        widget_id, value = arg.split('=')
        if widget_id == 'red':
            red = int(value)
            update_color_widgets = True
        elif widget_id == 'green':
            green = int(value)
            update_color_widgets = True
        elif widget_id == 'blue':
            blue = int(value)
            update_color_widgets = True
        print('red: {}, green: {}, blue: {}'.format(red, green, blue))
        if not widget_id in color_widgets:
            update_widget(in_args.codec_ip, in_args.username, in_args.password, widget_id, value)
#            time.sleep(0.3)

    if update_color_widgets:
        # Apply the color to the Unicorn HAT, then unset+set each color
        # widget so the touch panel reflects the final value.
        uni_func.change_fill(red, green, blue)
        update_widget(in_args.codec_ip, in_args.username, in_args.password, 'red', red, unset=True)
        update_widget(in_args.codec_ip, in_args.username, in_args.password, 'green', green, unset=True)
        update_widget(in_args.codec_ip, in_args.username, in_args.password, 'blue', blue, unset=True)

    # do not switch the LEDs off - another method
    os.kill(os.getpid(), signal.SIGTERM)
'''
sample XML documents to send to codec
Authorization: Basic with API user_id and password
URL: http://<codec_ip>/putxml
Set Value example:
<Command>
<UserInterface>
<Extensions>
<Widget>
<SetValue>
<WidgetId>red</WidgetId>
<Value>128</Value>
</SetValue>
</Widget>
</Extensions>
</UserInterface>
</Command>
Unset Value example:
<Command>
<UserInterface>
<Extensions>
<Widget>
<UnsetValue>
<WidgetId>red</WidgetId>
</UnsetValue>
</Widget>
</Extensions>
</UserInterface>
</Command>
'''
|
nilq/baby-python
|
python
|
import json
import re

from slackbot.bot import Bot
from slackbot.bot import respond_to

import foobot_grapher
def main():
    """Start the Slack bot and block until it exits."""
    Bot().run()
@respond_to('air quality', re.IGNORECASE)
def air_quality(message):
    """Reply to an "air quality" mention with the current sensor graph.

    message: the slackbot message object; the reply is posted through
    its send_webapi helper with the graph as an attachment.
    """
    attachments = [
        {
            'fallback': 'Air quality graph',
            'image_url': foobot_grapher.getSensorReadings(False),
        },
    ]
    # Fix: `json` was used here without ever being imported, so this
    # handler raised NameError whenever it fired (import added at top
    # of file).  send_webapi receives the attachments JSON-encoded, as
    # before.
    message.send_webapi('', json.dumps(attachments))
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
"""
Дан список, заполненный произвольными целыми числами. Найдите в этом списке два числа, произведение которых
максимально. Выведите эти числа в порядке неубывания. Решение должно иметь сложность O(n), где n - размер списка. То
есть сортировку использовать нельзя.
"""
a = list(map(int, input().split()))
negative_max = min(a)
natural_max = max(a)
a.remove(negative_max)
a.remove(natural_max)
negative_prev = min(a)
natural_prev = max(a)
if negative_max * negative_prev > natural_max * natural_prev:
print(min(negative_prev, negative_max), max(negative_prev, negative_max))
else:
print(min(natural_prev, natural_max), max(natural_prev, natural_max))
|
nilq/baby-python
|
python
|
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.http import HttpResponse, HttpRequest
from zilencer.models import RemotePushDeviceToken, RemoteZulipServer
from zerver.lib.exceptions import JsonableError
from zerver.lib.push_notifications import send_android_push_notification, \
send_apple_push_notification
from zerver.lib.response import json_error, json_success
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.validator import check_dict, check_int
from zerver.models import UserProfile, PushDeviceToken, Realm
from zerver.views.push_notifications import validate_token
from typing import Any, Dict, Optional, Union, Text, cast
def validate_entity(entity):
    # type: (Union[UserProfile, RemoteZulipServer]) -> None
    """Reject any caller that is not an authenticated remote Zulip server."""
    if isinstance(entity, RemoteZulipServer):
        return
    raise JsonableError(_("Must validate with valid Zulip server API key"))
def validate_bouncer_token_request(entity, token, kind):
    # type: (Union[UserProfile, RemoteZulipServer], bytes, int) -> None
    """Validate a push-bouncer token request: known token kind, remote
    server entity, and a well-formed token."""
    valid_kinds = (RemotePushDeviceToken.APNS, RemotePushDeviceToken.GCM)
    if kind not in valid_kinds:
        raise JsonableError(_("Invalid token type"))
    validate_entity(entity)
    validate_token(token, kind)
@has_request_variables
def remote_server_register_push(request, entity, user_id=REQ(),
                                token=REQ(), token_kind=REQ(validator=check_int), ios_app_id=None):
    # type: (HttpRequest, Union[UserProfile, RemoteZulipServer], int, bytes, int, Optional[Text]) -> HttpResponse
    """Register a push device token on behalf of a remote Zulip server.

    Deletes any registration of the same token for a *different* user on
    the same server (a user who logged out on the device but failed to
    unregister), then creates or refreshes the registration.
    """
    validate_bouncer_token_request(entity, token, token_kind)
    server = cast(RemoteZulipServer, entity)

    # If a user logged out on a device and failed to unregister,
    # we should delete any other user associations for this token
    # & RemoteServer pair
    RemotePushDeviceToken.objects.filter(
        token=token, kind=token_kind, server=server).exclude(user_id=user_id).delete()

    # Save or update
    remote_token, created = RemotePushDeviceToken.objects.update_or_create(
        user_id=user_id,
        server=server,
        kind=token_kind,
        token=token,
        defaults=dict(
            ios_app_id=ios_app_id,
            last_updated=timezone.now()))

    return json_success()
@has_request_variables
def remote_server_unregister_push(request, entity, token=REQ(),
                                  token_kind=REQ(validator=check_int), ios_app_id=None):
    # type: (HttpRequest, Union[UserProfile, RemoteZulipServer], bytes, int, Optional[Text]) -> HttpResponse
    """Remove a push device token registration for a remote Zulip server.

    Returns an error response if no matching registration existed.
    """
    validate_bouncer_token_request(entity, token, token_kind)
    server = cast(RemoteZulipServer, entity)

    deleted = RemotePushDeviceToken.objects.filter(token=token,
                                                   kind=token_kind,
                                                   server=server).delete()
    # QuerySet.delete() returns (total_rows_deleted, per-model dict).
    if deleted[0] == 0:
        return json_error(_("Token does not exist"))

    return json_success()
@has_request_variables
def remote_server_notify_push(request,  # type: HttpRequest
                              entity,  # type: Union[UserProfile, RemoteZulipServer]
                              payload=REQ(argument_type='body')  # type: Dict[str, Any]
                              ):
    # type: (...) -> HttpResponse
    """Forward a push notification from a remote Zulip server to the
    user's registered Android (GCM) and iOS (APNS) devices.

    The request body must carry 'user_id', 'gcm_payload' and
    'apns_payload' keys.
    """
    validate_entity(entity)
    server = cast(RemoteZulipServer, entity)

    user_id = payload['user_id']
    gcm_payload = payload['gcm_payload']
    apns_payload = payload['apns_payload']

    android_devices = list(RemotePushDeviceToken.objects.filter(
        user_id=user_id,
        kind=RemotePushDeviceToken.GCM,
        server=server
    ))

    apple_devices = list(RemotePushDeviceToken.objects.filter(
        user_id=user_id,
        kind=RemotePushDeviceToken.APNS,
        server=server
    ))

    if android_devices:
        send_android_push_notification(android_devices, gcm_payload, remote=True)

    if apple_devices:
        send_apple_push_notification(user_id, apple_devices, apns_payload)

    return json_success()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import os
class InjaConan(ConanFile):
    """Conan recipe for inja, a header-only C++ template engine."""
    name = "inja"
    version = "2.1.0"
    url = "https://github.com/yasamoka/conan-inja"
    description = "Template engine for modern C++, loosely inspired by jinja for Python"
    license = "https://github.com/pantor/inja/blob/master/LICENSE"
    no_copy_source = True
    # NOTE(review): build_policy="always" forces a rebuild on every install;
    # confirm this is intentional for a header-only package.
    build_policy = "always"

    # inja depends on nlohmann/json.
    requires = "jsonformoderncpp/3.7.3@vthiery/stable"

    def source(self):
        # Download and unpack the tagged release tarball.
        source_url = "https://github.com/pantor/inja"
        tools.get("{0}/archive/v{1}.tar.gz".format(source_url, self.version))
        extracted_dir = self.name + "-" + self.version
        os.rename(extracted_dir, "sources")
        #Rename to "sources" is a convention to simplify later steps

    def package_id(self):
        # Header-only: the package id does not depend on settings/options.
        self.info.header_only()

    def package(self):
        self.copy(pattern="LICENSE")
        self.copy(pattern="*.[i|h]pp", dst="include/inja", src="sources/include/inja", keep_path=True)
|
nilq/baby-python
|
python
|
class Learner(object):
    """Base class for reinforcement-learning learners.

    Subclasses override log / learn_time / learn / post_learn; the base
    implementations are no-ops returning None.
    """

    def log_update(self, o, a, r, op, logpb, dist, done):
        """Record one transition, then learn if learn_time(done) says so.

        Returns a dict with at least 'learned' (bool), merged with
        whatever learn() returns when learning happened.
        """
        self.log(o, a, r, op, logpb, dist, done)
        result = {'learned': False}
        if not self.learn_time(done):
            return result
        learn_info = self.learn()
        self.post_learn()
        result.update(learn_info)
        result['learned'] = True
        return result

    def log(self, o, a, r, op, logpb, dist, done):
        """Record a single transition (no-op in the base class)."""
        pass

    def learn_time(self, done):
        """Return truthy when it is time to learn (no-op: never)."""
        pass

    def post_learn(self):
        """Hook invoked after each learn() call (no-op)."""
        pass

    def learn(self):
        """Perform a learning step and return an info dict (no-op)."""
        pass
nilq/baby-python
|
python
|
import os
import shutil
import json
print("[+] Cleaning...")
with open("tree.json", "r") as f:
json_str = f.read()
json_data = json.loads(json_str)
f.close()
for (path, dirs, files) in os.walk(os.curdir):
if path not in json_data["dirs"]:
shutil.rmtree(path)
else:
for f in files:
f = f"{path}{os.sep}{f}"
if f not in json_data["files"]:
os.remove(f)
print("[-] Finished cleaning")
|
nilq/baby-python
|
python
|
# BT5071 pop quiz 2
# Roll Number: BE17B037
# Name: Krushan Bauva
def bubble(A):
    """Split A into two halves and bubble-sort them in opposite orders.

    Returns a tuple (first_half, second_half) where the first half is
    sorted ascending and the second half descending.  For odd-length
    input the first half gets the extra element.  A itself is not
    modified (the halves are slices, i.e. copies).
    """
    n = len(A)
    mid = n // 2 + 1 if n % 2 == 1 else n // 2
    lo_half, hi_half = A[:mid], A[mid:]

    # Ascending bubble sort: each pass floats the largest remaining
    # element to the end of the unsorted prefix.
    for limit in range(len(lo_half) - 1, 0, -1):
        for k in range(limit):
            if lo_half[k] > lo_half[k + 1]:
                lo_half[k], lo_half[k + 1] = lo_half[k + 1], lo_half[k]

    # Descending bubble sort: each pass bubbles the largest remaining
    # element towards the front.
    for start in range(len(hi_half) - 1):
        for k in range(len(hi_half) - 1, start, -1):
            if hi_half[k] > hi_half[k - 1]:
                hi_half[k], hi_half[k - 1] = hi_half[k - 1], hi_half[k]

    return (lo_half, hi_half)
# Bubble sort is a stable sort since it does not reorder for equal things. Only when one
# element is greater than the other, it does a mutual swap between them.
# Bubble sort's time complexity is O(n^2): the outer loop runs n-1 times and the inner
# loop runs up to the index of the outer loop. Adding up the comparisons gives
# (n-1) + (n-2) + (n-3) + ..... + 3 + 2 + 1 = n(n-1)/2 = O(n^2).
# Hence the time complexity of bubble sort is O(n^2).
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.models import Permission, User
from django.db import models
from localflavor.us.models import USStateField
from phonenumber_field.modelfields import PhoneNumberField
from multiselectfield import MultiSelectField
from endorsements.models import Issue
from django_countries.fields import CountryField
from recurrence.fields import RecurrenceField
from django.contrib.gis.db.models import PointField
from wagtail.contrib.wagtailfrontendcache.utils import purge_url_from_cache
from bsd.api import BSD
import logging
logger = logging.getLogger(__name__)

# Get bsd api
bsdApi = BSD().api

# Admin-facing 1-5 rating of how well a group aligns with organizational
# values/expectations (used by Group.group_rating below).
group_rating_choices = (
    (5, '5 - Strongly aligned with values and expectations'),
    (4, '4 - Somewhat aligned with values and expectations'),
    (3, '3 - Working toward alignment with values and expectations'),
    (2, '2 - Somewhat misaligned or resistant to values and expectations'),
    (1, '1 - Group inactive or very misaligned with values and expectations'),
)
def find_local_group_by_user(user):
    """
    Find approved Local Group for User based on Affiliations and Roles

    Parameters
    ----------
    user : User
        User to check for Local Group match

    Returns
    -------
    LocalGroup
        Return LocalGroup if a match is found, or None
    """

    """Find affiliation for approved group with non-empty roles"""
    if not hasattr(user, 'localgroupprofile'):
        return None

    profile = user.localgroupprofile

    # TODO: support multiple group affiliations?
    affiliation = LocalGroupAffiliation.objects.filter(
        local_group_profile=profile,
        local_group__status__exact='approved',
    ).exclude(local_group_roles=None).first()

    if affiliation is None:
        """Otherwise return None"""
        return None

    return affiliation.local_group
class Group(models.Model):
name = models.CharField(
max_length=64,
null=True, blank=False,
verbose_name="Group Name"
)
slug = models.SlugField(
null=True, blank=False,
unique=True,
max_length=100
)
signup_date = models.DateTimeField(
null=True,
blank=True,
auto_now_add=True
)
group_id = models.CharField(
max_length=4,
null=True,
blank=False,
unique=True
)
# Order by group priority
GROUP_TYPES = (
(1, 'State Organizing Committee'),
(2, 'State Chapter'),
(3, 'Campus'),
(4, 'Local Group')
)
group_type = models.IntegerField(
blank=False,
null=False,
choices=GROUP_TYPES,
default=4
)
# Individual Rep Email should match BSD authentication account
rep_email = models.EmailField(
null=True,
blank=False,
verbose_name="Contact Email",
max_length=254
)
# Public group email does not need to match BSD authentication account
group_contact_email = models.EmailField(
blank=True,
help_text="""Optional Group Contact Email to publicly display an email
different from Group Leader Email""",
max_length=254,
null=True,
)
rep_first_name = models.CharField(
max_length=35,
null=True,
blank=False,
verbose_name="First Name"
)
rep_last_name = models.CharField(
max_length=35,
null=True,
blank=False,
verbose_name="Last Name"
)
rep_postal_code = models.CharField(
max_length=12,
null=True,
blank=True,
verbose_name="Postal Code"
)
rep_phone = PhoneNumberField(
null=True,
blank=True,
verbose_name="Phone Number"
)
county = models.CharField(max_length=64, null=True, blank=True)
city = models.CharField(max_length=64, null=True, blank=True)
state = USStateField(max_length=2, null=True, blank=True)
postal_code = models.CharField(
max_length=12,
null=True,
blank=True,
verbose_name="Postal Code"
)
country = CountryField(null=True, blank=False, default="US")
point = PointField(null=True, blank=True)
size = models.CharField(
max_length=21,
null=True,
blank=True,
verbose_name="Group Size"
)
last_meeting = models.DateTimeField(
null=True,
blank=True,
verbose_name="Date of Last Meeting"
)
recurring_meeting = RecurrenceField(
null=True,
blank=True,
verbose_name="Recurring Meeting"
)
meeting_address_line1 = models.CharField(
"Address Line 1",
max_length=45,
null=True,
blank=True)
meeting_address_line2 = models.CharField(
"Address Line 2",
max_length=45,
null=True,
blank=True
)
meeting_postal_code = models.CharField(
"Postal Code",
max_length=12,
null=True,
blank=True
)
meeting_city = models.CharField(
max_length=64,
null=True,
blank=True,
verbose_name="City"
)
meeting_state_province = models.CharField(
"State/Province",
max_length=40,
null=True,
blank=True
)
meeting_country = CountryField(
null=True,
blank=True,
verbose_name="Country",
default='US'
)
TYPES_OF_ORGANIZING_CHOICES = (
('direct-action', 'Direct Action'),
('electoral', 'Electoral Organizing'),
('legistlative', 'Advocating for Legislation or Ballot Measures'),
('community', 'Community Organizing'),
('other', 'Other')
)
types_of_organizing = MultiSelectField(
null=True,
blank=True,
choices=TYPES_OF_ORGANIZING_CHOICES,
verbose_name="Types of Organizing"
)
other_types_of_organizing = models.TextField(
null=True,
blank=True,
verbose_name="Other Types of Organizing",
max_length=500
)
description = models.TextField(
null=True,
blank=False,
max_length=1000,
verbose_name="Description (1000 characters or less)"
)
issues = models.ManyToManyField(Issue, blank=True)
other_issues = models.TextField(
null=True,
blank=True,
max_length=250,
verbose_name="Other Issues")
constituency = models.TextField(null=True, blank=True, max_length=250)
facebook_url = models.URLField(
null=True,
blank=True,
verbose_name="Facebook URL",
max_length=255
)
twitter_url = models.URLField(
null=True,
blank=True,
verbose_name="Twitter URL",
max_length=255)
website_url = models.URLField(
null=True,
blank=True,
verbose_name="Website URL",
max_length=255
)
instagram_url = models.URLField(
null=True,
blank=True,
verbose_name="Instagram URL",
max_length=255
)
other_social = models.TextField(
null=True,
blank=True,
verbose_name="Other Social Media",
max_length=250
)
STATUSES = (
('submitted', 'Submitted'),
('signed-mou', 'Signed MOU'),
('inactive', 'Inactive'),
('approved', 'Approved'),
('removed', 'Removed')
)
status = models.CharField(
max_length=64,
choices=STATUSES,
default='submitted'
)
VERSIONS = (
('none', 'N/A'),
('1.0', 'Old'),
('1.1', 'Current'),
)
signed_mou_version = models.CharField(
max_length=64,
choices=VERSIONS,
default='none',
verbose_name='MOU Version',
null=True,
blank=True
)
ORGANIZERS = (
('juliana', 'Juliana'),
('basi', 'Basi'),
('kyle', 'Kyle'),
)
organizer = models.CharField(
max_length=64,
choices=ORGANIZERS,
default=None,
verbose_name='Organizer',
null=True,
blank=True
)
mou_url = models.URLField(
null=True,
blank=True,
verbose_name="MOU URL",
max_length=255
)
"""Admin Group Rating"""
group_rating = models.IntegerField(
blank=True,
choices=group_rating_choices,
null=True,
)
# Notes field for internal OR staff use
notes = models.TextField(
blank=True,
help_text="""Please include dates here along with notes to make
reporting easier.""",
null=True,
verbose_name="Notes"
)
def save(self, *args, **kwargs):
# TODO: make main groups url an environment variable
# and replace hardcoded /groups throughout site
super(Group, self).save(*args, **kwargs)
if self.slug:
purge_url_from_cache('/groups/')
purge_url_from_cache('/groups/' + self.slug +'/')
    def __unicode__(self):
        # Python 2 string representation; shown in admin lists and dropdowns.
        return self.name
class LocalGroupProfile(models.Model):
    """Local Group information for a user"""

    user = models.OneToOneField(User, on_delete=models.CASCADE)

    def get_affiliation_for_local_group(self, local_group):
        """Get Affiliation for Local Group, otherwise None"""
        return self.localgroupaffiliation_set.filter(
            local_group=local_group
        ).first()

    def get_affiliations_for_local_group_role_id(self, local_group_role_id):
        """Get Affiliations for Local Group Role"""
        return self.localgroupaffiliation_set.filter(
            local_group_roles=local_group_role_id
        )

    def has_permission_for_local_group(self, local_group, permission):
        """Get Affiliation and check if any Role has permission"""
        affiliation = self.get_affiliation_for_local_group(local_group)
        if not affiliation:
            return False
        return any(
            role.has_permission(permission)
            for role in affiliation.local_group_roles.all()
        )

    def has_permissions_for_local_group(self, local_group, permissions):
        """Verify if user has all permissions for local group"""
        return all(
            self.has_permission_for_local_group(local_group, perm)
            for perm in permissions
        )

    def __unicode__(self):
        return self.user.email + " [" + str(self.user.id) + "]"

    class Meta:
        ordering = ["user__email"]
class LocalGroupRole(models.Model):
    """Hardcode the role types, but also store role permissions in db"""

    role_type_choices = (
        (settings.LOCAL_GROUPS_ROLE_GROUP_LEADER_ID, 'Group Leader'),
        (settings.LOCAL_GROUPS_ROLE_GROUP_ADMIN_ID, 'Group Admin'),
    )
    permissions = models.ManyToManyField(Permission, blank=True)
    role_type = models.IntegerField(choices=role_type_choices, unique=True)

    def has_permission(self, permission):
        # Compare against the "app_label.codename" form used by Django perms.
        return any(
            perm.content_type.app_label + '.' + perm.codename == permission
            for perm in self.permissions.all()
        )

    def __unicode__(self):
        return self.get_role_type_display()
class LocalGroupAffiliation(models.Model):
    """
    Local Group Affiliation is similar to Auth User Groups except it is
    meant for a specific Local Group
    """

    # Link to specific User Profile and Local Group
    local_group = models.ForeignKey(Group)
    local_group_profile = models.ForeignKey(LocalGroupProfile)

    # Roles for this specific Local Group & User
    local_group_roles = models.ManyToManyField(LocalGroupRole, blank=True)

    def __unicode__(self):
        group = self.local_group
        return group.name + " [" + group.group_id + "], " + str(
            self.local_group_profile
        )

    class Meta:
        ordering = [
            "local_group__name",
            "local_group__group_id",
            "local_group_profile__user__email"
        ]
        unique_together = ["local_group", "local_group_profile"]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-createacsr_handler
from __future__ import unicode_literals
import json
import logging
import os
import uuid
import time
import secrets
import cryptography
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
from flask import abort
from flask import Flask
from flask import request
from flask import Response
from flask import render_template
from jinja2.exceptions import TemplateNotFound
from jwcrypto import jwk, jwt
import requests
from werkzeug.contrib.cache import SimpleCache
# ENV vars
# NOTE(review): os.getenv returns a *string* from the environment, so any
# non-empty value (including "False") is truthy here — confirm intent.
FLASK_DEBUG = os.getenv('FLASK_DEBUG', True)
# Directory Flask loads Jinja templates from.
TEMPLATES_FOLDER = os.getenv('TEMPLATES_FOLDER')
# Lifetime (seconds) of every SimpleCache entry; int() raises if unset.
CACHE_TIMEOUT = int(os.getenv('CACHE_TIMEOUT'))
TEST_API_ENDPOINT = os.getenv('TEST_API_ENDPOINT')
if FLASK_DEBUG:
    # configure requests logging
    import http.client as http_client
    http_client.HTTPConnection.debuglevel = 1
    logging.basicConfig()
    logging.getLogger().setLevel(logging.DEBUG)
    logger = logging.getLogger(__name__)
    requests_log = logging.getLogger("requests.packages.urllib3")
    requests_log.setLevel(logging.DEBUG)
    requests_log.propagate = True
app = Flask(__name__, template_folder=TEMPLATES_FOLDER)
app.debug = FLASK_DEBUG
# Setting SECRET_KEY
# Falls back to a random per-process key: sessions reset on every restart.
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY', secrets.token_hex(16))
# In-process cache shared by all request handlers below.
cache = SimpleCache()
################################################################################
# Utilities
################################################################################
def make_private_key(key_size: int) -> "rsa.RSAPrivateKey":
    """Generate a new RSA private key.

    Fix: the return annotation previously claimed ``bytes``; the function
    actually returns a key *object* (callers PEM-encode it separately).

    :param key_size: modulus size in bits (e.g. 2048, 4096)
    :return key: the generated RSA private key object
    """
    # 65537 is the conventional RSA public exponent (Fermat's F4).
    key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=key_size,
        backend=default_backend()
    )
    return key
def make_private_key_pem(private_key: "rsa.RSAPrivateKey") -> bytes:
    """Serialize an RSA private key to unencrypted PKCS#1 ("TraditionalOpenSSL") PEM.

    Fix: the annotations previously claimed a ``bytes`` parameter and ``str``
    return; the input is the key object and ``private_bytes`` returns bytes
    (callers ``.decode()`` it themselves).

    :param private_key: RSA private key object from make_private_key()
    :return pem: PEM-encoded key as bytes
    """
    pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption()
    )
    return pem
def make_csr(private_key: "rsa.RSAPrivateKey") -> "x509.CertificateSigningRequest":
    """Return a SHA-256-signed CSR based on the given private key.

    Fix: the annotations previously claimed ``bytes`` in / ``str`` out; the
    input is the key object and the result is a CSR object (callers call
    ``public_bytes`` on it to get PEM text).

    Subject fields are taken from the cache with sensible fallbacks.

    :param private_key: RSA private key object used to sign the request
    :return csr: the certificate signing request object
    """
    csr = x509.CertificateSigningRequestBuilder().subject_name(
        x509.Name(
            [
                x509.NameAttribute(NameOID.COUNTRY_NAME, cache.get('csr_country_name') or 'GB'),
                x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME,
                                   cache.get('csr_state_or_province_name') or 'Middlesex'),
                x509.NameAttribute(NameOID.LOCALITY_NAME, cache.get('csr_locality_name') or 'London'),
                x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME,
                                   cache.get('csr_organizational_unit_name') or 'My TPP'),
                x509.NameAttribute(NameOID.COMMON_NAME, cache.get('csr_common_name') or 'IT'),
            ]
        )
    ).sign(private_key, hashes.SHA256(), default_backend())
    return csr
def make_jwk_from_pem(private_pem: str) -> dict:
    """Convert a PEM into a JWK

    :param private_pem: PEM-encoded private key text
    :return jwk_dict: JWK dict with 'kid', 'x5t' and 'x5t#256' thumbprints
        added; empty dict if the PEM cannot be parsed.
    """
    jwk_dict = dict()
    try:
        # latin-1 maps codepoints 1:1 to bytes, so the PEM text round-trips.
        key_obj = jwk.JWK.from_pem(private_pem.encode('latin-1'))
    except Exception as e:
        # Invalid/absent key material: log and fall through to the empty dict.
        app.logger.debug('{}'.format(e))
    else:
        jwk_dict = json.loads(key_obj.export())
        # NOTE(review): 'kid' and 'x5t' are both SHA-1 thumbprints here, so
        # they come out identical — confirm the directory expects a SHA-1 kid.
        jwk_dict['kid'] = key_obj.thumbprint(hashalg=cryptography.hazmat.primitives.hashes.SHA1())
        jwk_dict['x5t'] = key_obj.thumbprint(hashalg=cryptography.hazmat.primitives.hashes.SHA1())
        jwk_dict['x5t#256'] = key_obj.thumbprint(hashalg=cryptography.hazmat.primitives.hashes.SHA256())
    return jwk_dict
def make_token(kid: str, software_statement_id: str, client_scopes: str, token_url: str) -> str:
    """Build and sign a client-credentials JWT (RS256, 1h lifetime).

    Signed with the cached private key; audience is the token endpoint.
    """
    issued_at = int(time.time())
    header = {'alg': 'RS256', 'kid': kid, 'typ': 'JWT'}
    claims = {
        'iss': software_statement_id,
        'sub': software_statement_id,
        'scopes': client_scopes,
        'aud': token_url,
        'jti': str(uuid.uuid4()),
        'iat': issued_at,
        'exp': issued_at + 3600,
    }
    token = jwt.JWT(header=header, claims=claims)
    signing_key = jwk.JWK.from_pem(cache.get('private_key_pem').encode('latin-1'))
    token.make_signed_token(signing_key)
    return token.serialize()
def make_onboarding_token(kid: str, iss: str, aud: str, sub: str, scope: str, client_id: str, ssa: str) -> str:
    """Build and sign the RS256 registration JWT sent to an ASPSP (1h lifetime)."""
    issued_at = int(time.time())
    header = {'alg': 'RS256', 'kid': kid, 'typ': 'JWT'}
    claims = {
        'iss': iss,
        'iat': issued_at,
        'exp': issued_at + 3600,
        'aud': aud,
        'sub': sub,
        'scope': scope,
        'token_endpoint_auth_method': 'private_key_jwt',
        'grant_types': ['authorization_code', 'refresh_token', 'client_credentials'],
        'response_types': ['code', 'id_token'],
        'client_id': client_id,
        'software_statement': ssa,
    }
    onboarding_token = jwt.JWT(header=header, claims=claims)
    signing_key = jwk.JWK.from_pem(cache.get('private_key_pem').encode('latin-1'))
    onboarding_token.make_signed_token(signing_key)
    return onboarding_token.serialize()
def get_context() -> dict:
    """Collect every cached setting/result into one dict for the templates."""
    cached_keys = (
        # Home /
        'tpp_id', 'software_statement_id', 'client_scopes',
        'onboarding_scopes', 'token_url', 'tpp_ssa_url', 'aspsp_list_url',
        # Private key settings
        'key_size',
        # CSR settings
        'csr_common_name', 'csr_organizational_unit_name',
        'csr_country_name', 'csr_state_or_province_name', 'csr_locality_name',
        # Certs
        'private_key_pem', 'csr_pem',
        # Access token
        'access_token',
        # SSA
        'software_statement_assertion',
        # Authorization servers
        'authorization_servers',
        # App onboarding
        'app_onboarding_status_exception', 'app_onboarding_status_url',
        'app_onboarding_status_code', 'app_onboarding_reason',
        'app_onboarding_text',
    )
    context = {key: cache.get(key) for key in cached_keys}
    # 'kid' is derived from the cached private key rather than stored directly.
    context['kid'] = make_jwk_from_pem(context['private_key_pem']).get('kid')
    return context
################################################################################
# Route handlers
################################################################################
# / handler
@app.route('/', endpoint='root_handler', methods=['GET', 'POST'])
def root_handler() -> Response:
    """Home / handler: store the core directory settings from the form."""
    if request.method == 'POST':
        for field in ('tpp_id', 'software_statement_id', 'client_scopes',
                      'onboarding_scopes', 'token_url', 'tpp_ssa_url',
                      'aspsp_list_url'):
            cache.set(field, request.form.get(field), timeout=CACHE_TIMEOUT)
        # Changing core settings invalidates previously generated key material.
        for stale in ('private_key_pem', 'kid', 'csr_pem'):
            cache.set(stale, '', timeout=CACHE_TIMEOUT)
    context = dict(settings=get_context())
    try:
        return render_template('home.html', context=context)
    except TemplateNotFound:
        abort(404)
# create a csr handler
@app.route('/createcsr/', endpoint='createacsr_handler', methods=['GET', 'POST'])
def createacsr_handler() -> Response:
    """Private key & CSR creation handler.

    Stores the CSR subject fields, then generates a fresh key and CSR.
    """
    if request.method == 'POST':
        # Subject fields: OU is taken from the TPP id and CN from the
        # software statement id submitted on the form.
        form_to_cache = (
            ('key_size', 'key_size'),
            ('csr_country_name', 'csr_country_name'),
            ('csr_state_or_province_name', 'csr_state_or_province_name'),
            ('csr_locality_name', 'csr_locality_name'),
            ('csr_organizational_unit_name', 'tpp_id'),
            ('csr_common_name', 'software_statement_id'),
        )
        for cache_key, form_field in form_to_cache:
            cache.set(cache_key, request.form.get(form_field), timeout=CACHE_TIMEOUT)
        new_key = make_private_key(int(request.form.get('key_size')))
        cache.set('private_key_pem', make_private_key_pem(new_key).decode(encoding='utf-8'),
                  timeout=CACHE_TIMEOUT)
        # make_csr reads the subject fields cached above.
        new_csr = make_csr(new_key)
        cache.set('csr_pem', new_csr.public_bytes(serialization.Encoding.PEM).decode(encoding='utf-8'),
                  timeout=CACHE_TIMEOUT)
    context = dict(settings=get_context())
    try:
        return render_template('createcsr.html', context=context)
    except TemplateNotFound:
        abort(404)
# obtain an access token from OB
@app.route('/getaccesstoken/', endpoint='createatoken_handler', methods=['GET', 'POST'])
def createatoken_handler() -> Response:
    """Access Token handler

    On POST, stores the submitted 'kid' and — when all prerequisites are
    cached — exchanges a signed client-credentials JWT for an access token.
    """
    kid = cache.get('kid')
    if request.method == 'POST':
        kid = request.form.get('kid')
        cache.set('kid', kid, timeout=CACHE_TIMEOUT)
        # Only request a token once kid, client id, scopes and token URL exist.
        if cache.get('kid') and cache.get('software_statement_id') and cache.get('client_scopes') and cache.get(
                'token_url'):
            signed_token = make_token(
                cache.get('kid'),
                cache.get('software_statement_id'),
                cache.get('client_scopes'),
                cache.get('token_url')
            )
            cache.set('signed_token', signed_token, timeout=CACHE_TIMEOUT)
            # Standard client_credentials grant with a JWT client assertion.
            data_dict = dict(
                client_assertion_type='urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
                grant_type='client_credentials',
                client_id=cache.get('software_statement_id'),
                client_assertion=cache.get('signed_token'),
                scope=cache.get('client_scopes')
            )
            r = requests.post(cache.get('token_url'), data=data_dict)
            if r.status_code == 200:
                cache.set('access_token', r.json().get('access_token'), timeout=CACHE_TIMEOUT)
            else:
                # Clear any stale token on failure.
                cache.set('access_token', '', timeout=CACHE_TIMEOUT)
    context = dict(settings=get_context())
    # Show the kid the user just submitted (or the cached one on GET).
    context['settings']['kid'] = kid
    try:
        return render_template('createtoken.html', context=context)
    except TemplateNotFound:
        abort(404)
# get SSA
@app.route('/getssa/', endpoint='getssa_handler', methods=['GET', 'POST'])
def getssa_handler() -> Response:
    """Software Statement Assertion retrieval"""
    if request.method == 'POST':
        try:
            ssa_endpoint = '{}/tpp/{}/ssa/{}'.format(
                cache.get('tpp_ssa_url'),
                cache.get('tpp_id'),
                cache.get('software_statement_id')
            )
            r = requests.get(
                ssa_endpoint,
                headers={'Authorization': 'Bearer {}'.format(cache.get('access_token'))}
            )
        except Exception as e:
            app.logger.error('Could not retrieve the SSA because: {}'.format(e))
        else:
            if r.status_code == 200:
                cache.set('software_statement_assertion', r.text, timeout=CACHE_TIMEOUT)
            else:
                app.logger.error('Could not retrieve the SSA, because: {}, {}'.format(r.status_code, r.reason))
    context = dict(settings=get_context())
    try:
        return render_template('getssa.html', context=context)
    except TemplateNotFound:
        abort(404)
# get authorization servers
@app.route('/getauthservers/', endpoint='getauthservers_handler', methods=['GET', 'POST'])
def getauthservers_handler() -> Response:
    """Authorization server list retrieval handler
    """
    if request.method == 'POST':
        try:
            r = requests.get(
                cache.get('aspsp_list_url'),
                headers={'Authorization': 'Bearer {}'.format(cache.get('access_token'))}
            )
        except Exception as e:
            app.logger.error('Could not retrieve the list of authorization servers, because: {}'.format(e))
        else:
            if r.status_code != 200:
                app.logger.error(
                    'Could not retrieve the list of authorization servers, because: {}, {}'.format(
                        r.status_code,
                        r.reason
                    )
                )
            else:
                resources = r.json().get('Resources')
                if resources:
                    # Keep each resource's non-empty AuthorisationServers entry.
                    server_lists = []
                    for resource in resources:
                        servers = resource.get('AuthorisationServers')
                        if servers:
                            server_lists.append(servers)
                    cache.set('authorization_servers', server_lists, timeout=CACHE_TIMEOUT)
    context = dict(settings=get_context())
    try:
        return render_template('getauthservers.html', context=context)
    except TemplateNotFound:
        abort(404)
# onboard app
@app.route('/onboard/', endpoint='onboardapp_handler', methods=['GET', 'POST'])
def onboardapp_handler() -> Response:
    """App Onboarding handler.

    POSTs a signed registration JWT to the selected ASPSP authorization
    server and caches the raw response (or exception) for display.
    """
    if request.method == 'POST':
        headers = dict()
        # The registration endpoint expects a raw JWT body and returns JSON.
        headers['Content-Type'] = 'application/jwt'
        headers['Accept'] = 'application/json'
        try:
            r = requests.post(
                request.form.get('authorization_server'),
                headers=headers,
                data=make_onboarding_token(
                    kid=cache.get('kid'),
                    iss=cache.get('tpp_id'),
                    aud=request.form.get('authorization_server'),
                    sub=cache.get('software_statement_id'),
                    scope=cache.get('onboarding_scopes'),
                    client_id=cache.get('software_statement_id'),
                    ssa=cache.get('software_statement_assertion')
                )
            )
        except Exception as e:
            app.logger.error('Could not onboard the application, because: {}'.format(e))
            cache.set('app_onboarding_status_exception', 'Could not onboard the application, because: {}'.format(e),
                      timeout=CACHE_TIMEOUT)
        else:
            # Cache the response details for the results template.
            cache.set('app_onboarding_status_url', r.url, timeout=CACHE_TIMEOUT)
            cache.set('app_onboarding_status_code', r.status_code, timeout=CACHE_TIMEOUT)
            cache.set('app_onboarding_reason', r.reason, timeout=CACHE_TIMEOUT)
            cache.set('app_onboarding_text', r.text, timeout=CACHE_TIMEOUT)
    context = dict(settings=get_context())
    try:
        return render_template('onboardapp.html', context=context)
    except TemplateNotFound:
        abort(404)
################################################################################
# End
################################################################################
# required host 0.0.0.0 for docker.
if __name__ == "__main__":
    # Development server only; use a production WSGI server when deploying.
    app.run(host="0.0.0.0", debug=FLASK_DEBUG)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from credocommon.models import Detection
from credocommon.helpers import validate_image, rate_brightness
class Command(BaseCommand):
    help = "Validate detections"

    def handle(self, *args, **options):
        """Score detection brightness and hide detections failing validation."""
        detections = Detection.objects.all()
        for d in detections:
            if d.frame_content:
                d.brightness = rate_brightness(d.frame_content)
                d.save()
            # NOTE(review): detections are hidden when validate_image() is
            # truthy OR when there is no image at all — this assumes
            # validate_image() signals *failure* with a truthy result; confirm.
            if (not d.frame_content) or validate_image(d.frame_content):
                self.stdout.write(
                    "Hiding detection %s (image validation failed)" % d.id
                )
                d.visible = False
                d.save()
            # Hide detections whose device timestamp differs from the server
            # receive time by more than ~5 years (values appear to be ms).
            if abs(d.time_received - d.timestamp) > 3600 * 24 * 365 * 5 * 1000:
                self.stdout.write("Hiding detection %s (invalid date)" % d.id)
                d.visible = False
                d.save()
        self.stdout.write("Done!")
|
nilq/baby-python
|
python
|
"""Implement an error to indicate that a scaaml.io.Dataset already exists.
Creating scaaml.io.Dataset should not overwrite existing files. When it could
the constructor needs to raise an error, which should also contain the dataset
directory.
"""
from pathlib import Path
class DatasetExistsError(FileExistsError):
    """Error for signalling that the dataset already exists."""

    def __init__(self, dataset_path: Path) -> None:
        """Represents that the dataset already exists.

        Args:
          dataset_path: The dataset path.
        """
        message = (
            f'Dataset info file exists and would be overwritten. Use instead:'
            f' Dataset.from_config(dataset_path="{dataset_path}")')
        super().__init__(message)
        self.dataset_path = dataset_path
|
nilq/baby-python
|
python
|
from datetime import datetime
from django.views.generic.edit import BaseCreateView
from braces.views import LoginRequiredMixin
from .base import BaseEditView
from forum.forms import ReplyForm
from forum.models import Topic, Reply
class ReplyCreateView(LoginRequiredMixin, BaseCreateView):
    """Create a Reply on a Topic (POST/PUT only; login required)."""
    model = Topic
    form_class = ReplyForm
    http_method_names = ['post', 'put']

    def form_valid(self, form):
        # Attach author metadata before persisting the reply.
        self.object = form.save(commit=False)
        self.object.author = self.request.user
        self.object.author_ip = self.request.META['REMOTE_ADDR']
        self.object.topic = self.get_object()
        # NOTE(review): read-modify-write counter update is racy under
        # concurrent replies — an F('num_replies') + 1 expression is atomic.
        self.object.topic.num_replies += 1
        self.object.topic.last_reply_on = datetime.now()
        self.object.topic.save()
        return super(ReplyCreateView, self).form_valid(form)

    def get_success_url(self):
        # Redirect back to the topic that was replied to.
        return self.object.topic.get_absolute_url()
class ReplyEditView(LoginRequiredMixin, BaseEditView):
    """Edit an existing Reply (login required)."""
    model = Reply
    form_class = ReplyForm
    template_name = 'forum/reply_edit_form.html'

    def get_success_url(self):
        # Redirect back to the reply's topic after a successful edit.
        return self.object.topic.get_absolute_url()
|
nilq/baby-python
|
python
|
"""
See the problem description at: https://leetcode.com/problems/minimum-add-to-make-parentheses-valid/
"""
class Solution:
    def minAddToMakeValid(self, S: str) -> int:
        """
        Time complexity : O(n)
        Space complexity: O(1)

        Counts '(' still waiting for a match and ')' that can never match;
        their sum is the minimum number of insertions required.
        """
        unmatched_open = 0
        needed_open = 0
        for ch in S:
            if ch == '(':
                unmatched_open += 1
            elif unmatched_open:
                unmatched_open -= 1
            else:
                needed_open += 1
        return unmatched_open + needed_open
|
nilq/baby-python
|
python
|
from tests.seatsioClientTest import SeatsioClientTest
from tests.util.asserts import assert_that
class ListAllTagsTest(SeatsioClientTest):
    """Verify list_all_tags aggregates tags across every chart."""

    def test(self):
        tagged_chart = self.client.charts.create()
        for tag in ("tag1", "tag2"):
            self.client.charts.add_tag(tagged_chart.key, tag)

        other_chart = self.client.charts.create()
        self.client.charts.add_tag(other_chart.key, "tag3")

        all_tags = self.client.charts.list_all_tags()

        assert_that(all_tags).contains_exactly_in_any_order("tag1", "tag2", "tag3")
|
nilq/baby-python
|
python
|
"""empty message
Revision ID: 20210315_193805
Revises: 20210315_151433
Create Date: 2021-03-15 19:38:05.486503
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# This migration follows 20210315_151433 in the main (unlabelled) branch.
revision = "20210315_193805"
down_revision = "20210315_151433"
branch_labels = None
depends_on = None
def upgrade():
    """Create the etl_job_results table and extend the t_cross comment."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "etl_job_results",
        sa.Column("id", sa.Integer(), nullable=False),
        # NOTE(review): "name" is typed DateTime like the timestamp columns
        # below — confirm this is intentional and not a copy/paste slip.
        sa.Column("name", sa.DateTime(timezone=True), nullable=False),
        sa.Column("deleted", sa.DateTime(timezone=True), nullable=False),
        sa.Column("inserted", sa.DateTime(timezone=True), nullable=False),
        sa.Column("errors", sa.JSON(), nullable=False),
        sa.Column("error_summary", sa.Text(), nullable=False),
        sa.Column("warning", sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    # Extend the column comment to document the SMA-5/SMA-25 cross detection.
    op.alter_column(
        "__crypto_ohlc_daily",
        "t_cross",
        existing_type=sa.INTEGER(),
        comment="1=golden cross -1=dead cross 2021/3/15 t_sma_5 t_sma_25のクロスを検出",
        existing_comment="1=golden cross -1=dead cross",
        existing_nullable=False,
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the t_cross comment and drop the etl_job_results table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        "__crypto_ohlc_daily",
        "t_cross",
        existing_type=sa.INTEGER(),
        comment="1=golden cross -1=dead cross",
        existing_comment="1=golden cross -1=dead cross 2021/3/15 t_sma_5 t_sma_25のクロスを検出",
        existing_nullable=False,
    )
    op.drop_table("etl_job_results")
    # ### end Alembic commands ###
|
nilq/baby-python
|
python
|
def parse_full_text(status):
    """Param status (tweepy.models.Status)"""
    # Delegate to clean_text() so CSV rows never contain raw line-breaks.
    return clean_text(status.full_text)
def clean_text(my_str):
    """Removes line-breaks for cleaner CSV storage. Handles string or null value.

    Returns string or null value

    Param my_str (str)
    """
    try:
        # Collapse CR/LF to spaces so each record stays on one CSV row.
        return my_str.replace("\n", " ").replace("\r", " ").strip()
    except AttributeError:
        # Non-string input (e.g. None) is passed through unchanged.
        # (Previously bound the exception to an unused `err` name.)
        return my_str
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Command line utility to serve a Mapchete process."""
import click
import logging
import logging.config
import os
import pkgutil
from rasterio.io import MemoryFile
import mapchete
from mapchete.cli import options
from mapchete.tile import BufferedTilePyramid
logger = logging.getLogger(__name__)
@click.command(help="Serve a process on localhost.")
@options.arg_mapchete_files
@options.opt_port
@options.opt_internal_cache
@options.opt_zoom
@options.opt_bounds
@options.opt_overwrite
@options.opt_readonly
@options.opt_memory
@options.opt_input_file
@options.opt_debug
@options.opt_logfile
def serve(
    mapchete_files,
    port=None,
    internal_cache=None,
    zoom=None,
    bounds=None,
    overwrite=False,
    readonly=False,
    memory=False,
    input_file=None,
    debug=False,
    logfile=None,
):
    """
    Serve a Mapchete process.

    Creates the Mapchete host and serves both web page with OpenLayers and the
    WMTS simple REST endpoint.
    """
    # Mode is derived from the mutually exclusive memory/readonly/overwrite
    # flags (see _get_mode); default is "continue".
    app = create_app(
        mapchete_files=mapchete_files,
        zoom=zoom,
        bounds=bounds,
        single_input_file=input_file,
        mode=_get_mode(memory, readonly, overwrite),
        debug=debug,
    )
    if os.environ.get("MAPCHETE_TEST") == "TRUE":
        logger.debug("don't run flask app, MAPCHETE_TEST environment detected")
    else:  # pragma: no cover
        # extra_files makes the dev server reload when a .mapchete file changes.
        app.run(
            threaded=True,
            debug=debug,
            port=port,
            host="0.0.0.0",
            extra_files=mapchete_files,
        )
def create_app(
    mapchete_files=None,
    zoom=None,
    bounds=None,
    single_input_file=None,
    mode="continue",
    debug=None,
):
    """Configure and create Flask app."""
    from flask import Flask, render_template_string

    app = Flask(__name__)
    # One cached Mapchete process per .mapchete file, keyed by file stem.
    mapchete_processes = {
        os.path.splitext(os.path.basename(mapchete_file))[0]: mapchete.open(
            mapchete_file,
            zoom=zoom,
            bounds=bounds,
            single_input_file=single_input_file,
            mode=mode,
            with_cache=True,
            debug=debug,
        )
        for mapchete_file in mapchete_files
    }
    # Grid/SRID/bounds for the OpenLayers page are taken from the *first*
    # process; all served processes are assumed to share the same pyramid.
    mp = next(iter(mapchete_processes.values()))
    pyramid_type = mp.config.process_pyramid.grid
    pyramid_srid = mp.config.process_pyramid.crs.to_epsg()
    process_bounds = ",".join([str(i) for i in mp.config.bounds_at_zoom()])
    # WMTS-simple tile matrix set name: "g" for Web Mercator, else WGS84.
    grid = "g" if pyramid_srid == 3857 else "WGS84"
    web_pyramid = BufferedTilePyramid(pyramid_type)

    @app.route("/", methods=["GET"])
    def index():
        """Render and hosts the appropriate OpenLayers instance."""
        return render_template_string(
            pkgutil.get_data("mapchete.static", "index.html").decode("utf-8"),
            srid=pyramid_srid,
            process_bounds=process_bounds,
            is_mercator=(pyramid_srid == 3857),
            process_names=mapchete_processes.keys(),
        )

    # WMTS simple endpoint:
    # /wmts_simple/1.0.0/<process>/default/<grid>/<zoom>/<row>/<col>.<ext>
    @app.route(
        "/".join(
            [
                "",
                "wmts_simple",
                "1.0.0",
                "<string:mp_name>",
                "default",
                grid,
                "<int:zoom>",
                "<int:row>",
                "<int:col>.<string:file_ext>",
            ]
        ),
        methods=["GET"],
    )
    def get(mp_name, zoom, row, col, file_ext):
        """Return processed, empty or error (in pink color) tile."""
        logger.debug(
            "received tile (%s, %s, %s) for process %s", zoom, row, col, mp_name
        )
        # convert zoom, row, col into tile object using web pyramid
        return _tile_response(
            mapchete_processes[mp_name], web_pyramid.tile(zoom, row, col), debug
        )

    return app
def _get_mode(memory, readonly, overwrite):
if memory:
return "memory"
elif readonly:
return "readonly"
elif overwrite:
return "overwrite"
else:
return "continue"
def _tile_response(mp, web_tile, debug):
    """Fetch ``web_tile`` output from ``mp`` and wrap it in a Flask response.

    In debug mode failures propagate to the caller; otherwise a 500 is
    returned so one broken tile does not take down the server.
    """
    try:
        logger.debug("getting web tile %s", str(web_tile.id))
        return _valid_tile_response(mp, mp.get_raw_output(web_tile))
    except Exception:  # pragma: no cover
        logger.exception("getting web tile %s failed", str(web_tile.id))
        if debug:
            raise
        else:
            from flask import abort
            abort(500)
def _valid_tile_response(mp, data):
    """Wrap raw tile output in a Flask response with the correct MIME type."""
    from flask import send_file, make_response, jsonify

    out_data, mime_type = mp.config.output.for_web(data)
    logger.debug("create tile response %s", mime_type)
    if isinstance(out_data, MemoryFile):
        response = make_response(send_file(out_data, mime_type))
    elif isinstance(out_data, list):
        # NOTE(review): serializes the raw ``data`` (not ``out_data``) —
        # confirm against output.for_web() whether that is intentional.
        response = make_response(jsonify(data))
    else:
        response = make_response(out_data)
    response.headers["Content-Type"] = mime_type
    # Fix: was ``no_write``, which is not a Cache-Control directive and was
    # silently ignored by werkzeug; ``no_store`` actually disables caching.
    response.cache_control.no_store = True
    return response
|
nilq/baby-python
|
python
|
from .dualconv_mesh_net import DualConvMeshNet
from .singleconv_mesh_net import SingleConvMeshNet
|
nilq/baby-python
|
python
|
from __future__ import print_function
import json
import urllib
import boto3
print('*Loading lambda: s3FileListRead')
s3 = boto3.client('s3')
def lambda_handler(event, context):
    """List every object in the bucket root and print each file's UTF-8 contents.

    ``event`` and ``context`` are the standard Lambda invocation arguments;
    neither is used by this handler.
    """
    print('==== file list in bucket ====')
    AWS_S3_BUCKET_NAME = 'yujitokiwa-jp-test'
    s3_resource = boto3.resource('s3')
    bucket = s3_resource.Bucket(AWS_S3_BUCKET_NAME)
    # Delimiter='/' restricts the listing to the bucket root.
    # NOTE(review): list_objects is not paginated here — only the first page
    # (up to 1000 keys) is processed.
    result = bucket.meta.client.list_objects(Bucket=bucket.name, Delimiter='/')
    for o in result.get('Contents'):
        print(o.get('Key'))  # file name will be printed
        response = s3.get_object(Bucket=bucket.name, Key=o.get('Key'))
        data = response['Body'].read()
        print(data.decode('utf-8'))  # file contents will be printed
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 21:05:47 2020
@author: Richard
"""
from newsapi import NewsApiClient
# NOTE(review): the API key is hard-coded — move it to an environment
# variable before sharing or committing this script.
newsapi = NewsApiClient(api_key='0566dfe86d9c44c6a3bf8ae60eafb8c6')

# Fetch up to 100 English articles mentioning 'apple' for April 2020.
all_articles = newsapi.get_everything(q='apple',
                                      from_param='2020-04-01',
                                      to='2020-04-29',
                                      language='en',
                                      sort_by='relevancy',
                                      page_size=100,
                                      page=1)

# Collect distinct source ids (duplicates removed; order not preserved).
authors = []
for art in all_articles["articles"]:
    authors.append(art["source"]["id"])

authors = list(set(authors))
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas_datareader import data as web
from datetime import datetime, timedelta
from yahoo_finance import Share
from math import ceil, floor
from collections import deque
class Stock():
""" Historical data of a Stock
Attributes:
symbol - The official name of the stock
path - A path to the csv file containing information
data - Pandas DataFrame with all daily data
self.last_action
- A tuple of the latest action (buy or sell) and the date
Methods:
init_data - Gets a Pandas DataFrame with relevant information about the stock and saves it to a csv file with path from Stock.path.
init_data_csv
- Gets a Pandas DataFrame from a csv file with the path from Stock.path.
update_data - *TODO* Appends new data to existing data. Also saves to local csv.
splot - Plots a graph of closing price and closing averages specified in 'avg'.
get_avg - Finds the average closing price over 'avg_interval' number of days and adds a column to Stock.data.
print_data - Prints the Stock.data to the console.
create_avg - Creates the
do_rule_buy - Asserts if a buy-signal should be triggered.
rule_buy - Returns the latest index where Stock.do_rule_buy() returns True.
do_rule_sell- Asserts if a sell-signal should be triggered.
rule_sell - Returns the latest index where Stock.do_rule_sell() returns True.
"""
    def __init__(self, symbol, path="C:\\Stockbot\\Stocks", num_days=1000):
        """
        params:
            symbol - (String) The unique character combination indicating a certain share.
            path - (String) Default "C:\\Stockbot\\Stocks". The path directory where the Stocks related csv will be stored.
            num_days - (Int) Default 1000. The number of days for data gathering including closing days.
        returns:
            None

        Initializing method.
        """
        self.symbol = symbol.upper()
        # NOTE(review): the ``path`` argument is ignored — the csv location is
        # always rebuilt from this hard-coded Windows directory.
        self.path = "C:\\Stockbot\\Stocks\\{s}.csv".format(s=self.symbol)
        # Data is loaded from the local csv; switch to init_data() to refetch
        # from Yahoo Finance instead.
        # self.data = self.init_data(num_days)
        self.data = self.init_data_csv()
        self.last_action = (0,0) # Tuple of buy/sell and date
    def init_data(self, num_days=1000):
        """
        params:
            num_days - (Int) Default 1000. Number of days to fetch data for, including closing days
        returns:
            (pandas.DataFrame) A DataFrame for the last num_days days' worth of stock data. Values [ High, Low, Close, Volume ] are kept.

        Fetches data from Yahoo Finance using pandas_datareader the last num_days days. Writes the resulting csv to path as {symbol}.csv which is subsecuently is read and returned.
        """
        end = datetime.today()
        start = end - timedelta(days=num_days)
        # Requires network access via pandas_datareader's Yahoo backend.
        df = web.DataReader(self.symbol, "yahoo", start, end)
        df.to_csv(path_or_buf=self.path,columns=["High","Low","Close","Volume"])
        # Re-read the csv so the returned frame matches init_data_csv() output
        # (Date becomes a plain column rather than the index).
        df = pd.read_csv(filepath_or_buffer=self.path)
        return df
def init_data_csv(self):
"""
params:
None
returns:
(pandas.DataFrame) A DataFrame read from the csv stored in Stock.path.
Fetches data from a csv stored in Stock.path.
"""
return pd.read_csv(self.path)
    def update_data(self):
        """
        *TODO* Appends new data to existing data. Also saves to local csv.

        Currently a no-op placeholder.
        """
        pass
def splot(self,avg=None):
"""
params:
avg - (List of Ints) Defualt None. If unchanged, plot only closing prices. Plot averages specified in avg.
returns:
None.
Plots a graph of closing price and closing averages specified in 'avg'.
"""
avgs = ["Close"]
for avg_interval in avg:
self.create_avg(avg_interval)
avgs.append("avg_{avg_interval}".format(avg_interval=avg_interval))
self.data.plot(x=self.data.index, y=avgs, grid=True, ylim=(max(self.data["Close"]*1.1),min(self.data["Close"])*0.9))
plt.gca().invert_yaxis()
plt.show()
    def print_data(self):
        """
        params:
            None.
        returns:
            None.

        Prints the Stock.data to the console.
        """
        # Prints symbol, csv path and the full DataFrame, one per line.
        print("{s}\n{p}\n{d}".format(s=self.symbol,p=self.path,d=self.data))
    def get_avg(self,avg_interval):
        """
        params:
            avg_interval - (Int) The interval of days that should be averaged.
        returns:
            (pandas.DataFrame) Stock.data including the newly created average column.

        Finds the average closing price over 'avg_interval' number of days and adds a column to Stock.data.
        NaN closing prices are skipped (they do not enter the window), and the
        output is NaN until a full window of non-NaN prices has been seen.
        """
        col = "avg_{avg_interval}".format(avg_interval=avg_interval)
        prices = self.data["Close"]
        dates = self.data["Date"]
        # Seed the column so every row exists; each cell is overwritten below.
        self.data[col] = self.data["Close"].copy()
        # Sliding window of the last `avg_interval` non-NaN closing prices.
        d = deque()
        for idx, price in enumerate(prices):
            if not np.isnan(price):
                if len(d) < avg_interval:
                    d.append(price)
                else:
                    # Window full: drop the oldest price, add the newest.
                    d.popleft()
                    d.append(price)
                if len(d) == avg_interval:
                    avg = sum(d)/avg_interval
                    self.data.loc[idx, col] = avg
                else:
                    # Not enough history yet for a full window.
                    self.data.loc[idx, col] = np.nan
            else:
                self.data.loc[idx, col] = np.nan
        return self.data
def create_avg(self, avg_interval):
    """
    params:
        avg_interval - (Int) The interval of days that should be averaged.
    returns:
        (pandas.DataFrame) Stock.data including the average column.
    Computes the "avg_<avg_interval>" column via Stock.get_avg() only if it does
    not already exist, and always returns Stock.data.
    """
    # Bug fix: the membership test must use the column name ("avg_N"), not the
    # bare integer -- the old check was always True, so the average was
    # recomputed on every call.
    col = "avg_{avg_interval}".format(avg_interval=avg_interval)
    if col not in self.data.columns:
        self.get_avg(avg_interval)
    # Bug fix: previously returned None when the column already existed.
    return self.data
def do_rule_buy(self, idx, col_x, col_y):
    """
    params:
        idx - (Int) The index of Stock.data that should be examined.
        col_x - (String) Name of the first column for comparison.
        col_y - (String) Name of the second column for comparison.
    returns:
        (Boolean) True when the buy rule fires at row idx, i.e.
        closing_price > val_x and val_x < val_y.
    Asserts if a buy-signal should be triggered.
    """
    row = self.data.loc[idx]
    return bool(row["Close"] > row[col_x] and row[col_x] < row[col_y])
def rule_buy(self, x, y):
    """
    params:
        x - (Int) The first average to be compared.
        y - (Int) The second average to be compared.
    returns:
        (Int) The latest index where a buy signal was triggered, or None.
    Scans Stock.data from the newest row backwards and returns the first index
    for which Stock.do_rule_buy() holds.
    """
    col_x = "avg_{x}".format(x=x)
    self.create_avg(x)
    col_y = "avg_{y}".format(y=y)
    self.create_avg(y)
    return next(
        (idx for idx in reversed(self.data.index) if self.do_rule_buy(idx, col_x, col_y)),
        None,
    )
def do_rule_sell(self, idx, col_x, col_y):
    """
    params:
        idx - (Int) The index of Stock.data that should be examined.
        col_x - (String) Name of the first column for comparison.
        col_y - (String) Name of the second column for comparison.
    returns:
        (Boolean) True when the sell rule fires at row idx, i.e.
        closing_price < val_x and val_x > val_y.
    Asserts if a sell-signal should be triggered.
    """
    row = self.data.loc[idx]
    return bool(row["Close"] < row[col_x] and row[col_x] > row[col_y])
def rule_sell(self, x, y):
    """
    params:
        x - (Int) The first average to be compared.
        y - (Int) The second average to be compared.
    returns:
        (Int) The latest index where a sell signal was triggered, or None.
    Scans Stock.data from the newest row backwards and returns the first index
    for which Stock.do_rule_sell() holds.
    """
    col_x = "avg_{x}".format(x=x)
    self.create_avg(x)
    col_y = "avg_{y}".format(y=y)
    self.create_avg(y)
    return next(
        (idx for idx in reversed(self.data.index) if self.do_rule_sell(idx, col_x, col_y)),
        None,
    )
def simulate_market(stock, start_money, avg=(2, 10)):
    """Brute-force search for the best pair of average intervals.

    params:
        stock - (Stock) The stock whose historical data is simulated.
        start_money - (Number) Cash available at the start of each simulation.
        avg - (Tuple of Ints) The lowest and highest averages to be examined.
    returns:
        None. Prints the best gross percentage found and the (x, y) pair.
    For every pair (x, y) with y > x + 1, replays the buy/sell rules over the
    whole price history and tracks the most profitable pair.
    """
    start, end = avg
    # Pre-compute all averages once so the inner loops only read columns.
    for x in range(start, end + 1):
        stock.create_avg(x)
    max_money = 0
    max_avg = (0, 0)
    max_num_purchases = 0
    idx_max = stock.data.last_valid_index()
    for x in range(start, end):
        col_x = "avg_{x}".format(x=x)
        for y in (y for y in range(start + 1, end + 1) if y > x + 1):
            col_y = "avg_{y}".format(y=y)
            money, num_bought, num_purchases, mode = start_money, 0, 0, "buy"
            for idx in range(0, idx_max + 1):
                # Want to buy
                if mode == "buy" and stock.do_rule_buy(idx, col_x, col_y):
                    mode = "sell"
                    price = stock.data.loc[idx, "Close"]
                    num_bought, money = money / price, 0
                    num_purchases += 1
                # Want to sell (deliberately not elif: a sell may follow a buy
                # on the same row, matching the original behaviour)
                if mode == "sell" and stock.do_rule_sell(idx, col_x, col_y):
                    mode = "buy"
                    price = stock.data.loc[idx, "Close"]
                    money, num_bought = num_bought * price, 0
                    num_purchases += 1
            # Bug fix: liquidate only when still holding shares. The old
            # unconditional `money = num_bought * price` zeroed the cash
            # whenever a run ended right after a sell, and raised NameError
            # when no trade happened at all. Liquidation uses the final close.
            if num_bought:
                money = num_bought * stock.data.loc[idx_max, "Close"]
            # Logging max values
            if money >= max_money and num_purchases > 1:
                max_money = money
                max_avg = (x, y)
                max_num_purchases = num_purchases
    maxx, maxy = max_avg
    print("MAX:: {p}% ({x}, {y}). Num {n}".format(p=round(max_money / start_money * 100, 3), x=maxx, y=maxy, n=max_num_purchases))
if __name__ == "__main__":
test_stock = Stock("AMZN")
# test_stock.get_avg(2)
# test_stock.print_data()
# test_stock.rule_buy(3, 4)
# test_stock.rule_sell(5, 6)
# simulate_market(test_stock, 10000, (7,10))
# test_stock.splot([11, 12])
"""
TODO:
Retry fetching data from web
Write the Stock.update_data() method
Create a proper test method
Check Stock.init_csv() in case no csv in Stock.path
Create notification system that provides insigh whether or not it recommends to buy/sell
"""
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats

# Synthetic data set: y is a noisy linear function of x.
size = 1000
x = np.random.randn(size)
y = 1.051 * x + np.random.random(size)

# Scatter-plot the raw samples ("Dado original" = original data).
plt.plot(x,y,'*',color='black',label="Dado original")
plt.xlabel('X')
plt.ylabel('Y')
plt.title('Regressão Linear')

# Ordinary least-squares fit via scipy; report the regression statistics.
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
print("Coeficiente angular (slope)= %f" %slope)
print("Coeficiente linear (intercept)= %f" %intercept)
# linregress returns r; square it for the coefficient of determination.
print("R quadrado (r-squared)= %f" %r_value**2)
print("Valor p (p-value)= %f" %p_value)
print("Erro (Std)= %f" %std_err)

# Overlay the fitted line ("Dado ajustado" = fitted data) and display.
ajuste = intercept + slope*x
plt.plot(x,ajuste,color='red',label="Dado ajustado")
plt.legend()
plt.show()
|
nilq/baby-python
|
python
|
"""
Contains functions to assist with stuff across the application.
ABSOLUTELY NO IMPORTS FROM OTHER PLACES IN THE REPOSITORY.
Created: 23 June 2020
"""
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (C) 2015 by Brian Horn, trycatchhorn@gmail.com.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Provides a data structure used to model a linked list iterator.
"""
__author__ = "Brian Horn"
__copyright__ = "Copyright (c) 2015 Brian Horn"
__credits__ = "Brian Horn"
__license__ = "MIT"
__version__ = "1.0.2"
__maintainer__ = "Brian Horn"
__email__ = "trycatchhorn@gmail.com"
__status__ = "Prototype"
from py_alg_dat.iterator import Iterator
class LinkedListIterator(Iterator):
    """
    The interface of a linked list iterator.
    """

    def __init__(self, head):
        """
        Constructs an iterator enumerating the linked list.
        @param head: The first element in the linked list.
        @type: C{object}
        """
        super(LinkedListIterator, self).__init__(head)
        self.current = head

    def next(self):
        """
        Returns the next element in the linked list.
        @return: The next element in the linked list.
        @rtype: C{object}
        @raise StopIteration: When the end of the list has been reached.
        """
        if self.current is None:
            raise StopIteration
        retval = self.current
        self.current = self.current.next
        return retval

    # Python 3 compatibility: the iterator protocol looks up __next__, while
    # Python 2 used next. Alias one to the other so the class works on both.
    __next__ = next
|
nilq/baby-python
|
python
|
from cto_ai import sdk, ux
cto_terminal = """
[94m██████[39m[33m╗[39m [94m████████[39m[33m╗[39m [94m██████[39m[33m╗ [39m [94m█████[39m[33m╗[39m [94m██[39m[33m╗[39m
[94m██[39m[33m╔════╝[39m [33m╚══[39m[94m██[39m[33m╔══╝[39m [94m██[39m[33m╔═══[39m[94m██[39m[33m╗[39m [94m██[39m[33m╔══[39m[94m██[39m[33m╗[39m [94m██[39m[33m║[39m
[94m██[39m[33m║ [39m [94m ██[39m[33m║ [39m [94m██[39m[33m║[39m[94m ██[39m[33m║[39m [94m███████[39m[33m║[39m [94m██[39m[33m║[39m
[94m██[39m[33m║ [39m [94m ██[39m[33m║ [39m [94m██[39m[33m║[39m[94m ██[39m[33m║[39m [94m██[39m[33m╔══[39m[94m██[39m[33m║[39m [94m██[39m[33m║[39m
[33m╚[39m[94m██████[39m[33m╗[39m [94m ██[39m[33m║ [39m [33m╚[39m[94m██████[39m[33m╔╝[39m [94m██[39m[33m╗[39m [94m██[39m[33m║[39m[94m ██[39m[33m║[39m [94m██[39m[33m║[39m
[33m ╚═════╝[39m [33m ╚═╝ [39m [33m ╚═════╝ [39m [33m╚═╝[39m [33m╚═╝ ╚═╝[39m [33m╚═╝[39m
We’re building the world’s best developer experiences.
"""
cto_slack = """:white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square:
:white_square::white_square::black_square::black_square::white_square::white_square::black_square::black_square::black_square::white_square::white_square::white_square::black_square::black_square::black_square::white_square:
:white_square::black_square::white_square::white_square::black_square::white_square::black_square::white_square::white_square::black_square::white_square::black_square::white_square::white_square::white_square::white_square:
:white_square::black_square::white_square::white_square::black_square::white_square::black_square::black_square::black_square::white_square::white_square::white_square::black_square::black_square::white_square::white_square:
:white_square::black_square::white_square::white_square::black_square::white_square::black_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::black_square::white_square:
:white_square::white_square::black_square::black_square::white_square::white_square::black_square::white_square::white_square::white_square::white_square::black_square::black_square::black_square::white_square::white_square:
:white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square::white_square:"""
def logo_print():
    """Print the CTO.ai banner, choosing the variant for the active interface."""
    banner = cto_terminal if sdk.get_interface_type() == 'terminal' else cto_slack
    ux.print(banner)
|
nilq/baby-python
|
python
|
# http://book.pythontips.com/en/latest/for_-_else.html
# Demonstrates Python's for/else: the else suite runs only when the inner
# loop completes WITHOUT hitting `break` -- i.e. when no factor was found.
for n in range(2, 10):
    for x in range(2, n):
        if n % x == 0:
            print(n, "equals", x, "*", n // x)
            break
    else:
        # loop fell through without finding a factor
        print(n, "is a prime number")

# Expected output:
# 2 is a prime number
# 3 is a prime number
# 4 equals 2 * 2
# 5 is a prime number
# 6 equals 2 * 3
# 7 is a prime number
# 8 equals 2 * 4
# 9 equals 3 * 3
nilq/baby-python
|
python
|
""" pygame module for loading and playing sounds """
import math
from pygame._sdl import sdl, ffi
from pygame._error import SDLError
from pygame.base import register_quit
import pygame.mixer_music as music
from pygame.mixer_music import check_mixer
from pygame.rwobject import (rwops_encode_file_path, rwops_from_file,
rwops_from_file_path)
# Defaults mirroring pygame's mixer: frequency in Hz, size in (signed) bits,
# output channel count and chunk size in bytes.
PYGAME_MIXER_DEFAULT_FREQUENCY = 22050
PYGAME_MIXER_DEFAULT_SIZE = -16
PYGAME_MIXER_DEFAULT_CHANNELS = 2
PYGAME_MIXER_DEFAULT_CHUNKSIZE = 4096

# Values requested via pre_init(); consumed by autoinit() when init() omits
# an argument. (Removed stray C-style trailing semicolons.)
_request_frequency = PYGAME_MIXER_DEFAULT_FREQUENCY
_request_size = PYGAME_MIXER_DEFAULT_SIZE
_request_stereo = PYGAME_MIXER_DEFAULT_CHANNELS
_request_chunksize = PYGAME_MIXER_DEFAULT_CHUNKSIZE

# Per-channel bookkeeping and currently loaded/queued music; populated on init.
_channeldata = None
_numchanneldata = 0
_current_music = None
_queue_music = None
class ChannelData(object):
    """Per-channel bookkeeping: the playing sound, a queued follow-up sound,
    and the event type posted when playback on the channel finishes."""

    def __init__(self):
        self.sound = None  # Sound currently playing on this channel
        self.queue = None  # Sound to start once the current one ends
        self.endevent = sdl.SDL_NOEVENT  # event type posted on playback end
class Channel(object):
    """Channel(id): return Channel
    Create a Channel object for controlling playback"""

    def __init__(self, channel):
        # Index into SDL_mixer's channel table.
        self.chan = int(channel)

    def __repr__(self):
        return '<Chan(%i)>' % self.chan

    def play(self, sound, loops=0, maxtime=-1, fade_ms=0):
        """play Sound on this channel"""
        # Note: channelnum will equal self.chan
        if fade_ms > 0:
            channelnum = sdl.Mix_FadeInChannelTimed(self.chan,
                                                    sound.chunk, loops,
                                                    fade_ms, maxtime)
        else:
            channelnum = sdl.Mix_PlayChannelTimed(self.chan,
                                                  sound.chunk, loops,
                                                  maxtime)
        if channelnum != -1:
            # Tag the channel with the sound's group id so Sound.stop()/
            # fadeout() can address every channel playing this chunk.
            sdl.Mix_GroupChannel(channelnum, sound._chunk_tag)
        _channeldata[channelnum].sound = sound
        _channeldata[channelnum].queue = None

    def get_busy(self):
        # True while this channel is playing.
        check_mixer()
        return sdl.Mix_Playing(self.chan) != 0

    def stop(self):
        # Halt playback on this channel immediately.
        check_mixer()
        sdl.Mix_HaltChannel(self.chan)

    def pause(self):
        check_mixer()
        sdl.Mix_Pause(self.chan)

    def unpause(self):
        check_mixer()
        sdl.Mix_Resume(self.chan)

    def get_volume(self):
        # Mix_Volume with -1 queries without changing; SDL volume is 0-128,
        # reported here as 0.0-1.0.
        check_mixer()
        volume = sdl.Mix_Volume(self.chan, -1)
        return volume / 128.0

    def set_volume(self, lvolume, rvolume=None):
        check_mixer()
        # This logic differs a bit from pygames because we can use a better
        # sentinal value
        if rvolume is None:
            # No Panning
            if sdl.Mix_SetPanning(self.chan, 255, 255) == 0:
                raise SDLError.from_sdl_error()
            volume = int(lvolume * 128)
        else:
            # Panning
            left = int(lvolume * 255)
            right = int(rvolume * 255)
            if sdl.Mix_SetPanning(self.chan, left, right) == 0:
                raise SDLError.from_sdl_error()
            volume = 128
        sdl.Mix_Volume(self.chan, volume)

    def fadeout(self, time):
        """ fadeout(time) -> None
        stop playback after fading channel out
        """
        check_mixer()
        sdl.Mix_FadeOutChannel(self.chan, time)

    def get_sound(self, ):
        """ get_sound() -> Sound
        get the currently playing Sound
        """
        return _channeldata[self.chan].sound

    def queue(self, sound):
        """ queue(Sound) -> None
        queue a Sound object to follow the current
        """
        # if nothing is playing
        if _channeldata[self.chan].sound is None:
            channelnum = sdl.Mix_PlayChannelTimed(self.chan, sound.chunk,
                                                  0, -1)
            if channelnum != -1:
                sdl.Mix_GroupChannel(channelnum, sound._chunk_tag)
            _channeldata[channelnum].sound = sound
        # sound is playing, queue new sound
        else:
            _channeldata[self.chan].queue = sound

    def get_queue(self):
        """ get_queue() -> Sound
        return any Sound that is queued
        """
        return _channeldata[self.chan].queue

    def set_endevent(self, event_id=sdl.SDL_NOEVENT):
        """ set_endevent() -> None
        have the channel send an event when playback stops
        """
        _channeldata[self.chan].endevent = event_id

    def get_endevent(self):
        """ get_endevent() -> type
        get the event a channel sends when playback stops
        """
        return _channeldata[self.chan].endevent
class Sound(object):
    """Sound(filename) -> Sound
    Sound(file=filename) -> Sound
    Sound(buffer) -> Sound
    Sound(buffer=buffer) -> Sound
    Sound(object) -> Sound
    Sound(file=object) -> Sound
    Sound(array=object) -> Sound
    Create a new Sound object from a file or buffer object
    """

    def __init__(self, obj=None, **kwargs):
        check_mixer()
        self.chunk = None
        # nasty mangling of parameters!
        # if 1 position arg: could be filename, file or buffer
        # if 1 keyword arg: could be filename, file, buffer or array where
        # filename and file use the same keyword 'file'
        if obj is not None:
            if kwargs:
                raise TypeError("Sound takes either 1 positional or "
                                "1 keyword argument")

            filename = None
            buff = None
            err = None
            if isinstance(obj, basestring):
                filename = obj
                # A byte string may also be a raw sample buffer.
                if not isinstance(obj, unicode):
                    buff = obj
            elif isinstance(obj, file):
                rwops = rwops_from_file(obj)
                self.chunk = sdl.Mix_LoadWAV_RW(rwops, 1)
            else:
                buff = obj

            if filename is not None:
                try:
                    filename = rwops_encode_file_path(filename)
                    rwops = rwops_from_file_path(filename)
                    self.chunk = sdl.Mix_LoadWAV_RW(rwops, 1)
                except SDLError as e:
                    err = e

            if not self.chunk and buff is not None:
                raise NotImplementedError("Loading from buffer not "
                                          "implemented yet")
                # TODO: check if buff implements buffer interface.
                # If it does, load from buffer. If not, re-raise
                # error from filename if filename is not None.
        else:
            if len(kwargs) != 1:
                raise TypeError("Sound takes either 1 positional or "
                                "1 keyword argument")

            # list(kwargs)[0] instead of kwargs.keys()[0]: works on both
            # Python 2 and Python 3.
            arg_name = list(kwargs)[0]
            arg_value = kwargs[arg_name]
            if arg_name == 'file':
                if isinstance(arg_value, basestring):
                    filename = rwops_encode_file_path(arg_value)
                    rwops = rwops_from_file_path(filename, 'rb')
                else:
                    rwops = rwops_from_file(arg_value)
                self.chunk = sdl.Mix_LoadWAV_RW(rwops, 1)
            elif arg_name == 'buffer':
                # Bug fix: inspect the supplied VALUE for unicode, not the
                # keyword name (which is always the str 'buffer').
                if isinstance(arg_value, unicode):
                    raise TypeError("Unicode object not allowed as "
                                    "buffer object")
                raise NotImplementedError("Loading from buffer not "
                                          "implemented yet")
            elif arg_name == 'array':
                raise NotImplementedError("Loading from array not "
                                          "implemented yet")
            else:
                raise TypeError("Unrecognized keyword argument '%s'" % arg_name)

        # pygame uses the pointer address as the tag to ensure
        # uniqueness, we use id for the same effect
        # Since we don't have the some automatic casting rules as
        # C, we explicitly cast to int here. This matches pygames
        # behaviour, so we're bug-compatible
        self._chunk_tag = ffi.cast("int", id(self.chunk))
        if not self.chunk:
            raise SDLError.from_sdl_error()

    def __del__(self):
        # Free the underlying SDL chunk when the Sound is collected.
        if self.chunk:
            sdl.Mix_FreeChunk(self.chunk)

    def play(self, loops=0, maxtime=-1, fade_ms=0):
        """play(loops=0, maxtime=-1, fade_ms=0) -> Channel
        begin sound playback"""
        if fade_ms > 0:
            channelnum = sdl.Mix_FadeInChannelTimed(-1, self.chunk, loops,
                                                    fade_ms, maxtime)
        else:
            channelnum = sdl.Mix_PlayChannelTimed(-1, self.chunk, loops,
                                                  maxtime)
        if channelnum < 0:
            # failure
            return None

        _channeldata[channelnum].sound = self
        _channeldata[channelnum].queue = None
        sdl.Mix_Volume(channelnum, 128)
        sdl.Mix_GroupChannel(channelnum, self._chunk_tag)
        return Channel(channelnum)

    def stop(self):
        """stop() -> None
        stop sound playback
        """
        check_mixer()
        sdl.Mix_HaltGroup(self._chunk_tag)

    def get_volume(self):
        """get_volume(): return value
        get the playback volume"""
        check_mixer()
        volume = sdl.Mix_VolumeChunk(self.chunk, -1)
        return volume / 128.0

    def set_volume(self, volume):
        """set_volume(value): return None
        set the playback volume for this Sound"""
        check_mixer()
        sdl.Mix_VolumeChunk(self.chunk, int(volume * 128))

    def fadeout(self, time):
        """ fadeout(time) -> None
        stop sound playback after fading out
        """
        check_mixer()
        sdl.Mix_FadeOutGroup(self._chunk_tag, time)

    def get_num_channels(self):
        """ get_num_channels() -> count
        count how many times this Sound is playing
        """
        check_mixer()
        return sdl.Mix_GroupCount(self._chunk_tag)

    def get_length(self):
        """ get_length() -> seconds
        get the length of the Sound
        """
        check_mixer()
        frequency, fmt, channels = (ffi.new('int*'), ffi.new('uint16_t*'),
                                    ffi.new('int*'))
        sdl.Mix_QuerySpec(frequency, fmt, channels)
        # Bug fix: compare the dereferenced format value, not the cdata
        # pointer -- the old pointer comparison was always False, so 8-bit
        # audio was treated as 16-bit and lengths came out halved.
        # (Local renamed fmt to avoid shadowing the builtin `format`.)
        if fmt[0] == sdl.AUDIO_S8 or fmt[0] == sdl.AUDIO_U8:
            mixerbytes = 1.0
        else:
            mixerbytes = 2.0
        numsamples = self.chunk.alen / mixerbytes / channels[0]
        return numsamples / frequency[0]

    def get_raw(self):
        """ get_raw() -> bytes
        return a bytestring copy of the Sound samples.
        """
        check_mixer()
        return ffi.buffer(ffi.cast('char*', self.chunk.abuf),
                          self.chunk.alen)[:]

    # TODO: array interface and buffer protocol implementation

    def __array_struct__(self, closure):
        raise NotImplementedError

    def __array_interface__(self, closure):
        raise NotImplementedError

    def _samples_address(self, closure):
        raise NotImplementedError
def get_init():
    """get_init(): return (frequency, format, channels)
    test if the mixer is initialized; returns None when it is not."""
    if not sdl.SDL_WasInit(sdl.SDL_INIT_AUDIO):
        return None

    freq = ffi.new("int *")
    audioformat = ffi.new("uint16_t *")
    chan = ffi.new("int *")
    if not sdl.Mix_QuerySpec(freq, audioformat, chan):
        return None

    # Follow pygame's convention: signed sample formats (high bits set in the
    # SDL format flags) are reported as a negative bit size.
    if audioformat[0] & ~0xff:
        format_in_bits = -(audioformat[0] & 0xff)
    else:
        format_in_bits = audioformat[0] & 0xff
    return (int(freq[0]), format_in_bits, int(chan[0]))
def pre_init(frequency=PYGAME_MIXER_DEFAULT_FREQUENCY,
             size=PYGAME_MIXER_DEFAULT_SIZE,
             channels=PYGAME_MIXER_DEFAULT_CHANNELS,
             chunksize=PYGAME_MIXER_DEFAULT_CHUNKSIZE):
    """ pre_init(frequency=22050, size=-16, channels=2, buffersize=4096) -> None
    preset the mixer init arguments

    Only records the requested values; they take effect at the next init().
    """
    global _request_frequency, _request_size, _request_stereo, \
        _request_chunksize
    _request_frequency = frequency
    _request_size = size
    _request_stereo = channels
    _request_chunksize = chunksize
def init(frequency=None, size=None, channels=None, chunksize=None):
    """init(frequency=22050, size=-16, channels=2, buffer=4096): return None
    initialize the mixer module

    Raises SDLError when the audio subsystem or mixer cannot be opened.
    """
    if not autoinit(frequency, size, channels, chunksize):
        raise SDLError.from_sdl_error()
def autoinit(frequency=None, size=None, channels=None, chunksize=None):
    """Initialise SDL audio and SDL_mixer with the given settings.

    Missing arguments fall back to the values recorded by pre_init().
    Returns True on success, False when the SDL audio subsystem or the
    mixer could not be opened.
    """
    if not frequency:
        frequency = _request_frequency
    if not size:
        size = _request_size
    if not channels:
        channels = _request_stereo
    if not chunksize:
        chunksize = _request_chunksize

    # SDL_mixer only supports mono or stereo output.
    if channels >= 2:
        channels = 2
    else:
        channels = 1

    # Round the chunk size down to a power of two, then back up so we never
    # hand SDL a buffer smaller than the caller asked for.
    # Bug fix: the old code compared against the builtin `buffer`, which made
    # the test meaningless and doubled the chunk size unconditionally.
    requested_chunksize = chunksize
    chunksize = int(math.log(chunksize, 2))
    chunksize = 2 ** chunksize
    if chunksize < requested_chunksize:
        chunksize *= 2

    # Translate pygame's signed "size" convention into SDL format flags.
    if size == 8:
        fmt = sdl.AUDIO_U8
    elif size == -8:
        fmt = sdl.AUDIO_S8
    elif size == 16:
        fmt = sdl.AUDIO_U16SYS
    elif size == -16:
        fmt = sdl.AUDIO_S16SYS
    else:
        raise ValueError("unsupported size %d" % size)

    global _numchanneldata, _channeldata
    if not sdl.SDL_WasInit(sdl.SDL_INIT_AUDIO):
        register_quit(autoquit)
        # Allocate per-channel bookkeeping on first initialisation.
        if not _channeldata:
            _numchanneldata = sdl.MIX_CHANNELS
            _channeldata = [ChannelData() for i in range(_numchanneldata)]

        if sdl.SDL_InitSubSystem(sdl.SDL_INIT_AUDIO) == -1:
            return False

        if sdl.Mix_OpenAudio(frequency, fmt, channels, chunksize) == -1:
            sdl.SDL_QuitSubSystem(sdl.SDL_INIT_AUDIO)
            return False
        sdl.Mix_ChannelFinished(_endsound_callback)

        # TODO: reverse stereo for 8-bit below SDL 1.2.8
        sdl.Mix_VolumeMusic(127)
    return True
def autoquit():
    """Tear down the mixer: halt music, free loaded music and channel data,
    close the audio device and quit the SDL audio subsystem."""
    global _channeldata, _numchanneldata, _current_music, \
        _queue_music
    if sdl.SDL_WasInit(sdl.SDL_INIT_AUDIO):
        sdl.Mix_HaltMusic()
        # cleanup
        if _channeldata:
            _channeldata = None
            _numchanneldata = 0
        if _current_music:
            sdl.Mix_FreeMusic(_current_music)
            _current_music = None
        if _queue_music:
            sdl.Mix_FreeMusic(_queue_music)
            _queue_music = None

        sdl.Mix_CloseAudio()
        sdl.SDL_QuitSubSystem(sdl.SDL_INIT_AUDIO)
def quit():
    """ quit() -> None
    uninitialize the mixer

    Thin public wrapper around autoquit(); the name mirrors pygame's API
    (and intentionally shadows the builtin within this module).
    """
    autoquit()
def find_channel(force=False):
    """find_channel(force=False): return Channel
    find an unused channel
    """
    check_mixer()
    channelnum = sdl.Mix_GroupAvailable(-1)
    if channelnum != -1:
        return Channel(channelnum)
    if not force:
        return None
    # All channels busy: reuse the one playing the longest.
    return Channel(sdl.Mix_GroupOldest(-1))
def get_busy():
    """get_busy(): return bool
    test if any sound is being mixed"""
    # Safe to call before init(): reports False instead of raising.
    if not sdl.SDL_WasInit(sdl.SDL_INIT_AUDIO):
        return False
    return sdl.Mix_Playing(-1) != 0
def get_num_channels():
    """get the total number of playback channels"""
    check_mixer()
    # -1 asks SDL_mixer for the count across all groups.
    return sdl.Mix_GroupCount(-1)
def set_num_channels(count):
    """ set_num_channels(count) -> None
    set the total number of playback channels
    """
    check_mixer()
    global _numchanneldata, _channeldata
    # Grow the Python-side bookkeeping list when needed; shrinking keeps the
    # extra entries around, SDL_mixer simply stops using channels >= count.
    if count > _numchanneldata:
        _channeldata.extend([ChannelData() for i in
                             range(count - _numchanneldata)])
        _numchanneldata = count
    sdl.Mix_AllocateChannels(count)
def pause():
    """pause(): return None
    temporarily stop playback of all sound channels"""
    check_mixer()
    # -1 targets every channel.
    sdl.Mix_Pause(-1)
def stop():
    """stop(): return None
    stop playback of all sound channels"""
    check_mixer()
    # -1 targets every channel.
    sdl.Mix_HaltChannel(-1)
def unpause():
    """unpause(): return None
    resume paused playback of sound channels"""
    check_mixer()
    # -1 targets every channel.
    sdl.Mix_Resume(-1)
def fadeout(time):
    """ fadeout(time) -> None
    fade out the volume on all sounds before stopping

    time is the fade duration in milliseconds; -1 targets every channel.
    """
    check_mixer()
    sdl.Mix_FadeOutChannel(-1, time)
def set_reserved(count):
    """ set_reserved(count) -> None
    reserve channels from being automatically used

    The first `count` channels are excluded from automatic selection
    (e.g. Sound.play() with no explicit channel).
    """
    check_mixer()
    sdl.Mix_ReserveChannels(count)
@ffi.callback("void (*)(int channel)")
def _endsound_callback(channelnum):
if not _channeldata:
return
data = _channeldata[channelnum]
# post sound ending event
if data.endevent != sdl.SDL_NOEVENT and sdl.SDL_WasInit(sdl.SDL_INIT_AUDIO):
event = ffi.new('SDL_Event*')
event.type = data.endevent
if event.type >= sdl.SDL_USEREVENT and event.type < sdl.SDL_NUMEVENTS:
event.user.code = channelnum
sdl.SDL_PushEvent(event)
if data.queue:
sound_chunk = data.sound.chunk
data.sound = data.queue
data.queue = None
channelnum = sdl.Mix_PlayChannelTimed(channelnum, sound_chunk, 0, -1)
if channelnum != -1:
sdl.Mix_GroupChannel(channelnum, data.sound._chunk_tag)
else:
data.sound = None
|
nilq/baby-python
|
python
|
# pylint: disable=missing-docstring
from openshift_checks import OpenShiftCheck, get_var
class DockerImageAvailability(OpenShiftCheck):
    """Check that required Docker images are available.

    This check attempts to ensure that required docker images are
    either present locally, or able to be pulled down from available
    registries defined in a host machine.
    """

    name = "docker_image_availability"
    tags = ["preflight"]

    skopeo_image = "openshift/openshift-ansible"

    # FIXME(juanvallejo): we should consider other possible values of
    # `deployment_type` (the key here). See
    # https://github.com/openshift/openshift-ansible/blob/8e26f8c/roles/openshift_repos/vars/main.yml#L7
    docker_image_base = {
        "origin": {
            "repo": "openshift",
            "image": "origin",
        },
        "openshift-enterprise": {
            "repo": "openshift3",
            "image": "ose",
        },
    }

    def run(self, tmp, task_vars):
        """Entry point: report failure when any required image is neither
        local nor available in a configured registry."""
        required_images = self.required_images(task_vars)
        missing_images = set(required_images) - set(self.local_images(required_images, task_vars))

        # exit early if all images were found locally
        if not missing_images:
            return {"changed": False}

        msg, failed, changed = self.update_skopeo_image(task_vars)

        # exit early if Skopeo update fails
        if failed:
            return {
                "failed": True,
                "changed": changed,
                "msg": "Failed to update Skopeo image ({img_name}). {msg}".format(img_name=self.skopeo_image, msg=msg),
            }

        registries = self.known_docker_registries(task_vars)
        available_images = self.available_images(missing_images, registries, task_vars)
        unavailable_images = set(missing_images) - set(available_images)

        if unavailable_images:
            return {
                "failed": True,
                "msg": (
                    "One or more required images are not available: {}.\n"
                    "Configured registries: {}"
                ).format(", ".join(sorted(unavailable_images)), ", ".join(registries)),
                "changed": changed,
            }

        return {"changed": changed}

    def required_images(self, task_vars):
        """Build the set of image names this deployment needs."""
        deployment_type = get_var(task_vars, "deployment_type")
        # FIXME(juanvallejo): we should handle gracefully with a proper error
        # message when given an unexpected value for `deployment_type`.
        image_base_name = self.docker_image_base[deployment_type]

        openshift_release = get_var(task_vars, "openshift_release")
        # FIXME(juanvallejo): this variable is not required when the
        # installation is non-containerized. The example inventories have it
        # commented out. We should handle gracefully and with a proper error
        # message when this variable is required and not set.
        openshift_image_tag = get_var(task_vars, "openshift_image_tag")

        is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")

        if is_containerized:
            images = set(self.containerized_docker_images(image_base_name, openshift_release))
        else:
            images = set(self.rpm_docker_images(image_base_name, openshift_release))

        # append images with qualified image tags to our list of required images.
        # these are images with a (v0.0.0.0) tag, rather than a standard release
        # format tag (v0.0). We want to check this set in both containerized and
        # non-containerized installations.
        images.update(
            self.qualified_docker_images(self.image_from_base_name(image_base_name), "v" + openshift_image_tag)
        )

        return images

    def local_images(self, images, task_vars):
        """Filter a list of images and return those available locally."""
        return [
            image for image in images
            if self.is_image_local(image, task_vars)
        ]

    def is_image_local(self, image, task_vars):
        """True when docker_image_facts reports the image present locally."""
        result = self.module_executor("docker_image_facts", {"name": image}, task_vars)
        if result.get("failed", False):
            return False

        return bool(result.get("images", []))

    def known_docker_registries(self, task_vars):
        """Return the registry names configured on the host (per docker_info)."""
        result = self.module_executor("docker_info", {}, task_vars)

        if result.get("failed", False):
            return []

        # FIXME(juanvallejo): wrong default type, result["info"] is expected to
        # contain a dictionary (see how we call `docker_info.get` below).
        docker_info = result.get("info", "")
        return [registry.get("Name", "") for registry in docker_info.get("Registries", {})]

    def available_images(self, images, registries, task_vars):
        """Inspect existing images using Skopeo and return all images successfully inspected."""
        return [
            image for image in images
            if self.is_image_available(image, registries, task_vars)
        ]

    def is_image_available(self, image, registries, task_vars):
        """True when the image can be inspected in at least one registry."""
        for registry in registries:
            if self.is_available_skopeo_image(image, registry, task_vars):
                return True

        return False

    def is_available_skopeo_image(self, image, registry, task_vars):
        """Uses Skopeo to determine if required image exists in a given registry."""
        cmd_str = "skopeo inspect docker://{registry}/{image}".format(
            registry=registry,
            image=image,
        )

        args = {
            "name": "skopeo_inspect",
            "image": self.skopeo_image,
            "command": cmd_str,
            "detach": False,
            "cleanup": True,
        }
        result = self.module_executor("docker_container", args, task_vars)
        # Bug fix: a successful inspection is one that did NOT fail. The old
        # code returned the failure flag itself, reporting images as available
        # exactly when the skopeo inspection failed (and vice versa).
        return not result.get("failed", False)

    def containerized_docker_images(self, base_name, version):
        return [
            "{image}:{version}".format(image=self.image_from_base_name(base_name), version=version)
        ]

    @staticmethod
    def rpm_docker_images(base, version):
        return [
            "{image_repo}/registry-console:{version}".format(image_repo=base["repo"], version=version)
        ]

    @staticmethod
    def qualified_docker_images(image_name, version):
        return [
            "{}-{}:{}".format(image_name, component, version)
            for component in "haproxy-router docker-registry deployer pod".split()
        ]

    @staticmethod
    def image_from_base_name(base):
        return "".join([base["repo"], "/", base["image"]])

    # ensures that the skopeo docker image exists, and updates it
    # with latest if image was already present locally.
    def update_skopeo_image(self, task_vars):
        result = self.module_executor("docker_image", {"name": self.skopeo_image}, task_vars)
        return result.get("msg", ""), result.get("failed", False), result.get("changed", False)
|
nilq/baby-python
|
python
|
import torch
from torch.multiprocessing import Pool
class Simulator(torch.nn.Module):
    r"""Abstract base class for forward-model simulators.

    A simulator defines the forward model. Subclasses override
    :meth:`forward`, mapping inputs drawn from a prior to simulated
    outputs::

        simulator = MySimulator()
        inputs = prior.sample(torch.Size([10]))  # 10 samples from the prior
        outputs = simulator(inputs)
    """

    def __init__(self):
        super(Simulator, self).__init__()

    def forward(self, inputs):
        r"""Run the forward model on ``inputs``.

        Note:
            Must be overridden by every subclass; subclasses should document
            the expected format of ``inputs``.
        """
        raise NotImplementedError

    def __del__(self):
        # Release simulator resources when the object is garbage-collected.
        self.terminate()

    def terminate(self):
        r"""Terminate the simulator and clean up possible contexts.

        Note:
            The default implementation does nothing; override in subclasses
            whose simulator state requires a graceful exit.
        """
        pass
class ParallelSimulator(Simulator):
    """Wraps another simulator and evaluates chunks of the input batch in a
    multiprocessing worker pool."""

    def __init__(self, simulator, workers=2):
        # `simulator` must be picklable so workers can receive it;
        # `workers` is both the pool size and the chunking factor.
        super(ParallelSimulator, self).__init__()
        self.pool = Pool(processes=workers)
        self.simulator = simulator
        self.workers = workers

    def _prepare_arguments(self, inputs):
        # Split the batch into roughly `workers` equal chunks and pair each
        # chunk with the wrapped simulator for the pool workers.
        # NOTE(review): assumes `inputs` is a tensor with a leading batch
        # dimension -- confirm with callers.
        arguments = []
        chunks = inputs.shape[0] // self.workers
        if chunks == 0:
            chunks = 1  # fewer inputs than workers: one chunk per input
        chunks = inputs.split(chunks, dim=0)
        for chunk in chunks:
            a = (self.simulator, chunk)
            arguments.append(a)
        return arguments

    def forward(self, inputs):
        # Fan out the chunks to the pool, then reassemble in input order.
        arguments = self._prepare_arguments(inputs)
        outputs = self.pool.map(self._simulate, arguments)
        outputs = torch.cat(outputs, dim=0)
        return outputs

    def terminate(self):
        # Shut down the worker pool and propagate to the wrapped simulator.
        self.pool.close()
        del self.pool
        self.pool = None
        self.simulator.terminate()

    @staticmethod
    def _simulate(arguments):
        # Runs inside a worker process.
        simulator, inputs = arguments
        return simulator(inputs)
|
nilq/baby-python
|
python
|
import re
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.utils.common import to_str
from localstack.services.generic_proxy import ProxyListener
class ProxyListenerIAM(ProxyListener):
    """Proxy listener that rewrites IAM API responses so that ARNs carry
    the LocalStack test account ID instead of the backend's hardcoded one."""

    def return_response(self, method, path, data, headers, response):
        # fix hardcoded account ID in ARNs returned from this API
        if response.content:
            content = to_str(response.content)
            pattern = r'<Arn>\s*arn:aws:iam::([0-9]+):([^<]+)</Arn>'
            replacement = r'<Arn>arn:aws:iam::%s:\2</Arn>' % TEST_AWS_ACCOUNT_ID
            response._content = re.sub(pattern, replacement, content)
            # NOTE(review): this assigns an int; header values are usually
            # expected to be strings -- confirm downstream handling.
            response.headers['content-length'] = len(response._content)
# Singleton listener instance picked up by the IAM service proxy.
UPDATE_IAM = ProxyListenerIAM()
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, print_function
from django.conf.urls import patterns, url
from .action_endpoint import SlackActionEndpoint
from .event_endpoint import SlackEventEndpoint
from .link_identity import SlackLinkIdentitiyView
urlpatterns = patterns(
    "",
    # Slack interactive-message actions (buttons, menus).
    url(r"^action/$", SlackActionEndpoint.as_view()),
    # Slack Events API callbacks.
    url(r"^event/$", SlackEventEndpoint.as_view()),
    # Identity-linking flow; NOTE(review): the imported view name is
    # misspelled ("Identitiy") at its definition site -- fix there, not here.
    url(
        r"^link-identity/(?P<signed_params>[^\/]+)/$",
        SlackLinkIdentitiyView.as_view(),
        name="sentry-integration-slack-link-identity",
    ),
)
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
path = "./underexposed.jpg"
def _mask(img):
    """Build a blurred, inverted-luminance mask of *img* (BGR image).

    Dark regions of the input produce high mask values, so the mask can
    drive a brightening correction for under-exposed areas.
    """
    inverted = cv2.bitwise_not(img)
    grey = cv2.cvtColor(inverted, cv2.COLOR_BGR2GRAY)
    return cv2.GaussianBlur(grey, (15, 15), cv2.BORDER_DEFAULT)
def _local_contrast_correction(img, mask):
exponent = np.repeat((2 ** ( (np.full((mask.shape), 128.) - mask) / 128))[:, :, np.newaxis],
3,
2)
out = 255 * (img / 255.) ** exponent
return out.astype(np.uint8)
if __name__ == "__main__":
    # Demo: load the image, show it alongside its correction mask, then
    # apply the local contrast correction and show the result.
    # Each cv2.waitKey() blocks until a key is pressed in the window.
    img = cv2.imread(path)
    mask = _mask(img)
    cv2.imshow("Original", img)
    cv2.imshow("Mask", mask)
    cv2.waitKey()
    out = _local_contrast_correction(img, mask)
    cv2.imshow("Corrected", out)
    cv2.waitKey()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Launch a distributed job
"""
import argparse
import os, sys
import signal
import logging
# Make the bundled dmlc_tracker package (vendored under ./tracker)
# importable from this script's directory.
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, "./tracker"))
#print sys.path
def dmlc_opts(opts):
    """Convert mxnet's launch options to dmlc_tracker options.

    Args:
        opts: argparse.Namespace produced by ``main()``'s parser.

    Returns:
        The options object built by ``dmlc_tracker.opts.get_opts``.

    Raises:
        ImportError: if the dmlc_tracker submodule is not checked out.
    """
    args = ['--num-workers', str(opts.num_workers),
            '--num-servers', str(opts.num_servers),
            '--cluster', opts.launcher,
            '--host-file', opts.hostfile,
            '--sync-dst-dir', opts.sync_dst_dir]
    args += opts.command
    try:
        # NAMING FIX: the original imported this module as ``opts``,
        # shadowing the function parameter, and then assigned the result
        # to a local named ``dmlc_opts``, shadowing the function itself.
        from dmlc_tracker import opts as tracker_opts
    except ImportError:
        print("Can't load dmlc_tracker package. Perhaps you need to run")
        print("    git submodule update --init --recursive")
        raise
    return tracker_opts.get_opts(args)
def main():
    """Parse launch options and submit the distributed job through the
    dmlc_tracker backend matching the selected launcher."""
    parser = argparse.ArgumentParser(description='Launch a distributed job')
    parser.add_argument('-n', '--num-workers', required=True, type=int,
                        help = 'number of worker nodes to be launched')
    parser.add_argument('-s', '--num-servers', type=int,
                        help = 'number of server nodes to be launched, \
          in default it is equal to NUM_WORKERS')
    parser.add_argument('-H', '--hostfile', type=str,
                        help = 'the hostfile of slave machines which will run \
          the job. Required for ssh and mpi launcher')
    parser.add_argument('--sync-dst-dir', type=str,
                        help = 'if specificed, it will sync the current \
          directory into slave machines\'s SYNC_DST_DIR if ssh \
          launcher is used')
    parser.add_argument('--launcher', type=str, default='ssh',
                        choices = ['local', 'ssh', 'mpi', 'sge', 'yarn'],
                        help = 'the launcher to use')
    parser.add_argument('command', nargs='+',
                        help = 'command for launching the program')
    # Unrecognized flags are forwarded as part of the user command.
    args, unknown = parser.parse_known_args()
    args.command += unknown
    if args.num_servers is None:
        args.num_servers = args.num_workers
    args = dmlc_opts(args)
    # Without a host file only yarn/local/sge submission is supported;
    # with one, ssh and mpi are supported.
    if args.host_file is None or args.host_file == 'None':
      if args.cluster == 'yarn':
          from dmlc_tracker import yarn
          yarn.submit(args)
      elif args.cluster == 'local':
          from dmlc_tracker import local
          local.submit(args)
      elif args.cluster == 'sge':
          from dmlc_tracker import sge
          sge.submit(args)
      else:
          raise RuntimeError('Unknown submission cluster type %s' % args.cluster)
    else:
      if args.cluster == 'ssh':
          from dmlc_tracker import ssh
          ssh.submit(args)
      elif args.cluster == 'mpi':
          from dmlc_tracker import mpi
          mpi.submit(args)
      else:
          raise RuntimeError('Unknown submission cluster type %s' % args.cluster)
def signal_handler(signal, frame):
    """SIGINT handler: log the shutdown and exit with status 0.

    The first parameter keeps the name ``signal`` (shadowing the module)
    to preserve the original signature.
    """
    logging.info('Stop launcher')  # typo fix: message was 'Stop luancher'
    sys.exit(0)
if __name__ == '__main__':
    # Configure root logging and install a Ctrl-C handler before
    # delegating to main().
    fmt = '%(asctime)s %(levelname)s %(message)s'
    logging.basicConfig(format=fmt, level=logging.INFO)
    signal.signal(signal.SIGINT, signal_handler)
    main()
|
nilq/baby-python
|
python
|
import logging
import copy
import numpy as np
from scipy.linalg import expm
from .population import Population
from spike_swarm_sim.utils import eigendecomposition, normalize
from spike_swarm_sim.algorithms.evolutionary.species import Species
from ..operators.crossover import *
from ..operators.mutation import *
from ..operators.selection import *
#! OJO (prov) to test NEAT: extracted from https://github.com/CodeReclaimers/neat-python/blob/c2b79c88667a1798bfe33c00dd8e251ef8be41fa/neat/reproduction.py#L84
def compute_spawn(species, pop_size, min_species_size):
    """Compute the proper number of offspring per species (proportional to fitness)."""
    # Fitness of each species, adjusted by its size (fitness sharing).
    adjusted = [sp.mean_fitness['raw'] / sp.num_genotypes for sp in species]
    total_adjusted = sum(adjusted)
    current_sizes = [sp.num_genotypes for sp in species]
    spawn_amounts = []
    for fitness_share, size in zip(adjusted, current_sizes):
        if total_adjusted > 0:
            target = max(min_species_size, fitness_share / total_adjusted * pop_size)
        else:
            target = min_species_size
        # Move each species halfway toward its target size.
        delta = (target - size) * 0.5
        step = int(round(delta))
        spawn = size
        if abs(step) > 0:
            spawn += step
        elif delta > 0:
            spawn += 1
        elif delta < 0:
            spawn -= 1
        spawn_amounts.append(spawn)
    # Normalize the spawn amounts so that the next generation is roughly
    # the population size requested by the user.
    norm = pop_size / sum(spawn_amounts)
    spawn_amounts = [max(min_species_size, int(round(amount * norm)))
                     for amount in spawn_amounts]
    # Nudge random species by +-1 until the total matches exactly.
    while sum(spawn_amounts) != pop_size:
        spawn_amounts[np.random.choice(len(species))] += (1, -1)[sum(spawn_amounts) > pop_size]
    return spawn_amounts
class NEAT_Population(Population):
    """Population evolved with NEAT (NeuroEvolution of Augmenting Topologies).

    Genotypes are dicts with 'nodes', 'connections' and a 'species' tag;
    they are grouped into species by genetic compatibility, reproduce in
    proportion to species fitness, and may grow new nodes/connections
    through mutation, tracked by global innovation numbers.
    """
    def __init__(self, *args, p_weight_mut=0.75, p_node_mut=0.08, p_conn_mut=0.1,
                 compatib_thresh=2, c1=1, c2=1, c3=2, species_elites=0, **kwargs):
        super(NEAT_Population, self).__init__(*args, **kwargs)
        # Mutation probabilities: weight perturbation, add-node, add-connection.
        self.p_weight_mut = p_weight_mut
        self.p_node_mut = p_node_mut
        self.p_conn_mut = p_conn_mut
        # Speciation: compatibility threshold and distance coefficients
        # (passed through to Species; presumably c1/c2 weigh topology
        # mismatches and c3 weight differences -- see Species for details).
        self.compatib_thresh = compatib_thresh
        self.c1 = c1
        self.c2 = c2
        self.c3 = c3
        self.species_elites = species_elites
        self.species_count = 1
        # list of existing species. 1 species at first.
        self.species = []
        self.input_nodes = [] #* Cannot be altered by NEAT
        self.population = []
        #* Global pointer of gene innovations
        self.current_innovation = 0
        #* Dict mapping (pre, post) tuple connections to innovation numbers.
        #* It is used for assigning same innovations to mutations already occured in
        #* the evolution.
        self.innovation_history = {}
    def step(self, fitness_vector, generation):
        """Advance one generation: update per-species fitness, allocate
        offspring per species, select/cross/mutate, then re-speciate.
        ==================================================================================
        - Args:
            fitness_vector [np.ndarray or list]: array of computed fitness values.
            generation [int]: index of the current generation.
        - Returns: None
        ==================================================================================
        """
        offspring = []
        self.best = copy.deepcopy(self.population[np.argmax(fitness_vector)])
        #* Update species fitness statistics
        for spc in self.species:
            spc_fitness = [ft for ft, gt in zip(fitness_vector, self.population) if gt['species'] == spc.id]
            spc.update_stats(np.array(spc_fitness))
        #* Compute the number of offspring for each species
        species_offsprings = compute_spawn(self.species, self.pop_size, 2)
        #* Crossover in-between species individuals.
        for n_offspring, spc in zip(species_offsprings, self.species):
            #* Filter out genotypes from species.
            spc_fitness, spc_genotypes = zip(*filter(lambda x: x[1]['species'] == spc.id, zip(fitness_vector, self.population)))
            #* Apply species elitism: copy the best genotypes unchanged.
            if self.species_elites > 0:
                for _, (elite_gnt, _) in zip(range(self.species_elites), sorted(zip(spc_genotypes, spc_fitness), key=lambda x: x[1])[::-1]):
                    n_offspring -= 1
                    offspring.append(copy.deepcopy(elite_gnt))
            #* Truncate bests (top ~30% become the mating pool)
            n_sel = max(1, round(0.3 * len(spc_genotypes)))
            parents, fitness_parents = truncation_selection(spc_genotypes, np.array(spc_fitness), n_sel)
            #* Random Mating (OJO REPLACEMENT)
            parents_mating = np.random.choice(n_sel, size=2 * n_offspring)
            parents = [parents[idx] for idx in parents_mating] # shuffle parents
            fitness_parents = [fitness_parents[idx] for idx in parents_mating]
            #* NEAT Crossover
            offspring.extend(neat_crossover(parents, fitness_parents))
        #* NEAT Mutation (applied to the whole offspring pool; updates the
        #* global innovation counter and history as new genes appear)
        offspring, self.current_innovation, self.innovation_history = neat_mutation(
                offspring, self.input_nodes, copy.deepcopy(self.current_innovation),
                copy.deepcopy(self.innovation_history), self.objects, p_weight_mut=self.p_weight_mut,
                p_node_mut=self.p_node_mut, p_conn_mut=self.p_conn_mut)
        #* Update popultation
        self.population = offspring
        if len(self.population) != self.pop_size:
            logging.error('Population Size altered.')
        #* Speciation
        self.update_species(generation)
        logging.info('Num. species is {}'.format(len(self.species)))
        # #* Adaptive species thresh.
        # num_tar_species = 15
        # if len(self.species) != num_tar_species:
        #     self.compatib_thresh += 0.1 * (-1, 1)[len(self.species) > num_tar_species]
        #     self.compatib_thresh = np.clip(self.compatib_thresh, a_min=0.5, a_max=5)
        # for sp in self.species:
        #     sp.compatib_thresh = self.compatib_thresh
    def update_species(self, generation):
        #* Assign Species. Use representatives from the previous generation.
        #* If a new species is created the current representative is the genotype
        #* that created it.
        for spc in self.species:
            if len(spc.representative) > 0:
                # Re-center the representative on the closest current genotype.
                compatible, distances = zip(*[spc.compatibility(gnt) for gnt in self.population])
                spc.representative = copy.deepcopy(self.population[np.argmin(distances)])
            spc.num_genotypes = 0
        for genotype in self.population:
            compatible, distances = zip(*[spc.compatibility(genotype) for spc in self.species])
            if not any(compatible): #* create new species
                self.species_count += 1
                new_species = Species(self.species_count, generation, compatib_thresh=self.compatib_thresh,
                            c1=self.c1, c2=self.c2, c3=self.c3)
                new_species.num_genotypes += 1
                new_species.representative = copy.deepcopy(genotype)
                self.species.append(new_species)
                genotype['species'] = new_species.id
            else:
                # Assign to the closest compatible species.
                compatible_species = np.arange(len(self.species))[list(compatible)]
                compatible_distances = np.array(distances)[list(compatible)]
                species_idx, _ = sorted(zip(compatible_species, compatible_distances), key=lambda x: x[1])[0]
                self.species[species_idx].num_genotypes += 1
                genotype['species'] = self.species[species_idx].id
        #* check extintion
        for i, species in enumerate(self.species):
            if species.num_genotypes == 0:
                logging.info('Extint Species {}'.format(species.id))
                self.species.pop(i)
            # else:
            #     species.representative = copy.deepcopy(self.population[np.random.choice(\
            #         [n for n, g in enumerate(self.population) if g['species'] == species.id])])
    @property
    def min_vector(self):
        # Not meaningful for NEAT genotypes (variable-length genomes).
        raise NotImplementedError
    @property
    def max_vector(self):
        # Not meaningful for NEAT genotypes (variable-length genomes).
        raise NotImplementedError
    def initialize(self, interface):
        """ Initializes the parameters and population of NEAT.
        =====================================================================
        - Args:
            interface [GeneticInterface] : Phenotype to genotype interface of
                    Evolutionary algs.
        - Returns: None
        =====================================================================
        """
        self.species = [Species(self.species_count, 0, compatib_thresh=self.compatib_thresh,
                        c1=self.c1, c2=self.c2, c3=self.c3)]
        self.input_nodes = [*interface.neural_net.graph['inputs'].keys()]
        #* Only initialize weights randomly, the structure is always the same.
        for n in range(self.pop_size):
            interface.initGenotype(self.objects, self.min_vals, self.max_vals)
            #* Initialize genotype (ANN architectural traits)
            self.population.append({
                'species' : self.species[0].id,
                'nodes' : copy.deepcopy(interface.neural_net.graph['neurons']),
                'connections' : copy.deepcopy(interface.neural_net.graph['synapses'])
            })
            #* Initialize genotype (ANN parameters and weights traits)
            for query, min_val, max_val in zip(self.objects, self.min_vals, self.max_vals):
                gnt_segment = interface.toGenotype([query], [min_val], [max_val])
                gene_type = {'synapses' : 'connections', 'neurons' : 'nodes'}.get(query.split(':')[0], 'connections')
                variable = {'weights' : 'weight'}.get(query.split(':')[1], query.split(':')[1])
                for gene, value in zip(self.population[-1][gene_type].values(), gnt_segment):
                    gene[variable] = value
            #* Assign innovation numbers (created for the first genotype,
            #* then reused for the identical structure of the others)
            for i, conn in enumerate(self.population[-1]['connections'].values()):
                if n == 0:
                    conn['innovation'] = self.current_innovation
                    self.innovation_history[(conn['pre'], conn['post'])] = self.current_innovation
                    self.current_innovation += 1
                else:
                    conn['innovation'] = copy.deepcopy(self.innovation_history[(conn['pre'], conn['post'])])
        #* Initial Speciation
        self.update_species(0)
        # self.species[0].representative = copy.deepcopy(self.population[np.random.randint(self.pop_size)])
        # self.species[0].num_genotypes = self.pop_size
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.7 on 2019-11-30 04:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an ``image`` field (with a default avatar) to the Business model."""
    dependencies = [
        ('neighbourhood', '0005_neighbourhood_image'),
    ]
    operations = [
        migrations.AddField(
            model_name='business',
            name='image',
            # Default picture used until a business uploads its own avatar.
            field=models.ImageField(default='business.jpg', upload_to='business_avatars'),
        ),
    ]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import exifread
import logging
class Exif2Dict:
    """Read EXIF metadata from an image file and expose it as plain dicts."""

    def __init__(self, filename):
        """Open *filename* and cache its EXIF tags.

        Raises:
            OSError: if the file cannot be opened (logged, then re-raised).
        """
        self.__logger = logging.getLogger("exif2dict.Exif2Dict")
        self.__tags = {}
        try:
            with open(filename, 'rb') as fh:
                self.__tags = exifread.process_file(fh, details=False)  # reads EXIF data from target file
                #####
                # INCLUDE IPTC READ HERE
                #####
        except OSError as e:
            self.__logger.warning("Can't open file: \"%s\"", filename)
            # BUG FIX: the original logged e.args[1], which raises IndexError
            # for OSErrors that carry no strerror; log the exception itself.
            self.__logger.warning("Cause: %s", e)
            raise

    def has_exif(self):
        """Return True if the file contained any EXIF tags."""
        return bool(self.__tags)

    def __get_if_exist(self, key):
        # Single dict lookup instead of the previous contains-then-index.
        return self.__tags.get(key)

    def __convert_to_degress(self, value):
        """Convert a GPS ratio triple (deg, min, sec) to decimal degrees."""
        d = float(value.values[0].num) / float(value.values[0].den)
        m = float(value.values[1].num) / float(value.values[1].den)
        s = float(value.values[2].num) / float(value.values[2].den)
        return d + (m / 60.0) + (s / 3600.0)

    def get_locaction(self):
        """Return {'latitude', 'longitude'} in decimal degrees (None if absent).

        NOTE: the method name keeps its original misspelling for caller
        compatibility.
        """
        gps = {"latitude": None, "longitude": None}
        gps_latitude = self.__get_if_exist('GPS GPSLatitude')
        gps_latitude_ref = self.__get_if_exist('GPS GPSLatitudeRef')
        gps_longitude = self.__get_if_exist('GPS GPSLongitude')
        gps_longitude_ref = self.__get_if_exist('GPS GPSLongitudeRef')
        if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
            lat = self.__convert_to_degress(gps_latitude)
            # Southern latitudes are negative.
            if gps_latitude_ref.values[0] != 'N':
                lat = 0 - lat
            gps["latitude"] = lat
            lon = self.__convert_to_degress(gps_longitude)
            # Western longitudes are negative.
            if gps_longitude_ref.values[0] != 'E':
                lon = 0 - lon
            gps["longitude"] = lon
        return gps

    def get_exif(self, key):
        """Return {key: printable value} for one EXIF tag, or {} if absent."""
        exif = {}
        val = self.__get_if_exist(key)
        if val:
            if key == 'EXIF FNumber':  # report the f-number as a ratio
                val = val.values[0].num / val.values[0].den
            else:
                val = val.printable
            exif[key] = val
        return exif
|
nilq/baby-python
|
python
|
#GUI Stuff
from tkinter import *
#GPIO setup for non-expander ports
import RPi.GPIO as GPIO
import time
#port Expander stuff
import board
import busio
from digitalio import Direction
from adafruit_mcp230xx.mcp23008 import MCP23008
#Port expander setup: the MCP23008 hangs off the Pi's I2C bus and drives
#eight additional note outputs.
i2c = busio.I2C(board.SCL, board.SDA)
mcp = MCP23008(i2c)
#Port expander declarations (expander pin number -> note output)
fsharp6 = mcp.get_pin(7)
gsharp6 = mcp.get_pin(6)
asharp6 = mcp.get_pin(5)
csharp7 = mcp.get_pin(4)
dsharp7 = mcp.get_pin(3)
fsharp7 = mcp.get_pin(2)
gsharp7 = mcp.get_pin(1)
asharp7 = mcp.get_pin(0)
#Port expanders as output
fsharp6.direction = Direction.OUTPUT
gsharp6.direction = Direction.OUTPUT
asharp6.direction = Direction.OUTPUT
csharp7.direction = Direction.OUTPUT
dsharp7.direction = Direction.OUTPUT
fsharp7.direction = Direction.OUTPUT
gsharp7.direction = Direction.OUTPUT
asharp7.direction = Direction.OUTPUT
#Window declaration
root = Tk()
#Window specifications
root.title("Xylo Ren Control")
root.geometry('300x250')
#Note port definitions: BCM GPIO numbers for the directly-wired notes
gsharp5 = 4
asharp5 = 17
csharp6 = 27
dsharp6 = 22
g5 = 10
a5 = 9
b5 = 11
c6 = 0
d6 = 5
e6 = 6
f6 = 13
g6 = 19
a6 = 26
b6 = 21
c7 = 20
d7 = 16
e7 = 12
f7 = 1
g7 = 23
a7 = 18
b7 = 25
c8 = 24
#Labels defined
welcomeTxt = Label(root, text = "Welcome!")
lbl = Label(root, text = "Choose a song below to play!")
emptyTxt = Label(root, text = " ")
#Functions
def closeWindow():
    """Destroy the Tk root window, ending the application."""
    root.destroy()
def portDeclarations():
    """Configure every directly-wired GPIO note pin as an output.

    Uses BCM (Broadcom) pin numbering; warnings are silenced because the
    songs re-run this setup each time they start.
    """
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    for pin in (g5, gsharp5, a5, asharp5, b5,
                c6, csharp6, d6, dsharp6, e6, f6, g6, a6, b6,
                c7, d7, e7, f7, g7, a7, b7, c8):
        GPIO.setup(pin, GPIO.OUT)
#PlayNote passes in note (a GPIO pin number or an MCP23008 pin object)
#and duration (total note length in seconds, including the strike time).
def playNote(note, duration):
    """Strike one key for 0.1 s, then rest for the remainder of *duration*.

    Expander pins are driven through their ``.value`` attribute, direct
    GPIO pins through ``GPIO.output``.
    """
    # Pins that live on the I2C port expander.
    expander_pins = (fsharp6, gsharp6, asharp6, csharp7,
                     dsharp7, fsharp7, gsharp7, asharp7)
    # BUG FIX: a duration below the 0.1 s strike window previously made
    # time.sleep raise ValueError on a negative argument; clamp to zero.
    rest = max(duration - 0.1, 0)
    if note in expander_pins:
        note.value = True
        time.sleep(0.1)
        note.value = False
        time.sleep(rest)
    else:
        GPIO.output(note, GPIO.HIGH)
        time.sleep(0.1)
        GPIO.output(note, GPIO.LOW)
        time.sleep(rest)
#Song 1 is the Imperial March (Star Wars)
def Song1():
    """Play the Imperial March three times through.

    Score entries are (note, seconds); a note of None is a rest.
    """
    portDeclarations()
    score = [
        # Measure 3
        (g6, 0.624), (g6, 0.624), (g6, 0.624), (dsharp6, 0.468), (asharp6, 0.148),
        # Measure 4
        (g6, 0.624), (dsharp6, 0.468), (asharp6, 0.148), (g6, 1.249),
        # Measure 5
        (d7, 0.624), (d7, 0.624), (d7, 0.624), (dsharp7, 0.468), (asharp6, 0.148),
        # Measure 6
        (fsharp6, 0.624), (dsharp6, 0.468), (asharp6, 0.148), (g6, 1.249),
        # Measure 7
        (g7, 0.624), (g6, 0.468), (g6, 0.148), (g7, 0.624), (fsharp7, 0.468), (f7, 0.148),
        # Measure 8
        (e7, 0.148), (dsharp7, 0.148), (e7, 0.312), (None, 0.312),
        (gsharp6, 0.312), (csharp7, 0.624), (c7, 0.468), (b6, 0.148),
        # Measure 9
        (asharp6, 0.148), (a6, 0.148), (asharp6, 0.312), (None, 0.312),
        (dsharp6, 0.312), (fsharp6, 0.624), (dsharp6, 0.468), (g6, 0.148),
        # Measure 10
        (asharp6, 0.624), (g6, 0.468), (asharp6, 0.148), (d7, 1.249),
        # Measure 11
        (g7, 0.624), (g6, 0.468), (g6, 0.148), (g7, 0.624), (fsharp7, 0.468), (f7, 0.148),
        # Measure 12
        (e7, 0.148), (dsharp7, 0.148), (e7, 0.312), (None, 0.312),
        (gsharp6, 0.312), (csharp7, 0.624), (c7, 0.468), (b6, 0.148),
        # Measure 13
        (asharp6, 0.148), (a6, 0.148), (asharp6, 0.312), (None, 0.312),
        (dsharp6, 0.312), (fsharp6, 0.624), (dsharp6, 0.468), (asharp6, 0.148),
        # Measure 14
        (g6, 0.624), (dsharp6, 0.468), (asharp6, 0.148), (g6, 1.249),
    ]
    for _ in range(3):
        for note, duration in score:
            if note is None:
                time.sleep(duration)
            else:
                playNote(note, duration)
    GPIO.cleanup()
    returnMenu()
#Song 2 is Ode to Joy by Beethoven
def Song2():
    """Play Beethoven's Ode to Joy once through.

    Score entries are (note, seconds).
    """
    portDeclarations()
    score = [
        # Pickup (measure 1)
        (e6, 0.857), (e6, 0.857), (f6, 0.857), (g6, 0.857),
        # Measure 2
        (g6, 0.857), (f6, 0.857), (e6, 0.857), (d6, 0.857),
        # Measure 3
        (c6, 0.857), (c6, 0.857), (d6, 0.857), (e6, 0.857),
        # Measure 4
        (e6, 1.31), (d6, 0.429), (d6, 1.63),
        # Measure 5
        (e6, 0.857), (e6, 0.857), (f6, 0.857), (g6, 0.857),
        # Measure 6
        (g6, 0.857), (f6, 0.857), (e6, 0.857), (d6, 0.857),
        # Measure 7
        (c6, 0.857), (c6, 0.857), (d6, 0.857), (e6, 0.857),
        # Measure 8
        (d6, 1.31), (c6, 0.429), (c6, 1.63),
        # Measure 9
        (d6, 0.857), (d6, 0.857), (e6, 0.857), (c6, 0.857),
        # Measure 10
        (d6, 0.857), (e6, 0.429), (f6, 0.429), (e6, 0.857), (c6, 0.857),
        # Measure 11
        (d6, 0.857), (e6, 0.429), (f6, 0.429), (e6, 0.857), (d6, 0.857),
        # Measure 12
        (c6, 0.857), (d6, 0.832), (g5, 1.714),
        # Measure 13
        (d6, 0.857), (d6, 0.857), (e6, 0.857), (c6, 0.857),
        # Measure 14
        (d6, 0.857), (e6, 0.429), (f6, 0.429), (e6, 0.857), (c6, 0.857),
        # Measure 15
        (d6, 0.857), (e6, 0.429), (f6, 0.429), (e6, 0.857), (d6, 0.857),
        # Measure 16
        (c6, 0.857), (d6, 0.832), (g5, 1.714),
        # Measure 17
        (e6, 0.832), (e6, 0.832), (f6, 0.857), (g6, 0.857),
        # Measure 18
        (g6, 0.857), (f6, 0.857), (e6, 0.857), (d6, 0.857),
        # Measure 19
        (c6, 0.857), (c6, 0.857), (d6, 0.857), (e6, 0.857),
        # Measure 20
        (e6, 1.31), (d6, 0.429), (d6, 1.63),
        # Measure 21
        (e6, 0.857), (e6, 0.857), (f6, 0.857), (g6, 0.857),
        # Measure 22
        (g6, 0.857), (f6, 0.857), (e6, 0.857), (d6, 0.857),
        # Measure 23
        (c6, 0.857), (c6, 0.857), (d6, 0.857), (e6, 0.857),
        # Measure 24
        (d6, 0.857), (c6, 0.300), (c6, 1.63),
    ]
    for note, duration in score:
        playNote(note, duration)
    GPIO.cleanup()
    returnMenu()
#Song 3 is the Nocturne in Eb Major, Op. 9 No. 2, by Chopin
def Song3():
    """Play Chopin's Nocturne Op. 9 No. 2 once through.

    Score entries are (note, seconds); a note of None is a rest.
    """
    portDeclarations()
    score = [
        # Pickup (measure 1)
        (asharp5, 0.47),
        # Measure 2
        (g6, 1.88), (f6, 0.47), (g6, 0.47), (f6, 1.43), (dsharp6, 0.89), (asharp5, 0.48),
        # Measure 3
        (g6, 0.958), (c6, 0.418), (c7, 0.958), (g6, 0.477),
        (asharp6, 1.435), (gsharp6, 0.958), (g6, 0.444),
        # Measure 4
        (f6, 1.41), (g6, 0.958), (d6, 0.444), (dsharp6, 1.41), (c6, 1.41),
        # Measure 5
        (asharp5, 0.47), (d7, 0.47), (c7, 0.47), (asharp6, 0.23), (gsharp6, 0.23),
        (g6, 0.23), (gsharp6, 0.23), (c6, 0.23), (d6, 0.23), (dsharp6, 1.33),
        (None, 1.013), (asharp5, 0.47),
        # Measure 6
        (g6, 1.43), (f6, 0.23), (g6, 0.23), (f6, 0.23), (e6, 0.23), (f6, 0.23),
        (g6, 0.23), (f6, 0.23), (dsharp6, 1.19), (f6, 0.33), (d6, 0.23),
        (dsharp6, 0.23), (f6, 0.23),
        # Measure 7
        (g6, 0.23), (b5, 0.23), (c6, 0.23), (csharp6, 0.23), (c6, 0.23), (f6, 0.23),
        (e6, 0.23), (gsharp6, 0.23), (g6, 0.23), (csharp6, 0.23), (c6, 0.23),
        (g6, 0.23), (asharp6, 1.43), (gsharp6, 0.444), (g6, 0.444),
        # Measure 8
        (f6, 0.932), (None, 0.47), (g6, 0.23), (None, 0.23), (g6, 0.47), (None, 0.47),
        (d6, 1.41), (dsharp6, 1.38), (c6, 1.41),
        # Measure 9
        (asharp5, 0.47), (d7, 0.47), (c7, 0.47), (asharp6, 0.23), (gsharp6, 0.23),
        (g6, 0.23), (gsharp6, 0.23), (c6, 0.23), (d6, 0.23), (dsharp6, 1.88),
        (d6, 0.47), (dsharp6, 0.47),
        # Measure 10
        (f6, 1.41), (g6, 0.958), (f6, 0.444), (f6, 1.43), (c6, 1.41),
        # Measure 11
        (dsharp6, 0.444), (dsharp6, 0.444), (dsharp6, 0.444), (dsharp6, 0.444),
        (d6, 0.23), (dsharp6, 0.23), (f6, 0.466), (dsharp6, 1.41), (asharp5, 1.41),
        # Measure 12
        (asharp6, 1.43), (a6, 0.958), (g6, 0.444), (f6, 1.41), (d6, 1.41),
        # Measure 13
        (dsharp6, 1.43), (d6, 0.444), (c6, 0.444), (d6, 0.444), (asharp5, 0.444),
        (b5, 0.444), (b5, 0.444), (c6, 0.444), (c6, 0.444), (d6, 0.444),
        # Measure 14
        (g6, 0.958), (a5, 0.23), (asharp5, 0.23), (b5, 0.23), (asharp5, 0.23),
        (csharp6, 0.23), (d6, 0.23), (g6, 0.444), (f6, 0.958), (dsharp6, 0.705),
        (f6, 0.23), (dsharp6, 0.23), (d6, 0.23), (dsharp6, 0.23), (f6, 0.23),
        # Measure 15
        (g6, 0.23), (b5, 0.23), (c6, 0.23), (csharp6, 0.23), (c6, 0.23), (f6, 0.23),
        (e6, 0.23), (gsharp6, 0.23), (g6, 0.23), (csharp7, 0.23), (c7, 0.23),
        (g6, 0.23), (asharp6, 1.43), (gsharp6, 0.958), (g6, 0.444),
        # Measure 16
        (f6, 0.958), (None, 0.444), (g6, 0.958), (d6, 0.444), (dsharp6, 1.41), (c6, 1.41),
        # Measure 17
        (asharp5, 0.444), (d7, 0.444), (csharp7, 0.444), (c7, 0.135), (b6, 0.135),
        (asharp6, 0.135), (a6, 0.135), (gsharp6, 0.135), (f6, 0.135), (d6, 0.135),
        (b5, 0.135), (asharp5, 0.135), (d6, 0.135), (g6, 0.135), (f6, 0.135),
        (dsharp6, 1.88),
    ]
    for note, duration in score:
        if note is None:
            time.sleep(duration)
        else:
            playNote(note, duration)
    GPIO.cleanup()
    returnMenu()
def Song4():
    """Play In the Hall of the Mountain King (Grieg) twice through.

    Score entries are (note, seconds); a note of None is a rest.
    Several measures repeat verbatim, so each distinct measure is named
    once and the score is assembled from them in performance order.
    """
    portDeclarations()
    m1 = [(b5, 0.304), (csharp6, 0.304), (d6, 0.304), (e6, 0.304),
          (fsharp6, 0.304), (d6, 0.304), (fsharp6, 0.608)]
    m2 = [(f6, 0.304), (csharp6, 0.304), (f6, 0.608),
          (e6, 0.304), (c6, 0.304), (e6, 0.566)]
    m3 = [(b5, 0.304), (csharp6, 0.304), (d6, 0.304), (e6, 0.304),
          (fsharp6, 0.304), (d6, 0.304), (fsharp6, 0.304), (b6, 0.304)]
    m4 = [(a6, 0.304), (fsharp6, 0.304), (d6, 0.304), (fsharp6, 0.304), (a6, 1.13)]
    m9 = [(fsharp6, 0.304), (gsharp6, 0.304), (asharp6, 0.304), (b6, 0.304),
          (csharp7, 0.304), (asharp6, 0.304), (csharp7, 0.608)]
    m10 = [(d7, 0.304), (asharp6, 0.304), (d7, 0.608),
           (csharp7, 0.304), (asharp6, 0.304), (csharp7, 0.566)]
    m12 = [(d7, 0.304), (asharp6, 0.304), (d7, 0.608), (csharp7, 1.13)]
    m17 = [(b6, 0.304), (csharp7, 0.304), (d7, 0.304), (e7, 0.304),
           (fsharp7, 0.304), (d7, 0.304), (fsharp7, 0.608)]
    m18 = [(f7, 0.304), (csharp7, 0.304), (f7, 0.608),
           (e7, 0.304), (c7, 0.304), (e7, 0.566)]
    m19 = [(b6, 0.304), (csharp7, 0.304), (d7, 0.304), (e7, 0.304),
           (fsharp7, 0.304), (d7, 0.304), (fsharp7, 0.304), (b7, 0.304)]
    m20 = [(a7, 0.304), (fsharp7, 0.304), (d7, 0.304), (fsharp7, 0.304), (a7, 1.13)]
    m21 = [(None, 0.304), (asharp7, 0.114), (b7, 0.306), (None, 1.13)]
    m45 = [(asharp6, 0.304), (c7, 0.304), (csharp7, 0.304), (dsharp7, 0.304),
           (f7, 0.304), (csharp7, 0.304), (f7, 0.304), (asharp7, 0.304)]
    m46 = [(a7, 0.304), (f7, 0.304), (a7, 0.304), (c8, 0.304), (asharp7, 1.13)]
    # Measures 1-8, 9-16, 17-22, then 45-46, matching the original order.
    score = (m1 + m2 + m3 + m4 + m1 + m2 + m3 + m4
             + m9 + m10 + m9 + m12 + m9 + m10 + m9 + m12
             + m17 + m18 + m19 + m20 + m21 + m21 + m45 + m46)
    for _ in range(2):
        for note, duration in score:
            if note is None:
                time.sleep(duration)
            else:
                playNote(note, duration)
    GPIO.cleanup()
    returnMenu()
#Buttons
# BUG FIX: the original passed command=SongN(), which *called* every song
# while building the GUI (blocking on GPIO playback at startup) and then
# bound each button to the call's return value (None). Pass the callables.
btnSong1 = Button(root, text = "Imperial March", fg = "red", command=Song1)
btnSong2 = Button(root, text = "Ode to Joy", fg = "red", command=Song2)
btnSong3 = Button(root, text = "Nocturne in Eb Major Op. 9 No. 2", fg = "red", command=Song3)
btnSong4 = Button(root, text = "In the Hall of the Mountain King", fg = "red", command=Song4)
btn_quit = Button(root, text = "Quit", command=closeWindow)
#Grid Layout (the earlier bare .grid() calls were removed: each widget is
#gridded exactly once below with explicit coordinates)
welcomeTxt.grid(column=0, row=0)
lbl.grid(column=1, row=1)
btnSong1.grid(column=1, row=2)
btnSong2.grid(column=1, row=3)
btnSong3.grid(column=1, row=4)
btnSong4.grid(column=1, row=5)
emptyTxt.grid(column=1, row=6)
btn_quit.grid(column=1, row=7)
# End of file
root.mainloop()
|
nilq/baby-python
|
python
|
r""" This module implements Peak Signal-to-Noise Ratio (PSNR) in PyTorch.
"""
import torch
from typing import Union
from typing import Tuple, List, Optional, Union, Dict, Any
def _validate_input(
    tensors: List[torch.Tensor],
    dim_range: Tuple[int, int] = (0, -1),
    data_range: Tuple[float, float] = (0., -1.),
    # size_dim_range: Tuple[float, float] = (0., -1.),
    size_range: Optional[Tuple[int, int]] = None,
) -> None:
    r"""Check that input(-s) satisfies the requirements
    Args:
        tensors: Tensors to check
        dim_range: Allowed number of dimensions. (min, max)
        data_range: Allowed range of values in tensors. (min, max)
        size_range: Dimensions to include in size comparison. (start_dim, end_dim + 1)

    Note:
        Validation uses ``assert`` statements, so it is skipped entirely
        when Python runs with optimizations enabled (``-O``).
    """
    if not __debug__:
        return
    # Every tensor is compared against the first one.
    x = tensors[0]
    for t in tensors:
        assert torch.is_tensor(t), f'Expected torch.Tensor, got {type(t)}'
        assert t.device == x.device, f'Expected tensors to be on {x.device}, got {t.device}'
        if size_range is None:
            assert t.size() == x.size(), f'Expected tensors with same size, got {t.size()} and {x.size()}'
        else:
            assert t.size()[size_range[0]: size_range[1]] == x.size()[size_range[0]: size_range[1]], \
                f'Expected tensors with same size at given dimensions, got {t.size()} and {x.size()}'
        if dim_range[0] == dim_range[1]:
            assert t.dim() == dim_range[0], f'Expected number of dimensions to be {dim_range[0]}, got {t.dim()}'
        elif dim_range[0] < dim_range[1]:
            assert dim_range[0] <= t.dim() <= dim_range[1], \
                f'Expected number of dimensions to be between {dim_range[0]} and {dim_range[1]}, got {t.dim()}'
        # A data_range with min >= max (the default) disables the value check.
        if data_range[0] < data_range[1]:
            assert data_range[0] <= t.min(), \
                f'Expected values to be greater or equal to {data_range[0]}, got {t.min()}'
            assert t.max() <= data_range[1], \
                f'Expected values to be lower or equal to {data_range[1]}, got {t.max()}'
def _reduce(x: torch.Tensor, reduction: str = 'mean') -> torch.Tensor:
r"""Reduce input in batch dimension if needed.
Args:
x: Tensor with shape (N, *).
reduction: Specifies the reduction type:
``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
"""
if reduction == 'none':
return x
elif reduction == 'mean':
return x.mean(dim=0)
elif reduction == 'sum':
return x.sum(dim=0)
else:
raise ValueError("Uknown reduction. Expected one of {'none', 'mean', 'sum'}")
def psnr(x: torch.Tensor, y: torch.Tensor, data_range: Union[int, float] = 1.0,
         reduction: str = 'mean', convert_to_greyscale: bool = False) -> torch.Tensor:
    r"""Compute Peak Signal-to-Noise Ratio for a batch of images.
    Supports both greyscale and color images with RGB channel order.

    Args:
        x: An input tensor. Shape :math:`(N, C, H, W)`.
        y: A target tensor. Shape :math:`(N, C, H, W)`.
        data_range: Maximum value range of images (usually 1.0 or 255).
        reduction: Specifies the reduction type:
            ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'``
        convert_to_greyscale: Convert RGB image to YCbCr format and computes PSNR
            only on luminance channel if `True`. Compute on all 3 channels otherwise.

    Returns:
        PSNR Index of similarity between two images.

    References:
        https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
    """
    # Validation is currently disabled; the dim_range=(4, 5) here suggests 5-D
    # (video) input was once allowed — TODO confirm before re-enabling.
    # _validate_input([x, y], dim_range=(4, 5), data_range=(0, data_range))
    # Constant for numerical stability: caps the score at 80 dB for identical images.
    EPS = 1e-8
    # Normalize both inputs to [0, 1] before comparing.
    x = x / float(data_range)
    y = y / float(data_range)
    if (x.size(1) == 3) and convert_to_greyscale:
        # Convert RGB image to YCbCr and take luminance: Y = 0.299 R + 0.587 G + 0.114 B
        rgb_to_grey = torch.tensor([0.299, 0.587, 0.114]).view(1, -1, 1, 1).to(x)
        x = torch.sum(x * rgb_to_grey, dim=1, keepdim=True)
        y = torch.sum(y * rgb_to_grey, dim=1, keepdim=True)
    # Per-image MSE over channel/height/width; assumes 4-D input here.
    mse = torch.mean((x - y) ** 2, dim=[1, 2, 3])
    score: torch.Tensor = - 10 * torch.log10(mse + EPS)
    return _reduce(score, reduction)
|
nilq/baby-python
|
python
|
import numpy, random
import os
import uuid
import cloudpickle
import json
from flor.constants import *
from .. import stateful as flags
from torch import cuda
class Writer:
    """Flor's append-only JSON log writer (EXEC mode) and log reader (replay).

    All state is class-level: the class acts as a process-wide singleton.
    On execution, records are buffered and flushed to disk from a forked
    child process; on re-execution, a prior log is parsed back into
    ``pinned_state``, ``seeds`` and ``store_load`` for replay.
    """
    serializing = False  # True while serialize() is pickling (re-entrancy guard)
    lsn = 0  # log sequence number stamped on every record
    pinned_state = []  # replay: pickle filenames holding saved RNG states
    seeds = []  # replay: recorded random seeds, consumed in order
    store_load = []  # replay: (static_key, global_key, value) records
    partitioned_store_load = []  # replay: store_load grouped per period
    max_buffer = 5000  # number of buffered records that triggers a forked flush
    write_buffer = []  # pending records awaiting flush
    initialized = False
    pickler = cloudpickle
    stateful_adaptive_ext = None  # replay: last 'stateful_adaptive_ext' record seen
    @staticmethod
    def initialize():
        """EXEC mode: mark initialized (log fd handled elsewhere). Replay
        mode: parse the memoized log and build the replay indexes."""
        Writer.initialized = True
        if flags.MODE is EXEC:
            # fd = open(LOG_PATH, 'w')
            fd = None
        else:
            with open(flags.MEMO_PATH.absolute, 'r') as f:
                for line in f:
                    log_record = json.loads(line.strip())
                    if 'source' in log_record:
                        if log_record['source'] == 'pin_state':
                            Writer.pinned_state.append(log_record['state']) # THIS IS JUST A FILENAME
                        elif log_record['source'] == 'random_seed':
                            Writer.seeds.append(log_record['seed'])
                        elif log_record['source'] == 'store':
                            # THIS IS FILENAME, or LBRACK, or ERROR
                            Writer.store_load.append(
                                (log_record['static_key'], log_record['global_key'], log_record['value']))
                            if log_record['value'] == 'RBRACKET':
                                flags.rbracket_gk.add(int(log_record['global_key']))
                        elif log_record['source'] == 'stateful_adaptive_ext':
                            Writer.stateful_adaptive_ext = log_record
            # We now do a Group By global_key on store_load
            # NOTE(review): the sentinel group starts with list=None; this relies
            # on the first record's gk differing from None so the short-circuit
            # avoids indexing None — confirm.
            new_store_load = []
            current_group = {'key': None, 'skey': None, 'list': None}
            period_head = None
            for sk, gk, v in Writer.store_load:
                if period_head is None:
                    period_head = sk
                if current_group['key'] != gk or current_group['list'][0] == 'LBRACKET':
                    # New Group
                    new_store_load.append((current_group['skey'], current_group['key'], current_group['list']))
                    current_group = {'key': gk, 'skey': sk, 'list': []}
                current_group['list'].append(v)
            # Flush the trailing group, then drop the leading sentinel.
            new_store_load.append((current_group['skey'], current_group['key'], current_group['list']))
            assert new_store_load.pop(0) == (None, None, None)
            Writer.store_load = new_store_load
            del new_store_load
            # We now Group By period
            # A record whose static key matches the first-seen key and whose
            # value starts with LBRACKET opens a new period partition.
            current_group = None
            for sk, gk, v in Writer.store_load:
                if sk == period_head and v[0] == 'LBRACKET':
                    Writer.partitioned_store_load.append(current_group)
                    current_group = []
                current_group.append((sk, gk, v))
            Writer.partitioned_store_load.append(current_group)
            assert Writer.partitioned_store_load.pop(0) is None
            # for i, v in enumerate(partitioned_store_load):
            # for u in partitioned_store_load[i+1:]:
            # v.extend(u)
            del current_group
    @staticmethod
    def serialize(obj):
        """Pickle obj to a freshly named .pkl under LOG_DATA_PATH.

        Returns the squiggle-form path on success, or an 'ERROR: ...'
        string on failure (callers detect the 'ERROR' prefix in load()).
        """
        try:
            Writer.serializing = True
            # ADD SOME INDIRECTION
            # MAKE THIS INTO INDEX
            # Loop until a uuid filename that does not already exist is found.
            while True:
                unique_filename = uuid.uuid4().hex + '.pkl'
                unique_filename_abs = os.path.join(flags.LOG_DATA_PATH.absolute, unique_filename)
                unique_filename_sqg = os.path.join(flags.LOG_DATA_PATH.squiggles, unique_filename)
                if not os.path.exists(unique_filename_abs):
                    break
            with open(unique_filename_abs, 'wb') as f:
                cloudpickle.dump(obj, f)
            return unique_filename_sqg
        except Exception as e:
            print(f"Failed to serialize: {e}")
            return "ERROR: failed to serialize"
        finally:
            Writer.serializing = False
    @staticmethod
    def write(obj):
        """Stamp obj with the next LSN and buffer it; flush when full."""
        obj['global_lsn'] = Writer.lsn
        Writer.write_buffer.append(obj)
        Writer.lsn += 1 # append to buffer and increment lsn
        if len(Writer.write_buffer) >= Writer.max_buffer:
            Writer.forked_write() # if buffer exceeds a certain size, or fork_now is triggered
            # note: fork_now is there as a mechanism for forcing fork, we aren't using it yet
    @staticmethod
    def forked_write():
        """Flush the buffer to an LSN-suffixed log file from a forked child.

        The child serializes non-string values, writes JSON lines and exits;
        the parent just clears the buffer and continues. cuda.synchronize()
        runs first so device work is complete before the fork.
        """
        cuda.synchronize()
        pid = os.fork()
        if not pid:
            # Child: derive 'name.<lsn>.ext' from LOG_PATH by inserting the LSN.
            path = flags.LOG_PATH.absolute.split('.')
            path.insert(-1, str(Writer.lsn))
            path = '.'.join(path)
            fd = open(path, 'w')
            os.nice(1) # child process gets lower priority and starts flushing
            for each in Writer.write_buffer:
                if 'value' in each and not isinstance(each['value'], str): # the dict can have 'value' or 'state'
                    each['value'] = Writer.serialize(each['value'])
                fd.write(json.dumps(each) + '\n')
            fd.close()
            os._exit(0)
        else:
            Writer.write_buffer = [] # parent process resets buffer
    @staticmethod
    def flush():
        """Write the adaptive-checkpointing trailer, flush any remaining
        records, and reap the child flusher processes."""
        Writer.write({
            'source': 'stateful_adaptive_ext',
            'pretraining': str(flags.pretraining),
            'iterations_count': str(flags.iterations_count),
            'period': str(flags.period),
            'outermost_sk': str(flags.outermost_sk)
        })
        if Writer.write_buffer:
            Writer.forked_write() # at the end of flor execution, flushes buffer to disk
        try:
            os.wait()
        except:
            pass
    @staticmethod
    def store(obj, static_key, global_key):
        """Log a 'store' record for obj under (static_key, global_key).

        LBRACKET/RBRACKET sentinels delimit groups; RBRACKET also lets
        replay garbage-collect unmatched LBRACKETs.
        """
        # Store the object in the memo
        if obj is LBRACKET:
            d = {
                'source': 'store',
                'static_key': static_key,
                'global_key': global_key,
                'value': 'LBRACKET'
            }
        elif obj is RBRACKET:
            # This helps us garbage collect unmatched LBRACKETS
            d = {
                'source': 'store',
                'static_key': static_key,
                'global_key': global_key,
                'value': 'RBRACKET'
            }
        else:
            d = {
                'source': 'store',
                'static_key': static_key,
                'global_key': global_key,
                'value': obj
            }
        Writer.write(d)
    @staticmethod
    def load(global_key):
        """Replay: pop grouped records until global_key matches, then
        deserialize and return that group's values as a list.

        Raises:
            RuntimeError: If a stored value is an 'ERROR' marker.
        """
        while True:
            skey, gkey, paths = Writer.store_load.pop(0)
            if gkey == global_key:
                break
        # paths can only contain PATHS or ERRORS
        values = []
        if len(paths) == 1 and paths[0] == 'RBRACKET':
            # Adaptive Checkpointing case. We decided not to serialize
            return values
        for path in paths:
            if 'ERROR' in path[0:len('ERROR')]:
                # ERROR CASE
                raise RuntimeError("Necessary state corrupted, unrecoverable")
            elif '.pkl' == os.path.splitext(path)[-1]:
                # PATH CASE
                path = os.path.expanduser(path) if '~' in path[0:2] else os.path.abspath(path)
                with open(path, 'rb') as f:
                    values.append(cloudpickle.load(f))
            else:
                # Raw value
                value = path
                values.append(value)
        return values
    @staticmethod
    def lbrack_load():
        """Replay: discard records up to and including the next LBRACKET
        group and return its global key. Asserts if none remains."""
        while Writer.store_load:
            skey, gkey, v = Writer.store_load.pop(0)
            if 'LBRACKET' in v:
                return gkey
        assert False, 'LBRACKET load failed'
    @staticmethod
    def pin_state(library):
        """EXEC: snapshot the RNG state of `numpy` or `random` to the log.
        Replay: restore the next pinned state from the log instead.

        Raises:
            RuntimeError: If library is not numpy/random, or MODE is invalid.
        """
        if flags.MODE is EXEC:
            if library is numpy:
                d = {'source': 'pin_state',
                     'library': 'numpy',
                     'state': Writer.serialize(library.random.get_state())}
                Writer.write(d)
            elif library is random:
                d = {'source': 'pin_state',
                     'library': 'random',
                     'state': Writer.serialize(library.getstate())}
                Writer.write(d)
            else:
                raise RuntimeError("Library must be `numpy` or `random`, but `{}` was given".format(library.__name__))
        elif flags.MODE is REEXEC:
            path = Writer.pinned_state.pop(0)
            with open(path, 'rb') as f:
                state = cloudpickle.load(f)
            if library is numpy:
                library.random.set_state(state)
            elif library is random:
                library.setstate(state)
            else:
                raise RuntimeError("Library must be `numpy` or `random`, but `{}` was given".format(library.__name__))
        else:
            raise RuntimeError()
    @staticmethod
    def random_seed(*args, **kwargs):
        """EXEC: draw a fresh seed (numpy.random.randint; defaults to
        [0, 2**32)), log it, and return it. Replay: return the next
        recorded seed instead.
        """
        if flags.MODE is EXEC:
            if args or kwargs:
                seed = numpy.random.randint(*args, **kwargs)
            else:
                seed = numpy.random.randint(0, 2 ** 32)
            d = {
                'source': 'random_seed',
                'seed': seed
            }
            Writer.write(d)
            return seed
        elif flags.MODE is REEXEC:
            seed = Writer.seeds.pop(0)
            return seed
        else:
            raise RuntimeError()
# Re-export the Writer entry points as module-level functions (public API).
pin_state = Writer.pin_state
random_seed = Writer.random_seed
flush = Writer.flush
__all__ = ['pin_state', 'random_seed', 'Writer', 'flush']
|
nilq/baby-python
|
python
|
from leapp.actors import Actor
from leapp.models import Report, OpenSshConfig
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
from leapp.libraries.common.reporting import report_generic
class OpenSshUsePrivilegeSeparationCheck(Actor):
    """
    UsePrivilegeSeparation configuration option was removed.

    Check the value of UsePrivilegeSeparation in the OpenSSH server config
    and warn about its deprecation if it is set to a non-default value.
    """
    name = 'open_ssh_use_privilege_separation'
    consumes = (OpenSshConfig, )
    produces = (Report, )
    tags = (ChecksPhaseTag, IPUWorkflowTag)
    def process(self):
        for config in self.consume(OpenSshConfig):
            value = config.use_privilege_separation
            # Unset means the default; "sandbox" is the default value —
            # neither warrants a report.
            if value is None or value == "sandbox":
                continue
            report_generic(
                title='OpenSSH configured not to use privilege separation sandbox',
                summary='OpenSSH is configured to disable privilege '
                        'separation sandbox, which is decreasing security '
                        'and is no longer supported in RHEL 8',
                severity='low')
|
nilq/baby-python
|
python
|
import tensorflow as tf
import tensorflow.keras as tk
import nthmc
# Run configuration: single batch/epoch, 1024 steps per epoch with 64 mixing
# steps and 10 leapfrog steps per trajectory.
conf = nthmc.Conf(nbatch=1, nepoch=1, nstepEpoch=1024, nstepMixing=64, stepPerTraj = 10,
initDt=0.4, refreshOpt=False, checkReverse=False, nthr=4)
nthmc.setup(conf)
beta=3.5
# Baseline: identity transform (no field transformation) at beta = 3.5.
action = nthmc.OneD(beta=beta, transform=nthmc.Ident())
loss = nthmc.LossFun(action, cCosDiff=1.0, cTopoDiff=1.0, dHmin=0.0, topoFourierN=1)
# Weights recovered from a previous training log via the awk one-liner below.
weights=list(map(lambda x:tf.constant(x,dtype=tf.float64),
# 02f:"cy$@c:r!awk -v beta=3.5 '/^beta: /{b=$2} p>0{w=w "\n" $0} b==beta&&/^weights: /{p=1;w=$0} p==1&&/]$/{p=0} END{print w}' attic/t4.log
[0.268831031592305,
beta]))
nthmc.showTransform(conf, action, loss, weights)
action = nthmc.OneD(beta=beta, transform=nthmc.TransformChain([
nthmc.OneDNeighbor(mask='even'), nthmc.OneDNeighbor(mask='odd'),
nthmc.OneDNeighbor(mask='even',distance=2), nthmc.OneDNeighbor(mask='odd',distance=2),
nthmc.OneDNeighbor(mask='even',distance=4), nthmc.OneDNeighbor(mask='odd',distance=4),
nthmc.OneDNeighbor(mask='even',distance=8), nthmc.OneDNeighbor(mask='odd',distance=8),
nthmc.OneDNeighbor(mask='even',distance=16), nthmc.OneDNeighbor(mask='odd',distance=16),
nthmc.OneDNeighbor(mask='even',distance=32), nthmc.OneDNeighbor(mask='odd',distance=32),
nthmc.OneDNeighbor(mask='even',order=2), nthmc.OneDNeighbor(mask='odd',order=2),
nthmc.OneDNeighbor(mask='even',order=2,distance=2), nthmc.OneDNeighbor(mask='odd',order=2,distance=2),
nthmc.OneDNeighbor(mask='even',order=2,distance=4), nthmc.OneDNeighbor(mask='odd',order=2,distance=4),
nthmc.OneDNeighbor(mask='even',order=2,distance=8), nthmc.OneDNeighbor(mask='odd',order=2,distance=8),
nthmc.OneDNeighbor(mask='even',order=2,distance=16), nthmc.OneDNeighbor(mask='odd',order=2,distance=16),
nthmc.OneDNeighbor(mask='even',order=2,distance=32), nthmc.OneDNeighbor(mask='odd',order=2,distance=32),
nthmc.OneDNeighbor(mask='even',order=3), nthmc.OneDNeighbor(mask='odd',order=3),
nthmc.OneDNeighbor(mask='even',order=3,distance=2), nthmc.OneDNeighbor(mask='odd',order=3,distance=2),
nthmc.OneDNeighbor(mask='even',order=3,distance=4), nthmc.OneDNeighbor(mask='odd',order=3,distance=4),
nthmc.OneDNeighbor(mask='even',order=3,distance=8), nthmc.OneDNeighbor(mask='odd',order=3,distance=8),
nthmc.OneDNeighbor(mask='even',order=3,distance=16), nthmc.OneDNeighbor(mask='odd',order=3,distance=16),
nthmc.OneDNeighbor(mask='even',order=3,distance=32), nthmc.OneDNeighbor(mask='odd',order=3,distance=32),
nthmc.OneDNeighbor(mask='even',order=4), nthmc.OneDNeighbor(mask='odd',order=4),
nthmc.OneDNeighbor(mask='even',order=4,distance=2), nthmc.OneDNeighbor(mask='odd',order=4,distance=2),
nthmc.OneDNeighbor(mask='even',order=4,distance=4), nthmc.OneDNeighbor(mask='odd',order=4,distance=4),
nthmc.OneDNeighbor(mask='even',order=4,distance=8), nthmc.OneDNeighbor(mask='odd',order=4,distance=8),
nthmc.OneDNeighbor(mask='even',order=4,distance=16), nthmc.OneDNeighbor(mask='odd',order=4,distance=16),
nthmc.OneDNeighbor(mask='even',order=4,distance=32), nthmc.OneDNeighbor(mask='odd',order=4,distance=32),
nthmc.OneDNeighbor(mask='even'), nthmc.OneDNeighbor(mask='odd'),
nthmc.OneDNeighbor(mask='even',distance=2), nthmc.OneDNeighbor(mask='odd',distance=2),
nthmc.OneDNeighbor(mask='even',distance=4), nthmc.OneDNeighbor(mask='odd',distance=4),
nthmc.OneDNeighbor(mask='even',distance=8), nthmc.OneDNeighbor(mask='odd',distance=8),
nthmc.OneDNeighbor(mask='even',distance=16), nthmc.OneDNeighbor(mask='odd',distance=16),
nthmc.OneDNeighbor(mask='even',distance=32), nthmc.OneDNeighbor(mask='odd',distance=32),
nthmc.OneDNeighbor(mask='even',order=2), nthmc.OneDNeighbor(mask='odd',order=2),
nthmc.OneDNeighbor(mask='even',order=2,distance=2), nthmc.OneDNeighbor(mask='odd',order=2,distance=2),
nthmc.OneDNeighbor(mask='even',order=2,distance=4), nthmc.OneDNeighbor(mask='odd',order=2,distance=4),
nthmc.OneDNeighbor(mask='even',order=2,distance=8), nthmc.OneDNeighbor(mask='odd',order=2,distance=8),
nthmc.OneDNeighbor(mask='even',order=2,distance=16), nthmc.OneDNeighbor(mask='odd',order=2,distance=16),
nthmc.OneDNeighbor(mask='even',order=2,distance=32), nthmc.OneDNeighbor(mask='odd',order=2,distance=32),
nthmc.OneDNeighbor(mask='even',order=3), nthmc.OneDNeighbor(mask='odd',order=3),
nthmc.OneDNeighbor(mask='even',order=3,distance=2), nthmc.OneDNeighbor(mask='odd',order=3,distance=2),
nthmc.OneDNeighbor(mask='even',order=3,distance=4), nthmc.OneDNeighbor(mask='odd',order=3,distance=4),
nthmc.OneDNeighbor(mask='even',order=3,distance=8), nthmc.OneDNeighbor(mask='odd',order=3,distance=8),
nthmc.OneDNeighbor(mask='even',order=3,distance=16), nthmc.OneDNeighbor(mask='odd',order=3,distance=16),
nthmc.OneDNeighbor(mask='even',order=3,distance=32), nthmc.OneDNeighbor(mask='odd',order=3,distance=32),
nthmc.OneDNeighbor(mask='even',order=4), nthmc.OneDNeighbor(mask='odd',order=4),
nthmc.OneDNeighbor(mask='even',order=4,distance=2), nthmc.OneDNeighbor(mask='odd',order=4,distance=2),
nthmc.OneDNeighbor(mask='even',order=4,distance=4), nthmc.OneDNeighbor(mask='odd',order=4,distance=4),
nthmc.OneDNeighbor(mask='even',order=4,distance=8), nthmc.OneDNeighbor(mask='odd',order=4,distance=8),
nthmc.OneDNeighbor(mask='even',order=4,distance=16), nthmc.OneDNeighbor(mask='odd',order=4,distance=16),
nthmc.OneDNeighbor(mask='even',order=4,distance=32), nthmc.OneDNeighbor(mask='odd',order=4,distance=32),
]))
loss = nthmc.LossFun(action, cCosDiff=1.0, cTopoDiff=1.0, dHmin=0.0, topoFourierN=1)
# 02f:"cy$@c:r!awk '/^beta/{print} p>0{w=w "\n" $0} b==beta&&/^weights/{p=1;w=$0} p==1&&/]\)\)$/{p=0} END{print w}' i7.py
beta=1.625
weights=list(map(lambda x:tf.constant(x,dtype=tf.float64),
# 02f:"cy$@c:r!awk -v beta=1.625 '/^beta: /{b=$2} p>0{w=w "\n" $0} b==beta&&/^weights: /{p=1;w=$0} p==1&&/]$/{p=0} END{print w}' t13.log
[0.39928005894476953,
-0.16646589446724119,
-0.165116196190377,
0.030407332523959697,
0.030213236259768468,
0.079470890222058513,
0.0761346381697804,
0.029619192505227931,
0.030915611020612837,
0.00403555847393147,
0.00407719851568374,
-0.00060822007493423636,
0.0037353011339751178,
0.069686089040409807,
0.070473588467025811,
0.033146255849164606,
0.033379928079238383,
-0.0029161974044230022,
-0.0017224631344893938,
-0.00069061113081232792,
-0.0016410929512909317,
0.0016876364859234507,
-0.000733623769599814,
0.0014529279510181758,
-0.00091449778170147266,
-0.019901824910881289,
-0.017959584894213086,
-0.0059090578292857058,
-0.0054266495233532761,
0.0013726690186972,
0.00021210992451173647,
-0.0001498695177544983,
0.00064305655082401761,
0.0010931278372980787,
0.00037689345534901728,
-0.0014984995098818561,
-0.00040476075088637781,
0.0046935831026250876,
0.0032850096553108288,
-0.00054541015203022974,
-0.0014208086412517168,
-0.0002359329393992865,
-0.00035542688976354463,
-1.2157678571547889e-05,
0.00015490831515802204,
-0.00076950136336040114,
-0.00031333861450947426,
5.097857409197952e-05,
-0.00012148501847680332,
-0.16518081785315231,
-0.16337905450177662,
0.035184121942295171,
0.034570717385232527,
0.080465773703933,
0.0774896127221109,
0.02912121009107339,
0.030940522095703058,
0.0043964429072142538,
0.0040451007928214251,
-0.00080468042839712994,
0.0035457375499732395,
0.06101007963274057,
0.061368775130318916,
0.042444107322532766,
0.0429949487047859,
-0.0027232705295604813,
-0.0012932981224013512,
-0.000984564284924616,
-0.0024456764643747803,
0.0015834011617584004,
-0.00090531730999972814,
0.0017613431423082497,
-0.0012386881834937134,
-0.023626271538814435,
-0.021598075508490612,
-0.012897707141515927,
-0.012881432717533042,
0.0014793362615386902,
9.2105145307772054e-06,
-0.00020941704974683913,
0.00023779728215206694,
0.0014388740734254534,
0.00038662450216112368,
-0.0012415944776245824,
-5.7876896633756865e-05,
0.00847176568981238,
0.00680656254828831,
0.0038699954560532414,
0.002672203307567224,
-0.00032310477908741877,
-0.00027817807890187128,
2.9749369975343604e-07,
0.00056912541337158064,
-0.00016832076473673023,
-6.8163634028702889e-05,
0.00038894121879160768,
0.00021929053651325786,
beta]))
tf.print('beta: ',beta)
nthmc.showTransform(conf, action, loss, weights)
# 02f:"cy$@c:r!awk '/^beta/{print} p>0{w=w "\n" $0} b==beta&&/^weights/{p=1;w=$0} p==1&&/]\)\)$/{p=0} END{print w}' i8.py
beta=2.25
weights=list(map(lambda x:tf.constant(x,dtype=tf.float64),
# 02f:"cy$@c:r!awk -v beta=2.25 '/^beta: /{b=$2} p>0{w=w "\n" $0} b==beta&&/^weights: /{p=1;w=$0} p==1&&/]$/{p=0} END{print w}' t13.log
[0.46347687013765859,
-0.26956096774378285,
-0.27789613752492937,
0.00057889370538809464,
-0.010236247423671241,
0.0986786428228265,
0.092940163183728317,
0.048389783664764645,
0.0428352067197632,
0.0071532724177343155,
-0.00016729900977585887,
-0.0028994954411082729,
0.0045629145744148841,
0.10429797985901097,
0.10516664327725961,
0.019767444998128367,
0.017733344833014579,
-0.015701195405613568,
-0.01627707909725213,
6.1961085874725515e-05,
-0.002726021972288098,
0.0030387605699716638,
-0.00086939916322049775,
-0.0025294217069669156,
0.0023162394059350229,
-0.018197955042421207,
-0.013156170877580465,
-0.00018828285523644493,
0.00035738065232948939,
0.0020460184320699173,
0.0037571145249259536,
0.0014847460163292033,
0.0033975025807476992,
-0.0016427361682365381,
-0.00015240892204221136,
-0.00061298149379606509,
-0.00070245629535897747,
0.0049699308711759595,
0.0023881065458685458,
-0.002674100400855986,
-0.0046840431297724182,
-0.00051660018705215922,
-0.0015122462571267373,
0.0013658719371077899,
0.0024371537034333477,
-0.00076388891331814345,
0.0010928852937978671,
-0.00063912955260809286,
-0.00046236360307934886,
-0.26720377121779987,
-0.27506659960565666,
0.01386921185779756,
0.0011223971294072746,
0.10399309089493593,
0.097402127070597852,
0.049035774754181,
0.043470613107106586,
0.0070195040443017734,
-0.00064125419449594372,
-0.0041663105190666537,
0.0052679329287449823,
0.07955487719732092,
0.077760535424142033,
0.045023185143905242,
0.0424627085709664,
-0.012423562741718689,
-0.011645230113129405,
-0.00040397146191294077,
-0.0039211539692662672,
0.0044111294783447065,
-0.00095582047069014779,
-0.0011982494863965673,
0.0026672427895575112,
-0.036791369866543647,
-0.030221714902313849,
-0.020408567524268454,
-0.019107255766985697,
0.0011009778452924061,
0.0031477494894678764,
0.00014733642473982873,
0.00060935472443990151,
-0.0010207202054904839,
0.0013049792966303229,
-0.00073578299790926221,
-0.000648657507138662,
0.01345683484018945,
0.00983366514694654,
0.0063690140656229343,
0.0048874399190401109,
0.00081988498166550778,
-0.00083428871571166992,
-0.0014618929691323291,
-0.00054592505558324141,
-0.0012395250586266766,
0.00018205333858756673,
0.00068928868823799028,
-7.0524701673341993e-05,
beta]))
tf.print('beta: ',beta)
nthmc.showTransform(conf, action, loss, weights)
# 02f:"cy$@c:r!awk '/^beta/{print} p>0{w=w "\n" $0} b==beta&&/^weights/{p=1;w=$0} p==1&&/]\)\)$/{p=0} END{print w}' i9.py
beta=2.875
weights=list(map(lambda x:tf.constant(x,dtype=tf.float64),
# 02f:"cy$@c:r!awk -v beta=2.875 '/^beta: /{b=$2} p>0{w=w "\n" $0} b==beta&&/^weights: /{p=1;w=$0} p==1&&/]$/{p=0} END{print w}' t13.log
[0.45615090724163854,
-0.31097787822669354,
-0.30507920463515187,
-0.027893016314395284,
-0.031378845400177963,
0.077689083215770949,
0.075569715367494641,
0.038699510620482935,
0.029162385005325472,
0.0019581497708284694,
-0.0018231287462758918,
0.00015888456785728626,
-0.0028210982286725086,
0.13124240382350402,
0.13309785933956725,
0.017604137564691036,
0.010907674928860149,
-0.013780037257168396,
-0.022445109691812258,
-0.0045229710423886765,
-0.0029058196749805151,
0.0023048449953337728,
-0.0070235509174246284,
-0.0014313775421141036,
0.00081176147554258083,
-0.014710030999330952,
-0.010194100966722035,
0.002744086282626448,
0.0045756447355585093,
0.0031292945016411365,
0.0031592597427928843,
0.00053880411453796249,
-0.00058044090213579173,
0.00095364836258577637,
-0.0028807214952762316,
0.0018107008839567691,
-0.0013583732862177305,
0.0046931380657292757,
0.0016671741461710527,
-0.0031238965035703696,
-0.0030495300374729362,
3.7767171335432319e-05,
0.00034506965785394356,
-9.8650513910624843e-05,
0.00084275179037986137,
0.0012699466261455849,
0.0012800734726210016,
0.00078495081260056656,
-3.6750708339015154e-05,
-0.31014396639255265,
-0.3045858543098458,
-0.010885776010155591,
-0.015750481987926623,
0.087259089367838744,
0.08243283014988155,
0.040517512492184569,
0.030525468606565239,
0.0025872352327758539,
-0.0027206505719563493,
-0.00089873373216705352,
-0.0018318661211866342,
0.0967308932840898,
0.095883079309349514,
0.047763637063773574,
0.041546863771405255,
-0.012530825072081196,
-0.020478495148529022,
-0.0067227151927674068,
-0.0052179264725507176,
0.00418665071041997,
-0.00771130055753064,
-0.0013408242290686503,
0.00065100724836321812,
-0.040842057940541958,
-0.03514844539463631,
-0.025181375323195351,
-0.023134536637470358,
0.00242366467545387,
0.002806728633386199,
0.00060494371667193494,
-0.0040390056771061368,
0.0011595645810642834,
0.00015374946003506677,
0.00012011293019308769,
-0.0021145331363914585,
0.016401183428638843,
0.011602504263125767,
0.0076990960462810717,
0.0077484140578621538,
1.1511413473662876e-05,
0.0011462119410679498,
-0.0011556563594443477,
-0.00057730440795531726,
-0.0018027637615355017,
-0.0021347460580807263,
0.00058925948384115634,
-0.0010558414842687634,
beta]))
tf.print('beta: ',beta)
nthmc.showTransform(conf, action, loss, weights)
# 02f:"cy$@c:r!awk '/^beta/{print} p>0{w=w "\n" $0} b==beta&&/^weights/{p=1;w=$0} p==1&&/]\)\)$/{p=0} END{print w}' i10.py
beta=3.5
weights=list(map(lambda x:tf.constant(x,dtype=tf.float64),
# 02f:"cy$@c:r!awk -v beta=3.5 '/^beta: /{b=$2} p>0{w=w "\n" $0} b==beta&&/^weights: /{p=1;w=$0} p==1&&/]$/{p=0} END{print w}' t13.log
[0.426161809940765,
-0.320109120400013,
-0.32090020243824952,
-0.031182716984891851,
-0.036169773339796464,
0.055714318919392686,
0.057602389890724234,
0.029411886986087127,
0.02048733243498738,
0.00094839455227904755,
-0.003336858749749962,
0.0042831810194401618,
0.0055589091837478805,
0.1523380013134244,
0.15163036003180105,
0.017450942775123303,
0.01366963403033924,
-0.015362176729137129,
-0.023842410298148348,
-0.0077312457934894819,
-0.0013628219442876222,
0.0011295376199805572,
-0.00091410054524127253,
-0.00059341864473508234,
0.0025111964348351304,
-0.016444424617664447,
-0.015570829270105238,
0.0019647033660882846,
0.0059393613468408137,
0.0064600167032926427,
0.004736273804986227,
0.0022333630983046664,
-0.0011657888127998832,
0.00019669260733786145,
-0.0030779286401902473,
0.002774947111944009,
-9.6433938335267359e-05,
0.0083785133367789,
0.0053008391565818914,
-0.0014080778872983919,
-0.0024396905236594682,
-0.0015531026667714104,
-0.0015796761344081557,
-0.0012537334878866919,
-0.0015042727436904697,
0.0011413533343287735,
0.00097227804515090984,
-0.00046677598847423714,
0.00063556338329312273,
-0.32071868062103076,
-0.32148180159296041,
-0.00986116406882059,
-0.017335584106134748,
0.068029369690636679,
0.066918020242658541,
0.030819349510999603,
0.023206203501044503,
0.0017779135561217525,
-0.0034133032476216588,
0.002189343578032792,
0.00656004530207795,
0.11256550758203428,
0.11055222402865708,
0.049446153758141626,
0.045658985887769253,
-0.017581715497940329,
-0.026933901536123416,
-0.011986081801134148,
-0.0048059039456269485,
0.0017878663762805563,
-0.0025517310832571327,
0.00019610673621250042,
0.003797903258295098,
-0.04866943996936729,
-0.045885640197634261,
-0.030946502446712494,
-0.025988143680184862,
0.0058739799141497131,
0.0044195418882953643,
0.0029309881330323194,
-0.0042307734485617391,
-0.000379102785780568,
-0.00042006608019470941,
-0.000890702512832992,
-0.0015533078274466545,
0.018431797429963044,
0.01296582266989706,
0.0083730807637790484,
0.0071470949531473186,
-0.0006280677552497352,
0.00086911341441850648,
-0.00011310686430592162,
0.0010197384364829679,
-0.00042664791705881658,
-0.00060594003312396886,
8.3595033525653663e-05,
-0.00070533166824918961,
beta]))
tf.print('beta: ',beta)
nthmc.showTransform(conf, action, loss, weights)
|
nilq/baby-python
|
python
|
from __future__ import annotations
from injector import Injector
from labster.domain2.model.structure import Structure, StructureRepository
from labster.domain2.model.type_structure import CO, DU, FA, LA, UN
def test_single():
    """A lone Structure exposes its names/sigle and toggles active on delete."""
    universite = Structure(nom="Sorbonne Université", type_name=UN.name, sigle="SU")
    assert universite.nom == "Sorbonne Université"
    assert universite.name == "Sorbonne Université"
    # sigle_ou_nom prefers the sigle when one is set.
    assert universite.sigle_ou_nom == "SU"
    assert universite.is_reelle
    assert universite.active
    assert len(universite.ancestors) == 0
    assert len(universite.descendants) == 0
    universite.check()
    # delete() is a soft delete: it only flips the active flag.
    universite.delete()
    assert not universite.active
def test_hierarchy():
    """add_child/add_parent and their removals keep both sides consistent."""
    universite = Structure(nom="Sorbonne Université", type_name=UN.name)
    fac_sciences = Structure(nom="Faculté des Sciences", type_name=FA.name)
    assert universite not in fac_sciences.parents
    assert fac_sciences not in universite.children
    # Linking from the parent side updates both parents and children.
    universite.add_child(fac_sciences)
    assert universite in fac_sciences.parents
    assert fac_sciences in universite.children
    assert universite.depth == 0
    assert fac_sciences.depth == 1
    assert fac_sciences.ancestors == [universite]
    universite.check()
    fac_sciences.check()
    # Unlinking restores both sides and resets depth.
    universite.remove_child(fac_sciences)
    assert universite not in fac_sciences.parents
    assert fac_sciences not in universite.children
    assert universite.depth == 0
    assert fac_sciences.depth == 0
    universite.check()
    fac_sciences.check()
    # The same link/unlink round-trip works from the child side.
    fac_sciences.add_parent(universite)
    assert universite in fac_sciences.parents
    assert fac_sciences in universite.children
    assert universite.depth == 0
    assert fac_sciences.depth == 1
    universite.check()
    fac_sciences.check()
    fac_sciences.remove_parent(universite)
    assert universite not in fac_sciences.parents
    assert fac_sciences not in universite.children
    assert universite.depth == 0
    assert fac_sciences.depth == 0
    universite.check()
    fac_sciences.check()
def test_deep_hierarchy():
    """A 4-level chain reports ancestors ordered nearest-first."""
    universite = Structure(nom="Sorbonne Université", type_name=UN.name)
    fac = Structure(nom="Faculté", type_name=FA.name)
    composante = Structure(nom="Composante", type_name=CO.name)
    labo = Structure(nom="Labo", type_name=LA.name)
    universite.add_child(fac)
    fac.add_child(composante)
    composante.add_child(labo)
    universite.check()
    fac.check()
    composante.check()
    labo.check()
    # Ancestors are listed from the immediate parent up to the root.
    assert labo.ancestors == [composante, fac, universite]
def test_constraints_on_parent():
    """Type constraints: only UN→DU parent/child links are permitted here."""
    un = Structure(nom="Sorbonne Université", type_name=UN.name)
    la = Structure(nom="Labo", type_name=LA.name)
    du = Structure(nom="DU", type_name=DU.name)
    # No structure may be its own parent; UN/LA cannot parent each other.
    assert not un.can_have_parent(un)
    assert not un.can_have_parent(la)
    assert not la.can_have_parent(la)
    assert not la.can_have_parent(un)
    assert not un.can_have_parent(du)
    assert du.can_have_parent(un)
    # can_have_child mirrors can_have_parent.
    assert not un.can_have_child(un)
    assert not un.can_have_child(la)
    assert not la.can_have_child(la)
    assert not la.can_have_child(un)
    assert un.can_have_child(du)
    assert not du.can_have_child(un)
def test_repo(injector: Injector, db_session):
    """The repository stores structures and finds them by id, dn and sigle."""
    repo = injector.get(StructureRepository)
    universite = Structure(
        nom="Sorbonne Université", type_name=UN.name, sigle="SU", dn="Top"
    )
    fac_sciences = Structure(nom="Faculté des Sciences", type_name=FA.name)
    repo.put(universite)
    repo.put(fac_sciences)
    assert universite in repo.get_all()
    assert fac_sciences in repo.get_all()
    repo.check_all()
    # Lookups by each of the three alternative keys return the same object.
    assert universite == repo.get_by_id(universite.id)
    assert universite == repo.get_by_dn(universite.dn)
    assert universite == repo.get_by_sigle(universite.sigle)
    # Linking after insertion must not invalidate repository contents.
    universite.add_child(fac_sciences)
    assert universite in repo.get_all()
    assert fac_sciences in repo.get_all()
    repo.check_all()
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import Confirguracoes
# Register your models here.
admin.site.register(Confirguracoes)  # expose Confirguracoes in the admin with the default ModelAdmin
|
nilq/baby-python
|
python
|
from __future__ import division
import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import numpy as np
class RobotArm(object):
    def __init__(self):
        # Denavit-Hartenberg parameters for the 6 joints
        # (units appear to be millimetres / radians — TODO confirm).
        self.dh_a= [ 0, 0, 340, 0, 0, 0]
        self.dh_alpha= [ 0,-np.pi/2, 0, np.pi/2, -np.pi/2, np.pi/2]
        self.dh_d= [ 290, 0, 0, 302, 0, 72]
        self.dh_offset= [ 0,-np.pi/2, 0, 0, 0, 0]
        # Visual radii per link segment (7 entries: base link + 6 joints).
        self.radius=[90, 90, 90, 80, 70, 70, 20]
        # Workspace zones, each given as 4 cube-defining corner points
        # (origin plus the three adjacent corners — same format as plot_cube).
        self.zone1 = [(-800,-800,-500), (-800, 800,-500), ( 800,-800,-500), (-800,-800, 100)] # ground
        self.zone2 = [(-800,-250, 100), (-800, 250, 100), (-150,-250, 100), (-800,-250, 600)] # front of the robot
        self.zone3a = [(-350, 250, 100), (-350, 450, 100), (-150, 250, 100), (-350, 250, 300)] # container 1
        self.zone3b = [(-350,-450, 100), (-350,-250, 100), (-150,-450, 100), (-350,-450, 300)] # container 2
def get_dh_mat(self, a, alpha, d, theta):
mat = np.array([[ np.cos(theta), -np.sin(theta), 0, a ],
[ np.sin(theta)*np.cos(alpha), np.cos(theta)*np.cos(alpha), -np.sin(alpha), -d*np.sin(alpha)],
[ np.sin(theta)*np.sin(alpha), np.cos(theta)*np.sin(alpha), np.cos(alpha), d*np.cos(alpha)],
[0, 0, 0, 1]])
return mat
def model(self, angular_positions):
transforms = np.zeros((4,4,len(self.dh_a)+1))
T=np.zeros((4,4))
np.fill_diagonal(T, 1)
transforms[:,:,0] = T
for i, angle in enumerate(angular_positions):
submat = self.get_dh_mat(self.dh_a[i],self.dh_alpha[i],self.dh_d[i], self.dh_offset[i] + angle)
T=np.matmul(T,submat)
transforms[:,:,i+1] = T
return transforms
    def forward_model(self, angular_positions):
        """Forward kinematics: Cartesian (x, y, z) of the end effector,
        i.e. the translation column of the final chain transform."""
        conf=self.model(angular_positions)
        return np.matmul(conf[:,:,-1],np.array([0,0,0,1]))[np.r_[0:3]]
    def config_ax(self, ax):
        """Set fixed +/-1000 limits and a box aspect on a 3D axes object."""
        ax.set_xlim3d(-1000,1000)
        ax.set_ylim3d(-1000,1000)
        ax.set_zlim3d(-1000,1000)
        # NOTE(review): set_aspect('equal') on 3D axes is rejected by newer
        # matplotlib versions — verify against the pinned matplotlib release.
        ax.set_aspect('equal', 'box')
    def create_ax(self,fig):
        """Create a 3D axes on fig, configured via config_ax, and return it."""
        ax = Axes3D(fig)
        self.config_ax(ax)
        return ax
    def plot_conf(self, ax, angular_positions):
        """Draw the arm at the given joint angles on ax: base cube, link
        cylinders, joint spheres, end-effector frame axes, and work zones.

        Relies on self.plot_cube, defined further down in this class.
        """
        conf=self.model(angular_positions)
        # Base pedestal cube around the origin.
        cube_definition = [
            (-100,-100,0), (-100,100,0), (100,-100,0), (-100, -100, 100)
        ]
        self.plot_cube(ax,cube_definition)
        # Translation part (last column) of each per-joint transform.
        pos = conf[0:3,-1,:]
        #self.plot_sphere(ax, [0,0,0])
        for i in range(pos.shape[1]):
            if i==pos.shape[1]-1:
                # End effector: draw its local x (red), y (green), z (blue)
                # axes as 200-unit segments.
                x=np.matmul( conf[:,:,i], np.array([200,0,0,1]))[np.r_[0:3]]
                y=np.matmul( conf[:,:,i], np.array([0,200,0,1]))[np.r_[0:3]]
                z=np.matmul( conf[:,:,i], np.array([0,0,200,1]))[np.r_[0:3]]
                ax.plot([pos[0,i],x[0]],[pos[1,i],x[1]],[pos[2,i],x[2]],'r')
                ax.plot([pos[0,i],y[0]],[pos[1,i],y[1]],[pos[2,i],y[2]],'g')
                ax.plot([pos[0,i],z[0]],[pos[1,i],z[1]],[pos[2,i],z[2]],'b')
            if i>0:
                # Sphere at the joint, cylinder along the preceding link.
                self.plot_sphere(ax, pos[:,i],1.2*self.radius[i]/2)
                self.plot_cylinder(ax, pos[:,i-1], pos[:,i],self.radius[i]/2)
        # Work zones: ground, front area, and the two containers.
        self.plot_cube(ax,self.zone1,[0.3,0.3,0.3,0.35])
        self.plot_cube(ax,self.zone2,[0.3,0.3,0.8,0.35])
        self.plot_cube(ax,self.zone3a,[0.3,0.8,0.3,0.35])
        self.plot_cube(ax,self.zone3b,[0.3,0.8,0.3,0.35])
def plot(self, angular_positions):
fig = plt.figure()
ax=self.create_ax(fig)
self.plot_conf(ax,angular_positions)
plt.show()
def animate(self, angle_init,angle_end, ax = None, predicted_pos=None):
T=100;
if (ax==None):
fig = plt.figure()
ax = self.create_ax(fig)
for t in range(T):
ax.clear()
self.config_ax(ax)
self.plot_conf(ax,angle_init + t/T * (angle_end-angle_init))
if(predicted_pos is not None):
ax.scatter( predicted_pos[0],predicted_pos[1], predicted_pos[2])
plt.pause(0.01)
print("end")
print("predicted:")
print(predicted_pos)
print("reached:")
print(self.forward_model(angle_end))
return ax
def plot_sphere(self, ax, c=[0, 0, 0], r = 0.05):
u, v = np.mgrid[0:2*np.pi:10j, 0:np.pi:5j]
x = c[0] + r*np.cos(u)*np.sin(v)
y = c[1] + r*np.sin(u)*np.sin(v)
z = c[2] + r*np.cos(v)
ax.plot_surface(x, y, z, color="r")
def plot_cylinder(self, ax, origin=np.array([0, 0, 0]), end=np.array([1,1,1]), R = 0.02):
v = end - origin
mag = np.linalg.norm(v)
if mag==0:
return
v = v / mag
not_v = np.array([1, 0, 0])
if (v == not_v).all():
not_v = np.array([0, 1, 0])
n1 = np.cross(v, not_v)
n1 /= np.linalg.norm(n1)
n2 = np.cross(v, n1)
t = np.linspace(0, mag, 10)
theta = np.linspace(0, 2 * np.pi, 10)
t, theta = np.meshgrid(t, theta)
X, Y, Z = [origin[i] + v[i] * t + R * np.sin(theta) * n1[i] + R * np.cos(theta) * n2[i] for i in [0, 1, 2]]
ax.plot_surface(X, Y, Z,color='orange')
def plot_cube(self,ax,cube_definition, color=[0.8,0.7,0.3,1]):
cube_definition_array = [
np.array(list(item))
for item in cube_definition
]
points = []
points += cube_definition_array
vectors = [
cube_definition_array[1] - cube_definition_array[0],
cube_definition_array[2] - cube_definition_array[0],
cube_definition_array[3] - cube_definition_array[0]
]
points += [cube_definition_array[0] + vectors[0] + vectors[1]]
points += [cube_definition_array[0] + vectors[0] + vectors[2]]
points += [cube_definition_array[0] + vectors[1] + vectors[2]]
points += [cube_definition_array[0] + vectors[0] + vectors[1] + vectors[2]]
points = np.array(points)
edges = [
[points[0], points[3], points[5], points[1]],
[points[1], points[5], points[7], points[4]],
[points[4], points[2], points[6], points[7]],
[points[2], points[6], points[3], points[0]],
[points[0], points[2], points[4], points[1]],
[points[3], points[6], points[7], points[5]]
]
faces = Poly3DCollection(edges, linewidths=1)
faces.set_facecolor(color)
ax.add_collection3d(faces)
|
nilq/baby-python
|
python
|
""" Exceptions for the library. """
class CatnipException(Exception):
    """Base class for all exceptions raised by this library."""
class NoFrame(CatnipException):
    """Raised when a new frame could not be received from the capture source."""
|
nilq/baby-python
|
python
|
# test of printing multiple fonts to the ILI9341 on a esp32-wrover dev kit using H/W SP
# MIT License; Copyright (c) 2017 Jeffrey N. Magee
from ili934xnew import ILI9341, color565
from machine import Pin, SPI
import tt14
import glcdfont
import tt14
import tt24
import tt32
# Fonts to cycle through (glcdfont is the small built-in bitmap font).
fonts = [glcdfont,tt14,tt24,tt32]
text = 'Now is the time for all good men to come to the aid of the party.'
# https://forum.micropython.org/viewtopic.php?t=4041
# It looks like there are 2 available SPI buses on the ESP32: HSPI=1 and VSPI = 2.
# HSPI is MOSI=GPIO13, MISO=GPIO12 and SCK=GPIO14
# VSPI is MOSI=GPIO23, MISO=GPIO19 and SCK=GPIO18
# Pin assignment: VSPI bus plus the display's chip-select / data-command /
# reset control lines. Adjust for your wiring.
TFT_SPI_ID = 2
TFT_MISO_PIN = 19
TFT_MOSI_PIN = 23
TFT_CLK_PIN = 18
TFT_CS_PIN = 15
TFT_DC_PIN = 2
TFT_RST_PIN = 4
# 40 MHz clock -- presumably the fastest this ILI9341 module tolerates;
# TODO confirm for the actual board/wiring.
spi = SPI(
    TFT_SPI_ID,
    baudrate=40000000,
    miso=Pin(TFT_MISO_PIN),
    mosi=Pin(TFT_MOSI_PIN),
    sck=Pin(TFT_CLK_PIN))
# r=3 selects a rotated orientation; w/h are the dimensions in that rotation.
display = ILI9341(
    spi,
    cs=Pin(TFT_CS_PIN),
    dc=Pin(TFT_DC_PIN),
    rst=Pin(TFT_RST_PIN),
    w=320,
    h=240,
    r=3)
# Clear the screen, then print the sample text once in each font.
display.erase()
display.set_pos(0,0)
for ff in fonts:
    display.set_font(ff)
    display.print(text)
|
nilq/baby-python
|
python
|
"""
Simple time checker by David. Run with `python time_checker.py` in
the same folder as `bat_trips.json`
"""
import json
from datetime import datetime as dt
# Tally how many trips start and how many end in each hour of the day.
start_times = [0] * 24
end_times = [0] * 24

with open('bat_trips.json') as f:
    data = json.load(f)

for entry in data['data']:
    route = entry['route']['features']
    # NOTE(review): assumes each route has exactly two features ordered
    # [start, end] -- confirm against the bat_trips.json schema.
    start = route[0]
    end = route[1]
    # datetime.fromtimestamp converts to local time; hours are local-time hours.
    start_hour = dt.fromtimestamp(start['properties']['timestamp']).hour
    end_hour = dt.fromtimestamp(end['properties']['timestamp']).hour
    start_times[start_hour] += 1
    end_times[end_hour] += 1

for hour in range(24):
    print("Trips starting at hour {}: {}".format(hour, start_times[hour]))
    print("Trips ending at hour {}: {}".format(hour, end_times[hour]))
|
nilq/baby-python
|
python
|
import cv2, numpy as np
import time
import math as mth
from PIL import Image, ImageDraw, ImageFont
import scipy.io
from keras.models import Sequential
from keras import initializations
from keras.initializations import normal, identity
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import RMSprop, SGD, Adam
import random
import argparse
from scipy import ndimage
from keras.preprocessing import image
from sklearn.preprocessing import OneHotEncoder
from features import get_image_descriptor_for_image, obtain_compiled_vgg_16, vgg_16, \
get_conv_image_descriptor_for_image, calculate_all_initial_feature_maps
from parse_xml_annotations import *
from image_helper import *
from metrics import *
from visualization import *
from reinforcement import *
# Read number of epoch to be trained, to make checkpointing
parser = argparse.ArgumentParser(description='Epoch:')
parser.add_argument("-n", metavar='N', type=int, default=0)
args = parser.parse_args()
epochs_id = int(args.n)  # epoch index to resume from (0 = start fresh)
if __name__ == "__main__":
    ######## PATHS definition ########
    # path of PASCAL VOC 2012 or other database to use for training
    path_voc = "./VOC2012_train/"
    # path of other PASCAL VOC dataset, if you want to train with 2007 and 2012 train datasets
    # path_voc2 = "/gpfs/projects/bsc31/bsc31429/VOC2007_train/"
    # NOTE(review): path_voc2 is commented out, but the two_databases == 1
    # branches below reference it; enabling that mode as-is raises NameError.
    # path of where to store the models
    path_model = "../models_pool45_crops"
    # path of where to store visualizations of search sequences
    path_testing_folder = '../testing'
    # path of VGG16 weights
    path_vgg = "../vgg16_weights.h5"
    ######## PARAMETERS ########
    # Class category of PASCAL that the RL agent will be searching
    class_object = 1
    # Scale of subregion for the hierarchical regions (to deal with 2/4, 3/4)
    scale_subregion = float(3)/4
    scale_mask = float(1)/(scale_subregion*4)
    # 1 if you want to obtain visualizations of the search for objects
    bool_draw = 0
    # How many steps can run the agent until finding one object
    number_of_steps = 10
    # Boolean to indicate if you want to use the two databases, or just one
    two_databases = 0
    epochs = 50
    # Q-learning hyper-parameters: discount factor, initial exploration rate
    # (decayed once per epoch at the bottom of the loop) and minibatch size.
    gamma = 0.90
    epsilon = 1
    batch_size = 100
    # Pointer to where to store the last experience in the experience replay buffer,
    # actually there is a pointer for each PASCAL category, in case all categories
    # are trained at the same time
    h = np.zeros([20])
    # Each replay memory (one for each possible category) has a capacity of 100 experiences
    buffer_experience_replay = 1000
    # Init replay memories
    replay = [[] for i in range(20)]
    reward = 0
    ######## MODELS ########
    # NOTE(review): get_convolutional_vgg16_compiled is not in the explicit
    # import list at the top of the file; it presumably comes in through one
    # of the wildcard imports (features/reinforcement/...) -- verify.
    model_vgg = get_convolutional_vgg16_compiled(path_vgg)
    # If you want to train it from first epoch, first option is selected. Otherwise,
    # when making checkpointing, weights of last stored weights are loaded for a particular class object
    # NOTICE that for POOL45 model, this script only can train one class category at a time. We did this as
    # we are pre-computing features and storing them to RAM, and it is not possible to store features for all
    # objects of all classes
    if epochs_id == 0:
        model = get_q_network("0")
    else:
        # NOTE(review): checkpoint names are built as 'model<idx>h5' with no
        # dot before 'h5'; this matches the save_weights calls at the end of
        # the training loop so files are found, but '.h5' would be cleaner.
        model = get_q_network(path_model + '/model' + str(class_object-1) + 'h5')
    ######## LOAD IMAGE NAMES ########
    if two_databases == 1:
        image_names_1 = np.array([load_images_names_in_data_set('aeroplane_trainval', path_voc)])
        labels = load_images_labels_in_data_set('aeroplane_trainval', path_voc)
        image_names_1_2 = []
        for i in range(0, np.size(labels)):
            if labels[i] == "1":
                image_names_1_2.append(image_names_1[0][i])
        image_names_2 = np.array([load_images_names_in_data_set('aeroplane_trainval', path_voc2)])
        labels = load_images_labels_in_data_set('aeroplane_trainval', path_voc2)
        image_names_2_2 = []
        for i in range(0, np.size(labels)):
            if labels[i] == "1":
                image_names_2_2.append(image_names_2[0][i])
        image_names = np.concatenate([image_names_1_2, image_names_2_2], axis=1)
    else:
        image_names = np.array([load_images_names_in_data_set('aeroplane_trainval', path_voc)])
        # We check in the annotations which of the images actually contain the class category that we want
        # notice that as we want to train it for planes (class category 1) we input this subset of the database
        labels = load_images_labels_in_data_set('aeroplane_trainval', path_voc)
        image_names_2 = []
        for i in range(0, np.size(labels)):
            if labels[i] == "1":
                image_names_2.append(image_names[0][i])
        image_names = image_names_2
    ######## LOAD IMAGES ########
    if two_databases == 1:
        images1 = get_all_images_pool(image_names_1_2, path_voc)
        images2 = get_all_images_pool(image_names_2_2, path_voc2)
        images = images1 + images2
    else:
        images = get_all_images_pool(image_names, path_voc)
    ######## PRECOMPUTE ALL INITIAL FEATURE MAPS ########
    if two_databases == 1:
        initial_feature_maps1 = calculate_all_initial_feature_maps(images1, model_vgg, image_names_1_2)
        initial_feature_maps2 = calculate_all_initial_feature_maps(images2, model_vgg, image_names_2_2)
        initial_feature_maps = initial_feature_maps1 + initial_feature_maps2
    else:
        initial_feature_maps = calculate_all_initial_feature_maps(images, model_vgg, image_names)
    # Main training loop: one outer iteration per epoch, one inner per image.
    # NOTE(review): `epochs_batch` is never defined in this script (only
    # `epochs` is); as written this line raises NameError -- confirm whether
    # `epochs` was intended here.
    for i in range(epochs_id, epochs_id+epochs_batch):
        for j in range(np.size(image_names)):
            masked = 0
            not_finished = 1
            image = np.array(images[j])
            image_name = image_names[j]
            feature_maps = initial_feature_maps[j]
            annotation = get_bb_of_gt_from_pascal_xml_annotation(image_name, path_voc)
            if two_databases == 1:
                # NOTE(review): `image_names1_2` is a typo for `image_names_1_2`;
                # this branch raises NameError when two_databases == 1.
                if j < np.size(image_names1_2):
                    annotation = get_bb_of_gt_from_pascal_xml_annotation(image_name, path_voc)
                else:
                    annotation = get_bb_of_gt_from_pascal_xml_annotation(image_name, path_voc2)
            gt_masks = generate_bounding_box_from_annotation(annotation, image.shape)
            array_classes_gt_objects = get_ids_objects_from_annotation(annotation)
            region_mask = np.ones([image.shape[0], image.shape[1]])
            shape_gt_masks = np.shape(gt_masks)
            available_objects = np.ones(np.size(array_classes_gt_objects))
            # Iterate through all the objects in the ground truth of an image
            for k in range(np.size(array_classes_gt_objects)):
                # Init visualization
                background = Image.new('RGBA', (10000, 2500), (255, 255, 255, 255))
                draw = ImageDraw.Draw(background)
                # We check whether the ground truth object is of the target class category
                if array_classes_gt_objects[k] == class_object:
                    gt_mask = gt_masks[:, :, k]
                    step = 0
                    reward = 0
                    # this matrix stores the IoU of each object of the ground-truth, just in case
                    # the agent changes of observed object
                    last_matrix = np.zeros([np.size(array_classes_gt_objects)])
                    new_iou = 0
                    region_image = image
                    offset = (0, 0)
                    size_mask = (image.shape[0], image.shape[1])
                    original_shape = size_mask
                    old_region_mask = region_mask
                    region_mask = np.ones([image.shape[0], image.shape[1]])
                    # If the ground truth object is already masked by other already found masks, do not
                    # use it for training
                    if masked == 1:
                        for p in range(gt_masks.shape[2]):
                            overlap = calculate_overlapping(old_region_mask, gt_masks[:, :, p])
                            if overlap > 0.6:
                                available_objects[p] = 0
                    # We check if there are still objects to be found
                    if np.count_nonzero(available_objects) == 0:
                        not_finished = 0
                    # follow_iou function calculates at each time step which is the ground truth object
                    # that overlaps more with the visual region, so that we can calculate the rewards appropriately
                    iou, new_iou, last_matrix, index = follow_iou(gt_masks, region_mask, array_classes_gt_objects,
                                                                  class_object, last_matrix, available_objects)
                    new_iou = iou
                    gt_mask = gt_masks[:, :, index]
                    # init of the history vector that indicates past actions (6 actions * 4 steps in the memory)
                    history_vector = np.zeros([24])
                    region_coordinates = np.array([offset[0], offset[1], size_mask[0], size_mask[1]])
                    # calculate descriptor of region by ROI-pooling
                    region_descriptor = obtain_descriptor_from_feature_map(feature_maps, region_coordinates)
                    region_descriptor_2 = np.reshape(region_descriptor, (25088, 1))
                    # computation of the initial state
                    state = get_state_pool45(history_vector, region_descriptor_2)
                    # status indicates whether the agent is still alive and has not triggered the terminal action
                    status = 1
                    action = 0
                    if step > number_of_steps:
                        background = draw_sequences(i, k, step, action, draw, region_image, background,
                                                    path_testing_folder, iou, reward, gt_mask, region_mask, image_name,
                                                    bool_draw)
                        step += 1
                    while (status == 1) & (step < number_of_steps) & not_finished:
                        category = int(array_classes_gt_objects[k]-1)
                        # NOTE(review): `counter` is never initialised anywhere
                        # in this script; this line raises NameError on first use.
                        counter[category] += 1
                        qval = model.predict(state.T, batch_size=1)
                        background = draw_sequences(i, k, step, action, draw, region_image, background,
                                                    path_testing_folder, iou, reward, gt_mask, region_mask, image_name,
                                                    bool_draw)
                        step += 1
                        # we force terminal action in case actual IoU is higher than 0.5, to train faster the agent
                        if (i < 100) & (new_iou > 0.5):
                            action = 6
                        # epsilon-greedy policy
                        elif random.random() < epsilon:
                            action = np.random.randint(1, 7)
                        else:
                            action = (np.argmax(qval))+1
                        # terminal action
                        if action == 6:
                            iou, new_iou, last_matrix, index = follow_iou(gt_masks, region_mask,
                                                                          array_classes_gt_objects, class_object,
                                                                          last_matrix, available_objects)
                            gt_mask = gt_masks[:, :, index]
                            reward = get_reward_trigger(new_iou)
                            background = draw_sequences(i, k, step, action, draw, region_image, background,
                                                        path_testing_folder, iou, reward, gt_mask, region_mask,
                                                        image_name, bool_draw)
                            step += 1
                        # movement action, we perform the crop of the corresponding subregion
                        else:
                            region_mask = np.zeros(original_shape)
                            size_mask = (size_mask[0] * scale_subregion, size_mask[1] * scale_subregion)
                            if action == 1:
                                offset_aux = (0, 0)
                            elif action == 2:
                                offset_aux = (0, size_mask[1] * scale_mask)
                                offset = (offset[0], offset[1] + size_mask[1] * scale_mask)
                            elif action == 3:
                                offset_aux = (size_mask[0] * scale_mask, 0)
                                offset = (offset[0] + size_mask[0] * scale_mask, offset[1])
                            elif action == 4:
                                offset_aux = (size_mask[0] * scale_mask,
                                              size_mask[1] * scale_mask)
                                offset = (offset[0] + size_mask[0] * scale_mask,
                                          offset[1] + size_mask[1] * scale_mask)
                            elif action == 5:
                                offset_aux = (size_mask[0] * scale_mask / 2,
                                              size_mask[0] * scale_mask / 2)
                                offset = (offset[0] + size_mask[0] * scale_mask / 2,
                                          offset[1] + size_mask[0] * scale_mask / 2)
                            region_image = region_image[offset_aux[0]:offset_aux[0] + size_mask[0],
                                                        offset_aux[1]:offset_aux[1] + size_mask[1]]
                            region_mask[offset[0]:offset[0] + size_mask[0], offset[1]:offset[1] + size_mask[1]] = 1
                            # new_IoU=calculateIoU(region_mask,gt_mask)
                            iou, new_iou, last_matrix, index = follow_iou(gt_masks, region_mask,
                                                                          array_classes_gt_objects, class_object,
                                                                          last_matrix, available_objects)
                            gt_mask = gt_masks[:, :, index]
                            reward = get_reward_movement(iou, new_iou)
                            iou = new_iou
                        history_vector = update_history_vector(history_vector, action)
                        region_coordinates = np.array([offset[0], offset[1], size_mask[0], size_mask[1]])
                        region_descriptor = obtain_descriptor_from_feature_map(feature_maps, region_coordinates)
                        region_descriptor_2 = np.reshape(region_descriptor, (25088, 1))
                        new_state = get_state_pool45(history_vector, region_descriptor_2)
                        #Experience replay storage
                        if len(replay[category]) < buffer_experience_replay:
                            replay[category].append((state, action, reward, new_state))
                        else:
                            # Circular buffer: overwrite the oldest slot, then train.
                            if h[category] < (buffer_experience_replay-1):
                                h[category] += 1
                            else:
                                h[category] = 0
                            h_aux = h[category]
                            h_aux = int(h_aux)
                            replay[category][h_aux] = (state, action, reward, new_state)
                            minibatch = random.sample(replay[category], batch_size)
                            X_train = []
                            y_train = []
                            # we pick from the replay memory a sampled minibatch and generate the training samples
                            for memory in minibatch:
                                old_state, action, reward, new_state = memory
                                old_qval = model.predict(old_state.T, batch_size=1)
                                newQ = model.predict(new_state.T, batch_size=1)
                                maxQ = np.max(newQ)
                                y = np.zeros([1, 6])
                                y = old_qval
                                y = y.T
                                if action != 6: #non-terminal state
                                    update = (reward + (gamma * maxQ))
                                else: #terminal state
                                    update = reward
                                y[action-1] = update #target output
                                X_train.append(old_state)
                                y_train.append(y)
                            X_train = np.array(X_train)
                            y_train = np.array(y_train)
                            X_train = X_train.astype("float32")
                            y_train = y_train.astype("float32")
                            X_train = X_train[:, :, 0]
                            y_train = y_train[:, :, 0]
                            hist = model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1, verbose=0)
                        state = new_state
                        if action == 6:
                            status = 0
                            masked = 1
                            # we mask object found with ground-truth so that agent learns faster
                            image = mask_image_with_mean_background(gt_mask, image)
                        else:
                            masked = 0
                    available_objects[index] = 0
        # Decay exploration once per epoch, then checkpoint the Q-network.
        if epsilon > 0.1:
            epsilon -= 0.1
        string = path_model + '/model' + str(class_object-1) + '_epoch_' + str(i) + 'h5'
        string2 = path_model + '/model' + str(class_object-1) + 'h5'
        model.save_weights(string, overwrite=True)
        model.save_weights(string2, overwrite=True)
|
nilq/baby-python
|
python
|
import pytest
from typing import Any, Callable, Tuple
from aio_odoorpc_base.sync.common import login
from aio_odoorpc_base.protocols import T_HttpClient
import httpx
@pytest.fixture(scope='session')
def runbot_url_db_user_pwd(runbot_url_db_user_pwd) -> Tuple[str, str, str, str]:
    # Overrides the same-named fixture from an outer conftest, narrowing its
    # 5-tuple down to (jsonrpc_url, db, username, password).
    base_url, url_jsonrpc, db, username, password = runbot_url_db_user_pwd
    return url_jsonrpc, db, username, password
@pytest.fixture(scope='session')
def known_master_pwd_url_masterpwd(runbot_url_db_user_pwd) -> Tuple[str, str]:
    """Return (jsonrpc_url, master_password) for an instance with a known master pwd.

    NOTE(review): the runbot_url_db_user_pwd argument is unused; the URL below
    is hard-coded and must be refreshed manually (runbot instances expire).
    """
    # Add manually the info for an Odoo instance with known master password.
    # Usually the OCA Runbot runs its instances with no Master Password set.
    # Must visit https://runbot.odoo-community.org/runbot, find a running instance,
    # Copy its URL below, and then access /web/database/manager and set the password to
    # 'admin' or to whatever we return last/second in the tuple below
    return 'http://3475626-11-0-0b1a90.runbot1.odoo-community.org/jsonrpc', 'admin'
@pytest.fixture(scope='session')
def base_args_common(runbot_url_db_user_pwd) -> Callable[[Any], Tuple[Any, str, str, str, str]]:
    # NOTE(review): this fixture is re-defined further down in this module;
    # the later definition silently overrides this one at import time, so one
    # of the two should be removed.
    url, db, username, pwd = runbot_url_db_user_pwd
    def func(client):
        return client, url, db, username, pwd
    return func
@pytest.fixture(scope='session')
def base_args_obj(runbot_url_db_user_pwd) -> Callable[[Any], Tuple[Any, str, str, int, str]]:
    """Factory binding (client, url, db, uid, pwd) for object-endpoint tests.

    NOTE(review): performs a real network login once at fixture setup to
    resolve the uid; requires the runbot instance to be reachable.
    """
    url, db, username, pwd = runbot_url_db_user_pwd
    with httpx.Client() as http_client:
        uid = login(http_client=http_client, url=url, db=db, login=username, password=pwd)
    def func(client):
        return client, url, db, uid, pwd
    return func
@pytest.fixture(scope='session')
def base_args_db_no_masterpwd(runbot_url_db_user_pwd) -> Callable[[Any], Tuple[Any, str]]:
    """Factory pairing an HTTP client with the instance URL (db API, no master pwd)."""
    url = runbot_url_db_user_pwd[0]

    def _bind(client):
        return client, url

    return _bind
@pytest.fixture(scope='session')
def base_args_db_with_masterpwd(known_master_pwd_url_masterpwd) -> Callable[[Any], Tuple[Any, str, str]]:
    """Factory pairing an HTTP client with (url, master_password) for db-API tests."""
    url, master_pwd = known_master_pwd_url_masterpwd

    def _bind(client):
        return client, url, master_pwd

    return _bind
@pytest.fixture(scope='session')
def base_args_common(runbot_url_db_user_pwd) -> Callable[[Any], Tuple[Any, str, str, str, str]]:
    # NOTE(review): duplicate of the base_args_common fixture defined earlier
    # in this module; this later definition wins at import time. The two are
    # identical except for the pwd/password local name -- consolidate to one.
    url, db, username, password = runbot_url_db_user_pwd
    def func(client):
        return client, url, db, username, password
    return func
@pytest.fixture(scope='session')
def version() -> str:
    """Odoo server version the runbot instance is expected to run."""
    return '14.0'
@pytest.fixture(scope='session')
def http_client() -> httpx.Client:
    # Session-wide httpx client; the with-block guarantees the connection
    # pool is closed when the test session ends. (Annotation fixed from
    # '-> str': this generator fixture yields an httpx.Client.)
    with httpx.Client() as client:
        yield client
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import math
import glob
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing
from common import DataPreset, load_preset_from_file, save_plot
def plot_step(params):
    """Render histogram and phase-space snapshots for one simulation step.

    *params* is a dict with keys:
      name     -- preset name, used to reload intrinsic frequencies
      step     -- simulation step number (validated against the dump header)
      f_name   -- path of the per-step dump file
      dir_name -- output root; images go to <dir_name>/hist and <dir_name>/phase

    Takes a single dict argument so it can be used with multiprocessing.Pool.map.
    """
    name = params['name']
    step = params['step']
    f_name = params['f_name']
    dir_name = params['dir_name']
    # Reload the preset inside the worker instead of pickling it across.
    preset = load_preset_from_file(name)
    freq = preset.freq
    with open(f_name, 'r') as f:
        lines = f.readlines()
    # Header line: "<step> <N> <r> <mean>"; only step is checked, the rest is
    # parsed for completeness.
    step_, N, r, mean = (x for x in lines[0].split())
    step_ = int(step_)
    assert(step_ == step)
    N = int(N)
    r = float(r)
    mean = float(mean)
    phases = [float(x) for x in lines[1].split()]
    vel = [float(x) for x in lines[2].split()]
    print(step)
    # Figure 1: phase and velocity histograms.
    plt.figure()
    plt.suptitle('Step: ' + str(step))
    plt.subplot(2, 1, 1)
    plt.ylabel('Phase histogram')
    plt.hist(phases, bins=60, range=(0, 2.0 * math.pi))
    plt.xlim(0, 2.0 * math.pi)
    plt.subplot(2, 1, 2)
    plt.ylabel('Velocity histogram')
    # Fixed range keeps video frames comparable across steps.
    # (Renamed from `range`, which shadowed the builtin.)
    vel_range = (-30, 30)
    plt.hist(vel, bins=60, range=vel_range)
    plt.xlim(vel_range[0], vel_range[1])
    save_plot(os.path.join(dir_name, 'hist', str(step)))
    # Figure 2: phase vs intrinsic frequency scatter.
    plt.figure()
    plt.title('Step: ' + str(step))
    plt.xlabel('Phase')
    plt.ylabel('Intrinsic frequency')
    plt.xlim(0, 2.0 * math.pi)
    plt.ylim(-3, 3)
    plt.plot(phases, freq, marker='o', ls='')
    save_plot(os.path.join(dir_name, 'phase', str(step)))
def gen_video(dump_dir, subdir_name, framerate):
    """Assemble <dump_dir>/<subdir_name>/1.png..N.png into <dump_dir>/<subdir_name>.avi.

    Requires `avconv` on PATH. Frames must be numbered consecutively from 1.
    """
    pattern = os.path.join(dump_dir, subdir_name, '%d.png')
    out_video = os.path.join(dump_dir, subdir_name + '.avi')
    # TODO: ffmpeg
    # NOTE(review): shells out via os.system with a string-built command;
    # fine for these locally generated paths, but subprocess.run with an
    # argument list would be safer if paths ever came from user input.
    cmd = 'avconv -y -start_number 1 -framerate '+str(framerate)+' -i ' + pattern + ' -q:v 1 -vcodec mpeg4 ' + out_video
    #print('Executing: ' + cmd)
    os.system(cmd)
def gen_mean_and_r_plots(dir_name):
    """Plot the order-parameter, mean-phase and mean-velocity time series.

    Reads r.txt / mean.txt / mean_vel.txt from *dir_name*.
    NOTE(review): output paths are built from the module-level global `name`
    ('dump_' + name), not from *dir_name* -- this only works when called from
    the __main__ block of this script; consider passing the output dir in.
    """
    with open(os.path.join(dir_name, 'r.txt')) as f:
        r = [float(x) for x in f.read().split()]
    plt.figure()
    plt.xlabel('Steps')
    plt.ylabel('Order parameter')
    plt.xlim(0, len(r))
    plt.ylim(0, 1)  # Kuramoto order parameter r is in [0, 1]
    plt.plot(range(0, len(r)), r)
    save_plot(os.path.join('dump_' + name, 'r'))
    with open(os.path.join(dir_name, 'mean.txt')) as f:
        mean = [float(x) for x in f.read().split()]
    plt.figure()
    plt.xlabel('Steps')
    plt.ylabel('Mean phase')
    plt.xlim(0, len(mean))
    plt.ylim(0, 2.0 * math.pi)
    plt.plot(range(0, len(mean)), mean)
    save_plot(os.path.join('dump_' + name, 'mean'))
    with open(os.path.join(dir_name, 'mean_vel.txt')) as f:
        mean_vel = [float(x) for x in f.read().split()]
    plt.figure()
    plt.xlabel('Steps')
    plt.ylabel('Mean velocity')
    plt.xlim(0, len(mean_vel))
    plt.plot(range(0, len(mean_vel)), mean_vel)
    save_plot(os.path.join('dump_' + name, 'mean_vel'))
def remove_images(dir_name, remove_dir=True):
    """Delete every .png inside *dir_name*; optionally remove the directory too."""
    png_files = glob.glob(os.path.join(dir_name, '*.png'))
    for png in png_files:
        os.remove(png)
    if not remove_dir:
        return
    try:
        os.rmdir(dir_name)
    except OSError as e:
        print('Cannot remove directory: ' + dir_name + ' (' + str(e) + ')')
def remove_step_files(dump_dir):
    """Delete the per-step .txt dump files inside *dump_dir*."""
    txt_files = glob.glob(os.path.join(dump_dir, '*.txt'))
    for txt in txt_files:
        os.remove(txt)
if __name__ == '__main__':
    if len(sys.argv) <= 1:
        print('Usage: gen_plots.py name')
        sys.exit()
    name = sys.argv[1]
    dir_name = 'dump_' + name
    steps_dir = os.path.join(dir_name, 'steps')
    # read sorted list of states at specific steps
    step_files_all = glob.glob(os.path.join(steps_dir, '*.txt'))
    def filter_files(seq):
        # Skip the aggregate dumps (r.txt, mean.txt, mean_vel.txt); everything
        # else is a per-step state file named <step>.txt. The local `name`
        # here shadows the module-level `name` but does not modify it.
        for el in seq:
            name = os.path.basename(el).replace('.txt', '')
            if 'r' not in name and 'mean' not in name:
                yield el
    step_files = [f for f in filter_files(step_files_all)]
    input_files = [(int(os.path.basename(f).replace('.txt', '')), f) for f in step_files]
    input_files.sort(key=lambda x: x[0])
    # take every M-th snapshot
    M = 1
    input_files = input_files[::M]
    # NOTE(review): reads the series files from steps_dir but saves its plots
    # via the module-level `name` global -- see that function's note.
    gen_mean_and_r_plots(steps_dir)
    if 1:
        # Clear stale frames, then render all snapshots in parallel.
        # 'spawn' keeps matplotlib state out of the forked workers.
        remove_images(os.path.join(dir_name, 'hist'), remove_dir=False)
        remove_images(os.path.join(dir_name, 'phase'), remove_dir=False)
        ctx = multiprocessing.get_context('spawn')
        pool = ctx.Pool(multiprocessing.cpu_count())
        args = []
        for step, f_name in input_files:
            args.append({
                'name': name,
                'step': step,
                'f_name': f_name,
                'dir_name': dir_name
            })
        #print(args)
        pool.map(plot_step, args)
        pool.close()
    # rename step numbers to consequent integers
    # this is required for video generation step
    plot_num = 1
    for step, f_name in input_files:
        # print plot_num, step
        for x in ['hist', 'phase']:
            os.rename(
                os.path.join(dir_name, x, str(step) + '.png'),
                os.path.join(dir_name, x, str(plot_num) + '.png')
            )
        plot_num += 1
    framerate = 8
    gen_video(dir_name, 'hist', framerate)
    gen_video(dir_name, 'phase', framerate)
    remove_images(os.path.join(dir_name, 'hist'), remove_dir=True)
    remove_images(os.path.join(dir_name, 'phase'), remove_dir=True)
    #remove_step_files(dir_name)
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.