| id | content |
|---|---|
1698621
|
import factory
from colossus.apps.templates.models import EmailTemplate
class EmailTemplateFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: f'campaign_{n}')
class Meta:
model = EmailTemplate
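# A minimal usage sketch (assumes a configured Django test database):
# EmailTemplateFactory() persists an EmailTemplate whose name follows the
# sequence above, e.g. 'campaign_0' for the first instance built.
# template = EmailTemplateFactory()
# assert template.name.startswith('campaign_')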
|
1698663
|
from .instagram import Instagram
from .instagram_story import InstagramStory
from .weibo import Weibo
from .twitter import Twitter
__all__ = ['Instagram', 'Weibo', 'Twitter', 'InstagramStory']
|
1698668
|
from artemis.general.should_be_builtins import all_equal
__author__ = 'peter'
import numpy as np
def is_pareto_efficient_dumb(costs):
"""
:param costs: An (n_points, n_costs) array
:return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
"""
    is_efficient = np.ones(costs.shape[0], dtype=bool)
for i, c in enumerate(costs):
is_efficient[i] = np.all(np.any(costs>=c, axis=1))
return is_efficient
def is_pareto_efficient(costs):
"""
:param costs: An (n_points, n_costs) array
:return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
"""
    is_efficient = np.ones(costs.shape[0], dtype=bool)
for i, c in enumerate(costs):
if is_efficient[i]:
is_efficient[is_efficient] = np.any(costs[is_efficient]<=c, axis=1) # Remove dominated points
return is_efficient
def is_pareto_efficient_ixs(costs):
candidates = np.arange(costs.shape[0])
for i, c in enumerate(costs):
        ix = np.searchsorted(candidates, i)
        if ix < len(candidates) and candidates[ix] == i:  # If this element has not yet been eliminated
            candidates = candidates[np.any(costs[candidates] <= c, axis=1)]
    is_efficient = np.zeros(costs.shape[0], dtype=bool)
is_efficient[candidates] = True
return is_efficient
def find_pareto_ixs(cost_arrays):
"""
:param cost_arrays: A collection of nd-arrays representing a grid of costs for different indices.
:return: A tuple of indices which can be used to index the pareto-efficient points.
"""
assert all_equal([c.shape for c in cost_arrays])
    flat_ixs, = np.nonzero(is_pareto_efficient(np.reshape(cost_arrays, (len(cost_arrays), -1)).T))
    ixs = np.unravel_index(flat_ixs, cost_arrays[0].shape)
return ixs
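# Quick sanity check of is_pareto_efficient (costs are minimised; a point is
# dropped only when some other point is strictly better in every dimension):
# >>> is_pareto_efficient(np.array([[1., 2.], [2., 1.], [3., 3.]]))
# array([ True,  True, False])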
|
1698708
|
from MicroTokenizer import dag_tokenizer
dag_tokenizer.graph_builder.build_graph("二十四口交换机")
dag_tokenizer.graph_builder.write_graphml("output.graphml")
|
1698717
|
import pytest
from cryptotik import TheRock
from cryptotik.exceptions import APIError
from decimal import Decimal
import time
private = pytest.mark.skipif(
not pytest.config.getoption("--apikey"),
reason="needs --apikey option to run."
)
rock = TheRock(pytest.config.getoption("--apikey"),
pytest.config.getoption("--secret"))
def test_format_pair():
    '''test string formatting to match API expectations'''
assert rock.format_pair("eth-btc") == "ETHBTC"
def test_get_markets():
'''test get_markets'''
assert isinstance(rock.get_markets(), list)
assert "ethbtc" in rock.get_markets()
def test_get_market_ticker():
'''test get_market_ticker'''
ticker = rock.get_market_ticker("ETH-BTC")
assert isinstance(ticker, dict)
assert sorted(ticker.keys()) == ['ask', 'bid', 'close', 'date',
'fund_id', 'high', 'last', 'low',
'open', 'volume', 'volume_traded']
def test_get_market_orders():
'''test get_market_orderbook'''
market_orders = rock.get_market_orders("eth-btc")
assert isinstance(market_orders, dict)
assert isinstance(market_orders["asks"], list)
assert isinstance(market_orders["bids"], list)
def test_get_market_trade_history():
'''test get_market_trade_history'''
trade_history = rock.get_market_trade_history("eth-btc", 10)
assert isinstance(trade_history, list)
assert len(trade_history) == 10
assert sorted(trade_history[0].keys()) == ['amount', 'dark',
'date', 'fund_id', 'id', 'price', 'side']
@private
def test_get_balances(apikey, secret):
balances = rock.get_balances()
assert isinstance(balances, list)
@private
def test_get_deposit_address(apikey, secret):
time.sleep(1)
assert isinstance(rock.get_deposit_address("btc"), dict)
@private
def test_get_withdraw_history(apikey, secret):
time.sleep(1)
assert isinstance(rock.get_withdraw_history("btc"), list)
@private
def test_withdraw(apikey, secret):
time.sleep(1)
print('This is made to fail because of fake address')
with pytest.raises(APIError):
rock.withdraw("eth", 0.01, 'fake_address')
@private
def test_buy_limit(apikey, secret):
time.sleep(1)
print('This is made to fail because of small amount')
with pytest.raises(APIError):
rock.buy_limit("eth-btc", 0.0005, 0.0005)
@private
def test_sell_limit(apikey, secret):
time.sleep(1)
with pytest.raises(APIError):
rock.sell_limit("eth-btc", 0.0005, 0.0005)
@private
def test_cancel_order(apikey, secret):
time.sleep(1)
with pytest.raises(APIError):
rock.cancel_order('invalid', 'btc')
|
1698719
|
from __future__ import with_statement # this is to work with python2.5
from pyps import workspace
from os import remove
import pypips
filename="partialeval03"
pypips.delete_workspace(filename)
with workspace(filename+".c", parents=[], deleteOnClose=False,name=filename) as w:
m=w['main']
m.partial_eval()
m.display()
|
1698728
|
import Package._testcapi
if False:
from Package import EphemeralReference
select = EphemeralReference
select = 123
select = str
|
1698730
|
class Solution:
def countNumbersWithUniqueDigits(self, n: int) -> int:
choices = [9, 9, 8, 7, 6, 5, 4, 3, 2, 1]
currentSum, currentFactor = 1, 1
for i in range(n):
currentFactor *= choices[i]
currentSum += currentFactor
return currentSum
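# Reasoning sketch: a number with i+1 all-unique digits has 9 choices for the
# leading digit (1-9) and then 9, 8, 7, ... for each following digit (0-9 minus
# those already used), so f(n) = 1 + 9 + 9*9 + 9*9*8 + ...
# e.g. Solution().countNumbersWithUniqueDigits(2) == 91 (10 one-digit values
# including 0, plus 81 two-digit values with distinct digits).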
|
1698763
|
from typing import NamedTuple
from kfp.components import create_component_from_func, OutputPath
def load_dataset_using_huggingface(
dataset_name: str,
dataset_dict_path: OutputPath('HuggingFaceDatasetDict'),
) -> NamedTuple('Outputs', [
('splits', list),
]):
from datasets import load_dataset
dataset_dict = load_dataset(dataset_name)
dataset_dict.save_to_disk(dataset_dict_path)
splits = list(dataset_dict.keys())
return (splits,)
if __name__ == '__main__':
load_dataset_op = create_component_from_func(
load_dataset_using_huggingface,
base_image='python:3.9',
packages_to_install=['datasets==1.6.2'],
annotations={
'author': '<NAME> <<EMAIL>>',
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/datasets/HuggingFace/Load_dataset/component.yaml",
},
output_component_file='component.yaml',
)
|
1698808
|
from flask import Flask, abort, request
import sys, os
sys.path.append('/opt/ngsildAdapter/module')
import json
import pickle
import datetime
import io
import requests
from consts import constant
from LogerHandler import Handler
import logging
class Rest_client:
def __init__(self,url,payload):
self.url=url
self.payload=payload
self.headers=constant.header
logger_obj=Handler()
self.logger=logger_obj.get_logger()
# sending post request
def post_request(self):
self.logger.info("Sending post request")
response = requests.post(self.url, data=self.payload, headers=self.headers)
if response.ok:
self.logger.debug("post response is ok")
return response
else:
self.logger.debug("Response is None Entity may already exits")
return None
# sending patch request
def patch_request(self):
self.logger.info("Patch request is sending")
response = requests.patch(self.url, data=self.payload, headers=self.headers)
if response.ok:
self.logger.debug("Patch response is ok")
return response
else:
self.logger.info("Patch response is None Entity there may some problem in entity")
return None
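# Hypothetical usage sketch (the URL and payload below are placeholders, not
# part of this module): POST the entity, then fall back to PATCH if the
# broker reports it already exists.
# client = Rest_client("http://broker:1026/entities", json.dumps(entity))
# if client.post_request() is None:
#     response = client.patch_request()  # entity already exists, update it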
|
1698810
|
from packetbeat import BaseTest
import os
"""
Tests for reading the geoip files.
"""
class Test(BaseTest):
def test_geoip_config_disabled(self):
self.render_config_template(
http_ports=[8002],
http_real_ip_header="X-Forward-For",
http_send_all_headers=True,
geoip_paths=[]
)
self.run_packetbeat(pcap="http_realip.pcap", debug_selectors=["http"])
objs = self.read_output()
assert len(objs) == 1
o = objs[0]
assert o["real_ip"] == "192.168.3.11"
assert "client_location" not in o
def test_geoip_config_from_file(self):
self.render_config_template(
http_ports=[8002],
http_real_ip_header="X-Forward-For",
http_send_all_headers=True,
geoip_paths=["geoip_city.dat"]
)
# geoip_onrange.dat is generated from geoip_onerange.csv
# by using https://github.com/mteodoro/mmutils
self.copy_files(["geoip_city.dat"])
self.run_packetbeat(pcap="http_realip.pcap", debug_selectors=["http"])
objs = self.read_output()
assert len(objs) == 1
o = objs[0]
assert o["real_ip"] == "192.168.3.11"
assert o["client_location"] == "52.528503, 13.410904"
def test_geoip_symlink(self):
"""
Should be able to follow symlinks to GeoIP libs.
"""
self.render_config_template(
http_ports=[8002],
http_real_ip_header="X-Forward-For",
http_send_all_headers=True,
geoip_paths=["geoip.dat"]
)
self.copy_files(["geoip_city.dat"])
os.symlink("geoip_city.dat",
os.path.join(self.working_dir, "geoip.dat"))
self.run_packetbeat(pcap="http_realip.pcap", debug_selectors=["http"])
objs = self.read_output()
assert len(objs) == 1
o = objs[0]
assert o["real_ip"] == "192.168.3.11"
assert o["client_location"] == "52.528503, 13.410904"
|
1698849
|
from . import pairwise
from . import losses
from . import objects
from .losses import gmm
from . import phantoms
from . import utils
|
1698874
|
def process_input(file_contents):
lines_stripped = [line.strip() for line in file_contents]
octos = [int(i) for lines in lines_stripped for i in lines]
return octos
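# Note: the 10x10 grid is flattened row-major into a list of 100 ints, so in
# iterate_octos below an index's neighbours sit at offsets +/-1 (left/right),
# +/-10 (up/down) and +/-9, +/-11 (diagonals), guarded by the row and column
# checks.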
def iterate_octos(flashes,octos):
flashed = set()
flashed_temp = flashed.copy()
octos = [octo + 1 for octo in octos]
if any([octo == 10 for octo in octos]):
flash_locations = [i for i, e in enumerate(octos) if e >9]
flashed_temp.update(flash_locations)
while flashed != flashed_temp:
for octo in flashed_temp-flashed:
if octo > 9:
octos[octo-10] += 1
if octo % 10 != 0:
octos[octo-11] += 1
if (octo+1) % 10 != 0:
octos[octo-9] += 1
if octo < 90:
octos[octo+10] += 1
if octo % 10 != 0:
octos[octo+9] += 1
if (octo+1) % 10 != 0:
octos[octo+11] += 1
if octo % 10 != 0:
octos[octo-1] +=1
if (octo+1) % 10 != 0:
octos[octo+1] += 1
flashed = flashed_temp.copy()
if any([octo > 9 for octo in octos]):
flash_locations = [i for i, e in enumerate(octos) if e >9]
flashed_temp.update(flash_locations)
flashes += sum([1 if octo>9 else 0 for octo in octos])
octos = [octo if octo <10 else 0 for octo in octos]
return flashes,octos
def main():
with open("input.txt",'r') as octo_file:
octo_lines = octo_file.readlines()
octos = process_input(octo_lines)
octos_temp = octos.copy()
flashes = 0
#star 1
for i in range(100):
flashes,octos = iterate_octos(flashes,octos)
print(flashes)
#star 2
count = 0
octos = octos_temp
while octos != [0]*100:
flashes,octos = iterate_octos(flashes,octos)
count += 1
print(count)
main()
|
1698879
|
import numpy as np
def _make_gaussian(x_pts, y_pts, mfd, x_offset=0, y_offset=0):
x0 = (x_pts[-1]+x_pts[0])/2 + x_offset
y0 = (y_pts[-1]+y_pts[0])/2 + y_offset
xx, yy = np.meshgrid(x_pts, y_pts)
sigma = mfd * 0.707 / 2.355
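    # The constants appear to convert the 1/e^2 mode-field diameter to a
    # Gaussian sigma (mfd*0.707 is the 1/e intensity width, treated as a
    # FWHM and divided by 2.355 = FWHM/sigma); this reading is inferred,
    # not documented upstream.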
sigma_x = sigma
sigma_y = sigma
gaus_2d = np.exp(-((xx-x0)**2/(2*sigma_x**2)+
(yy-y0)**2/(2*sigma_y**2)))
gaus_2d /= np.sum(gaus_2d)
return gaus_2d
def _overlap(mode, gaussian):
mode_1 = mode
mode_2 = np.sqrt(gaussian) # square-root for E-field (not power)
eta = np.abs(np.sum(np.conj(mode_1)*mode_2))**2 / \
(np.sum(np.abs(mode_1)**2) * np.sum(np.abs(mode_2)**2))
return eta
def reflection(n1, n2):
'''
Calculate the power reflection at the interface
of two refractive index materials.
Args:
n1 (float): Refractive index of material 1.
n2 (float): Refractive index of material 2.
Returns:
        float: The fraction of reflected power.
'''
r = abs((n1-n2) / (n1+n2))**2
return r
def transmission(n1, n2):
'''
Calculate the power transmission at the interface
of two refractive index materials.
Args:
n1 (float): Refractive index of material 1.
n2 (float): Refractive index of material 2.
Returns:
        float: The fraction of transmitted power.
'''
return 1-reflection(n1, n2)
def coupling_efficiency(mode_solver, fibre_mfd,
fibre_offset_x=0, fibre_offset_y=0,
n_eff_fibre=1.441):
'''
Finds the coupling efficiency between a solved
fundamental mode and a fibre of given MFD.
Args:
mode_solver (_ModeSolver): Mode solver that
has found a fundamental mode.
fibre_mfd (float): The mode-field diameter
(MFD) of the fibre.
fibre_offset_x (float): Offset the fibre
from the centre position of the window
in x. Default is 0 (no offset).
fibre_offset_y (float): Offset the fibre
from the centre position of the window
in y. Default is 0 (no offset).
n_eff_fibre (float): The effective index
of the fibre mode. Default is 1.441.
Returns:
float: The power coupling efficiency.
'''
etas = []
gaus = _make_gaussian(mode_solver._structure.xc, mode_solver._structure.yc,
fibre_mfd, fibre_offset_x, fibre_offset_y)
for mode, n_eff in zip(mode_solver.modes, mode_solver.n_effs):
o = abs(_overlap(mode, gaus))
t = abs(transmission(n_eff, n_eff_fibre))
eta = o * t
etas.append(eta)
return etas
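# Worked example of the Fresnel helpers above: at an air/glass interface,
# reflection(1.0, 1.5) == ((1.0-1.5)/(1.0+1.5))**2 ~ 0.04, i.e. about 4% of
# the power is reflected and transmission(1.0, 1.5) ~ 0.96.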
|
1698897
|
import wx
import sys, os
kToolRunMode_Entry = 0
kToolRunMode_Master = 1
kToolRunMode_SblOta = 2
kBootSeqColor_Invalid = wx.Colour( 64, 64, 64 )
kBootSeqColor_Optional = wx.Colour( 166, 255, 255 )
kBootSeqColor_Active = wx.Colour( 147, 255, 174 )
kBootSeqColor_Failed = wx.Colour( 255, 0, 0 )
kConnectStage_Rom = 1
kConnectStage_Flashloader = 2
kConnectStage_ExternalMemory = 3
kConnectStage_Reset = 4
kMcuSeries_iMXRT = 'i.MXRT'
kMcuSeries_iMXRT10yy = 'RT10yy'
kMcuSeries_iMXRTxxx = 'RTxxx'
kMcuSeries_iMXRT11yy = 'RT11yy'
kMcuSeries_iMXRTyyyy = [kMcuSeries_iMXRT10yy, kMcuSeries_iMXRT11yy]
kMcuSeries_LPC = 'LPC'
kMcuSeries_Kinetis = 'Kinetis'
kMcuSeries_v1_0_0 = [kMcuSeries_iMXRT]
kMcuSeries_v2_0_0 = [kMcuSeries_iMXRT]
kMcuSeries_v3_0_0 = [kMcuSeries_iMXRT, kMcuSeries_LPC, kMcuSeries_Kinetis]
kMcuSeries_Latest = kMcuSeries_v3_0_0
kMcuDevice_iMXRT500 = 'i.MXRT5xx'
kMcuDevice_iMXRT500S = 'i.MXRT5xxS'
kMcuDevice_iMXRT600 = 'i.MXRT6xx'
kMcuDevice_iMXRT600S = 'i.MXRT6xxS'
kMcuDevice_iMXRTxxx = [kMcuDevice_iMXRT500, kMcuDevice_iMXRT600]
kMcuDevice_iMXRT1011 = 'i.MXRT1011'
kMcuDevice_iMXRT1015 = 'i.MXRT1015'
kMcuDevice_iMXRT102x = 'i.MXRT1021'
kMcuDevice_iMXRT1024 = 'i.MXRT1024 SIP'
kMcuDevice_iMXRT105x = 'i.MXRT105x'
kMcuDevice_iMXRT106x = 'i.MXRT106x'
kMcuDevice_iMXRT1064 = 'i.MXRT1064 SIP'
kMcuDevice_iMXRT10yy = [kMcuDevice_iMXRT1011, kMcuDevice_iMXRT1015, kMcuDevice_iMXRT102x, kMcuDevice_iMXRT1024, kMcuDevice_iMXRT105x, kMcuDevice_iMXRT106x, kMcuDevice_iMXRT1064]
kMcuDevice_iMXRT116x = 'i.MXRT116x'
kMcuDevice_iMXRT117x = 'i.MXRT117x'
kMcuDevice_iMXRT11yy = [kMcuDevice_iMXRT116x, kMcuDevice_iMXRT117x]
kMcuDevice_L0PB = 'MKL03Z'
kMcuDevice_L3KS_0 = 'MKL13Z'
kMcuDevice_L2KS_0 = 'MKL27Z'
kMcuDevice_L5K = 'MKL28Z'
kMcuDevice_L3KS_1 = 'MKL33Z'
kMcuDevice_L4KS_0 = 'MKL43Z'
kMcuDevice_MKL80 = 'MKL8xZ'
kMcuDevice_MT256P = 'MKE16Z'
kMcuDevice_MT512P = 'MKE18F'
kMcuDevice_MK28F_0 = 'MK27F'
kMcuDevice_MK28F_1 = 'MK28F'
kMcuDevice_MK80 = 'MK8xF'
kMcuDevice_L2KS_1 = 'K32L2'
kMcuDevice_L4KS_1 = 'K32L3'
kMcuDevice_L3KSs = [kMcuDevice_L3KS_0, kMcuDevice_L3KS_1]
kMcuDevice_MK28Fs = [kMcuDevice_MK28F_0, kMcuDevice_MK28F_1]
kMcuDevice_Kinetis = [kMcuDevice_L0PB, kMcuDevice_L3KS_0, kMcuDevice_L3KS_1, kMcuDevice_L2KS_0, kMcuDevice_L5K, kMcuDevice_L4KS_0, kMcuDevice_MKL80, kMcuDevice_MT256P, kMcuDevice_MT512P, kMcuDevice_MK28F_0, kMcuDevice_MK28F_1, kMcuDevice_MK80, kMcuDevice_L2KS_1, kMcuDevice_L4KS_1]
kMcuDevice_Niobe4mini_0 = 'LPC55(S)0x'
kMcuDevice_Niobe4mini_1 = 'LPC55(S)1x'
kMcuDevice_Niobe4_0 = 'LPC55(S)2x'
kMcuDevice_Niobe4_1 = 'LPC55S6x'
kMcuDevice_Niobe4minis = [kMcuDevice_Niobe4mini_0, kMcuDevice_Niobe4mini_1]
kMcuDevice_Niobe4s = [kMcuDevice_Niobe4_0, kMcuDevice_Niobe4_1]
kMcuDevice_LPC = [kMcuDevice_Niobe4mini_0, kMcuDevice_Niobe4mini_1, kMcuDevice_Niobe4_0, kMcuDevice_Niobe4_1]
kMcuDevice_iMXRT_v1_0_0 = [kMcuDevice_iMXRT102x, kMcuDevice_iMXRT105x, kMcuDevice_iMXRT106x, kMcuDevice_iMXRT1064]
kMcuDevice_iMXRT_v1_1_0 = [kMcuDevice_iMXRT1015, kMcuDevice_iMXRT102x, kMcuDevice_iMXRT105x, kMcuDevice_iMXRT106x, kMcuDevice_iMXRT1064]
kMcuDevice_iMXRT_v2_0_0 = [kMcuDevice_iMXRT500, kMcuDevice_iMXRT600, kMcuDevice_iMXRT1011, kMcuDevice_iMXRT1015, kMcuDevice_iMXRT102x, kMcuDevice_iMXRT105x, kMcuDevice_iMXRT106x, kMcuDevice_iMXRT1064, kMcuDevice_iMXRT117x]
kMcuDevice_iMXRT_v3_1_0 = [kMcuDevice_iMXRT500, kMcuDevice_iMXRT600, kMcuDevice_iMXRT1011, kMcuDevice_iMXRT1015, kMcuDevice_iMXRT102x, kMcuDevice_iMXRT1024, kMcuDevice_iMXRT105x, kMcuDevice_iMXRT106x, kMcuDevice_iMXRT1064, kMcuDevice_iMXRT117x]
kMcuDevice_iMXRT_v3_2_0 = [kMcuDevice_iMXRT500, kMcuDevice_iMXRT600, kMcuDevice_iMXRT1011, kMcuDevice_iMXRT1015, kMcuDevice_iMXRT102x, kMcuDevice_iMXRT1024, kMcuDevice_iMXRT105x, kMcuDevice_iMXRT106x, kMcuDevice_iMXRT1064, kMcuDevice_iMXRT116x, kMcuDevice_iMXRT117x]
kMcuDevice_Kinetis_v3_0_0 = [kMcuDevice_L0PB, kMcuDevice_L3KS_0, kMcuDevice_L2KS_0, kMcuDevice_L5K, kMcuDevice_L3KS_1, kMcuDevice_L4KS_0, kMcuDevice_MKL80, kMcuDevice_MT256P, kMcuDevice_MT512P, kMcuDevice_MK28F_0, kMcuDevice_MK28F_1, kMcuDevice_MK80]
kMcuDevice_Kinetis_v3_1_0 = [kMcuDevice_L0PB, kMcuDevice_L3KS_0, kMcuDevice_L2KS_0, kMcuDevice_L5K, kMcuDevice_L3KS_1, kMcuDevice_L4KS_0, kMcuDevice_MKL80, kMcuDevice_MT256P, kMcuDevice_MT512P, kMcuDevice_MK28F_0, kMcuDevice_MK28F_1, kMcuDevice_MK80, kMcuDevice_L2KS_1, kMcuDevice_L4KS_1]
kMcuDevice_LPC_v3_0_0 = [kMcuDevice_Niobe4mini_0, kMcuDevice_Niobe4mini_1, kMcuDevice_Niobe4_0, kMcuDevice_Niobe4_1]
kMcuDevice_iMXRT_Latest = kMcuDevice_iMXRT_v3_2_0
kMcuDevice_Kinetis_Latest = kMcuDevice_Kinetis_v3_1_0
kMcuDevice_LPC_Latest = kMcuDevice_LPC_v3_0_0
kBootDevice_XspiNor = 'XSPI NOR'
kFlexspiNorDevice_None = 'No'
kFlexspiNorDevice_FDCB = 'Complete_FDCB'
kFlexspiNorDevice_ISSI_IS25LP064A = 'ISSI_IS25LPxxxA_IS25WPxxxA'
kFlexspiNorDevice_ISSI_IS26KS512S = 'ISSI_IS26KSxxxS_IS26KLxxxS'
kFlexspiNorDevice_MXIC_MX25L12845G = 'Macronix_MX25Uxxx32F_MX25Lxxx45G'
kFlexspiNorDevice_MXIC_MX25UM51245G = 'Macronix_MX25UMxxx45G_MX66UMxxx45G_MX25LMxxx45G'
kFlexspiNorDevice_MXIC_MX25UM51345G = 'Macronix_MX25UM51345G'
kFlexspiNorDevice_MXIC_MX25UM51345G_2nd = 'Macronix_MX25UM51345G_2nd'
kFlexspiNorDevice_Micron_MT25QL128A = 'Micron_MT25QLxxxA'
kFlexspiNorDevice_Micron_MT35X = 'Micron_MT35XLxxxA_MT35XUxxxA'
kFlexspiNorDevice_Adesto_AT25SF128A = 'Adesto_AT25SFxxxA'
kFlexspiNorDevice_Adesto_ATXP032 = 'Adesto_ATXPxxx'
kFlexspiNorDevice_Cypress_S25FL128S = 'Cypress_S25FSxxxS_S25FLxxxS'
kFlexspiNorDevice_Cypress_S26KS512S = 'Cypress_S26KSxxxS_S26KLxxxS'
kFlexspiNorDevice_GigaDevice_GD25Q64C = 'GigaDevice_GD25QxxxC'
kFlexspiNorDevice_GigaDevice_GD25LB256E = 'GigaDevice_GD25LBxxxE'
kFlexspiNorDevice_GigaDevice_GD25LT256E = 'GigaDevice_GD25LTxxxE'
kFlexspiNorDevice_GigaDevice_GD25LX256E = 'GigaDevice_GD25LXxxxE'
kFlexspiNorDevice_Winbond_W25Q128JV = 'Winbond_W25QxxxJV'
kFlexspiNorDevice_Microchip_SST26VF064B = 'Microchip_SST26VFxxxB'
kFlexspiNorDevice_FudanMicro_FM25Q64 = 'FudanMicro_FM25Qxxx'
kFlexspiNorDevice_BoyaMicro_BY25Q16BS = 'BoyaMicro_BY25QxxxBS'
kFlexspiNorDevice_XMC_XM25QH64B = 'XMC_XM25QHxxxB_XM25QUxxxB'
kFlexspiNorDevice_XTXtech_X25Q64D = 'XTXtech_X25FxxxB_X25QxxxD'
kFlexspiNorDevice_Puya_P25Q64LE = 'Puya_P25QxxxLE_P25QxxxH_P25QxxxU'
kFlexspiNorDevice_AMIC_A25LQ64 = 'AMIC_A25LQxxx'
kFlexspiNorOpt0_ISSI_IS25LP064A = 0xc0000007
kFlexspiNorOpt0_ISSI_IS26KS512S = 0xc0233007
kFlexspiNorOpt0_MXIC_MX25L12845G = 0xc0000007
kFlexspiNorOpt0_MXIC_MX25UM51245G = 0xc0403037
kFlexspiNorOpt0_MXIC_MX25UM51345G = 0xc0403007
kFlexspiNorOpt0_MXIC_MX25UM51345G_2nd = 0xc1503051
kFlexspiNorOpt1_MXIC_MX25UM51345G_2nd = 0x20000014
kFlexspiNorOpt0_Micron_MT25QL128A = 0xc0000007
kFlexspiNorOpt0_Micron_MT35X = 0xC0603005
kFlexspiNorOpt0_Adesto_AT25SF128A = 0xc0000007
kFlexspiNorOpt0_Adesto_ATXP032 = 0xc0803007
kFlexspiNorOpt0_Cypress_S25FL128S = 0xc0000007
kFlexspiNorOpt0_Cypress_S26KS512S = 0xc0233007
kFlexspiNorOpt0_GigaDevice_GD25Q64C = 0xc0000406
kFlexspiNorOpt0_GigaDevice_GD25LB256E = 0xc0000007
kFlexspiNorOpt0_GigaDevice_GD25LT256E = 0xc0000008
kFlexspiNorOpt0_GigaDevice_GD25LX256E = 0xc0600008
kFlexspiNorOpt0_Winbond_W25Q128JV = 0xc0000207
kFlexspiNorOpt0_Microchip_SST26VF064B = 0xc0000005
kFlexspiNorOpt0_FudanMicro_FM25Q64 = 0xc0000205
kFlexspiNorOpt0_BoyaMicro_BY25Q16BS = 0xc0000405
kFlexspiNorOpt0_XMC_XM25QH64B = 0xc0000007
kFlexspiNorOpt0_XTXtech_X25Q64D = 0xc0000407
kFlexspiNorOpt0_Puya_P25Q64LE = 0xc0000405
kFlexspiNorOpt0_AMIC_A25LQ64 = 0xc0000105
kSemcNorDevice_None = 'No'
kSemcNorDevice_Micron_MT28EW128ABA = 'Micron_MT28EW128ABA'
kSemcNorDevice_Micron_MT28UG128ABA = 'Micron_MT28UG128ABA'
kSemcNorOpt0_Micron_MT28EW128ABA = 0xD0000600
kSemcNorOpt0_Micron_MT28UG128ABA = 0xD0000601
kAdvancedSettings_Tool = 0
kAdvancedSettings_Cert = 1
kAdvancedSettings_Sign = 2
kAdvancedSettings_BD = 3
kAdvancedSettings_OtpmkKey = 4
kAdvancedSettings_UserKeys = 5
kAppImageFormat_AutoDetect = 'Auto-detect image format'
kAppImageFormat_AxfFromMdk = '.out(axf) from Keil MDK'
kAppImageFormat_ElfFromIar = '.out(elf) from IAR EWARM'
kAppImageFormat_AxfFromMcux = '.out(axf) from MCUXpresso'
kAppImageFormat_ElfFromGcc = '.out(elf) from GCC ARM'
kAppImageFormat_MotoSrec = 'Motorola S-Records (.srec/.s19)'
kAppImageFormat_IntelHex = 'Intel Extended Hex (.hex)'
kAppImageFormat_RawBinary = 'Raw Binary (.bin)'
kSoundEffectFilename_Success = 'snd_success.wav'
kSoundEffectFilename_Failure = 'snd_failure.wav'
kSoundEffectFilename_Progress = 'snd_progress.wav'
kSoundEffectFilename_Restart = 'snd_restart.wav'
kMemBlockColor_Background = wx.WHITE
kMemBlockColor_Padding = wx.BLACK
kSecureBootSeqStep_AllInOne = 0
kSecureBootSeqStep_GenCert = 1
kSecureBootSeqStep_GenImage = 2
kSecureBootSeqStep_PrepHwCrypto = 3
kSecureBootSeqStep_ProgSrk = 4
kSecureBootSeqStep_OperHwCrypto = 5
kSecureBootSeqStep_FlashImage = 6
kSecureBootSeqStep_ProgDek = 7
kPageIndex_ImageGenerationSequence = 0
kPageIndex_ImageLoadingSequence = 1
kPageIndex_EfuseOperationUtility = 2
kPageIndex_BootDeviceMemory = 3
|
1698925
|
from typing import Any, Callable
import httpx
class TransportHandler:
def __init__(self, transport: httpx.BaseTransport) -> None:
self.transport = transport
def __call__(self, request: httpx.Request) -> httpx.Response:
if not isinstance(request.stream, httpx.SyncByteStream): # pragma: nocover
raise RuntimeError("Attempted to route an async request to a sync app.")
return self.transport.handle_request(request)
class AsyncTransportHandler:
def __init__(self, transport: httpx.AsyncBaseTransport) -> None:
self.transport = transport
async def __call__(self, request: httpx.Request) -> httpx.Response:
if not isinstance(request.stream, httpx.AsyncByteStream): # pragma: nocover
raise RuntimeError("Attempted to route a sync request to an async app.")
return await self.transport.handle_async_request(request)
class WSGIHandler(TransportHandler):
def __init__(self, app: Callable, **kwargs: Any) -> None:
super().__init__(httpx.WSGITransport(app=app, **kwargs))
class ASGIHandler(AsyncTransportHandler):
def __init__(self, app: Callable, **kwargs: Any) -> None:
super().__init__(httpx.ASGITransport(app=app, **kwargs))
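# A minimal sketch of plugging a handler into a client: httpx.MockTransport
# accepts any callable mapping a Request to a Response, which matches the
# handler interface above (the WSGI app below is a stand-in, not part of
# this module).
#
# def _demo_app(environ, start_response):
#     start_response("200 OK", [("content-type", "text/plain")])
#     return [b"ok"]
#
# client = httpx.Client(transport=httpx.MockTransport(WSGIHandler(_demo_app)))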
|
1698930
|
from models.base_model import BaseModel
from tests.unit.base_interface_test import BaseInterfaceTest
from marshmallow import Schema
class TestBaseModel(BaseInterfaceTest):
def setup(self):
self.object = BaseModel()
self.custom_404_msg = "Base not found"
self.schema = Schema
|
1699004
|
from typing import Dict
from UE4Parse.BinaryReader import BinaryStream
class FStringTable:
TableNamespace: str
    KeysToMetadata: Dict[str, str]
def __init__(self, reader: BinaryStream):
self.TableNamespace = reader.readFString()
self.KeysToMetadata = {}
NumEntries = reader.readInt32()
for i in range(NumEntries):
key = reader.readFString()
text = reader.readFString()
self.KeysToMetadata[key] = text
def GetValue(self):
return {
"Namespace": self.TableNamespace,
"Table": self.KeysToMetadata
}
|
1699006
|
def sort_priority(values, group):
def helper(x):
if x in group:
return 1, x
elif x == 200: # NOTE: 1
return 0, x
else:
return 2, x
values.sort(key=helper)
numbers = [8, 3, 1, 5, 4, 7, 6, 200]
group = {3, 5, 2, 7}
sort_priority(numbers, group)
print(numbers) # [200, 3, 5, 7, 1, 4, 6, 8]
# NOTE: 1 Python has specific rules for comparing tuples. It first compares
# items in index zero, then index one, then index two, and so on.
# This is why the return value from the helper closure causes the
# sort order to have two distinct groups.
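# For example, (0, 200) < (1, 3) < (2, 1): the first items decide the
# ordering before the second items are ever compared.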
"""Similar but with A class"""
numbers = [8, 3, 1, 5, 4, 7, 6, 200]
group = {3, 5, 2, 7}
class Sorter(object):
def __init__(self, group):
self.group = group
self.found = False
def __call__(self, x):
if x in self.group:
self.found = True
return (0, x)
return (1, x)
sorter = Sorter(group)
numbers.sort(key=sorter)
assert sorter.found is True
|
1699022
|
from sklearn.externals.joblib import dump
from sklearn.pipeline import _name_estimators, Pipeline as _Pipeline
from sklearn.utils.metaestimators import if_delegate_has_method
__all__ = ['make_pipeline', 'Pipeline']
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators. This is a shorthand for
the Pipeline constructor; it does not require, and does not permit, naming
the estimators. Instead, their names will be set to the lowercase of their
types automatically.
Parameters
----------
*steps : list
List of estimators.
Returns
-------
p : Pipeline
Examples
--------
>>> from kenchi.outlier_detection import MiniBatchKMeans
>>> from kenchi.pipeline import make_pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> scaler = StandardScaler()
>>> det = MiniBatchKMeans()
>>> pipeline = make_pipeline(scaler, det)
"""
return Pipeline(_name_estimators(steps))
class Pipeline(_Pipeline):
"""Pipeline of transforms with a final estimator.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
memory : instance of joblib.Memory or string, default None
Used to cache the fitted transformers of the pipeline. By default, no
caching is performed. If a string is given, it is the path to the
caching directory. Enabling caching triggers a clone of the
transformers before fitting. Therefore, the transformer instance given
to the pipeline cannot be inspected directly. Use the attribute
``named_steps`` or ``steps`` to inspect estimators within the pipeline.
Caching the transformers is advantageous when fitting is time
consuming.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> import numpy as np
>>> from kenchi.outlier_detection import MiniBatchKMeans
>>> from kenchi.pipeline import Pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> X = np.array([
... [0., 0.], [1., 1.], [2., 0.], [3., -1.], [4., 0.],
... [5., 1.], [6., 0.], [7., -1.], [8., 0.], [1000., 1.]
... ])
>>> det = MiniBatchKMeans(n_clusters=1, random_state=0)
>>> scaler = StandardScaler()
>>> pipeline = Pipeline([('scaler', scaler), ('det', det)])
>>> pipeline.fit_predict(X)
array([ 1, 1, 1, 1, 1, 1, 1, 1, 1, -1])
"""
def __len__(self):
return len(self.named_steps)
def __getitem__(self, key):
return self.named_steps[key]
def __iter__(self):
return iter(self.named_steps)
def _pre_transform(self, X):
if X is None:
return X
for _, transform in self.steps[:-1]:
if transform is not None:
X = transform.transform(X)
return X
@if_delegate_has_method(delegate='_final_estimator')
def score_samples(self, X=None):
"""Apply transforms, and compute the opposite of the anomaly score for
each sample with the final estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features), default None
Data. If None, compute the opposite of the anomaly score for each
training sample.
Returns
-------
score_samples : array-like of shape (n_samples,)
Opposite of the anomaly score for each sample.
"""
return -self.anomaly_score(X)
@if_delegate_has_method(delegate='_final_estimator')
def anomaly_score(self, X=None, **kwargs):
"""Apply transforms, and compute the anomaly score for each sample with
the final estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data. If None, compute the anomaly score for each training samples.
normalize : bool, default False
If True, return the normalized anomaly score.
Returns
-------
anomaly_score : array-like of shape (n_samples,)
Anomaly score for each sample.
"""
X = self._pre_transform(X)
return self._final_estimator.anomaly_score(X, **kwargs)
@if_delegate_has_method(delegate='_final_estimator')
def featurewise_anomaly_score(self, X):
"""Apply transforms, and compute the feature-wise anomaly scores for
each sample with the final estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data.
Returns
-------
anomaly_score : array-like of shape (n_samples, n_features)
Feature-wise anomaly scores for each sample.
"""
X = self._pre_transform(X)
return self._final_estimator.featurewise_anomaly_score(X)
def to_pickle(self, filename, **kwargs):
"""Persist a pipeline object.
Parameters
----------
filename : str or pathlib.Path
Path of the file in which it is to be stored.
kwargs : dict
Other keywords passed to ``sklearn.externals.joblib.dump``.
Returns
-------
filenames : list
List of file names in which the data is stored.
"""
return dump(self, filename, **kwargs)
@if_delegate_has_method(delegate='_final_estimator')
def plot_anomaly_score(self, X=None, **kwargs):
"""Apply transoforms, and plot the anomaly score for each sample with
the final estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features), default None
Data. If None, plot the anomaly score for each training samples.
normalize : bool, default False
If True, plot the normalized anomaly score.
ax : matplotlib Axes, default None
Target axes instance.
bins : int, str or array-like, default 'auto'
Number of hist bins.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
hist : bool, default True
If True, plot a histogram of anomaly scores.
kde : bool, default True
If True, plot a gaussian kernel density estimate.
title : string, default None
Axes title. To disable, pass None.
xlabel : string, default 'Samples'
X axis title label. To disable, pass None.
xlim : tuple, default None
Tuple passed to ``ax.xlim``.
ylabel : string, default 'Anomaly score'
Y axis title label. To disable, pass None.
ylim : tuple, default None
Tuple passed to ``ax.ylim``.
**kwargs : dict
Other keywords passed to ``ax.plot``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
"""
X = self._pre_transform(X)
return self._final_estimator.plot_anomaly_score(X, **kwargs)
@if_delegate_has_method(delegate='_final_estimator')
def plot_roc_curve(self, X, y, **kwargs):
"""Apply transoforms, and plot the Receiver Operating Characteristic
(ROC) curve with the final estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data.
y : array-like of shape (n_samples,)
Labels.
ax : matplotlib Axes, default None
Target axes instance.
figsize: tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
title : string, default 'ROC curve'
Axes title. To disable, pass None.
xlabel : string, default 'FPR'
X axis title label. To disable, pass None.
ylabel : string, default 'TPR'
Y axis title label. To disable, pass None.
**kwargs : dict
Other keywords passed to ``ax.plot``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
"""
X = self._pre_transform(X)
return self._final_estimator.plot_roc_curve(X, y, **kwargs)
@property
def plot_graphical_model(self):
"""Apply transforms, and plot the Gaussian Graphical Model (GGM) with
the final estimator.
Parameters
----------
ax : matplotlib Axes, default None
Target axes instance.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
random_state : int, RandomState instance, default None
Seed of the pseudo random number generator.
title : string, default 'GGM (n_clusters, n_features, n_isolates)'
Axes title. To disable, pass None.
**kwargs : dict
Other keywords passed to ``nx.draw_networkx``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
"""
return self._final_estimator.plot_graphical_model
@property
def plot_partial_corrcoef(self):
"""Apply transforms, and plot the partial correlation coefficient
matrix with the final estimator.
Parameters
----------
ax : matplotlib Axes, default None
Target axes instance.
cbar : bool, default True.
If True, draw a colorbar.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
title : string, default 'Partial correlation'
Axes title. To disable, pass None.
**kwargs : dict
Other keywords passed to ``ax.pcolormesh``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
"""
return self._final_estimator.plot_partial_corrcoef
|
1699054
|
import tensorflow as tf
import numpy as np
from bregman.suite import *
k = 4
segment_size = 50 # out of 24,526
max_iterations = 100
chromo = tf.placeholder(tf.float32)
max_freqs = tf.argmax(chromo, 0)
def get_chromogram(audio_file):
F = Chromagram(audio_file, nfft=16384, wfft=8192, nhop=2205)
return F.X
def get_dataset(sess, audio_file):
chromo_data = get_chromogram(audio_file)
print('chromo_data', np.shape(chromo_data))
chromo_length = np.shape(chromo_data)[1]
xs = []
    for i in range(chromo_length // segment_size):
chromo_segment = chromo_data[:, i*segment_size:(i+1)*segment_size]
x = extract_feature_vector(sess, chromo_segment)
if len(xs) == 0:
xs = x
else:
xs = np.vstack((xs, x))
return xs
def initial_cluster_centroids(X, k):
return X[0:k, :]
# op
def assign_cluster(X, centroids):
expanded_vectors = tf.expand_dims(X, 0)
expanded_centroids = tf.expand_dims(centroids, 1)
    distances = tf.reduce_sum(tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2)
mins = tf.argmin(distances, 0)
return mins
# op
def recompute_centroids(X, Y):
sums = tf.unsorted_segment_sum(X, Y, k)
counts = tf.unsorted_segment_sum(tf.ones_like(X), Y, k)
return sums / counts
def extract_feature_vector(sess, chromo_data):
num_features, num_samples = np.shape(chromo_data)
freq_vals = sess.run(max_freqs, feed_dict={chromo: chromo_data})
hist, bins = np.histogram(freq_vals, bins=range(num_features + 1))
return hist.astype(float) / num_samples
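# The feature vector is a histogram of the dominant chroma bin (typically one
# of 12 pitch classes) per time slice, normalised by the number of slices, so
# harmonically similar segments land close together for k-means.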
with tf.Session() as sess:
X = get_dataset(sess, 'sysk.wav')
print(np.shape(X))
centroids = initial_cluster_centroids(X, k)
i, converged = 0, False
# prev_Y = None
while not converged and i < max_iterations:
i += 1
Y = assign_cluster(X, centroids)
# if prev_Y == Y:
# converged = True
# break
# prev_Y = Y
centroids = sess.run(recompute_centroids(X, Y))
if i % 50 == 0:
print('iteration', i)
segments = sess.run(Y)
for i in range(len(segments)):
seconds = (i * segment_size) / float(10)
min, sec = divmod(seconds, 60)
time_str = str(min) + 'm ' + str(sec) + 's'
print(time_str, segments[i])
|
1699126
|
from django.test import TestCase
from councils.models import Council
from pollingstations.tests.factories import (
PollingStationFactory,
PollingDistrictFactory,
)
class TestPollingStationFactory(TestCase):
def test_polling_station_factory(self):
station = PollingStationFactory()
self.assertIsInstance(station.council, Council)
self.assertRegex(station.internal_council_id, r"^PS-\d+$")
class TestPollingDistrictFactory(TestCase):
def test_polling_district_factory(self):
station = PollingDistrictFactory()
self.assertIsInstance(station.council, Council)
self.assertRegex(station.internal_council_id, r"^PD-\d+$")
|
1699146
|
import unittest
import pymagsac
# TODO: add real tests
class MainTest(unittest.TestCase):
def test_fundamental(self):
self.assertEqual(2, 2)
def test_homography(self):
        self.assertEqual(2, 2)
if __name__ == '__main__':
unittest.main()
|
1699162
|
from raco.cpp_datalog_utils import emitCode
from raco.backends.cpp import CCAlgebra
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
if __name__ == "__main__":
query = sys.argv[1]
    print(query)
name = sys.argv[2]
    print(name)
plan = ""
if len(sys.argv) > 3:
plan = sys.argv[3]
lst = []
alg = CCAlgebra
if plan: lst.append(plan)
if name: lst.append(name)
emitCode(query, "_".join(lst), alg, plan)
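# Invocation sketch (the file name and query below are illustrative):
#   python run_datalog.py "A(s) :- R(s, t)" my_query [physical_plan]
# argv[1] is the Datalog query, argv[2] names the emitted code, and the
# optional argv[3] selects a physical plan.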
|
1699203
|
from notion.block.media import BreadcrumbBlock
from smoke_tests.conftest import assert_block_is_okay
def test_media_block(notion):
pass
# TODO: fix
# block = notion.root_page.children.add_new(MediaBlock)
# assert_block_is_okay(**locals(), type='media')
def test_breadcrumb_block(notion):
block = notion.root_page.children.add_new(BreadcrumbBlock)
assert_block_is_okay(**locals(), type="breadcrumb")
|
1699219
|
from compat.functools import wraps as _wraps
from sys import exc_info as _exc_info
class _from(object):
def __init__(self, EXPR):
self.iterator = iter(EXPR)
def supergenerator(genfunct):
"""Implements PEP 380. Use as:
@supergenerator
def genfunct(*args):
try:
sent1 = (yield val1)
            ...
retval = yield _from(iterator)
...
except Exception, e:
# caller did generator.throw
pass
finally:
pass # closing
"""
@_wraps(genfunct)
def wrapper(*args, **kwargs):
gen = genfunct(*args, **kwargs)
try:
# if first poll of gen raises StopIteration
# or any other Exception, we propagate
item = gen.next()
# OUTER loop
while True:
# yield _from(EXPR)
# semantics based on PEP 380, Revised**12, 19 April
if isinstance(item, _from):
_i = item.iterator
try:
# first poll of the subiterator
_y = _i.next()
except StopIteration, _e:
# subiterator exhausted on first poll
# extract return value
_r = _e.args if _e.args else (None,)
else:
# INNER loop
while True:
try:
# yield what the subiterator did
_s = (yield _y)
except GeneratorExit, _e:
# close the subiterator if possible
try:
_close = _i.close
except AttributeError:
pass
else:
_close()
# finally clause will gen.close()
raise _e
except BaseException:
# caller did wrapper.throw
_x = _exc_info()
# throw to the subiterator if possible
try:
_throw = _i.throw
except AttributeError:
# doesn't attempt to close _i?
# if gen raises StopIteration
# or any other Exception, we propagate
item = gen.throw(*_x)
_r = None
# fall through to INTERSECTION A
# then to OUTER loop
pass
else:
try:
_y = _throw(*_x)
except StopIteration, _e:
_r = _e.args if _e.args else (None,)
# fall through to INTERSECTION A
# then to INTERSECTION B
pass
else:
# restart INNER loop
continue
# INTERSECTION A
# restart OUTER loop or proceed to B?
if _r is None: break
else:
try:
# re-poll the subiterator
if _s is None:
_y = _i.next()
else:
_y = _i.send(_s)
except StopIteration, _e:
# subiterator is exhausted
# extract return value
_r = _e.args if _e.args else (None,)
# fall through to INTERSECTION B
pass
else:
# restart INNER loop
continue
# INTERSECTION B
# done yielding from subiterator
# send retvalue to gen
# if gen raises StopIteration
# or any other Exception, we propagate
item = gen.send(_r[0])
# restart OUTER loop
break
# traditional yield from gen
else:
try:
sent = (yield item)
except Exception:
# caller did wrapper.throw
_x = _exc_info()
# if gen raises StopIteration
# or any other Exception, we propagate
item = gen.throw(*_x)
else:
# if gen raises StopIteration
# or any other Exception, we propagate
item = gen.send(sent)
# end of OUTER loop, restart it
pass
finally:
# gen raised Exception
# or caller did wrapper.close()
# or wrapper was garbage collected
gen.close()
return wrapper
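# A hypothetical Python 2 usage sketch (names are illustrative): the wrapper
# yields each item of the subiterator, then resumes genfunct with the
# subiterator's "return value" (the StopIteration args), emulating
# `result = yield from sub()`.
# @supergenerator
# def outer():
#     total = yield _from(iter([1, 2, 3]))  # yields 1, 2, 3; total is None
#     yield total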
|
1699232
|
import envi.symstore.resolver as e_sym_resolv
import unittest
class SymResolverTests(unittest.TestCase):
def setUp(self):
self.symres = e_sym_resolv.SymbolResolver()
def test_sym(self):
fname, base, size, width = ('foo', 0x1234, 0x5678, 4)
fres = e_sym_resolv.FileSymbol(fname, base, size, width)
# check Symbol vars
assert(fres.name == fname)
assert(fres.value == base)
assert(fres.size == size)
#assert(fres.fname == fname)
# check SymbolResolver vars
assert(fres.width == width)
assert(fres.baseaddr == base)
def test_resolver_fname_none(self):
with self.assertRaises(Exception):
fresolv = e_sym_resolv.FileSymbol(None, 0, 0, 4)
def test_resolver(self):
fname, base, size, width = ('foo', 0x0, 0, 4)
fres = e_sym_resolv.FileSymbol(fname, base, size, width)
self.symres.addSymbol(fres)
self.assertIn(fname, self.symres.symobjsbyname)
self.assertIsInstance(self.symres.symobjsbyname[fname], e_sym_resolv.SymbolResolver)
fnsym = e_sym_resolv.FunctionSymbol('TestFooFuncSym', 0x123456, size=4, fname=fname)
self.symres.addSymbol(fnsym)
secsym = e_sym_resolv.SectionSymbol('TestFooSectionSym', 0x123456, size=400, fname=fname)
self.symres.addSymbol(secsym)
self.symres.delSymbol(fnsym)
self.assertNotIn(fnsym, self.symres.symobjsbyname)
def test_getSymByAddr_exact_false(self):
'''
test for bug that was replacing symresolvers with symbol objects.
'''
fname, base, size, width = ('foo', 0x0, 0, 4)
fres = e_sym_resolv.FileSymbol(fname, base, size, width)
self.symres.addSymbol(fres)
# now symobjsbyname['foo'] = FileSymbol
symcache = [(0x16001, 0, 'alpha', e_sym_resolv.SYMSTOR_SYM_SYMBOL)]
self.symres.impSymCache(symcache, symfname='foo')
# look up the FileSymbol as a 'Symbol' (not a resolver)
# this causes the symobjsbyname to smash in a Symbol instead of a
# SymbolResolver (symobjsbyname['foo'] = Symbol)
sym = self.symres.getSymByAddr(0x10, exact=False)
assert(sym is not None)
# now symobjsbyname['foo'] = Symbol
# force return of a sym that has a fname set
# causes a .get on symobjsbyname, retrieves Symbol, but then tries
# to cache since it should be a resolver since we have an fname.
# boom.
sym = self.symres.getSymByAddr(0x16010, exact=False)
assert(sym is not None)
#def test_import
|
1699255
|
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.views.main import ChangeList
from django.core.paginator import Paginator
from django.template import Context, Template
from django.test import TransactionTestCase
from models import (Child, Parent, Genre, Band, Musician, Group, Quartet,
Membership, ChordsMusician, ChordsBand, Invitation)
class ChangeListTests(TransactionTestCase):
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_query_set() shouldn't
overwrite a custom select_related provided by ModelAdmin.queryset().
"""
m = ChildAdmin(Child, admin.site)
cl = ChangeList(MockRequest(), Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_editable, m)
self.assertEqual(cl.query_set.query.select_related, {'parent': {'name': {}}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = MockRequest()
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
row_html = '<tbody><tr class="row1"><td class="action-checkbox"><input type="checkbox" class="action-select" value="%d" name="_selected_action" /></td><th><a href="%d/">name</a></th><td class="nowrap">(None)</td></tr></tbody>' % (new_child.id, new_child.id)
self.assertFalse(table_output.find(row_html) == -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = MockRequest()
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
row_html = '<tbody><tr class="row1"><td class="action-checkbox"><input type="checkbox" class="action-select" value="%d" name="_selected_action" /></td><th><a href="%d/">name</a></th><td class="nowrap">Parent object</td></tr></tbody>' % (new_child.id, new_child.id)
self.assertFalse(table_output.find(row_html) == -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
Also a regression test for #13599, verifies that hidden fields
when list_editable is enabled are rendered in a div outside the
table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = MockRequest()
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
self.assertFalse(table_output.find(hiddenfields_div) == -1,
'Failed to find hidden fields in: %s' % table_output)
# make sure that list editable fields are rendered in divs correctly
editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
self.assertFalse('<td>%s</td>' % editable_name_field == -1,
'Failed to find "name" list_editable field in: %s' % table_output)
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
new_child = Child.objects.create(name='name %s' % i, parent=new_parent)
request = MockRequest()
request.GET['p'] = -1 # Anything outside range
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda: \
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_editable, m))
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
new_child = Child.objects.create(name='name %s' % i, parent=new_parent)
request = MockRequest()
m = ChildAdmin(Child, admin.site)
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
m.paginator = CustomPaginator
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_editable, m)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, admin.site)
cl = ChangeList(MockFilteredRequestA(blues.pk), Band, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_editable, m)
cl.get_results(MockFilteredRequestA(blues.pk))
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, admin.site)
cl = ChangeList(MockFilteredRequestB(lead.pk), Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_editable, m)
cl.get_results(MockFilteredRequestB(lead.pk))
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. Model managed in the
        admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, admin.site)
cl = ChangeList(MockFilteredRequestB(lead.pk), Quartet, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_editable, m)
cl.get_results(MockFilteredRequestB(lead.pk))
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, admin.site)
cl = ChangeList(MockFilteredRequestB(lead.pk), ChordsBand, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_editable, m)
cl.get_results(MockFilteredRequestB(lead.pk))
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_pagination(self):
"""
Regression tests for #12893: Pagination in admins changelist doesn't
use queryset set by modeladmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = MockRequest()
# Test default queryset
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_editable, m)
self.assertEqual(cl.query_set.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(cl.paginator.page_range, [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_editable, m)
self.assertEqual(cl.query_set.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(cl.paginator.page_range, [1, 2, 3])
class ChildAdmin(admin.ModelAdmin):
list_display = ['name', 'parent']
list_per_page = 10
def queryset(self, request):
return super(ChildAdmin, self).queryset(request).select_related("parent__name")
class FilteredChildAdmin(admin.ModelAdmin):
list_display = ['name', 'parent']
list_per_page = 10
def queryset(self, request):
return super(FilteredChildAdmin, self).queryset(request).filter(
name__contains='filtered')
class MockRequest(object):
GET = {}
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(queryset, 5, orphans=2,
allow_empty_first_page=allow_empty_first_page)
class BandAdmin(admin.ModelAdmin):
list_filter = ['genres']
class GroupAdmin(admin.ModelAdmin):
list_filter = ['members']
class QuartetAdmin(admin.ModelAdmin):
list_filter = ['members']
class ChordsBandAdmin(admin.ModelAdmin):
list_filter = ['members']
class MockFilteredRequestA(object):
def __init__(self, pk):
self.GET = { 'genres' : pk }
class MockFilteredRequestB(object):
def __init__(self, pk):
self.GET = { 'members': pk }
|
1699281
|
from django.conf import settings
from django.http import HttpResponseServerError
from django.template import loader
from django.template.context import RequestContext
from django.views.decorators.csrf import requires_csrf_token
@requires_csrf_token
def server_error(request, template_name='500.html', **param_dict):
# You need to create a 500.html template.
t = loader.get_template(template_name)
return HttpResponseServerError(t.render(RequestContext(
request,
{
'MEDIA_URL': settings.MEDIA_URL,
'STATIC_URL': settings.STATIC_URL,
'request': request,
},
)))
def page_not_found(request, template_name='404.html', exception=None):
response = server_error(
request,
template_name=template_name,
exception=exception
)
response.status_code = 404
return response
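# Usage sketch: these views are typically wired up through Django's handler
# hooks in the project's urls.py (the module path below is illustrative):
# handler404 = 'myproject.views.page_not_found'
# handler500 = 'myproject.views.server_error'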
|
1699299
|
import unittest
from abpytools import ChainCollection, SequenceAlignment
from . import read_sequence_from_file
class SequenceAlignmentTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ab_collection_1 = ChainCollection.load_from_file(path='./tests/Data/sequence_alignment_seq_1.json',
show_progressbar=False, verbose=False)
cls.ab_collection_2 = ChainCollection.load_from_file(path='./tests/Data/sequence_alignment_seq_2.json',
show_progressbar=False, verbose=False)
cls.seq2_aligned = read_sequence_from_file('./tests/Data/BLOSUM62_aligned_sequence')
def test_sequence_alignment_target(self):
sa = SequenceAlignment(self.ab_collection_1[0], self.ab_collection_2, 'needleman_wunsch', 'BLOSUM62')
self.assertEqual(sa.target_sequence, self.ab_collection_1[0].sequence)
def test_needleman_wunsch_score_BLOSUMXX(self):
test_cases = [
("BLOSUM45", 513),
("BLOSUM62", 426),
("BLOSUM80", 452)
]
for x, output in test_cases:
with self.subTest(name=x):
sa = SequenceAlignment(self.ab_collection_1[0], self.ab_collection_2, 'needleman_wunsch', x)
sa.align_sequences()
self.assertEqual(sa.score[self.ab_collection_2.names[0]], output)
def test_needleman_wunsch_aligned_sequences(self):
sa = SequenceAlignment(self.ab_collection_1[0], self.ab_collection_2, 'needleman_wunsch', 'BLOSUM62')
sa.align_sequences()
self.assertEqual(sa.aligned_sequences[self.ab_collection_2.names[0]], self.seq2_aligned)
def test_alignment_exception_1(self):
# catch exception when substitution matrix is not known
self.assertRaises(ValueError, SequenceAlignment, self.ab_collection_1[0],
self.ab_collection_2, 'needleman_wunsch', 'foo')
def test_alignment_exception_2(self):
# catch exception when algorithm is not known
sa = SequenceAlignment(self.ab_collection_1[0], self.ab_collection_2, 'foo', 'BLOSUM62')
self.assertRaises(ValueError, sa.align_sequences)
def test_alignment_exception_3(self):
# catch error when user tries to print alignment before calling .align_sequences()
sa = SequenceAlignment(self.ab_collection_1[0], self.ab_collection_2, 'foo', 'BLOSUM62')
self.assertRaises(ValueError, sa.print_aligned_sequences)
def test_alignment_indel_sign(self):
# if indel is positive the user receives a warning and indel will be set to -indel
sa = SequenceAlignment(self.ab_collection_1[0], self.ab_collection_2, 'needleman_wunsch', 'BLOSUM62')
sa.align_sequences(indel=-10)
sa_score_1 = sa.score
sa = SequenceAlignment(self.ab_collection_1[0], self.ab_collection_2, 'needleman_wunsch', 'BLOSUM62')
sa.align_sequences(indel=10)
sa_score_2 = sa.score
        # since the second indel is positive it is flipped to its negative,
        # so the algorithm runs again with the same parameters
        # and produces the same scores
self.assertEqual(sa_score_1, sa_score_2)
def test_alignment_print_string(self):
sa = SequenceAlignment(self.ab_collection_1[0], self.ab_collection_2, 'needleman_wunsch', 'BLOSUM62')
sa.align_sequences()
self.assertEqual(len(sa._aligned_sequences_string()), 3)
|
1699309
|
from .codecs import PackGeneticBits
def create_dask_codec_plugin():
from dask.distributed import WorkerPlugin
class DaskCodecPlugin(WorkerPlugin):
def setup(self, worker):
from numcodecs.registry import register_codec
register_codec(PackGeneticBits)
return DaskCodecPlugin()
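# Hedged usage sketch (not in the original file): the plugin is meant to be
# registered on a live cluster so that every worker registers the
# PackGeneticBits codec; connecting Client() to a local scheduler is an
# assumption here.
#
#     from dask.distributed import Client
#     client = Client()
#     client.register_worker_plugin(create_dask_codec_plugin())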
|
1699325
|
import asyncio
import apigpio_fork
import config as conf
from utils import topics, PubSub
from functools import partial
from coroutines import Base
@apigpio_fork.Debounce()
def on_input_forward_to_hub(gpio, level, tick, hub, topic, pi):
hub.publish(topics.TOPIC_BUTTON_PROXY, (gpio, level, tick, topic))
class PigpioPins(Base):
def __init__(self, hub, pi):
super().__init__(hub)
self.pi = pi
@asyncio.coroutine
def subscribe_to_pins(self, pi, hub):
maintained_switch_pins = {
conf.brew_button_pin: topics.TOPIC_COFFEE_BUTTON,
conf.steam_button_pin: topics.TOPIC_STEAM_BUTTON,
conf.water_button_pin: topics.TOPIC_WATER_BUTTON,
}
momentary_switch_pins = {
conf.red_button_pin: topics.TOPIC_RED_BUTTON,
conf.blue_button_pin: topics.TOPIC_BLUE_BUTTON,
conf.white_button_pin: topics.TOPIC_WHITE_BUTTON,
}
for pin in maintained_switch_pins:
yield from pi.set_mode(pin, apigpio_fork.INPUT)
yield from pi.set_pull_up_down(pin, apigpio_fork.PUD_DOWN)
yield from pi.set_glitch_filter(pin, 5000)
yield from pi.add_callback(pin, edge=apigpio_fork.EITHER_EDGE, func=partial(on_input_forward_to_hub, hub=hub, topic=maintained_switch_pins[pin], pi=pi))
for pin in momentary_switch_pins:
yield from pi.set_mode(pin, apigpio_fork.INPUT)
yield from pi.set_pull_up_down(pin, apigpio_fork.PUD_DOWN)
yield from pi.set_glitch_filter(pin, 5000)
yield from pi.add_callback(pin, edge=apigpio_fork.RISING_EDGE, func=partial(on_input_forward_to_hub, hub=hub, topic=momentary_switch_pins[pin], pi=pi))
async def maybe_update_button(self):
with PubSub.Subscription(self.hub, topics.TOPIC_BUTTON_PROXY) as queue:
while True:
(gpio, level, tick, topic) = await queue.get()
await asyncio.sleep(0.005)
new_level = await self.pi.read(gpio)
if new_level == level:
self.hub.publish(topic, level == 1)
else:
print("False button "+topic)
def futures(self, loop):
return [self.maybe_update_button(), self.subscribe_to_pins(self.pi, self.hub)]
|
1699340
|
import json, os, csv
# DATE = "2018-05-10"
DATE = None
REVERTED_ONLY = True
JOURNAL_CSV = "/home/richard/tmp/doaj/history/journals.csv"
OUT_DIR = "/home/richard/tmp/doaj/history/workspace/"
def history_records_analyse(source, out_dir, reverted_only=False, date=None):
ids = set()
if date is not None:
with open(source, "r", encoding="utf-8") as f:
reader = csv.reader(f)
for row in reader:
if row[1] == date:
ids.add(row[0])
records = {}
with open(source, "r", encoding="utf-8") as f:
reader = csv.reader(f)
next(reader)
for row in reader:
if date is None or row[0] in ids:
if row[0] not in records:
records[row[0]] = []
records[row[0]].append(row[:3])
count = 1
out = os.path.join(out_dir, "owners.csv")
with open(out, "w", encoding="utf-8") as o:
writer = csv.writer(o)
writer.writerow(["count", "id", "reverted", "change history"])
writer.writerow([])
for id, rows in records.items():
rows = sorted(rows, key=lambda x: x[1])
owners = []
lastOwner = False
ownerTransitions = []
flagged = False
for row in rows:
with open(row[2], "r", encoding="utf-8") as f:
data = json.load(f)
owner = data.get("admin", {}).get("owner")
if len(ownerTransitions) == 0 or owner != ownerTransitions[-1]:
ownerTransitions.append(owner)
if owner != lastOwner and row[1] == date:
flagged = True
owners.append((row[1], owner))
lastOwner = owner
out_row_1 = [o[0] for o in owners]
out_row_2 = [o[1] for o in owners]
owner_set = set(out_row_2)
if date is None: flagged = True
if len(owner_set) > 1 and flagged:
                reverted = False
                # an owner counts as "reverted" if it reappears later in the
                # transition sequence after at least one different owner
                for i in range(len(ownerTransitions)):
                    o = ownerTransitions[i]
                    if i + 2 < len(ownerTransitions):
                        for j in range(i + 2, len(ownerTransitions)):
                            comp = ownerTransitions[j]
                            if o == comp:
                                reverted = True
                                break
                    if reverted:
                        break
                if not reverted_only or reverted:
writer.writerow([count, id, "X" if reverted else ""] + out_row_1)
writer.writerow(["", "", "X" if reverted else ""] + out_row_2)
writer.writerow([])
count += 1
if __name__ == "__main__":
history_records_analyse(JOURNAL_CSV, OUT_DIR, REVERTED_ONLY, DATE)
|
1699364
|
import os
import sys
import argparse
import torch
import numpy as np
from collections import OrderedDict
from dataloaders.datasetGen import SplitGen, PermutedGen
from utils.utils import factory
import random
def run(args):
if not os.path.exists('outputs'):
os.mkdir('outputs')
# Prepare dataloaders
# train_dataset, val_dataset = dataloaders.base.__dict__[args.dataset](args.dataroot, args.train_aug)
train_dataset, val_dataset = factory(
'dataloaders', 'base', args.dataset)(args.dataroot, args.train_aug)
if args.n_permutation > 0:
train_dataset_splits, val_dataset_splits, task_output_space = PermutedGen(train_dataset, val_dataset,
args.n_permutation,
remap_class=not args.no_class_remap)
else:
train_dataset_splits, val_dataset_splits, task_output_space = SplitGen(train_dataset, val_dataset,
first_split_sz=args.first_split_size,
other_split_sz=args.other_split_size,
rand_split=args.rand_split,
remap_class=not args.no_class_remap)
# Prepare the Agent (model)
dataset_name = args.dataset + \
'_{}'.format(args.first_split_size) + \
'_{}'.format(args.other_split_size)
agent_config = {'model_lr': args.model_lr, 'momentum': args.momentum, 'model_weight_decay': args.model_weight_decay,
'schedule': args.schedule,
'model_type': args.model_type, 'model_name': args.model_name, 'model_weights': args.model_weights,
'out_dim': {'All': args.force_out_dim} if args.force_out_dim > 0 else task_output_space,
'model_optimizer': args.model_optimizer,
'print_freq': args.print_freq,
'gpu': True if args.gpuid[0] >= 0 else False,
'with_head': args.with_head,
'reset_model_opt': args.reset_model_opt,
'reg_coef': args.reg_coef,
'head_lr': args.head_lr,
'svd_lr': args.svd_lr,
'bn_lr': args.bn_lr,
'svd_thres': args.svd_thres,
'gamma': args.gamma,
'dataset_name': dataset_name
}
# agent = agents.__dict__[args.agent_type].__dict__[args.agent_name](agent_config)
agent = factory('svd_agent', args.agent_type,
args.agent_name)(agent_config)
# Decide split ordering
task_names = sorted(list(task_output_space.keys()), key=int)
print('Task order:', task_names)
acc_table = OrderedDict()
acc_table_train = OrderedDict()
for i in range(len(task_names)):
train_name = task_names[i]
print('======================', train_name,
'=======================')
train_loader = torch.utils.data.DataLoader(train_dataset_splits[train_name],
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers)
val_loader = torch.utils.data.DataLoader(val_dataset_splits[train_name],
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
if args.incremental_class:
agent.add_valid_output_dim(task_output_space[train_name])
# Learn
agent.train_task(train_loader, val_loader)
torch.cuda.empty_cache()
# Evaluate
acc_table[train_name] = OrderedDict()
acc_table_train[train_name] = OrderedDict()
for j in range(i + 1):
val_name = task_names[j]
print('validation split name:', val_name)
val_data = val_dataset_splits[val_name] if not args.eval_on_train_set else train_dataset_splits[
val_name]
val_loader = torch.utils.data.DataLoader(val_data,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
acc_table[val_name][train_name] = agent.validation(val_loader)
print("**************************************************")
print('training split name:', val_name)
train_data = train_dataset_splits[val_name] if not args.eval_on_train_set else train_dataset_splits[
val_name]
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
acc_table_train[val_name][train_name] = agent.validation(
train_loader)
print("**************************************************")
return acc_table, task_names
def get_args(argv):
# This function prepares the variables shared across demo.py
parser = argparse.ArgumentParser()
parser.add_argument('--gpuid', nargs="+", type=int, default=[1],
help="The list of gpuid, ex:--gpuid 3 1. Negative value means cpu-only")
parser.add_argument('--model_type', type=str, default='resnet',
help="The type (mlp|lenet|vgg|resnet) of backbone network")
parser.add_argument('--model_name', type=str, default='resnet18',
help="The name of actual model for the backbone")
parser.add_argument('--force_out_dim', type=int, default=0,
help="Set 0 to let the task decide the required output dimension")
parser.add_argument('--agent_type', type=str,
default='svd_based', help="The type (filename) of agent")
parser.add_argument('--agent_name', type=str,
default='svd_based', help="The class name of agent")
parser.add_argument('--model_optimizer', type=str, default='Adam',
help="SGD|Adam|RMSprop|amsgrad|Adadelta|Adagrad|Adamax ...")
parser.add_argument('--dataroot', type=str, default='../data',
help="The root folder of dataset or downloaded data")
parser.add_argument('--dataset', type=str, default='CIFAR100',
help="MNIST(default)|CIFAR10|CIFAR100")
parser.add_argument('--n_permutation', type=int, default=0,
help="Enable permuted tests when >0")
parser.add_argument('--first_split_size', type=int, default=10)
parser.add_argument('--other_split_size', type=int, default=10)
    parser.add_argument('--no_class_remap', dest='no_class_remap', default=False, action='store_true',
                        help="Skip remapping a class subset to a contiguous range. Ex: [2,5,6 ...] -> [0,1,2 ...]")
    parser.add_argument('--train_aug', dest='train_aug', default=True, action='store_false',
                        help="Disable data augmentation during training (enabled by default)")
parser.add_argument('--rand_split', dest='rand_split', default=False, action='store_true',
help="Randomize the classes in splits")
parser.add_argument('--workers', type=int, default=0,
help="#Thread for dataloader")
parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--model_lr', type=float,
                        default=0.0005, help="Model learning rate")
    parser.add_argument('--head_lr', type=float,
                        default=0.0005, help="Head learning rate")
    parser.add_argument('--svd_lr', type=float, default=0.0005,
                        help="SVD-layer learning rate")
    parser.add_argument('--bn_lr', type=float, default=0.0005,
                        help="BatchNorm learning rate")
parser.add_argument('--gamma', type=float, default=0.5,
help="Learning rate decay")
    parser.add_argument('--svd_thres', type=float,
                        default=1.0, help='Threshold for how many eigenvectors to retain')
parser.add_argument('--momentum', type=float, default=0)
parser.add_argument('--model_weight_decay',
type=float, default=1e-5) # 1e-4
    parser.add_argument('--schedule', nargs="+", type=int, default=[1],
                        help="Epoch milestones for the training schedule")
parser.add_argument('--print_freq', type=float, default=10,
help="Print the log at every x iteration")
parser.add_argument('--model_weights', type=str, default=None,
help="The path to the file for the model weights (*.pth).")
parser.add_argument('--eval_on_train_set', dest='eval_on_train_set', default=False, action='store_true',
help="Force the evaluation on train set")
parser.add_argument('--offline_training', dest='offline_training', default=False, action='store_true',
help="Non-incremental learning by make all data available in one batch. For measuring the upperbound performance.")
parser.add_argument('--repeat', type=int, default=1,
help="Repeat the experiment N times")
parser.add_argument('--incremental_class', dest='incremental_class', default=False, action='store_true',
help="The number of output node in the single-headed model increases along with new categories.")
    parser.add_argument('--with_head', dest='with_head', default=False, action='store_true',
                        help="Whether to constrain the head")
    parser.add_argument('--reset_model_opt', dest='reset_model_opt', default=True, action='store_true',
                        help="Whether to reset the model optimizer at the start of each task")
parser.add_argument('--reg_coef', type=float, default=100,
help="The coefficient for ewc reg")
args = parser.parse_args(argv)
return args
if __name__ == '__main__':
args = get_args(sys.argv[1:])
avg_final_acc = np.zeros(args.repeat)
final_bwt = np.zeros(args.repeat)
torch.cuda.set_device(args.gpuid[0])
# Seed
    SEED = 0
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    random.seed(SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    if torch.cuda.is_available():
        torch.cuda.manual_seed(SEED)
for r in range(args.repeat):
# Run the experiment
acc_table, task_names = run(args)
print(acc_table)
# Calculate average performance across tasks
# Customize this part for a different performance metric
avg_acc_history = [0] * len(task_names)
bwt_history = [0] * len(task_names)
for i in range(len(task_names)):
train_name = task_names[i]
cls_acc_sum = 0
backward_transfer = 0
for j in range(i + 1):
val_name = task_names[j]
cls_acc_sum += acc_table[val_name][train_name]
backward_transfer += acc_table[val_name][train_name] - \
acc_table[val_name][val_name]
avg_acc_history[i] = cls_acc_sum / (i + 1)
bwt_history[i] = backward_transfer / i if i > 0 else 0
print('Task', train_name, 'average acc:', avg_acc_history[i])
print('Task', train_name, 'backward transfer:', bwt_history[i])
# Gather the final avg accuracy
avg_final_acc[r] = avg_acc_history[-1]
final_bwt[r] = bwt_history[-1]
# Print the summary so far
print('===Summary of experiment repeats:',
r + 1, '/', args.repeat, '===')
print('The last avg acc of all repeats:', avg_final_acc)
print('The last bwt of all repeats:', final_bwt)
print('acc mean:', avg_final_acc.mean(),
'acc std:', avg_final_acc.std())
print('bwt mean:', final_bwt.mean(), 'bwt std:', final_bwt.std())
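# Hedged usage sketch (not in the original file): assuming this script is the
# demo.py referenced in get_args(), a minimal CPU-only run over the defaults
# could look like the following; the flags shown are illustrative.
#
#     python demo.py --gpuid -1 --dataset CIFAR100 \
#         --first_split_size 10 --other_split_size 10 --schedule 1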
|
1699372
|
import unittest
from CoolProp.CoolProp import PropsSI
import CoolProp
import numpy as np
def test_input_types():
for Fluid in ['Water']:
for Tvals in [0.5 * PropsSI(Fluid, 'Tmin') + 0.5 * PropsSI(Fluid, 'Tcrit'),
[PropsSI(Fluid, 'Tmin') + 1e-5, PropsSI(Fluid, 'Tcrit') - 1e-5],
np.linspace(PropsSI(Fluid, 'Tmin') + 1e-5, PropsSI(Fluid, 'Tcrit') - 1e-5, 30)
]:
yield check_type, Fluid, Tvals
def check_type(fluid, Tvals):
PropsSI('P', 'T', Tvals, 'Q', 0, fluid)
class PropsFailures(unittest.TestCase):
def testUnmatchedLengths(self):
self.assertRaises(TypeError, PropsSI, 'P', 'T', [280, 290, 300], 'Q', [0, 1], 'R134a')
def testMatrix(self):
self.assertRaises(TypeError, PropsSI, 'P', 'T', np.array([280, 290, 300, 280, 290, 300]).reshape(2, 3), 'Q', np.array([0, 0.5, 1, 0.0, 0.5, 1]).reshape(2, 3), 'R134a')
if __name__ == '__main__':
import nose
nose.runmodule()
|
1699380
|
import unittest
from coba.environments import SimulatedInteraction, LoggedInteraction
class SimulatedInteraction_Tests(unittest.TestCase):
def test_context_none(self):
interaction = SimulatedInteraction(None, (1,2,3), rewards=(4,5,6))
self.assertEqual(None, interaction.context)
def test_context_str(self):
interaction = SimulatedInteraction("A", (1,2,3), rewards=(4,5,6))
self.assertEqual("A", interaction.context)
def test_context_dense(self):
interaction = SimulatedInteraction((1,2,3), (1,2,3), rewards=(4,5,6))
self.assertEqual((1,2,3), interaction.context)
def test_context_dense_2(self):
interaction = SimulatedInteraction((1,2,3,(0,0,1)), (1,2,3), rewards=(4,5,6))
self.assertEqual((1,2,3,(0,0,1)), interaction.context)
def test_context_sparse_dict(self):
interaction = SimulatedInteraction({1:0}, (1,2,3), rewards=(4,5,6))
self.assertEqual({1:0}, interaction.context)
def test_actions_correct_1(self) -> None:
self.assertSequenceEqual([1,2], SimulatedInteraction(None, [1,2], rewards=[1,2]).actions)
def test_actions_correct_2(self) -> None:
self.assertSequenceEqual(["A","B"], SimulatedInteraction(None, ["A","B"], rewards=[1,2]).actions)
def test_actions_correct_3(self) -> None:
self.assertSequenceEqual([(1,2), (3,4)], SimulatedInteraction(None, [(1,2), (3,4)], rewards=[1,2]).actions)
def test_custom_rewards(self):
interaction = SimulatedInteraction((1,2), (1,2,3), rewards=[4,5,6])
self.assertEqual((1,2), interaction.context)
self.assertCountEqual((1,2,3), interaction.actions)
self.assertEqual({"rewards":[4,5,6] }, interaction.kwargs)
def test_reveals_results(self):
interaction = SimulatedInteraction((1,2), (1,2,3), reveals=[(1,2),(3,4),(5,6)],rewards=[4,5,6])
self.assertEqual((1,2), interaction.context)
self.assertCountEqual((1,2,3), interaction.actions)
self.assertEqual({"reveals":[(1,2),(3,4),(5,6)], "rewards":[4,5,6]}, interaction.kwargs)
class LoggedInteraction_Tests(unittest.TestCase):
def test_simple_with_actions(self):
interaction = LoggedInteraction(1, 2, reward=3, probability=.2, actions=[1,2,3])
self.assertEqual(1, interaction.context)
self.assertEqual(2, interaction.action)
self.assertEqual(3, interaction.kwargs["reward"])
self.assertEqual(.2, interaction.kwargs["probability"])
self.assertEqual([1,2,3], interaction.kwargs["actions"])
def test_simple_sans_actions(self):
interaction = LoggedInteraction(1, 2, reward=3, probability=.2)
self.assertEqual(1, interaction.context)
self.assertEqual(2, interaction.action)
self.assertEqual(3, interaction.kwargs["reward"])
self.assertEqual(.2, interaction.kwargs["probability"])
if __name__ == '__main__':
unittest.main()
|
1699399
|
from runners.python import Submission
class XavierSubmission(Submission):
    def run(self, s):
        # Knot-tying round over a 256-element circular list (this matches the
        # single-round hash from Advent of Code 2017 day 10 part 1).
        s = list(map(int, s.split(",")))
        c_list = list(range(256))
        cur_pos = 0
        skip = 0
        for length in s:
            # copy out the (possibly wrapping) sublist, reverse it, write back
            buffer = [c_list[(cur_pos + i) % 256] for i in range(length)]
            buffer = buffer[::-1]
            for i in range(length):
                c_list[(cur_pos + i) % 256] = buffer[i]
            # advance by the length plus the ever-increasing skip size
            cur_pos = (cur_pos + length + skip) % 256
            skip += 1
        return c_list[0] * c_list[1]
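# Hedged usage sketch (not in the original file): the Submission runner is
# assumed to pass the puzzle input string to run(); the result is the product
# of the first two list entries after all knots are tied.
#
#     XavierSubmission().run("83,0,193,1")  # input string is illustrative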
|
1699403
|
from securityheaders.models import SecurityHeader
from securityheaders.models.xpcdp import XPermittedCrossDomainPoliciesDirective
from securityheaders.models.annotations import *
@description('This header defines a cross-domain policy for clients such as Adobe Flash Player or Adobe Acrobat.')
@headername('x-permitted-cross-domain-policies')
@headerref('https://www.adobe.com/devnet/adobe-media-server/articles/cross-domain-xml-for-streaming.html')
class XPermittedCrossDomainPolicies(SecurityHeader):
directive = XPermittedCrossDomainPoliciesDirective
def __init__(self, unparsedstring):
SecurityHeader.__init__(self, unparsedstring, XPermittedCrossDomainPolicies.directive)
    def is_none(self):
        try:
            if self.parsedstring:
                return XPermittedCrossDomainPoliciesDirective.NONE in self.keys()
            return False
        except Exception:
            # any parsing problem is treated as "policy is not none"
            return False
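# Hedged usage sketch (not in the original file): constructing the header
# object from a raw value and checking for the 'none' policy; the exact parse
# behaviour depends on SecurityHeader internals.
#
#     header = XPermittedCrossDomainPolicies('none')
#     header.is_none()  # expected True when the value parses to NONE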
|
1699453
|
import logging
from typing import Any, Dict, List, Optional
import torch
from overrides import overrides
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward
from allennlp.modules import TimeDistributed
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from scirex.metrics.thresholding_f1_metric import BinaryThresholdF1
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class SpanClassifier(Model):
def __init__(
self,
vocab: Vocabulary,
mention_feedforward: FeedForward,
label_namespace: str,
n_features: int = 0,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super(SpanClassifier, self).__init__(vocab, regularizer)
self._label_namespace = label_namespace
self._mention_feedforward = TimeDistributed(mention_feedforward)
self._ner_scorer = TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim() + n_features, 1))
self._ner_metrics = BinaryThresholdF1()
initializer(self)
@overrides
def forward(
self, # type: ignore
        spans: torch.IntTensor,  # (Batch Size, Number of Spans, 2)
        span_embeddings: torch.FloatTensor,  # (Batch Size, Number of Spans, Span Embedding Size)
span_features: torch.FloatTensor = None,
span_labels: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
# Shape: (Batch_size, Number of spans, H)
span_feedforward = self._mention_feedforward(span_embeddings)
if span_features is not None :
span_feedforward = torch.cat([span_feedforward, span_features], dim=-1)
ner_scores = self._ner_scorer(span_feedforward).squeeze(-1) #(B, NS)
ner_probs = torch.sigmoid(ner_scores)
output_dict = {
"spans" : spans,
"ner_probs": ner_probs,
"loss" : 0.0
}
        if span_labels is not None:
            # the second operand of assert is only evaluated on failure, so
            # breakpoint() drops into the debugger when a shape check fails
            assert ner_probs.shape == span_labels.shape, breakpoint()
            assert len(ner_probs.shape) == 2, breakpoint()
self._ner_metrics(ner_probs, span_labels)
loss = self._compute_loss_for_scores(ner_probs, span_labels, metadata)
output_dict["loss"] = loss
if metadata is not None:
output_dict["metadata"] = metadata
return output_dict
def _compute_loss_for_scores(self, ner_probs, ner_labels, metadata):
ner_probs_flat = ner_probs.view(-1)
ner_labels_flat = ner_labels.view(-1)
loss = torch.nn.BCELoss(reduction="mean")(ner_probs_flat, ner_labels_flat.float())
return loss
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]):
output_dict['decoded_spans'] = []
if 'spans' in output_dict :
for spans, spans_prob in zip(output_dict['spans'], output_dict['ner_probs']) :
decoded = {(span[0].item(), span[1].item() + 1): label.item() for span, label in zip(spans, spans_prob)}
output_dict['decoded_spans'].append(decoded)
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics = self._ner_metrics.get_metric(reset)
metrics = {"span_" + k: v for k, v in metrics.items()}
return metrics
|
1699530
|
import flask_whooshalchemy as whooshalchemy
from dataviva import app
from models import Post
whooshalchemy.whoosh_index(app, Post)
|
1699531
|
import numpy as np
import pytest
from scipy.stats import halfnorm, invgamma
from skopt.learning.gaussian_process.kernels import RBF, ConstantKernel
from bask.bayesgpr import BayesGPR
@pytest.fixture(params=[False, True])
def minimal_gp(request):
kernel = ConstantKernel(
constant_value=1 ** 2, constant_value_bounds=(0.01 ** 2, 1 ** 2)
) * RBF(length_scale=1.0, length_scale_bounds=(0.5, 1.5))
gp = BayesGPR(
random_state=1, normalize_y=False, kernel=kernel, warp_inputs=request.param
)
return gp
@pytest.fixture
def minimal_priors():
return [
lambda x: halfnorm(scale=1.0).logpdf(np.sqrt(np.exp(x)))
+ x / 2.0
- np.log(2.0),
lambda x: invgamma(a=5.0, scale=1.0).logpdf(np.exp(x)) + x,
lambda x: halfnorm(scale=1.0).logpdf(np.sqrt(np.exp(x)))
+ x / 2.0
- np.log(2.0),
]
def test_noise_vector(minimal_gp, minimal_priors):
X = np.array([[0.0], [0.0]])
y = np.array([1.0, 0.0])
noise_vector = np.array([1234, 0.0])
minimal_gp.fit(
X,
y,
noise_vector=noise_vector,
n_burnin=1,
progress=False,
priors=minimal_priors,
)
prediction = minimal_gp.predict(np.array([[0.0]]))
assert (
prediction < 0.01
) # The high noise is supposed to diminish the effect of the datapoint
def test_noise_set_to_zero(minimal_gp, minimal_priors):
X = np.array([[0.1], [0.0], [-0.1]])
y = np.array([0.0, 0.0, 0.0])
minimal_gp.fit(X, y, n_burnin=1, progress=False, priors=minimal_priors)
minimal_gp.theta = np.array([0.0, 0.0, 0.0])
assert minimal_gp.predict(np.array([[0.0]]), return_std=True)[1] >= 1.0
with minimal_gp.noise_set_to_zero():
assert minimal_gp.predict(np.array([[0.0]]), return_std=True)[1] < 1.0
assert minimal_gp.predict(np.array([[0.0]]), return_std=True)[1] >= 1.0
def test_sample_without_fit(minimal_gp):
    # Calling sample without data (X, y) or a previous fit should raise a ValueError
with pytest.raises(ValueError):
minimal_gp.sample()
|
1699556
|
import datetime
import databases
import pytest
import sqlalchemy as sa
from sqlalchemy import create_engine
import ormar
from tests.settings import DATABASE_URL
metadata = sa.MetaData()
db = databases.Database(DATABASE_URL)
engine = create_engine(DATABASE_URL)
class User(ormar.Model):
class Meta(ormar.ModelMeta):
tablename = "users"
metadata = metadata
database = db
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=50, unique=True, index=True)
class RelationalAuditModel(ormar.Model):
class Meta:
abstract = True
created_by: User = ormar.ForeignKey(User, nullable=False)
updated_by: User = ormar.ForeignKey(User, nullable=False)
class AuditModel(ormar.Model):
class Meta:
abstract = True
created_by: str = ormar.String(max_length=100)
updated_by: str = ormar.String(max_length=100, default="Sam")
class DateFieldsModel(ormar.Model):
class Meta(ormar.ModelMeta):
abstract = True
metadata = metadata
database = db
created_date: datetime.datetime = ormar.DateTime(
default=datetime.datetime.now, name="creation_date"
)
updated_date: datetime.datetime = ormar.DateTime(
default=datetime.datetime.now, name="modification_date"
)
class Category(DateFieldsModel, AuditModel):
class Meta(ormar.ModelMeta):
tablename = "categories"
exclude_parent_fields = ["updated_by", "updated_date"]
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=50, unique=True, index=True)
code: int = ormar.Integer()
class Item(DateFieldsModel, AuditModel):
class Meta(ormar.ModelMeta):
tablename = "items"
exclude_parent_fields = ["updated_by", "updated_date"]
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=50, unique=True, index=True)
code: int = ormar.Integer()
updated_by: str = ormar.String(max_length=100, default="Bob")
class Gun(RelationalAuditModel, DateFieldsModel):
class Meta(ormar.ModelMeta):
tablename = "guns"
exclude_parent_fields = ["updated_by"]
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=50)
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
metadata.create_all(engine)
yield
metadata.drop_all(engine)
def test_model_definition():
model_fields = Category.Meta.model_fields
sqlalchemy_columns = Category.Meta.table.c
pydantic_columns = Category.__fields__
assert "updated_by" not in model_fields
assert "updated_by" not in sqlalchemy_columns
assert "updated_by" not in pydantic_columns
assert "updated_date" not in model_fields
assert "updated_date" not in sqlalchemy_columns
assert "updated_date" not in pydantic_columns
assert "updated_by" not in Gun.Meta.model_fields
assert "updated_by" not in Gun.Meta.table.c
assert "updated_by" not in Gun.__fields__
@pytest.mark.asyncio
async def test_model_works_as_expected():
async with db:
async with db.transaction(force_rollback=True):
test = await Category(name="Cat", code=2, created_by="Joe").save()
assert test.created_date is not None
test2 = await Category.objects.get(pk=test.pk)
assert test2.name == "Cat"
assert test2.created_by == "Joe"
@pytest.mark.asyncio
async def test_exclude_with_redefinition():
async with db:
async with db.transaction(force_rollback=True):
test = await Item(name="Item", code=3, created_by="Anna").save()
assert test.created_date is not None
assert test.updated_by == "Bob"
test2 = await Item.objects.get(pk=test.pk)
assert test2.name == "Item"
assert test2.code == 3
@pytest.mark.asyncio
async def test_exclude_with_relation():
async with db:
async with db.transaction(force_rollback=True):
user = await User(name="<NAME>").save()
test = await Gun(name="AK47", created_by=user).save()
assert test.created_date is not None
with pytest.raises(AttributeError):
assert test.updated_by
test2 = await Gun.objects.select_related("created_by").get(pk=test.pk)
assert test2.name == "AK47"
assert test2.created_by.name == "<NAME>"
|
1699557
|
import django
from django.db import connection
from django.core.management import call_command
from .base import BaseCommand
def copy_tmp(tablename):
cursor = connection.cursor()
print('copying data from table ' + tablename)
cursor.execute("DROP TABLE IF EXISTS tmp_{t};".format(t=tablename))
cursor.execute("CREATE TABLE tmp_{t} (LIKE {t});".format(t=tablename))
cursor.execute("INSERT INTO tmp_{t} SELECT * FROM {t};".format(t=tablename))
def restore_from_tmp(tablename):
print('restoring data to table ' + tablename)
cursor = connection.cursor()
cursor.execute("INSERT INTO {t} SELECT * FROM tmp_{t};".format(t=tablename))
cursor.execute("DROP TABLE IF EXISTS tmp_{t};".format(t=tablename))
def drop_tables():
tables = connection.introspection.table_names()
cursor = connection.cursor()
for table in tables:
if table.startswith(('opencivicdata_', 'pupa_')):
print('dropping table ' + table)
cursor.execute("DROP TABLE IF EXISTS {} CASCADE;".format(table))
cursor.execute("DELETE FROM django_migrations WHERE app='core';")
cursor.execute("DELETE FROM django_migrations WHERE app='legislative';")
cursor.execute("DELETE FROM django_migrations WHERE app='pupa';")
class Command(BaseCommand):
name = 'dbinit'
help = 'initialize a pupa database'
def add_args(self):
self.add_argument('--reset', action='store_true', default=False,
help='reset entire database - USE WITH CAUTION')
self.add_argument('--partial-reset', action='store_true', default=False,
help='reset entire database, except for divisions - USE WITH CAUTION')
self.add_argument(type=str, dest='country', nargs='+',
help='country to load divisions for')
def handle(self, args, other):
django.setup()
if args.partial_reset:
copy_tmp('opencivicdata_division')
drop_tables()
elif args.reset:
drop_tables()
else:
pass
call_command('migrate', interactive=False)
if args.partial_reset:
restore_from_tmp('opencivicdata_division')
else:
for country in args.country:
call_command('loaddivisions', country)
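# Hedged usage sketch (not in the original file): assuming this command is
# exposed through the pupa CLI under the name declared above, initialising a
# fresh database for one country would look roughly like:
#
#     pupa dbinit --reset us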
|
1699562
|
from typing import List, Optional
from pydantic import Field, BaseModel
from tottle.types.objects.alert import ProximityAlertTriggered
from tottle.types.objects.animation import Animation
from tottle.types.objects.audio import Audio
from tottle.types.objects.chat import Chat
from tottle.types.objects.contact import Contact
from tottle.types.objects.dice import Dice
from tottle.types.objects.entity import Entity
from tottle.types.objects.game import Game
from tottle.types.objects.invoice import Invoice
from tottle.types.objects.keyboard import InlineKeyboardMarkup
from tottle.types.objects.location import Location
from tottle.types.objects.passport import PassportData
from tottle.types.objects.payment import SuccessfulPayment
from tottle.types.objects.photo import PhotoSize
from tottle.types.objects.poll import Poll
from tottle.types.objects.sticker import Sticker
from tottle.types.objects.user import User
from tottle.types.objects.venue import Venue
from tottle.types.objects.video import Video, VideoNote
class Message(BaseModel):
chat: Optional["Chat"] = None
date: Optional[int] = None
message_id: Optional[int] = None
from_user: Optional["User"] = Field(alias="from")
forward_from: Optional["User"] = None
forward_from_chat: Optional["Chat"] = None
forward_from_message_id: Optional[int] = None
forward_signature: Optional[str] = None
forward_sender_name: Optional[str] = None
forward_date: Optional[int] = None
reply_to_message: Optional["Message"] = None
via_bot: Optional["User"] = None
edit_date: Optional[int] = None
media_group_id: Optional[str] = None
author_signature: Optional[str] = None
text: Optional[str] = None
entities: Optional[List["Entity"]] = None
animation: Optional["Animation"] = None
audio: Optional["Audio"] = None
photo: Optional[List["PhotoSize"]] = None
sticker: Optional["Sticker"] = None
video: Optional["Video"] = None
video_note: Optional["VideoNote"] = None
caption: Optional[str] = None
caption_entities: Optional[List["Entity"]] = None
contact: Optional["Contact"] = None
dice: Optional["Dice"] = None
game: Optional["Game"] = None
poll: Optional["Poll"] = None
venue: Optional["Venue"] = None
location: Optional["Location"] = None
new_chat_members: Optional[List["User"]] = None
new_chat_title: Optional[str] = None
new_chat_photo: Optional[List["PhotoSize"]] = None
delete_chat_photo: Optional[bool] = None
group_chat_created: Optional[bool] = None
supergroup_chat_created: Optional[bool] = None
channel_chat_created: Optional[bool] = None
migrate_to_chat_id: Optional[int] = None
migrate_from_chat_id: Optional[int] = None
pinned_message: Optional["Message"] = None
invoice: Optional["Invoice"] = None
successful_payment: Optional["SuccessfulPayment"] = None
connected_website: Optional[str] = None
passport_data: Optional["PassportData"] = None
proximity_alert_triggered: Optional["ProximityAlertTriggered"] = None
reply_markup: Optional["InlineKeyboardMarkup"] = None
Message.update_forward_refs()
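# Hedged usage sketch (not in the original file): pydantic parses a
# Telegram-style payload, mapping the reserved word "from" onto from_user via
# the alias declared above; the payload shape is illustrative.
#
#     msg = Message.parse_obj({"message_id": 1, "date": 0, "text": "hi"})
#     assert msg.text == "hi" and msg.from_user is None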
|
1699586
|
import subprocess
from distutils.core import setup, Extension
def pkgconfig(flag, package):
    # ask pkg-config for the build flags; decode so the result is a list of
    # str under Python 3 as well as Python 2
    p = subprocess.Popen(['pkg-config', flag, package],
                         stdout=subprocess.PIPE)
    return p.stdout.read().decode().split()
mod = Extension('bulletphysics',
sources=['src/bulletphysics.cpp',
'src/DbvtBroadphase.cpp',
'src/DefaultCollisionConfiguration.cpp',
'src/CollisionDispatcher.cpp',
'src/SequentialImpulseConstraintSolver.cpp',
'src/DiscreteDynamicsWorld.cpp',
'src/Vector3.cpp',
'src/Quaternion.cpp',
'src/CollisionShape.cpp',
'src/StaticPlaneShape.cpp',
'src/SphereShape.cpp',
'src/Transform.cpp',
'src/DefaultMotionState.cpp',
'src/RigidBodyConstructionInfo.cpp',
'src/RigidBody.cpp',
'src/BoxShape.cpp',
'src/PersistentManifold.cpp',
'src/VehicleTuning.cpp',
'src/WheelInfo.cpp',
'src/DefaultVehicleRaycaster.cpp',
'src/RaycastVehicle.cpp',
'src/CompoundShape.cpp',
'src/CylinderShape.cpp',
],
extra_compile_args=pkgconfig('--cflags', 'bullet'),
extra_link_args=pkgconfig('--libs', 'bullet'))
setup(
name='bulletphysics',
version='0.1',
description='python wrapper for bulletphysics library',
ext_modules=[mod],
author='<NAME>.',
author_email='<EMAIL>',
url='https://github.com/20tab/pybulletphysics',
license='MIT License'
)
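# Hedged usage sketch (not in the original file): building the extension
# requires the bullet pkg-config entry to resolve, which can be checked and
# then built in place with the standard distutils command:
#
#     pkg-config --cflags bullet
#     python setup.py build_ext --inplace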
|
1699600
|
import pyparsing
from pyparsing import Word, WordStart, WordEnd, ZeroOrMore, Optional
class reference_patterns:
def __init__(self):
real_word_dashes = Word(pyparsing.alphas + "-")
punctuation = Word(".!?:,;-")
punctuation_no_dash = Word(".!?:,;")
punctuation_reference_letter = Word(".:,;-")
printable = Word(pyparsing.printables, exact=1)
letter = Word(pyparsing.alphas, exact=1)
letter_reference = punctuation_reference_letter + letter
nums = (
Word(pyparsing.nums)
+ Optional(letter)
+ ZeroOrMore(letter_reference)
)
word_end = (
pyparsing.ZeroOrMore(Word(")") | Word("}") | Word("]"))
+ Optional(punctuation_no_dash)
+ WordEnd()
)
self.single_number = WordStart() + real_word_dashes + nums + word_end
self.single_number_parens = (
printable
+ letter
+ Optional(punctuation_no_dash)
+ pyparsing.OneOrMore(
Word("([{", exact=1)
+ pyparsing.OneOrMore(nums | Word("-"))
+ Word(")]}", exact=1)
)
+ Optional(punctuation_no_dash)
+ word_end
)
self.number_then_punctuation = (
printable
+ letter
+ nums
+ punctuation
+ pyparsing.ZeroOrMore(nums | punctuation)
+ word_end
)
self.punctuation_then_number = (
printable
+ letter
+ punctuation_no_dash
+ nums
+ pyparsing.ZeroOrMore(punctuation | nums)
+ word_end
)
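# Hedged usage sketch (not in the original file): the grammars above are
# plain pyparsing expressions, so inline reference numbers can be scanned out
# of running text with searchString; the sample sentence is illustrative.
#
#     patterns = reference_patterns()
#     for match in patterns.single_number.searchString("see chapter 12."):
#         print(match)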
|
1699658
|
from os import path
from urllib.parse import urlparse
import logging
from scrapy.utils.misc import load_object
from .base_store import BaseStore
logger = logging.getLogger(__name__)
class FileStore(BaseStore):
'''Store class abstracting an input file.
It can handle file stored in the local file system or in Amazon AWS S3.
This is extensible by adding the given URI scheme to `SPIDERFEEDER_FILE_HANDLERS`.
The file formats handled are txt, csv and json.
If a new file format is required, it is just a matter of adding the file extension to
`SPIDERFEEDER_FILE_HANDLERS`.
For csv and json files, the URL is read from the field set in `SPIDERFEEDER_INPUT_FIELD`.
The standard file encoding is _utf-8_, but it can be changed through `SPIDERFEEDER_INPUT_FILE_ENCODING`.
'''
FILE_HANDLERS = {
'': 'spider_feeder.store.file_handler.local.open',
'file': 'spider_feeder.store.file_handler.local.open',
's3': 'spider_feeder.store.file_handler.s3.open',
'http': 'spider_feeder.store.file_handler.http.open',
'https': 'spider_feeder.store.file_handler.http.open',
}
FILE_PARSERS = {
'txt': 'spider_feeder.store.parser.parse_txt',
'csv': 'spider_feeder.store.parser.parse_csv',
'json': 'spider_feeder.store.parser.parse_json',
}
def __init__(self, input_file_uri, settings):
super().__init__(settings)
self._input_file_uri = input_file_uri
self._settings = settings
self._input_file_encoding = settings.get('SPIDERFEEDER_INPUT_FILE_ENCODING', 'utf-8')
self._input_format = settings.get('SPIDERFEEDER_INPUT_FORMAT', None)
handlers = settings.getdict('SPIDERFEEDER_FILE_HANDLERS', {})
self._handlers = dict(self.FILE_HANDLERS, **handlers)
parsers = settings.getdict('SPIDERFEEDER_FILE_PARSERS', {})
self._parsers = dict(self.FILE_PARSERS, **parsers)
@property
def _file_format(self):
if self._input_format:
return self._input_format
(_, file_extension) = path.splitext(self._input_file_uri)
return file_extension[1:] # remove the "."
def _open(self):
parsed = urlparse(self._input_file_uri)
logger.info(f'Opening file {self._input_file_uri} with scheme {parsed.scheme}.')
open = load_object(self._handlers[parsed.scheme])
return open(
self._input_file_uri,
encoding=self._input_file_encoding,
settings=self._settings
)
def _parse(self, fd):
file_format = self._file_format
logger.info(f'Parsing file {self._input_file_uri} with format {file_format}.')
parser = load_object(self._parsers[file_format])
return parser(fd, self._settings)
def read_input_items(self):
with self._open() as fd:
return self._parse(fd)
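# Hedged usage sketch (not in the original file): given a Scrapy settings
# object, the store resolves the handler from the URI scheme and the parser
# from the file extension; the URI below is an assumption.
#
#     store = FileStore('s3://my-bucket/urls.txt', settings)
#     items = store.read_input_items()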
|
1699660
|
import unittest
from frag.utils import parser
from frag.utils.parser import _get_c_of_mass_list
from rdkit import Chem
class ParserTest(unittest.TestCase):
water_data = """HETATM 2008 O HOH B 184 53.034 -39.489 96.872 1.00 67.70 O
HETATM 2010 O HOH B 186 39.366 -30.950 88.735 1.00 66.27 O
HETATM 2011 O HOH B 187 38.861 -67.134 82.852 1.00 69.11 O
HETATM 2012 O HOH B 188 48.438 -40.466 97.529 1.00 41.98 O
HETATM 2015 O HOH B 190 47.858 -60.571 77.866 1.00 52.55 O
HETATM 2016 O HOH B 191 52.415 -50.993 68.148 1.00 50.73 O
HETATM 2017 O HOH B 192 48.922 -42.540 98.150 1.00 52.43 O
HETATM 2018 O HOH B 193 60.968 -55.453 92.185 1.00 37.56 O
HETATM 2019 O HOH B 194 26.058 -55.837 68.104 1.00 60.13 O
HETATM 2020 O HOH B 195 26.923 -52.747 83.917 1.00 59.04 O
HETATM 2021 O HOH B 196 42.376 -40.566 71.629 1.00 45.71 O
HETATM 2022 O HOH B 197 45.966 -45.361 100.995 1.00 56.39 O
HETATM 2023 O HOH B 198 40.498 -49.338 59.760 1.00 74.54 O
HETATM 2024 O HOH B 199 62.006 -56.842 90.642 1.00 50.69 O"""
single_water = """HETATM 2008 O HOH B 184 53.034 -39.489 96.872 1.00 67.70 O"""
ligand_data = """
RDKit
14 15 0 0 0 0 0 0 0 0999 V2000
0.0000 0.0000 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.0000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 2 0
2 3 1 0
2 4 1 0
4 5 1 0
5 6 1 0
6 7 1 0
7 8 2 0
7 9 1 0
9 10 2 0
10 11 1 0
11 12 2 0
12 13 1 0
13 14 2 0
14 5 1 0
14 9 1 0
M END
$$$$
RDKit 3D
8 8 0 0 0 0 0 0 0 0999 V2000
-0.2375 1.2479 -0.2221 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.5033 1.0878 0.3249 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.9231 -0.1960 0.6598 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.4265 -1.1855 -0.1809 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.0741 -1.1112 -0.4792 C 0 0 0 0 0 0 0 0 0 0 0 0
0.5447 0.1297 -0.4205 C 0 0 0 0 0 0 0 0 0 0 0 0
2.0371 0.2224 -0.4859 C 0 0 0 0 0 0 0 0 0 0 0 0
2.5826 -0.1950 0.8038 N 0 0 0 0 0 0 0 0 0 0 0 0
1 2 2 0
2 3 1 0
3 4 2 0
4 5 1 0
5 6 2 0
6 7 1 0
7 8 1 0
6 1 1 0
M END"""
def test_water_parser(self):
out_data = parser._get_waters(self.water_data.split("\n"))
self.assertEqual(len(out_data),14)
out_data = parser._get_waters(self.single_water.split("\n"))
self.assertEqual(len(out_data),1)
def test_water_reader(self):
out_data = parser._get_waters(self.water_data.split("\n"))
water_coords = parser._get_water_coords(out_data)
self.assertEqual(len(water_coords),14)
self.assertAlmostEqual(water_coords[4][2],77.866)
out_data = parser._get_waters(self.single_water.split("\n"))
water_coords = parser._get_water_coords(out_data)
self.assertEqual(len(water_coords), 1)
self.assertAlmostEqual(water_coords[0][1],-39.489)
if __name__ == '__main__':
unittest.main()
|
1699667
|
import pytest
from busy_beaver.apps.slack_integration.blocks import AppHome
from busy_beaver.common.wrappers.slack import SlackClient
from busy_beaver.config import SLACK_TOKEN
from busy_beaver.exceptions import SlackTooManyBlocks
MODULE_TO_TEST = "busy_beaver.common.wrappers.slack"
@pytest.fixture(scope="module")
def slack():
return SlackClient(SLACK_TOKEN)
@pytest.mark.vcr()
def test_slack_dm(slack: SlackClient):
# Act
result = slack.dm("test", user_id="U5FTQ3QRZ")
# Assert
assert result["ok"] is True
assert result["message"]["text"] == "test"
@pytest.mark.vcr()
def test_slack_get_channel_details(slack: SlackClient):
details = slack.channel_details("C5GQNTS07")
assert details["name"] == "general"
@pytest.mark.vcr()
def test_slack_get_channel_members(slack: SlackClient):
members = slack.get_channel_members("C5GQNTS07")
assert len(members) > 0
@pytest.mark.vcr()
def test_slack_get_channel_members__channel_does_not_exist(slack: SlackClient):
with pytest.raises(ValueError):
slack.get_channel_members("channel-does-not-exist")
@pytest.mark.vcr()
def test_slack_get_bot_channels(slack: SlackClient):
channels = slack.get_bot_channels()
assert ("C5GQNTS07", "general") in channels
@pytest.mark.vcr()
def test_slack_post_ephemeral_message_success(slack: SlackClient):
# Act
result = slack.post_ephemeral_message(
"test", channel="CEWD83Y74", user_id="U5FTQ3QRZ"
)
# Assert
assert result["ok"] is True
@pytest.mark.vcr()
def test_slack_user_is_admin(slack: SlackClient):
result = slack.is_admin(user_id="U5FTQ3QRZ")
assert result is True
@pytest.mark.vcr()
def test_slack_user_is_not_admin(slack: SlackClient):
result = slack.is_admin(user_id="UGG6065AP")
assert result is False
@pytest.mark.vcr()
def test_slack_user_does_not_exist(slack: SlackClient):
with pytest.raises(ValueError):
slack.is_admin(user_id="not-real-id")
@pytest.mark.vcr()
def test_slack_post_message_success(slack: SlackClient):
# Act
result = slack.post_message("test", channel="general")
# Assert
assert result["ok"] is True
assert result["message"]["text"] == "test"
@pytest.mark.vcr()
def test_slack_post_message_failed_channel_does_not_exist(slack: SlackClient):
# Act
with pytest.raises(ValueError, match="Channel not found"):
slack.post_message("test", channel="d03s_n0t_3x1s7")
@pytest.mark.vcr()
def test_slack_post_message_failed_not_in_channel(slack: SlackClient):
# Act
with pytest.raises(ValueError, match="Not in channel"):
slack.post_message(message="hello follow human", channel="humans-only")
@pytest.mark.vcr()
def test_slack_post_message_without_specifying_channel(slack: SlackClient):
with pytest.raises(ValueError, match="Must specify channel"):
slack.post_message(message="test")
def test_slack_post_message_failed_too_many_blocks(slack: SlackClient):
# Arrange
block = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "A message *with some bold text* and _some italicized text_.",
},
}
blocks = [block] * 1000
# Act
with pytest.raises(SlackTooManyBlocks):
slack.post_message(channel="general", blocks=blocks)
@pytest.mark.vcr()
def test_slack_post_message_failed_invalid_blocks(slack: SlackClient):
# Arrange -- missing 'type' key
block = {
"text": {
"type": "mrkdwn",
"text": "A message *with some bold text* and _some italicized text_.",
}
}
blocks = [block]
# Act
with pytest.raises(ValueError, match="Invalid blocks"):
slack.post_message(channel="general", blocks=blocks)
@pytest.mark.vcr()
def test_slack_display_app_home(slack: SlackClient):
result = slack.display_app_home("U5FTQ3QRZ", view=AppHome().to_dict())
assert result.status_code == 200
assert result["ok"] is True
assert result["view"]
|
1699694
|
squares = [i**2 for i in range(5)]
print(squares)
# [0, 1, 4, 9, 16]
squares = []
for i in range(5):
squares.append(i**2)
print(squares)
# [0, 1, 4, 9, 16]
odds = [i for i in range(10) if i % 2 == 1]
print(odds)
# [1, 3, 5, 7, 9]
odds = []
for i in range(10):
if i % 2 == 1:
odds.append(i)
print(odds)
# [1, 3, 5, 7, 9]
odd_even = ['odd' if i % 2 == 1 else 'even' for i in range(10)]
print(odd_even)
# ['even', 'odd', 'even', 'odd', 'even', 'odd', 'even', 'odd', 'even', 'odd']
odd_even = []
for i in range(10):
if i % 2 == 1:
odd_even.append('odd')
else:
odd_even.append('even')
print(odd_even)
# ['even', 'odd', 'even', 'odd', 'even', 'odd', 'even', 'odd', 'even', 'odd']
odd10 = [i * 10 if i % 2 == 1 else i for i in range(10)]
print(odd10)
# [0, 10, 2, 30, 4, 50, 6, 70, 8, 90]
matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
flat = [x for row in matrix for x in row]
print(flat)
# [1, 2, 3, 4, 5, 6, 7, 8, 9]
flat = []
for row in matrix:
for x in row:
flat.append(x)
print(flat)
# [1, 2, 3, 4, 5, 6, 7, 8, 9]
cells = [(row, col) for row in range(3) for col in range(2)]
print(cells)
# [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
cells = [(row, col) for row in range(3)
for col in range(2) if col == row]
print(cells)
# [(0, 0), (1, 1)]
cells = [(row, col) for row in range(3) if row % 2 == 0
for col in range(2) if col % 2 == 0]
print(cells)
# [(0, 0), (2, 0)]
l_str1 = ['a', 'b', 'c']
l_str2 = ['x', 'y', 'z']
l_zip = [(s1, s2) for s1, s2 in zip(l_str1, l_str2)]
print(l_zip)
# [('a', 'x'), ('b', 'y'), ('c', 'z')]
l_zip = []
for s1, s2 in zip(l_str1, l_str2):
l_zip.append((s1, s2))
print(l_zip)
# [('a', 'x'), ('b', 'y'), ('c', 'z')]
l_enu = [(i, s) for i, s in enumerate(l_str1)]
print(l_enu)
# [(0, 'a'), (1, 'b'), (2, 'c')]
l_enu = []
for i, s in enumerate(l_str1):
l_enu.append((i, s))
print(l_enu)
# [(0, 'a'), (1, 'b'), (2, 'c')]
l_zip_if = [(s1, s2) for s1, s2 in zip(l_str1, l_str2) if s1 != 'b']
print(l_zip_if)
# [('a', 'x'), ('c', 'z')]
l_int1 = [1, 2, 3]
l_int2 = [10, 20, 30]
l_sub = [i2 - i1 for i1, i2 in zip(l_int1, l_int2)]
print(l_sub)
# [9, 18, 27]
|
1699764
|
import json
from django.core import management
from django.core.urlresolvers import reverse
from allegation.factories import OfficerFactory, OfficerAllegationFactory
from common.tests.core import SimpleTestCase
class CountViewTestCase(SimpleTestCase):
def test_count_by_num_complaints(self):
self.officers = []
for _ in range(4):
self.officers.append(OfficerFactory())
for i in range(2):
OfficerAllegationFactory(officer=self.officers[i])
management.call_command('calculate_allegations_count')
response = self.client.get(reverse('officer:count'))
count = json.loads(response.content.decode())
        # Does not count officers with 0 complaints
self.assertListEqual(count, [0, 2])
def test_count_no_complaint(self):
self.visit(reverse('officer:count'))
data = self.json(self.response)
data.should.equal([0])
|
1699783
|
from django.core.cache import cache
import contextlib
import time
@contextlib.contextmanager
def lock(key, timeout=5000):
"""
A simple context manager that raises the passed exception
if a lock can't be acquired.
"""
lock_id = 'lock-transmission-{0}'.format(key)
acquire_lock = lambda: cache.add(lock_id, 1, 90) # fix to keep the key for 90secs in redis instead of 5000sec
release_lock = lambda: cache.delete(lock_id)
waited, hops = 0, 10
while not acquire_lock():
time.sleep(float(hops) / 1000.0)
waited += hops
if waited > timeout:
raise RuntimeError('Lock could not be acquired after {}ms'.format(waited))
try:
yield
finally:
release_lock()
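# Hedged usage sketch (not in the original file): guarding a critical section
# with the cache-backed lock; the key and the body are illustrative.
#
#     with lock('transmission-42', timeout=2000):
#         do_exclusive_work()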
|
1699851
|
from dataclasses import dataclass
from typing import ClassVar, Dict, List, Optional
import resotolib.logger
from resotolib.baseresources import (
BaseAccount,
BaseDatabase,
BaseInstance,
BaseIPAddress,
BaseLoadBalancer,
BaseNetwork,
BaseRegion,
BaseResource,
BaseSnapshot,
BaseVolume,
InstanceStatus,
VolumeStatus,
BaseBucket,
BaseEndpoint,
BaseCertificate,
BaseKeyPair,
BaseDomain,
BaseDomainRecord,
)
from resotolib.graph import Graph
from resoto_plugin_digitalocean.client import get_team_credentials
from resoto_plugin_digitalocean.client import StreamingWrapper
from .utils import dump_tag
log = resotolib.logger.getLogger("resoto." + __name__)
@dataclass(eq=False)
class DigitalOceanResource(BaseResource): # type: ignore
"""A class that implements the abstract method delete() as well as update_tag()
and delete_tag().
delete() must be implemented. update_tag() and delete_tag() are optional.
"""
kind: ClassVar[str] = "digitalocean_resource"
urn: str = ""
def delete_uri_path(self) -> Optional[str]:
return None
def tag_resource_name(self) -> Optional[str]:
"""Resource name in case tagging is supported by digitalocean.
Not all resources support tagging.
"""
return None
def delete(self, graph: Graph) -> bool:
"""Delete a resource in the cloud"""
delete_uri_path = self.delete_uri_path()
if delete_uri_path:
log.debug(
f"Deleting resource {self.id} in account {self.account(graph).id} region {self.region(graph).id}"
)
team = self.account(graph)
credentials = get_team_credentials(team.id)
if credentials is None:
raise RuntimeError(
f"Cannot delete resource {self.id}, credentials not found for team {team.id}"
)
client = StreamingWrapper(
credentials.api_token,
credentials.spaces_access_key,
credentials.spaces_secret_key,
)
return client.delete(delete_uri_path, self.id)
raise NotImplementedError
def update_tag(self, key: str, value: str) -> bool:
tag_resource_name = self.tag_resource_name()
if tag_resource_name:
log.debug(f"Updating tag {key} on resource {self.id}")
team = self._account
credentials = get_team_credentials(team.id)
if credentials is None:
raise RuntimeError(
f"Cannot update tag on resource {self.id}, credentials not found for team {team.id}"
)
client = StreamingWrapper(
credentials.api_token,
credentials.spaces_access_key,
credentials.spaces_secret_key,
)
if key in self.tags:
# resotocore knows about the tag. Therefore we need to clean it first
tag_key = dump_tag(key, self.tags.get(key))
client.untag_resource(tag_key, tag_resource_name, self.id)
# we tag the resource using the key-value formatted tag
tag_kv = dump_tag(key, value)
tag_ready: bool = True
tag_count = client.get_tag_count(tag_kv)
# tag count call failed irrecoverably, we can't continue
if isinstance(tag_count, str):
raise RuntimeError(f"Tag update failed. Reason: {tag_count}")
# tag does not exist, create it
if tag_count is None:
tag_ready = client.create_tag(tag_kv)
return tag_ready and client.tag_resource(tag_kv, tag_resource_name, self.id)
else:
raise NotImplementedError(f"resource {self.kind} does not support tagging")
def delete_tag(self, key: str) -> bool:
tag_resource_name = self.tag_resource_name()
if tag_resource_name:
log.debug(f"Deleting tag {key} on resource {self.id}")
team = self._account
credentials = get_team_credentials(team.id)
if credentials is None:
raise RuntimeError(
f"Cannot update tag on resource {self.id}, credentials not found for team {team.id}"
)
client = StreamingWrapper(
credentials.api_token,
credentials.spaces_access_key,
credentials.spaces_secret_key,
)
if key not in self.tags:
# tag does not exist, nothing to do
return False
tag_key = dump_tag(key, self.tags.get(key))
untagged = client.untag_resource(tag_key, tag_resource_name, self.id)
if not untagged:
return False
tag_count = client.get_tag_count(tag_key)
if tag_count == 0:
return client.delete("/tags", tag_key)
return True
else:
raise NotImplementedError(f"resource {self.kind} does not support tagging")
@dataclass(eq=False)
class DigitalOceanTeam(DigitalOceanResource, BaseAccount): # type: ignore
"""DigitalOcean Team"""
kind: ClassVar[str] = "digitalocean_team"
@dataclass(eq=False)
class DigitalOceanRegion(DigitalOceanResource, BaseRegion): # type: ignore
"""DigitalOcean region"""
kind: ClassVar[str] = "digitalocean_region"
do_region_slug: Optional[str] = None
do_region_features: Optional[List[str]] = None
is_available: Optional[bool] = None
do_region_droplet_sizes: Optional[List[str]] = None
@dataclass(eq=False)
class DigitalOceanProject(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean project"""
kind: ClassVar[str] = "digitalocean_project"
owner_uuid: Optional[str] = None
owner_id: Optional[str] = None
description: Optional[str] = None
purpose: Optional[str] = None
environment: Optional[str] = None
is_default: Optional[bool] = None
def delete_uri_path(self) -> Optional[str]:
return "/projects"
@dataclass(eq=False)
class DigitalOceanDroplet(DigitalOceanResource, BaseInstance): # type: ignore
"""A DigitalOcean Droplet Resource
Droplet have a class variable `instance_status_map` which contains
a mapping from the droplet status string the cloud API returns
to our internal InstanceStatus state.
"""
kind: ClassVar[str] = "digitalocean_droplet"
instance_status_map: ClassVar[Dict[str, InstanceStatus]] = {
"new": InstanceStatus.BUSY,
"active": InstanceStatus.RUNNING,
"off": InstanceStatus.TERMINATED,
"archive": InstanceStatus.TERMINATED,
}
droplet_backup_ids: Optional[List[str]] = None
is_locked: Optional[bool] = None
droplet_features: Optional[List[str]] = None
droplet_image: Optional[str] = None
def delete_uri_path(self) -> Optional[str]:
return "/droplets"
def _instance_status_setter(self, value: str) -> None:
"""Setter that looks up the instance status
Based on the string that was give we're doing a dict lookup
for the corresponding instance status and assign it or
InstanceStatus.UNKNOWN.
"""
self._instance_status = self.instance_status_map.get(
value, InstanceStatus.UNKNOWN
)
def tag_resource_name(self) -> Optional[str]:
return "droplet"
# Because we are using dataclasses and allow to supply the `instance_status`
# string to the constructor we can not use the normal @property decorator.
# Instead we assign the property once the class has been fully defined.
DigitalOceanDroplet.instance_status = property(
DigitalOceanDroplet._instance_status_getter,
DigitalOceanDroplet._instance_status_setter,
)
@dataclass(eq=False)
class DigitalOceanKubernetesCluster(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean Kubernetes Cluster"""
kind: ClassVar[str] = "digitalocean_kubernetes_cluster"
k8s_version: Optional[str] = None
k8s_cluster_subnet: Optional[str] = None
k8s_service_subnet: Optional[str] = None
ipv4_address: Optional[str] = None
endpoint: Optional[str] = None
auto_upgrade_enabled: Optional[bool] = None
cluster_status: Optional[str] = None
surge_upgrade_enabled: Optional[bool] = None
registry_enabled: Optional[bool] = None
ha_enabled: Optional[bool] = None
def delete_uri_path(self) -> Optional[str]:
return "/kubernetes/clusters"
@dataclass(eq=False)
class DigitalOceanVolume(DigitalOceanResource, BaseVolume): # type: ignore
kind: ClassVar[str] = "digitalocean_volume"
volume_status_map: ClassVar[Dict[str, VolumeStatus]] = {
"creating": VolumeStatus.BUSY,
"available": VolumeStatus.AVAILABLE,
"in-use": VolumeStatus.IN_USE,
"deleting": VolumeStatus.BUSY,
"deleted": VolumeStatus.DELETED,
"error": VolumeStatus.ERROR,
"busy": VolumeStatus.BUSY,
}
description: Optional[str] = None
filesystem_type: Optional[str] = None
filesystem_label: Optional[str] = None
def delete_uri_path(self) -> Optional[str]:
return "/volumes"
def _volume_status_setter(self, value: str) -> None:
self._volume_status = self.volume_status_map.get(value, VolumeStatus.UNKNOWN)
def tag_resource_name(self) -> Optional[str]:
return "volume"
DigitalOceanVolume.volume_status = property(
DigitalOceanVolume._volume_status_getter, DigitalOceanVolume._volume_status_setter
)
@dataclass(eq=False)
class DigitalOceanDatabase(DigitalOceanResource, BaseDatabase): # type: ignore
kind: ClassVar[str] = "digitalocean_database"
def delete_uri_path(self) -> Optional[str]:
return "/databases"
def tag_resource_name(self) -> Optional[str]:
return "database"
@dataclass(eq=False)
class DigitalOceanNetwork(DigitalOceanResource, BaseNetwork): # type: ignore
"""DigitalOcean network
This is what instances and other networking related resources might reside in.
"""
kind: ClassVar[str] = "digitalocean_network"
ip_range: Optional[str] = None
description: Optional[str] = None
is_default: Optional[bool] = None
def delete_uri_path(self) -> Optional[str]:
return "/vpcs"
@dataclass(eq=False)
class DigitalOceanSnapshot(DigitalOceanResource, BaseSnapshot): # type: ignore
"""DigitalOcean snapshot"""
kind: ClassVar[str] = "digitalocean_snapshot"
snapshot_size_gigabytes: Optional[int] = None
resource_id: Optional[str] = None
resource_type: Optional[str] = None
def delete_uri_path(self) -> Optional[str]:
return "/snapshots"
def tag_resource_name(self) -> Optional[str]:
return "volume_snapshot"
@dataclass(eq=False)
class DigitalOceanLoadBalancer(DigitalOceanResource, BaseLoadBalancer): # type: ignore
"""DigitalOcean load balancer"""
kind: ClassVar[str] = "digitalocean_load_balancer"
nr_nodes: Optional[int] = None
loadbalancer_status: Optional[str] = None
redirect_http_to_https: Optional[bool] = None
enable_proxy_protocol: Optional[bool] = None
enable_backend_keepalive: Optional[bool] = None
disable_lets_encrypt_dns_records: Optional[bool] = None
def delete_uri_path(self) -> Optional[str]:
return "/load_balancers"
@dataclass(eq=False)
class DigitalOceanFloatingIP(DigitalOceanResource, BaseIPAddress): # type: ignore
"""DigitalOcean floating IP"""
kind: ClassVar[str] = "digitalocean_floating_ip"
is_locked: Optional[bool] = None
def delete(self, graph: Graph) -> bool:
log.debug(
f"Deleting resource {self.id} in account {self.account(graph).id} region {self.region(graph).id}"
)
team = self.account(graph)
credentials = get_team_credentials(team.id)
if credentials is None:
raise RuntimeError(
f"Cannot delete resource {self.id}, credentials not found for team {team.id}"
)
client = StreamingWrapper(
credentials.api_token,
credentials.spaces_access_key,
credentials.spaces_secret_key,
)
# Unassign the IP in case it is still assigned to a droplet
client.unassign_floating_ip(self.id)
return client.delete("/floating_ips", self.id)
@dataclass(eq=False)
class DigitalOceanImage(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean image"""
kind: ClassVar[str] = "digitalocean_image"
distribution: Optional[str] = None
image_slug: Optional[str] = None
is_public: Optional[bool] = None
min_disk_size: Optional[int] = None
image_type: Optional[str] = None
size_gigabytes: Optional[int] = None
description: Optional[str] = None
image_status: Optional[str] = None
def delete_uri_path(self) -> Optional[str]:
return "/images"
def tag_resource_name(self) -> Optional[str]:
return "image"
@dataclass(eq=False)
class DigitalOceanSpace(DigitalOceanResource, BaseBucket): # type: ignore
"""DigitalOcean space"""
kind: ClassVar[str] = "digitalocean_space"
def delete(self, graph: Graph) -> bool:
log.debug(
f"Deleting space {self.id} in account {self.account(graph).id} region {self.region(graph).id}"
)
team = self.account(graph)
credentials = get_team_credentials(team.id)
if credentials is None:
raise RuntimeError(
f"Cannot delete resource {self.id}, credentials not found for team {team.id}"
)
client = StreamingWrapper(
credentials.api_token,
credentials.spaces_access_key,
credentials.spaces_secret_key,
)
return client.delete_space(self.region(graph).id, self.id)
@dataclass(eq=False)
class DigitalOceanApp(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean app"""
kind: ClassVar[str] = "digitalocean_app"
tier_slug: Optional[str] = None
default_ingress: Optional[str] = None
live_url: Optional[str] = None
live_url_base: Optional[str] = None
live_domain: Optional[str] = None
def delete_uri_path(self) -> Optional[str]:
return "/apps"
@dataclass(eq=False)
class DigitalOceanCdnEndpoint(DigitalOceanResource, BaseEndpoint): # type: ignore
"""DigitalOcean CDN endpoint"""
kind = "digitalocean_cdn_endpoint"
origin: Optional[str] = None
endpoint: Optional[str] = None
certificate_id: Optional[str] = None
custom_domain: Optional[str] = None
ttl: Optional[int] = None
def delete_uri_path(self) -> Optional[str]:
return "/cdn/endpoints"
@dataclass(eq=False)
class DigitalOceanCertificate(DigitalOceanResource, BaseCertificate): # type: ignore
"""DigitalOcean certificate"""
kind = "digitalocean_certificate"
certificate_state: Optional[str] = None
certificate_type: Optional[str] = None
def delete_uri_path(self) -> Optional[str]:
return "/certificates"
@dataclass(eq=False)
class DigitalOceanContainerRegistry(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean container registry"""
kind = "digitalocean_container_registry"
storage_usage_bytes: Optional[int] = None
is_read_only: Optional[bool] = None
def delete(self, graph: Graph) -> bool:
"""Delete the container registry from the cloud"""
log.debug(
f"Deleting registry {self.id} in account {self.account(graph).id} region {self.region(graph).id}"
)
team = self.account(graph)
credentials = get_team_credentials(team.id)
if credentials is None:
raise RuntimeError(
f"Cannot delete resource {self.id}, credentials not found for team {team.id}"
)
client = StreamingWrapper(
credentials.api_token,
credentials.spaces_access_key,
credentials.spaces_secret_key,
)
return client.delete("/registry", None)
@dataclass(eq=False)
class DigitalOceanContainerRegistryRepository(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean container registry repository"""
kind = "digitalocean_container_registry_repository"
tag_count: Optional[int] = None
manifest_count: Optional[int] = None
@dataclass(eq=False)
class DigitalOceanContainerRegistryRepositoryTag(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean container registry repository tag"""
kind = "digitalocean_container_registry_repository_tag"
registry_name: Optional[str] = None
repository_name: Optional[str] = None
manifest_digest: Optional[str] = None
compressed_size_bytes: Optional[int] = None
size_bytes: Optional[int] = None
def delete_uri_path(self) -> Optional[str]:
return (
f"/registry/{self.registry_name}/repositories/{self.repository_name}/tags"
)
@dataclass(eq=False)
class DigitalOceanSSHKey(DigitalOceanResource, BaseKeyPair): # type: ignore
"""DigitalOcean ssh key"""
kind = "digitalocean_ssh_key"
public_key: Optional[str] = None
def delete_uri_path(self) -> Optional[str]:
return "/account/keys"
@dataclass(eq=False)
class DigitalOceanTag(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean tag"""
kind = "digitalocean_tag"
def delete_uri_path(self) -> Optional[str]:
return "/tags"
@dataclass(eq=False)
class DigitalOceanDomain(DigitalOceanResource, BaseDomain): # type: ignore
"""DigitalOcean domain"""
kind = "digitalocean_domain"
def delete_uri_path(self) -> Optional[str]:
return "/domains"
@dataclass(eq=False)
class DigitalOceanDomainRecord(DigitalOceanResource, BaseDomainRecord): # type: ignore
"""DigitalOcean domain record"""
kind = "digitalocean_domain_record"
domain_name: Optional[str] = None
def delete_uri_path(self) -> Optional[str]:
return f"/domains/{self.domain_name}/records"
@dataclass(eq=False)
class DigitalOceanFirewall(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean firewall"""
kind = "digitalocean_firewall"
firewall_status: Optional[str] = None
def delete_uri_path(self) -> Optional[str]:
return "/firewalls"
@dataclass(eq=False)
class DigitalOceanAlertPolicy(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean alert policy"""
kind = "digitalocean_alert_policy"
policy_type: Optional[str] = None
description: Optional[str] = None
is_enabled: Optional[bool] = None
def delete_uri_path(self) -> Optional[str]:
return "/monitoring/alerts"
|
1699916
|
import KratosMultiphysics as KM
import KratosMultiphysics.ShallowWaterApplication as SW
from KratosMultiphysics.time_based_ascii_file_writer_utility import TimeBasedAsciiFileWriterUtility
def Factory(params, model):
if not isinstance(params, KM.Parameters):
raise Exception('expected input shall be a Parameters object, encapsulating a json string')
return ComputeBoundaryForceProcess(model, params['Parameters'])
class ComputeBoundaryForceProcess(KM.Process):
'''
Gets the external accelerations and computes the hydrostatic forces.
The results are written to a file or printed to the screen.
'''
def __init__(self, model, params):
'''Constructor of ComputeBoundaryForceProcess.'''
super().__init__()
default_settings = KM.Parameters("""
{
"model_part_wall_name" : "",
"model_part_bottom_name" : "",
"interval" : [0.0, 1e30],
"print_to_screen" : false,
"print_format" : ".8f",
"write_output_file" : true,
"output_file_settings" : {}
}
""")
self.interval = KM.IntervalUtility(params)
params.ValidateAndAssignDefaults(default_settings)
self.model_part_wall_name = params['model_part_wall_name'].GetString()
self.model_part_wall = model[self.model_part_wall_name]
self.model_part_bottom_name = params['model_part_bottom_name'].GetString()
self.model_part_bottom = model[self.model_part_bottom_name]
self.print_to_screen = params['print_to_screen'].GetBool()
self.write_output_file = params['write_output_file'].GetBool()
self.print_format = params["print_format"].GetString()
if (self.model_part_wall.GetCommunicator().MyPID() == 0):
if (self.write_output_file):
default_file_name = params["model_part_wall_name"].GetString() + "_global_force.dat"
file_handler_params = KM.Parameters(
params["output_file_settings"])
if file_handler_params.Has("file_name"):
file_name = file_handler_params["file_name"].GetString()
msg = 'Unexpected user-specified entry found in "output_file_settings":\n'
msg += '\t"file_name" : "{}"\n'
msg += 'Using this specified file name instead of the default ("{}")'
KM.Logger.PrintWarning("ComputeBoundaryForceProcess", msg.format(file_name, default_file_name))
else:
file_handler_params.AddString("file_name", default_file_name)
file_header = self._GetFileHeader()
self.output_file = TimeBasedAsciiFileWriterUtility(
self.model_part_wall, file_handler_params, file_header).file
def ExecuteFinalizeSolutionStep(self):
'''Print the boundary forces to a file or to the screen.'''
current_time = self.model_part_wall.ProcessInfo[KM.TIME]
if self.interval.IsInInterval(current_time):
accelerations, forces = self._EvaluateGlobalForces()
if self.model_part_wall.GetCommunicator().MyPID() == 0:
output_values = []
# the time is intentionally left unformatted, so the file writer can
# still recognize it when restarting
output_values.append(str(current_time))
for val in accelerations : output_values.append(format(val, self.print_format))
for val in forces : output_values.append(format(val, self.print_format))
if self.print_to_screen:
result_msg = 'Global force evaluation for model part ' + \
self.model_part_wall_name + '\n'
res_labels = ['time: ','acc_x: ','acc_y: ','acc_z: ','f_x: ', 'f_y: ', 'f_z: ']
result_msg += ', '.join([a+b for a,b in zip(res_labels, output_values)])
self._PrintToScreen(result_msg)
if self.write_output_file:
self.output_file.write(' '.join(output_values) + '\n')
def _EvaluateGlobalForces(self):
# The external acceleration is assumed to be uniform over the wall,
# so reading it from the first node is sufficient.
for node in self.model_part_wall.Nodes:
acceleration = node.GetSolutionStepValue(KM.MESH_ACCELERATION)
break
process_info = self.model_part_wall.ProcessInfo
sum_forces = SW.ShallowWaterUtilities().ComputeHydrostaticForces(self.model_part_wall.Conditions, process_info)
sum_forces += SW.ShallowWaterUtilities().ComputeHydrostaticForces(self.model_part_bottom.Elements, process_info)
return acceleration, sum_forces
def _GetFileHeader(self):
header = '# Global force for model part ' + self.model_part_wall_name + '\n'
header += '# Time acc_x acc_y acc_z f_x f_y f_z\n'
return header
@staticmethod
def _PrintToScreen(result_msg):
KM.Logger.PrintInfo('ComputeBoundaryForceProcess', 'Global force results - flow- and body-attached:')
KM.Logger.PrintInfo('ComputeBoundaryForceProcess', 'Current time: ' + result_msg)
|
1700005
|
import os
from parslepy.base import Parselet
from nose.tools import assert_dict_equal
html = '<html><body><h1>hi</h1><a href="/">click</a></body></html>'
expected = {"title":"hi", "link":"/"}
dirname = os.path.dirname(os.path.abspath(__file__))
def test_parslepy_from_jsonstring():
s = '{ "title": "h1", "link": "a @href"}'
p = Parselet.from_jsonstring(s)
extracted = p.parse_fromstring(html)
assert_dict_equal(extracted, expected)
def test_parslepy_from_yamlstring():
s = '''---
title: h1
link: a @href
'''
p = Parselet.from_yamlstring(s)
extracted = p.parse_fromstring(html)
assert_dict_equal(extracted, expected)
def test_parslepy_from_jsonfile():
with open(os.path.join(dirname, 'data/parselet.json')) as fp:
p = Parselet.from_jsonfile(fp)
extracted = p.parse_fromstring(html)
assert_dict_equal(extracted, expected)
def test_parslepy_from_yamlfile():
with open(os.path.join(dirname, 'data/parselet.yml')) as fp:
p = Parselet.from_yamlfile(fp)
extracted = p.parse_fromstring(html)
assert_dict_equal(extracted, expected)
|
1700015
|
import o3seespy as o3 # for testing only
def test_plain():
osi = o3.OpenSeesInstance(ndm=2)
ts = o3.time_series.Linear(osi, factor=1.0)
o3.pattern.Plain(osi, ts=ts, fact=1.0)
def test_uniform_excitation():
osi = o3.OpenSeesInstance(ndm=2)
ts = o3.time_series.Linear(osi, factor=1.0)
o3.pattern.UniformExcitation(osi, dir=1, accel_series=ts, vel0=1.0, fact=1.0)
def test_multiple_support():
osi = o3.OpenSeesInstance(ndm=2)
o3.pattern.MultipleSupport(osi)
|
1700017
|
def longest_increasing_subarray(a):
"""Return (start, end) half-open indices of the longest
non-decreasing run in a; ties go to the earliest run."""
A = list(a) + [float('-inf')]
best_s, best_n = 0, 1
s, n = 0, 1
for i in range(len(a)):
if A[i] <= A[i+1]:
n += 1
continue
if n > best_n:
best_s, best_n = s, n
s = i+1
n = 1
return best_s, best_s + best_n
def test():
# example in the book
a = [2, 11, 3, 5, 13, 7, 19, 17, 23]
r = longest_increasing_subarray(a)
assert r == (2, 5)
# entire thing is increasing
a = range(13)
r = longest_increasing_subarray(a)
assert r == (0, 13)
print('pass')
def main():
test()
if __name__ == '__main__':
main()
|
1700056
|
from __future__ import division
import scipy.linalg
import autograd.numpy as anp
from autograd.numpy.numpy_wrapper import wrap_namespace
from autograd.extend import defvjp, defvjp_argnums, defjvp, defjvp_argnums
wrap_namespace(scipy.linalg.__dict__, globals()) # populates module namespace
def _vjp_sqrtm(ans, A, disp=True, blocksize=64):
assert disp, "sqrtm vjp not implemented for disp=False"
ans_transp = anp.transpose(ans)
def vjp(g):
return anp.real(solve_sylvester(ans_transp, ans_transp, g))
return vjp
defvjp(sqrtm, _vjp_sqrtm)
def _flip(a, trans):
if anp.iscomplexobj(a):
return 'H' if trans in ('N', 0) else 'N'
else:
return 'T' if trans in ('N', 0) else 'N'
def grad_solve_triangular(ans, a, b, trans=0, lower=False, **kwargs):
tri = anp.tril if (lower ^ (_flip(a, trans) == 'N')) else anp.triu
transpose = lambda x: x if _flip(a, trans) != 'N' else x.T
al2d = lambda x: x if x.ndim > 1 else x[...,None]
def vjp(g):
v = al2d(solve_triangular(a, g, trans=_flip(a, trans), lower=lower))
return -transpose(tri(anp.dot(v, al2d(ans).T)))
return vjp
defvjp(solve_triangular,
grad_solve_triangular,
lambda ans, a, b, trans=0, lower=False, **kwargs:
lambda g: solve_triangular(a, g, trans=_flip(a, trans), lower=lower))
def _jvp_sqrtm(dA, ans, A, disp=True, blocksize=64):
assert disp, "sqrtm jvp not implemented for disp=False"
return solve_sylvester(ans, ans, dA)
defjvp(sqrtm, _jvp_sqrtm)
def _jvp_sylvester(argnums, dms, ans, args, _):
# Differentiating a@X + X@b = q gives a@dX + dX@b = dq - da@X - X@db,
# which is itself a Sylvester equation in dX.
a, b, q = args
if 0 in argnums:
da = dms[0]
db = dms[1] if 1 in argnums else 0
else:
da = 0
db = dms[0] if 1 in argnums else 0
dq = dms[-1] if 2 in argnums else 0
rhs = dq - anp.dot(da, ans) - anp.dot(ans, db)
return solve_sylvester(a, b, rhs)
defjvp_argnums(solve_sylvester, _jvp_sylvester)
def _vjp_sylvester(argnums, ans, args, _):
# Adjoint of the Sylvester solve: q's cotangent solves the transposed
# equation; a and b receive minus its products with the primal solution.
a, b, q = args
def vjp(g):
vjps = []
q_vjp = solve_sylvester(anp.transpose(a), anp.transpose(b), g)
if 0 in argnums: vjps.append(-anp.dot(q_vjp, anp.transpose(ans)))
if 1 in argnums: vjps.append(-anp.dot(anp.transpose(ans), q_vjp))
if 2 in argnums: vjps.append(q_vjp)
return tuple(vjps)
return vjp
defvjp_argnums(solve_sylvester, _vjp_sylvester)
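# A minimal usage sketch: with these adjoints registered, sqrtm (and
# solve_sylvester) can sit inside autograd-differentiated code.
#
# from autograd import grad
# loss = lambda A: anp.sum(sqrtm(A))
# g = grad(loss)(anp.eye(3) * 4.0)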
|
1700057
|
import sys
def get_gff_hash(gffile):
gff_hash = {}
fo = open(gffile)
for line in fo:
gf = GeneFeature(line)
if gf.seqid not in gff_hash: gff_hash[gf.seqid] = []
gff_hash[gf.seqid].append(gf)
fo.close()
return gff_hash
class GeneFeature():
def __init__(self, line):
columns = line.rstrip().split("\t")
if not len(columns) == 9:
print >> sys.stderr, "GFF3 with incorrect number of columns. Expected: 9 | Observed: %s" % len(columns)
print >> sys.stderr, "\"%s\"" % line
sys.exit(1)
self.seqid = columns.pop(0)
self.source = columns.pop(0)
self.ftype = columns.pop(0)
self.start = int(columns.pop(0))
self.stop = int(columns.pop(0))
self.score = columns.pop(0)
self.strand = columns.pop(0)
self.phase = columns.pop(0)
self.attributes = columns.pop(0)
def get_attributes(self):
attrs = {}
for e in self.attributes.split(";"):
if e == '': continue
k, v = e.split("=")
attrs[k] = v
return attrs
def set_attribute(self, key, value):
# Rebuild the attribute string from the parsed dict in both cases; this
# also avoids appending without a separator when the string lacks a
# trailing ";".
attrs = self.get_attributes()
attrs[key] = value
self.attributes = "".join("%s=%s;" % (k, v) for k, v in attrs.items())
def to_string(self):
return "\t".join([self.seqid, self.source, self.ftype, str(self.start), str(self.stop), self.score, self.strand, self.phase, self.attributes])
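# Usage sketch (the record below is illustrative): parse one tab-separated
# GFF3 line and round-trip an attribute.
#
# gf = GeneFeature("chr1\tsrc\tgene\t1\t100\t.\t+\t.\tID=gene1")
# gf.set_attribute("Name", "demo")
# print(gf.to_string())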
|
1700123
|
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
MAX_LINES = 18
class ProgrammerHelpPlugin(WillPlugin):
@respond_to("^programmer help$")
def help(self, message):
"""programmer help: Advanced programmer-y help."""
all_regexes = self.load("all_listener_regexes")
self.say("Here's everything I know how to listen to:", message, start_thread=True)
for r in range(0, len(all_regexes), MAX_LINES):
text = "\n".join(all_regexes[r:r+MAX_LINES])
self.say(f'```{text}```', message, start_thread=True)
|
1700144
|
import tensorflow as tf
import os
import pdb
relu = tf.nn.relu
elu = tf.nn.elu
normal = tf.distributions.Normal
kldv = tf.distributions.kl_divergence
class Network(object):
def __init__(self, name):
self.name = name
self.eps = 1e-3
def dense(self, x, units, name='dense', reuse=None):
with tf.variable_scope(name, reuse=reuse):
kernel = tf.get_variable('kernel', [x.shape[1].value, units])
bias = tf.get_variable('bias', [units],
initializer=tf.zeros_initializer())
x = tf.matmul(x, kernel) + bias
return x
def conv(self, x, filters, kernel_size=3, strides=1, padding='SAME',
name='conv', reuse=None):
with tf.variable_scope(name, reuse=reuse):
kernel = tf.get_variable('kernel',
[kernel_size, kernel_size, x.shape[1].value, filters])
x = tf.nn.conv2d(x, kernel, [1, 1, strides, strides],
padding=padding, data_format='NCHW')
return x
def deconv(self, x, filters, kernel_size=3, strides=1, padding='SAME',
name='deconv', reuse=None):
with tf.variable_scope(name, reuse=reuse):
x = tf.layers.conv2d_transpose(x, filters, kernel_size, strides, data_format='channels_first',
reuse=reuse, padding=padding)
return x
def batch_norm(self, x, training, decay=0.9, name='batch_norm', reuse=None):
with tf.variable_scope(name, reuse=reuse):
dim = x.shape[1].value
moving_mean = tf.get_variable('moving_mean', [dim],
initializer=tf.zeros_initializer(), trainable=False)
moving_var = tf.get_variable('moving_var', [dim],
initializer=tf.ones_initializer(), trainable=False)
beta = tf.get_variable('beta', [dim],
initializer=tf.zeros_initializer())
gamma = tf.get_variable('gamma', [dim],
initializer=tf.ones_initializer())
if training:
x, batch_mean, batch_var = tf.nn.fused_batch_norm(x, gamma, beta, data_format='NCHW')
update_mean = moving_mean.assign_sub((1-decay)*(moving_mean - batch_mean))
update_var = moving_var.assign_sub((1-decay)*(moving_var - batch_var))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_var)
else:
x, batch_mean, batch_var = tf.nn.fused_batch_norm(x, gamma, beta,
mean=moving_mean, variance=moving_var, is_training=False,
data_format='NCHW')
return x
def global_avg_pool(self, x):
return tf.reduce_mean(x, [2, 3])
def simple_conv(self, in_x, reuse=False, isTr=True):
def conv_block(x, name, reuse, isTr):
x = self.conv(x, 64, name=name+'/conv', reuse=reuse)
x = self.batch_norm(x, isTr, name=name+'/bn', reuse=reuse)
x = relu(x)
x = tf.nn.max_pool(x, [1,1,2,2], [1,1,2,2], 'VALID', 'NCHW')
return x
x = in_x
for i in range(4):
x = conv_block(x, 'b{}'.format(i+1), reuse=reuse, isTr=isTr)
x = tf.layers.flatten(x)
return x
def Lrn2evl(name, in_x, nway, transduction=True, scalar=1e+2):
# learning to evaluate
with tf.variable_scope(name):
tl = tf.layers.conv2d(in_x, 512, kernel_size=[1,nway],
padding='VALID')
tl = tf.nn.relu(tl)
tl = tf.layers.conv2d(tl, 128, kernel_size=[1,1],
padding='SAME')
tl = tf.nn.relu(tl)
if transduction:
tl = tf.reduce_mean(tl, axis=(1,2))
else:
shape = tl.get_shape().as_list()
tl = tf.reshape(tl, [-1,shape[-1]])
tl = tf.layers.dense(tl, 1)
fe = tf.nn.sigmoid(tl) * scalar
return fe
class ProtoNet(Network):
def __init__(self, name, nway, kshot, qsize, isTr, reuse=False,
input_dict=None):
self.name = name
self.nway = nway
self.kshot = kshot
self.qsize = qsize
self.hdim = 1600
if input_dict is None:
self.inputs = {\
'sx': tf.placeholder(tf.float32, [None,84,84,3]),
'qx': tf.placeholder(tf.float32, [None,84,84,3]),
'qy': tf.placeholder(tf.float32, [None,None])}
else:
self.inputs = input_dict
self.outputs = {}
with tf.variable_scope(name):
self._build_network(isTr, reuse=reuse)
def _build_network(self, isTr, reuse, scale=1e+1):
ip = self.inputs
sq_inputs = tf.concat([ip['sx'], ip['qx']], axis=0)
sq_outputs = self.base_cnn(sq_inputs, isTr=isTr, reuse=reuse)
if True: # normalized
sq_outputs = sq_outputs / (tf.norm(sq_outputs, axis=1,
keepdims=True) + 1e-8) * scale
support_h = sq_outputs[:self.nway*self.kshot]
query_h = sq_outputs[self.nway*self.kshot:]
# support_h = self.base_cnn(ip['sx'], isTr, reuse=reuse)
# query_h = self.base_cnn(ip['qx'], isTr, reuse=True)
# if True:
# support_h = support_h /(tf.norm(support_h, axis=1,
# keep_dims=True)+1e-8) * scale
# query_h = query_h /(tf.norm(query_h, axis=1,
# keep_dims=True)+1e-8) * scale
proto_vec = tf.reshape(support_h, [self.nway, -1, self.hdim])
proto_vec = tf.reduce_mean(proto_vec, axis=1)
_p = tf.expand_dims(proto_vec, axis=0)
_q = tf.expand_dims(query_h, axis=1)
embedding = (_p - _q)**2
# dist = tf.reduce_mean(embedding, axis=2)
dist = tf.reduce_sum(embedding, axis=2)
prediction = tf.nn.softmax(-dist)
self.outputs['embedding'] = embedding
self.outputs['pred'] = prediction
self.outputs['loss'] = cross_entropy(prediction, ip['qy'])
self.outputs['acc'] = tf_acc(prediction, ip['qy'])
def base_cnn(self, in_x, isTr, reuse=False):
in_x = tf.transpose(in_x, [0,3,1,2])
return self.simple_conv(in_x, reuse=reuse, isTr=isTr)
def cross_entropy(pred, label):
return -tf.reduce_mean(tf.reduce_sum(label*tf.log(pred+1e-10), axis=1))
def cross_entropy_with_metabatch(pred, label):
# shape of pred, label: (metabatch, batch, nway)
return -tf.reduce_mean(tf.reduce_sum(label*tf.log(pred+1e-10), axis=2), axis=1)
def tf_acc(p, y):
acc = tf.equal(tf.argmax(y,1), tf.argmax(p,1))
acc = tf.reduce_mean(tf.cast(acc, 'float'))
return acc
def ckpt_restore_with_prefix(sess, ckpt_dir, prefix):
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=prefix)
var_list_name = [i.name.split(':')[0] for i in var_list]
for var_name, _ in tf.contrib.framework.list_variables(ckpt_dir):
var = tf.contrib.framework.load_variable(ckpt_dir, var_name)
new_name = prefix + '/' + var_name
if new_name in var_list_name:
with tf.variable_scope(prefix, reuse=True):
tfvar = tf.get_variable(var_name)
sess.run(tfvar.assign(var))
|
1700243
|
from teleport import Teleport
import logging
from contextlib import contextmanager
from subprocess import check_call
def _run_commands(commands):
for command in commands:
logging.debug("running command '%s'", " ".join(command))
check_call(command)
def allow_traffic_only_to(address, dns_servers=None):
host = address.split(":")[0]
logging.info("Allowing traffic only to %s on eth0", host)
commands = [
["iptables", "-A", "OUTPUT", "-m", "state", "--state", "RELATED,ESTABLISHED", "-j", "ACCEPT"],
["iptables", "-A", "OUTPUT", "-o", "tun0", "-j", "ACCEPT"],
["iptables", "-A", "OUTPUT", "-o", "lo", "-j", "ACCEPT"],
["iptables", "-A", "OUTPUT", "-o", "eth0", "-d", host, "-j", "ACCEPT"],
["iptables", "-P", "OUTPUT", "DROP"],
]
if dns_servers is not None:
logging.info('Allowing traffic to dns servers %s', dns_servers)
for dns_server in dns_servers:
commands.append(["iptables", "-A", "OUTPUT", "-o", "eth0", "-d", dns_server, "-j", "ACCEPT"])
_run_commands(commands)
def reset_firewall():
logging.info("resetting firewall")
commands = [
["iptables", "-F"],
["iptables", "-P", "OUTPUT", "ACCEPT"],
]
_run_commands(commands)
@contextmanager
def FirewallContext(address, dns_servers=None):
allow_traffic_only_to(address, dns_servers)
try:
yield
finally:
reset_firewall()
@contextmanager
def Teleporter(config, place, with_firewall=True, dns_servers=None):
t = Teleport(config).goto(place)
try:
if with_firewall:
with FirewallContext(t.get_peer_address(), dns_servers=dns_servers):
yield t
else:
yield t
finally:
t.go_home()
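# Usage sketch (config path and place name are hypothetical): lock down
# traffic to the teleport peer for the duration of the block, then restore.
#
# with Teleporter("/etc/teleport.conf", "office", dns_servers=["8.8.8.8"]) as t:
#     check_call(["ping", "-c", "1", t.get_peer_address().split(":")[0]])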
|
1700265
|
import tensorflow as tf
from collections import namedtuple
from graph_module import GraphModule
from tensorflow.contrib.rnn.python.ops import rnn
EncoderOutput = namedtuple(
"EncoderOutput",
"outputs final_state attention_values attention_values_length")
class StackedBidirectionalEncoder(GraphModule):
""" multi-layer bidirectional encoders
"""
def __init__(self, cell, name='stacked_bidirectional'):
super(StackedBidirectionalEncoder, self).__init__(name)
self.cell = cell
def _build(self, inputs, lengths):
outputs, final_fw_state, final_bw_state = rnn.stack_bidirectional_dynamic_rnn(
cells_fw=self.cell._cells,
cells_bw=self.cell._cells,
inputs=inputs,
sequence_length=lengths,
dtype=tf.float32)
# Return the final states of the forward and backward RNNs as a tuple
final_state = final_fw_state, final_bw_state
return EncoderOutput(
outputs=outputs,
final_state=final_state,
attention_values=outputs,
attention_values_length=lengths)
class BidirectionalEncoder(GraphModule):
""" single-layer bidirectional encoder
"""
def __init__(self, cell1, cell2, name='bidirectional'):
super(BidirectionalEncoder, self).__init__(name)
self.cell1 = cell1
self.cell2 = cell2
def _build(self, inputs, lengths):
outputs_pre, final_state = tf.nn.bidirectional_dynamic_rnn(
cell_fw=self.cell1,
cell_bw=self.cell2,
inputs=inputs,
sequence_length=lengths,
dtype=tf.float32)
# Concatenate outputs of the forward and backward RNNs
outputs = tf.concat(outputs_pre, 2)
return EncoderOutput(
outputs=outputs,
final_state=final_state,
attention_values=outputs,
attention_values_length=lengths)
class IdentityEncoder(GraphModule):
""" do-nothing encoder
"""
def __init__(self, name='identity'):
super(IdentityEncoder, self).__init__(name)
def _build(self, inputs, lengths):
return EncoderOutput(
outputs=inputs,
final_state=tf.zeros_like(inputs[:,0,:]),
attention_values=inputs,
attention_values_length=lengths)
|
1700281
|
from unittest import TestCase
from irlib.preprocessor import Preprocessor, my_nltk
class TestPreprocessor(TestCase):
def setUp(self):
pass
def test_term2ch(self):
p = Preprocessor()
charlist = p.term2ch('help')
self.assertEqual(charlist, ['h', 'e', 'l', 'p'])
def test_stemmer(self):
p = Preprocessor(stem=True)
stemmed = p.stemmer('running')
if my_nltk:
self.assertEqual(stemmed,'run')
else:
self.fail('NLTK is not installed')
def test_stemmer_lower(self):
p = Preprocessor(lower=True, stem=True)
stemmed = p.stemmer('Running')
if my_nltk:
self.assertEqual(stemmed,'run')
else:
self.fail('NLTK is not installed')
def test_tokenizer_lower(self):
p = Preprocessor(lower=True, stem=False)
tokens = p.tokenizer('This is IRLib')
self.assertEqual(tokens,['this','is','irlib'])
def test_2gram_tokenizer(self):
p = Preprocessor(lower=False, stem=False, ngram=2)
returned_tokens = p.ngram_tokenizer('how do you do?')
expected_tokens = ['how do', 'do you', 'you do']
self.assertEqual(returned_tokens, expected_tokens)
def test_3gram_tokenizer(self):
p = Preprocessor(lower=False, stem=False, ngram=3)
returned_tokens = p.ngram_tokenizer('how do you do?')
expected_tokens = ['how do you', 'do you do']
self.assertEqual(returned_tokens, expected_tokens)
def test_is_mention(self):
is_it = Preprocessor.is_mention('@twitter')
self.assertEqual(is_it, True)
is_it = Preprocessor.is_mention('#twitter')
self.assertEqual(is_it, False)
def test_is_hashtag(self):
is_it = Preprocessor.is_hashtag('@twitter')
self.assertEqual(is_it, False)
is_it = Preprocessor.is_hashtag('#twitter')
self.assertEqual(is_it, True)
def test_is_link(self):
is_it = Preprocessor.is_link('hello world')
self.assertEqual(is_it, False)
is_it = Preprocessor.is_link('http://www.yahoo.com')
self.assertEqual(is_it, True)
is_it = Preprocessor.is_link('https://www.yahoo.com')
self.assertEqual(is_it, True)
is_it = Preprocessor.is_link('www.yahoo.com')
self.assertEqual(is_it, True)
|
1700284
|
import string
import unittest
from ..util import PseudoStr, StrProxy, Object
from .. import tool_imports_for_tests
with tool_imports_for_tests():
from c_analyzer.common.info import (
UNKNOWN,
ID,
)
class IDTests(unittest.TestCase):
VALID_ARGS = (
'x/y/z/spam.c',
'func',
'eggs',
)
VALID_KWARGS = dict(zip(ID._fields, VALID_ARGS))
VALID_EXPECTED = VALID_ARGS
def test_from_raw(self):
tests = [
('', None),
(None, None),
('spam', (None, None, 'spam')),
(('spam',), (None, None, 'spam')),
(('x/y/z/spam.c', 'spam'), ('x/y/z/spam.c', None, 'spam')),
(self.VALID_ARGS, self.VALID_EXPECTED),
(self.VALID_KWARGS, self.VALID_EXPECTED),
]
for raw, expected in tests:
with self.subTest(raw):
id = ID.from_raw(raw)
self.assertEqual(id, expected)
def test_minimal(self):
id = ID(
filename=None,
funcname=None,
name='eggs',
)
self.assertEqual(id, (
None,
None,
'eggs',
))
def test_init_typical_global(self):
id = ID(
filename='x/y/z/spam.c',
funcname=None,
name='eggs',
)
self.assertEqual(id, (
'x/y/z/spam.c',
None,
'eggs',
))
def test_init_typical_local(self):
id = ID(
filename='x/y/z/spam.c',
funcname='func',
name='eggs',
)
self.assertEqual(id, (
'x/y/z/spam.c',
'func',
'eggs',
))
def test_init_all_missing(self):
for value in ('', None):
with self.subTest(repr(value)):
id = ID(
filename=value,
funcname=value,
name=value,
)
self.assertEqual(id, (
None,
None,
None,
))
def test_init_all_coerced(self):
tests = [
('str subclass',
dict(
filename=PseudoStr('x/y/z/spam.c'),
funcname=PseudoStr('func'),
name=PseudoStr('eggs'),
),
('x/y/z/spam.c',
'func',
'eggs',
)),
('non-str',
dict(
filename=StrProxy('x/y/z/spam.c'),
funcname=Object(),
name=('a', 'b', 'c'),
),
('x/y/z/spam.c',
'<object>',
"('a', 'b', 'c')",
)),
]
for summary, kwargs, expected in tests:
with self.subTest(summary):
id = ID(**kwargs)
for field in ID._fields:
value = getattr(id, field)
self.assertIs(type(value), str)
self.assertEqual(tuple(id), expected)
def test_iterable(self):
id = ID(**self.VALID_KWARGS)
filename, funcname, name = id
values = (filename, funcname, name)
for value, expected in zip(values, self.VALID_EXPECTED):
self.assertEqual(value, expected)
def test_fields(self):
id = ID('a', 'b', 'z')
self.assertEqual(id.filename, 'a')
self.assertEqual(id.funcname, 'b')
self.assertEqual(id.name, 'z')
def test_validate_typical(self):
id = ID(
filename='x/y/z/spam.c',
funcname='func',
name='eggs',
)
id.validate() # This does not fail.
def test_validate_missing_field(self):
for field in ID._fields:
with self.subTest(field):
id = ID(**self.VALID_KWARGS)
id = id._replace(**{field: None})
if field == 'funcname':
id.validate() # The field can be missing (not set).
id = id._replace(filename=None)
id.validate() # Both fields can be missing (not set).
continue
with self.assertRaises(TypeError):
id.validate()
def test_validate_bad_field(self):
badch = tuple(c for c in string.punctuation + string.digits)
notnames = (
'1a',
'a.b',
'a-b',
'&a',
'a++',
) + badch
tests = [
('filename', ()), # Any non-empty str is okay.
('funcname', notnames),
('name', notnames),
]
seen = set()
for field, invalid in tests:
for value in invalid:
seen.add(value)
with self.subTest(f'{field}={value!r}'):
id = ID(**self.VALID_KWARGS)
id = id._replace(**{field: value})
with self.assertRaises(ValueError):
id.validate()
for field, invalid in tests:
valid = seen - set(invalid)
for value in valid:
with self.subTest(f'{field}={value!r}'):
id = ID(**self.VALID_KWARGS)
id = id._replace(**{field: value})
id.validate() # This does not fail.
|
1700327
|
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param {ListNode} head
# @param {integer} n
# @return {ListNode}
def removeNthFromEnd(self, head, n):
if (not head) or (n == 0): return head
fast = head; slow = head
for _ in range(n):
fast = fast.next
if not fast:
return head.next
while fast.next:
slow = slow.next
fast = fast.next
slow.next = slow.next.next
return head
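# Worked example (a small sketch to exercise the two-pointer pass):
#
# def build(vals):
#     head = ListNode(vals[0])
#     node = head
#     for v in vals[1:]:
#         node.next = ListNode(v)
#         node = node.next
#     return head
#
# head = Solution().removeNthFromEnd(build([1, 2, 3, 4, 5]), 2)
# # traversing head now yields 1 -> 2 -> 3 -> 5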
|
1700329
|
import hazelcast
# Start the Hazelcast Client and connect to an already running Hazelcast Cluster on 127.0.0.1
client = hazelcast.HazelcastClient()
# Get a Topic called "my-distributed-topic"
topic = client.get_reliable_topic("my-distributed-topic").blocking()
# Add a Listener to the Topic
topic.add_listener(lambda message: print(message))
# Publish a message to the Topic
topic.publish("Hello to distributed world")
# Shutdown this Hazelcast Client
client.shutdown()
|
1700345
|
import numpy as np
import pytest
from emukit.examples.fabolas import FabolasModel
@pytest.fixture
def model():
rng = np.random.RandomState(42)
x_init = rng.rand(5, 2)
s_min = 10
s_max = 10000
s = rng.uniform(s_min, s_max, x_init.shape[0])
x_init = np.concatenate((x_init, s[:, None]), axis=1)
y_init = rng.rand(5, 1)
model = FabolasModel(X_init=x_init, Y_init=y_init, s_min=s_min, s_max=s_max)
return model
def test_predict_shape(model):
rng = np.random.RandomState(43)
x_test = rng.rand(10, 2)
s = rng.uniform(model.s_min, model.s_max, x_test.shape[0])
x_test = np.concatenate((x_test, s[:, None]), axis=1)
m, v = model.predict(x_test)
assert m.shape == (10, 1)
assert v.shape == (10, 1)
def test_update_data(model):
rng = np.random.RandomState(43)
x_new = rng.rand(5, 2)
s = rng.uniform(model.s_min, model.s_max, x_new.shape[0])
x_new = np.concatenate((x_new, s[:, None]), axis=1)
y_new = rng.rand(5, 1)
model.set_data(x_new, y_new)
assert model.X.shape == x_new.shape
assert model.Y.shape == y_new.shape
|
1700381
|
import argparse
from itertools import count
import numpy as np
import h5py
from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change
from traitsui.api import View, Item, HGroup, RangeEditor
from tvtk.api import tvtk
from tvtk.pyface.scene_editor import SceneEditor
from tvtk.common import configure_input, configure_input_data
from mayavi.tools.mlab_scene_model import MlabSceneModel
from mayavi.core.ui.mayavi_scene import MayaviScene
from pyface.timer.api import Timer
from util import veclen
from inout import load_splocs
class Visualization(HasTraits):
component = Int(0)
_max_component_index = Int()
activation = Range(-1., 1.)
oscillate = Bool(True)
allow_negative = Bool(False)
pd = Instance(tvtk.PolyData)
normals = Instance(tvtk.PolyDataNormals)
actor = Instance(tvtk.Actor)
scene = Instance(MlabSceneModel, (), kw=dict(background=(1,1,1)))
timer = Instance(Timer)
def __init__(self, Xmean, tris, components):
HasTraits.__init__(self)
self._components = components
self._max_component_index = len(components)
self._Xmean = Xmean
self.pd = tvtk.PolyData(points=Xmean, polys=tris)
self.normals = tvtk.PolyDataNormals(splitting=False)
configure_input_data(self.normals, self.pd)
mapper = tvtk.PolyDataMapper(immediate_mode_rendering=True)
self.actor = tvtk.Actor(mapper=mapper)
configure_input(self.actor.mapper, self.normals)
self.actor.mapper.lookup_table = tvtk.LookupTable(
hue_range = (0.45, 0.6),
saturation_range = (0., 0.8),
value_range = (.6, 1.),
)
self.scene.add_actor(self.actor)
self.timer = Timer(40, self.animate().__next__)
def animate(self):
for i in count():
if self.oscillate:
frame = i % 30
alpha = np.sin(frame/30. * np.pi*2)
if not self.allow_negative:
alpha = np.abs(alpha)
self.activation = alpha
yield
@on_trait_change('activation, component')
def update_plot(self):
c = self._components[self.component]
self.pd.points = self._Xmean + self.activation * c
magnitude = veclen(c)
self.pd.point_data.scalars = magnitude
self.actor.mapper.scalar_range = (0, magnitude.max())
self.scene.render()
view = View(
Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=600, width=800, show_label=False),
HGroup(
Item('component', editor=RangeEditor(
is_float=False, low=0, high_name='_max_component_index', mode='spinner')),
'activation',
'oscillate',
'allow_negative',
),
resizable=True, title="View SPLOC's",
)
def main(component_hdf5_file):
Xmean, tris, components, names = load_splocs(component_hdf5_file)
visualization = Visualization(Xmean, tris, components)
visualization.configure_traits()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Viewer for sparse localized deformation components')
parser.add_argument('input_sploc_file')
args = parser.parse_args()
main(args.input_sploc_file)
|
1700389
|
import logging
import click
from .click_common import command
from .miot_device import DeviceStatus as DeviceStatusContainer
from .miot_device import MiotDevice, MiotMapping
from .dreame_const import *
_LOGGER = logging.getLogger(__name__)
class DreameVacuumStatus(DeviceStatusContainer):
def __init__(self, data):
self.data = data
@property
def status(self) -> VacuumStatus:
try:
return VacuumStatus(self.data["property_device_status"])
except ValueError:
_LOGGER.error(
"Unknown device_status (%s)", self.data["property_device_status"]
)
return VacuumStatus.Unknown
@property
def error(self) -> ErrorCodes:
try:
return ErrorCodes(self.data["property_device_fault"])
except ValueError:
_LOGGER.error(
"Unknown device_fault (%s)", self.data["property_device_fault"]
)
return ErrorCodes.Unknown
@property
def battery(self) -> int:
return self.data["property_battery_level"]
@property
def state(self) -> ChargeStatus:
try:
return ChargeStatus(self.data["property_charging_state"])
except ValueError:
_LOGGER.error(
"Unknown charging_state (%s)", self.data["property_charging_state"]
)
return ChargeStatus.Unknown
@property
def operating_mode(self) -> OperatingMode:
try:
return OperatingMode(self.data["property_operating_mode"])
except ValueError:
_LOGGER.error(
"Unknown operating_mode (%s)", self.data["property_operating_mode"]
)
return OperatingMode.Unknown
@property
def cleaning_time(self) -> str:
return self.data["property_cleaning_time"]
@property
def cleaning_area(self) -> str:
return self.data["property_cleaning_area"]
@property
def fan_speed(self) -> VacuumSpeed:
try:
return VacuumSpeed(self.data["property_cleaning_mode"])
except ValueError:
_LOGGER.error(
"Unknown cleaning_mode (%s)", self.data["property_cleaning_mode"]
)
return VacuumSpeed.Unknown
@property
def water_level(self) -> WaterLevel:
try:
return WaterLevel(self.data["property_water_level"])
except ValueError:
_LOGGER.error("Unknown water_level (%s)", self.data["property_water_level"])
return WaterLevel.Unknown
@property
def waterbox_status(self) -> Waterbox:
try:
return Waterbox(self.data["property_waterbox_status"])
except ValueError:
_LOGGER.error(
"Unknown waterbox_status (%s)", self.data["property_waterbox_status"]
)
return Waterbox.Unknown
@property
def operation_status(self) -> OperationStatus:
try:
return OperationStatus(self.data["property_operation_status"])
except ValueError:
_LOGGER.error(
"Unknown operation_status (%s)", self.data["property_operation_status"]
)
return OperationStatus.Unknown
@property
def carpet_boost(self) -> bool:
return bool(self.data["property_carpet_boost"])
@property
def multi_map_enabled(self) -> bool:
return bool(self.data["property_multi_map_enabled"])
@property
def dnd_enabled(self) -> bool:
return self.data["property_dnd_enabled"]
@property
def dnd_start_time(self) -> str:
return self.data["property_dnd_start_time"]
@property
def dnd_stop_time(self) -> str:
return self.data["property_dnd_stop_time"]
@property
def audio_volume(self) -> int:
return self.data["property_audio_volume"]
@property
def audio_language(self) -> str:
return self.data["property_audio_language"]
@property
def timezone(self) -> str:
return self.data["property_timezone"]
@property
def schedule(self) -> str:
return self.data["property_scheduled-clean"]
@property
def main_brush_left_time(self) -> int:
return self.data["property_main_brush_left_time"]
@property
def main_brush_life_level(self) -> int:
return self.data["property_main_brush_life_level"]
@property
def side_brush_left_time(self) -> int:
return self.data["property_side_brush_left_time"]
@property
def side_brush_life_level(self) -> int:
return self.data["property_side_brush_life_level"]
@property
def filter_life_level(self) -> int:
return self.data["property_filter_life_level"]
@property
def filter_left_time(self) -> int:
return self.data["property_filter_left_time"]
@property
def total_log_start(self) -> int:
return self.data["property_first-clean-time"]
@property
def total_clean_time(self) -> int:
return self.data["property_total_clean_time"]
@property
def total_clean_count(self) -> int:
return self.data["property_total_clean_count"]
@property
def total_clean_area(self) -> int:
return self.data["property_total_clean_area"]
@property
def clean_cloth_tip(self) -> str:
return self.data["property_clean_cloth_tip"]
@property
def serial_number(self) -> str:
return self.data["property_serial_number"]
class DreameVacuum(MiotDevice):
"""Support for dreame vacuum robot d9 (dreame.vacuum.p2009)."""
mapping = DreameD9Mapping
def status(self) -> DreameVacuumStatus:
"""State of the vacuum."""
return DreameVacuumStatus(
{
prop["did"]: (prop["value"] if "value" in prop.keys() else None) if prop["code"] == 0 else None
for prop in self.get_properties_for_mapping()
}
)
@command(click.argument("speed", type=int))
def set_fan_speed(self, speed):
"""Set vacuum cleaning mode."""
return self.set_property("property_cleaning_mode", speed)
@command()
def return_home(self) -> None:
"""Return home for charging."""
return self.call_action("action_start_charging")
@command()
def start_sweep(self) -> None:
"""Start cleaning."""
return self.call_action("action_start_sweeping")
@command()
def pause_sweeping(self) -> None:
"""Pause cleaning."""
return self.call_action("action_pause_sweeping")
@command()
def reset_brush_life(self) -> None:
"""Reset main brush's life."""
return self.call_action("action_reset_main_brush_life")
@command()
def reset_filter_life(self) -> None:
"""Reset filter's life."""
return self.call_action("action_reset_filter_life")
@command()
def reset_side_brush_life(self) -> None:
"""Reset side brush's life."""
return self.call_action("action_reset_side_brush_life")
@command()
def start_sweeping_advanced(self, params) -> None:
"""Start cleaning (advanced). Specify cleaning mode like room, zone,..."""
return self.call_action("action_start_sweeping_advanced", params)
@command()
def stop_sweeping(self) -> None:
"""Stop cleaning."""
return self.call_action("action_stop_sweeping")
@command()
def set_map(self, params) -> None:
"""Set map related features like: switching to another map, setting restricted area, etc."""
return self.call_action("action_set_map", params)
@command()
def fast_map(self) -> None:
"""Start fast mapping."""
payload = [{"piid": 1, "value": 21}]
return self.start_sweeping_advanced(payload)
@command()
def set_carpet_boost(self, carpet_boost_enabled) -> None:
"""Enable or disable carpet boost."""
return self.set_property(
"property_carpet_boost", 1 if carpet_boost_enabled else 0
)
@command()
def set_multi_map(self, multi_map_enabled) -> None:
"""Enable or disable multi map feature."""
return self.set_property(
"property_multi_map_enabled", 1 if multi_map_enabled else 0
)
@command()
def rename_map(self, map_id, map_name) -> None:
"""Rename a map"""
payload = [
{
"piid": 4,
"value": '{"nrism":{%(id)s:{"name":%(name)s} } }'
% {"id": map_id, "name": map_name},
},
]
return self.call_action("action_set_map", payload)
@command()
def set_dnd(self, dnd_enabled) -> None:
"""Enable or disable do not disturb."""
return self.set_property("property_dnd_enabled", dnd_enabled)
@command()
def set_dnd_start(self, dnd_start) -> None:
"""set start time for do not disturb function."""
return self.set_property("property_dnd_start_time", dnd_start)
@command()
def set_dnd_stop(self, dnd_stop) -> None:
"""set end time for do not disturb function."""
return self.set_property("property_dnd_stop_time", dnd_stop)
@command(click.argument("coords", type=str), click.argument("repeats", type=int))
def zone_cleanup(self, coords, repeats) -> None:
"""Start zone cleaning."""
payload = [
{"piid": 1, "value": 19},
{
"piid": 10,
"value": '{"areas": [[%(coords)s,%(repeats)d,0,0]]}'
% {"coords": coords, "repeats": repeats},
# TODO find out why the two last parameters do not affect fan speed or water level / what do they do?
},
]
return self.start_sweeping_advanced(payload)
@command()
def room_cleanup_by_id(self, rooms, repeats, clean_mode, mop_mode) -> None:
"""Start room-id cleaning."""
cleanlist = []
for sublist in rooms:
if len(sublist) > 1:
repeats = sublist[1]
if len(sublist) > 2:
clean_mode = sublist[2]
if len(sublist) > 3:
mop_mode = sublist[3]
cleanlist.append(
[
ord(sublist[0].upper()) - 64,
repeats,
clean_mode,
mop_mode,
rooms.index(sublist) + 1,
]
)
payload = [
{"piid": 1, "value": 18},
{
"piid": 10,
"value": '{"selects": ' + str(cleanlist).replace(" ", "") + "}",
},
]
return self.start_sweeping_advanced(payload)
@command(
click.argument("walls", type=str),
click.argument("zones", type=str),
click.argument("mops", type=str),
)
def set_restricted_zone(self, walls, zones, mops) -> None:
"""set restricted/ no-mop zone"""
value = '{"vw":{"line":[%(walls)s],"rect":[%(zones)s],"mop":[%(mops)s]}}' % {
"walls": walls,
"zones": zones,
"mops": mops,
}
payload = [{"piid": 4, "value": value}]
return self.set_map(payload)
@command()
def remote_control_step(self, rotation, velocity) -> None:
"""
Move robot manually one time.
:param int rotation: angle to rotate in binary angles 128 to -128
:param int velocity: speed to move forward or backward 100 to -300
"""
value = '{"spdv":%(velocity)d,"spdw":%(rotation)d,"audio":"false"}' % {
"velocity": velocity,
"rotation": rotation,
}
return self.set_property("property_remote_control_step", value)
@command()
def request_map(self, params) -> None:
# TODO find out the parameters
return self.call_action("action_req_map", params)
@command()
def select_map(self, map_id) -> None:
"""Switch to another map."""
payload = [
{
"piid": 4,
"value": '{"sm": ' + "{" + "}" + ', "mapid":' + str(map_id) + "}",
}
]
return self.set_map(payload)
@command(click.argument("water", type=int))
def set_water_level(self, water):
"""Set water level"""
return self.set_property("property_water_level", water)
@command()
def locate(self) -> None:
"""Locate vacuum robot."""
return self.call_action("action_locate")
@command()
def install_voice_pack(self, lang_id: str, url: str, md5: str, size: int) -> None:
"""Install given voice pack."""
value = (
'{"id":"%(lang_id)s","url":"%(url)s","md5":"%(md5)s","size":%(size)d}'
% {"lang_id": lang_id, "url": url, "md5": md5, "size": size}
)
self.set_property("property_voice", value)
@command(click.argument("volume", type=int))
def set_audio_volume(self, volume):
"""Set voice audio volume"""
return self.set_property("property_audio_volume", volume)
@command()
def test_sound(self) -> None:
"""Plays a confirmation sound to check the volume"""
return self.call_action("action_test_sound")
@command(click.argument("time", type=int))
def set_cloth_cleaning_tip(self, delay):
"""Set reminder delay for cleaning mop, 0 to disable the tip"""
return self.set_property("property_clean_cloth_tip", delay)
|
1700396
|
import os
from functools import wraps
from flask import request
from app.vendors.rest import response
def auth_required(f):
"""Decorate given function with authentication check."""
@wraps(f)
def decorated_function(*args, **kwargs):
user_key = request.headers.get("X-API-Key", None)
app_key = os.environ.get("RESTKNOT_API_KEY")
if user_key != app_key:
return response(400, message="Access denied")
return f(*args, **kwargs)
return decorated_function
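# Usage sketch (the app and route below are illustrative, not part of this
# module): stack the decorator under the route registration.
#
# from flask import Flask
# app = Flask(__name__)
#
# @app.route("/zones")
# @auth_required
# def list_zones():
#     return response(200, message="ok")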
|
1700400
|
from unittest import TextTestRunner, TestSuite
import JournalTest
import TransactionTest
import PostingTest
suites = [
JournalTest.suite(),
TransactionTest.suite(),
PostingTest.suite()
]
TextTestRunner().run(TestSuite(suites))
|
1700414
|
try:
from rpython.rlib.rerased import new_erasing_pair # pylint: disable=unused-import
from rpython.rlib.rerased import erase_int # pylint: disable=unused-import
from rpython.rlib.rerased import unerase_int # pylint: disable=unused-import
except ImportError:
"NOT_RPYTHON"
def new_erasing_pair(name):
identity = _ErasingPairIdentity(name)
def erase(x):
return _Erased(x, identity)
def unerase(y):
assert y._identity is identity # pylint: disable=W
return y._x # pylint: disable=W
return erase, unerase
def erase_int(val):
return val
def unerase_int(val):
return val
class _ErasingPairIdentity(object):
def __init__(self, name):
self.name = name
class _Erased(object):
def __init__(self, x, identity):
self._x = x
self._identity = identity
def __str__(self):
return "Erased(" + str(self._x) + ", " + self._identity.name + ")"
|
1700479
|
def create_dimension_financial_orgs(cursor):
cursor.execute('''CREATE TABLE dimension_financial_orgs
( financial_org_name text,
financial_org_permalink text,
created_at text,
number_of_employees real,
founded_year real,
founded_month real,
founded_day real,
founded_date text,
country_code text,
state_code text,
city text,
latitude real,
longitude real,
investments real,
total_fund real,
extracted_at text
)''')
|
1700495
|
from pydantic import BaseModel # pylint: disable=no-name-in-module
class Migrate(BaseModel):
enabled: bool
|
1700518
|
import torch
import torch.nn as nn
def get_network_for_size(size):
"""
Size is expected to be [channel, dim, dim]
"""
size = list(size) # In case the input is a tuple
if size[-2:] == [7, 7]:
net = ConvNet7x7
elif size[-2:] == [28, 28]:
net = ConvNet28x28
elif size[-2:] == [84, 84]:
net = ConvNet84x84
elif size[-2:] == [64, 64]:
# reuse the 84x84 net; the output dim is computed from the actual input below
net = ConvNet84x84
else:
raise AttributeError("Unexpected input size")
return net(size)
class ModelUtils(object):
"""
Allows for images larger than their stated minimums, and will auto-compute the output size accordingly
"""
@classmethod
def compute_output_size(cls, net, observation_size):
dummy_input = torch.zeros(observation_size).unsqueeze(0) # Observation size doesn't include batch, so add it
dummy_output = net(dummy_input).squeeze(0) # Remove batch
output_size = dummy_output.shape[0]
return output_size
class CommonConv(nn.Module):
def __init__(self, conv_net, post_flatten, output_size):
super().__init__()
self._conv_net = conv_net
self._post_flatten = post_flatten
self.output_size = output_size
def forward(self, x):
x = self._conv_net(x.float())
x = self._post_flatten(x)
return x
class ConvNet84x84(CommonConv):
def __init__(self, observation_shape):
# This is the same as used in AtariNet in Impala (torchbeast implementation)
output_size = 512
conv_net = nn.Sequential(
nn.Conv2d(in_channels=observation_shape[0], out_channels=32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1),
nn.ReLU(),
nn.Flatten())
intermediate_dim = ModelUtils.compute_output_size(conv_net, observation_shape)
post_flatten = nn.Linear(intermediate_dim, output_size)
super().__init__(conv_net, post_flatten, output_size)
class ConvNet28x28(CommonConv):
def __init__(self, observation_shape):
output_size = 32
conv_net = nn.Sequential(
nn.Conv2d(observation_shape[0], 24, kernel_size=5),
nn.MaxPool2d(kernel_size=2),
nn.ReLU(), # TODO: this is new... (check)
nn.Conv2d(24, 48, kernel_size=5),
nn.MaxPool2d(kernel_size=2),
nn.ReLU(),
nn.Flatten(),
)
intermediate_dim = ModelUtils.compute_output_size(conv_net, observation_shape)
post_flatten = nn.Linear(intermediate_dim, output_size)
super().__init__(conv_net, post_flatten, output_size)
class ConvNet7x7(CommonConv):
def __init__(self, observation_shape):
# From: https://github.com/lcswillems/rl-starter-files/blob/master/model.py, modified by increasing each
# latent size (2x)
output_size = 64
conv_net = nn.Sequential(
nn.Conv2d(observation_shape[0], 32, kernel_size=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(32, 64, kernel_size=2),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=2),
nn.ReLU(),
nn.Flatten()
)
intermediate_dim = ModelUtils.compute_output_size(conv_net, observation_shape)
post_flatten = nn.Linear(intermediate_dim, output_size)
super().__init__(conv_net, post_flatten, output_size)
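# Usage sketch: pick a network for channels-first 84x84 observations and run
# a dummy batch through it.
#
# net = get_network_for_size([3, 84, 84])
# out = net(torch.zeros(2, 3, 84, 84))
# assert out.shape == (2, net.output_size)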
|
1700530
|
import re
import time
import logging
import datetime
import shutil
import tempfile
from django.db import transaction
from django.utils.encoding import smart_unicode
from jellybaby.models import Item, CodeRepository, CodeCommit
from jellybaby.providers import utils
try:
import git
except ImportError:
git = None
log = logging.getLogger("jellybaby.providers.gitscm")
#
# Public API
#
def enabled():
ok = git is not None
if not ok:
log.warn("The GIT provider is not available because the GitPython module "
"isn't installed.")
return ok
def update():
for repository in CodeRepository.objects.filter(type="git"):
_update_repository(repository)
#
# Private API
#
def _update_repository(repository):
source_identifier = "%s:%s" % (__name__, repository.url)
last_update_date = Item.objects.get_last_update_of_model(CodeCommit, source=source_identifier)
log.info("Updating changes from %s since %s", repository.url, last_update_date)
# Git chokes on the 1969-12-31 sentinel returned by
# get_last_update_of_model, so fix that up.
if last_update_date.date() == datetime.date(1969, 12, 31):
last_update_date = datetime.datetime(1970, 1, 1)
working_dir, repo = _create_local_repo(repository)
commits = repo.commits_since(since=last_update_date.strftime("%Y-%m-%d"))
for commit in reversed(commits):
if commit.author.email == repository.username:
_handle_revision(repository, commit)
log.debug("Removing working dir %s.", working_dir)
shutil.rmtree(working_dir)
def _create_local_repo(repository):
working_dir = tempfile.mkdtemp()
g = git.Git(working_dir)
log.debug("Cloning %s into %s", repository.url, working_dir)
res = g.clone(repository.url)
# This is pretty nasty.
m = re.match('^Initialized empty Git repository in (.*)', res)
repo_location = m.group(1).rstrip('/')
return working_dir, git.Repo(repo_location)
@transaction.commit_on_success
def _handle_revision(repository, commit):
log.debug("Handling [%s] from %s", commit.id[:7], repository.url)
ci, created = CodeCommit.objects.get_or_create(
revision = commit.id,
repository = repository,
defaults = {"message": smart_unicode(commit.message)}
)
if created:
timestamp = datetime.datetime.fromtimestamp(time.mktime(commit.committed_date))
return Item.objects.create_or_update(
instance = ci,
timestamp = timestamp,
source = "%s:%s" % (__name__, repository.url),
)
|
1700559
|
pkgname = "python-babel"
pkgver = "2.9.1"
pkgrel = 0
build_style = "python_module"
hostmakedepends = ["python-setuptools"]
checkdepends = ["python-pytz"]
depends = ["python-setuptools", "python-pytz"]
pkgdesc = "Tools for internationalizing Python applications"
maintainer = "q66 <<EMAIL>>"
license = "BSD-3-Clause"
url = "http://babel.pocoo.org"
source = f"$(PYPI_SITE)/B/Babel/Babel-{pkgver}.tar.gz"
sha256 = "bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0"
# checks need pytest, but this package is itself a dependency of pytest
options = ["!check"]
def post_install(self):
self.install_license("LICENSE")
|
1700605
|
from contextlib import contextmanager
# copied from https://stackoverflow.com/a/7039175/5692176 (and wrapped in a class)
class TerminalLoadingAnimation:
ANIMATION_CHARACTERS = ["|", "/", "-", "\\"]
def __init__(self, loading_title=None):
self._loading_title = loading_title or ""
self._next_char_idx = 0
def start(self):
self.update()
def update(self):
print(self._next_line(), end="\r")
def end(self):
print(self._next_line(), end="\n")
def _next_line(self):
next_char = self.ANIMATION_CHARACTERS[
self._next_char_idx % len(self.ANIMATION_CHARACTERS)
]
self._next_char_idx += 1
next_line = f"{next_char} {self._loading_title}"
return next_line
@staticmethod
@contextmanager
def open(loading_title):
loading_animation = TerminalLoadingAnimation(loading_title)
loading_animation.start()
try:
yield loading_animation
finally:
loading_animation.end()
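# Example usage (a minimal sketch): drive the spinner around a slow task.
#
# import time
# with TerminalLoadingAnimation.open("downloading") as spinner:
#     for _ in range(20):
#         time.sleep(0.1)
#         spinner.update()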
|
1700609
|
import os
import socket
import getpass
folders = []
if socket.gethostname() == 'pmous008':
intermediate_folder = os.environ['HOME'] + "/projects/pcad"
slicer_dir = os.environ['HOME'] + "/sources/Slicer-4.8.1-linux-amd64"
elif socket.gethostname() == 'pirads-trainer':
intermediate_folder = os.environ['HOME'] + "/projects/pcad"
slicer_dir = os.environ['HOME'] + "/sources/Slicer-4.8.1-linux-amd64"
else:
intermediate_folder = "/data"
dicom_folder = intermediate_folder + "/dicom"
nrrd_folder = intermediate_folder + "/nrrd"
|
1700645
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
def scatter_nd_impl(data, indices, updates):
# type: (np.ndarray, np.ndarray, np.ndarray) -> np.ndarray
# Check tensor shapes
assert indices.shape[-1] <= len(data.shape)
assert updates.shape == indices.shape[:-1] + data.shape[indices.shape[-1]:]
# Compute output
output = np.copy(data)
for i in np.ndindex(indices.shape[:-1]):
# NOTE: The order of iteration in this loop is not specified.
# In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2].
# This ensures that the output value does not depend on the iteration order.
        # Convert the index row to a tuple so multi-dimensional index tuples
        # address a single element rather than fancy-indexing along axis 0.
        output[tuple(indices[i])] = updates[i]
return output
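# A quick illustrative check (not part of the ONNX reference tests):
# scattering into a 1-D tensor with two single-element index tuples.
#   scatter_nd_impl(np.array([1, 2, 3, 4]),
#                   np.array([[3], [1]]),
#                   np.array([9, 10]))
#   -> array([ 1, 10,  3,  9])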
class ScatterND(Base):
@staticmethod
def export_scatternd(): # type: () -> None
node = onnx.helper.make_node(
'ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['y'],
)
data = np.array(
[[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
[[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
[[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)
indices = np.array([[0], [2]], dtype=np.int64)
updates = np.array(
[[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]], dtype=np.float32)
# Expecting output as np.array(
# [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)
output = scatter_nd_impl(data, indices, updates)
expect(node, inputs=[data, indices, updates], outputs=[output],
name='test_scatternd')
|
1700673
|
import shutil
import subprocess
import sys
import pytest
from numpy.distutils import mingw32ccompiler
@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test')
def test_build_import():
'''Test the mingw32ccompiler.build_import_library, which builds a
`python.a` from the MSVC `python.lib`
'''
# make sure `nm.exe` exists and supports the current python version. This
# can get mixed up when the PATH has a 64-bit nm but the python is 32-bit
try:
out = subprocess.check_output(['nm.exe', '--help'])
except FileNotFoundError:
pytest.skip("'nm.exe' not on path, is mingw installed?")
supported = out[out.find(b'supported targets:'):]
if sys.maxsize < 2**32:
if b'pe-i386' not in supported:
raise ValueError("'nm.exe' found but it does not support 32-bit "
"dlls when using 32-bit python. Supported "
"formats: '%s'" % supported)
elif b'pe-x86-64' not in supported:
raise ValueError("'nm.exe' found but it does not support 64-bit "
"dlls when using 64-bit python. Supported "
"formats: '%s'" % supported)
# Hide the import library to force a build
has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib()
if has_import_lib:
shutil.move(fullpath, fullpath + '.bak')
try:
# Whew, now we can actually test the function
mingw32ccompiler.build_import_library()
finally:
if has_import_lib:
shutil.move(fullpath + '.bak', fullpath)
|
1700679
|
from org.transcrypt.stubs.browser import *
# write down any library you need for the client
# this uses Transcrypt to create the javascript library
|
1700684
|
from dataclasses import dataclass
import pytest
from dataslots import DataslotsDescriptor, dataslots, DataDescriptor
class PositiveIntegerDS(DataslotsDescriptor):
def __get__(self, instance, owner):
return self.get_value(instance)
def __set__(self, instance, value):
if value < 0:
raise ValueError('must be positive')
self.set_value(instance, value)
class PositiveIntegerNonDS:
def __init__(self):
self.value = None
def __get__(self, instance, owner):
return self.value
def __set__(self, instance, value):
if value < 0:
raise ValueError('must be positive')
self.value = value
class SimpleDataDescriptor(DataDescriptor):
def __init__(self, name):
self._name = name
@property
def slot_name(self):
return self._name
def __get__(self, instance, owner):
return getattr(instance, self.slot_name)
def __set__(self, instance, value):
setattr(instance, self.slot_name, value)
def test_data_descriptor(assertions):
@dataslots
@dataclass
class A:
x: int = PositiveIntegerDS()
a = A(10)
assert a.x == a._dataslots_x == 10
assert str(a).endswith('A(x=10)')
assertions.assert_slots(A, ('_dataslots_x', ))
assertions.assert_init_raises(A, -10, exception=ValueError, msg='must be positive')
def test_data_descriptor_inheritance(assertions):
@dataslots
@dataclass
class A:
x: int = PositiveIntegerDS()
@dataslots
@dataclass
class B(A):
y: int = PositiveIntegerDS()
b = B(10, 20)
assert b.x == 10
assert b.y == b._dataslots_y == 20
assert str(b).endswith('B(x=10, y=20)')
assertions.assert_slots(A, ('_dataslots_x',))
assertions.assert_slots(B, ('_dataslots_y',))
assertions.assert_init_raises(B, -10, 10, exception=ValueError, msg='must be positive')
assertions.assert_init_raises(B, 10, -10, exception=ValueError, msg='must be positive')
def test_slots_on_derived(assertions):
@dataclass
class A:
x: int = PositiveIntegerDS()
@dataslots
class B(A):
pass
assert B(10).x == 10
assertions.assert_init_raises(A, -20, exception=ValueError, msg='must be positive')
assertions.assert_init_raises(B, -10, exception=ValueError, msg='must be positive')
assertions.assert_not_member(A, '__slots__')
assertions.assert_slots(B, ['_dataslots_x'])
def test_duplicated_field_only_derived_slots(assertions):
@dataclass
class A:
x: int
@dataslots
@dataclass
class B(A):
x: int = PositiveIntegerDS()
assert A(-5).x == -5
assert B(10).x == 10
assertions.assert_init_raises(B, -10, exception=ValueError, msg='must be positive')
assertions.assert_not_member(A, '__slots__')
assertions.assert_slots(B, ['_dataslots_x'])
def test_duplicated_field_both_in_slots(assertions):
@dataslots
@dataclass
class A:
x: int
@dataslots
@dataclass
class B(A):
x: int = PositiveIntegerDS()
assert A(-5).x == -5
assert B(10).x == 10
assertions.assert_init_raises(B, -10, exception=ValueError, msg='must be positive')
assertions.assert_slots(A, ['x'])
assertions.assert_slots(B, ['_dataslots_x'])
def test_delete_field():
@dataslots
@dataclass
class A:
some_field: int = PositiveIntegerDS()
a = A(10)
assert a.some_field == 10
del a.some_field
with pytest.raises(AttributeError) as exc_info:
_ = a.some_field
assert exc_info.match('(?!(?<=(_dataslots_)))some_field')
with pytest.raises(AttributeError) as exc_info:
del a.some_field
assert exc_info.match('(?!(?<=(_dataslots_)))some_field')
a.some_field = 20
assert a.some_field == 20
def test_skip_data_descriptor(assertions):
@dataslots
@dataclass
class A:
x: int = PositiveIntegerNonDS()
a = A(10)
assert a.x == 10
assertions.assert_slots(A, ())
assertions.assert_init_raises(A, -10, exception=ValueError, msg='must be positive')
def test_custom_data_descriptor(assertions):
slot_name = '_custom_x'
@dataslots
@dataclass
class A:
x: int = SimpleDataDescriptor(slot_name)
a = A(10)
assert a.x == 10
assertions.assert_slots(A, [slot_name])
def test_redefined_data_descriptor(assertions):
@dataslots
@dataclass
class A:
x: int = SimpleDataDescriptor('simple_x')
@dataslots
@dataclass
class B(A):
x: int = PositiveIntegerDS()
@dataslots
@dataclass
class C(B):
x: int = SimpleDataDescriptor('_dataslots_x')
assert A(-5).x == -5
assert B(10).x == 10
assert C(10).x == 10
assertions.assert_init_raises(B, -10, exception=ValueError, msg='must be positive')
assertions.assert_slots(A, ['simple_x'])
assertions.assert_slots(B, ['_dataslots_x'])
assertions.assert_slots(C, [])
def test_redefined_data_descriptor_not_in_slots(assertions):
@dataclass
class A:
x: int = SimpleDataDescriptor('simple_x')
@dataslots
@dataclass
class B(A):
x: int = PositiveIntegerDS()
assertions.assert_init_raises(B, -10, exception=ValueError, msg='must be positive')
assertions.assert_not_member(A, '__slots__')
assertions.assert_slots(B, ['_dataslots_x'])
|
1700694
|
from os import getenv
from river.common.database_connection.db_connection import MSSQL, ORACLE, ORACLE11, POSTGRES
DATABASES = {
MSSQL: {
"host": getenv("TEST_MSSQL_HOST"),
"port": int(getenv("TEST_MSSQL_PORT", 1433)),
"database": getenv("TEST_MSSQL_DB"),
"login": getenv("TEST_MSSQL_LOGIN"),
"password": getenv("<PASSWORD>_MSSQL_PASSWORD"),
"owner": "dbo",
"model": "MSSQL",
},
ORACLE11: {
"host": getenv("TEST_ORACLE_11_HOST"),
"port": int(getenv("TEST_ORACLE_11_PORT", 1521)),
"database": getenv("TEST_ORACLE_11_DB"),
"login": getenv("TEST_ORACLE_11_LOGIN"),
"password": getenv("<PASSWORD>"),
"owner": "SYSTEM",
"model": "ORACLE11",
},
ORACLE: {
"host": getenv("TEST_ORACLE_HOST"),
"port": int(getenv("TEST_ORACLE_PORT", 1531)),
"database": getenv("TEST_ORACLE_DB"),
"login": getenv("TEST_ORACLE_LOGIN"),
"password": getenv("<PASSWORD>"),
"owner": "SYSTEM",
"model": "ORACLE",
},
POSTGRES: {
"host": getenv("TEST_POSTGRES_HOST"),
"port": int(getenv("TEST_POSTGRES_PORT", 5432)),
"database": getenv("TEST_POSTGRES_DB"),
"login": getenv("TEST_POSTGRES_LOGIN", "test"),
"password": getenv("TEST_POSTGRES_PASSWORD", "<PASSWORD>"),
"owner": "public",
"model": "POSTGRES",
},
}
|
1700702
|
import os
import string
import numpy as np
from PIL import Image
import torch as th
from torchvision.transforms.functional import to_tensor
from . import utils, templates
class FontsDataset(th.utils.data.Dataset):
def __init__(self, root, chamfer, n_samples_per_curve, val=False):
self.root = root
self.chamfer = chamfer
self.n_samples_per_curve = n_samples_per_curve
self.files = [f[:-4] for f in os.listdir(os.path.join(self.root, 'pngs')) if f.endswith('.png')]
np.random.shuffle(self.files)
cutoff = int(0.9*len(self.files))
if val:
self.files = self.files[cutoff:]
else:
self.files = self.files[:cutoff]
self.n_loops_dict = templates.n_loops
def __repr__(self):
return "FontsDataset | {} entries".format(len(self))
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
fname = self.files[idx]
im = Image.open(os.path.join(self.root, 'pngs', fname + '.png')).convert('L')
distance_fields = th.from_numpy(
np.load(os.path.join(self.root, 'distances', fname + '.npy'))[31:-31,31:-31].astype(np.float32)) ** 2
alignment_fields = utils.compute_alignment_fields(distance_fields)
distance_fields = distance_fields[1:-1,1:-1]
occupancy_fields = utils.compute_occupancy_fields(distance_fields)
points = th.Tensor([])
if self.chamfer:
points = th.from_numpy(np.load(os.path.join(self.root, 'points', fname + '.npy')).astype(np.float32))
points = points[:self.n_samples_per_curve*sum(templates.topology)]
return {
'fname': fname,
'im': to_tensor(im),
'distance_fields': distance_fields,
'alignment_fields': alignment_fields,
'occupancy_fields': occupancy_fields,
'points': points,
'letter_idx': string.ascii_uppercase.index(fname[0]),
'n_loops': self.n_loops_dict[fname[0]]
}
|
1700704
|
import logging
from . import FinishAfter, CompositeExtension
from .training import TrackTheBest
from .predicates import OnLogRecord
logger = logging.getLogger(__name__)
class FinishIfNoImprovementAfter(FinishAfter):
"""Stop after improvements have ceased for a given period.
Parameters
----------
notification_name : str
The name of the log record to look for which indicates a new
best performer has been found. Note that the value of this
record is not inspected.
iterations : int, optional
The number of iterations to wait for a new best. Exactly one of
`iterations` or `epochs` must be not `None` (default).
epochs : int, optional
The number of epochs to wait for a new best. Exactly one of
`iterations` or `epochs` must be not `None` (default).
patience_log_record : str, optional
The name under which to record the number of iterations we
are currently willing to wait for a new best performer.
Defaults to `notification_name + '_patience_epochs'` or
`notification_name + '_patience_iterations'`, depending
which measure is being used.
Notes
-----
By default, runs after each epoch. This can be manipulated via
keyword arguments (see :class:`blocks.extensions.SimpleExtension`).
"""
def __init__(self, notification_name, iterations=None, epochs=None,
patience_log_record=None, **kwargs):
if (epochs is None) == (iterations is None):
raise ValueError("Need exactly one of epochs or iterations "
"to be specified")
self.notification_name = notification_name
self.iterations = iterations
self.epochs = epochs
kwargs.setdefault('after_epoch', True)
self.last_best_iter = self.last_best_epoch = None
if patience_log_record is None:
self.patience_log_record = (notification_name + '_patience' +
('_epochs' if self.epochs is not None
else '_iterations'))
else:
self.patience_log_record = patience_log_record
super(FinishIfNoImprovementAfter, self).__init__(**kwargs)
def update_best(self):
# Here mainly so we can easily subclass different criteria.
if self.notification_name in self.main_loop.log.current_row:
self.last_best_iter = self.main_loop.log.status['iterations_done']
self.last_best_epoch = self.main_loop.log.status['epochs_done']
def do(self, which_callback, *args):
self.update_best()
# If we haven't encountered a best yet, then we should just bail.
if self.last_best_iter is None:
return
if self.epochs is not None:
since = (self.main_loop.log.status['epochs_done'] -
self.last_best_epoch)
patience = self.epochs - since
else:
since = (self.main_loop.log.status['iterations_done'] -
self.last_best_iter)
patience = self.iterations - since
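        # e.g. with epochs=5 and a best seen 3 epochs ago, patience == 2;
        # once it reaches 0, FinishAfter.do() below requests a halt.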
logger.debug('%s: Writing patience of %d to current log record (%s) '
'at iteration %d', self.__class__.__name__, patience,
self.patience_log_record,
self.main_loop.log.status['iterations_done'])
self.main_loop.log.current_row[self.patience_log_record] = patience
if patience == 0:
super(FinishIfNoImprovementAfter, self).do(which_callback,
*args)
class EarlyStopping(CompositeExtension):
"""A 'batteries-included' early stopping extension.
Parameters
----------
record_name : str
The log record entry whose value represents the quantity to base
early stopping decisions on, e.g. some measure of validation set
performance.
checkpoint_extension : :class:`~blocks.extensions.Checkpoint`, optional
A :class:`~blocks.extensions.Checkpoint` instance to configure to
save a checkpoint when a new best performer is found.
checkpoint_filename : str, optional
The filename to use for the 'current best' checkpoint. Must be
provided if ``checkpoint_extension`` is specified.
notification_name : str, optional
The name to be written in the log when a new best-performing
model is found. Defaults to ``record_name + '_best_so_far'``.
choose_best : callable, optional
See :class:`TrackTheBest`.
iterations : int, optional
See :class:`FinishIfNoImprovementAfter`.
epochs : int, optional
See :class:`FinishIfNoImprovementAfter`.
Notes
-----
.. warning::
If you want the best model to be saved, you need to specify
a value for the ``checkpoint_extension`` and
``checkpoint_filename`` arguments!
Trigger keyword arguments will affect how often the log is inspected
for the record name (in order to determine if a new best has been
found), as well as how often a decision is made about whether to
continue training. By default, ``after_epoch`` is set,
as is ``before_training``, where some sanity checks are performed
(including the optional self-management of checkpointing).
If ``checkpoint_extension`` is not in the main loop's extensions list
when the `before_training` trigger is run, it will be added as a
sub-extension of this object.
Examples
--------
To simply track the best value of a log entry and halt training
when it hasn't improved in a sufficient amount of time, we could
use e.g.
>>> stopping_ext = EarlyStopping('valid_error', iterations=100)
which would halt training if a new minimum ``valid_error`` has not
been achieved in 100 iterations (i.e. minibatches/steps). To measure
in terms of epochs (which usually correspond to passes through the
training set), you could use
>>> epoch_stop_ext = EarlyStopping('valid_error', epochs=5)
If you are tracking a log entry where there's a different definition
of 'best', you can provide a callable that takes two log values
and returns the one that :class:`EarlyStopping` should consider
"better". For example, if you were tracking accuracy, where higher
is better, you could pass the built-in ``max`` function:
>>> max_acc_stop = EarlyStopping('valid_accuracy', choose_best=max,
... notification_name='highest_acc',
... epochs=10)
Above we've also provided an alternate notification name, meaning
a value of ``True`` will be written under the entry name
``highest_acc`` whenever a new highest accuracy is found (by default
this would be a name like ``valid_accuracy_best_so_far``).
Let's configure a checkpointing extension to save the model and log
(but not the main loop):
>>> from blocks.extensions.saveload import Checkpoint
>>> checkpoint = Checkpoint('my_model.tar', save_main_loop=False,
... save_separately=['model', 'log'],
... after_epoch=True)
When we pass this object to :class:`EarlyStopping`, along with a
different filename, :class:`EarlyStopping` will configure that same
checkpointing extension to *also* serialize to ``best_model.tar`` when
a new best value of validation error is achieved.
>>> stopping = EarlyStopping('valid_error', checkpoint,
... 'best_model.tar', epochs=5)
Finally, we'll set up the main loop:
>>> from blocks.main_loop import MainLoop
>>> # You would, of course, use a real algorithm and data stream here.
>>> algorithm = data_stream = None
>>> main_loop = MainLoop(algorithm=algorithm,
... data_stream=data_stream,
... extensions=[stopping, checkpoint])
Note that you do want to place the checkpoint extension *after*
the stopping extension, so that the appropriate log records
have been written in order to trigger the checkpointing
extension.
It's also possible to in-line the creation of the
checkpointing extension:
>>> main_loop = MainLoop(algorithm=algorithm,
... data_stream=data_stream,
... extensions=[EarlyStopping(
... 'valid_error',
... Checkpoint('my_model.tar',
... save_main_loop=False,
... save_separately=['model',
... 'log'],
... after_epoch=True),
... 'my_best_model.tar',
... epochs=5)])
Note that we haven't added the checkpointing extension to the
main loop's extensions list. No problem: :class:`EarlyStopping` will
detect that it isn't being managed by the main loop and manage it
internally. It will automatically be executed in the right order
for it to function properly alongside :class:`EarlyStopping`.
"""
def __init__(self, record_name, checkpoint_extension=None,
checkpoint_filename=None, notification_name=None,
choose_best=min, iterations=None, epochs=None, **kwargs):
if notification_name is None:
notification_name = record_name + '_best_so_far'
kwargs.setdefault('after_epoch', True)
tracking_ext = TrackTheBest(record_name, notification_name,
choose_best=choose_best, **kwargs)
stopping_ext = FinishIfNoImprovementAfter(notification_name,
iterations=iterations,
epochs=epochs,
**kwargs)
self.checkpoint_extension = checkpoint_extension
if checkpoint_extension and checkpoint_filename:
checkpoint_extension.add_condition(['after_batch'],
OnLogRecord(notification_name),
(checkpoint_filename,))
elif checkpoint_extension is not None and checkpoint_filename is None:
raise ValueError('checkpoint_extension specified without '
'checkpoint_filename')
kwargs.setdefault('before_training', True)
super(EarlyStopping, self).__init__([tracking_ext, stopping_ext],
**kwargs)
def do(self, which_callback, *args):
if which_callback == 'before_training' and self.checkpoint_extension:
if self.checkpoint_extension not in self.main_loop.extensions:
logger.info('%s: checkpoint extension %s not in main loop '
'extensions, adding as sub-extension of %s',
self.__class__.__name__, self.checkpoint_extension,
self)
self.checkpoint_extension.main_loop = self.main_loop
self.sub_extensions.append(self.checkpoint_extension)
else:
exts = self.main_loop.extensions
if exts.index(self.checkpoint_extension) < exts.index(self):
                    logger.warning(
                        '%s: configured checkpointing extension '
                        'appears after this extension in the main loop '
                        'extensions list. This may lead to '
                        'unwanted results, as the notification '
                        'that would trigger serialization '
                        'of a new best will not have been '
                        'written yet when the checkpointing '
                        'extension is run.', self.__class__.__name__)
|
1700748
|
from django import template
from markdown import markdown
register = template.Library()
@register.filter
def md2html(text):
return markdown(text)
@register.simple_tag
def update_param(request, clear_keys='', **kwargs):
items = []
keys_to_remove = [key.strip().lower() for key in clear_keys.split(',')]
for key in request.GET:
if key in keys_to_remove:
continue
value = kwargs.get(key, request.GET.getlist(key))
# Django groups values of multiple select into list of values
# within a single key; however, in the query string we need to expand
# it:
if value is not None:
if isinstance(value, list):
items.extend([f'{key}={item}' for item in value])
else:
items.append(f'{key}={value}')
# Append all keys not found in request.GET, but passed in kwargs:
for name in [key for key in kwargs if
key not in request.GET and key not in keys_to_remove]:
items.append(f'{name}={kwargs[name]}')
return '&'.join(items)
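# Hypothetical template usage (the parameter names are illustrative):
#   <a href="?{% update_param request 'page' sort='title' %}">Sort</a>
# keeps every current GET parameter except "page" and sets sort=title.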
@register.filter
def get_item(d, key):
return d.get(key)
|
1700785
|
from typing import List
from pydantic import BaseModel
class BaseGitaModel(BaseModel):
id: int
class Config:
orm_mode = True
class GitaTranslation(BaseGitaModel):
description: str
author_name: str
language: str
class GitaCommentary(BaseGitaModel):
description: str
author_name: str
language: str
class GitaVerse(BaseGitaModel):
verse_number: int
chapter_number: int
slug: str
text: str
transliteration: str
word_meanings: str
translations: List[GitaTranslation] = []
commentaries: List[GitaCommentary] = []
class GitaChapter(BaseGitaModel):
name: str
slug: str
name_transliterated: str
name_translated: str
verses_count: int
chapter_number: int
name_meaning: str
chapter_summary: str
chapter_summary_hindi: str
class VerseOfDay(BaseGitaModel):
id: int
verse_order: int
|
1700830
|
from ploy.common import gzip_string
import base64
import email
import os
class Template(object):
def __init__(self, path, pre_filter=None, post_filter=None):
self.path = path
self.template = email.message_from_file(open(path))
self.pre_filter = pre_filter
self.post_filter = post_filter
def __call__(self, **kwargs):
options = {}
body = self.template.get_payload()
if callable(self.pre_filter):
body = self.pre_filter(body)
for key, value in self.template.items():
commands, value = value.rsplit(None, 1)
for cmd in commands.split(','):
if cmd == 'file':
path = value
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(self.path), path)
value = open(path).read()
elif cmd == 'base64':
if not isinstance(value, bytes):
value = value.encode('ascii')
                    # base64.encodestring was removed in Python 3.9;
                    # encodebytes is the modern equivalent.
                    value = base64.encodebytes(value).decode('ascii')
elif cmd == 'format':
value = value.format(**kwargs)
elif cmd == 'template':
path = value
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(self.path), path)
value = Template(path)(**kwargs)
elif cmd == 'gzip':
value = gzip_string(value)
elif cmd == 'escape_eol':
value = value.replace('\n', '\\n')
else:
raise ValueError("Unknown command '%s' for option '%s' in startup script '%s'." % (cmd, key, self.path))
options[key] = value
for key in kwargs:
options[key] = kwargs[key]
result = body.format(**options)
if callable(self.post_filter):
result = self.post_filter(result)
return result
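# Illustrative template layout (an assumption about usage, not a file that
# ships with the package): headers map option names to
# "command[,command...] argument", and the body is str.format-ed with the
# collected options plus any kwargs:
#
#   startup_script: file,format,base64 bootstrap.sh
#
#   #!/bin/sh
#   echo "startup payload: {startup_script}"
#
# Template(path)(host='box1') would read bootstrap.sh, format it with the
# kwargs, base64-encode it, then substitute it into the body.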
|
1700856
|
import PortScanner as ps
def main():
# Initialize a Scanner object that will scan top 50 commonly used ports.
scanner = ps.PortScanner(target_ports=50)
host_name = 'google.com'
message = 'put whatever message you want here'
'''
output contains a dictionary of {port:status} pairs
in which port is the list of ports we scanned
and status is either 'OPEN' or 'CLOSE'
'''
# This line sets the thread limit of the scanner to 1500
scanner.set_thread_limit(1500)
# This line sets the timeout delay to 15s
scanner.set_delay(15)
# This line shows the target port list of the scanner
scanner.show_target_ports()
'''
Current port list is:
[blah, blah ....]
'''
# This line shows the timeout delay of the scanner
scanner.show_delay()
'''
Current timeout delay is 15 seconds.
'''
# This line shows the top 100 commonly used ports.
scanner.show_top_k_ports(100)
'''
Top 100 commonly used ports:
[blah, blah ....]
'''
output = scanner.scan(host_name, message)
'''
start scanning website: google.com
server ip is: 172.16.31.10
80: OPEN
443: OPEN
2000: OPEN
5060: OPEN
host google.com scanned in 30.956103 seconds
finish scanning!
'''
if __name__ == "__main__":
main()
|
1700868
|
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from .models import Submission, SubmissionComment, SubmissionTag, SubmissionType
class SubmissionCommentInline(admin.TabularInline):
model = SubmissionComment
@admin.register(Submission)
class SubmissionAdmin(admin.ModelAdmin):
list_display = (
"title",
"speaker_id",
"type",
"status",
"conference",
"topic",
"duration",
"audience_level",
)
fieldsets = (
(
_("Submission"),
{
"fields": (
"title",
"slug",
"speaker_id",
"status",
"type",
"duration",
"topic",
"conference",
"audience_level",
"languages",
)
},
),
(_("Details"), {"fields": ("elevator_pitch", "abstract", "notes", "tags")}),
(_("Speaker"), {"fields": ("speaker_level", "previous_talk_video")}),
)
list_filter = ("conference", "type", "topic", "status")
search_fields = ("title", "abstract")
prepopulated_fields = {"slug": ("title",)}
filter_horizontal = ("tags",)
inlines = [SubmissionCommentInline]
class Media:
js = ["admin/js/jquery.init.js"]
@admin.register(SubmissionType)
class SubmissionTypeAdmin(admin.ModelAdmin):
list_display = ("name",)
@admin.register(SubmissionTag)
class SubmissionTagAdmin(admin.ModelAdmin):
list_display = ("name",)
@admin.register(SubmissionComment)
class SubmissionCommentAdmin(admin.ModelAdmin):
list_display = ("submission", "author_id", "text")
|
1700909
|
import secrets
import string
import uuid
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
def user_directory_path(instance, filename):
ext = filename.split(".")[-1]
return f"images/{instance.author.pk}/{uuid.uuid4().hex}.{ext}"
def image_slug():
"""
Assigns a slug to an image. (Tries again recursively if the slug is taken.)
"""
slug = "".join(secrets.choice(string.ascii_lowercase + string.digits) for _i in range(8))
try:
Image.objects.get(slug=slug)
return image_slug()
except Image.DoesNotExist:
return slug
class Image(models.Model):
author = models.ForeignKey("Author", null=True, on_delete=models.SET_NULL, verbose_name=_("Author"))
file = models.ImageField(upload_to=user_directory_path, verbose_name=_("File"))
slug = models.SlugField(default=image_slug, unique=True, editable=False)
is_deleted = models.BooleanField(default=False, verbose_name=_("Unpublished"))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_("Date created"))
class Meta:
verbose_name = _("image")
verbose_name_plural = _("images")
def __str__(self):
return str(self.slug)
def delete(self, *args, **kwargs):
super().delete()
self.file.delete(save=False)
def get_absolute_url(self):
return reverse("image-detail", kwargs={"slug": self.slug})
|
1700915
|
from __future__ import annotations
import warnings
import numpy as np
import napari
import os
from ..arrays import *
from ..frame import *
from .._const import Const
from ..core import imread, lazy_imread
def copy_layer(layer):
args, kwargs, *_ = layer.as_layer_data_tuple()
# linear interpolation is valid only in 3D mode.
if kwargs.get("interpolation", None) == "linear":
kwargs = kwargs.copy()
kwargs["interpolation"] = "nearest"
    # This is necessary for text-bound layers.
kwargs.pop("properties", None)
kwargs.pop("property_choices", None)
copy = layer.__class__(args, **kwargs)
return copy
def iter_layer(viewer:"napari.Viewer", layer_type:str):
"""
Iterate over layers and yield only certain type of layers.
Parameters
----------
layer_type : str, {"shape", "image", "point"}
Type of layer.
Yields
-------
napari.layers
Layers specified by layer_type
"""
if isinstance(layer_type, str):
layer_type = [layer_type]
layer_type = tuple(getattr(napari.layers, t) for t in layer_type)
for layer in viewer.layers:
if isinstance(layer, layer_type):
yield layer
def iter_selected_layer(viewer:"napari.Viewer", layer_type:str|list[str]):
if isinstance(layer_type, str):
layer_type = [layer_type]
layer_type = tuple(getattr(napari.layers, t) for t in layer_type)
for layer in viewer.layers.selection:
if isinstance(layer, layer_type):
yield layer
def front_image(viewer:"napari.Viewer"):
"""
From list of image layers return the most front visible image.
"""
front = None
for img in iter_layer(viewer, "Image"):
if img.visible:
front = img # This is ImgArray
if front is None:
raise ValueError("There is no visible image layer.")
return front
def to_labels(layer:napari.layers.Shapes, labels_shape, zoom_factor=1):
return layer._data_view.to_labels(labels_shape=labels_shape, zoom_factor=zoom_factor)
def make_world_scale(obj):
scale = []
for a in obj._axes:
if a in "zyx":
scale.append(obj.scale[a])
elif a == "c":
pass
else:
scale.append(1)
return scale
def upon_add_layer(event):
try:
new_layer = event.sources[0][-1]
except IndexError:
return None
new_layer.translate = new_layer.translate.astype(np.float64)
if isinstance(new_layer, napari.layers.Shapes):
_text_bound_init(new_layer)
new_layer._rotation_handle_length = 20/np.mean(new_layer.scale[-2:])
@new_layer.bind_key("Left", overwrite=True)
def left(layer):
_translate_shape(layer, -1, -1)
@new_layer.bind_key("Right", overwrite=True)
def right(layer):
_translate_shape(layer, -1, 1)
@new_layer.bind_key("Up", overwrite=True)
def up(layer):
_translate_shape(layer, -2, -1)
@new_layer.bind_key("Down", overwrite=True)
def down(layer):
_translate_shape(layer, -2, 1)
elif isinstance(new_layer, napari.layers.Points):
_text_bound_init(new_layer)
new_layer.metadata["init_translate"] = new_layer.translate.copy()
new_layer.metadata["init_scale"] = new_layer.scale.copy()
return None
def image_tuple(input:"napari.layers.Image", out:ImgArray, translate="inherit", **kwargs):
data = input.data
scale = make_world_scale(data)
if out.dtype.kind == "c":
out = np.abs(out)
contrast_limits = [float(x) for x in out.range]
if data.ndim == out.ndim:
if isinstance(translate, str) and translate == "inherit":
translate = input.translate
elif data.ndim > out.ndim:
if isinstance(translate, str) and translate == "inherit":
translate = [input.translate[i] for i in range(data.ndim) if data.axes[i] in out.axes]
scale = [scale[i] for i in range(data.ndim) if data.axes[i] in out.axes]
else:
if isinstance(translate, str) and translate == "inherit":
translate = [0.0] + list(input.translate)
scale = [1.0] + list(scale)
kw = dict(scale=scale, colormap=input.colormap, translate=translate,
blending=input.blending, contrast_limits=contrast_limits)
kw.update(kwargs)
return (out, kw, "image")
def label_tuple(input:"napari.layers.Labels", out:Label, translate="inherit", **kwargs):
data = input.data
scale = make_world_scale(data)
if isinstance(translate, str) and translate == "inherit":
translate = input.translate
kw = dict(opacity=0.3, scale=scale, translate=translate)
kw.update(kwargs)
return (out, kw, "labels")
def _translate_shape(layer, ind, direction):
data = layer.data
selected = layer.selected_data
for i in selected:
data[i][:, ind] += direction
layer.data = data
layer.selected_data = selected
layer._set_highlight()
return None
def _text_bound_init(new_layer):
@new_layer.bind_key("Alt-A", overwrite=True)
def select_all(layer):
layer.selected_data = set(np.arange(len(layer.data)))
layer._set_highlight()
@new_layer.bind_key("Control-Shift-<", overwrite=True)
def size_down(layer):
if layer.text.size > 4:
layer.text.size -= 1.0
else:
layer.text.size *= 0.8
@new_layer.bind_key("Control-Shift->", overwrite=True)
def size_up(layer):
if layer.text.size < 4:
layer.text.size += 1.0
else:
layer.text.size /= 0.8
return None
def viewer_imread(viewer:"napari.Viewer", path:str):
if "*" in path or os.path.getsize(path)/1e9 < Const["MAX_GB"]:
img = imread(path)
else:
img = lazy_imread(path)
layer = add_labeledarray(viewer, img)
viewer.text_overlay.font_size = 4 * Const["FONT_SIZE_FACTOR"]
viewer.text_overlay.visible = True
viewer.text_overlay.color = "white"
viewer.text_overlay.text = repr(img)
return layer
def add_labeledarray(viewer:"napari.Viewer", img:LabeledArray, **kwargs):
if not img.axes.is_sorted() and img.ndim > 2:
msg = f"Input image has axes that are not correctly sorted: {img.axes}. "\
"This may cause unexpected results."
warnings.warn(msg, UserWarning)
chn_ax = img.axisof("c") if "c" in img.axes else None
if isinstance(img, PhaseArray) and not "colormap" in kwargs.keys():
kwargs["colormap"] = "hsv"
kwargs["contrast_limits"] = img.border
elif img.dtype.kind == "c" and not "colormap" in kwargs.keys():
kwargs["colormap"] = "plasma"
scale = make_world_scale(img)
if "name" in kwargs:
name = kwargs.pop("name")
else:
name = "No-Name" if img.name is None else img.name
if chn_ax is not None:
name = [f"[C{i}]{name}" for i in range(img.shape.c)]
else:
name = [name]
if img.dtype.kind == "c":
img = np.abs(img)
layer = viewer.add_image(img, channel_axis=chn_ax, scale=scale,
name=name if len(name)>1 else name[0],
**kwargs)
if viewer.scale_bar.unit:
if viewer.scale_bar.unit != img.scale_unit:
msg = f"Incompatible scales. Viewer is {viewer.scale_bar.unit} while image is {img.scale_unit}."
warnings.warn(msg)
else:
viewer.scale_bar.unit = img.scale_unit
new_axes = [a for a in img.axes if a != "c"]
# add axis labels to slide bars and image orientation.
if len(new_axes) >= len(viewer.dims.axis_labels):
viewer.dims.axis_labels = new_axes
return layer
def add_labels(viewer:"napari.Viewer", labels:Label, opacity:float=0.3, name:str|list[str]=None,
**kwargs):
scale = make_world_scale(labels)
# prepare label list
if "c" in labels.axes:
lbls = labels.split("c")
else:
lbls = [labels]
# prepare name list
if isinstance(name, list):
names = [f"[L]{n}" for n in name]
elif isinstance(name, str):
names = [f"[L]{name}"] * len(lbls)
else:
names = [labels.name]
kw = dict(opacity=opacity, scale=scale)
kw.update(kwargs)
out_layers = []
for lbl, name in zip(lbls, names):
layer = viewer.add_labels(lbl.value, name=name, **kw)
out_layers.append(layer)
return out_layers
def add_dask(viewer:"napari.Viewer", img:LazyImgArray, **kwargs):
chn_ax = img.axisof("c") if "c" in img.axes else None
scale = make_world_scale(img)
if "contrast_limits" not in kwargs.keys():
# contrast limits should be determined quickly.
leny, lenx = img.shape[-2:]
sample = img.img[..., ::leny//min(10, leny), ::lenx//min(10, lenx)]
kwargs["contrast_limits"] = [float(sample.min().compute()),
float(sample.max().compute())]
name = "No-Name" if img.name is None else img.name
if chn_ax is not None:
name = [f"[Lazy][C{i}]{name}" for i in range(img.shape.c)]
else:
name = ["[Lazy]" + name]
layer = viewer.add_image(img, channel_axis=chn_ax, scale=scale,
name=name if len(name)>1 else name[0], **kwargs)
viewer.scale_bar.unit = img.scale_unit
new_axes = [a for a in img.axes if a != "c"]
# add axis labels to slide bars and image orientation.
if len(new_axes) >= len(viewer.dims.axis_labels):
viewer.dims.axis_labels = new_axes
return layer
def add_points(viewer:"napari.Viewer", points, **kwargs):
if isinstance(points, MarkerFrame):
scale = make_world_scale(points)
points = points.get_coords()
else:
scale=None
if "c" in points._axes:
pnts = points.split("c")
else:
pnts = [points]
for each in pnts:
metadata = {"axes": str(each._axes), "scale": each.scale}
kw = dict(size=3.2, face_color=[0,0,0,0], metadata=metadata, edge_color=viewer.window.cmap())
kw.update(kwargs)
viewer.add_points(each.values, scale=scale, **kw)
return None
def add_tracks(viewer:"napari.Viewer", track:TrackFrame, **kwargs):
if "c" in track._axes:
track_list = track.split("c")
else:
track_list = [track]
scale = make_world_scale(track[[a for a in track._axes if a != Const["ID_AXIS"]]])
for tr in track_list:
metadata = {"axes": str(tr._axes), "scale": tr.scale}
viewer.add_tracks(tr, scale=scale, metadata=metadata, **kwargs)
return None
def add_paths(viewer:"napari.Viewer", paths:PathFrame, **kwargs):
if "c" in paths._axes:
path_list = paths.split("c")
else:
path_list = [paths]
scale = make_world_scale(paths[[a for a in paths._axes if a != Const["ID_AXIS"]]])
kw = {"edge_color":"lime", "edge_width":0.3, "shape_type":"path"}
kw.update(kwargs)
for path in path_list:
metadata = {"axes": str(path._axes), "scale": path.scale}
paths = [single_path.values for single_path in path.split(Const["ID_AXIS"])]
viewer.add_shapes(paths, scale=scale, metadata=metadata, **kw)
return None
def add_table(viewer:"napari.Viewer", data=None, columns=None, name=None):
from .widgets import TableWidget
table = TableWidget(viewer, data, columns=columns, name=name)
viewer.window.add_dock_widget(table, area="right", name=table.name)
return table
def get_viewer_scale(viewer:"napari.Viewer"):
return {a: r[2] for a, r in zip(viewer.dims.axis_labels, viewer.dims.range)}
def layer_to_impy_object(viewer:"napari.Viewer", layer):
"""
Convert layer to real data.
Parameters
----------
layer : napari.layers.Layer
Input layer.
Returns
-------
ImgArray, Label, MarkerFrame or TrackFrame, or Shape features.
"""
data = layer.data
axes = "".join(viewer.dims.axis_labels)
scale = get_viewer_scale(viewer)
if isinstance(layer, (napari.layers.Image, napari.layers.Labels)):
# manually drawn ones are np.ndarray, need conversion
if type(data) is np.ndarray:
ndim = data.ndim
axes = axes[-ndim:]
if isinstance(layer, napari.layers.Image):
data = ImgArray(data, name=layer.name, axes=axes, dtype=layer.data.dtype)
else:
try:
data = layer.metadata["destination_image"].labels
except (KeyError, AttributeError):
data = Label(data, name=layer.name, axes=axes)
data.set_scale({k: v for k, v in scale.items() if k in axes})
return data
elif isinstance(layer, napari.layers.Shapes):
return data
elif isinstance(layer, napari.layers.Points):
ndim = data.shape[1]
axes = axes[-ndim:]
df = MarkerFrame(data, columns=layer.metadata.get("axes", axes))
df.set_scale(layer.metadata.get("scale",
{k: v for k, v in scale.items() if k in axes}))
return df.as_standard_type()
elif isinstance(layer, napari.layers.Tracks):
ndim = data.shape[1]
axes = axes[-ndim:]
df = TrackFrame(data, columns=layer.metadata.get("axes", axes))
df.set_scale(layer.metadata.get("scale",
{k: v for k, v in scale.items() if k in axes}))
return df.as_standard_type()
else:
raise NotImplementedError(type(layer))
def get_a_selected_layer(viewer:"napari.Viewer"):
selected = list(viewer.layers.selection)
if len(selected) == 0:
raise ValueError("No layer is selected.")
elif len(selected) > 1:
raise ValueError("More than one layers are selected.")
return selected[0]
def crop_rotated_rectangle(img:LabeledArray, crds:np.ndarray, dims="yx"):
translate = np.min(crds, axis=0)
# check is sorted
ids = [img.axisof(a) for a in dims]
if sorted(ids) == ids:
cropped_img = img.rotated_crop(crds[1], crds[0], crds[2], dims=dims)
else:
crds = np.fliplr(crds)
cropped_img = img.rotated_crop(crds[3], crds[0], crds[2], dims=dims)
return cropped_img, translate
def crop_rectangle(img:LabeledArray, crds:np.ndarray, dims="yx") -> tuple[LabeledArray, np.ndarray]:
start = crds[0]
end = crds[2]
sl = []
translate = np.empty(2)
for i in [0, 1]:
sl0 = sorted([start[i], end[i]])
x0 = max(int(sl0[0]), 0)
x1 = min(int(sl0[1]), img.sizeof(dims[i]))
sl.append(f"{dims[i]}={x0}:{x1}")
translate[i] = x0
area_to_crop = ";".join(sl)
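    # e.g. corners (2, 3) and (10, 8) produce "y=2:10;x=3:8", the
    # axis-labeled slice string that LabeledArray indexing accepts below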
cropped_img = img[area_to_crop]
return cropped_img, translate
class ColorCycle:
def __init__(self, cmap="rainbow") -> None:
import matplotlib.pyplot as plt
self.cmap = plt.get_cmap(cmap, 16)
self.color_id = 0
    def __call__(self):
        """Return the next color as an RGBA list."""
        self.color_id += 1
        # Stepping by N//2 + 1 (9, coprime with N=16) visits every colormap
        # entry before repeating, keeping consecutive colors distinct.
        return list(self.cmap(self.color_id * (self.cmap.N//2+1) % self.cmap.N))
|
1700930
|
from arm.logicnode.arm_nodes import *
class PickLocationNode(ArmLogicTreeNode):
"""Pick a location coordinates in the given NavMesh."""
bl_idname = 'LNPickLocationNode'
bl_label = 'Pick NavMesh Location'
arm_version = 1
def arm_init(self, context):
self.add_input('ArmNodeSocketObject', 'NavMesh')
self.add_input('ArmVectorSocket', 'Screen Coords')
self.add_output('ArmVectorSocket', 'Location')
|
1700949
|
import ipaddress
import socket
def net_family(net):
if isinstance(ipaddress.ip_network(net, strict=False),
ipaddress.IPv6Network):
return socket.AF_INET6
return socket.AF_INET
def _flag6(net):
return '-6' if net_family(net) == socket.AF_INET6 else ''
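# For example (illustrative values): net_family('2001:db8::/32') returns
# socket.AF_INET6 and net_family('10.0.0.0/8') returns socket.AF_INET;
# _flag6 supplies the '-6' flag that the generated "ip" commands need
# for IPv6 networks.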
class NetDev():
DEV_PROPS = {
'netns': {'type': 'string'},
'subnets': {'type': 'array', 'items': {'type': 'string'}},
'mtu': {'type': 'integer'},
'ethtool': {'type': 'object'},
'xdp': {'type': 'string'}
}
SCHEMA = {
'type': 'object',
'additionalProperties': False,
'required': ['netns'],
'properties': DEV_PROPS
}
@staticmethod
def set_peers(d1, d2):
d1.peer = d2
d2.peer = d1
def __init__(self, topology, name, owner, ns, **kwargs):
self.topology = topology
self.name = name
self.owner = owner
self.ns = ns
self.ns.add_dev(self)
self.mtu = kwargs.get('mtu')
subnets = kwargs.get('subnets') or []
self.addrs = []
self.addr_subnets = {}
self.addr_pools = {}
self.peer = None
self.master = None
self.vrf = None
self.link = kwargs.get('link')
self.ports = kwargs.get('ports') or []
self.subnets = subnets
self.noarp = kwargs.get('noarp') or False
self.ethtool = kwargs.get('ethtool') or {}
self.xdp = kwargs.get('xdp')
self.tss = {}
def p(self, *args, **kwargs):
self.topology.printfn(*args, **kwargs)
@classmethod
def args_from_params(cls, topology, params):
ret = {}
subnets = params.get('subnets')
if subnets:
ret['subnets'] = [topology.members[s] for s in subnets]
mtu = params.get('mtu')
if mtu:
ret['mtu'] = mtu
ethtool = params.get('ethtool')
if ethtool:
ret['ethtool'] = ethtool
xdp = params.get('xdp')
if xdp:
ret['xdp'] = xdp
return ret
@property
def dotname(self):
name_ = self.name.replace('.', '_')
ns_name = self.ns.name
return f'"netdev_{ns_name}_{name_}-{self.owner.REF}_{self.owner.name}"'
@property
def main_addr(self):
return self.addrs[0].split('/')[0]
def add_addr(self, addr, subnet, pool):
self.addrs.append(addr)
self.addr_subnets[addr] = subnet
self.addr_pools[addr] = pool
def render_dot(self):
label = f'{self.name}'
if self.mtu:
label += f'|mtu={self.mtu}'
if self.addrs:
addrs_str = '|'.join(self.addrs)
label += f'|{addrs_str}'
self.p(f'{self.dotname} [label="{{{label}}}", shape=record]')
if self.xdp:
xdp = self.topology.members.get(self.xdp)
if xdp:
self.p(f'{xdp.dotname} -- {self.dotname} '
f'[label="XDP", style=dashed]')
def render_bash_set_state(self, state):
self.p(f'ip -net {self.ns.name} link set {self.name} {state}')
def render_bash(self):
ns_name = self.ns.name
name = self.name
self.render_bash_set_state('up')
if self.mtu:
self.p(f'ip -net {ns_name} link set {name} mtu {self.mtu}')
for a in self.addrs:
dad = ' nodad' if net_family(a) == socket.AF_INET6 else ''
self.p(f'ip {_flag6(a)} -net {ns_name} addr add {a} '
f'dev {name}{dad}')
for k, v in self.ethtool.items():
if isinstance(v, str):
v = v in ('y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True',
'TRUE', 'on', 'On', 'ON')
val = 'on' if v else 'off'
self.p(f'ip netns exec {ns_name} ethtool -K {name} {k} {val}')
if self.xdp:
xdp = self.topology.members.get(self.xdp)
if xdp:
prog_var = xdp.bash_ebpf_var
self.p(f'ip -net {ns_name} link set {name} '
f'xdp object "${prog_var}"')
self.topology.done_list.add(self)
|
1700955
|
from riemann import tx
from riemann.tx import shared
from riemann import utils as rutils
from btcspv import utils
from typing import List
from btcspv.types import RelayHeader, SPVProof
def validate_vin(vin: bytes) -> bool:
'''Checks that the vin is properly formatted'''
if vin[0] > 0xfc or vin[0] == 0:
return False
try:
deser = _deserialize_vin(vin)
except (IndexError, ValueError):
return False
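    # The serialized inputs plus the single VarInt length byte (guaranteed
    # by the 0xfc check above) must account for every byte of the vin.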
return sum(map(len, deser)) + 1 == len(vin)
def _deserialize_vin(vin: bytes) -> List[tx.TxIn]:
# Get the length of the tx_in vector
tx_ins = []
tx_ins_num = shared.VarInt.from_bytes(vin)
# `current` is the index of next read
current = len(tx_ins_num)
# Deserialize all tx_ins
for _ in range(tx_ins_num.number):
tx_in = tx.TxIn.from_bytes(vin[current:])
current += len(tx_in)
tx_ins.append(tx_in)
return tx_ins
def validate_vout(vout: bytes) -> bool:
'''Checks that the vout is properly formatted'''
if vout[0] > 0xfc or vout[0] == 0:
return False
try:
deser = _deserialize_vout(vout)
except (IndexError, ValueError):
return False
return sum(map(len, deser)) + 1 == len(vout)
def _deserialize_vout(vout: bytes) -> List[tx.TxOut]:
# Get the length of the tx_in vector
tx_outs = []
tx_outs_num = shared.VarInt.from_bytes(vout)
# `current` is the index of next read
current = len(tx_outs_num)
# Deserialize all tx_outs
for _ in range(tx_outs_num.number):
tx_out = tx.TxOut.from_bytes(vout[current:])
current += len(tx_out)
tx_outs.append(tx_out)
return tx_outs
def extract_merkle_root_le(header: bytes) -> bytes:
'''Extracts the transaction merkle root from a header (little-endian)'''
return header[36:68]
def extract_prev_block_le(header: bytes) -> bytes:
'''Extracts the previous block's hash from a header (little-endian)'''
return header[4:36]
def prove(
txid: bytes, merkle_root: bytes, intermediate_nodes: bytes, index: int) \
-> bool:
'''
Validates a tx inclusion in the block.
Note that `index` is not a reliable indicator of location within a block.
'''
if txid == merkle_root and index == 0 and len(intermediate_nodes) == 0:
return True
proof = txid + intermediate_nodes + merkle_root
return utils.verify_proof(proof, index)
def validate_header(header: RelayHeader) -> bool:
'''
Verifies a bitcoin header
Args:
header (RelayHeader): The header as an object
Returns:
(bool): True if valid header, else False
'''
# Check that HashLE is the correct hash of the raw header
header_hash = rutils.hash256(header['raw'])
if header_hash != header['hash']:
return False
# Check that the MerkleRootLE is the correct MerkleRoot for the header
extracted_merkle_root = extract_merkle_root_le(header['raw'])
if extracted_merkle_root != header['merkle_root']:
return False
# Check that PrevHash is the correct PrevHash for the header
extracted_prevhash = extract_prev_block_le(header['raw'])
if extracted_prevhash != header['prevhash']:
return False
return True
def validate_spvproof(proof: SPVProof) -> bool:
'''
Verifies an SPV proof object
Args:
proof (SPVProof): The SPV Proof as an object
Returns:
(bool): True if valid proof, else False
'''
if not validate_vin(proof['vin']):
return False
if not validate_vout(proof['vout']):
return False
tx_id = rutils.hash256(
proof['version'] +
proof['vin'] +
proof['vout'] +
proof['locktime']
)
if tx_id != proof['tx_id']:
return False
if not validate_header(proof['confirming_header']):
return False
valid_proof = prove(
proof['tx_id'],
proof['confirming_header']['merkle_root'],
proof['intermediate_nodes'],
proof['index']
)
if not valid_proof:
return False
return True
|
1700957
|
from peon.src.project.file.function_def.function import FunctionLint
def test_class_constant():
assert FunctionLint.EMPTY_RETURNED_VALUE is True
assert FunctionLint.PYTHON_REFLECTION_EXPRESSIONS == ('type', 'isinstance')
assert FunctionLint.MUTABLE_TYPES == ('set', 'dict', 'list')
|
1700972
|
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# importing the Kratos Library
import KratosMultiphysics
import KratosMultiphysics.DelaunayMeshingApplication as KratosDelaunay
import KratosMultiphysics.ContactMechanicsApplication as KratosContact
from multiprocessing import Pool
def Factory(settings, Model):
if( not isinstance(settings,KratosMultiphysics.Parameters) ):
raise Exception("Expected input shall be a Parameters object, encapsulating a json string")
return ParametricWallsProcess(Model, settings["Parameters"])
class ParametricWallsProcess(KratosMultiphysics.Process):
#
def __init__(self, Model, custom_settings ):
KratosMultiphysics.Process.__init__(self)
##settings string in json format
default_settings = KratosMultiphysics.Parameters("""
{
"model_part_name" : "Solid Domain",
"search_control_type" : "step",
"search_frequency" : 1.0,
"parametric_walls" : []
}
""")
##overwrite the default settings with user-provided parameters
self.settings = custom_settings
self.settings.ValidateAndAssignDefaults(default_settings)
self.echo_level = 1
self.search_frequency = self.settings["search_frequency"].GetDouble()
self.search_control_is_time = False
search_control_type = self.settings["search_control_type"].GetString()
if(search_control_type == "time"):
self.search_control_is_time = True
elif(search_control_type == "step"):
self.search_control_is_time = False
self.step_count = 1
self.counter = 1
self.next_search = 0.0
self.Model = Model
#
def ExecuteInitialize(self):
self.main_model_part = self.Model[self.settings["model_part_name"].GetString()]
self.dimension = self.main_model_part.ProcessInfo[KratosMultiphysics.SPACE_DIMENSION]
#construct parametric wall domains
self.parametric_walls = []
walls_list = self.settings["parametric_walls"]
self.number_of_walls = walls_list.size()
for i in range(0,self.number_of_walls):
item = walls_list[i]
parametric_wall_module = __import__(item["python_module"].GetString())
wall = parametric_wall_module.CreateParametricWall(self.main_model_part, item)
self.parametric_walls.append(wall)
# mesh mesher initial values
self.search_contact_active = False
if( self.number_of_walls ):
self.search_contact_active = True
# build parametric walls
for i in range(0,self.number_of_walls):
self.parametric_walls[i].BuildParametricWall()
# check restart
self.restart = False
if( self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED] ):
self.restart = True
self.step_count = self.main_model_part.ProcessInfo[KratosMultiphysics.STEP]
if self.search_control_is_time:
self.next_search = self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]
else:
self.next_search = self.main_model_part.ProcessInfo[KratosMultiphysics.STEP]
# initialize wall domains
import domain_utilities
domain_utils = domain_utilities.DomainUtilities()
domain_utils.InitializeDomains(self.main_model_part,self.echo_level)
for wall in self.parametric_walls:
            wall.Initialize()
print(self._class_prefix()+" Ready")
###
#
def ExecuteInitializeSolutionStep(self):
self.step_count += 1
#clean all contacts from main_model_part
for wall in self.parametric_walls:
            wall.InitializeSearch()
#build all contacts :: when building check if the condition exists and clone it
if(self.search_contact_active):
if(self.IsSearchStep()):
self.SearchContact()
#
def ExecuteFinalizeSolutionStep(self):
pass
###
#
    @staticmethod
    def ExecuteSearch(wall):
        wall.ExecuteSearch()
#
def SearchContact(self):
if self.echo_level > 0:
print(self._class_prefix()+" [ Contact Search (call:", str(self.counter)+") ]")
self.wall_contact_model= KratosContact.ClearPointContactConditions(self.main_model_part, self.echo_level)
self.wall_contact_model.ExecuteInitialize()
# serial
for wall in self.parametric_walls:
wall.ExecuteSearch()
# parallel (not working pickling instances not enabled)
# walls_number = len(self.parametric_walls)
# if(walls_number>8):
# walls_number = 8
# pool = Pool(walls_number)
# pool.map(self.ExecuteSearch,self.parametric_walls)
# pool.close()
        # pool.join()
self.wall_contact_model.ExecuteFinalize()
self.counter += 1
# schedule next search
if(self.search_frequency > 0.0): # note: if == 0 always active
if(self.search_control_is_time):
time = self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]
while(self.next_search <= time):
self.next_search += self.search_frequency
else:
while self.next_search <= self.step_count:
self.next_search += self.search_frequency
#
def GetSearchStep(self):
return self.counter
#
def IsSearchStep(self):
if(self.search_control_is_time):
return (self.main_model_part.ProcessInfo[KratosMultiphysics.TIME] > self.next_search)
else:
return (self.step_count >= self.next_search)
#
def GetVariables(self):
nodal_variables = ['RIGID_WALL', 'VELOCITY', 'ACCELERATION', 'CONTACT_STRESS']
nodal_variables = nodal_variables + ['CONTACT_FORCE', 'CONTACT_NORMAL']
nodal_variables = nodal_variables + ['VOLUME_ACCELERATION']
nodal_variables = nodal_variables + ['NORMAL', 'NODAL_H']
return nodal_variables
#
    @classmethod
    def _class_prefix(cls):
header = "::[---Walls Contact---]::"
return header
|
1700999
|
import torch
import torch.nn as nn
class RNNCell(nn.Module):
def __init__(self,n_input, n_hidden):
super(RNNCell,self).__init__()
self.linear1=nn.Linear(n_input+n_hidden, n_hidden)
self.out=nn.Linear(n_hidden, n_hidden)
def forward(self,x,h):
h=torch.cat((h,x), dim=1)
h=self.linear1(h)
h=torch.relu(h)
out=torch.relu(self.out(h))
return out,h
class RNN(nn.Module):
def __init__(self, n_input, n_hidden):
super(RNN,self).__init__()
self.n_input=n_input
self.n_hidden=n_hidden
self.RNNCell=RNNCell(n_input, n_hidden)
def forward(self, input, h=None):
# Input dims are (batch_size, seq_length, timestep_features)
sequence_length=input.size()[1]
# Initializing hidden state if not provided
        if h is None:
            h = torch.zeros((input.size(0), self.n_hidden), device=input.device)
        outs = []
        for i in range(sequence_length):
            # input[:, i, :] already has shape (batch_size, timestep_features),
            # so no squeeze is needed (squeezing would break n_input == 1).
            x_timestep_features = input[:, i, :]
            out, h = self.RNNCell(x_timestep_features, h)
            outs.append(torch.unsqueeze(out, dim=1))
        # Stack per-timestep outputs into (batch_size, seq_length, n_hidden)
        return torch.cat(outs, dim=1)
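# A minimal shape check (illustrative, not part of the original module):
# the unrolled RNN should map (batch, seq, features) -> (batch, seq, hidden).
if __name__ == "__main__":
    rnn = RNN(n_input=8, n_hidden=16)
    x = torch.randn(4, 10, 8)
    assert rnn(x).shape == (4, 10, 16)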
|