id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
11585877
|
# Package shim: make sibling modules importable regardless of the current
# working directory, then import every module named in __all__.
# NOTE(review): pdb looks like a leftover debugging import — confirm before
# removing.
import pdb,sys,os

# Public modules re-exported by this shim.
__all__=['File','BioUtils','tbsp']

# Absolute directory containing this file; appended to sys.path so the
# modules listed in __all__ can be resolved from beside this file.
dir_path=os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path)
for i in __all__:
    # __import__ only triggers the module's import side effects; the module
    # object is not bound to a local name here.
    __import__(i)
|
11585915
|
# Batch-format street names: read example.csv, add English- and
# French-normalised columns, and write example_formatted.csv.
import pandas as pd
from Address_Format_Funcs import AddressClean_en, AddressClean_fr

test=pd.read_csv('example.csv')
# Each helper reads the 'street_name' column and writes the formatted result
# into the column named by its third argument, returning the augmented frame.
test=AddressClean_en(test,'street_name','formatted_en')
test=AddressClean_fr(test,'street_name','formatted_fr')
test.to_csv('example_formatted.csv',index=False)
|
11585941
|
from enum import Enum, unique
from typing import Dict, List, Optional
from pydantic import BaseModel, Field, validator
from astrobase.helpers.name import random_name
class EKSNodegroupScalingConfig(BaseModel):
    """Autoscaling bounds for an EKS managed nodegroup."""

    minSize: int = 1      # minimum node count
    maxSize: int = 3      # maximum node count
    desiredSize: int = 1  # initial/target node count
@unique
class EKSNodegroupAmiType(str, Enum):
    """AMI family identifiers accepted for an EKS nodegroup."""

    al2_x86_64 = "AL2_x86_64"
    al2_x86_64_gpu = "AL2_x86_64_GPU"
    al2_arm_64 = "AL2_ARM_64"
@unique
class EKSNodegroupCapacityType(str, Enum):
    """Purchasing model for an EKS nodegroup's instances."""

    on_demand = "ON_DEMAND"
    spot = "SPOT"
class EKSNodegroup(BaseModel):
    """Request model for a single EKS managed nodegroup.

    ``clusterName`` and ``subnets`` may be omitted; the ``EKSBase``
    validator fills them in from the parent cluster definition.
    """

    clusterName: Optional[str]
    nodegroupName: str
    scalingConfig: EKSNodegroupScalingConfig
    diskSize: int = 100  # root volume size — presumably GiB; TODO confirm
    subnets: Optional[List[str]]
    instanceTypes: List[str] = ["t3.medium"]
    amiType: EKSNodegroupAmiType = EKSNodegroupAmiType.al2_x86_64
    nodeRole: str  # IAM role ARN assumed by the worker nodes
    labels: Optional[Dict[str, str]] = {}
    tags: Optional[Dict[str, str]] = {}
    # NOTE: defaults to SPOT capacity, which AWS may reclaim at any time.
    capacityType: EKSNodegroupCapacityType = EKSNodegroupCapacityType.spot
class ResourcesVpcConfig(BaseModel):
    """VPC/networking settings for the EKS control plane."""

    subnetIds: List[str]
    securityGroupIds: List[str]
    endpointPublicAccess: bool = True
    endpointPrivateAccess: bool = True
    # NOTE(review): default exposes the public endpoint to all of IPv4.
    publicAccessCidrs: List[str] = ["0.0.0.0/0"]
@unique
class ClusterLoggingType(str, Enum):
    """Control-plane log streams that can be enabled on a cluster."""

    api = "api"
    audit = "audit"
    scheduler = "scheduler"
    authenticator = "authenticator"
    controllerManager = "controllerManager"
class ClusterLoggingConfig(BaseModel):
    """One logging toggle: which log types it covers and whether enabled."""

    types: List[ClusterLoggingType] = []
    enabled: bool = False  # logging is off by default
class ClusterLogging(BaseModel):
    """Wrapper matching the shape of the EKS API's ``logging`` payload."""

    clusterLogging: List[ClusterLoggingConfig] = [ClusterLoggingConfig()]
class EKSBase(BaseModel):
    """Base model describing an EKS cluster and its managed nodegroups."""

    name: Optional[str] = Field(default_factory=random_name)
    region: str
    roleArn: str
    resourcesVpcConfig: ResourcesVpcConfig
    tags: Optional[Dict[str, str]] = {}
    logging: ClusterLogging = ClusterLogging()
    nodegroups: List[EKSNodegroup]

    @validator("nodegroups", pre=True, always=True)
    def set_nodegroup_name_subnets(
        cls, v: List[dict], values: dict
    ) -> List[dict]:
        """Default each nodegroup's clusterName/subnets from the cluster.

        Uses ``values.get`` rather than indexing: pydantic omits fields
        that failed their own validation from ``values``, so indexing
        would raise ``KeyError`` here and mask the real validation error.
        """
        vpc_config = values.get("resourcesVpcConfig")
        for nodegroup in v:
            if not nodegroup.get("clusterName"):
                nodegroup["clusterName"] = values.get("name")
            if not nodegroup.get("subnets") and vpc_config is not None:
                nodegroup["subnets"] = vpc_config.subnetIds
        return v
class EKSCreate(EKSBase):
    """Cluster-creation payload; currently identical to ``EKSBase``."""

    pass
class EKSCreateAPIFilter(BaseModel):
    """Subset of cluster fields forwarded to the EKS create-cluster API.

    Unlike ``EKSBase``, ``name`` is required here and nodegroups/region
    are excluded from the payload.
    """

    name: str
    roleArn: str
    resourcesVpcConfig: ResourcesVpcConfig
    tags: Optional[Dict[str, str]] = {}
    logging: Optional[ClusterLogging] = ClusterLogging()
|
11585945
|
from pydantic import BaseModel
from typing import Optional


class Invoice(BaseModel):
    """Invoice metadata; every field is optional."""

    title: Optional[str] = None
    description: Optional[str] = None
    start_parameter: Optional[str] = None
    currency: Optional[str] = None  # presumably an ISO 4217 code — TODO confirm
    total_amount: Optional[int] = None  # presumably minor currency units — TODO confirm
|
11585948
|
from __future__ import division
from textwrap import dedent
import numpy.testing as npt
import pandas.util.testing as pdtest
import numpy
from numpy.testing import assert_equal
import pandas
import pytest
from statsmodels.imputation import ros
from statsmodels.compat.python import StringIO
# Disable test collection for this module on pandas older than 0.14.
# The version components must be compared numerically: the original
# lexicographic string comparison is wrong for single-digit minors
# ('9' < '14' is False as strings) and would also misfire once the
# major version advanced past 0.
_pd_version = pandas.__version__.split('.')
if (int(_pd_version[0]), int(_pd_version[1])) < (0, 14):
    __test__ = False
def load_basic_data():
    """Build the small censored-concentration example as a DataFrame.

    Returns columns ``res`` (measured value), ``qual`` ('ND' flags a
    non-detect), plus the derived ``conc`` (copy of ``res``) and boolean
    ``censored`` (True where ``qual`` is 'ND').
    """
    raw_csv = StringIO(
        "res,qual\n2.00,=\n4.20,=\n4.62,=\n5.00,ND\n5.00,ND\n5.50,ND\n"
        "5.57,=\n5.66,=\n5.75,ND\n5.86,=\n6.65,=\n6.78,=\n6.79,=\n7.50,=\n"
        "7.50,=\n7.50,=\n8.63,=\n8.71,=\n8.99,=\n9.50,ND\n9.50,ND\n9.85,=\n"
        "10.82,=\n11.00,ND\n11.25,=\n11.25,=\n12.20,=\n14.92,=\n16.77,=\n"
        "17.81,=\n19.16,=\n19.19,=\n19.64,=\n20.18,=\n22.97,=\n"
    )
    frame = pandas.read_csv(raw_csv)
    # Derived columns appended in the same order as the original loader.
    frame = frame.assign(
        conc=frame['res'],
        censored=frame['qual'] == 'ND',
    )
    return frame
def load_intermediate_data():
    """Return the basic dataset augmented with ROS intermediates.

    Same observations as ``load_basic_data`` plus the per-row
    ``det_limit_index`` and within-group ``rank`` columns.
    """
    df = pandas.DataFrame([
        {'censored': True, 'conc': 5.0, 'det_limit_index': 1, 'rank': 1},
        {'censored': True, 'conc': 5.0, 'det_limit_index': 1, 'rank': 2},
        {'censored': True, 'conc': 5.5, 'det_limit_index': 2, 'rank': 1},
        {'censored': True, 'conc': 5.75, 'det_limit_index': 3, 'rank': 1},
        {'censored': True, 'conc': 9.5, 'det_limit_index': 4, 'rank': 1},
        {'censored': True, 'conc': 9.5, 'det_limit_index': 4, 'rank': 2},
        {'censored': True, 'conc': 11.0, 'det_limit_index': 5, 'rank': 1},
        {'censored': False, 'conc': 2.0, 'det_limit_index': 0, 'rank': 1},
        {'censored': False, 'conc': 4.2, 'det_limit_index': 0, 'rank': 2},
        {'censored': False, 'conc': 4.62, 'det_limit_index': 0, 'rank': 3},
        {'censored': False, 'conc': 5.57, 'det_limit_index': 2, 'rank': 1},
        {'censored': False, 'conc': 5.66, 'det_limit_index': 2, 'rank': 2},
        {'censored': False, 'conc': 5.86, 'det_limit_index': 3, 'rank': 1},
        {'censored': False, 'conc': 6.65, 'det_limit_index': 3, 'rank': 2},
        {'censored': False, 'conc': 6.78, 'det_limit_index': 3, 'rank': 3},
        {'censored': False, 'conc': 6.79, 'det_limit_index': 3, 'rank': 4},
        {'censored': False, 'conc': 7.5, 'det_limit_index': 3, 'rank': 5},
        {'censored': False, 'conc': 7.5, 'det_limit_index': 3, 'rank': 6},
        {'censored': False, 'conc': 7.5, 'det_limit_index': 3, 'rank': 7},
        {'censored': False, 'conc': 8.63, 'det_limit_index': 3, 'rank': 8},
        {'censored': False, 'conc': 8.71, 'det_limit_index': 3, 'rank': 9},
        {'censored': False, 'conc': 8.99, 'det_limit_index': 3, 'rank': 10},
        {'censored': False, 'conc': 9.85, 'det_limit_index': 4, 'rank': 1},
        {'censored': False, 'conc': 10.82, 'det_limit_index': 4, 'rank': 2},
        {'censored': False, 'conc': 11.25, 'det_limit_index': 5, 'rank': 1},
        {'censored': False, 'conc': 11.25, 'det_limit_index': 5, 'rank': 2},
        {'censored': False, 'conc': 12.2, 'det_limit_index': 5, 'rank': 3},
        {'censored': False, 'conc': 14.92, 'det_limit_index': 5, 'rank': 4},
        {'censored': False, 'conc': 16.77, 'det_limit_index': 5, 'rank': 5},
        {'censored': False, 'conc': 17.81, 'det_limit_index': 5, 'rank': 6},
        {'censored': False, 'conc': 19.16, 'det_limit_index': 5, 'rank': 7},
        {'censored': False, 'conc': 19.19, 'det_limit_index': 5, 'rank': 8},
        {'censored': False, 'conc': 19.64, 'det_limit_index': 5, 'rank': 9},
        {'censored': False, 'conc': 20.18, 'det_limit_index': 5, 'rank': 10},
        {'censored': False, 'conc': 22.97, 'det_limit_index': 5, 'rank': 11}
    ])
    return df
def load_advanced_data():
    """Return the intermediate dataset plus plotting positions.

    Adds the ``plot_pos`` and preliminary normal quantile (``Zprelim``)
    columns on top of ``load_intermediate_data``'s contents.
    """
    df = pandas.DataFrame([
        {'Zprelim': -1.4456202174142005, 'censored': True, 'conc': 5.0,
         'det_limit_index': 1, 'plot_pos': 0.07414187643020594, 'rank': 1},
        {'Zprelim': -1.2201035333697587, 'censored': True, 'conc': 5.0,
         'det_limit_index': 1, 'plot_pos': 0.11121281464530891, 'rank': 2},
        {'Zprelim': -1.043822530159519, 'censored': True, 'conc': 5.5,
         'det_limit_index': 2, 'plot_pos': 0.14828375286041187, 'rank': 1},
        {'Zprelim': -1.0438225301595188, 'censored': True, 'conc': 5.75,
         'det_limit_index': 3, 'plot_pos': 0.1482837528604119, 'rank': 1},
        {'Zprelim': -0.8109553641377003, 'censored': True, 'conc': 9.5,
         'det_limit_index': 4, 'plot_pos': 0.20869565217391303, 'rank': 1},
        {'Zprelim': -0.4046779045300476, 'censored': True, 'conc': 9.5,
         'det_limit_index': 4, 'plot_pos': 0.34285714285714286, 'rank': 2},
        {'Zprelim': -0.20857169501420522, 'censored': True, 'conc': 11.0,
         'det_limit_index': 5, 'plot_pos': 0.41739130434782606, 'rank': 1},
        {'Zprelim': -1.5927654676048002, 'censored': False, 'conc': 2.0,
         'det_limit_index': 0, 'plot_pos': 0.055606407322654455, 'rank': 1},
        {'Zprelim': -1.2201035333697587, 'censored': False, 'conc': 4.2,
         'det_limit_index': 0, 'plot_pos': 0.11121281464530891, 'rank': 2},
        {'Zprelim': -0.9668111610681008, 'censored': False, 'conc': 4.62,
         'det_limit_index': 0, 'plot_pos': 0.16681922196796337, 'rank': 3},
        {'Zprelim': -0.6835186393930371, 'censored': False, 'conc': 5.57,
         'det_limit_index': 2, 'plot_pos': 0.24713958810068648, 'rank': 1},
        {'Zprelim': -0.6072167256926887, 'censored': False, 'conc': 5.66,
         'det_limit_index': 2, 'plot_pos': 0.27185354691075514, 'rank': 2},
        {'Zprelim': -0.44953240276543616, 'censored': False, 'conc': 5.86,
         'det_limit_index': 3, 'plot_pos': 0.3265238194299979, 'rank': 1},
        {'Zprelim': -0.36788328223414807, 'censored': False, 'conc': 6.65,
         'det_limit_index': 3, 'plot_pos': 0.35648013313917204, 'rank': 2},
        {'Zprelim': -0.28861907892223937, 'censored': False, 'conc': 6.78,
         'det_limit_index': 3, 'plot_pos': 0.38643644684834616, 'rank': 3},
        {'Zprelim': -0.21113039741112186, 'censored': False, 'conc': 6.79,
         'det_limit_index': 3, 'plot_pos': 0.4163927605575203, 'rank': 4},
        {'Zprelim': -0.1348908823006299, 'censored': False, 'conc': 7.5,
         'det_limit_index': 3, 'plot_pos': 0.4463490742666944, 'rank': 5},
        {'Zprelim': -0.05942854708257491, 'censored': False, 'conc': 7.5,
         'det_limit_index': 3, 'plot_pos': 0.4763053879758685, 'rank': 6},
        {'Zprelim': 0.015696403006170083, 'censored': False, 'conc': 7.5,
         'det_limit_index': 3, 'plot_pos': 0.5062617016850427, 'rank': 7},
        {'Zprelim': 0.09091016994359362, 'censored': False, 'conc': 8.63,
         'det_limit_index': 3, 'plot_pos': 0.5362180153942168, 'rank': 8},
        {'Zprelim': 0.16664251178856201, 'censored': False, 'conc': 8.71,
         'det_limit_index': 3, 'plot_pos': 0.5661743291033909, 'rank': 9},
        {'Zprelim': 0.24334426739770573, 'censored': False, 'conc': 8.99,
         'det_limit_index': 3, 'plot_pos': 0.596130642812565, 'rank': 10},
        {'Zprelim': 0.3744432988606558, 'censored': False, 'conc': 9.85,
         'det_limit_index': 4, 'plot_pos': 0.6459627329192545, 'rank': 1},
        {'Zprelim': 0.4284507519609981, 'censored': False, 'conc': 10.82,
         'det_limit_index': 4, 'plot_pos': 0.6658385093167701, 'rank': 2},
        {'Zprelim': 0.5589578655042562, 'censored': False, 'conc': 11.25,
         'det_limit_index': 5, 'plot_pos': 0.7119047619047619, 'rank': 1},
        {'Zprelim': 0.6374841609623771, 'censored': False, 'conc': 11.25,
         'det_limit_index': 5, 'plot_pos': 0.7380952380952381, 'rank': 2},
        {'Zprelim': 0.7201566171385521, 'censored': False, 'conc': 12.2,
         'det_limit_index': 5, 'plot_pos': 0.7642857142857142, 'rank': 3},
        {'Zprelim': 0.8080746339118065, 'censored': False, 'conc': 14.92,
         'det_limit_index': 5, 'plot_pos': 0.7904761904761904, 'rank': 4},
        {'Zprelim': 0.9027347916438648, 'censored': False, 'conc': 16.77,
         'det_limit_index': 5, 'plot_pos': 0.8166666666666667, 'rank': 5},
        {'Zprelim': 1.0062699858608395, 'censored': False, 'conc': 17.81,
         'det_limit_index': 5, 'plot_pos': 0.8428571428571429, 'rank': 6},
        {'Zprelim': 1.1219004674623523, 'censored': False, 'conc': 19.16,
         'det_limit_index': 5, 'plot_pos': 0.8690476190476191, 'rank': 7},
        {'Zprelim': 1.2548759122271174, 'censored': False, 'conc': 19.19,
         'det_limit_index': 5, 'plot_pos': 0.8952380952380953, 'rank': 8},
        {'Zprelim': 1.414746425534976, 'censored': False, 'conc': 19.64,
         'det_limit_index': 5, 'plot_pos': 0.9214285714285714, 'rank': 9},
        {'Zprelim': 1.622193585315426, 'censored': False, 'conc': 20.18,
         'det_limit_index': 5, 'plot_pos': 0.9476190476190476, 'rank': 10},
        {'Zprelim': 1.9399896117517081, 'censored': False, 'conc': 22.97,
         'det_limit_index': 5, 'plot_pos': 0.9738095238095239, 'rank': 11}
    ])
    return df
def load_basic_cohn():
    """Return the expected Cohn-number table for the basic dataset.

    One row per detection-limit interval; the trailing all-NaN row
    (with ``prob_exceedance`` 0.0) closes the table.
    """
    cohn = pandas.DataFrame([
        {'lower_dl': 2.0, 'ncen_equal': 0.0, 'nobs_below': 0.0,
         'nuncen_above': 3.0, 'prob_exceedance': 1.0, 'upper_dl': 5.0},
        {'lower_dl': 5.0, 'ncen_equal': 2.0, 'nobs_below': 5.0,
         'nuncen_above': 0.0, 'prob_exceedance': 0.77757437070938218, 'upper_dl': 5.5},
        {'lower_dl': 5.5, 'ncen_equal': 1.0, 'nobs_below': 6.0,
         'nuncen_above': 2.0, 'prob_exceedance': 0.77757437070938218, 'upper_dl': 5.75},
        {'lower_dl': 5.75, 'ncen_equal': 1.0, 'nobs_below': 9.0,
         'nuncen_above': 10.0, 'prob_exceedance': 0.7034324942791762, 'upper_dl': 9.5},
        {'lower_dl': 9.5, 'ncen_equal': 2.0, 'nobs_below': 21.0,
         'nuncen_above': 2.0, 'prob_exceedance': 0.37391304347826088, 'upper_dl': 11.0},
        {'lower_dl': 11.0, 'ncen_equal': 1.0, 'nobs_below': 24.0,
         'nuncen_above': 11.0, 'prob_exceedance': 0.31428571428571428, 'upper_dl': numpy.inf},
        {'lower_dl': numpy.nan, 'ncen_equal': numpy.nan, 'nobs_below': numpy.nan,
         'nuncen_above': numpy.nan, 'prob_exceedance': 0.0, 'upper_dl': numpy.nan}
    ])
    return cohn
class Test__ros_sort(object):
    """Tests for ``ros._ros_sort``: censored rows first, then uncensored."""

    def setup(self):
        self.df = load_basic_data()
        # Expected order: censored ascending, then uncensored ascending.
        self.expected_baseline = pandas.DataFrame([
            {'censored': True, 'conc': 5.0}, {'censored': True, 'conc': 5.0},
            {'censored': True, 'conc': 5.5}, {'censored': True, 'conc': 5.75},
            {'censored': True, 'conc': 9.5}, {'censored': True, 'conc': 9.5},
            {'censored': True, 'conc': 11.0}, {'censored': False, 'conc': 2.0},
            {'censored': False, 'conc': 4.2}, {'censored': False, 'conc': 4.62},
            {'censored': False, 'conc': 5.57}, {'censored': False, 'conc': 5.66},
            {'censored': False, 'conc': 5.86}, {'censored': False, 'conc': 6.65},
            {'censored': False, 'conc': 6.78}, {'censored': False, 'conc': 6.79},
            {'censored': False, 'conc': 7.5}, {'censored': False, 'conc': 7.5},
            {'censored': False, 'conc': 7.5}, {'censored': False, 'conc': 8.63},
            {'censored': False, 'conc': 8.71}, {'censored': False, 'conc': 8.99},
            {'censored': False, 'conc': 9.85}, {'censored': False, 'conc': 10.82},
            {'censored': False, 'conc': 11.25}, {'censored': False, 'conc': 11.25},
            {'censored': False, 'conc': 12.2}, {'censored': False, 'conc': 14.92},
            {'censored': False, 'conc': 16.77}, {'censored': False, 'conc': 17.81},
            {'censored': False, 'conc': 19.16}, {'censored': False, 'conc': 19.19},
            {'censored': False, 'conc': 19.64}, {'censored': False, 'conc': 20.18},
            {'censored': False, 'conc': 22.97},
        ])[['conc', 'censored']]
        # A censored max observation is dropped, so drop the last row.
        self.expected_with_warning = self.expected_baseline.iloc[:-1]

    def test_baseline(self):
        result = ros._ros_sort(self.df, 'conc', 'censored')
        pdtest.assert_frame_equal(result, self.expected_baseline)

    def test_censored_greater_than_max(self):
        # Mark the largest observation censored; it should be excluded.
        df = self.df.copy()
        max_row = df['conc'].idxmax()
        df.loc[max_row, 'censored'] = True
        result = ros._ros_sort(df, 'conc', 'censored')
        pdtest.assert_frame_equal(result, self.expected_with_warning)
class Test_cohn_numbers(object):
    """Tests for ``ros.cohn_numbers`` against the basic dataset."""

    def setup(self):
        self.df = load_basic_data()
        self.final_cols = ['lower_dl', 'upper_dl', 'nuncen_above', 'nobs_below',
                           'ncen_equal', 'prob_exceedance']
        self.expected_baseline = pandas.DataFrame([
            {'lower_dl': 2.0, 'ncen_equal': 0.0, 'nobs_below': 0.0,
             'nuncen_above': 3.0, 'prob_exceedance': 1.0, 'upper_dl': 5.0},
            {'lower_dl': 5.0, 'ncen_equal': 2.0, 'nobs_below': 5.0,
             'nuncen_above': 0.0, 'prob_exceedance': 0.77757437070938218, 'upper_dl': 5.5},
            {'lower_dl': 5.5, 'ncen_equal': 1.0, 'nobs_below': 6.0,
             'nuncen_above': 2.0, 'prob_exceedance': 0.77757437070938218, 'upper_dl': 5.75},
            {'lower_dl': 5.75, 'ncen_equal': 1.0, 'nobs_below': 9.0,
             'nuncen_above': 10.0, 'prob_exceedance': 0.7034324942791762, 'upper_dl': 9.5},
            {'lower_dl': 9.5, 'ncen_equal': 2.0, 'nobs_below': 21.0,
             'nuncen_above': 2.0, 'prob_exceedance': 0.37391304347826088, 'upper_dl': 11.0},
            {'lower_dl': 11.0, 'ncen_equal': 1.0, 'nobs_below': 24.0,
             'nuncen_above': 11.0, 'prob_exceedance': 0.31428571428571428, 'upper_dl': numpy.inf},
            {'lower_dl': numpy.nan, 'ncen_equal': numpy.nan, 'nobs_below': numpy.nan,
             'nuncen_above': numpy.nan, 'prob_exceedance': 0.0, 'upper_dl': numpy.nan}
        ])[self.final_cols]

    def test_baseline(self):
        result = ros.cohn_numbers(self.df, observations='conc', censorship='censored')
        pdtest.assert_frame_equal(result, self.expected_baseline)

    def test_no_NDs(self):
        # With no censored values the Cohn table should be empty.
        _df = self.df.copy()
        _df['qual'] = False
        result = ros.cohn_numbers(_df, observations='conc', censorship='qual')
        assert result.shape == (0, 6)
class Test__detection_limit_index(object):
    """Tests for ``ros._detection_limit_index`` lookups into the Cohn table."""

    def setup(self):
        self.cohn = load_basic_cohn()
        self.empty_cohn = pandas.DataFrame(numpy.empty((0, 7)))

    def test_empty(self):
        # Empty Cohn table: index defaults to 0 regardless of the value.
        assert_equal(ros._detection_limit_index(None, self.empty_cohn), 0)

    def test_populated(self):
        assert_equal(ros._detection_limit_index(3.5, self.cohn), 0)
        assert_equal(ros._detection_limit_index(6.0, self.cohn), 3)
        assert_equal(ros._detection_limit_index(12.0, self.cohn), 5)

    def test_out_of_bounds(self):
        # Values below the lowest detection limit raise IndexError.
        with pytest.raises(IndexError):
            ros._detection_limit_index(0, self.cohn)
def test__ros_group_rank():
    """Ranks restart within each (dl_idx, params) group, in input order."""
    df = pandas.DataFrame({
        'dl_idx': [1] * 12,
        'params': list('AABCCCDE') + list('DCBA'),
        'values': list(range(12))
    })
    result = ros._ros_group_rank(df, 'dl_idx', 'params')
    expected = pandas.Series([1, 2, 1, 1, 2, 3, 1, 1, 2, 4, 2, 3], name='rank')
    pdtest.assert_series_equal(result.astype(int), expected.astype(int))
class Test__ros_plot_pos(object):
    """Tests for ``ros._ros_plot_pos`` on single rows against the Cohn table."""

    def setup(self):
        self.cohn = load_basic_cohn()

    def test_uncensored_1(self):
        row = {'censored': False, 'det_limit_index': 2, 'rank': 1}
        result = ros._ros_plot_pos(row, 'censored', self.cohn)
        assert_equal(result, 0.24713958810068648)

    def test_uncensored_2(self):
        row = {'censored': False, 'det_limit_index': 2, 'rank': 12}
        result = ros._ros_plot_pos(row, 'censored', self.cohn)
        assert_equal(result, 0.51899313501144173)

    def test_censored_1(self):
        # NOTE(review): expected value > 1 for this censored row — presumably
        # intentional for out-of-range ranks; confirm against ros source.
        row = {'censored': True, 'det_limit_index': 5, 'rank': 4}
        result = ros._ros_plot_pos(row, 'censored', self.cohn)
        assert_equal(result, 1.3714285714285714)

    def test_censored_2(self):
        row = {'censored': True, 'det_limit_index': 4, 'rank': 2}
        result = ros._ros_plot_pos(row, 'censored', self.cohn)
        assert_equal(result, 0.41739130434782606)
def test__norm_plot_pos():
    """Normal-distribution plotting positions for 4 ranked values."""
    result = ros._norm_plot_pos([1, 2, 3, 4])
    expected = numpy.array([ 0.159104, 0.385452, 0.614548, 0.840896])
    npt.assert_array_almost_equal(result, expected)
def test_plotting_positions():
    """End-to-end plotting positions for the intermediate dataset."""
    df = load_intermediate_data()
    cohn = load_basic_cohn()
    results = ros.plotting_positions(df, 'censored', cohn)
    expected = numpy.array([
        0.07414188, 0.11121281, 0.14828375, 0.14828375, 0.20869565,
        0.34285714, 0.4173913 , 0.05560641, 0.11121281, 0.16681922,
        0.24713959, 0.27185355, 0.32652382, 0.35648013, 0.38643645,
        0.41639276, 0.44634907, 0.47630539, 0.5062617 , 0.53621802,
        0.56617433, 0.59613064, 0.64596273, 0.66583851, 0.71190476,
        0.73809524, 0.76428571, 0.79047619, 0.81666667, 0.84285714,
        0.86904762, 0.8952381 , 0.92142857, 0.94761905, 0.97380952
    ])
    npt.assert_array_almost_equal(results, expected)
def test__impute():
    """Imputed (log-transformed) values for censored rows; uncensored pass through."""
    expected = numpy.array([
        3.11279729, 3.60634338, 4.04602788, 4.04602788,
        4.71008116, 6.14010906, 6.97841457, 2. ,
        4.2 , 4.62 , 5.57 , 5.66 ,
        5.86 , 6.65 , 6.78 , 6.79 ,
        7.5 , 7.5 , 7.5 , 8.63 ,
        8.71 , 8.99 , 9.85 , 10.82 ,
        11.25 , 11.25 , 12.2 , 14.92 ,
        16.77 , 17.81 , 19.16 , 19.19 ,
        19.64 , 20.18 , 22.97
    ])
    df = load_advanced_data()
    df = ros._impute(df, 'conc', 'censored', numpy.log, numpy.exp)
    result = df['final'].values
    npt.assert_array_almost_equal(result, expected)
def test__do_ros():
    """Full ROS pipeline from raw data; same expectation as ``test__impute``."""
    expected = numpy.array([
        3.11279729, 3.60634338, 4.04602788, 4.04602788,
        4.71008116, 6.14010906, 6.97841457, 2. ,
        4.2 , 4.62 , 5.57 , 5.66 ,
        5.86 , 6.65 , 6.78 , 6.79 ,
        7.5 , 7.5 , 7.5 , 8.63 ,
        8.71 , 8.99 , 9.85 , 10.82 ,
        11.25 , 11.25 , 12.2 , 14.92 ,
        16.77 , 17.81 , 19.16 , 19.19 ,
        19.64 , 20.18 , 22.97
    ])
    df = load_basic_data()
    df = ros._do_ros(df, 'conc', 'censored', numpy.log, numpy.exp)
    result = df['final'].values
    npt.assert_array_almost_equal(result, expected)
class CheckROSMixin(object):
    """Shared assertions for ROS datasets.

    Subclasses provide ``df``, ``rescol``, ``cencol``, ``decimal``,
    ``expected_final`` and ``expected_cohn`` as class attributes.
    """

    def test_ros_df(self):
        # DataFrame + column-name calling convention.
        result = ros.impute_ros(self.rescol, self.cencol, df=self.df)
        npt.assert_array_almost_equal(
            sorted(result),
            sorted(self.expected_final),
            decimal=self.decimal
        )

    def test_ros_arrays(self):
        # Array calling convention must give the same imputed values.
        result = ros.impute_ros(self.df[self.rescol], self.df[self.cencol], df=None)
        npt.assert_array_almost_equal(
            sorted(result),
            sorted(self.expected_final),
            decimal=self.decimal
        )

    def test_cohn(self):
        cols = [
            'nuncen_above', 'nobs_below',
            'ncen_equal', 'prob_exceedance'
        ]
        cohn = ros.cohn_numbers(self.df, self.rescol, self.cencol)
        # NOTE(review): check_less_precise is deprecated in newer pandas
        # (use rtol/atol) — kept for compatibility with the pinned version.
        pdtest.assert_frame_equal(
            cohn[cols],
            self.expected_cohn[cols],
            check_less_precise=True,
        )
class Test_ROS_HelselAppendixB(CheckROSMixin):
    """
    Appendix B dataset from "Estimation of Descriptive Statistics for
    Multiply Censored Water Quality Data", Water Resources Research,
    Vol 24, No 12, pp 1997 - 2004. December 1988.
    """
    decimal = 2
    res = numpy.array([
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 10., 10., 10.,
        3.0, 7.0, 9.0, 12., 15., 20., 27., 33., 50.
    ])
    cen = numpy.array([
        True, True, True, True, True, True, True, True, True,
        False, False, False, False, False, False, False,
        False, False
    ])
    rescol = 'obs'
    cencol = 'cen'
    df = pandas.DataFrame({rescol: res, cencol: cen})
    expected_final = numpy.array([
        0.47, 0.85, 1.11, 1.27, 1.76, 2.34, 2.50, 3.00, 3.03,
        4.80, 7.00, 9.00, 12.0, 15.0, 20.0, 27.0, 33.0, 50.0
    ])
    expected_cohn = pandas.DataFrame({
        'nuncen_above': numpy.array([3.0, 6.0, numpy.nan]),
        'nobs_below': numpy.array([6.0, 12.0, numpy.nan]),
        'ncen_equal': numpy.array([6.0, 3.0, numpy.nan]),
        'prob_exceedance': numpy.array([0.55556, 0.33333, 0.0]),
    })
class Test_ROS_HelselArsenic(CheckROSMixin):
    """
    Oahu arsenic data from "Nondetects and Data Analysis" (Helsel, 2005;
    author name redacted in this copy).

    Plotting positions are fudged relative to the source data, since the
    modeled data is what matters (and the source data's plot positions
    are not uniformly spaced, which seems weird).
    """
    decimal = 2
    res = numpy.array([
        3.2, 2.8, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
        2.0, 2.0, 1.7, 1.5, 1.0, 1.0, 1.0, 1.0,
        0.9, 0.9, 0.7, 0.7, 0.6, 0.5, 0.5, 0.5
    ])
    cen = numpy.array([
        False, False, True, True, True, True, True,
        True, True, True, False, False, True, True,
        True, True, False, True, False, False, False,
        False, False, False
    ])
    rescol = 'obs'
    cencol = 'cen'
    df = pandas.DataFrame({rescol: res, cencol: cen})
    expected_final = numpy.array([
        3.20, 2.80, 1.42, 1.14, 0.95, 0.81, 0.68, 0.57,
        0.46, 0.35, 1.70, 1.50, 0.98, 0.76, 0.58, 0.41,
        0.90, 0.61, 0.70, 0.70, 0.60, 0.50, 0.50, 0.50
    ])
    expected_cohn = pandas.DataFrame({
        'nuncen_above': numpy.array([6.0, 1.0, 2.0, 2.0, numpy.nan]),
        'nobs_below': numpy.array([0.0, 7.0, 12.0, 22.0, numpy.nan]),
        'ncen_equal': numpy.array([0.0, 1.0, 4.0, 8.0, numpy.nan]),
        'prob_exceedance': numpy.array([1.0, 0.3125, 0.21429, 0.0833, 0.0]),
    })
class Test_ROS_RNADAdata(CheckROSMixin):
    """ROS check against a dataset from the R NADA package.

    Data is embedded as whitespace-separated text and parsed with
    ``sep=r'\\s+'`` — a raw string, since the bare ``'\\s+'`` literal is an
    invalid escape sequence that warns on modern Python.
    """
    decimal = 3
    datastring = StringIO(dedent("""\
        res cen
        0.090 True
        0.090 True
        0.090 True
        0.101 False
        0.136 False
        0.340 False
        0.457 False
        0.514 False
        0.629 False
        0.638 False
        0.774 False
        0.788 False
        0.900 True
        0.900 True
        0.900 True
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.000 False
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.000 True
        1.100 False
        2.000 False
        2.000 False
        2.404 False
        2.860 False
        3.000 False
        3.000 False
        3.705 False
        4.000 False
        5.000 False
        5.960 False
        6.000 False
        7.214 False
        16.000 False
        17.716 False
        25.000 False
        51.000 False"""
    ))
    rescol = 'res'
    cencol = 'cen'
    df = pandas.read_csv(datastring, sep=r'\s+')
    expected_final = numpy.array([
        0.01907990, 0.03826254, 0.06080717, 0.10100000, 0.13600000,
        0.34000000, 0.45700000, 0.51400000, 0.62900000, 0.63800000,
        0.77400000, 0.78800000, 0.08745914, 0.25257575, 0.58544205,
        0.01711153, 0.03373885, 0.05287083, 0.07506079, 0.10081573,
        1.00000000, 0.13070334, 0.16539309, 0.20569039, 0.25257575,
        0.30725491, 0.37122555, 0.44636843, 0.53507405, 0.64042242,
        0.76644378, 0.91850581, 1.10390531, 1.10000000, 2.00000000,
        2.00000000, 2.40400000, 2.86000000, 3.00000000, 3.00000000,
        3.70500000, 4.00000000, 5.00000000, 5.96000000, 6.00000000,
        7.21400000, 16.00000000, 17.71600000, 25.00000000, 51.00000000
    ])
    expected_cohn = pandas.DataFrame({
        'nuncen_above': numpy.array([9., 0.0, 18., numpy.nan]),
        'nobs_below': numpy.array([3., 15., 32., numpy.nan]),
        'ncen_equal': numpy.array([3., 3., 17., numpy.nan]),
        'prob_exceedance': numpy.array([0.84, 0.36, 0.36, 0]),
    })
class Test_NoOp_ZeroND(CheckROSMixin):
    """No censored values: ROS should be a no-op and the Cohn table empty."""

    decimal = 2
    numpy.random.seed(0)  # deterministic lognormal sample
    N = 20
    res = numpy.random.lognormal(size=N)
    cen = [False] * N
    rescol = 'obs'
    cencol = 'cen'
    df = pandas.DataFrame({rescol: res, cencol: cen})
    expected_final = numpy.array([
        0.38, 0.43, 0.81, 0.86, 0.90, 1.13, 1.15, 1.37, 1.40,
        1.49, 1.51, 1.56, 2.14, 2.59, 2.66, 4.28, 4.46, 5.84,
        6.47, 9.4
    ])
    expected_cohn = pandas.DataFrame({
        'nuncen_above': numpy.array([]),
        'nobs_below': numpy.array([]),
        'ncen_equal': numpy.array([]),
        'prob_exceedance': numpy.array([]),
    })
class Test_ROS_OneND(CheckROSMixin):
    """Single non-detect: only its value is imputed (1.0 -> 0.24)."""

    decimal = 3
    res = numpy.array([
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 10., 10., 10.,
        3.0, 7.0, 9.0, 12., 15., 20., 27., 33., 50.
    ])
    cen = numpy.array([
        True, False, False, False, False, False, False, False, False,
        False, False, False, False, False, False, False,
        False, False
    ])
    rescol = 'conc'
    cencol = 'cen'
    df = pandas.DataFrame({rescol: res, cencol: cen})
    expected_final = numpy.array([
        0.24, 1.0, 1.0, 1.0, 1.0, 1.0, 10., 10., 10.,
        3.0 , 7.0, 9.0, 12., 15., 20., 27., 33., 50.
    ])
    expected_cohn = pandas.DataFrame({
        'nuncen_above': numpy.array([17.0, numpy.nan]),
        'nobs_below': numpy.array([1.0, numpy.nan]),
        'ncen_equal': numpy.array([1.0, numpy.nan]),
        'prob_exceedance': numpy.array([0.94444, 0.0]),
    })
class Test_HalfDLs_80pctNDs(CheckROSMixin):
    """>80% non-detects: non-detects fall back to half the detection limit."""

    decimal = 3
    res = numpy.array([
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 10., 10., 10.,
        3.0, 7.0, 9.0, 12., 15., 20., 27., 33., 50.
    ])
    cen = numpy.array([
        True, True, True, True, True, True, True, True,
        True, True, True, True, True, True, True, False,
        False, False
    ])
    rescol = 'value'
    cencol = 'qual'
    df = pandas.DataFrame({rescol: res, cencol: cen})
    expected_final = numpy.array([
        0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 5.0, 5.0, 5.0,
        1.5, 3.5, 4.5, 6.0, 7.5, 10., 27., 33., 50.
    ])
    expected_cohn = pandas.DataFrame({
        'nuncen_above': numpy.array([0., 0., 0., 0., 0., 0., 0., 3., numpy.nan]),
        'nobs_below': numpy.array([6., 7., 8., 9., 12., 13., 14., 15., numpy.nan]),
        'ncen_equal': numpy.array([6., 1., 1., 1., 3., 1., 1., 1., numpy.nan]),
        'prob_exceedance': numpy.array([0.16667] * 8 + [0.]),
    })
# NOTE(review): class name misspells "Half" as "Hafl"; left as-is because
# renaming would change the test's public identifier.
class Test_HaflDLs_OneUncensored(CheckROSMixin):
    """Only one uncensored value: non-detects use half the detection limit."""

    decimal = 3
    res = numpy.array([1.0, 1.0, 12., 15., ])
    cen = numpy.array([True, True, True, False ])
    rescol = 'value'
    cencol = 'qual'
    df = pandas.DataFrame({rescol: res, cencol: cen})
    expected_final = numpy.array([0.5, 0.5, 6. , 15.])
    expected_cohn = pandas.DataFrame({
        'nuncen_above': numpy.array([0., 1., numpy.nan]),
        'nobs_below': numpy.array([2., 3., numpy.nan]),
        'ncen_equal': numpy.array([2., 1., numpy.nan]),
        'prob_exceedance': numpy.array([0.25, 0.25, 0.]),
    })
class Test_ROS_MaxCen_GT_MaxUncen(Test_ROS_HelselAppendixB):
    """Appendix B data plus censored values (60, 70) above the uncensored max.

    Inherits the parent's expectations: those extra censored rows should
    be dropped by the ROS procedure.
    """
    res = numpy.array([
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 10., 10., 10.,
        3.0, 7.0, 9.0, 12., 15., 20., 27., 33., 50.,
        60, 70
    ])
    cen = numpy.array([
        True, True, True, True, True, True, True, True, True,
        False, False, False, False, False, False, False,
        False, False, True, True
    ])
class Test_ROS_OnlyDL_GT_MaxUncen(Test_NoOp_ZeroND):
    """No-op data plus two non-detects at a DL above every uncensored value.

    Inherits the parent's expectations: the trailing censored rows should
    not change the imputed results.
    """
    numpy.random.seed(0)
    N = 20
    res = [
        0.38, 0.43, 0.81, 0.86, 0.90, 1.13, 1.15, 1.37, 1.40,
        1.49, 1.51, 1.56, 2.14, 2.59, 2.66, 4.28, 4.46, 5.84,
        6.47, 9.40, 10.0, 10.0
    ]
    cen = ([False] * N) + [True, True]
|
11585958
|
import os
import six
import pyblish.api
import copy
from datetime import datetime
from openpype.lib.plugin_tools import prepare_template_data
from openpype.lib import OpenPypeMongoConnection
class IntegrateSlackAPI(pyblish.api.InstancePlugin):
    """ Send message notification to a channel.

    Triggers on instances with "slack" family, filled by
    'collect_slack_family'.

    Expects configured profile in
    Project settings > Slack > Publish plugins > Notification to Slack.

    If instance contains 'thumbnail' it uploads it. Bot must be present
    in the target channel.
    If instance contains 'review' it could upload (if configured) or place
    link with {review_filepath} placeholder.

    Message template can contain {} placeholders from anatomyData.
    """
    order = pyblish.api.IntegratorOrder + 0.499
    label = "Integrate Slack Api"
    families = ["slack"]
    optional = True

    def process(self, instance):
        """Post the configured message (and optional files) per profile/channel."""
        thumbnail_path = self._get_thumbnail_path(instance)
        review_path = self._get_review_path(instance)

        publish_files = set()
        for message_profile in instance.data["slack_channel_message_profiles"]:
            message = self._get_filled_message(message_profile["message"],
                                               instance,
                                               review_path)
            self.log.info("message:: {}".format(message))
            if not message:
                # Template could not be filled; nothing to send.
                # NOTE(review): this aborts remaining profiles too — confirm
                # whether 'continue' was intended here.
                return

            if message_profile["upload_thumbnail"] and thumbnail_path:
                publish_files.add(thumbnail_path)
            if message_profile["upload_review"] and review_path:
                publish_files.add(review_path)

            project = instance.context.data["anatomyData"]["project"]["code"]
            for channel in message_profile["channels"]:
                if six.PY2:
                    msg_id, file_ids = \
                        self._python2_call(instance.data["slack_token"],
                                           channel,
                                           message,
                                           publish_files)
                else:
                    msg_id, file_ids = \
                        self._python3_call(instance.data["slack_token"],
                                           channel,
                                           message,
                                           publish_files)

                # Record the sent notification so it can be traced/cleaned up.
                msg = {
                    "type": "slack",
                    "msg_id": msg_id,
                    "file_ids": file_ids,
                    "project": project,
                    "created_dt": datetime.now()
                }
                mongo_client = OpenPypeMongoConnection.get_mongo_client()
                database_name = os.environ["OPENPYPE_DATABASE_NAME"]
                dbcon = mongo_client[database_name]["notification_messages"]
                dbcon.insert_one(msg)

    def _get_filled_message(self, message_templ, instance, review_path=None):
        """Use message_templ and data from instance to get message content.

        Reviews might be large, so allow only adding link to message instead
        of uploading only. Returns None when the template references keys
        that are not available.
        """
        fill_data = copy.deepcopy(instance.context.data["anatomyData"])

        fill_pairs = [
            ("asset", instance.data.get("asset", fill_data.get("asset"))),
            ("subset", instance.data.get("subset", fill_data.get("subset"))),
            ("username", instance.data.get("username",
                                           fill_data.get("username"))),
            ("app", instance.data.get("app", fill_data.get("app"))),
            ("family", instance.data.get("family", fill_data.get("family"))),
            ("version", str(instance.data.get("version",
                                              fill_data.get("version"))))
        ]
        if review_path:
            fill_pairs.append(("review_filepath", review_path))

        # Task info may be missing from both sources; guard against None,
        # which previously crashed on task_data.items().
        task_data = instance.data.get("task") or fill_data.get("task")
        if task_data:
            for key, value in task_data.items():
                fill_key = "task[{}]".format(key)
                fill_pairs.append((fill_key, value))
            if "name" in task_data:
                fill_pairs.append(("task", task_data["name"]))

        self.log.debug("fill_pairs ::{}".format(fill_pairs))
        multiple_case_variants = prepare_template_data(fill_pairs)
        fill_data.update(multiple_case_variants)

        message = None
        try:
            message = message_templ.format(**fill_data)
        except Exception:
            # Missing placeholder keys: warn and return None.
            self.log.warning(
                "Some keys are missing in {}".format(message_templ),
                exc_info=True)
        return message

    def _get_thumbnail_path(self, instance):
        """Returns abs url for thumbnail if present in instance repres"""
        published_path = None
        for repre in instance.data['representations']:
            if repre.get('thumbnail') or "thumbnail" in repre.get('tags', []):
                if os.path.exists(repre["published_path"]):
                    published_path = repre["published_path"]
                    break
        return published_path

    def _get_review_path(self, instance):
        """Returns abs url for review if present in instance repres"""
        published_path = None
        for repre in instance.data['representations']:
            tags = repre.get('tags', [])
            if (repre.get("review")
                    or "review" in tags
                    or "burnin" in tags):
                if os.path.exists(repre["published_path"]):
                    published_path = repre["published_path"]
                    if "burnin" in tags:  # burnin has precedence if exists
                        break
        return published_path

    def _python2_call(self, token, channel, message, publish_files):
        """Send the message via the legacy 'slackclient' (Python 2) API."""
        from slackclient import SlackClient
        try:
            client = SlackClient(token)
            attachment_str = "\n\n Attachment links: \n"
            file_ids = []
            for p_file in publish_files:
                with open(p_file, 'rb') as pf:
                    response = client.api_call(
                        "files.upload",
                        file=pf,
                        channel=channel,
                        title=os.path.basename(p_file)
                    )
                    attachment_str += "\n<{}|{}>".format(
                        response["file"]["permalink"],
                        os.path.basename(p_file))
                    file_ids.append(response["file"]["id"])

            if publish_files:
                message += attachment_str

            response = client.api_call(
                "chat.postMessage",
                channel=channel,
                text=message
            )
            if response.get("error"):
                error_str = self._enrich_error(str(response.get("error")),
                                               channel)
                self.log.warning("Error happened: {}".format(error_str))
            else:
                return response["ts"], file_ids
        except Exception as e:
            # You will get a SlackApiError if "ok" is False
            error_str = self._enrich_error(str(e), channel)
            self.log.warning("Error happened: {}".format(error_str))

    def _python3_call(self, token, channel, message, publish_files):
        """Send the message via 'slack_sdk' (Python 3) API."""
        from slack_sdk import WebClient
        from slack_sdk.errors import SlackApiError
        try:
            client = WebClient(token=token)
            attachment_str = "\n\n Attachment links: \n"
            file_ids = []
            for published_file in publish_files:
                response = client.files_upload(
                    file=published_file,
                    filename=os.path.basename(published_file))
                attachment_str += "\n<{}|{}>".format(
                    response["file"]["permalink"],
                    os.path.basename(published_file))
                file_ids.append(response["file"]["id"])

            if publish_files:
                message += attachment_str

            response = client.chat_postMessage(
                channel=channel,
                text=message
            )
            return response.data["ts"], file_ids
        except SlackApiError as e:
            # You will get a SlackApiError if "ok" is False
            error_str = self._enrich_error(str(e.response["error"]), channel)
            self.log.warning("Error happened {}".format(error_str))

    def _enrich_error(self, error_str, channel):
        """Enhance known errors with more helpful notations."""
        if 'not_in_channel' in error_str:
            # there is no file.write.public scope, app must be explicitly in
            # the channel
            # (grammar fixed: "must added" -> "must be added")
            msg = " - application must be added to channel '{}'.".format(channel)
            error_str += msg + " Ask Slack admin."
        return error_str
|
11586000
|
import pandas as pd
import pandas.testing as pdt
import pytest
from cape_privacy.pandas.transformations import ReversibleTokenizer
from cape_privacy.pandas.transformations import Tokenizer
from cape_privacy.pandas.transformations import TokenReverser
def test_tokenizer():
    """Tokenizer with a fixed key should deterministically replace values."""
    df = pd.DataFrame({"name": ["Alice", "Bob"]})
    expected = pd.DataFrame(
        {
            "name": [
                "<KEY>",
                "<KEY>",
            ]
        }
    )
    transform = Tokenizer(key="secret_key")
    df["name"] = transform(df["name"])
    pdt.assert_frame_equal(df, expected)
def test_tokenizer_with_max_size():
    """Tokens must be truncated to ``max_token_len`` characters."""
    df = pd.DataFrame({"name": ["Alice", "Bob"]})
    expected = pd.DataFrame({"name": ["70a4b1a987", "dd4532a296"]})
    transform = Tokenizer(max_token_len=10, key="secret_key")
    df["name"] = transform(df["name"])
    pdt.assert_frame_equal(df, expected)
def test_reversible_tokenizer():
    """Round trip: TokenReverser recovers ReversibleTokenizer's input."""
    key = b"5" * 32
    plaintext = pd.DataFrame({"name": ["Alice", "Bob"]})
    tokenized_expected = pd.DataFrame(
        {
            "name": [
                "<KEY>",
                "e0f40aea0d5c21b35967c4231b98b5b3e5338e",
            ]
        }
    )
    tokenized = pd.DataFrame()
    tokenized["name"] = ReversibleTokenizer(key=key)(plaintext["name"])
    pdt.assert_frame_equal(tokenized, tokenized_expected)
    recovered = pd.DataFrame()
    recovered["name"] = TokenReverser(key=key)(tokenized["name"])
    pdt.assert_frame_equal(recovered, plaintext)
def test_reversible_tokenizer_string_key():
    """A 32-character ``str`` key is accepted as well as ``bytes``."""
    _ = ReversibleTokenizer(key="5" * 32)
def test_reversible_tokenizer_insufficient_key():
    """Keys shorter than 32 bytes must be rejected with ValueError."""
    with pytest.raises(ValueError):
        _ = ReversibleTokenizer(key=b"5" * 10)
|
11586022
|
import sys
class Sort:
    """Facade that sorts a list with a chosen algorithm.

    ``Sort(array, algo)`` does not build an instance: ``__new__``
    dispatches to the matching module-level sorting function and
    returns its result directly.
    """

    def __new__(cls, array, algo, reverse=False):
        '''
        args:
            :array: (list) : a python list
            :algo: (str): sorting algorithm type
                values supported are:
                    1.bubble
                    2.merge
                    3.bubble_recursion
                    4.selection
                    5.quick
            :reverse: (bool) : default = False
                if True order is reversed.
        return:
            sorted array, or None (with a message on stderr) when
            ``algo`` is not one of the supported names
        '''
        # Fixes vs. the previous version: the first parameter of __new__
        # is the class (named ``cls``, not ``self``), and no attributes
        # are assigned on it -- the old code mutated the Sort class
        # object itself, sharing state between unrelated calls.
        # The dict is built at call time so the implementations (defined
        # later in this module) are resolved lazily.
        dispatch = {
            'bubble': bubble,
            'merge': merge,
            'bubble_recursion': bubble_recursion,
            'selection': selection,
            'quick': quick,
        }
        sorter = dispatch.get(algo)
        if sorter is None:
            sys.stderr.write("Error: unsupported sorting algorithm passed!")
            return None
        return sorter(array, reverse)
def bubble(array, reverse=False):
    '''
    Bubble sort: repeatedly swap adjacent out-of-order elements so the
    largest values "bubble" towards the end of the list. A pass with no
    swaps means the list is sorted and the loop stops early.

    args:
        :array: (list) -> list to be sorted (mutated in place)
        :reverse: (boolean) -> default = False;
            True returns the reversed (descending) result
    '''
    length = len(array)
    for done in range(length):
        dirty = False
        # Elements past length-done-1 are already in their final place.
        for idx in range(length - done - 1):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                dirty = True
        if not dirty:
            break
    return array[::-1] if reverse else array
def merge(array, reverse=False):
    """
    Merge sort (in place on ``array``).

    1. Divide: split the subarray into two halves.
    2. Conquer: recursively sort each half.
    3. Combine: merge the two sorted halves back into ``array``.

    The merge step uses ``<=`` so the sort is stable: equal elements
    keep their original relative order (the previous ``<`` made it
    unstable).

    args:
        :array: (list) -> list to be sorted (mutated in place)
        :reverse: (bool) -> default = False;
            True returns the reversed (descending) result
    """
    if len(array) > 1:
        mid = len(array) // 2
        left = array[:mid]      # dividing the array elements
        right = array[mid:]     # into 2 halves
        merge(left)             # sorting the first half
        merge(right)            # sorting the second half
        i = j = k = 0
        # Merge the two sorted halves back into array.
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:  # <= keeps the sort stable
                array[k] = left[i]
                i += 1
            else:
                array[k] = right[j]
                j += 1
            k += 1
        # Copy any leftover tail from either half.
        while i < len(left):
            array[k] = left[i]
            i += 1
            k += 1
        while j < len(right):
            array[k] = right[j]
            j += 1
            k += 1
    if reverse:
        return array[::-1]
    return array
def bubble_recursion(array, reverse=False):
    """
    Recursive bubble sort: perform one swap pass over ``array``, then
    recurse until a pass makes no swaps.

    Fixes vs. the previous version: the pass is bounded by
    ``len(array) - 1`` instead of relying on an ``IndexError`` for
    termination, and recursion happens at most once per pass (not once
    per swap), keeping the recursion depth O(n).

    args:
        :array: (list) -> list to be sorted (mutated in place)
        :reverse: (bool) -> default = False;
            True returns the reversed (descending) result
    """
    swapped = False
    for i in range(len(array) - 1):
        if array[i + 1] < array[i]:
            array[i], array[i + 1] = array[i + 1], array[i]
            swapped = True
    if swapped:
        bubble_recursion(array)
    if reverse:
        return array[::-1]
    return array
def selection(array, reverse=False):
    """Selection sort: grow a sorted prefix by repeatedly swapping the
    minimum of the unsorted suffix into the next position.

    The array is split conceptually into a sorted prefix and an
    unsorted suffix; each iteration extends the prefix by one element.

    args:
        :array: (list) -> list to be sorted (mutated in place)
        :reverse: (bool) -> default = False;
            True returns the reversed (descending) result
    """
    n = len(array)
    for boundary in range(n):
        smallest = boundary
        for candidate in range(boundary + 1, n):
            if array[candidate] < array[smallest]:
                smallest = candidate
        # Swap the found minimum into place.
        array[boundary], array[smallest] = array[smallest], array[boundary]
    return array[::-1] if reverse else array
def quick(array, reverse=False):
    """Quicksort entry point (sorts ``array`` in place).

    The algorithm can be broken down into three parts:
    1. Partition the list around a pivot.
    2. Recursively sort the two sides.
    3. After partitioning, the pivot already sits in its final slot, so
       no explicit join step is needed.
    """
    quick_sort(array, 0, len(array) - 1)
    return array[::-1] if reverse else array


def quick_sort(array, start, end):
    # Recurse only while the segment holds at least two elements.
    if start >= end:
        return
    pivot_pos = partition(array, start, end)
    quick_sort(array, start, pivot_pos - 1)
    quick_sort(array, pivot_pos + 1, end)


def partition(array, start, end):
    """Partition ``array[start:end + 1]`` around ``array[start]``.

    Returns the final index of the pivot; values <= pivot end up on its
    left, values >= pivot on its right.
    """
    pivot = array[start]
    low, high = start + 1, end
    while low <= high:
        # Walk high leftwards past values already on the pivot's right.
        while low <= high and array[high] >= pivot:
            high -= 1
        # Walk low rightwards past values already on the pivot's left.
        while low <= high and array[low] <= pivot:
            low += 1
        # Both pointers stopped on misplaced values: swap them; when the
        # pointers have crossed, the outer condition ends the loop.
        if low <= high:
            array[low], array[high] = array[high], array[low]
    # Drop the pivot into its final slot.
    array[start], array[high] = array[high], array[start]
    return high
|
11586086
|
import re
import logging
from os import getenv
from copy import deepcopy
from random import choice
from common.custom_requests import request_triples_wikidata
import sentry_sdk
logger = logging.getLogger(__name__)

sentry_sdk.init(getenv("SENTRY_DSN"))

# Skill-name groups used by response selection logic elsewhere in the project.
# Skills answering special-purpose / seasonal intents.
other_skills = {
    "intent_responder",
    "dff_program_y_dangerous",
    "misheard_asr",
    "christmas_new_year_skill",
    "superbowl_skill",
    "oscar_skill",
    "valentines_day_skill",
}
# Scenario-driven (multi-turn) skills.
scenario_skills = {
    "dff_movie_skill",
    "personal_info_skill",  # 'short_story_skill',
    "dff_book_skill",
    "dff_weather_skill",
    "emotion_skill",
    "dummy_skill_dialog",
    "meta_script_skill",
    "dff_coronavirus_skill",
    "small_talk_skill",
    "news_api_skill",
    "game_cooperative_skill",
}
# Retrieval-based skills.
retrieve_skills = {
    "dff_program_y",
    "alice",
    "eliza",
    "book_tfidf_retrieval",
    "entertainment_tfidf_retrieval",
    "fashion_tfidf_retrieval",
    "movie_tfidf_retrieval",
    "music_tfidf_retrieval",
    "politics_tfidf_retrieval",
    "science_technology_tfidf_retrieval",
    "sport_tfidf_retrieval",
    "animals_tfidf_retrieval",
    "convert_reddit",
    "topicalchat_convert_retrieval",
    "dff_program_y_wide",
    "knowledge_grounding_skill",
}
# Generic acknowledgement phrases the bot may emit.
okay_statements = {
    "Okay.",
    "That's cool!",
    "Interesting.",
    "Sounds interesting.",
    "Sounds interesting!",
    "OK.",
    "Cool!",
    "Thanks!",
    "Okay, thanks.",
    "I'm glad you think so!",
    "Sorry, I don't have an answer for that!",
    "Let's talk about something else.",
    "As you wish.",
    "All right.",
    "Right.",
    "Anyway.",
    "Oh, okay.",
    "Oh, come on.",
    "Really?",
    "Okay. I got it.",
    "Well, okay.",
    "Well, as you wish.",
}
# Intent names handled by service logic rather than by content skills.
service_intents = {
    "lets_chat_about",
    "tell_me_more",
    "topic_switching",
    "yes",
    "opinion_request",
    "dont_understand",
    "no",
    "stupid",
    "weather_forecast_intent",
    "doing_well",
    "tell_me_a_story",
    "choose_topic",
}
# Intents that must be answered by the mapped skill, overriding other skills.
high_priority_intents = {
    "intent_responder": {
        "cant_do",
        "exit",
        "repeat",
        "what_can_you_do",
        "what_is_your_job",
        "what_is_your_name",
        "where_are_you_from",
        "who_made_you",
    },
    "dff_grounding_skill": {"what_are_you_talking_about"},
}
low_priority_intents = {"dont_understand", "what_time"}
# Label sets emitted by each model inside the combined classification
# annotator; used to convert label lists to probability dicts and back.
combined_classes = {
    "factoid_classification": ["is_factoid", "is_conversational"],
    "emotion_classification": ["anger", "fear", "joy", "love", "sadness", "surprise", "neutral"],
    "toxic_classification": [
        "identity_hate",
        "insult",
        "obscene",
        "severe_toxic",
        "sexual_explicit",
        "threat",
        "toxic",
        "not_toxic",
    ],
    "sentiment_classification": ["positive", "negative", "neutral"],
    "cobot_topics": [
        "Phatic",
        "Other",
        "Movies_TV",
        "Music",
        "SciTech",
        "Literature",
        "Travel_Geo",
        "Celebrities",
        "Games",
        "Pets_Animals",
        "Sports",
        "Psychology",
        "Religion",
        "Weather_Time",
        "Food_Drink",
        "Politics",
        "Sex_Profanity",
        "Art_Event",
        "Math",
        "News",
        "Entertainment",
        "Fashion",
    ],
    "cobot_dialogact_topics": [
        "Other",
        "Phatic",
        "Entertainment_Movies",
        "Entertainment_Books",
        "Entertainment_General",
        "Interactive",
        "Entertainment_Music",
        "Science_and_Technology",
        "Sports",
        "Politics",
        "Inappropriate_Content",
    ],
    "cobot_dialogact_intents": [
        "Information_DeliveryIntent",
        "General_ChatIntent",
        "Information_RequestIntent",
        "User_InstructionIntent",
        "InteractiveIntent",
        "Opinion_ExpressionIntent",
        "OtherIntent",
        "ClarificationIntent",
        "Topic_SwitchIntent",
        "Opinion_RequestIntent",
        "Multiple_GoalsIntent",
    ],
}
# MIDAS dialog-act taxonomy, split into semantic vs functional request groups.
midas_classes = {
    "semantic_request": {
        "question": [
            "open_question_factual",
            "open_question_opinion",
            "open_question_personal",
            "yes_no_question",
            "clarifying_question",
        ],
        "command": ["command", "dev_command"],
        "opinion": ["appreciation", "opinion", "complaint", "comment"],
        "statement": ["statement"],
        "answer": ["other_answers", "pos_answer", "neg_answer"],
    },
    "functional_request": {
        "incomplete": ["abandon", "nonsense"],
        "social_convention": ["opening", "closing", "hold", "back-channeling"],
        "apology": [],
        "other": ["uncertain", "non_compliant", "correction"],
    },
}
# Flat label lists derived from the MIDAS taxonomy above.
MIDAS_SEMANTIC_LABELS = sum([intent_list for intent_list in midas_classes["semantic_request"].values()], [])
MIDAS_FUNCTIONAL_LABELS = sum([intent_list for intent_list in midas_classes["functional_request"].values()], [])
def join_words_in_or_pattern(words):
    """Build a regex alternation of whole words: ``(\\bw1\\b|\\bw2\\b|...)``."""
    alternatives = [r"\b%s\b" % word for word in words]
    return r"(" + r"|".join(alternatives) + r")"
def join_word_beginnings_in_or_pattern(words):
    """Build a regex alternation of word prefixes: ``(\\bw1|\\bw2|...)``."""
    prefixes = [r"\b%s" % word for word in words]
    return r"(" + r"|".join(prefixes) + r")"
def join_sentences_in_or_pattern(sents):
    """Wrap the given regex fragments into a single alternation group."""
    return r"({})".format(r"|".join(sents))
def get_skill_outputs_from_dialog(utterances, skill_name, activated=False):
    """
    Extract list of dictionaries with already formatted outputs of `skill_name` from full dialog.
    If `activated=True`, skill also should be chosen as `active_skill`;
    otherwise, empty list.

    Args:
        utterances: utterances, the first one is user's reply
        skill_name: name of target skill
        activated: if target skill should be chosen by response selector on previous step or not

    Returns:
        list of dictionaries with formatted outputs of skill
    """
    result = []
    hypotheses = []
    for uttr in utterances:
        if "active_skill" in uttr:
            orig = uttr.get("orig_text", None)
            final_response = uttr["text"] if orig is None else orig
            for hyp in hypotheses:
                # need to check text-response for skills with several hypotheses
                if hyp["skill_name"] != skill_name:
                    continue
                was_chosen = (hyp["text"] in final_response
                              and uttr["active_skill"] == skill_name)
                if activated and was_chosen:
                    result.append(hyp)
                elif not activated and hyp:
                    result.append(hyp)
        elif "hypotheses" in uttr:
            hypotheses = uttr["hypotheses"]
    return result
def transform_vbg(s):
    """
    Transform infinitive form of verb to Ving form.

    Args:
        s: verb infinitive

    Returns:
        string with required verb form
    """
    # by <NAME>
    # (The redundant function-local `import re` was removed: `re` is
    # already imported at module level.)
    s += "+VBG"
    # Irregular cases first, most specific to least specific:
    # "be" -> "being"
    s = re.sub(r"(?<![a-z])be\+VBG", "being", s)
    # consonant-vowel-consonant ending (final consonant not w/r/y):
    # double the final consonant, e.g. run -> running
    s = re.sub(r"(?<![aouiey])([^aouiey][aouiey]([^aouieywr]))\+VBG", r"\1\2ing", s)
    # -ie -> -ying, e.g. lie -> lying
    s = re.sub(r"ie\+VBG", r"ying", s)
    # -ee keeps both e's, e.g. see -> seeing
    s = re.sub(r"(ee)\+VBG", r"\1ing", s)
    # silent -e is dropped, e.g. make -> making
    s = re.sub(r"e\+VBG", r"ing", s)
    # regular case: just append -ing
    s = re.sub(r"\+VBG", "ing", s)
    return s
def get_list_of_active_skills(utterances):
    """
    Extract list of active skills names

    Args:
        utterances: utterances, the first one is user's reply

    Returns:
        list of string skill names
    """
    return [uttr["active_skill"] for uttr in utterances if "active_skill" in uttr]
def get_user_replies_to_particular_skill(utterances, skill_name):
    """
    Return user's responses to particular skill if it was active

    Args:
        utterances: dialog utterances, the first one is user's reply
        skill_name: skill whose preceding user replies are collected

    Returns:
        list of string responses
    """
    result = []
    for i, uttr in enumerate(utterances):
        # The i > 0 guard fixes a wrap-around bug: at i == 0,
        # utterances[i - 1] silently indexed the LAST utterance.
        if i > 0 and uttr.get("active_skill", "") == skill_name:
            result.append(utterances[i - 1]["text"])
    return result
# Regexp fallback for affirmative replies that the intent catcher misses.
yes_templates = re.compile(
    r"(\byes\b|\byup\b|\byep\b|\bsure\b|go ahead|\byeah\b|\bok\b|okay|^(kind of|kinda)\.?$|"
    r"^why not\.?$|^tell me\.?$|^i (agree|do|did|like|have|had|think so)\.?$)"
)
def is_yes(annotated_phrase):
    """True if the user's phrase is an agreement (classifiers or regexp)."""
    # TODO: intent catcher not catches 'yes thanks!'
    intent_yes = "yes" in get_intents(annotated_phrase, which="intent_catcher", probs=False)
    midas_yes = "pos_answer" in get_intents(annotated_phrase, which="midas", probs=False)
    regexp_yes = re.search(yes_templates, annotated_phrase.get("text", "").lower())
    return bool(intent_yes or midas_yes or regexp_yes)
# Regexp fallback for negative replies.
no_templates = re.compile(r"(\bno\b|\bnot\b|no way|don't|no please|i disagree|^neither.?$)")
# Phrases meaning "I don't know / you decide".
DONOTKNOW_LIKE = [r"(i )?(do not|don't) know", "you (choose|decide|pick up)", "no idea"]
DONOTKNOW_LIKE_PATTERN = re.compile(join_sentences_in_or_pattern(DONOTKNOW_LIKE), re.IGNORECASE)
def is_donot_know(annotated_phrase):
    """True if the phrase is an "I don't know / you decide" style answer."""
    return bool(DONOTKNOW_LIKE_PATTERN.search(annotated_phrase.get("text", "")))
def is_no_intent(annotated_phrase):
    """True if the intent catcher detected "no" and the phrase is not an
    "I don't know"-style answer."""
    no_detected = "no" in get_intents(annotated_phrase, which="intent_catcher", probs=False)
    midas_no_detected = False  # "neg_answer" in get_intents(annotated_phrase, which='midas', probs=False)
    if is_donot_know(annotated_phrase):
        return False
    return bool(no_detected or midas_no_detected)
def is_no(annotated_phrase):
    """True if the user's phrase is a refusal (classifiers or regexp).

    A bare "horrible" is excluded (a known intent-catcher false
    positive), as are "I don't know"-style and simultaneously
    affirmative phrases.
    """
    # TODO: intent catcher thinks that horrible is no intent'
    no_detected = "no" in get_intents(annotated_phrase, which="intent_catcher", probs=False)
    midas_no_detected = "neg_answer" in get_intents(annotated_phrase, which="midas", probs=False)
    normalized = annotated_phrase.get("text", "").lower().strip().replace(".", "")
    is_not_horrible = normalized != "horrible"
    no_regexp_detected = re.search(no_templates, annotated_phrase.get("text", "").lower())
    is_not_idontknow = not is_donot_know(annotated_phrase)
    _yes = is_yes(annotated_phrase)
    return bool(
        is_not_horrible
        and (no_detected or midas_no_detected or no_regexp_detected)
        and is_not_idontknow
        and not _yes
    )
def is_question(text):
    """True if the text contains a question mark."""
    return text.find("?") != -1
def substitute_nonwords(text):
    """Collapse every run of non-word characters into a single space and trim."""
    collapsed = re.sub(r"\W+", " ", text)
    return collapsed.strip()
def get_intent_name(text):
    """Return the normalized intent name after the ``#+#`` marker, or None
    when the marker is absent."""
    marker = "#+#"
    if marker not in text:
        return None
    _, _, raw_name = text.rpartition(marker)
    return re.sub(r"\W", " ", raw_name.lower()).strip()
# "do you like ..." -- the bot is being asked for its opinion.
OPINION_REQUEST_PATTERN = re.compile(
    r"(don't|do not|not|are not|are|do)?\s?you\s"
    r"(like|dislike|adore|hate|love|believe|consider|get|know|taste|think|"
    r"recognize|sure|understand|feel|fond of|care for|fansy|appeal|suppose|"
    r"imagine|guess)",
    re.IGNORECASE,
)
# "i like ..." -- the user is expressing their own opinion.
OPINION_EXPRESSION_PATTERN = re.compile(
    r"\bi (don't|do not|not|am not|'m not|am|do)?\s?"
    r"(like|dislike|adore|hate|love|believe|consider|get|know|taste|think|"
    r"recognize|sure|understand|feel|fond of|care for|fansy|appeal|suppose|"
    r"imagine|guess)",
    re.IGNORECASE,
)
def is_opinion_request(annotated_utterance):
    """True if the utterance asks for an opinion (classifier intents, or
    the opinion-request regexp combined with a question mark)."""
    intents = get_intents(annotated_utterance, which="all", probs=False)
    by_classifier = any(
        intent in intents for intent in ["Opinion_RequestIntent", "open_question_opinion"]
    )
    uttr_text = annotated_utterance.get("text", "")
    by_regexp = OPINION_REQUEST_PATTERN.search(uttr_text) and "?" in uttr_text
    return bool(by_classifier or by_regexp)
def is_opinion_expression(annotated_utterance):
    """True if the utterance expresses the user's own opinion
    (classifier intents or the opinion-expression regexp)."""
    all_intents = get_intents(annotated_utterance, which="all")
    by_classifier = any(
        intent in all_intents for intent in ["opinion", "Opinion_ExpressionIntent"]
    )
    uttr_text = annotated_utterance.get("text", "")
    return bool(by_classifier or OPINION_EXPRESSION_PATTERN.search(uttr_text))
def get_outputs_with_response_from_dialog(utterances, response, activated=False):
    """
    Extract list of dictionaries with already formatted outputs of different skills from full dialog
    which replies containe `response`.
    If `activated=True`, skill also should be chosen as `active_skill`;
    otherwise, empty list.

    Args:
        utterances: utterances, the first one is user's reply
        response: target text to search among bot utterances
        activated: if target skill should be chosen by response selector on previous step or not

    Returns:
        list of dictionaries with formatted outputs of skill
    """
    result = []
    hypotheses = []
    for uttr in utterances:
        if "active_skill" in uttr:
            final_response = uttr["text"]
            for hyp in hypotheses:
                # need to check text-response for skills with several hypotheses
                if response not in hyp["text"]:
                    continue
                if activated and hyp["text"] in final_response and hyp:
                    result.append(hyp)
                elif not activated and hyp:
                    result.append(hyp)
        elif "hypotheses" in uttr:
            hypotheses = uttr["hypotheses"]
    return result
def get_not_used_template(used_templates, all_templates, any_if_no_available=True):
    """
    Choose a template that has not been used yet.

    Args:
        used_templates: list of templates already used in the dialog
        all_templates: list of all available templates
        any_if_no_available: when everything was used, fall back to a
            random template instead of returning an empty string

    Returns:
        string template
    """
    fresh = set(all_templates) - set(used_templates)
    if fresh:
        return choice(list(fresh))
    if any_if_no_available:
        return choice(all_templates)
    return ""
def get_all_not_used_templates(used_templates, all_templates):
    """
    Return every template that has not been used yet.

    Args:
        used_templates: list of templates already used in the dialog
        all_templates: list of all available templates

    Returns:
        list of unused templates (order not guaranteed)
    """
    used = set(used_templates)
    return [template for template in set(all_templates) if template not in used]
def _probs_to_labels(answer_probs, max_proba=True, threshold=0.5):
answer_labels = [label for label in answer_probs if answer_probs[label] > threshold]
if not answer_labels and max_proba:
answer_labels = [key for key in answer_probs if answer_probs[key] == max(answer_probs.values())]
return answer_labels
def _labels_to_probs(answer_labels, all_labels):
answer_probs = dict()
for label in all_labels:
if label in answer_labels:
answer_probs[label] = 1
else:
answer_probs[label] = 0
return answer_probs
def _get_combined_annotations(annotated_utterance, model_name):
    """Probs and labels for ``model_name`` taken from the combined
    classifier annotation; failures are reported to sentry/logs and
    yield empty outputs."""
    answer_probs, answer_labels = {}, []
    try:
        annotations = annotated_utterance["annotations"]
        combined_annotations = annotations.get("combined_classification", {})
        if combined_annotations and isinstance(combined_annotations, list):
            combined_annotations = combined_annotations[0]
        if model_name not in combined_annotations:
            raise Exception(f"Not found Model name {model_name} in combined annotations {combined_annotations}")
        answer_probs = combined_annotations[model_name]
        # Replicates the toxic special case: no argmax fallback unless
        # factoid_classification is also present in the combined output.
        use_max = not (model_name == "toxic_classification"
                       and "factoid_classification" not in combined_annotations)
        answer_labels = _probs_to_labels(answer_probs, max_proba=use_max, threshold=0.5)
    except Exception as e:
        sentry_sdk.capture_exception(e)
        logger.exception(e)
    return answer_probs, answer_labels
def _process_text(answer):
if isinstance(answer, dict) and "text" in answer:
return answer["text"]
else:
return answer
def _process_old_sentiment(answer):
    """Convert legacy ``[label, score]`` sentiment output into a probs dict.

    The detected label keeps its score; every other sentiment class gets
    half of the remaining probability mass. Non-legacy inputs are logged
    and returned unchanged.
    """
    if isinstance(answer[0], str) and isinstance(answer[1], float):
        # support old sentiment output
        detected_label, score = answer[0], answer[1]
        leftover = 0.5 * (1 - score)
        return {
            key: (score if key == detected_label else leftover)
            for key in combined_classes["sentiment_classification"]
        }
    logger.warning("_process_old_sentiment got file with an output that is not old-style")
    return answer
def _get_plain_annotations(annotated_utterance, model_name):
    """Probs and labels for ``model_name`` from its standalone annotator.

    Handles both label-list and probability-dict annotator outputs; any
    failure is logged and yields empty outputs.
    """
    answer_probs, answer_labels = {}, []
    try:
        answer = _process_text(annotated_utterance["annotations"][model_name])
        if isinstance(answer, list):
            if model_name == "sentiment_classification":
                answer_probs = _process_old_sentiment(answer)
                answer_labels = _probs_to_labels(answer_probs, max_proba=True, threshold=0.5)
            else:
                answer_labels = answer
                answer_probs = _labels_to_probs(answer_labels, combined_classes[model_name])
        else:
            answer_probs = answer
            # this function is only for plain annotations (when
            # toxic_classification is a separate annotator)
            use_max = model_name != "toxic_classification"
            answer_labels = _probs_to_labels(answer_probs, max_proba=use_max, threshold=0.5)
    except Exception as e:
        logger.warning(e)
    return answer_probs, answer_labels
def print_combined(combined_output):
    """Log the combined classifier output with probabilities rounded to
    two decimal places (works on a deep copy; the input is untouched)."""
    combined_output = deepcopy(combined_output)
    for record in combined_output:
        for probs in record.values():
            for class_ in probs:
                probs[class_] = round(probs[class_], 2)
    logger.info(f"Combined classifier output is {combined_output}")
def _get_etc_model(annotated_utterance, model_name, probs, default_probs, default_labels):
    """Fetch ``model_name`` annotations, preferring the standalone
    annotator, then the combined classifier, then the defaults.

    Args:
        annotated_utterance: dictionary with annotated utterance, or annotations
        model_name: classifier name to look up
        probs: return probabilities or not
        default_probs, default_labels: values used when nothing is found

    Returns:
        probability dict when ``probs`` is truthy, otherwise a label list
    """
    try:
        annotations = annotated_utterance.get("annotations", {})
        if model_name in annotations:
            answer_probs, answer_labels = _get_plain_annotations(annotated_utterance, model_name=model_name)
        elif "combined_classification" in annotations:
            answer_probs, answer_labels = _get_combined_annotations(annotated_utterance, model_name=model_name)
        else:
            answer_probs, answer_labels = default_probs, default_labels
    except Exception as e:
        logger.exception(e, stack_info=True)
        answer_probs, answer_labels = default_probs, default_labels
    return answer_probs if probs else answer_labels
def get_toxic(annotated_utterance, probs=True, default_probs=None, default_labels=None):
    """Toxicity annotations for the utterance.

    Args:
        annotated_utterance: dictionary with annotated utterance, or annotations
        probs: return probabilities or not
        default_probs, default_labels: fallbacks; empty dict/list when None

    Returns:
        dictionary with toxic probabilities if ``probs``, else toxic labels
    """
    if default_probs is None:
        default_probs = {}
    if default_labels is None:
        default_labels = []
    return _get_etc_model(
        annotated_utterance,
        "toxic_classification",
        probs=probs,
        default_probs=default_probs,
        default_labels=default_labels,
    )
def get_factoid(annotated_utterance, probs=True, default_probs=None, default_labels=None):
    """Factoid-vs-conversational annotations for the utterance.

    Args:
        annotated_utterance: dictionary with annotated utterance, or annotations
        probs: return probabilities or not
        default_probs, default_labels: fallbacks; when None the utterance
            is treated as conversational

    Returns:
        dictionary with factoid probabilities if ``probs``, else factoid labels
    """
    if default_probs is None:
        default_probs = {"is_conversational": 1}
    if default_labels is None:
        default_labels = ["is_conversational"]
    return _get_etc_model(
        annotated_utterance,
        "factoid_classification",
        probs=probs,
        default_probs=default_probs,
        default_labels=default_labels,
    )
def get_sentiment(annotated_utterance, probs=True, default_probs=None, default_labels=None):
    """Sentiment annotations for the utterance.

    Args:
        annotated_utterance: dictionary with annotated utterance, or annotations
        probs: return probabilities or not
        default_probs, default_labels: fallbacks; when None the utterance
            is treated as neutral

    Returns:
        dictionary with sentiment probabilities if ``probs``, else sentiment labels
    """
    if default_probs is None:
        default_probs = {"positive": 0, "negative": 0, "neutral": 1}
    if default_labels is None:
        default_labels = ["neutral"]
    return _get_etc_model(
        annotated_utterance,
        "sentiment_classification",
        probs=probs,
        default_probs=default_probs,
        default_labels=default_labels,
    )
def get_emotions(annotated_utterance, probs=True, default_probs=None, default_labels=None):
    """Emotion annotations for the utterance.

    Args:
        annotated_utterance: dictionary with annotated utterance, or annotations
        probs: return probabilities or not
        default_probs, default_labels: fallbacks; when None the utterance
            is treated as emotionally neutral

    Returns:
        dictionary with emotion probabilities if ``probs``, else emotion labels
    """
    if default_probs is None:
        default_probs = {"anger": 0, "fear": 0, "joy": 0, "love": 0, "sadness": 0, "surprise": 0, "neutral": 1}
    if default_labels is None:
        default_labels = ["neutral"]
    return _get_etc_model(
        annotated_utterance,
        "emotion_classification",
        probs=probs,
        default_probs=default_probs,
        default_labels=default_labels,
    )
def get_topics(annotated_utterance, probs=False, default_probs=None, default_labels=None, which="all"):
    """Function to get topics from particular annotator or all detected.

    Args:
        annotated_utterance: dictionary with annotated utterance
        probs: if False we return labels, otherwise we return probs
        default_probs, default_labels: default probabilities and labels we return
        which: which topics to return.
            'all' means topics by `cobot_topics` and `cobot_dialogact_topics`,
            'cobot_topics' means topics by `cobot_topics`,
            'cobot_dialogact_topics' means topics by `cobot_dialogact_topics`.

    Returns:
        list of topics
    """
    default_probs = {} if default_probs is None else default_probs
    default_labels = [] if default_labels is None else default_labels
    annotations = annotated_utterance.get("annotations", {})
    # --- cobot_topics: prefer the standalone annotator ...
    cobot_topics_probs, cobot_topics_labels = {}, []
    if "cobot_topics" in annotations:
        cobot_topics_labels = _process_text(annotations.get("cobot_topics", {}))
        cobot_topics_probs = _labels_to_probs(cobot_topics_labels, combined_classes.get("cobot_topics", {}))
    if "combined_classification" in annotations and not cobot_topics_labels:
        # ... and fall back to the combined classifier when the
        # standalone one is absent or empty.
        cobot_topics_probs, cobot_topics_labels = _get_combined_annotations(
            annotated_utterance, model_name="cobot_topics"
        )
        cobot_topics_labels = _process_text(cobot_topics_labels)
        if not cobot_topics_probs:
            cobot_topics_probs = _labels_to_probs(cobot_topics_labels, combined_classes.get("cobot_topics", {}))
    # --- cobot_dialogact topics: two legacy annotation layouts, then the
    # combined-classifier fallback.
    cobot_da_topics_probs, cobot_da_topics_labels = {}, []
    if "cobot_dialogact" in annotations and "topics" in annotations["cobot_dialogact"]:
        cobot_da_topics_labels = annotated_utterance["annotations"]["cobot_dialogact"]["topics"]
    elif "cobot_dialogact_topics" in annotations:
        cobot_da_topics_labels = annotated_utterance["annotations"]["cobot_dialogact_topics"]
    if "combined_classification" in annotations and not cobot_da_topics_labels:
        cobot_da_topics_probs, cobot_da_topics_labels = _get_combined_annotations(
            annotated_utterance, model_name="cobot_dialogact_topics"
        )
        cobot_da_topics_labels = _process_text(cobot_da_topics_labels)
        if not cobot_da_topics_probs:
            cobot_da_topics_probs = _labels_to_probs(cobot_da_topics_labels, combined_classes["cobot_dialogact_topics"])
    # Select the requested slice of the two annotator families.
    if which == "all":
        answer_labels = cobot_topics_labels + cobot_da_topics_labels
        answer_probs = {**cobot_topics_probs, **cobot_da_topics_probs}
    elif which == "cobot_topics":
        answer_probs, answer_labels = cobot_topics_probs, cobot_topics_labels
    elif which == "cobot_dialogact_topics":
        answer_probs, answer_labels = cobot_da_topics_probs, cobot_da_topics_labels
    else:
        logger.exception(f"Unknown input type in get_topics: {which}")
        answer_probs, answer_labels = default_probs, default_labels
    try:
        # Deliberate validation-with-fallback: an empty label list raises
        # AssertionError, which is caught below so the defaults are
        # returned and a trimmed annotations snapshot is logged.
        assert answer_labels, annotations
    except Exception:
        annotations_to_log = {
            key: value
            for key, value in annotations.items()
            if key in ["cobot_dialogact", "combined_classification", "cobot_topics"]
        }
        logger.warning(f"Not answer_labels with payload {annotations_to_log} which {which}")
        answer_probs, answer_labels = default_probs, default_labels
    if probs:
        return answer_probs
    else:
        return answer_labels
def get_intents(annotated_utterance, probs=False, default_probs=None, default_labels=None, which="all"):
    """Function to get intents from particular annotator or all detected.
    Args:
        annotated_utterance: dictionary with annotated utterance
        probs: if False we return labels, otherwise we return probs
        default_probs, default_labels: default probabilities and labels we return
        which: which intents to return:
            'all' means intents detected by `intent_catcher`,
            `cobot_dialogact_intents` and `midas_classification`.
            'intent_catcher' means intents detected by `intent_catcher`.
            'cobot_dialogact_intents' means intents detected by `cobot_dialogact_intents`.
            'midas' means intents detected by `midas_classification`.
    Returns:
        list of intents (or a dict of probabilities when `probs` is True)
    """
    default_probs = {} if default_probs is None else default_probs
    default_labels = [] if default_labels is None else default_labels
    annotations = annotated_utterance.get("annotations", {})
    # intent_catcher output: {intent_name: {"detected": 0/1, ...}};
    # every detected intent gets probability 1
    intents = annotations.get("intent_catcher", {})
    detected_intents = [k for k, v in intents.items() if v.get("detected", 0) == 1]
    detected_intent_probs = {key: 1 for key in detected_intents}
    midas_intent_probs = annotations.get("midas_classification", {})
    if isinstance(midas_intent_probs, dict) and midas_intent_probs:
        # dict format: a single {label: prob} mapping for the whole utterance.
        # Take the top semantic label(s) and top functional label(s) separately.
        semantic_midas_probs = {k: v for k, v in midas_intent_probs.items() if k in MIDAS_SEMANTIC_LABELS}
        functional_midas_probs = {k: v for k, v in midas_intent_probs.items() if k in MIDAS_FUNCTIONAL_LABELS}
        if semantic_midas_probs:
            max_midas_semantic_prob = max(semantic_midas_probs.values())
        else:
            max_midas_semantic_prob = 0.0
        if functional_midas_probs:
            max_midas_functional_prob = max(functional_midas_probs.values())
        else:
            max_midas_functional_prob = 0.0
        midas_semantic_intent_labels = [k for k, v in semantic_midas_probs.items() if v == max_midas_semantic_prob]
        midas_functional_intent_labels = [
            k for k, v in functional_midas_probs.items() if v == max_midas_functional_prob
        ]
        midas_intent_labels = midas_semantic_intent_labels + midas_functional_intent_labels
    elif isinstance(midas_intent_probs, list):
        if midas_intent_probs:
            # now it's a list of dictionaries. length of list is n sentences
            midas_intent_labels = []
            for midas_sent_probs in midas_intent_probs:
                max_midas_sent_prob = max(midas_sent_probs.values())
                midas_intent_labels += [k for k, v in midas_sent_probs.items() if v == max_midas_sent_prob]
            _midas_intent_probs = deepcopy(midas_intent_probs)
            # collapse the per-sentence dicts into one dict keeping each class's max probability
            midas_intent_probs = {}
            class_names = list(set(sum([list(resp.keys()) for resp in _midas_intent_probs], [])))
            for class_name in class_names:
                max_proba = max([resp.get(class_name, 0.0) for resp in _midas_intent_probs])
                midas_intent_probs[class_name] = max_proba
        else:
            midas_intent_probs = {}
            midas_intent_labels = []
    else:
        midas_intent_labels = []
    # cobot dialog-act intents come from the dedicated annotators when present,
    # otherwise fall back to the combined classifier
    cobot_da_intent_probs, cobot_da_intent_labels = {}, []
    if "cobot_dialogact" in annotations and "intents" in annotations["cobot_dialogact"]:
        cobot_da_intent_labels = annotated_utterance["annotations"]["cobot_dialogact"]["intents"]
    elif "cobot_dialogact_intents" in annotations:
        cobot_da_intent_labels = annotated_utterance["annotations"]["cobot_dialogact_intents"]
    if "combined_classification" in annotations and not cobot_da_intent_labels:
        cobot_da_intent_probs, cobot_da_intent_labels = _get_combined_annotations(
            annotated_utterance, model_name="cobot_dialogact_intents"
        )
    cobot_da_intent_labels = _process_text(cobot_da_intent_labels)
    if not cobot_da_intent_probs:
        cobot_da_intent_probs = _labels_to_probs(cobot_da_intent_labels, combined_classes["cobot_dialogact_intents"])
    if which == "all":
        answer_probs = {**detected_intent_probs, **cobot_da_intent_probs, **midas_intent_probs}
        answer_labels = detected_intents + cobot_da_intent_labels + midas_intent_labels
    elif which == "intent_catcher":
        answer_probs, answer_labels = detected_intent_probs, detected_intents
    elif which == "cobot_dialogact_intents":
        answer_probs, answer_labels = cobot_da_intent_probs, cobot_da_intent_labels
    elif which == "midas":
        answer_probs, answer_labels = midas_intent_probs, midas_intent_labels
    else:
        logger.warning(f"Unknown type in get_intents {which}")
        answer_probs, answer_labels = default_probs, default_labels
    if which not in ["intent_catcher", "midas"]:
        # sanity check: for these sources at least one label is expected;
        # fall back to the defaults and log the relevant annotations otherwise
        try:
            assert answer_labels, annotations
        except Exception:
            annotations_to_log = {
                key: value
                for key, value in annotations.items()
                if key
                in [
                    "intent_catcher",
                    "cobot_dialogact",
                    "cobot_dialogact_intents",
                    "combined_classification",
                    "midas_classification",
                ]
            }
            logger.warning(f"Not answer_labels with payload {annotations_to_log} which {which}")
            answer_probs, answer_labels = default_probs, default_labels
    if probs:
        return answer_probs
    else:
        return answer_labels
COBOT_ENTITIES_SKIP_LABELS = ["anaphor"]
def get_entities(annotated_utterance, only_named=False, with_labels=False):
    """Return entities from the utterance annotations.

    With only_named=True, entities come from the `ner` annotation;
    otherwise from `entity_detection` (falling back to `spacy_nounphrases`).
    with_labels=True returns the full entity dicts, otherwise just texts.
    """
    annotations = annotated_utterance.get("annotations", {})
    if only_named:
        # `ner` is a list (one item per sentence) of lists of entity dicts
        flat = [ent for sentence in annotations.get("ner", []) for ent in sentence]
        return flat if with_labels else [ent["text"] for ent in flat]
    if "entity_detection" in annotations:
        labelled = annotations["entity_detection"].get("labelled_entities", [])
        # skip some labels
        kept = [ent for ent in labelled if ent["label"] not in COBOT_ENTITIES_SKIP_LABELS]
        return kept if with_labels else [ent["text"] for ent in kept]
    nounphrases = annotations.get("spacy_nounphrases", [])
    if with_labels:
        # actually there are no labels for cobot nounphrases
        # so, let's make it as for cobot_entities format
        return [{"text": ent, "label": "misc"} for ent in nounphrases]
    return nounphrases if nounphrases is not None else []
def get_named_persons(annotated_utterance):
    """Collect person names from cobot_entities and NER annotations (deduplicated)."""
    persons = []
    annotations = annotated_utterance["annotations"]
    if "cobot_entities" in annotations:
        persons.extend(
            ent["text"]
            for ent in get_entities(annotated_utterance, only_named=False, with_labels=True)
            if ent["label"] == "person"
        )
    if "ner" in annotations:
        persons.extend(
            ent["text"]
            for ent in get_entities(annotated_utterance, only_named=True, with_labels=True)
            if ent["type"] == "PER"
        )
    return list(set(persons))
def get_named_locations(annotated_utterance):
    """Collect location names, preferring cobot_entities and falling back to NER.

    NER locations are dropped if they are a substring of a non-location cobot
    entity; "japan" is force-added because NER misses it in the utterance text.
    """
    annotations = annotated_utterance["annotations"]
    cobot_entities = get_entities(annotated_utterance, only_named=False, with_labels=True)
    ner_entities = get_entities(annotated_utterance, only_named=True, with_labels=True)
    locations = []
    if "cobot_entities" in annotations:
        locations = [ent["text"] for ent in cobot_entities if ent["label"] == "location"]
    if len(locations) == 0 and "ner" in annotations:
        for ent in ner_entities:
            if ent["type"] != "LOC" or ent["text"] == "alexa":
                continue
            # skip NER hits that are just a fragment of a non-location cobot entity
            part_of_other_entity = any(
                ent["text"] in cobot_ent["text"] and cobot_ent["label"] != "location"
                for cobot_ent in cobot_entities
            )
            if not part_of_other_entity:
                locations.append(ent["text"])
    locations = list(set(locations))
    if re.search(r"\bjapan\b", annotated_utterance["text"], re.IGNORECASE) and "japan" not in locations:
        # NER does not catch this country at all!
        locations.append("japan")
    return locations
def get_raw_entity_names_from_annotations(annotations):
    """
    Args:
        annotations: annotations dictionary of an utterance
    Returns:
        Wikidata entity ids from the `entity_linking` annotation
        (empty list when absent or on an unexpected format)
    """
    raw_el_output = annotations.get("entity_linking", [{}])
    entities = []
    try:
        if raw_el_output:
            # two supported formats: a dict with "entity_ids", or a nested list
            first = raw_el_output[0]
            if isinstance(first, dict):
                entities = first.get("entity_ids", [])
            if isinstance(first, list):
                entities = first[0]
    except Exception as e:
        error_message = f"Wrong entity linking output format {raw_el_output} : {e}"
        sentry_sdk.capture_exception(e)
        logger.exception(error_message)
    return entities
def get_entity_names_from_annotations(annotated_utterance, stopwords=None, default_entities=None):
    """Gather candidate entity names from ner, spacy_nounphrases and wiki_parser.

    Args:
        annotated_utterance: annotated utterance
        stopwords: words to filter short/uninformative candidates with
        default_entities: entity names for which the whole utterance text counts
    Returns:
        deduplicated list of entity names
    """
    if default_entities is None:
        default_entities = []
    stopwords = stopwords or []
    full_text = annotated_utterance.get("text", "").lower()
    annotations = annotated_utterance.get("annotations", {})
    candidates = [full_text] if full_text in default_entities else []
    for ner_sentence in annotations.get("ner", []):
        if ner_sentence and "text" in ner_sentence[0]:
            candidates.append(ner_sentence[0]["text"])
    candidates.extend(annotations.get("spacy_nounphrases", []))
    for wiki_dict in annotations.get("wiki_parser", [{}]):
        candidates.extend(wiki_dict)
    # keep an entity if at least one of its tokens is long (>= 5 chars) or not a stopword
    kept = [
        entity
        for entity in candidates
        if any(len(token) >= 5 or token not in stopwords for token in entity.split(" "))
    ]
    return list(set(kept))
def entity_to_label(entity):
    """
    Args:
        entity: Wikidata entity for which we need to receive the label.
            It should be a string: first letter Q, the rest digits, like Q5321
    Returns:
        label for this entity; if the entity is in the wrong format we assume
        it is already a label, log the problem and return it unchanged
    """
    logger.debug(f"Calling entity_to_label for {entity}")
    no_entity = not entity
    wrong_entity_type = not isinstance(entity, str)
    wrong_entity_format = entity and (entity[0] != "Q" or any(j not in "0123456789" for j in entity[1:]))
    if no_entity or wrong_entity_type or wrong_entity_format:
        warning_text = f"Wrong entity format. We assume {entity} to be label but check the code"
        sentry_sdk.capture_exception(Exception(warning_text))
        logger.exception(warning_text)
        return entity
    label = ""
    labels = request_triples_wikidata("find_label", [(entity, "")])
    try:
        first_label = labels[0]
        # labels may come back wrapped in double quotes; extract the quoted part
        label = first_label.split('"')[1] if '"' in first_label else first_label
        logger.debug(f"Answer {label}")
    except Exception as e:
        sentry_sdk.capture_exception(e)
        logger.exception(Exception(e, "Exception in conversion of labels {labels}"))
    return label
def get_types_from_annotations(annotations, types, tocheck_relation="occupation"):
    """
    Args:
        annotations: annotations of utterance
        types: type ids among which we look for a match
        tocheck_relation: relation we want to check
    Returns:
        (entity name, matching type names, mismatching type names) for the first
        entity whose `tocheck_relation` has a type in `types`;
        (None, None, None) when nothing matches or on a processing error
    """
    wp_annotations = annotations.get("wiki_parser", {})
    if isinstance(wp_annotations, list) and wp_annotations:  # support 2 different formats
        wp_annotations = wp_annotations[0]
    try:
        topic_entities = wp_annotations.get("topic_skill_entities_info", {})
        for entity in topic_entities:
            for relation in topic_entities[entity]:
                if relation == tocheck_relation:
                    # relation values are pairs: (type id, type name)
                    type_to_typename = {j[0]: j[1] for j in topic_entities[entity][relation]}
                    found_types = type_to_typename.keys()
                    matching_types = [type_to_typename[k] for k in set(found_types) & set(types)]
                    mismatching_types = [type_to_typename[k] for k in found_types if k not in types]
                    if matching_types:
                        return entity, matching_types, mismatching_types
        logger.warning("Relation to check not found")
    except Exception as e:
        sentry_sdk.capture_exception(e)
        logger.exception(Exception(e, f"Exception in processing wp annotations {wp_annotations}"))
    return None, None, None
ANYTHING_EXCEPT_OF_LETTERS_AND_SPACE_COMPILED = re.compile(r"[^a-zA-Z ]")
MULTI_SPACE_COMPILED = re.compile(r"\s+")
def clean_entities(entities):
    """Lowercase, strip non-letters, collapse whitespace; return a flat token list."""
    tokens = []
    for entity in entities:
        cleaned = ANYTHING_EXCEPT_OF_LETTERS_AND_SPACE_COMPILED.sub(" ", entity.lower())
        cleaned = MULTI_SPACE_COMPILED.sub(" ", cleaned).strip()
        tokens.extend(cleaned.split())
    return tokens
def get_common_tokens_in_lists_of_strings(list_of_strings_0, list_of_strings_1):
    """
    Clean strings removing anything except of letters and spaces, split every string to tokens by spaces,
    find common tokens for two lists of strings.
    """
    tokens_0 = clean_entities(deepcopy(list_of_strings_0))
    tokens_1 = clean_entities(deepcopy(list_of_strings_1))
    return list(set(tokens_0).intersection(set(tokens_1)))
SYMBOLS_EXCEPT_LETTERS_AND_DIGITS = re.compile(r"[^a-zA-Z0-9\-_ ]")
DOUBLE_SPACES = re.compile(r"\s+")
def replace_symbols_except_letters_and_digits(s):
    """Replace chars other than letters, digits, '-', '_', space with a space; collapse runs of whitespace."""
    cleaned = SYMBOLS_EXCEPT_LETTERS_AND_DIGITS.sub(" ", s)
    return DOUBLE_SPACES.sub(" ", cleaned).strip()
def remove_punctuation_from_dict_keys(element):
    """Recursively rebuild dicts/lists, cleaning punctuation out of every dict key.

    Values reached through a dict are deep-copied; list items are kept as-is.
    Non-container input is returned unchanged.
    """
    if isinstance(element, dict):
        cleaned = {}
        for dict_key, value in element.items():
            if isinstance(value, (dict, list)):
                value = remove_punctuation_from_dict_keys(value)
            cleaned[replace_symbols_except_letters_and_digits(dict_key)] = deepcopy(value)
        return cleaned
    if isinstance(element, list):
        return [
            remove_punctuation_from_dict_keys(item) if isinstance(item, (dict, list)) else item
            for item in element
        ]
    return element
PERSONAL_PRONOUNS = re.compile(
    r"\b(i|you|he|she|it|we|they|me|my|him|her|us|them|its|mine|your|yours|his|hers|ours|theirs|myself|yourself|himself"
    r"|herself|itself|ourselves|themselves|their)\b",
    re.IGNORECASE,
)
def find_first_complete_sentence(sentences):
    """Return the first sentence with no personal pronouns, or None if every sentence has one."""
    for candidate in sentences:
        if not PERSONAL_PRONOUNS.search(candidate):
            return candidate
    return None
def is_toxic_or_badlisted_utterance(annotated_utterance):
    """True if the utterance is classified as toxic or contains badlisted words."""
    toxic_labels = get_toxic(annotated_utterance, probs=False)
    # a "not_toxic" label (or no labels at all) means the utterance is fine
    is_toxic = bool(toxic_labels) and "not_toxic" not in toxic_labels
    default_badlist = {"bad_words": False}
    badlist_result = annotated_utterance.get("annotations", {}).get("badlisted_words", default_badlist)
    return is_toxic or any(badlist_result.get(bad, False) for bad in ["bad_words", "inappropriate", "profanity"])
FACTOID_PATTERNS = re.compile(
    r"^(do you know |((can |could )you )tell me )?(please )?"
    r"((what|who|which|where) (is|are|was|were)\b|how to\b|when)",
    re.IGNORECASE,
)
COUNTER_FACTOID_PATTERNS = re.compile(r"^(what|who|which|where) (is|are|was|were)( that|[\.\?]$)\b", re.IGNORECASE)
def is_special_factoid_question(annotated_utterance):
    """True for factoid questions ("what is ...", "tell me who ...") that
    do not refer back to the dialog via personal pronouns."""
    uttr_text = annotated_utterance.get("text", "")
    found = FACTOID_PATTERNS.search(uttr_text)
    if not found or COUNTER_FACTOID_PATTERNS.search(uttr_text):
        return False
    # remove first question like part
    rest_string = uttr_text[uttr_text.find(found[0]) + len(found[0]) :].strip()
    # if any personal pronouns - not our case
    return not PERSONAL_PRONOUNS.search(rest_string)
# Boilerplate lead-ins that fact services prepend to retrieved facts
# ("according to wikipedia, ...", "here's a fact about ...");
# presumably used to strip these prefixes — confirm at call sites.
FACTS_EXTRA_WORDS = re.compile(
    r"(this might answer your question[:\,]? "
    r"|(according to|from) (wikipedia|wikihow)[:\,]? "
    r"|here's (something|what) I found (from|on) [a-zA-Z0-9\-\.]+:"
    r"|here's a fact about [a-zA-Z0-9\- \,]+\.)",
    re.IGNORECASE,
)
|
11586178
|
import sys
import nltk
from nltk.stem.porter import *
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
import xml.etree.cElementTree as ET
from collections import Counter
import string
from sklearn.feature_extraction.text import TfidfVectorizer
import zipfile
import os
def gettext(xmltext) -> str:
    """
    Parse xmltext and return the text from <title> and <text> tags
    """
    # NOTE(review): incomplete — only ASCII-normalizes the input and returns
    # nothing yet; the <title>/<text> extraction still needs to be written.
    xmltext = xmltext.encode('ascii', 'ignore') # ensure there are no weird char
def tokenize(text) -> list:
    """
    Tokenize text and return a non-unique list of tokenized words
    found in the text. Normalize to lowercase, strip punctuation,
    remove stop words, drop words of length < 3, strip digits.
    """
    text = text.lower()
    # strip punctuation, digits and \r\t\n in one pass
    # NOTE(review): `re` is not imported explicitly in this file — it only
    # works if the star import leaks it; confirm or add `import re`.
    text = re.sub('[' + string.punctuation + '0-9\\r\\t\\n]', ' ', text)
    tokens = nltk.word_tokenize(text)
    tokens = [w for w in tokens if len(w) > 2] # ignore a, an, to, at, be, ...
    # NOTE(review): stop-word removal and the return statement are still missing
    ...
def stemwords(words) -> list:
    """
    Given a list of tokens/words, return a new list with each word
    stemmed using a PorterStemmer.
    """
    # Was an empty stub returning None despite the declared `-> list`.
    # PorterStemmer is in scope via `from nltk.stem.porter import *`.
    stemmer = PorterStemmer()
    return [stemmer.stem(word) for word in words]
def tokenizer(text) -> list:
    # Composition of tokenize and stemwords: normalize/tokenize, then stem.
    return stemwords(tokenize(text))
def compute_tfidf(corpus:dict) -> TfidfVectorizer:
    """
    Create and return a TfidfVectorizer object after training it on
    the list of articles pulled from the corpus dictionary. Meaning,
    call fit() on the list of document strings, which figures out
    all the inverse document frequencies (IDF) for use later by
    the transform() function. The corpus argument is a dictionary
    mapping file name to xml text.
    """
    # NOTE(review): not implemented yet — currently returns None.
def summarize(tfidf:TfidfVectorizer, text:str, n:int):
    """
    Given a trained TfidfVectorizer object and some XML text, return
    up to n (word,score) pairs in a list. Discard any terms with
    scores < 0.09. Sort the (word,score) pairs by TFIDF score in reverse order.
    """
    # NOTE(review): not implemented yet — currently returns None.
def load_corpus(zipfilename:str) -> dict:
    """
    Given a zip file containing root directory reuters-vol1-disk1-subset
    and a bunch of *.xml files, read them from the zip file into
    a dictionary of (filename,xmltext) associations. Use namelist() from
    ZipFile object to get list of xml files in that zip file.
    Convert filename reuters-vol1-disk1-subset/foo.xml to foo.xml
    as the keys in the dictionary. The values in the dictionary are the
    raw XML text from the various files.
    """
    # Was an empty stub returning None despite the declared `-> dict`.
    corpus = {}
    with zipfile.ZipFile(zipfilename) as zf:
        for name in zf.namelist():
            # only .xml entries; this also skips directory entries
            if name.endswith('.xml'):
                # reuters-vol1-disk1-subset/foo.xml -> foo.xml
                corpus[os.path.basename(name)] = zf.read(name).decode('utf-8')
    return corpus
|
11586214
|
import options
import ocgantestdisjoint
# Evaluate checkpoints saved every 10 epochs and log the four per-class AUCs.
opt = options.test_options()
# Use a context manager so the log file is closed (and flushed) even if an
# evaluation run raises part-way through the loop.
with open(opt.dataset + "_progress.txt", "w") as text_file:
    for i in range(0, 1000, 10):
        opt.epochs = i
        roc_auc = ocgantestdisjoint.main(opt)
        print(roc_auc)
        auc1, auc2, auc3, auc4 = roc_auc[0], roc_auc[1], roc_auc[2], roc_auc[3]
        text_file.write("%s %s %s %s %s\n" % (str(i), str(auc1), str(auc2), str(auc3), str(auc4)))
|
11586250
|
from collections import namedtuple
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
class Module(nn.Module):
    """Base module that routes forward() to train_forward or inference_forward,
    depending on nn.Module's `training` flag, after a matching preprocess step.
    Subclasses implement the two forward branches (and optionally the
    preprocess hooks, which default to identity).
    """
    def __init__(self, config):
        super(Module, self).__init__()
        # arbitrary configuration object kept for subclasses
        self.config = config
    def init_weight(self):
        raise NotImplementedError()
    def fix_params(self):
        raise NotImplementedError()
    def forward(self, *inputs, **kwargs):
        inputs, kwargs = self.preprocess(*inputs, **kwargs)
        if self.training:
            return self.train_forward(*inputs, **kwargs)
        else:
            return self.inference_forward(*inputs, **kwargs)
    def train_forward(self, *inputs, **kwargs):
        """
        def train_forward(self, data, label, **kwargs):
            # this is a toy example for 1 output, 2 loss function
            output = None
            loss1 = torch.tensor(0.0)
            loss2 = torch.tensor(0.0)
            outputs = {'output': output,
                       'loss1': loss1,
                       'loss2': loss2}
            loss = loss1 + loss2
            return outputs, loss
        """
        # Fixed: `raise NotImplemented` raises the NotImplemented constant,
        # which is a TypeError at raise time — NotImplementedError is correct.
        raise NotImplementedError
    def inference_forward(self, *inputs, **kwargs):
        """
        def inference_forward(self, data, **kwargs):
            output = None
            outputs = {'output': output}
            return outputs
        """
        raise NotImplementedError
    def preprocess(self, *inputs, **kwargs):
        # Dispatch to the mode-specific preprocess hook.
        if self.training:
            return self.train_preprocess(*inputs, **kwargs)
        else:
            return self.inference_preprocess(*inputs, **kwargs)
    def train_preprocess(self, *inputs, **kwargs):
        # Identity by default; override to transform training inputs.
        return inputs, kwargs
    def inference_preprocess(self, *inputs, **kwargs):
        # Identity by default; override to transform inference inputs.
        return inputs, kwargs
|
11586267
|
import logging
import os
from build_migrator.modules import Parser
from build_migrator.parsers._common.command_tokenizer import cmdline_split
logger = logging.getLogger(__name__)
class ResponseFile(Parser):
    """Parser pass that expands a trailing "@file" response-file reference in a
    target's command-line tokens into the file's actual arguments.
    """
    priority = 5
    @staticmethod
    def add_arguments(arg_parser):
        # this parser takes no command-line options
        pass
    @staticmethod
    def is_applicable(project=None, log_type=None):
        # response files can appear in any project / log type
        return True
    def __init__(self, context, platform=None):
        # context: build context (path normalization, target index)
        # platform: "windows" selects Windows command-line splitting rules
        self.context = context
        self.platform = platform
        self.rspfiles = {}
    def _get_args(self, cmdline):
        # Split response-file content into argument tokens;
        # cmdline_split platform: 0 = Windows rules, 1 = everything else.
        cmdline = cmdline.replace("\n", " ")
        platform = 0 if self.platform == "windows" else 1
        return cmdline_split(cmdline, platform=platform)
    def parse(self, target):
        """Replace a trailing "@path" token with the arguments read from the
        referenced response file (on disk, or from an already-parsed target).
        Mutates target["tokens"] in place and returns the target.
        """
        tokens = target.get("tokens")
        if not tokens:
            return target
        if tokens[-1].startswith("@"):
            path = self.context.normalize_path(target["tokens"][-1][1:])
            if os.path.exists(path):
                with open(path, "rt") as f:
                    target["tokens"][-1:] = self._get_args(f.read())
            else:
                # the response file may have been produced by another target
                output = self.context.get_output(path)
                response_file_target = self.context.target_index.get(output)
                if response_file_target is not None:
                    target["tokens"][-1:] = self._get_args(
                        response_file_target["content"]
                    )
                else:
                    logger.error(
                        "Response file not found: %s. Log may be parsed incorrectly.",
                        path,
                    )
        return target
__all__ = ["ResponseFile"]
|
11586269
|
from __future__ import division
import re
from collections import Counter
import math
import heapq
import sys
class PhraseMining(object):
    """
    PhraseMining performs frequent pattern mining followed by agglomerative clustering
    on the input corpus and then stores the results in intermediate files.
    :param file_name:
        path to the input corpus.
    :param min_support:
        minimum support threshold which must be satisfied by each phrase during frequent
        pattern mining.
    :param max_phrase_size:
        maximum allowed phrase size.
    :param alpha:
        threshold for the significance score.
    """
    def __init__(self, file_name, min_support=10, max_phrase_size=40, alpha=4):
        self.min_support = min_support
        self.max_phrase_size = max_phrase_size
        self.alpha = alpha
        self.file_name = file_name
    def mine(self):
        # Entry point: returns (partitioned_docs, index_vocab).
        return self._run_phrase_mining(self.min_support, self.max_phrase_size, self.alpha, self.file_name)
    def _frequentPatternMining(self, documents, min_support, max_phrase_size, word_freq, active_indices):
        """
        Performs frequent pattern mining to collect aggregate counts for all contiguous phrases in the
        input document that satisfy a certain minimum support threshold.
        Parameters:
        @documents: the input corpus
        @min_support: minimum support threshold which must be satisfied by each phrase.
        @max_phrase_size: maximum allowed phrase size
        @word_freq: raw frequency of each word in the input corpus
        @active_indices: set of active indices
        """
        hash_counter = word_freq
        n = 2
        #iterate until documents is empty
        while(len(documents) > 0):
            temp_documents = []
            new_active_indices = []
            #go over each document
            for d_i,doc in enumerate(documents):
                #get set of indices of phrases of length n-1 with min support
                new_word_indices = []
                word_indices = active_indices[d_i]
                for index in word_indices:
                    words = doc.split()
                    if index+n-2 < len(words):
                        # build the phrase of length n-1 starting at `index`
                        key = ""
                        for i in range(index, index+n-2+1):
                            if i == index+n-2:
                                key = key + words[i]
                            else:
                                key = key + words[i] + " "
                        #check if the phrase 'key' meets min support
                        if hash_counter[key] >= min_support:
                            new_word_indices.append(index)
                #remove the current document if there is no more phrases of length
                #n which satisfy the minimum support threshold
                if len(new_word_indices) != 0:
                    new_active_indices.append(new_word_indices)
                    temp_documents.append(doc)
                    words = doc.split()
                    # count phrases of length n formed by two adjacent surviving indices
                    # NOTE(review): the inner `for idx in range(...)` shadows the
                    # enumerate's `idx`; harmless here since the outer loop
                    # rebinds it, but worth renaming.
                    for idx, i in enumerate(new_word_indices[:-1]):
                        phrase = ""
                        if (new_word_indices[idx+1] == i + 1):
                            for idx in range(i, i+n):
                                if idx == i+n-1:
                                    phrase += words[idx]
                                else:
                                    phrase += words[idx] + " "
                            hash_counter[phrase] += 1
            documents = temp_documents
            active_indices = new_active_indices
            n += 1
            if n == max_phrase_size:
                break
        # keep only the phrases that meet the minimum support
        hash_counter = Counter(x for x in hash_counter.elements() if hash_counter[x] >= min_support)
        return hash_counter
    def _agglomerative_clustering(self, doc, hash_counter, alpha, total_words):
        """
        Performs agglomerative clustering to get meaningful phrases from the input document.
        Parameters:
        @doc: input corpus
        @hash_counter: map from phrases to their respective raw frequency
        @alpha: threshold for the significance score
        @total_words: total count of the words in input corpus.
        """
        sig_map = {}
        phrases = doc.split()
        while(True):
            # find the adjacent pair with the highest significance score
            max_sig = float("-inf")
            max_pair = -1
            for index, word in enumerate(phrases[:-1]):
                phrase = phrases[index]+" "+phrases[index+1]
                if phrase not in sig_map:
                    sig_score = self._significance_score(phrases[index], phrases[index+1], hash_counter, total_words)
                    sig_map[phrase] = sig_score
                if(max_sig < sig_map[phrase]):
                    max_sig = sig_map[phrase]
                    max_pair = index
            # stop when no remaining pair is significant enough
            if(max_sig < alpha):
                break
            #merge max pair
            merged_phrase = phrases[max_pair] + " "+ phrases[max_pair+1]
            #fix phrases
            phrases[max_pair] = merged_phrase
            phrases.pop(max_pair+1)
        return phrases
    def _significance_score(self, phrase1, phrase2, hash_counter, total_words):
        """
        Calculates the significance score of the phrase obtained by joining phrase1
        and phrase2. The significance score basically measures how unlikely is the
        new phrase. The more unlikely it is, the more informative it will be.
        Parameters:
        @phrase1: first phrase
        @phrase2: second phrase
        @hash_counter: map from phrases to their respective raw frequency
        @total_words: total count of the words in input corpus.
        """
        combined_phrase = phrase1+" "+phrase2
        combined_size = len(combined_phrase.split())
        actual_occurence = hash_counter[combined_phrase]
        numerator = hash_counter[phrase1]*hash_counter[phrase2]
        if actual_occurence == 0:
            return float("-inf")
        denominator = total_words * total_words
        # probability of the pair co-occurring if the two phrases were independent
        independent_prob = numerator/denominator
        # NOTE(review): the doubling here is unexplained — confirm it is intentional
        independent_prob *= 2
        expected_occurence = independent_prob*total_words
        return (actual_occurence-expected_occurence)/math.sqrt(max(actual_occurence, expected_occurence))
    def _get_true_frequency(self, hash_counter):
        """
        Updates the raw frequency of the phrases to get their true frequencies.
        """
        true_counter = Counter(hash_counter)
        for key in hash_counter:
            val = key.split()
            if len(val) <= 1:
                continue
            # subtract each phrase's count from the counts of its two sub-phrases
            substr1 = " ".join(val[0:-1])
            substr2 = " ".join(val[1:])
            true_counter[substr1] -= hash_counter[key]
            true_counter[substr2] -= hash_counter[key]
        return true_counter
    def _get_stopwords(self):
        """
        Returns a list of stopwords.
        """
        # NOTE(review): the file handle is never closed
        f = open("topmine_src/stopwords.txt")
        stopwords = set()
        for line in f:
            stopwords.add(line.rstrip())
        return stopwords
    def _get_word_freq(self, documents):
        """
        Calculates the frequency of each word in the input document.
        Returns (total word count, Counter of word frequencies,
        per-document lists of word indices).
        """
        total_words = 0
        word_freq = Counter()
        active_indices = []
        for doc_index, doc in enumerate(documents):
            words = doc.split()
            word_indices = []
            for word_index, word in enumerate(words):
                word_freq[word] += 1
                word_indices.append(word_index)
                total_words += 1
            active_indices.append(word_indices)
        return total_words, word_freq, active_indices
    def _get_partitioned_docs(self, document_range, doc_phrases):
        """
        Partitions the input document based on the punctuations.
        """
        partitioned_docs = []
        start = 0
        end = 0
        for idx in document_range:
            end = idx
            final_doc = []
            # regroup the phrases of all sentences that came from the same document
            for i in range(start, end):
                final_doc.extend(doc_phrases[i])
            partitioned_docs.append(final_doc)
            start = end
        return partitioned_docs
    def _process_partitioned_docs(self, partitioned_docs):
        # Build the word<->index vocabularies and re-encode every phrase as a
        # list of word indices; results stored on self.
        self.vocab = {}
        self.index_vocab = []
        self.partitioned_docs = []
        word_counter = 0
        for document_index, document in enumerate(partitioned_docs):
            document_of_phrases = []
            for phrase in document:
                phrases_of_words = []
                for word in phrase.split():
                    if word not in self.vocab:
                        self.vocab[word] = word_counter
                        self.index_vocab.append(word)
                        word_counter += 1
                    phrases_of_words.append(self.vocab[word])
                document_of_phrases.append(phrases_of_words)
            self.partitioned_docs.append(document_of_phrases)
    def _preprocess_input(self, filename, stopwords):
        """
        Performs preprocessing on the input document. Includes stopword removal.
        Returns (sentence list, per-document sentence-range ends, document count).
        """
        # NOTE(review): the file handle is never closed
        f = open(filename, 'r')
        documents = []
        document_range = []
        i = 0
        num_docs = 0
        for line in f:
            line_lowercase = line.lower()
            # split each line into sentences at punctuation marks
            sentences_no_punc = re.split(r"[.,;!?]",line_lowercase)
            stripped_sentences = []
            for sentence in sentences_no_punc:
                stripped_sentences.append(re.sub('[^A-Za-z0-9]+', ' ', sentence))
            sentences_no_punc = stripped_sentences
            i += len(sentences_no_punc)
            # remember where this document's sentences end in the flat list
            document_range.append(i)
            documents.extend(sentences_no_punc)
            num_docs += 1
        documents = [doc.strip() for doc in documents]
        # remove stop-words
        documents2 = []
        for doc in documents:
            documents2.append(' '.join([word for word in doc.split() if word not in stopwords]))
        documents = documents2[:]
        return documents, document_range, num_docs
    def _run_phrase_mining(self, min_support, max_phrase_size, alpha, file_name):
        """
        Runs the phrase mining algorithm.
        Parameters:
        @min_support: minimum support threshold which must be satisfied by each phrase.
        @max_phrase_size: maximum allowed phrase size
        @alpha: threshold for the significance score
        @file_name: path to the input corpus
        """
        stopwords = self._get_stopwords()
        documents, document_range, num_docs = self._preprocess_input(file_name, stopwords)
        #calculate frequency of all words
        total_words, word_freq, active_indices = self._get_word_freq(documents)
        vocab_size = len(word_freq)
        #run frequent pattern mining
        hash_counter = self._frequentPatternMining(documents, min_support, max_phrase_size, word_freq, active_indices)
        #run agglomerative clustering
        doc_phrases = []
        for doc in documents:
            doc_phrases.append(self._agglomerative_clustering(doc, hash_counter, alpha, total_words))
        #update true count of each phrase
        self.true_counter = self._get_true_frequency(hash_counter)
        partitioned_docs = self._get_partitioned_docs(document_range, doc_phrases)
        self._process_partitioned_docs(partitioned_docs)
        return self.partitioned_docs, self.index_vocab
    def get_frequent_phrases(self, min_support):
        """
        Returns the most frequent phrases in the corpus that occur more than
        the minimum support in descending order of frequency
        """
        frequent_phrases = []
        # most_common() yields counts in descending order, so we can stop
        # at the first phrase that drops below min_support
        for key,value in self.true_counter.most_common():
            if value >= min_support and len(key.split(" "))>1:
                frequent_phrases.append((key, value))
            elif value < min_support:
                break
        return frequent_phrases
|
11586272
|
import numpy as np
class DenseOp:
    """Matrix-vector operator backed by a dense matrix.

    Implements the near/far-field operator interface where every
    near-field product is the full dense product and the far-field
    contribution is identically zero.
    """
    def __init__(self, mat):
        # keep the matrix and mirror its shape on the operator
        self.mat = mat
        self.shape = mat.shape
    def dot(self, v):
        """Full dense matrix-vector product."""
        return self.mat.dot(v)
    def nearfield_dot(self, v):
        """Near-field product — same as the full product for a dense operator."""
        return self.dot(v)
    def nearfield_no_correction_dot(self, v):
        """Near-field product without corrections — same as the full product."""
        return self.dot(v)
    def farfield_dot(self, v):
        """Far-field contribution — a dense operator has none, so return zeros."""
        return np.zeros(self.shape[0])
|
11586275
|
from pymtl3 import *
class Reg( Component ):
    """Simple register: out takes in_'s value at each clock edge."""
    def construct( s, Type ):
        s.out = OutPort( Type )
        s.in_ = InPort( Type )
        @update_ff
        def up_reg():
            # flip-flop update: out <= in_ on the clock edge
            s.out <<= s.in_
    def line_trace( s ):
        return f"[{s.in_} > {s.out}]"
class RegEn( Component ):
    """Register with write-enable: out takes in_'s value only when en is high."""
    def construct( s, Type ):
        s.out = OutPort( Type )
        s.in_ = InPort( Type )
        s.en  = InPort()
        @update_ff
        def up_regen():
            # hold the current value unless the enable is asserted
            if s.en:
                s.out <<= s.in_
    def line_trace( s ):
        return f"[{'en' if s.en else ' '}|{s.in_} > {s.out}]"
class RegRst( Component ):
    """Register with synchronous reset to reset_value."""
    def construct( s, Type, reset_value=0 ):
        s.out = OutPort( Type )
        s.in_ = InPort( Type )
        @update_ff
        def up_regrst():
            # reset takes priority over the data input
            if s.reset: s.out <<= reset_value
            else:       s.out <<= s.in_
    def line_trace( s ):
        return f"[{'rst' if s.reset else ' '}|{s.in_} > {s.out}]"
class RegEnRst( Component ):
    """Register with synchronous reset and write-enable (reset wins over en)."""
    def construct( s, Type, reset_value=0 ):
        s.out = OutPort( Type )
        s.in_ = InPort( Type )
        s.en  = InPort()
        @update_ff
        def up_regenrst():
            # priority: reset, then enable, otherwise hold
            if s.reset: s.out <<= reset_value
            elif s.en:  s.out <<= s.in_
    def line_trace( s ):
        return f"[{'en' if s.en else ' '}|{s.in_} > {s.out}]"
|
11586294
|
import re
from django.db import models
from django.db.models import QuerySet, Q
from django.utils import timezone
from django.contrib.postgres.fields import ArrayField
from dateutil.parser import parse as parse_date
from backend.models import PlayerCube, EditableModel
INTERVAL_REGEXP = re.compile(r'^((?P<days>-?\d+) days?)?(\s*,?\s*(?P<hours>\d+?):(?P<minutes>\d+?):(?P<seconds>\d+?))?$')
def parse_interval(string):
    """Parse a Postgres-style interval string ("-14 days", "1 day, 2:03:04")
    into a timedelta; unparseable input yields a zero timedelta."""
    match = INTERVAL_REGEXP.match(str(string))
    if match is None:
        return timezone.timedelta()
    parts = {name: int(value) for name, value in match.groupdict().items() if value is not None}
    return timezone.timedelta(**parts)
class PlayerClanRuleGoal(EditableModel):
    """A named group of rules evaluated against a clan's players.

    Example Usage:
        goal = PlayerClanRuleGoal(clan=clan, name="Low trophies", applies_to=['elder', 'member', 'coLeader'])
    """
    PLAYER_ROLE_CHOICES = [('member', 'Member'), ('elder', 'Elder'), ('coLeader', 'Co-Leader')]
    clan = models.ForeignKey('backend.Clan', on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    description = models.TextField(null=True)
    # Clan roles the goal applies to; deduplicated on save.
    applies_to = ArrayField(models.CharField(max_length=16, choices=PLAYER_ROLE_CHOICES))
    def save(self, *args, **kwargs):
        # Deduplicate roles before persisting (ordering is not preserved).
        self.applies_to = list(set(self.applies_to))
        super(PlayerClanRuleGoal, self).save(*args, **kwargs)
    def execute_on(self, query: QuerySet):
        """Restrict *query* to this clan's players matching any of the goal's rules."""
        return query.filter(Q(clan=self.clan) & self.as_filters())
    def as_filters(self):
        """OR together every rule's filters, limited to the targeted clan roles."""
        goal_filters = Q()
        for rule in self.get_rules():
            goal_filters |= rule.as_filters()
        return goal_filters & Q(clan_role__in=self.applies_to)
    def get_rules(self):
        # Stable id ordering so rule evaluation and display are deterministic.
        return self.playerclanrule_set.all().order_by('id')
class PlayerClanRule(EditableModel):
    """A single filter predicate over :class:`PlayerCube` rows.

    Example usage:
        rule1 = PlayerClanRule(goal=goal, field='trophies', operator='<', value=5000, is_promoting_rule=False)
        rule2 = PlayerClanRule(field='last_seen', operator='<', value='-14 days', is_promoting_rule=False)
        rule3 = PlayerClanRule(field='last_seen', operator='<', value='-14 days', is_promoting_rule=False)
    """
    # Converters from the stored string value to a comparable python value.
    VALUE_TYPE_HANDLERS = {
        'int': lambda x: int(x) if x else 0,
        'str': lambda x: x if x else '',
        'date': lambda x: parse_date(x) if x else timezone.now(),
        'interval': lambda x: timezone.now() + parse_interval(x),
    }
    VALUE_TYPE_CHOICES = [(t, t) for t in VALUE_TYPE_HANDLERS.keys()]
    # Operator symbol -> ORM lookup suffix (None means exact match).
    OPERATOR_AVAILABLE_FILTERS = {'>': 'gte', '<': 'lte', '=': None, 'between': 'range'}
    OPERATOR_CHOICES = {
        '>': 'is greater than',
        '<': 'is less than',
        '=': 'is exactly',
        'between': 'is between',
    }
    FILTERABLE_FIELDS = {'CharField': str, 'IntegerField': int, 'DateTimeField': timezone.datetime}
    goal = models.ForeignKey(PlayerClanRuleGoal, null=True, on_delete=models.CASCADE)
    field = models.CharField(max_length=128)
    operator = models.CharField(max_length=32, choices=list(OPERATOR_CHOICES.items()))
    value = models.CharField(max_length=255, null=True)
    value_bound = models.CharField(max_length=255, null=True)
    value_type = models.CharField(max_length=255, choices=VALUE_TYPE_CHOICES, default='int')
    # A predicate helps to enable or disable a rule according to another, works by filtering players that
    # matches the predicate
    predicate = models.ForeignKey('PlayerClanRule', null=True, on_delete=models.SET_NULL)
    is_promoting_rule = models.BooleanField(default=True)
    @classmethod
    def get_filterable_fields(cls):
        """Return the PlayerCube model fields a rule may filter on."""
        return list(filter(lambda x: x.get_internal_type() in cls.FILTERABLE_FIELDS.keys(), PlayerCube._meta.fields))
    def __str__(self):
        return "{0.field} {0.operator} {0.humanized_value}".format(self)
    @property
    def humanized_value(self):
        """Human-readable rendering of ``value`` (and ``value_bound``, if any)."""
        if self.value_type == 'interval':
            # Negative intervals point to the past ("... ago"), positive to the future.
            val = self.value[1:] + ' ago' if self.value.startswith('-') else self.value + ' from now'
            if self.value_bound is not None:
                val_bound = self.value_bound[1:] + ' ago' if self.value_bound.startswith('-') else 'in ' + self.value_bound
            else:
                val_bound = None
            values = [val, val_bound]
        else:
            values = [self.value, self.value_bound]
        return ' and '.join([v for v in values if v is not None])
    @property
    def filtered_column(self):
        """The model field object named by ``self.field``.

        :raises ValueError: if the named field is not filterable.
        """
        try:
            filterable_fields = self.get_filterable_fields()
            return [f for f in filterable_fields if f.name == self.field][0]
        except IndexError:
            raise ValueError('Filtered column must be ' + ' or '.join(self.FILTERABLE_FIELDS.keys()))
    @property
    def filtered_column_type(self):
        """Name of the python type backing the filtered column."""
        return self.FILTERABLE_FIELDS[self.filtered_column.get_internal_type()].__name__
    # @override
    def save(self, *args, **kwargs):
        # Normalize both bounds to strings before hitting the CharField columns.
        self._sanitize_value()
        self._sanitize_value_bound()
        super(PlayerClanRule, self).save(*args, **kwargs)
    def _sanitize_value(self):
        """Coerce ``value`` into its canonical string representation."""
        if isinstance(self.value, timezone.timedelta) or isinstance(self.value, int):
            self.value = str(self.value)
        if isinstance(self.value, timezone.datetime):
            self.value = self.value.strftime('%Y-%m-%d %H:%M:%S')
    def _sanitize_value_bound(self):
        """Coerce ``value_bound`` into its canonical string representation.

        BUG FIX: this method previously assigned the sanitized result to
        ``self.value`` (copy-paste from ``_sanitize_value``), clobbering the
        lower bound and leaving ``value_bound`` unsanitized.
        """
        if isinstance(self.value_bound, timezone.timedelta) or isinstance(self.value_bound, int):
            self.value_bound = str(self.value_bound)
        if isinstance(self.value_bound, timezone.datetime):
            self.value_bound = self.value_bound.strftime('%Y-%m-%d %H:%M:%S')
    def _get_main_filter(self):
        # e.g. 'trophies__lte' for '<', or the bare field name for exact match.
        main_filter = self.OPERATOR_AVAILABLE_FILTERS[self.operator]
        return self.field if main_filter is None else '{}__{}'.format(self.field, main_filter)
    def as_filters(self):
        """
        Returns the rules to be used by the QuerySet API
        :return: Q
        """
        handler = self.VALUE_TYPE_HANDLERS[self.value_type]
        value = handler(self.value)
        bound = handler(self.value_bound)
        if self.operator == 'between':
            return Q(**{'%s__gte' % self.field: value}) & Q(**{'%s__lte' % self.field: bound})
        filters = Q(**{self._get_main_filter(): value})
        if self.value_type == 'date' or self.value_type == 'interval':
            # Missing timestamps also match (e.g. players never seen).
            filters = filters | Q(**{'%s__isnull' % self.field: True})
        if self.predicate:
            filters = filters & self.predicate.as_filters()
        return filters
    def execute_on(self, query: QuerySet):
        """
        Execute a rule on a set of player cubes
        :param QuerySet[PlayerCube] query: a list of player cubes to apply rule on
        :return: QuerySet
        """
        if self.predicate is not None:
            query = query.filter(self.predicate.as_filters())
        return query.filter(self.as_filters())
    def get_filtered_users(self, query: QuerySet):
        """
        Returns the players which are not returned by execute_on
        :param QuerySet[PlayerCube] query: a list of player cubes to apply the invert of the rule on
        :return: QuerySet
        """
        return query.filter(~self.as_filters())
    def humanize(self):
        """Plain-English description, e.g. ``trophies is less than 5000``."""
        return "{} {} {}".format(
            self.field.replace('_', ' '),
            self.OPERATOR_CHOICES[self.operator],
            self.humanized_value
        )
|
11586301
|
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX  # default float dtype for all test arrays
def test_repeat_n_d_node_serialization():
    """RepeatNDNode round-trips through serialization."""
    tn.check_serialization(tn.RepeatNDNode("a"))
def test_sparse_upsample_node_serialization():
    """SparseUpsampleNode round-trips through serialization.

    BUG FIX: this test was previously also named
    ``test_repeat_n_d_node_serialization``, shadowing the test above so that
    one of the two never ran.
    """
    tn.check_serialization(tn.SparseUpsampleNode("a"))
def test_repeat_n_d_node1():
    """Each element of a 1-d input is repeated twice along axis 0."""
    nodes = [tn.InputNode("i", shape=(3,)),
             tn.RepeatNDNode("r", upsample_factor=(2,))]
    net = tn.SequentialNode("s", nodes).network()
    compute = net.function(["i"], ["s"])
    inp = np.arange(3).astype(fX)
    expected = np.array([0, 0, 1, 1, 2, 2], dtype=fX)
    np.testing.assert_equal(expected, compute(inp)[0])
def test_repeat_n_d_node2():
    """An upsample factor of all ones is the identity."""
    nodes = [tn.InputNode("i", shape=(3, 4, 5)),
             tn.RepeatNDNode("r", upsample_factor=(1, 1, 1))]
    net = tn.SequentialNode("s", nodes).network()
    compute = net.function(["i"], ["s"])
    inp = np.random.randn(3, 4, 5).astype(fX)
    np.testing.assert_equal(inp, compute(inp)[0])
def test_repeat_n_d_node3():
    """Upsampling along axis 0 duplicates each row."""
    nodes = [tn.InputNode("i", shape=(2, 3)),
             tn.RepeatNDNode("r", upsample_factor=(2, 1))]
    net = tn.SequentialNode("s", nodes).network()
    compute = net.function(["i"], ["s"])
    inp = np.arange(6).astype(fX).reshape(2, 3)
    expected = np.array([[0, 1, 2],
                         [0, 1, 2],
                         [3, 4, 5],
                         [3, 4, 5]])
    np.testing.assert_equal(expected, compute(inp)[0])
def test_repeat_n_d_node4():
    """Upsampling along axis 1 duplicates each column."""
    nodes = [tn.InputNode("i", shape=(2, 3)),
             tn.RepeatNDNode("r", upsample_factor=(1, 2))]
    net = tn.SequentialNode("s", nodes).network()
    compute = net.function(["i"], ["s"])
    inp = np.arange(6).astype(fX).reshape(2, 3)
    expected = np.array([[0, 0, 1, 1, 2, 2],
                         [3, 3, 4, 4, 5, 5]])
    np.testing.assert_equal(expected, compute(inp)[0])
def test_sparse_upsample_node():
    """Sparse upsampling inserts zeros between elements instead of repeating them."""
    nodes = [tn.InputNode("i", shape=(2, 3)),
             tn.SparseUpsampleNode("r", upsample_factor=(1, 2))]
    net = tn.SequentialNode("s", nodes).network()
    compute = net.function(["i"], ["s"])
    inp = np.arange(6).astype(fX).reshape(2, 3)
    expected = np.array([[0, 0, 1, 0, 2, 0],
                         [3, 0, 4, 0, 5, 0]])
    np.testing.assert_equal(expected, compute(inp)[0])
|
11586313
|
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
class UserProfile(models.Model):
    """Extra per-user data attached one-to-one to Django's ``User``."""
    user = models.OneToOneField(
        User, on_delete=models.CASCADE, related_name='profile',
        null=True, blank=True
    )
    description = models.TextField()
    def __str__(self):
        # NOTE(review): ``user`` is nullable, so this can raise
        # AttributeError for orphan profiles -- confirm that is acceptable.
        return self.user.username
@receiver(post_save, sender=User)
def create_or_update_user_profile(sender, instance, created, **kwargs):
    """Keep a :class:`UserProfile` in sync with every saved ``User``.

    Creates the profile on first save. On subsequent saves the profile is
    re-saved only if it exists, so legacy users created before this signal
    was wired no longer raise ``RelatedObjectDoesNotExist``.
    """
    if created:
        UserProfile.objects.create(user=instance)
    # hasattr() performs the reverse-relation lookup and swallows the
    # DoesNotExist raised for users without a profile.
    if hasattr(instance, 'profile'):
        instance.profile.save()
|
11586325
|
import json
from collections import OrderedDict
from django.db import models
from django.db.models import Count
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from model_utils import Choices
from model_utils.models import TimeStampedModel
from feder.cases.models import Case
from feder.letters.models import Letter
# Delivery statuses reported by the upstream mail log. Declaration order
# matters: LogRecord.get_status() returns the first status present in a row,
# and ``unknown`` is the fallback.
STATUS = Choices(
    ("open", _("Open")),
    ("ok", _("Delivered")),
    ("spambounce", _("Spam-bounce")),
    ("softbounce", _("Soft-bounce")),
    ("hardbounce", _("Hard-bounce")),
    ("dropped", _("Dropped")),
    ("deferred", _("Deferred")),
    ("unknown", _("Unknown")),
)
class EmailQuerySet(models.QuerySet):
    def with_logrecord_count(self):
        """Annotate each email with ``logrecord__count`` (number of raw log rows)."""
        return self.annotate(Count("logrecord"))
class EmailLog(TimeStampedModel):
    """Delivery-status record of a single outgoing e-mail tied to a case."""
    status = models.CharField(choices=STATUS, default=STATUS.unknown, max_length=20)
    # BUG FIX: ``max_length`` is meaningless on ForeignKey/OneToOneField; the
    # lazy translated strings were clearly intended as ``verbose_name``.
    case = models.ForeignKey(Case, on_delete=models.CASCADE, verbose_name=_("Case"))
    letter = models.OneToOneField(
        Letter, on_delete=models.CASCADE, verbose_name=_("Letter"), null=True, blank=True
    )
    email_id = models.CharField(verbose_name=_("Message-ID"), max_length=255)
    to = models.CharField(verbose_name=_("To"), max_length=255)
    objects = EmailQuerySet.as_manager()
    def __str__(self):
        return "Email #{} ({})".format(self.pk, self.email_id)
    def get_absolute_url(self):
        return reverse("logs:detail", kwargs={"pk": self.pk})
    class Meta:
        verbose_name = _("Email")
        verbose_name_plural = _("Emails")
        ordering = ["created"]
class LogRecordQuerySet(models.QuerySet):
    def parse_rows(self, rows):
        """Import raw mail-log *rows*, creating/updating EmailLog entries.

        :param rows: iterable of dicts with at least ``from``, ``to``, ``id``
            and ``message_id`` keys, plus the ``<status>_time``/``<status>_desc``
            fields inspected by :meth:`LogRecord.get_status`
        :return: tuple ``(skipped, saved)`` row counts
        """
        skipped, saved = 0, 0
        # Map case e-mail address -> case id (only cases that have letters).
        cases = dict(
            Letter.objects.filter(record__case__isnull=False).values_list(
                "record__case__email", "record__case_id"
            )
        )
        # Map outgoing letter Message-ID header -> letter id.
        letters = dict(
            Letter.objects.is_outgoing().values_list("message_id_header", "id")
        )
        for row in rows:
            # Rows whose sender has no known case cannot be attributed; skip.
            if row["from"] not in cases:
                skipped += 1
                continue
            log = LogRecord(data=row)
            status = log.get_status()
            letter = letters.get(row["message_id"], None)
            obj, created = EmailLog.objects.get_or_create(
                case_id=cases[row["from"]],
                email_id=row["id"],
                to=row["to"],
                defaults={"status": status, "letter_id": letter},
            )
            # Keep an existing entry's status up to date.
            if obj.status != status:
                obj.status = status
                obj.save(update_fields=["status"])
            log.email = obj
            log.save()
            saved += 1
        return skipped, saved
class LogRecord(TimeStampedModel):
    """One raw mail-log row (stored as JSON) attached to an :class:`EmailLog`."""
    email = models.ForeignKey(
        EmailLog, on_delete=models.CASCADE, verbose_name=_("Email")
    )
    data = JSONField()
    objects = LogRecordQuerySet.as_manager()
    def get_status(self):
        """Derive a STATUS value from the raw row.

        Statuses are checked in STATUS declaration order; the first whose
        ``<status>_time`` or ``<status>_desc`` key has a truthy value wins,
        falling back to ``unknown``.
        """
        status_list = OrderedDict(STATUS).keys()
        for status in status_list:
            time_name = "{}_time".format(status)
            desc_name = "{}_desc".format(status)
            if self.data.get(time_name, False) or self.data.get(desc_name, False):
                return status
        return STATUS.unknown
    def pretty_json(self):
        # Human-readable dump for admin/detail views.
        return json.dumps(self.data, indent=4)
    class Meta:
        verbose_name = _("Log record")
        verbose_name_plural = _("Log records")
        ordering = ["created"]
    def __str__(self):
        # ``email_id`` here is the FK id of the related EmailLog,
        # not the Message-ID header.
        return "Log #{} for email #{}".format(self.pk, self.email_id)
|
11586344
|
from .abstract_pop_splitter import AbstractPOPSplitter
import random
import numpy as np
from .entity_splitting import split_entities
# assign commodities to subproblems at random
class RandomSplitter(AbstractPOPSplitter):
    """POP splitter that assigns (split) commodities to subproblems uniformly at random."""
    def __init__(self, num_subproblems, split_fraction=0.1):
        super().__init__(num_subproblems)
        # Fraction controlling how commodities are split into sub-entities
        # before random assignment (passed through to split_entities).
        self.split_fraction = split_fraction
    def split(self, problem):
        """Partition *problem* into ``self._num_subproblems`` subproblems.

        The subproblems' traffic matrices partition the original demands, and
        every edge capacity is scaled down by the number of subproblems.

        :return: list of subproblems
        """
        sub_problems = [problem.copy() for _ in range(self._num_subproblems)]
        # zero-out the traffic matrices; they will be populated at random using commodity list
        for sp in sub_problems:
            for u in sp.G.nodes:
                for v in sp.G.nodes:
                    sp.traffic_matrix.tm[u, v] = 0
        entity_list = [[k, u, v, d] for (k, (u, v, d)) in problem.commodity_list]
        split_entity_lists = split_entities(entity_list, self.split_fraction)
        for split_list in split_entity_lists:
            num_subentities = len(split_list)
            assigned_sps_list = []
            # create list of assigned sps by randomly sampling sps (without replacement, if possible)
            # until all entities have been assigned
            while len(assigned_sps_list) < num_subentities:
                num_to_add = min(
                    [num_subentities - len(assigned_sps_list), self._num_subproblems]
                )
                randperm = np.random.permutation(np.arange(self._num_subproblems))
                assigned_sps_list += list(randperm[:num_to_add])
            for ind, [_, source, target, demand] in enumerate(split_list):
                sub_problems[assigned_sps_list[ind]].traffic_matrix.tm[
                    source, target
                ] += demand
        # Scale capacities so the subproblems jointly respect the original budget.
        for sub_problem in sub_problems:
            # BUG FIX: iterate each subproblem's own edge set rather than always
            # reading the last subproblem's edges (harmless while every copy
            # shares the topology, but incorrect if they ever diverge).
            for u, v in sub_problem.G.edges:
                sub_problem.G[u][v]["capacity"] = (
                    sub_problem.G[u][v]["capacity"] / self._num_subproblems
                )
        return sub_problems
|
11586357
|
from django.conf.urls import include, patterns, url
from .views import iiif_image_api_info, iiif_image_api
# IIIF Image API routes: an info document per identifier, and the image
# request URI (region/size/rotation/quality.format).
urlpatterns = patterns(
    '',
    url(
        # BUG FIX: the dot before "json" was an unescaped regex wildcard, so
        # e.g. "info_json" also matched; same for the quality/format separator.
        r'(?P<identifier_param>.+)/info\.json',
        iiif_image_api_info,
        name='iiif_image_api_info',
    ),
    url(
        r'(?P<identifier_param>[^/]+)/(?P<region_param>[^/]+)'
        r'/(?P<size_param>[^/]+)/(?P<rotation_param>[^/]+)'
        r'/(?P<quality_param>[^.]+)\.(?P<format_param>.+)',
        iiif_image_api,
        name='iiif_image_api',
    ),
)
|
11586376
|
from summarizer import Summarizer
# Long-form news article used as the demo input for the extractive summarizer.
body = '''
The Chrysler Building, the famous art deco New York skyscraper, will be sold for a small fraction of its previous sales price.
The deal, first reported by The Real Deal, was for $150 million, according to a source familiar with the deal.
Mubadala, an Abu Dhabi investment fund, purchased 90% of the building for $800 million in 2008.
Real estate firm <NAME> had owned the other 10%.
The buyer is RFR Holding, a New York real estate company.
Officials with Tishman and RFR did not immediately respond to a request for comments.
It's unclear when the deal will close.
The building sold fairly quickly after being publicly placed on the market only two months ago.
The sale was handled by CBRE Group.
The incentive to sell the building at such a huge loss was due to the soaring rent the owners pay to Cooper Union, a New York college, for the land under the building.
The rent is rising from $7.75 million last year to $32.5 million this year to $41 million in 2028.
Meantime, rents in the building itself are not rising nearly that fast.
While the building is an iconic landmark in the New York skyline, it is competing against newer office towers with large floor-to-ceiling windows and all the modern amenities.
Still the building is among the best known in the city, even to people who have never been to New York.
It is famous for its triangle-shaped, vaulted windows worked into the stylized crown, along with its distinctive eagle gargoyles near the top.
It has been featured prominently in many films, including Men in Black 3, Spider-Man, Armageddon, Two Weeks Notice and Independence Day.
The previous sale took place just before the 2008 financial meltdown led to a plunge in real estate prices.
Still there have been a number of high profile skyscrapers purchased for top dollar in recent years, including the Waldorf Astoria hotel, which Chinese firm Anbang Insurance purchased in 2016 for nearly $2 billion, and the Willis Tower in Chicago, which was formerly known as Sears Tower, once the world's tallest.
Blackstone Group (BX) bought it for $1.3 billion 2015.
The Chrysler Building was the headquarters of the American automaker until 1953, but it was named for and owned by Chrysler chief <NAME>, not the company itself.
<NAME> had set out to build the tallest building in the world, a competition at that time with another Manhattan skyscraper under construction at 40 Wall Street at the south end of Manhattan. He kept secret the plans for the spire that would grace the top of the building, building it inside the structure and out of view of the public until 40 Wall Street was complete.
Once the com
'''
# Build the extractive summarizer once and reuse it for both summary lengths.
model = Summarizer()
# Summarize the article down to 3 sentences, then 6 for comparison.
result = model(body, num_sentences=3)
print(result)
result = model(body, num_sentences=6)
print(result)
|
11586409
|
import os
import sys
import torch
import pickle
import datetime
import argparse
from argparse import Namespace
from tools import utils
# Per-dataset semantic class names, indexed by label id.
SEM_CITYSCAPES = ['unlabeled', 'ego vehicle', 'rectification border', 'out of roi', 'static', 'dynamic', 'ground',
                  'road', 'sidewalk', 'parking', 'rail track', 'building', 'wall', 'fence', 'guard rail', 'bridge',
                  'tunnel', 'pole', 'polegroup', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
                  'person', 'rider', 'car', 'truck', 'bus', 'caravan', 'trailer', 'train', 'motorcycle', 'bicycle',
                  'license plate']
SEM_IDD = ['road', 'parking', 'drivable fallback', 'sidewalk', 'rail track', 'non-drivable fallback', 'person',
           'animal', 'rider', 'motorcycle', 'bicycle', 'autorickshaw', 'car', 'truck', 'bus', 'caravan', 'trailer',
           'train', 'vehicle fallback', 'curb', 'wall', 'fence', 'guard rail', 'billboard', 'traffic sign',
           'traffic light', 'pole', 'polegroup', 'obs-str-bar-fallback', 'building', 'bridge' , 'tunnel', 'vegetation',
           'sky', 'fallback background','unlabeled', 'ego vehicle', 'rectification border', 'out of roi',
           'license plate']
SEM_CELEBA = ['null', 'skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear', 'r_ear', 'mouth', 'u_lip',
              'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth']
# ADE classes are unnamed here -- just numeric label ids.
SEM_ADE = [str(i) for i in range(95)]
class Options():
    def __init__(self):
        # Set to True once initialize() has populated an argument parser.
        self.initialized = False
def initialize(self, parser):
parser = self.initialize_base(parser)
parser = self.initialize_seg_generator(parser)
parser = self.initialize_img_generator(parser)
parser = self.initialize_segmentor(parser)
parser = self.initialize_extra_dataset(parser)
self.initialized = True
return parser
def initialize_base(self, parser):
# experiment specifics
parser.add_argument('--name', type=str, default='my_experiment', help='name of the experiment, it indicates where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
# for mixed precision
parser.add_argument('--use_amp', action='store_true', help='if specified, use apex mixed precision')
parser.add_argument('--amp_level', type=str, default='O1', help='O1, O2...')
# for input / output sizes
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--true_dim', type=int, default=1024, help='resolution of saved images')
parser.add_argument('--max_dim', type=int, default=512, help='resolution up to which we wish to train our models')
parser.add_argument('--dim', type=int, default=-1, help='resolution at which to initialize training (has no effect for the seg generator)')
parser.add_argument('--seg_dim', type=int, default=-1, help='resolution at which to generate segmentation (they are then resized to dim)')
parser.add_argument('--force_seg_dim', action='store_true', help='if True, load seg at seg_dim')
parser.add_argument('--bilimax', action='store_true', help='if True, apply bilinear upsampling to seg then max discretizer')
parser.add_argument('--true_ratio', type=float, default=1.0, help='ratio width/height of saved images, final width will be max_dim * aspect_ratio')
parser.add_argument('--aspect_ratio', type=float, default=2.0, help='target width/height ratio')
parser.add_argument('--num_semantics', type=int, default=3, help='number of semantic classes including eventual unknown class')
parser.add_argument('--semantic_labels', type=str, default=[], nargs="+", help='name of the semantic class for each index')
parser.add_argument('--label_nc', type=int, default=None, help='new label for unknown class if there is any')
parser.add_argument('--not_sort', action='store_true', help='if specified, do *not* sort the input paths')
parser.add_argument('--soft_sem_seg', action='store_true', help='apply gaussian blur to semantic segmentation')
parser.add_argument('--soft_sem_prop', type=float, default=0.5, help='amount of final sem map with blur')
parser.add_argument('--transpose', action='store_true', help='transpose the input seg/img')
parser.add_argument('--imagenet_norm', action='store_true', help='normalize images the same way as it is done for imagenet')
parser.add_argument('--colorjitter', action='store_true', help='randomly change the brightness, contrast and saturation of images')
# for setting inputs
parser.add_argument('--dataroot', type=str, default='./datasets/cityscapes/')
parser.add_argument('--dataset', type=str, default='cityscapes')
parser.add_argument('--load_extra', action='store_true', help='if true, load extended version of dataset if available')
parser.add_argument('--load_minimal_info', action='store_true', help='if true, load extended version of dataset if available')
parser.add_argument('--data_idx_type', type=str, default='both', help='(even | odd | both)')
parser.add_argument('--data_city_type', type=str, default='both', help='(a | no_a | both)')
parser.add_argument('--has_tgt', action='store_true', help='if false, tgt cond overrides true cond')
parser.add_argument('--estimated_cond', action='store_true', help='if true, teach a model to generate cond and sample from it')
parser.add_argument('--nearest_cond_index', action='store_true', help='if true, sample data points which corresponds to the nearest cond')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--no_h_flip', action='store_true', help='if specified, do not horizontally flip the images for data argumentation')
parser.add_argument('--no_v_flip', action='store_true', help='if specified, do not vertically flip the images for data argumentation')
parser.add_argument('--resize_img', type=int, nargs="+", default=None, help='if specified, resize images once they are loaded')
parser.add_argument('--resize_seg', type=int, nargs="+", default=None, help='if specified, resize segmentations once they are loaded')
parser.add_argument('--min_zoom', type=float, default=1., help='parameter for augmentation method consisting in zooming and cropping')
parser.add_argument('--max_zoom', type=float, default=1., help='parameter for augmentation method consisting in zooming and cropping')
parser.add_argument('--fixed_crop', type=int, nargs="+", default=None, help='if specified, apply a random crop of the given size')
parser.add_argument('--fixed_top_centered_zoom', type=float, default=None, help='if specified, crop the image to the upper center part')
parser.add_argument('--num_workers', default=8, type=int, help='# threads for loading data')
parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='maximum # of samples allowed per dataset, if the dataset directory contains more than max_dataset_size, only a subset is loaded')
parser.add_argument('--load_from_opt_file', action='store_true', help='loads the options_spade from checkpoints and use that as default')
parser.add_argument('--no_pairing_check', action='store_true', help='if specified, skip sanity check of correct label-image file pairing')
# for panoptic mode
parser.add_argument('--load_panoptic', action='store_true', help='if true, loads both instance and semantic information from segmentation maps, otherwise only semantic information')
parser.add_argument('--instance_type', type=str, default='center_offset', help='combination of (center_offset | (soft_)edge | density)')
parser.add_argument('--things_idx', type=int, nargs="+", default=[], help='indexes corresponding to things (by opposition to stuff)')
parser.add_argument('--max_sigma', type=float, default=8., help='sigma of 2d gaussian representing instance centers for max dim')
parser.add_argument('--min_sigma', type=float, default=2., help='sigmaiiii of 2d gaussian representing instance centers for min dim')
parser.add_argument('--center_thresh', type=float, default=0.5, help='threshold to filter instance centers')
# for display and checkpointing
parser.add_argument('--log_freq', type=int, default=100, help='frequency at which logger is updated with images')
parser.add_argument('--save_freq', type=int, default=-1, help='frequency of saving models, if -1 don\'t save')
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest model')
parser.add_argument('--save_path', type=str, default='./')
parser.add_argument('--colormat', type=str, default='', help='name of colormat to display semantic maps')
# for training
parser.add_argument('--niter', type=int, default=1000, help='number of training iterations')
parser.add_argument('--niter_decay', type=int, default=0, help='# of iter to linearly decay learning rate to zero')
parser.add_argument('--iter_function', type=str, default=None, help='(iter | cycle)')
# for testing
parser.add_argument('--nums_fid', type=int, default=100, help='number of samples to generate to compute fid score')
parser.add_argument('--slide_eval', action='store_true', help='if true, eval on sliding window')
parser.add_argument('--multi_scale_eval', action='store_true', help='if true, eval on two scales')
parser.add_argument('--eval_batchsize', type=int, default=16, help='batch size to compute fid')
parser.add_argument('--eval_freq', type=int, default=10, help='frequency for evaluting fid')
parser.add_argument('--no_eval', action='store_true', help='if true, dont do eval')
parser.add_argument('--eval_idx', type=int, nargs="+", default=[], help="selected classes for evaluation")
parser.add_argument('--force_eval_batch_size', type=int, default=None, help='if true, force eval batch size for segmentor')
# for engine
parser.add_argument('--local_rank', type=int, default=0, help='process rank on node')
# for sampler
parser.add_argument('--sampler_weights_method', type=str, default=None, help='(highlight-)(linear | exponential)')
parser.add_argument('--sampler_bias_method', type=str, default=None, help='(highlight-)linear')
parser.add_argument('--sampler_weights_scale', type=float, default=2., help='rescale sampling weights to range [0, sampler_scale]')
parser.add_argument('--sampler_bias_mul', type=float, default=1., help='amplify std for classes that we wish to bias')
parser.add_argument('--sampler_method', type=str, default="", help='(weights-bias | weights | bias)')
# for estimator
parser.add_argument('--estimator_load_path', type=str, default=None, help='load an estimator model from specified folder')
parser.add_argument('--estimator_min_components', type=int, default=1, help='min number of components for gmm model')
parser.add_argument('--estimator_max_components', type=int, default=5, help='max number of components for gmm model')
parser.add_argument('--estimator_force_components', type=int, default=None, help='if not None, fix number of components for gmm model (overrides min and max)')
parser.add_argument('--estimator_n_init', type=int, default=1, help='number of initializations for gmm model')
parser.add_argument('--estimator_iter_data', type=int, default=1, help='number of time to iter through data to extract cond codes')
parser.add_argument('--estimator_projection_mode', type=str, default="approx", help='(approx | iter)')
parser.add_argument('--estimator_force_bias', type=int, nargs="+", default=[], help="force bias to 1 for specified classes")
parser.add_argument('--estimator_filter_idx', type=int, nargs="+", default=[], help="prevent sem classes at given idx from being sampled")
parser.add_argument('--estimator_force_min_class_p', type=float, nargs="+", default=[], help="pair of (class, p) values where surface proportion should be at least p for class")
# for nearest cond indexor
parser.add_argument('--indexor_load_path', type=str, default=None, help='load an indexor model from specified folder')
parser.add_argument('--indexor_normalize', action='store_true', help='if true, in indexor input classes are normalized individually')
# for end-to-end
parser.add_argument('--fake_from_fake_dis', type=str, default="both", help='(d | d2 | both)')
parser.add_argument('--fake_from_real_dis', type=str, default="both", help='(d | d2 | both)')
parser.add_argument('--img_for_d_real', type=str, default="source", help='(source | target | both)')
parser.add_argument('--img_for_d_fake', type=str, default="target", help='(source | target | both)')
parser.add_argument('--img_for_d2_real', type=str, default="target", help='(source | target | both)')
parser.add_argument('--img_for_d2_fake', type=str, default="target", help='(source | target | both)')
parser.add_argument('--sem_only_real', action='store_true', help='if true, compute only semantic alignement for real data')
parser.add_argument('--lambda_d2_from_real', type=float, default=1, help='parameter for second discriminator and fake data')
parser.add_argument('--no_update_seg_model', action='store_true', help='if true, dont update seg model in end-to-end configuration')
parser.add_argument('--eval_dataset', type=str, default="base", help='(base | extra)')
# for offline generation
parser.add_argument('--save_data_path', type=str, default="datasets/cityscapes_synthetic", help='folder in which to store synthetic data')
parser.add_argument('--data_num', type=int, default=2975, help="number of synthetic pairs to generate")
parser.add_argument('--save8bit', action='store_true', help='if true, save semantic segmentation in 8 bit format')
# for visualizer
parser.add_argument('--vis_method', type=str, default="", help='method for visualization')
parser.add_argument('--vis_steps', type=int, default=32, help='method for visualization')
parser.add_argument('--vis_dataloader_bs', type=int, default=1, help='batch size for dataloader')
parser.add_argument('--extraction_path', type=str, default=None, help="folder containing mean style codes")
parser.add_argument('--mean_style_only', action='store_true', help='if true, do not recompute style from image')
parser.add_argument('--addition_mode', action='store_true', help='if true, shape target for partial edition rather than full')
parser.add_argument('--save_full_res', action='store_true', help='if true, save as individual images at full resolution')
parser.add_argument('--vis_ins', action='store_true', help='if true, visualize instance related masks')
parser.add_argument('--vis_random_style', action='store_true', help='if true, load random style instead of mean style for new elements')
# for offline generator
return parser
def initialize_seg_generator(self, parser):
    """Register every segmentation-generator option (all flags use the 's_' prefix).

    :param parser: argparse parser to extend.
    :return: the same parser, for chaining.
    """
    add = parser.add_argument
    # model architecture
    add('--s_model', type=str, default='progressive', help='(progressive | style)')
    add('--s_seg_type', type=str, default='generator', help='(generator | completor)')
    add('--s_panoptic', action='store_true', help='if true, panoptic segmentation generation, otherwise semantic segmentation generation')
    add('--s_latent_dim', type=int, default=512, help='dimension of the latent vector')
    add('--s_max_hidden_dim', type=int, default=512, help='maximum number of hidden feature maps')
    add('--s_discretization', type=str, default='gumbel', help='(gumbel | max)')
    # conditional generation
    add('--s_cond_seg', type=str, default=None, help='(semantic | instance | panoptic | None)')
    add('--s_joints_mul', type=int, default=0, help='number of assisted joints between generator blocks to refine intermediate outputs')
    add('--s_joint_type', type=str, default="bias", help='(linear | bias | affine)')
    add('--s_cond_mode', default='sem_recover', help='(entropy &| sem_recover &| (weakly_)assisted &| spread &| ins_recover | original_cgan)')
    add('--s_filter_cond', action='store_true', help='if specified, sem should represent at least one pixel to be taken into account in assisted activation')
    add('--s_pseudo_supervision', action='store_true', help='self supervision for instance related output')
    add('--s_lambda_things', type=float, default=1., help='parameter for things related loss')
    add('--s_lambda_stuff', type=float, default=1., help='parameter for stuff related loss')
    add('--s_lambda_adv_things', type=float, default=1., help='parameter for things gen/dis loss')
    add('--s_things_dis', action='store_true', help='if specified, do an extra forward pass in discriminator with things alone')
    add('--s_ova_idx', type=int, nargs="+", default=[], help='indices for which we wish to apply the one-versus-all loss')
    add('--s_lambda_ova', type=float, default=1., help='parameter for ova loss')
    add('--s_lambda_spread', type=float, default=1., help='parameter for spread loss')
    # input / output sizes
    add('--s_things_stuff', action='store_true', help='if specified, treats things and stuff separately')
    add('--s_override_num_semantics', type=int, default=None, help='if not None, overrides num semantics')
    add('--s_sem_conv', type=int, nargs="+", default=None, help='convert seg classes for img generator')
    # optimization
    add('--s_optimizer', type=str, default='adam')
    add('--s_beta1', type=float, default=0.0, help='momentum term of adam')
    add('--s_beta2', type=float, default=0.99, help='momentum term of adam')
    add('--s_lr', type=float, default=0.001, help='initial learning rate for adam')
    add('--s_batch_size_per_res', type=int, nargs="+", default=None, help='overrides batch_size to have a different batch size for every res')
    add('--s_iter_function_per_res', type=str, nargs="+", default=None, help='overrides iter_function to have a different iter function for every res')
    add('--s_step_mul_per_res', type=float, nargs="+", default=None, help='step multiplier for every res (more epochs for specified res)')
    # display and checkpointing
    add('--s_log_per_phase', type=int, default=50, help='number of times logger is updated with images during each phase, overrides log_freq')
    add('--s_save_at_every_res', action='store_true', help='save checkpoint when done training at a given res and moving to the next one')
    # checkpoint loading
    add('--s_load_path', type=str, default=None, help='load model from which_iter at specified folder')
    add('--s_cont_train', action='store_true', help='continue training with model from which_iter')
    add('--s_which_iter', type=int, default=0, help='load the model from specified iteration')
    add('--s_force_res', type=int, default=None, help='train model from given res (instead of estimating res from iter)')
    add('--s_force_phase', type=str, default=None, help='train model from given phase (instead of estimating phase from iter)')
    add('--s_not_strict', action='store_true', help='whether checkpoint exactly matches network architecture')
    # output post-processing
    add('--s_t', type=float, default=1, help='temperature in softmax')
    add('--s_store_masks', action='store_true', help='to keep the masks information in the output')
    # completor-specific options
    add('--s_vertical_sem_crop', action='store_true', help='if true, crop a random vertical band from sem')
    add('--s_min_sem_crop', type=float, default=0.5, help='min prop of image to crop for vertical sem crop')
    add('--s_sem_label_crop', type=int, nargs="+", default=[], help='class idx to be cropped')
    add('--s_sem_label_ban', type=int, nargs="+", default=[], help='class idx to be banned from the generation process')
    add('--s_switch_cond', action='store_true', help='if true, switch from input image cond to target cond')
    add('--s_fill_crop_only', action='store_true', help='if true, keep original sem and only replace cropped areas with new sem')
    add('--s_norm_G', type=str, default='spectralspadebatch3x3', help='instance normalization or batch normalization')
    add('--s_lambda_novelty', type=float, default=1., help='parameter for novelty loss')
    add('--s_edge_cond', action='store_true', help='if true, compute target cond by looking at edge of crop')
    add('--s_weight_cond_crop', action='store_true', help='if true, weight the sem cond so that it fills the crop')
    add('--s_bias_sem', type=int, nargs="+", default=[], help='bias some classes when filling crop')
    add('--s_bias_mul', type=float, default=1., help='bias mul to bias some classes when filling crop')
    add('--s_merged_activation', action='store_true', help='if true, merge input sem and generated sem in activation')
    add('--s_random_soft_mix', action='store_true', help='if true, some tgt code will be close to src')
    add('--s_random_linear', action='store_true', help='if true, some tgt code will be close to src')
    add('--s_scalnovelty', action='store_true', help='if true, novelty loss based on bhattacharyya distance')
    # StyleGAN2 backbone
    add('--s_style_dim', type=int, default=512, help='latent dimension')
    add('--s_n_mlp', type=int, default=8, help='number of mlp layers')
    add('--s_mixing', type=float, default=0.9, help='number of mlp layers')
    return parser
def initialize_img_generator(self, parser):
    """Register every image-generator option (all flags use the 'i_' prefix).

    :param parser: argparse parser to extend.
    :return: the same parser, for chaining.
    """
    add = parser.add_argument
    # experiment specifics
    add('--i_model', type=str, default='pix2pix', help='which model to use')
    add('--i_img_type', type=str, default='generator', help='(generator | style_generator)')
    add('--i_norm_G', type=str, default='spectralinstance', help='instance normalization or batch normalization')
    add('--i_norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization')
    add('--i_norm_E', type=str, default='spectralinstance', help='instance normalization or batch normalization')
    # generator architecture
    add('--i_netG', type=str, default='spade', help='selects model to use for netG (condconv | pix2pixhd | spade)')
    add('--i_ngf', type=int, default=64, help='# of gen filters in first conv layer')
    add('--i_init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
    add('--i_init_variance', type=float, default=0.02, help='variance of the initialization distribution')
    add('--i_latent_dim', type=int, default=256, help="dimension of the latent z vector")
    add('--i_num_upsampling_layers', choices=('normal', 'more', 'most'), default='normal', help="if 'more', adds upsampling layer between the two middle resnet blocks, if 'most', also add one more upsampling + resnet layer at the end of the generator")
    add('--i_resnet_n_downsample', type=int, default=4, help='number of downsampling layers in netG')
    add('--i_resnet_n_blocks', type=int, default=9, help='number of residual blocks in the global generator network')
    add('--i_resnet_kernel_size', type=int, default=3, help='kernel size of the resnet block')
    add('--i_resnet_initial_kernel_size', type=int, default=7, help='kernel size of the first convolution')
    # discriminator architecture
    add('--i_netD_subarch', type=str, default='n_layer', help='architecture of each discriminator')
    add('--i_num_D', type=int, default=2, help='number of discriminators to be used in multiscale')
    add('--i_n_layers_D', type=int, default=3, help='# layers in each discriminator')
    # instance-wise features
    add('--i_panoptic', action='store_true', help='if true, conditioned on panoptic segmentation, semantic segmentation otherwise')
    add('--i_instance_type_for_img', type=str, default=None, help='combination of (center_offset | (soft_)edge | density), if None same as instance_type')
    add('--i_nef', type=int, default=16, help='# of encoder filters in the first conv layer')
    add('--i_use_vae', action='store_true', help='enable training with an image encoder.')
    # optimization
    add('--i_optimizer', type=str, default='adam')
    add('--i_beta1', type=float, default=0.5, help='momentum term of adam')
    add('--i_beta2', type=float, default=0.999, help='momentum term of adam')
    add('--i_lr', type=float, default=0.0002, help='initial learning rate for adam')
    add('--i_D_steps_per_G', type=int, default=1, help='number of discriminator iterations per generator iterations.')
    # checkpoint loading
    add('--i_load_path', type=str, default=None, help='load a model from specified folder')
    add('--i_load_path_d2', type=str, default=None, help='load a model from specified folder')
    add('--i_cont_train', action='store_true', help='continue training with model from which_iter')
    add('--i_which_iter', type=int, default=0, help='load the model from specified iteration')
    add('--i_which_iter_d2', type=int, default=0, help='load the model from specified iteration')
    add('--i_not_strict', action='store_true', help='whether checkpoint exactly matches network architecture')
    # discriminator losses
    add('--i_ndf', type=int, default=64, help='# of discriminator filters in first conv layer')
    add('--i_lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
    add('--i_lambda_vgg', type=float, default=10.0, help='weight for vgg loss')
    add('--i_no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
    add('--i_no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
    add('--i_gan_mode', type=str, default='hinge', help='(ls|original|hinge)')
    add('--i_netD', type=str, default='multiscale', help='(fpse|n_layers|multiscale|image)')
    add('--i_no_TTUR', action='store_true', help='use TTUR training scheme')
    add('--i_lambda_kld', type=float, default=0.05)
    add('--i_use_d2', action='store_true', help='if true, use an additional discriminator to distinguish real and fake')
    add('--i_lambda_d2', type=float, default=1.0, help='weight for d2 loss')
    # style generator
    add('--i_status', type=str, default='train', help='status for ACE layer')
    return parser
def initialize_segmentor(self, parser):
    """Register every segmentor option (all flags use the 'x_' prefix).

    :param parser: argparse parser to extend.
    :return: the same parser, for chaining.
    """
    add = parser.add_argument
    # experiment specifics
    add('--x_model', type=str, default='pspnet', help='(pspnet | deeplabv3)')
    add('--x_segment_eval_classes_only', action="store_true", help="reduce the classes for the segmentor to the eval classes")
    # optimization
    add('--x_optimizer', type=str, default='sgd')
    add('--x_lr', type=float, default=0.01, help='initial learning rate for adam')
    add('--x_momentum', type=float, default=0.9, help='momentum component of the optimiser')
    add("--x_not_restore_last", action="store_true", help="if specified, do not restore last (FC) layers")
    add("--x_power", type=float, default=0.9, help="decay parameter to compute the learning rate")
    add("--x_weight_decay", type=float, default=0.0005, help="regularisation parameter for L2-loss")
    add("--x_ohem", action="store_true", help="use hard negative mining")
    add("--x_ohem_thres", type=float, default=0.6, help="choose the samples with correct probability under the threshold")
    add("--x_ohem_keep", type=int, default=200000, help="choose the samples with correct probability under the threshold")
    # checkpoint loading
    add('--x_load_path', type=str, default=None, help='load a model from specified folder')
    add('--x_cont_train', action='store_true', help='continue training with model from which_iter')
    add('--x_which_iter', type=int, default=0, help='load the model from specified iteration')
    add('--x_pretrained_path', type=str, default=None, help='load a pretrained model from specified path')
    add('--x_not_strict', action='store_true', help='whether checkpoint exactly matches network architecture')
    # ensemble loading
    add('--x_is_ensemble', action='store_true', help='if true, merge predictions from ensemble of two models')
    add('--x_load_path_2', type=str, default=None, help='load an extra model from specified folder')
    add('--x_which_iter_2', type=int, default=0, help='load the model from specified iteration')
    # input configuration
    add('--x_synthetic_dataset', action='store_true', help='training dataset is streaming seg/img pairs from trained generators')
    add('--x_semi', action='store_true', help='only img are generated')
    add('--x_duo', action='store_true', help='train from synthetic and real data')
    add('--x_duo_cond', action='store_true', help='use base and extra datasets to get cond codes')
    add('--x_cond_real_tgt', action='store_true', help='start from conditioning codes from real and tgt datasets')
    # unsupervised domain adaptation
    add('--x_advent', action='store_true', help='to train with adversarial-entropy uda')
    add('--x_advent_multi', action='store_true', help='if specified, discriminate at two stages')
    add('--x_advent_lr', type=float, default=0.0001, help='initial learning rate for adam')
    add('--x_advent_lambda_adv_final', type=float, default=0.01, help='param for adversarial loss on final seg')
    add('--x_advent_lambda_adv_inter', type=float, default=0.0002, help='param for adversarial loss on intermediate seg')
    # synthetic data pre-processing
    add('--x_sample_fixed_crop', type=int, nargs="+", default=None, help='if specified, apply a random crop of the given size')
    add('--x_sample_random_crop', action='store_true', help='if specified, zoom and apply a random crop while keeping original size')
    # segmentor plus
    add('--x_plus', action='store_true', help='to use segmentor plus')
    add('--x_separable_conv', action='store_true', help='to use separable conv in segmentor plus')
    add('--x_output_stride', type=int, default=16, help='output stride for segmentor plus')
    return parser
def initialize_extra_dataset(self, parser):
    """Register every extra-dataset option (all flags use the 'd_' prefix).

    :param parser: argparse parser to extend.
    :return: the same parser, for chaining.
    """
    add = parser.add_argument
    # input / output sizes
    add('--d_true_dim', type=int, default=1024, help='resolution of saved images')
    add('--d_true_ratio', type=float, default=1.0, help='ratio width/height of saved images, final width will be max_dim * aspect_ratio')
    add('--d_num_semantics', type=int, default=3, help='number of semantic classes including eventual unknown class')
    add('--d_semantic_labels', type=str, default=[], nargs="+", help='name of the semantic class for each index')
    add('--d_label_nc', type=int, default=None, help='new label for unknown class if there is any')
    # input configuration
    add('--d_dataroot', type=str, default='./datasets/cityscapes/')
    add('--d_dataset', type=str, default=None)
    add('--d_data_idx_type', type=str, default='both', help='(even | odd | both)')
    add('--d_has_tgt', action='store_true', help='if false, tgt cond overrides true cond')
    add('--d_estimated_cond', action='store_true', help='if true, teach a model to generate cond and sample from it')
    add('--d_no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')
    add('--d_resize_img', type=int, nargs="+", default=None, help='if specified, resize images once they are loaded')
    add('--d_resize_seg', type=int, nargs="+", default=None, help='if specified, resize segmentations once they are loaded')
    add('--d_max_zoom', type=float, default=1., help='parameter for augmentation method consisting in zooming and cropping')
    add('--d_fixed_top_centered_zoom', type=float, default=None, help='if specified, crop the image to the upper center part')
    add('--d_max_dataset_size', type=int, default=sys.maxsize, help='maximum # of samples allowed per dataset, if the dataset directory contains more than max_dataset_size, only a subset is loaded')
    # panoptic mode
    add('--d_load_panoptic', action='store_true', help='if true, loads both instance and semantic information from segmentation maps, otherwise only semantic information')
    add('--d_instance_type', type=str, default='center_offset', help='combination of (center_offset | (soft_)edge | density)')
    add('--d_things_idx', type=int, nargs="+", default=[], help='indexes corresponding to things (by opposition to stuff)')
    # display and checkpointing
    add('--d_colormat', type=str, default='', help='name of colormat to display semantic maps')
    # estimator
    add('--d_estimator_load_path', type=str, default=None, help='load an estimator model from specified folder')
    # evaluation
    add('--d_eval_idx', type=int, nargs="+", default=[], help="selected classes for evaluation")
    return parser
def update_defaults(self, opt, parser):
    """Adjust parser defaults after a first parse: derived dimensions,
    per-dataset presets (main and extra dataset), and generator norms.

    Called between two ``parse_args`` passes so the second pass picks up
    dataset-specific defaults without overriding explicit CLI values.

    :param opt: namespace from the first ``parse_args`` pass.
    :param parser: parser whose defaults are mutated in place.
    :return: the same parser, for chaining.
    """
    # for base options_spade
    # dim == -1 is the "unset" sentinel: fall back to max_dim
    if opt.dim == -1:
        parser.set_defaults(dim=opt.max_dim)
    if opt.seg_dim == -1:
        # seg_dim follows dim when set, otherwise max_dim
        seg_dim_default = opt.dim if opt.dim != -1 else opt.max_dim
        parser.set_defaults(seg_dim=seg_dim_default)
    # dataset presets for the main dataset
    if opt.dataset == "cityscapes":
        parser.set_defaults(dataroot="datasets/cityscapes")
        parser.set_defaults(num_semantics=35)
        parser.set_defaults(label_nc=34)
        parser.set_defaults(true_ratio=2.0)
        parser.set_defaults(i_num_upsampling_layers='more')
        parser.set_defaults(things_idx=[24, 25, 26, 27, 28, 29, 30, 31, 32, 33])
        parser.set_defaults(semantic_labels=SEM_CITYSCAPES)
        parser.set_defaults(colormat="cityscapes_color35")
        parser.set_defaults(true_dim=1024)
        parser.set_defaults(no_h_flip=True)
        parser.set_defaults(eval_idx=[7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33])
    if opt.dataset == "idd":
        parser.set_defaults(dataroot="datasets/idd")
        parser.set_defaults(resize_img=[720, 1280])
        parser.set_defaults(num_semantics=40)
        parser.set_defaults(label_nc=35)
        parser.set_defaults(true_ratio=1.77777777777)
        parser.set_defaults(i_num_upsampling_layers='more')
        parser.set_defaults(things_idx=[6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
        parser.set_defaults(semantic_labels=SEM_IDD)
        parser.set_defaults(colormat="idd_color40")
        parser.set_defaults(true_dim=720)
        parser.set_defaults(no_h_flip=True)
        parser.set_defaults(eval_idx=[0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
    if opt.dataset == "celeba":
        parser.set_defaults(dataroot="datasets/celeba")
        # parser.set_defaults(dataroot="/datasets_local/CelebAMask-HQ/CelebAMask-HQ")
        parser.set_defaults(num_semantics=19)
        parser.set_defaults(label_nc=0)
        parser.set_defaults(true_ratio=1.0)
        parser.set_defaults(i_num_upsampling_layers='normal')
        parser.set_defaults(semantic_labels=SEM_CELEBA)
        parser.set_defaults(colormat="celeba_color19")
        parser.set_defaults(true_dim=512)
        parser.set_defaults(no_h_flip=True)
        parser.set_defaults(aspect_ratio=1)
        parser.set_defaults(resize_img=[512, 512])
    # for extra dataset
    # same presets mirrored onto the 'd_' prefixed extra-dataset options
    if opt.d_dataset == "cityscapes":
        parser.set_defaults(d_dataroot="datasets/cityscapes")
        parser.set_defaults(d_num_semantics=35)
        parser.set_defaults(d_label_nc=34)
        parser.set_defaults(d_true_ratio=2.0)
        parser.set_defaults(d_things_idx=[24, 25, 26, 27, 28, 29, 30, 31, 32, 33])
        parser.set_defaults(d_semantic_labels=SEM_CITYSCAPES)
        parser.set_defaults(d_colormat="cityscapes_color35")
        parser.set_defaults(d_true_dim=1024)
        # NOTE(review): 'd_no_h_flip' / 'no_h_flip' are not declared in the
        # visible initialize_* methods — confirm they exist in the base
        # initialize(), otherwise set_defaults silently creates dead defaults.
        parser.set_defaults(d_no_h_flip=True)
        parser.set_defaults(d_eval_idx=[7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33])
    if opt.d_dataset == "idd":
        parser.set_defaults(d_dataroot="datasets/idd")
        parser.set_defaults(d_resize_img=[720, 1280])
        parser.set_defaults(d_num_semantics=40)
        parser.set_defaults(d_label_nc=35)
        parser.set_defaults(d_true_ratio=1.77777777777)
        parser.set_defaults(d_things_idx=[6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
        parser.set_defaults(d_semantic_labels=SEM_IDD)
        parser.set_defaults(d_colormat="idd_color40")
        parser.set_defaults(d_true_dim=720)
        parser.set_defaults(d_no_h_flip=True)
        parser.set_defaults(d_eval_idx=[0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
    # for img generator options_spade
    if opt.i_instance_type_for_img is None:
        # default the img-generator instance encoding to the dataset one
        parser.set_defaults(i_instance_type_for_img=opt.instance_type)
    # norm layer depends on the chosen generator backbone
    if opt.i_netG == "spade":
        parser.set_defaults(i_norm_G='spectralspadesyncbatch3x3')
    if opt.i_netG == "condconv":
        parser.set_defaults(i_norm_G='spectralbatch')
    if opt.i_netG == "pix2pixhd":
        parser.set_defaults(i_norm_G='instance')
    return parser
def gather_options(self):
    """Parse CLI options in up to three passes: raw parse, re-parse with
    dataset-derived defaults, and optionally re-parse with defaults loaded
    from a previously saved options file. Stores the parser on self."""
    # initialize parser with basic options_spade
    if not self.initialized:
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)
    # NOTE(review): if self.initialized is already True, `parser` is never
    # bound above and the next line raises NameError — presumably this
    # method is only called once per instance; confirm.
    # get options_spade
    opt = parser.parse_args()
    # modify some defaults based on parser options_spade
    parser = self.update_defaults(opt, parser)
    opt = parser.parse_args()
    # if there is opt_file, load it.
    # The previous default options_spade will be overwritten
    if opt.load_from_opt_file:
        parser = self.update_options_from_file(parser, opt)
        opt = parser.parse_args()
    self.parser = parser
    return opt
def print_options(self, opt, opt_type, opt_prefix=""):
    """Pretty-print one option namespace, annotating non-default values.

    :param opt: namespace to print.
    :param opt_type: section title (e.g. "Base").
    :param opt_prefix: prefix to prepend to each key when looking up the
        parser default (e.g. "s_" for seg-generator options).
    """
    def _pad(text, length=50):
        # number of dashes flanking the header text
        return '-' * max(0, length - len(text) // 2)
    header = opt_type + " Options"
    lines = [_pad(header) + ' ' + header + ' ' + _pad(header)]
    for key, value in sorted(vars(opt).items()):
        default = self.parser.get_default(opt_prefix + key)
        note = '' if value == default else '\t[default: %s]' % str(default)
        lines.append('{:>25}: {:<30}{}'.format(str(key), str(value), note))
    footer = opt_type + " End"
    lines.append(_pad(footer) + ' ' + footer + ' ' + _pad(footer))
    print('\n'.join(lines) + '\n')
def option_file_path(self, opt, signature, makedir=False):
    """Return the extension-less path of the options file for a run.

    :param opt: namespace providing ``save_path``.
    :param signature: unique run identifier (timestamp + name).
    :param makedir: when True, create the checkpoint directory first.
    """
    run_dir = os.path.join(opt.save_path, "checkpoints", signature)
    if makedir:
        # ensure the directory exists before anything is written into it
        utils.mkdirs(run_dir)
    return os.path.join(run_dir, 'opt')
def save_options(self, opt, signature):
    """Persist the merged options twice: a human-readable .txt (with
    non-default values annotated) and a .pkl for exact reloading."""
    base = self.option_file_path(opt, signature, makedir=True)
    with open(base + '.txt', 'wt') as txt_file:
        for key, value in sorted(vars(opt).items()):
            default = self.parser.get_default(key)
            note = '' if value == default else '\t[default: %s]' % str(default)
            txt_file.write('{:>25}: {:<30}{}\n'.format(str(key), str(value), note))
    with open(base + '.pkl', 'wb') as pkl_file:
        pickle.dump(opt, pkl_file)
def update_options_from_file(self, parser, opt):
    """Override parser defaults with values loaded from a saved options file.

    Only keys that exist in the saved namespace and differ from the current
    value are turned into new defaults.
    """
    saved = self.load_options(opt)
    for key, current in sorted(vars(opt).items()):
        if hasattr(saved, key):
            stored = getattr(saved, key)
            if current != stored:
                parser.set_defaults(**{key: stored})
    return parser
def load_options(self, opt):
    """Load the pickled options namespace written by ``save_options``.

    Bug fixes vs. the original:
    * ``option_file_path`` requires a positional ``signature`` argument, but
      the original call omitted it, so this method raised ``TypeError``
      whenever ``--load_from_opt_file`` was used. We now forward
      ``opt.signature`` when present, falling back to ``opt.name``
      (TODO(review): confirm this matches the signature used when saving —
      saved signatures are timestamped, so an explicit ``opt.signature`` is
      likely required in practice).
    * The pickle file handle was opened without ever being closed; a
      ``with`` block now releases it deterministically.
    """
    signature = getattr(opt, 'signature', None) or opt.name
    file_name = self.option_file_path(opt, signature, makedir=False)
    with open(file_name + '.pkl', 'rb') as opt_file:
        new_opt = pickle.load(opt_file)
    return new_opt
def split_options(self, opt):
    """Split the flat namespace into per-component namespaces by key prefix.

    Keys starting with 's_' / 'i_' / 'x_' / 'd_' go (prefix stripped) to the
    seg-generator / img-generator / segmentor / extra-dataset namespaces;
    everything else stays in the base namespace.
    """
    base_opt = Namespace()
    by_prefix = {
        "s_": Namespace(),  # seg generator
        "i_": Namespace(),  # img generator
        "x_": Namespace(),  # segmentor
        "d_": Namespace(),  # extra dataset
    }
    for key, value in sorted(vars(opt).items()):
        bucket = by_prefix.get(key[:2])
        if bucket is not None:
            setattr(bucket, key[2:], value)
        else:
            setattr(base_opt, key, value)
    return base_opt, by_prefix["s_"], by_prefix["i_"], by_prefix["x_"], by_prefix["d_"]
def copy_options(self, target_options, source_options, new_only=False):
    """Copy every attribute of ``source_options`` onto ``target_options``.

    :param new_only: when True, attributes already present on the target
        are left untouched (only new keys are copied).
    """
    for name, value in sorted(vars(source_options).items()):
        already_set = new_only and name in target_options
        if not already_set:
            setattr(target_options, name, value)
def override_num_semantics(self, opt):
    """Replace ``opt.num_semantics`` with the override value when one is set."""
    new_count = opt.override_num_semantics
    if new_count is None:
        return
    print(f"Overriding num_semantics from {opt.num_semantics} to {new_count}")
    opt.num_semantics = new_count
def set_cond_dim(self, opt):
    """Derive the conditioning-vector dimension from the conditioning mode.

    semantic -> num_semantics, instance -> num_things,
    panoptic -> sum of both, anything else -> 0.
    """
    mode = opt.cond_seg
    if mode == "semantic":
        opt.cond_dim = opt.num_semantics
    elif mode == "instance":
        opt.cond_dim = opt.num_things
    elif mode == "panoptic":
        opt.cond_dim = opt.num_semantics + opt.num_things
    else:
        opt.cond_dim = 0
def set_seg_size(self, opt):
    """Compute the channel count of the segmentation representation.

    Starts from the number of semantic classes; in panoptic mode, each
    requested instance encoding adds channels.
    """
    channels = opt.num_semantics
    if opt.panoptic:
        if "density" in opt.instance_type:
            channels += opt.num_things
        if "center_offset" in opt.instance_type:
            # 3 extra channels (presumably one center map + x/y offsets — confirm)
            channels += 3
        if "edge" in opt.instance_type:
            channels += 1
    opt.seg_size = channels
def parse(self, load_seg_generator=False, load_img_generator=False, load_segmentor=False,
          load_extra_dataset=False, save=False):
    """Top-level entry point: parse all options, split them per component,
    derive secondary fields (gpu ids, paths, sizes, ...) and return a dict
    of namespaces keyed by component name.

    :param load_seg_generator/load_img_generator/load_segmentor/load_extra_dataset:
        which component namespaces to keep (the others are returned as None).
    :param save: if True, persist the merged options to disk (rank 0 only).
    :return: dict with keys "base", "seg_generator", "img_generator",
        "segmentor", "extra_dataset".
    """
    opt = self.gather_options()
    # unique run identifier: timestamp + experiment name
    signature = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + "-" + opt.name
    base_opt, seg_generator_opt, img_generator_opt, segmentor_opt, extra_dataset_opt = self.split_options(opt)
    # only rank 0 saves/prints to avoid duplicated output in distributed runs
    if base_opt.local_rank == 0:
        if save:
            self.save_options(opt, signature)
        self.print_options(base_opt, "Base")
        if load_seg_generator:
            self.print_options(seg_generator_opt, "Segmentation Generator", "s_")
        if load_img_generator:
            self.print_options(img_generator_opt, "Image Generator", "i_")
        if load_segmentor:
            self.print_options(segmentor_opt, "Segmentor", "x_")
        if load_extra_dataset:
            self.print_options(extra_dataset_opt, "Extra dataset", "d_")
    # set gpu ids: parse the comma-separated string, drop negative entries
    str_ids = base_opt.gpu_ids.split(',')
    base_opt.gpu_ids = []
    for str_id in str_ids:
        # NOTE: `id` shadows the builtin within this loop
        id = int(str_id)
        if id >= 0:
            base_opt.gpu_ids.append(id)
    # set num of things
    base_opt.num_things = len(base_opt.things_idx)
    extra_dataset_opt.num_things = len(extra_dataset_opt.things_idx)
    # set additional paths
    base_opt.checkpoint_path = os.path.join(base_opt.save_path, "checkpoints", signature)
    base_opt.log_path = os.path.join(base_opt.save_path, "logs", signature)
    # progressive training requires power-of-two resolutions
    assert (base_opt.max_dim & (base_opt.max_dim - 1)) == 0, f"Max dim {base_opt.max_dim} must be power of two."
    # set width size: either derive from dim/aspect_ratio or take the fixed crop
    if base_opt.fixed_crop is None:
        base_opt.width_size = int(base_opt.dim * base_opt.aspect_ratio)
        base_opt.height_size = int(base_opt.width_size / base_opt.aspect_ratio)
    else:
        base_opt.height_size, base_opt.width_size = base_opt.fixed_crop
    # set semantic labels: placeholder names when the dataset provides none
    if len(base_opt.semantic_labels) == 0:
        base_opt.semantic_labels = ["noname"] * base_opt.num_semantics
    # set sem_conv: turn the flat [src, dst, src, dst, ...] list into a mapping
    if seg_generator_opt.sem_conv is not None:
        def pairwise(iterable):
            "s -> (s0, s1), (s2, s3), (s4, s5), ..."
            a = iter(iterable)
            return zip(a, a)
        seg_generator_opt.sem_conv = {i: j for i, j in pairwise(seg_generator_opt.sem_conv)}
    # set stuff idx: every semantic index that is not a "thing"
    base_opt.stuff_idx = [i for i in range(base_opt.num_semantics) if i not in base_opt.things_idx]
    # set signature
    base_opt.signature = signature
    # propagate base options onto each component namespace
    # (extra_dataset keeps its own values; only missing keys are filled in)
    self.copy_options(seg_generator_opt, base_opt)
    self.copy_options(img_generator_opt, base_opt)
    self.copy_options(segmentor_opt, base_opt)
    self.copy_options(extra_dataset_opt, base_opt, new_only=True)
    # set num semantics
    self.override_num_semantics(seg_generator_opt)
    # set cond dim
    self.set_cond_dim(seg_generator_opt)
    # set seg size
    self.set_seg_size(seg_generator_opt)
    self.base_opt = base_opt
    self.seg_generator_opt = seg_generator_opt if load_seg_generator else None
    self.img_generator_opt = img_generator_opt if load_img_generator else None
    self.segmentor_opt = segmentor_opt if load_segmentor else None
    self.extra_dataset_opt = extra_dataset_opt if load_extra_dataset else None
    self.opt = {"base": self.base_opt,
                "seg_generator": self.seg_generator_opt,
                "img_generator": self.img_generator_opt,
                "segmentor": self.segmentor_opt,
                "extra_dataset": self.extra_dataset_opt}
    return self.opt
|
11586431
|
from langid.langid import LanguageIdentifier, model
from googletrans import Translator
from limpieza import remover_acentos
def detectar_lenguaje(texto, devolver_proba=False):
    """
    Identify the language the input text is written in.

    :param texto: Input text.
    :type texto: str
    :param devolver_proba: If True, also return the confidence score of
        the identified language. Defaults to `False`.
    :type devolver_proba: bool, optional
    :return: (str) Language code following the
        `ISO 639-1 <https://es.wikipedia.org/wiki/ISO_639-1>`_ standard.
        If `devolver_proba = True`, returns a (language, probability) tuple.
    """
    # Build the identifier once and cache it on the function: loading the
    # langid model is expensive and the original rebuilt it on every call.
    identificador = getattr(detectar_lenguaje, "_identificador", None)
    if identificador is None:
        identificador = LanguageIdentifier.from_modelstring(model, norm_probs=True)
        detectar_lenguaje._identificador = identificador
    # classify() returns a (language, probability) tuple; the original code
    # issued the same call in both branches of the if/else.
    resultado = identificador.classify(texto)
    return resultado if devolver_proba else resultado[0]
def traducir_texto(texto, lenguaje_destino):
    """Translate the input text to the requested language.

    .. note::
        This function relies on the googletrans library, which wraps the
        Google Translate web API. An internet connection is required, a
        single text is limited to 15,000 characters, and availability is
        not guaranteed (HTTP 5xx errors usually mean the IP was blocked).
        For a stable service use the official
        `Google Cloud Translation API <https://cloud.google.com/translate/docs>`_.
        See also the
        `googletrans documentation <https://py-googletrans.readthedocs.io/en/latest/>`_.

    :param texto: Input text (str) or list of texts.
    :param lenguaje_destino: Target language. See the supported-languages
        section of the documentation for accepted values.
    :type lenguaje_destino: {'es', 'en', 'fr', 'ge'}
    :return: (str) Translated text, or a list of translated texts when the
        input is a list.
    """
    # Normalize the requested language to the two-letter code the API
    # expects (unknown values raise KeyError, as before).
    codigo_destino = dict_lenguajes_simplificado[dict_lenguajes[lenguaje_destino]]
    resultado = Translator().translate(texto, dest=codigo_destino)
    if isinstance(texto, str):
        return resultado.text
    return [traduccion.text for traduccion in resultado]
# Accepted spellings for each supported language.
# Currently limited to Spanish, English, German and French.
_ALIAS_GROUPS = {
    "spanish": ("es", "espanol", "esp", "spanish", "sp", "spa"),
    "english": ("en", "eng", "english", "ingles", "ing"),
    "german": ("ge", "de", "deu", "german", "aleman", "al", "ale"),
    "french": ("fr", "fra", "fre", "french", "frances"),
}
# Maps every accepted alias to the canonical (English) language name.
dict_lenguajes = {
    alias: canonical
    for canonical, aliases in _ALIAS_GROUPS.items()
    for alias in aliases
}
# Canonical language name -> two-letter ISO 639-1 code.
dict_lenguajes_simplificado = {
    "spanish": "es",
    "english": "en",
    "french": "fr",
    "german": "de",
}
# Canonical language name -> language code accepted by Tesseract OCR.
dict_tesseract = {
    "spanish": "spa",
    "english": "eng",
    "french": "fra",
    "german": "deu",
}
def definir_lenguaje(lenguaje, simplificado=True):
    """Helper - resolve a free-form language name to its canonical form.

    :param lenguaje: (str) Name of the language to resolve.
    :param simplificado: (bool) {True, False} Default: True. If True,
        return the two-letter code from dict_lenguajes_simplificado;
        otherwise return the canonical name from dict_lenguajes.
    :return: (str) Resolved language, or None when it is not recognized.
    """
    lenguaje = remover_acentos(lenguaje.lower())
    # dict.get replaces the unidiomatic "in dict.keys()" membership test
    # followed by a second lookup; None is kept for unknown languages.
    leng = dict_lenguajes.get(lenguaje)
    if leng is not None and simplificado:
        leng = dict_lenguajes_simplificado[leng]
    return leng
def lenguaje_tesseract(lenguaje):
    """Helper - map a language name to its Tesseract OCR equivalent.

    :param lenguaje: (str) Name of the language to resolve.
    :return: (str) Tesseract language code, or None when the input
        language is not recognized.
    """
    lenguaje = remover_acentos(lenguaje.lower())
    # dict.get replaces the unidiomatic "in dict.keys()" membership test
    # followed by a second lookup; None is kept for unknown languages.
    leng = dict_lenguajes.get(lenguaje)
    if leng is not None:
        leng = dict_tesseract[leng]
    return leng
|
11586505
|
from etk.timeseries.annotation import block_detector
from etk.timeseries.annotation import date_utilities
from etk.timeseries.annotation import utility
import logging
class parsed_table:
    """One time-series table detected on a spreadsheet.

    A table is created as soon as a time-header block is found on a sheet.
    It then determines the header orientation, locates the data block,
    collects label blocks, and can serialize the resulting annotation with
    :meth:`create_json`.
    """
    # we define a table when we find a time header for it
    def __init__(self, time_header, parent_sheet):
        self.time_block = time_header          # rectangular block holding the dates
        self.parent_sheet = parent_sheet       # sheet object this table belongs to
        self.time_orientation = self.find_orientation()
        self.find_series_block() # location of time series data
        self.labels = []                 # label blocks attached to this table
        self.label_names = {}            # row/col index -> label name
        self.label_backfill_bool = {}    # row/col index -> True when label cells are merged
        self.borders = None              # overall table borders, set by find_table_borders()
        self.granularity = None
        self.find_granularity()
        self.back_mode_time = False      # True once empty time cells were back-filled
        self.break_points = []
        self.time_block_length = self.find_time_block_length()
        self.offset = 0                  # spacing caused by merged time cells (0 = none)
    # check if there exist consecutive rows in the time block which are parsed to a unique time
    def has_equivalent_time_rows(self):
        """Return True when two consecutive rows of the time block parse to equal dates."""
        for i in range(self.time_block.upper_row, self.time_block.lower_row-1):
            p_date1 = date_utilities.parse_row_as_date(self.parent_sheet.raw_values[i][self.time_block.left_col:self.time_block.right_col])
            p_date2 = date_utilities.parse_row_as_date(self.parent_sheet.raw_values[i+1][self.time_block.left_col:self.time_block.right_col])
            if p_date1.equals(p_date2):
                return True
        return False
    def has_equivalent_time_columns(self):
        """Return True when two consecutive columns of the time block parse to equal dates."""
        for i in range(self.time_block.left_col, self.time_block.right_col-1):
            # NOTE(review): tb is never used and rebuilds the column list needlessly.
            tb = (list(self.parent_sheet.raw_values.columns()))
            p_date1 = date_utilities.parse_row_as_date(list(self.parent_sheet.raw_values.columns())[i][self.time_block.upper_row:self.time_block.lower_row])
            p_date2 = date_utilities.parse_row_as_date(list(self.parent_sheet.raw_values.columns())[i+1][self.time_block.upper_row:self.time_block.lower_row])
            if p_date1.equals(p_date2):
                return True
        return False
    ##important: using this offset list for granularity would be a good idea. specially when they are irregular
    # also it can be used for them to create annotation for files with irregular merged cells
    def has_regular_offset(self):
        """Check whether runs of equal consecutive dates all have the same length.

        Returns (True, run_length) when the repetition pattern is regular,
        (False, 0) otherwise (including when no repetition exists at all).
        """
        offsets = []
        if self.time_orientation == utility.row_orientation:
            current_pattern = 0
            for i in range(self.time_block.left_col, self.time_block.right_col - 1):
                if date_utilities.parse_row_as_date(list(self.parent_sheet.raw_values.columns())[i][self.time_block.upper_row:self.time_block.lower_row]).equals(
                        date_utilities.parse_row_as_date(list(self.parent_sheet.raw_values.columns())[i + 1][self.time_block.upper_row:self.time_block.lower_row])):
                    current_pattern += 1
                else:
                    offsets.append(current_pattern)
                    current_pattern = 0
        else:
            current_pattern = 0
            for i in range(self.time_block.upper_row, self.time_block.lower_row - 1):
                if date_utilities.parse_row_as_date(self.parent_sheet.raw_values[i][self.time_block.left_col:self.time_block.right_col]).equals(date_utilities.parse_row_as_date(self.parent_sheet.raw_values[i + 1][self.time_block.left_col:self.time_block.right_col])):
                    current_pattern += 1
                else:
                    offsets.append(current_pattern)
                    current_pattern = 0
        # check for regularity of the given offset
        for i in range(len(offsets)-1):
            if offsets[i] != offsets[i+1]:
                return False, 0
        # Case when offset list is empty. Happens when all the dates are same in a range. Eg. [2007, 2007, 2007]
        if len(offsets) == 0:
            return False, 0
        return True, offsets[0]
    # check for the effect of merged time cells. non zero offset means multiple tables. Now only regular case is supported
    def check_offset(self):
        """Set self.offset when the time block repeats dates with a regular spacing."""
        if self.time_orientation == utility.row_orientation:
            if self.has_equivalent_time_columns():
                regular, offset = self.has_regular_offset()
                if regular:
                    self.offset = offset
        else:
            if self.has_equivalent_time_rows():
                regular, offset = self.has_regular_offset()
                if regular:
                    self.offset = offset
    def find_granularity(self):
        """Compute and store the time granularity over the whole time block."""
        if self.time_orientation == utility.column_orientation:
            self.granularity = date_utilities.find_granularity(list(self.parent_sheet.raw_values.rows())[self.time_block.upper_row:self.time_block.lower_row],
                                                               self.time_block.left_col, self.time_block.right_col)
        else:
            self.granularity = date_utilities.find_granularity(
                list(self.parent_sheet.raw_values.columns())[self.time_block.left_col:self.time_block.right_col],
                self.time_block.upper_row, self.time_block.lower_row)
        logging.info("Granularity of %s is %s", str(self.time_block), self.granularity)
    # find the granularity of the specified interval of the time block
    def find_bounded_granularity(self, start, end):
        """Compute granularity for the [start, end) slice of the time block and return it.

        Note: this also overwrites self.granularity as a side effect.
        """
        if self.time_orientation == utility.column_orientation:
            self.granularity = date_utilities.find_granularity(list(self.parent_sheet.raw_values.rows())[start:end],
                                                               self.time_block.left_col, self.time_block.right_col)
        else:
            self.granularity = date_utilities.find_granularity(list(self.parent_sheet.raw_values.columns())[start:end],self.time_block.upper_row, self.time_block.lower_row)
        logging.info("Granularity of %s is %s", str(self.time_block), self.granularity)
        return self.granularity
    def get_orientation(self):
        """Return the time-header orientation (row or column constant from utility)."""
        return self.time_orientation
    def get_granularity(self):
        """Return the last computed time granularity."""
        return self.granularity
    def add_label(self, label_block):
        """Attach a label block to the table and widen the data block if needed."""
        self.labels.append(label_block)
        self.update_data_block(label_block)
    # detecting the number/empty blocks which are data of the time serieses
    def find_series_block(self):
        """Locate the numeric/empty region holding the series data; sets data_start/data_end."""
        start, end = block_detector.find_data_block(self.parent_sheet.raw_values, self.parent_sheet.get_tags(), self.time_block.upper_row, self.time_block.lower_row, self.time_block.left_col,
                                                    self.time_block.right_col, self.time_orientation)
        self.data_start = start
        self.data_end = end
    # if there are some labels found and data was not found for them then should update the location of time series data
    def update_data_block(self, label_block):
        """Extend data_end so that the data block covers the given label block."""
        if self.time_orientation == utility.row_orientation:
            if label_block.lower_row > self.data_end:
                self.data_end = label_block.lower_row
        else:
            if label_block.right_col > self.data_end:
                self.data_end = label_block.right_col
    # fill the empty cells in the time block.[the back_fill mode case] (should be based on orientation)
    def fill_time_block(self):
        """Copy the closest dated cell into each empty time cell (back-fill mode)."""
        for i in range(self.time_block.upper_row, self.time_block.lower_row):
            for j in range(self.time_block.left_col, self.time_block.right_col):
                if self.parent_sheet.get_tags()[i][j] == {utility.empty_cell}:
                    # go and find the closest full cell and get the same value for that
                    row, col = block_detector.find_closest_date_cell(self.parent_sheet.get_tags(), i, j, self.time_block)
                    self.parent_sheet.raw_values[i, j] = self.parent_sheet.raw_values[row, col]
                    self.parent_sheet.classified_tags[i][j] = self.parent_sheet.classified_tags[row][col]
                    self.back_mode_time = True
    def find_orientation(self):
        """Determine whether the time header runs along a row or a column.

        A 1-wide block is column-oriented, a 1-tall block row-oriented;
        otherwise the first row is parsed to decide.
        """
        self.fill_time_block()
        if self.time_block.right_col - self.time_block.left_col == 1:
            return utility.column_orientation
        if self.time_block.lower_row - self.time_block.upper_row == 1:
            return utility.row_orientation
        else:
            logging.info("finding orientation in the worst case")
            # if row is a meaningful absolute time then it can be in columns
            if date_utilities.valid_time_row(self.parent_sheet.raw_values[self.time_block.upper_row][self.time_block.left_col:self.time_block.right_col])[0]:
                # NOTE(review): this error is logged on the *valid* branch, which
                # looks inverted — confirm the intended message/placement.
                logging.error("time row was not consistent!")
                return utility.column_orientation
            return utility.row_orientation
    def find_table_borders(self, tags):
        """Compute the table's outer borders and store them in self.borders.

        Starts from the union of the time block, label blocks and data block,
        then grows each side until a fully empty row/column is reached.
        """
        table_borders = block_detector.rectangular_block()
        table_borders.left_col = self.time_block.left_col
        table_borders.right_col = self.time_block.right_col
        table_borders.upper_row = self.time_block.upper_row
        table_borders.lower_row = self.time_block.lower_row
        for l_block in self.labels:
            table_borders.left_col = min(table_borders.left_col , l_block.left_col)
            table_borders.right_col = max(table_borders.right_col, l_block.right_col)
            table_borders.upper_row = min(table_borders.upper_row, l_block.upper_row)
            table_borders.lower_row = max(table_borders.lower_row, l_block.lower_row)
        if self.time_orientation == utility.column_orientation:
            table_borders.left_col = min(table_borders.left_col, self.data_start)
            table_borders.right_col = max(table_borders.right_col, self.data_end)
        else:
            table_borders.upper_row = min(table_borders.upper_row, self.data_start)
            table_borders.lower_row = max(table_borders.lower_row, self.data_end)
        # continue from each side until reaching a place where it is totally empty
        while table_borders.upper_row >= 0:
            if block_detector.is_empty_row(tags[table_borders.upper_row][table_borders.left_col:table_borders.right_col]):
                break
            table_borders.upper_row -= 1
        while table_borders.lower_row < len(tags):
            if block_detector.is_empty_row(tags[table_borders.lower_row][table_borders.left_col:table_borders.right_col]):
                break
            table_borders.lower_row += 1
        while table_borders.left_col >= 0:
            if block_detector.is_empty_col(tags, table_borders.upper_row, table_borders.lower_row, table_borders.left_col):
                break
            table_borders.left_col -= 1
        # NOTE(review): unlike the other three loops, the right border has no
        # bound check — is_empty_col is presumably safe past the sheet edge;
        # confirm.
        while True:
            if block_detector.is_empty_col(tags, table_borders.upper_row, table_borders.lower_row, table_borders.right_col):
                break
            table_borders.right_col += 1
        # step back inside the table after overshooting onto the empty row/col
        table_borders.upper_row += 1
        table_borders.left_col += 1
        self.borders = table_borders
    # Find label header if it is present in table
    def find_label_names(self, tags):
        """Assign a name to every label row/column; synthetic names ("label",
        "label_1", ...) are used when no text header cell is found."""
        if self.time_orientation == utility.row_orientation:
            label_row_num = self.time_block.upper_row
            unknown_label_counter = 0
            for x in self.labels:
                for i in range(x.left_col, x.right_col):
                    if utility.text_cell in tags[label_row_num][i]:
                        label_name = self.parent_sheet.raw_values[label_row_num, i]
                        self.label_names[i] = "_".join(utility.data_to_string(label_name).split(" ")).lower()
                    else:
                        #TODO: Search for label
                        suffix = ""
                        if unknown_label_counter != 0:
                            suffix = "_" + str(unknown_label_counter)
                        self.label_names[i] = "label" + suffix
                        unknown_label_counter += 1
                    self.label_backfill_bool[i] = self.is_merged_label(i, x.upper_row, x.lower_row)
        else:
            label_counter = 0
            for x in self.labels:
                for i in range(x.upper_row, x.lower_row):
                    suffix = ""
                    if label_counter != 0:
                        suffix = "_" + str(label_counter)
                    self.label_names[i] = "label" + suffix
                    label_counter += 1
                    self.label_backfill_bool[i] = self.is_merged_label(i, x.left_col, x.right_col)
    #Check if label block has multiple cells merged together
    def is_merged_label(self, row, col_start, col_end):
        """Return True when any cell of the label row/column span is merged."""
        if self.time_orientation == utility.row_orientation:
            for i in range(col_start, col_end):
                if self.parent_sheet.get_merged_block(i, row):
                    return True
            return False
        else:
            for i in range(col_start, col_end):
                if self.parent_sheet.get_merged_block(row, i):
                    return True
            return False
    def create_json(self):
        """Serialize the table into a list of time-series region dicts.

        One region is emitted per break-point interval; regions with 15 or
        more metadata entries are considered spurious and skipped.
        """
        time_serieses = []
        logging.info("BREAK POINTS: " + str(self.break_points))
        for point in range(len(self.break_points)-1):
            # Stores the set of labels which are already written to the json.
            # Used here to avoid repetition
            seen_labels = {}
            time_series_region_json = dict()
            time_series_region_json['orientation'] = self.time_orientation
            time_series_region_json['metadata'] = []
            logging.info("TIME BLOCK: " + str(self.time_block))
            if self.time_orientation == utility.row_orientation:
                if self.offset == 0:
                    time_series_region_json['locs'] = parsed_table.str_representation(self.time_block.left_col,
                                                                                      self.time_block.right_col,
                                                                                      utility.column_orientation)
                else:
                    time_series_region_json['locs'] = parsed_table.str_representation_offset(self.time_block.left_col,
                                                                                             self.time_block.right_col,
                                                                                             utility.column_orientation, self.offset+1)
                for x in self.labels:
                    logging.info("LABEL: " + str(x))
                    for i in range(x.left_col, x.right_col):
                        if i in seen_labels:
                            continue
                        else:
                            seen_labels[i] = True
                        meta = {"source": utility.column_orientation, "name": self.label_names[i],"loc": parsed_table.str_representation(i, i + 1, utility.column_orientation)}
                        if self.label_backfill_bool[i] == True:
                            meta["mode"] = "backfill"
                        time_series_region_json['metadata'].append(meta)
                time_series_region_json['times'] = {
                    "locs": parsed_table.str_representation(self.time_block.upper_row, self.time_block.lower_row,utility.row_orientation),"granularity": self.granularity}
                logging.error("time series region" + str(self.data_start) + "' " + str(self.data_end))
            else:
                if self.offset == 0:
                    time_series_region_json['locs'] = parsed_table.str_representation(self.break_points[point],self.break_points[point+1],utility.row_orientation)
                else:
                    time_series_region_json['locs'] = parsed_table.str_representation_offset(self.break_points[point],
                                                                                             self.break_points[point + 1],
                                                                                             utility.row_orientation, self.offset+1)
                for x in self.labels:
                    logging.info("LABEL: " + str(x))
                    for i in range(x.upper_row, x.lower_row):
                        if i in seen_labels:
                            continue
                        else:
                            seen_labels[i] = True
                        meta = {"source": utility.row_orientation, "name": self.label_names[i],"loc": parsed_table.str_representation(i, i + 1, utility.row_orientation)}
                        if self.label_backfill_bool[i] == True:
                            meta["mode"] = "backfill"
                        time_series_region_json['metadata'].append(meta)
                # if self.offset == 0:
                time_series_region_json['times'] = {
                    "locs": parsed_table.str_representation(self.time_block.left_col, self.time_block.right_col,utility.column_orientation),
                    "granularity": self.find_bounded_granularity(self.break_points[point], self.break_points[point+1])}
                # else:
                #     time_series_region_json['times'] = {
                #         "locs": parsed_table.str_representation_offset(self.time_block.left_col, self.time_block.right_col,
                #                                                 utility.column_orientation, self.offset+1),
                #         "granularity": self.find_bounded_granularity(self.break_points[point],
                #                                                 self.break_points[point + 1])}
                if self.back_mode_time == True:
                    time_series_region_json['times']['back_fill_mode'] = True
                logging.error("time series region" + str(self.data_start) + "' " + str(self.data_end))
            time_series_region_json[self.time_orientation + 's'] = parsed_table.str_representation(self.data_start,self.data_end,self.time_orientation)
            if len(time_series_region_json['metadata']) >= 15:
                # NOTE(review): logging.error is called with positional args but no
                # %-style placeholders in the message; only the first string is
                # formatted — the extra args are dropped/raise depending on handler.
                logging.error("Time series region: ", time_series_region_json[self.time_orientation + 's'], " ", time_series_region_json['locs'], " is likely not valid.")
                continue
            time_serieses.append(time_series_region_json)
        return time_serieses
    def find_time_block_length(self):
        """Return the length of the time header along its longer dimension."""
        time_block = self.time_block
        time_header_length = max(time_block.lower_row - time_block.upper_row, time_block.right_col - time_block.left_col)
        return time_header_length
    # string representation of the given interval
    @classmethod
    def str_representation(cls, start, end, orientation):
        """Format a 0-based [start, end) interval as an Excel-style range string.

        Rows become 1-based numbers ("[2:5]"); columns become letters ("[B:D]").
        """
        interval = "["
        if orientation == utility.row_orientation:
            if start+1 == end:
                interval += str(start+1) + "]"
            else:
                interval = interval + str(start+1) + ":" + str(end) + "]"
        else:
            if start+1 == end:
                interval += parsed_table.get_excel_column_name(start) + "]"
            else:
                interval = interval + parsed_table.get_excel_column_name(start) + ":" + parsed_table.get_excel_column_name(end-1) + "]"
        return interval
    @classmethod
    def str_representation_offset(cls, start, end, orientation, offset):
        """Like str_representation, but with a step ("[start:offset:end]") for
        tables whose time cells repeat with a regular spacing."""
        interval = "["
        if orientation == utility.row_orientation:
            if start+1 == end:
                interval += str(start+1) + "]"
            else:
                interval = interval + str(start+1) + ":" +str(offset)+":"+ str(end) + "]"
        else:
            if start+1 == end:
                interval += parsed_table.get_excel_column_name(start) + "]"
            else:
                interval = interval + parsed_table.get_excel_column_name(start) + ":" +str(offset)+":"+ parsed_table.get_excel_column_name(end-1) + "]"
        return interval
    @classmethod
    def get_excel_column_name(cls, idx):
        """Convert a 0-based column index to an Excel column name (0 -> 'A')."""
        if idx <= 25:
            return chr(ord('A') + idx)
        # NOTE(review): under Python 3 `idx/26` is float division, so the
        # recursive call passes a float and chr() raises for idx > 25; the
        # Python 2 behavior needs `idx//26 - 1` here — confirm and fix.
        return parsed_table.get_excel_column_name(idx/26 - 1) + chr(ord('A') + idx%26)
    @classmethod
    def is_valid_time_header(self, time_block):
        """Pre-check: reject 1x1 or degenerate (empty-dimension) time blocks."""
        # NOTE(review): declared @classmethod but the first parameter is
        # named self; it is only used as a plain namespace here.
        if time_block.upper_row + 1 == time_block.lower_row and time_block.left_col + 1 == time_block.right_col:
            return False
        if time_block.upper_row + 1 > time_block.lower_row or time_block.left_col + 1 > time_block.right_col:
            # NOTE(review): logging.warn is deprecated in favor of logging.warning.
            logging.warn("Invalid time header skipping")
            return False
        return True
    #Check if two blocks overlap each other
    def is_overlapping(self, table_borders, date_block):
        """Return True when the two rectangular blocks intersect."""
        if date_block.left_col >= table_borders.right_col:
            return False
        if date_block.right_col <= table_borders.left_col:
            return False
        if date_block.upper_row >= table_borders.lower_row:
            return False
        if date_block.lower_row <= table_borders.upper_row:
            return False
        return True
    def date_alignment_measure(self, table):
        """Score (0..1) how well this time block aligns with another table's.

        Returns 0 for mismatched orientations; otherwise an F1-style score
        over the overlap of the two header intervals.
        """
        if self.time_orientation != table.get_orientation():
            return 0
        old_start = old_end = new_start = new_end = -1
        if self.time_orientation == utility.row_orientation:
            old_start = table.time_block.left_col
            old_end = table.time_block.right_col
            new_start = self.time_block.left_col
            new_end = self.time_block.right_col
        else:
            old_start = table.time_block.upper_row
            old_end = table.time_block.lower_row
            new_start = self.time_block.upper_row
            new_end = self.time_block.lower_row
        # Computing score similar to F1 score for alignment
        # KNOWN TIME BLOCK:      |-----------|-----------------|                |
        # UNKNOWN TIME BLOCK:    |    FN     |-------TP--------|-------FP-------|
        # FN - False Negative
        # TP - True Positive
        # FP - False Positive
        # NOTE(review): disjoint intervals give TP < 0 and TP+FP / TP+FN may be
        # 0, which would divide by zero below — presumably callers only invoke
        # this on overlapping blocks; confirm.
        TP = min(old_end, new_end) - max(old_start, new_start)
        FP = max(0, new_end - old_end) + max(0, old_start - new_start)
        FN = max(0, old_end - new_end) + max(0, new_start - old_start)
        P = float(TP)/(TP + FP)
        R = float(TP)/(TP + FN)
        if P + R != 0:
            score = 2 * P * R / (P + R)
        else:
            score = 0
        return score
    #Validate time block
    def is_valid_time_header_post(self, known_tables):
        """Post-check a candidate time header against previously found tables."""
        #Heuristics to check whether series of numbers is a date
        #H1: Date block has a different orientation from previous date blocks
        #H2: Date block is shorter compared to previous blocks
        #H3: Check surroundings of date blocks, position in table
        #Case 1: Date block is outside of previously detected tables => probably a date
        #Case 2: dates are aligned but inside previous table => multiple tables are merged to form a single table structure
        #Case 3: dates are not aligned and inside previous table => It's not actually a date.
        different_orientation = True
        short_time_block = True
        table_overlap = False
        max_date_alignment_measure = 0
        if len(known_tables) == 0:
            different_orientation = False
            short_time_block = False
        for table in known_tables:
            #H1
            if self.time_orientation == table.get_orientation():
                different_orientation = False
            #H3
            if self.is_overlapping(table.borders, self.time_block):
                table_overlap = True
                date_alignment_measure = self.date_alignment_measure(table)
                max_date_alignment_measure = max(max_date_alignment_measure, date_alignment_measure)
        #H2
        if self.time_block_length >= 5:
            short_time_block = False
        else:
            for table in known_tables:
                if table.time_block_length < 5:
                    short_time_block = False
        if table_overlap == False:
            return True
        # NOTE(review): this compares the *last* computed alignment measure,
        # while max_date_alignment_measure is accumulated but never used —
        # possibly the max was intended here; confirm.
        elif date_alignment_measure >= 0.666:
            logging.info("Previous table border may have been computed incorrectly")
            return True
        elif different_orientation == True:
            return False
        elif short_time_block == True:
            logging.info("Highly unlikely to be a time block, but not sure")
            return False
        else:
            logging.info("Unexpected date block found.")
            return True
|
11586548
|
import time
import tensorflow as tf
import numpy as np
from models.base import Model
class JointContextModel(Model):
    """Entity Embeddings and Posterior Calculation.

    Combines the bi-LSTM text encoding and the sparse-coherence encoding
    into one joint context representation through a stack of ReLU layers
    with dropout.
    """
    def __init__(self, num_layers, context_encoded_dim, text_encoded,
                 coherence_encoded, scope_name, device, dropout_keep_prob):
        ''' Get context text and coherence encoded and combine into one repr.
        Input:
          text_encoded: Encoded vector for bi-LSTM. [context_encoded_dim]
          coherence_encoded: Encoded vector from sparse coherence FF [context_encoded_dim]
        Output:
          joint_encoded_vector: [context_encoded_dim]
        '''
        self.num_layers = num_layers
        self.dropout_keep_prob = dropout_keep_prob
        # NOTE(review): `as s` / `as d` bindings are unused.
        with tf.variable_scope(scope_name) as s:
            with tf.device(device) as d:
                # Projection from the concatenated [text; coherence] vector
                # back down to context_encoded_dim.
                self.joint_weights = tf.get_variable(
                    name="joint_context_layer",
                    shape=[2*context_encoded_dim, context_encoded_dim],
                    initializer=tf.random_normal_initializer(mean=0.0,
                                                             stddev=1.0/(100.0)))
                # NOTE(review): tf.concat(1, values) is the pre-TF-1.0 argument
                # order (axis first); TF >= 1.0 expects tf.concat(values, axis).
                self.text_coh_concat = tf.concat(
                    1, [text_encoded, coherence_encoded], name='text_coh_concat')
                context_encoded = tf.matmul(self.text_coh_concat, self.joint_weights)
                context_encoded = tf.nn.relu(context_encoded)
                # Create the additional hidden-layer weights first...
                self.hidden_layers = []
                for i in range(1, self.num_layers):
                    weight_matrix = tf.get_variable(
                        name="joint_context_hlayer_"+str(i),
                        shape=[context_encoded_dim, context_encoded_dim],
                        initializer=tf.random_normal_initializer(
                            mean=0.0,
                            stddev=1.0/(100.0)))
                    self.hidden_layers.append(weight_matrix)
                # ...then apply them: dropout -> matmul -> ReLU per layer.
                for i in range(1, self.num_layers):
                    context_encoded = tf.nn.dropout(context_encoded, keep_prob=self.dropout_keep_prob)
                    context_encoded = tf.matmul(context_encoded, self.hidden_layers[i-1])
                    context_encoded = tf.nn.relu(context_encoded)
                # Final joint context representation (with output dropout).
                self.context_encoded = tf.nn.dropout(context_encoded, keep_prob=self.dropout_keep_prob)
|
11586567
|
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import pandas as pd
# Load the Kaggle MNIST training set: column 'label' holds the digit,
# the remaining 784 columns hold the pixel values.
'''加载数据'''
mnist = pd.read_csv(r'data/train.csv')
train_labels = mnist['label']
train_images = mnist.iloc[:,1:]
# NOTE(review): astype returns a new object; this result is discarded, so the
# cast has no effect.
train_images.astype(np.float)
# Scale pixel values from [0, 255] to [0, 1].
train_images = np.multiply(train_images, 1.0/255.0)
# NOTE(review): DataFrame.as_matrix() was removed in newer pandas; use
# .values / .to_numpy() there.
train_images = train_images.as_matrix()
train_labels = train_labels.as_matrix()
def compute_accuracy(xs, ys, X, y, keep_prob, sess, prediction):
    """Run `prediction` on X and return the fraction of correct labels.

    :param xs: placeholder for the input images (feed key).
    :param ys: placeholder for the labels (kept for interface
        compatibility; no longer fed, since accuracy is computed in numpy).
    :param X: input batch, flattened images.
    :param y: one-hot ground-truth labels for the batch.
    :param keep_prob: dropout placeholder (fed 1.0 for evaluation).
    :param sess: active TensorFlow session.
    :param prediction: softmax output tensor of the network.
    :return: accuracy in [0, 1] as a float.
    """
    y_pre = sess.run(prediction, feed_dict={xs: X, keep_prob: 1.0})
    # Compare predicted vs. true classes in numpy instead of creating new
    # tf.equal/tf.reduce_mean ops on every call: the original grew the graph
    # each invocation and ran a second, redundant sess.run.
    return float(np.mean(np.argmax(y_pre, 1) == np.argmax(y, 1)))
def weight_variable(shape):
    """Return a tf.Variable of the given shape, truncated-normal initialized (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Return a tf.Variable of the given shape, constant-initialized to 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of input `x` with kernel `W`, stride 1, SAME padding.

    strides[0] and strides[3] must be 1; the middle two entries move the
    window one step along each spatial dimension.
    """
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 (halves each spatial dimension)."""
    return tf.nn.max_pool(
        x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Epoch/position bookkeeping mutated by next_batch() via `global`.
# (The "compeleted" typo must be kept: next_batch declares exactly this name.)
epochs_compeleted = 0
index_in_epoch = 0
def cnn():
    """Train a small 2-conv-layer CNN on the loaded MNIST data and write
    batch predictions for data/test.csv to my_prediction.csv."""
    #mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    xs = tf.placeholder(tf.float32,[None,784])
    ys = tf.placeholder(tf.float32,[None,10])
    keep_prob = tf.placeholder(tf.float32)
    # -1 leaves the batch dimension unconstrained; the trailing 1 is the
    # channel count (grayscale input; RGB would be 3).
    x_image = tf.reshape(xs,[-1,28,28,1])
    # conv layer1
    W_conv1 = weight_variable([5,5,1,32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # conv layer2
    W_conv2 = weight_variable([5,5,32,64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2)+b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # Flatten 7x7x64 feature maps, then two fully connected layers.
    h_pool2_flat = tf.reshape(h_pool2, [-1,7*7*64])
    W_fc1 = weight_variable([7*7*64,1024])
    b_fc1 = bias_variable([1024])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)
    h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)
    W_fc2 = weight_variable([1024,10])
    b_fc2 = bias_variable([10])
    prediction = tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2)
    predict = tf.argmax(prediction, 1)
    # NOTE(review): log(softmax) is numerically unstable; TF's
    # softmax_cross_entropy_with_logits would be safer here.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction),reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
    sess = tf.Session()
    # NOTE(review): initialize_all_variables is the deprecated pre-TF-1.0
    # name for global_variables_initializer.
    sess.run(tf.initialize_all_variables())
    for i in range(2000):
        batch_xs,batch_ys = next_batch(mnist, batch_size=100)
        sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    mnist_test = pd.read_csv(r'data/test.csv')
    # NOTE(review): astype returns a new object; this result is discarded.
    mnist_test.astype(np.float)
    mnist_test = np.multiply(mnist_test,1.0/255.0)
    X = mnist_test.as_matrix()
    BATCH_SIZE = 100
    predictions = np.zeros(mnist_test.shape[0])
    for i in range(mnist_test.shape[0]//BATCH_SIZE): # predict batch by batch to stay within memory (~4 GB here)
        predictions[i*BATCH_SIZE : (i+1)*BATCH_SIZE] = sess.run(predict,feed_dict={xs:X[i*BATCH_SIZE : (i+1)*BATCH_SIZE],keep_prob:1.0})
    result = pd.DataFrame(data={'ImageId':range(1,X.shape[0]+1),'Label':predictions.astype(np.int32)})
    result.to_csv(r'my_prediction.csv',index=False)
    #np.savetxt('submission_softmax.csv',
    #np.c_[range(1,len(test_images)+1),predicted_lables],
    #delimiter=',',
    #header = 'ImageId,Label',
    #comments = '',
    #fmt='%d')
'''数据的映射,例如1-->[0,1,0,0,0,0,0,0,0,0]'''
def dense_to_one_hot(label_dense, num_classes):
    """Convert a 1-D array of class indices to a one-hot matrix.

    e.g. with num_classes=10, label 1 -> [0,1,0,0,0,0,0,0,0,0]

    :param label_dense: array of class labels, shape (n,). Float-typed
        label arrays are accepted and cast to int for indexing.
    :param num_classes: number of classes (width of each one-hot row).
    :return: float ndarray of shape (n, num_classes).
    """
    num_labels = label_dense.shape[0]
    # Row i's hot position lands at flat index i*num_classes + label[i].
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    # astype(int): .flat fancy indexing rejects float indices, so the
    # original broke when labels arrived as a float array.
    labels_one_hot.flat[index_offset + label_dense.ravel().astype(int)] = 1
    return labels_one_hot
'''使用SGD随机梯度下降,所以指定next batch的训练集'''
def next_batch(mnist,batch_size):
    """Return the next (images, one-hot labels) training batch for SGD.

    Maintains a cursor in module-level state; when an epoch is exhausted the
    whole training set is reshuffled and the cursor restarts.

    :param mnist: training DataFrame (only its row count is used here).
    :param batch_size: number of examples per batch; must not exceed the
        dataset size.
    :return: (images, one-hot labels) slices of the global training arrays.
    """
    num_examples = mnist.shape[0]
    # Both the arrays and the cursor live at module level and are rebound here.
    global train_images
    global train_labels
    global index_in_epoch
    global epochs_compeleted
    start = index_in_epoch
    index_in_epoch += batch_size
    if index_in_epoch > num_examples:
        # Epoch finished: reshuffle images and labels with the same
        # permutation and restart from the beginning.
        epochs_compeleted += 1
        perm = np.arange(num_examples)
        np.random.shuffle(perm)
        train_images = train_images[perm]
        train_labels = train_labels[perm]
        start = 0
        index_in_epoch = batch_size
        assert batch_size <= num_examples
    end = index_in_epoch
    train_labels_one_hot = dense_to_one_hot(train_labels[start:end], num_classes=10)
    return train_images[start:end], train_labels_one_hot
# Script entry point: train the CNN and write predictions to my_prediction.csv.
if __name__ == '__main__':
    cnn()
|
11586570
|
def returns(param):
    """Return `param` when it is 1, 2 or 3; otherwise return 0.

    The original chained four `if` statements and duplicated the
    ``param == 2`` branch (the second copy was unreachable); the chain
    collapses to a single membership test.
    """
    return param if param in (1, 2, 3) else 0
|
11586648
|
import pkgutil
import inspect
from ._program import *
from .art import *
from .bcftools import *
from .bfast import *
from .bowtie import *
from .bwa import *
from .cmake import *
from .curesim import *
from .deez import *
from .drfast import *
from .dwgsim import *
from .freec import *
from .gem import *
from .gnuplot import *
from .htslib import *
from .kallisto import *
from .last import *
from .mrfast import *
from .mrsfast import *
from .pbsim import *
from .perm import *
from .picard import *
from .sambamba import *
from .samtools import *
from .seqan import *
from .sirfast import *
from .storm import *
from .twobittofa import *
from .wgsim import *
from .xs import *
#for loader, name, is_pkg in pkgutil.walk_packages(__path__):
# module = loader.find_module(name).load_module(name)
#
# for name, value in inspect.getmembers(module):
# if name.startswith('__'):
# continue
#
# globals()[name] = value
# __all__.append(name)
|
11586681
|
from datetime import datetime
from functools import reduce
import operator
from django.contrib.auth.models import Group, Permission
from django.db.models import Q
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.utils.http import urlencode
from django.test import TestCase, override_settings
from zentral.contrib.inventory.models import MachineSnapshotCommit
from accounts.models import User
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class MacOSAppsViewsTestCase(TestCase):
@classmethod
def setUpTestData(cls):
# user
cls.user = User.objects.create_user("godzilla", "<EMAIL>", get_random_string())
cls.group = Group.objects.create(name=get_random_string())
cls.user.groups.set([cls.group])
# machine snapshot
cls.computer_name = "yolozulu"
source = {"module": "tests.zentral.io", "name": "Zentral Tests"}
tree = {
"source": source,
"business_unit": {"name": "yo bu",
"reference": "bu1",
"source": source,
"links": [{"anchor_text": "bu link",
"url": "http://bu-link.de"}]},
"groups": [{"name": "yo grp",
"reference": "grp1",
"source": source,
"links": [{"anchor_text": "group link",
"url": "http://group-link.de"}]}],
"serial_number": "0123456789",
"system_info": {"computer_name": cls.computer_name},
"os_version": {'name': 'OS X', 'major': 10, 'minor': 11, 'patch': 1},
"osx_app_instances": [
{'app': {'bundle_id': 'io.zentral.baller',
'bundle_name': 'Baller.app',
'bundle_version': '123',
'bundle_version_str': '1.2.3'},
'bundle_path': "/Applications/Baller.app",
'signed_by': {
"common_name": "Developer ID Application: GODZILLA",
"organization": "GOZILLA INC",
"organizational_unit": "ATOM",
"sha_1": 40 * "a",
"sha_256": 64 * "a",
"valid_from": datetime(2015, 1, 1),
"valid_until": datetime(2026, 1, 1),
"signed_by": {
"common_name": "Developer ID Certification Authority",
"organization": "Apple Inc.",
"organizational_unit": "Apple Certification Authority",
"sha_1": "3b166c3b7dc4b751c9fe2afab9135641e388e186",
"sha_256": "7afc9d01a62f03a2de9637936d4afe68090d2de18d03f29c88cfb0b1ba63587f",
"valid_from": datetime(2012, 12, 1),
"valid_until": datetime(2027, 12, 1),
"signed_by": {
"common_name": "<NAME>",
"organization": "Apple Inc.",
"organizational_unit": "Apple Certification Authority",
"sha_1": "611e5b662c593a08ff58d14ae22452d198df6c60",
"sha_256": "b0b1730ecbc7ff4505142c49f1295e6eda6bcaed7e2c68c5be91b5a11001f024",
"valid_from": datetime(2006, 4, 25),
"valid_until": datetime(2035, 2, 9)
}
}
}}
]
}
_, cls.ms = MachineSnapshotCommit.objects.commit_machine_snapshot_tree(tree)
cls.osx_app_instance = cls.ms.osx_app_instances.all()[0]
cls.osx_app = cls.osx_app_instance.app
# utility methods
    def _login_redirect(self, url):
        """Assert that an anonymous GET on *url* redirects to the login page."""
        response = self.client.get(url)
        self.assertRedirects(response, "{u}?next={n}".format(u=reverse("login"), n=url))
    def _login(self, *permissions):
        """Log the test user in with exactly the given permissions.

        Each permission is an "app_label.codename" string; they are resolved
        to Permission rows and set on the shared test group.  With no
        arguments the group's permissions are cleared instead.
        """
        if permissions:
            # Build one OR'ed filter matching every requested permission.
            permission_filter = reduce(operator.or_, (
                Q(content_type__app_label=app_label, codename=codename)
                for app_label, codename in (
                    permission.split(".")
                    for permission in permissions
                )
            ))
            self.group.permissions.set(list(Permission.objects.filter(permission_filter)))
        else:
            self.group.permissions.clear()
        self.client.force_login(self.user)
# macos pass
    def test_macos_apps_redirect(self):
        """Anonymous users are redirected to login."""
        self._login_redirect(reverse("inventory:macos_apps"))
    def test_macos_apps_permission_denied(self):
        """A user without inventory permissions gets a 403."""
        self._login()
        response = self.client.get(reverse("inventory:macos_apps"))
        self.assertEqual(response.status_code, 403)
    def test_macos_apps(self):
        """The macOS apps search page renders for a user with view permissions."""
        self._login("inventory.view_osxapp", "inventory.view_osxappinstance")
        response = self.client.get(reverse("inventory:macos_apps"))
        self.assertContains(response, "Search macOS applications", status_code=200)
    def test_macos_apps_bundle_name(self):
        """Bundle-name search matches the fixture app and nothing else."""
        self._login("inventory.view_osxapp", "inventory.view_osxappinstance")
        # "baller" matches the Baller.app snapshot committed in setUpTestData.
        response = self.client.get("{}?{}".format(
            reverse("inventory:macos_apps"),
            urlencode({"bundle_name": "baller"})
        ))
        self.assertContains(response, "1 result")
        # "yolo" matches nothing.
        response = self.client.get("{}?{}".format(
            reverse("inventory:macos_apps"),
            urlencode({"bundle_name": "yolo"})
        ))
        self.assertContains(response, "0 results")
    def test_macos_apps_bundle_name_and_source_search(self):
        """Combining bundle-name and source filters still finds the fixture app."""
        self._login("inventory.view_osxapp", "inventory.view_osxappinstance")
        response = self.client.get("{}?{}".format(
            reverse("inventory:macos_apps"),
            urlencode({"bundle_name": "baller",
                       "source": self.ms.source.id})
        ))
        self.assertContains(response, "1 result")
    def test_macos_app(self):
        """The app detail page shows name/version, instance count and signing cert."""
        self._login("inventory.view_osxapp", "inventory.view_osxappinstance")
        response = self.client.get(reverse("inventory:macos_app", args=(self.osx_app.id,)))
        self.assertContains(response, "Baller.app 1.2.3")
        self.assertContains(response, "1 application instance")
        self.assertContains(response, self.osx_app_instance.signed_by.sha_256)
    def test_macos_app_instance_machines(self):
        """The instance-machines page lists the machine carrying the app instance."""
        self._login("inventory.view_osxapp", "inventory.view_osxappinstance", "inventory.view_machinesnapshot")
        response = self.client.get(reverse("inventory:macos_app_instance_machines",
                                           args=(self.osx_app.id, self.osx_app_instance.id)),
                                   follow=True)
        self.assertContains(response, "Baller.app 1.2.3")
        self.assertContains(response, "1 Machine")
        self.assertContains(response, self.osx_app_instance.signed_by.sha_256)
        self.assertContains(response, self.computer_name)
|
11586685
|
import shutil
import sys
from pathlib import Path
from typing import Optional, Union
import nox
# Project root (this noxfile's directory).
THIS_DIR = Path(__file__).parent
WINDOWS = sys.platform.startswith("win")
# Python versions the test sessions are parametrized over.
SUPPORTED_PYTHONS = ["3.7", "3.8", "3.9", "3.10"]
nox.needs_version = ">=2021.10.1"
# Fail loudly if a session shells out to a binary it didn't declare.
nox.options.error_on_external_run = True
def wipe(session: nox.Session, path: Union[str, Path]) -> None:
    """Delete *path* (file or directory), logging it relative to the cwd.

    Does nothing during ``--install-only`` runs or when the path is absent.
    """
    if "--install-only" in sys.argv:
        return
    target = Path.cwd() / path if isinstance(path, str) else path
    shown = target.relative_to(Path.cwd())
    if not target.exists():
        return
    if target.is_file():
        session.log(f"Deleting '{shown}' file.")
        target.unlink()
    elif target.is_dir():
        session.log(f"Deleting '{shown}' directory.")
        shutil.rmtree(target)
def get_flag(session: nox.Session, flag: str) -> bool:
    """Consume *flag* from the session's positional args.

    Returns True when the flag was present (it is removed in place),
    False otherwise.
    """
    try:
        session.posargs.remove(flag)
    except ValueError:
        return False
    return True
def get_option(session: nox.Session, name: str) -> Optional[str]:
    """Consume ``name <value>`` from the session's positional args.

    Returns the value (both tokens are removed in place), or None when the
    option is absent or has no argument following it.
    """
    assert name.startswith("--")
    if name not in session.posargs:
        return None
    index = session.posargs.index(name)
    try:
        value = session.posargs[index + 1]
    except IndexError:
        # BUG FIX: previously execution fell through to `return value` with
        # `value` unbound, raising NameError instead of reporting the problem.
        session.warn(f"[WARN] missing argument to {name}")
        return None
    del session.posargs[index : index + 2]
    assert isinstance(value, str)
    return value
@nox.session(name="lint")
def lint(session: nox.Session) -> None:
"""Run pre-commit."""
session.install("pre-commit")
session.run("pre-commit", "run", "--all-files", "--show-diff-on-failure")
@nox.session(name="tests", python=SUPPORTED_PYTHONS)
def tests(session: nox.Session) -> None:
"""A proper unit and functional test suite."""
session.install("-e", ".[test]")
session.run("diff-shades", "--version")
black_req = get_option(session, "--black-req")
if black_req:
session.install(black_req)
else:
session.install("black")
coverage = not get_flag(session, "--no-cov")
cmd = ["pytest", "tests"]
if coverage:
wipe(session, "htmlcov")
cmd.extend(["--cov", "--cov-context", "test"])
session.run(*cmd, *session.posargs)
if coverage:
session.run("coverage", "html")
# For some reason, a stray empty coverage is left behind, let's delete it.
# TODO: figure out why it is created in the first place and fix the underlying issue
for c in THIS_DIR.glob(".coverage.*"):
if not c.read_bytes():
wipe(session, c)
@nox.session(name="setup-env", venv_backend="none")
def setup_env(session: nox.Session) -> None:
"""Setup a basic (virtual) environment for manual testing."""
env_dir = THIS_DIR / ".venv"
bin_dir = env_dir / ("Scripts" if WINDOWS else "bin")
wipe(session, env_dir)
session.run(sys.executable, "-m", "virtualenv", str(env_dir))
session.run(str(bin_dir / "python"), "-m", "pip", "install", "-e", ".")
session.run(str(bin_dir / "python"), "-m", "pip", "install", "black")
session.log("Virtual environment at project root under '.venv' ready to go!")
|
11586705
|
import numpy as np
import scipy as sp
import pandas as pd
import scipy.sparse
import numbers
from .helper import SparseTensor
from . import wrapper
def make_sparse(Y, nnz, shape = None, seed = None):
    """Sample *nnz* cells from Y and return them as a sparse matrix/tensor.

    Thin wrapper around :func:`make_train_test` that discards the train part.
    """
    _, sampled = make_train_test(Y, nnz, shape, seed)
    return sampled
def make_train_test(Y, ntest, shape = None, seed = None):
    """Splits a sparse matrix Y into a train and a test matrix.

    Parameters
    ----------
    Y : :class:`scipy.spmatrix`, (coo_matrix, csr_matrix or csc_matrix) or
        :class:`numpy.ndarray` or
        :class:`pandas.DataFrame` or
        :class:`smurff.SparseTensor`
        Matrix/Array/Tensor to split
    ntest : float <1.0 or integer.
        - if float, then indicates the ratio of test cells
        - if integer, then indicates the number of test cells
    shape : tuple, optional
        Shape of the output matrices; defaults to Y's own shape.
    seed : int, optional
        Seed for numpy's global RNG, for reproducible splits.

    Returns
    -------
    Ytrain : coo_matrix or SparseTensor
        train part
    Ytest : coo_matrix or SparseTensor
        test part
    """
    # DataFrames are interpreted as sparse-tensor coordinate lists.
    if isinstance(Y, pd.DataFrame):
        return make_train_test(SparseTensor(Y), ntest, Y.shape, seed)
    # Dense arrays: <=2 modes become a scipy COO matrix, higher-rank a tensor.
    if isinstance(Y, np.ndarray):
        nmodes = len(Y.shape)
        if (nmodes > 2):
            Ysparse = SparseTensor(Y)
        else:
            Ysparse = sp.sparse.coo_matrix(Y)
        return make_train_test(Ysparse, ntest, shape, seed)
    if sp.sparse.issparse(Y):
        Y = Y.tocoo(copy = False)
    elif not isinstance(Y, SparseTensor):
        raise TypeError("Unsupported Y type: " + str(type(Y)))
    if not isinstance(ntest, numbers.Real) or ntest < 0:
        raise TypeError("ntest has to be a non-negative number (number or ratio of test samples).")
    # A fraction below 1.0 is a ratio of Y's stored entries.
    if ntest < 1:
        ntest = Y.nnz * ntest
    # NOTE(review): even ntest == 0 yields one test sample here — presumably
    # intentional so the test set is never empty; confirm with callers.
    ntest = int(round(ntest))
    ntest = max(1,ntest)
    if seed is not None:
        np.random.seed(seed)
    # Random permutation of entry indices; first ntest go to the test set.
    rperm = np.random.permutation(Y.nnz)
    train = rperm[ntest:]
    test = rperm[0:ntest]
    if shape is None:
        shape = Y.shape
    if sp.sparse.issparse(Y):
        Ytrain = sp.sparse.coo_matrix( (Y.data[train], (Y.row[train], Y.col[train])), shape=shape )
        Ytest = sp.sparse.coo_matrix( (Y.data[test], (Y.row[test], Y.col[test])), shape=shape )
    else:
        # NOTE(review): the earlier isinstance check uses helper.SparseTensor
        # while this assert uses wrapper.SparseTensor — presumably the same
        # class re-exported; verify.
        assert isinstance(Y, wrapper.SparseTensor)
        Ytrain = SparseTensor(
            ( Y.values[train], [ np.array(idx)[train] for idx in Y.columns ] ),
            Y.shape)
        Ytest = SparseTensor(
            ( Y.values[test], [ np.array(idx)[test] for idx in Y.columns ] ),
            Y.shape)
    return Ytrain, Ytest
|
11586750
|
import os, sys
try:
from setuptools import setup
from setuptools.command.install import install as _install
from setuptools.command.sdist import sdist as _sdist
except ImportError:
from distutils.core import setup
from distutils.command.install import install as _install
from distutils.command.sdist import sdist as _sdist
def _run_build_tables(dir):
    """Regenerate the PLY lexing/parsing tables inside the py010parser package."""
    from subprocess import call
    package_dir = os.path.join(dir, 'py010parser')
    call([sys.executable, '_build_tables.py'], cwd=package_dir)
class install(_install):
    """Standard install that also regenerates the PLY tables in the
    installed package directory."""
    def run(self):
        _install.run(self)
        self.execute(_run_build_tables, (self.install_lib,),
                     msg="Build the lexing/parsing tables")
class sdist(_sdist):
    """Source distribution that ships pre-built PLY tables."""
    def make_release_tree(self, basedir, files):
        _sdist.make_release_tree(self, basedir, files)
        self.execute(_run_build_tables, (basedir,),
                     msg="Build the lexing/parsing tables")
# Package metadata and build hooks. The custom cmdclass entries regenerate
# the PLY tables on install/sdist (see the classes above).
setup(
    # metadata
    name = 'py010parser',
    description = '010 template parser in Python',
    long_description = """
        py010parser is a modified fork of the pycparser project. It is
        pure Python using the PLY parsing library. It parses 010 templates
        into an AST.
    """,
    license = 'BSD',
    # NOTE(review): '{{VERSION}}' looks like a release-time template
    # placeholder — confirm the release tooling substitutes it.
    version = '{{VERSION}}',
    author = '<NAME>',
    maintainer = '<NAME>',
    author_email = '<EMAIL>',
    url = 'https://github.com/d0c-s4vage/py010parser',
    platforms = 'Cross Platform',
    classifiers = [
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',],
    packages = ['py010parser', 'py010parser.ply'],
    download_url = "https://github.com/d0c-s4vage/py010parser/tarball/v0.1.9",
    keywords = ["010", "template", "parser"],
    package_data = {'py010parser': ['*.cfg']},
    cmdclass = {'install': install, 'sdist': sdist},
    # BUG FIX: split("\n") used to produce empty entries (trailing newline,
    # blank lines), which setuptools chokes on; strip and drop blanks.
    install_requires = [
        req.strip()
        for req in open(os.path.join(os.path.dirname(__file__), "requirements.txt")).read().splitlines()
        if req.strip()
    ],
)
|
11586781
|
import time
from typing import Sequence
from rlmolecule.mcts.mcts import MCTS
from rlmolecule.mcts.mcts_problem import MCTSProblem
from rlmolecule.tree_search.graph_search_state import GraphSearchState
from rlmolecule.tree_search.metrics import call_metrics, collect_metrics
from rlmolecule.tree_search.reward import RawRewardFactory
class DummyState(GraphSearchState):
    """Minimal linear search state used to exercise the metrics decorators.

    Each non-terminal position expands to a terminal copy of itself and the
    next non-terminal position.
    """
    def __init__(self, position, is_terminal):
        super(DummyState, self).__init__()
        self.is_terminal = is_terminal
        self.position = position
    # Annotation fixed: this compares DummyState instances ('CyclicState' was
    # a copy-paste leftover).
    def equals(self, other: 'DummyState') -> bool:
        return (self.position == other.position) & (self.is_terminal == other.is_terminal)
    def __repr__(self):
        return f"{self.__class__}(position={self.position}, is_terminal={self.is_terminal})"
    @collect_metrics
    def get_next_actions(self) -> Sequence['GraphSearchState']:
        # Deliberate delay so the test can assert on accumulated execution time.
        time.sleep(0.1)
        if self.is_terminal:
            return []
        else:
            return [
                DummyState(self.position, is_terminal=True),
                DummyState((self.position + 1), is_terminal=False)
            ]
    def hash(self) -> int:
        return hash((self.position, self.is_terminal))
class DummyProblem(MCTSProblem):
    """Trivial MCTS problem: constant reward, counts get_reward invocations."""
    def __init__(self, **kwargs):
        super(DummyProblem, self).__init__(**kwargs)
        # Number of times get_reward was called; compared against the
        # collected metrics in test_metrics.
        self.call_count = 0
    def get_initial_state(self) -> GraphSearchState:
        return DummyState(0, is_terminal=False)
    # Annotation fixed: `(float, {})` was a tuple literal, not a type.
    @collect_metrics
    def get_reward(self, state: GraphSearchState) -> "Tuple[float, dict]":
        self.call_count += 1
        return 1, {}
def test_metrics():
    """collect_metrics must count calls and accumulate wall-clock time."""
    call_metrics.reset()
    start = DummyState(0, is_terminal=False)
    start.get_next_actions()
    assert call_metrics.execution_count['get_next_actions'] == 1
    # get_next_actions sleeps 0.1s, so accumulated time grows by at least that.
    assert call_metrics.execution_time['get_next_actions'] > 0.1
    start.get_next_actions()
    assert call_metrics.execution_count['get_next_actions'] == 2
    assert call_metrics.execution_time['get_next_actions'] > 0.2
    problem = DummyProblem(reward_class=RawRewardFactory())
    game = MCTS(problem)
    game.run(num_mcts_samples=5)
    # The metric counter and the problem's own counter must agree.
    assert call_metrics.execution_count['get_reward'] == problem.call_count
|
11586803
|
import numpy as np
from jbdl.rbdl.kinematics import calc_body_to_base_coordinates
def calc_whole_body_com(model: dict, q: np.ndarray) -> np.ndarray:
    """calc_whole_body_com - Calculate whole body's CoM position in world frame

    The CoM is the mass-weighted mean of the per-body CoM positions, each
    transformed to world coordinates.

    Args:
        model (dict): dictionary of model specification; must provide
            "idcomplot" (body ids), "com" (per-body CoM offsets) and
            "mass" (per-body masses), all of equal length
        q (np.ndarray): an array of joint position

    Returns:
        np.ndarray: float (3, 1) whole-body CoM position
        (docstring previously claimed (3, 3), which did not match the code)
    """
    q = q.flatten()
    idcomplot = model["idcomplot"]
    com = model["com"]
    mass = model["mass"]

    # Single pass: accumulate mass-weighted world-frame CoM positions.
    # (Previously two loops plus a dead `clink` initialisation.)
    weighted_sum = np.zeros((3, 1))
    total_mass = 0
    for body_id, body_com, body_mass in zip(idcomplot, com, mass):
        world_com = calc_body_to_base_coordinates(model, q, body_id, body_com)
        weighted_sum = weighted_sum + np.multiply(world_com, body_mass)
        total_mass = total_mass + body_mass
    pcom = np.asfarray(np.divide(weighted_sum, total_mass))
    return pcom
|
11586843
|
from subprocess import Popen, PIPE
import platform
import os
class LocateBinary(object):
    """Class implements lookup for chef binaries.

    Resolution order: well-known installation directories first, then the
    platform's `which`/`where` command as a last resort.
    """
    def locate_binary(self, binary):
        """Locate full path of a chef binary.

        Raises RuntimeError when the file exists but is not executable, or
        when the binary cannot be found at all.
        """
        # Try sane paths
        for base_path in self._sane_paths():
            path = os.path.join(base_path, binary)
            if platform.system() == 'Windows' and os.path.isfile(path):
                return path
            elif os.path.isfile(path):
                # On *nix file must be executable
                if not os.access(path, os.X_OK):
                    # BUG FIX: the message was passed a printf-style argument
                    # tuple, which RuntimeError never interpolates (and the
                    # word "permission" was misspelled).
                    raise RuntimeError('File %s must have permission to be executed' % path)
                return path
        # Do which for the last resort
        if platform.system() == 'Windows':
            # Ignore scripts and etc, binary must be an executable file
            env, which = ({'PATHEXT': '.exe'}, 'where')
        else:
            env, which = (None, 'which')
        proc = Popen([which, binary], stdout=PIPE, env=env, shell=False)
        stdout, _ = proc.communicate()
        if proc.returncode > 0:
            raise RuntimeError("File `{}' not found".format(binary))
        # BUG FIX: communicate() returns bytes on Python 3, but the sane-path
        # branch returns str — decode so both branches agree.
        found = stdout.rstrip()
        return found.decode() if isinstance(found, bytes) else found
    @staticmethod
    def _sane_paths():
        """Well-known binary directories to probe before which/where."""
        if platform.system() == 'Windows':
            return ()
        else:
            return ('/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin', '/bin')
|
11586882
|
from scapy.all import *
from ccsds_base import CCSDSPacket
class SC_HK_TLM_PKT_TlmPkt(Packet):
    """Housekeeping Packet Structure
    app = SC
    command = HK_TLM_PKT
    msg_id = SC_HK_TLM_MID = 0x08aa = 0x0800 + 0x0aa

    Scapy dissector for the cFS Stored Command (SC) app housekeeping
    telemetry. Each field mirrors one COSMOS APPEND_ITEM definition
    (kept as comments); field widths are 8/16/32 bits mapped to
    ByteField/ShortField/IntField respectively.
    """
    name = "SC_HK_TLM_PKT_TlmPkt"
    fields_desc = [
        # APPEND_ITEM ATS_NUMBER 8 UINT "Current ATS number 1 = ATS A, 2 = ATS B"
        ByteField("ATS_NUMBER", 0),
        # APPEND_ITEM ATP_STATE 8 UINT "Current ATP state valid values are: 2 = IDLE, 5 = EXECUTING"
        ByteField("ATP_STATE", 0),
        # APPEND_ITEM CONT_ATS_ON_FAIL 8 UINT "In the event of ATS execution failure (ats command fails checksum), continue if TRUE, stop if FALSE"
        ByteField("CONT_ATS_ON_FAIL", 0),
        # APPEND_ITEM CMD_ERROR_COUNT 8 UINT "Counts Request Errors"
        ByteField("CMD_ERROR_COUNT", 0),
        # APPEND_ITEM CMD_VALID_COUNT 8 UINT "Counts Ground Requests"
        ByteField("CMD_VALID_COUNT", 0),
        # APPEND_ITEM PADDING8 8 UINT ""
        ByteField("PADDING8", 0),
        # APPEND_ITEM SWITCH_PEND_FLAG 16 UINT "Is an ats switch pending? 0 = NO, 1 = YES This means that the ATS switch is waiting until a safe time"
        ShortField("SWITCH_PEND_FLAG", 0),
        # APPEND_ITEM NUM_RTS_ACTIVE 16 UINT "Number of RTSs currently active"
        ShortField("NUM_RTS_ACTIVE", 0),
        # APPEND_ITEM RTS_NUMBER 16 UINT "Next RTS number"
        ShortField("RTS_NUMBER", 0),
        # APPEND_ITEM RTS_ACTIVE_CTR 16 UINT "Increments when an RTS is started without error"
        ShortField("RTS_ACTIVE_CTR", 0),
        # APPEND_ITEM RTS_ACTIVE_ERR_CTR 16 UINT "Increments when an attempt to start an RTS fails"
        ShortField("RTS_ACTIVE_ERR_CTR", 0),
        # APPEND_ITEM ATS_CMD_CTR 16 UINT "Total ATS cmd cnter counts commands sent by the ATS"
        ShortField("ATS_CMD_CTR", 0),
        # APPEND_ITEM ATS_CMD_ERR_CTR 16 UINT "Total ATS cmd Error ctr command errors in the ATS"
        ShortField("ATS_CMD_ERR_CTR", 0),
        # APPEND_ITEM RTS_CMD_CTR 16 UINT "Counts TOTAL rts cmds that were sent out from ALL active RTSs"
        ShortField("RTS_CMD_CTR", 0),
        # APPEND_ITEM RTS_CMD_ERR_CTR 16 UINT "Counts TOTAL number of errs from ALL RTSs that are active"
        ShortField("RTS_CMD_ERR_CTR", 0),
        # APPEND_ITEM LAST_ATS_ERR_SEQ 16 UINT "Last ATS Errant Sequence Num Values: 1 or 2"
        ShortField("LAST_ATS_ERR_SEQ", 0),
        # APPEND_ITEM LAST_ATS_ERR_CMD 16 UINT "Last ATS Errant Command Num"
        ShortField("LAST_ATS_ERR_CMD", 0),
        # APPEND_ITEM LAST_RTS_ERR_SEQ 16 UINT "Last RTS Errant Sequence Num"
        ShortField("LAST_RTS_ERR_SEQ", 0),
        # APPEND_ITEM LAST_RTS_ERR_CMD 16 UINT "The OFFSET in the RTS buffer of the command that had an error, as a WORD offset"
        ShortField("LAST_RTS_ERR_CMD", 0),
        # APPEND_ITEM APPEND_CMD_ARG 16 UINT "ATS selection argument from most recent Append ATS command"
        ShortField("APPEND_CMD_ARG", 0),
        # APPEND_ITEM APPEND_ENTRY_COUNT 16 UINT "Number of cmd entries in current Append ATS table"
        ShortField("APPEND_ENTRY_COUNT", 0),
        # APPEND_ITEM APPEND_BYTE_COUNT 16 UINT "Size of cmd entries in current Append ATS table"
        ShortField("APPEND_BYTE_COUNT", 0),
        # APPEND_ITEM APPEND_LOAD_COUNT 16 UINT "Total number of Append ATS table loads"
        ShortField("APPEND_LOAD_COUNT", 0),
        # APPEND_ITEM ATP_CMD_NUMBER 32 UINT "current command number"
        IntField("ATP_CMD_NUMBER", 0),
        # APPEND_ITEM ATP_1_FREE_BYTES 32 UINT "Free Bytes in ATS 1"
        IntField("ATP_1_FREE_BYTES", 0),
        # APPEND_ITEM ATP_2_FREE_BYTES 32 UINT "Free Bytes in ATS 2"
        IntField("ATP_2_FREE_BYTES", 0),
        # APPEND_ITEM NEXT_RTS_TIME 32 UINT "Next RTS cmd Absolute Time"
        IntField("NEXT_RTS_TIME", 0),
        # APPEND_ITEM NEXT_ATS_TIME 32 UINT "Next ATS Command Time (seconds)"
        IntField("NEXT_ATS_TIME", 0),
        # APPEND_ITEM RTS_W1_EXE_STATUS 16 UINT "RTS executing status bit map; each uint16 covers 16 RTS numbers
        # (base-one: LSB of word 1 = RTS 1). Bit 0 = IDLE, bit 1 = EXECUTING."
        ShortField("RTS_W1_EXE_STATUS", 0),
        # ITEM RTS_8_EXE 576 1 UINT
        # ITEM RTS_7_EXE 577 1 UINT
        # ITEM RTS_6_EXE 578 1 UINT
        #   STATE IDLE 0
        #   STATE EXEC 1
        # ITEM RTS_5_EXE 579 1 UINT
        # ITEM RTS_4_EXE 580 1 UINT
        # ITEM RTS_3_EXE 581 1 UINT
        # ITEM RTS_2_EXE 582 1 UINT
        # ITEM RTS_1_EXE 583 1 UINT
        # APPEND_ITEM RTS_W2_EXE_STATUS 16 UINT (same bit-map semantics as RTS_W1_EXE_STATUS)
        ShortField("RTS_W2_EXE_STATUS", 0),
        # APPEND_ITEM RTS_W3_EXE_STATUS 16 UINT (same bit-map semantics as RTS_W1_EXE_STATUS)
        ShortField("RTS_W3_EXE_STATUS", 0),
        # APPEND_ITEM RTS_W4_EXE_STATUS 16 UINT (same bit-map semantics as RTS_W1_EXE_STATUS)
        ShortField("RTS_W4_EXE_STATUS", 0),
        # APPEND_ITEM RTS_W1_DIS_STATUS 16 UINT "RTS disabled status bit map; same layout as the EXE words.
        # Bit 0 = ENABLED, bit 1 = DISABLED."
        ShortField("RTS_W1_DIS_STATUS", 0),
        # ITEM RTS_8_DIS 640 1 UINT
        # ITEM RTS_7_DIS 641 1 UINT
        # ITEM RTS_6_DIS 642 1 UINT
        #   STATE FALSE 0
        #   STATE TRUE 1
        # ITEM RTS_5_DIS 643 1 UINT
        # ITEM RTS_4_DIS 644 1 UINT
        # ITEM RTS_3_DIS 645 1 UINT
        # ITEM RTS_2_DIS 646 1 UINT
        # ITEM RTS_1_DIS 647 1 UINT
        # APPEND_ITEM RTS_W2_DIS_STATUS 16 UINT (same bit-map semantics as RTS_W1_DIS_STATUS)
        ShortField("RTS_W2_DIS_STATUS", 0),
        # APPEND_ITEM RTS_W3_DIS_STATUS 16 UINT (same bit-map semantics as RTS_W1_DIS_STATUS)
        ShortField("RTS_W3_DIS_STATUS", 0),
        # APPEND_ITEM RTS_W4_DIS_STATUS 16 UINT (same bit-map semantics as RTS_W1_DIS_STATUS)
        ShortField("RTS_W4_DIS_STATUS", 0),
    ]
# Register the dissector: telemetry (pkttype=0) with APID 170 (0x0aa).
bind_layers(CCSDSPacket, SC_HK_TLM_PKT_TlmPkt, pkttype=0, apid=170)
|
11586889
|
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from builtins import str as unicode
from quark_runtime import *
_lazyImport.plug("puse_md.Root")
class Root(_QObject):
def _init(self):
pass
def __init__(self): self._init()
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
_lazyImport.pump("puse_md.Root")
|
11586891
|
from __future__ import annotations
from typing import List, Text, Dict, Any
from rasa.engine.graph import ExecutionContext
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.utils.io import DEFAULT_ENCODING
@DefaultV1Recipe.register(
    DefaultV1Recipe.ComponentType.MESSAGE_TOKENIZER, is_trainable=False
)
class MitieTokenizer(Tokenizer):
    """Tokenizes messages using the `mitie` library.."""
    @staticmethod
    def get_default_config() -> Dict[Text, Any]:
        """Returns default config (see parent class for full docstring)."""
        return {
            # Flag to check whether to split intents
            "intent_tokenization_flag": False,
            # Symbol on which intent should be split
            "intent_split_symbol": "_",
            # Regular expression to detect tokens
            "token_pattern": None,
        }
    @staticmethod
    def required_packages() -> List[Text]:
        """Any extra python dependencies required for this component to run."""
        return ["mitie"]
    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> MitieTokenizer:
        """Creates a new component (see parent class for full docstring)."""
        return cls(config)
    def tokenize(self, message: Message, attribute: Text) -> List[Token]:
        """Tokenizes the text of the provided attribute of the incoming message."""
        # Imported lazily so the component can be registered without mitie
        # installed (see required_packages).
        import mitie
        text = message.get(attribute)
        # mitie works on byte offsets, so tokenize the encoded text and map
        # offsets back to character positions afterwards.
        encoded_sentence = text.encode(DEFAULT_ENCODING)
        tokenized = mitie.tokenize_with_offsets(encoded_sentence)
        tokens = [
            self._token_from_offset(token, offset, encoded_sentence)
            for token, offset in tokenized
        ]
        return self._apply_token_pattern(tokens)
    def _token_from_offset(
        self, text: bytes, offset: int, encoded_sentence: bytes
    ) -> Token:
        """Build a Token from a mitie byte-token and its byte offset."""
        return Token(
            text.decode(DEFAULT_ENCODING),
            self._byte_to_char_offset(encoded_sentence, offset),
        )
    @staticmethod
    def _byte_to_char_offset(text: bytes, byte_offset: int) -> int:
        """Convert a byte offset into *text* to a character offset."""
        return len(text[:byte_offset].decode(DEFAULT_ENCODING))
|
11586899
|
import st7565
import xglcd_font as font
import math
import time
# 5x7 pixel font used for all text on the display.
neato = font.XglcdFont('/home/pi/Pi-ST7565/fonts/Neato5x7.c', 5, 7)
# ST7565 display with an RGB backlight on GPIO pins 21/20/16.
glcd = st7565.Glcd(rgb=[21, 20, 16])
glcd.init()
# Center of the clock face on the 128x64 screen.
x0, y0 = 63, 31
def get_face_xy(angle, radius):
    """Return the (x, y) point on the clock face at *angle* degrees / *radius* px."""
    theta = math.radians(angle)
    return (int(x0 + radius * math.cos(theta)),
            int(y0 + radius * math.sin(theta)))
def draw_face():
    """Draw the static clock face: outline, tick marks, numerals and date."""
    # Outline
    glcd.draw_circle(x0, y0, 31)
    # Ticks: spokes every 30 degrees, later clipped to the rim by the fill below.
    for angle in range(30, 331, 30):
        glcd.draw_line(x0, y0, *get_face_xy(angle, 29))
    # Clear center of circle
    glcd.fill_circle(x0, y0, 25, color=0)
    # Numbers
    glcd.draw_string("12", neato, x0 - 5, y0 - 29, spacing=0)
    glcd.draw_letter("3", neato, x0 + 25, y0 - 3)
    glcd.draw_letter("6", neato, x0 - 2, y0 + 23)
    glcd.draw_letter("9", neato, x0 - 29, y0 - 3)
    # Date
    glcd.draw_string(time.strftime("%b").upper(), neato, 0,0)
    glcd.draw_string(time.strftime(" %d"), neato, 0, 8)
# Main loop: redraw the face and hands once per minute.
try:
    while True:
        glcd.clear_back_buffer()
        draw_face()
        minute = int(time.strftime("%M"))
        hour = int(time.strftime("%H"))
        # Minute hand: 6 deg/min, rotated so 0 min points up (+270 == -90 mod 360).
        glcd.draw_line(x0, y0, *get_face_xy(minute * 6 + 270, 29))
        # Hour hand: 30 deg/hour (24h values wrap correctly mod 360).
        glcd.draw_line(x0, y0, *get_face_xy(hour * 30 - 90, 20))
        glcd.flip()
        # Sleep until the minute changes rather than redrawing continuously.
        while minute == int(time.strftime("%M")):
            time.sleep(1)
except KeyboardInterrupt:
    print('\nCtrl-C pressed. Cleaning up and exiting...')
finally:
    glcd.cleanup()
|
11586903
|
import sys
import yaml
def readFile(filepath):
    """Load and return the parsed YAML content of *filepath*."""
    # BUG FIX: the old `return []` after the with-block was unreachable —
    # the function always returned from inside the context manager.
    with open(filepath, 'r') as f:
        return yaml.safe_load(f)
def main(argv):
    """Extract the JS script body of CommonServer.yml into commonServerJsDoc.js.

    *argv* is currently unused — presumably kept for CLI symmetry; confirm.
    """
    # create commonServer js file to extract doc from
    commonServer = readFile('./Packs/Base/Scripts/script-CommonServer.yml')
    jsScript = commonServer.get("script", "")
    with open('./Documentation/commonServerJsDoc.js', 'w') as fp:
        fp.write(jsScript)
if __name__ == "__main__":
    main(sys.argv[1:])
|
11586947
|
import sifter.grammar
__all__ = ('TestNot',)
# section 5.8
class TestNot(sifter.grammar.Test):
    """Sieve 'not' test (RFC 5228 section 5.8): negates its single sub-test."""
    RULE_IDENTIFIER = 'NOT'
    def __init__(self, arguments=None, tests=None):
        super(TestNot, self).__init__(arguments, tests)
        self.validate_arguments()
        # 'not' takes exactly one nested test.
        self.validate_tests_size(1)
    def evaluate(self, message, state):
        """Return the boolean negation of the wrapped test's result."""
        return not self.tests[0].evaluate(message, state)
# Make the test discoverable by the grammar under its RULE_IDENTIFIER.
TestNot.register()
|
11586986
|
import argparse
import json
import os
from named_entity_recognition.api_ner.google_api_repository import remote_named_entity_recognition
# CLI: run Google NER over every question in a JSON dataset and dump the
# extracted entities to --output_path.
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--data_path', type=str, required=True)
    arg_parser.add_argument('--output_path', type=str, required=True)
    arg_parser.add_argument('--ner_api_secret', type=str, required=True)
    args = arg_parser.parse_args()
    with open(os.path.join(args.data_path), 'r', encoding='utf-8') as json_file:
        data = json.load(json_file)
    error_count = 0
    ner_data = []
    for doc in data:
        extracted_values = remote_named_entity_recognition(doc['question'], args.ner_api_secret)
        if extracted_values:
            ner_data.append({
                'entities': extracted_values['entities'],
                'language': extracted_values['language'],
                'question': doc['question']
            })
        else:
            error_count += 1
    with open(os.path.join(args.output_path), 'w', encoding='utf-8') as f:
        json.dump(ner_data, f, indent=2)
    # BUG FIX: report successful extractions (len(ner_data)), not the total
    # input size — the old message double-counted the failed requests.
    print("Extracted {} values. {} requests failed.".format(len(ner_data), error_count))
|
11587010
|
from theano import Op
# NOTE(review): this is a documentation skeleton, not runnable code — the
# `...` in the signatures and the empty, comment-only bodies are deliberate
# placeholders showing which Op methods to implement (it will not import).
class MyOp(Op):
    __props__ = ()
    def __init__(self, ...):
        # set up parameters
    def make_node(self, ...):
        # create apply node
    def make_thunk(self, node, storage_map,
                   compute_map, no_recycling):
        # return a thunk
    def infer_shape(self, input_shapes):
        # return output shapes
    def grad(self, inputs, output_grads):
        # return gradient graph for each input
|
11587037
|
from System import IntPtr
from System.Runtime.InteropServices import Marshal
from Ironclad import HGlobalAllocator, IAllocator
def GetAllocatingTestAllocator(allocsList, freesList):
    """Return a real HGlobalAllocator that records every (de)allocation.

    Allocations are appended to *allocsList* as (ptr, size) tuples and
    freed pointers to *freesList*, so tests can assert on memory traffic.
    """
    class TestAllocator(HGlobalAllocator):
        def Alloc(self, bytes):
            ptr = HGlobalAllocator.Alloc(self, bytes)
            allocsList.append((ptr, bytes))
            return ptr
        def Realloc(self, oldptr, bytes):
            # A realloc is recorded as a free of the old block plus an
            # allocation of the new one.
            newptr = HGlobalAllocator.Realloc(self, oldptr, bytes)
            freesList.append(oldptr)
            allocsList.append((newptr, bytes))
            return newptr
        def Free(self, ptr):
            freesList.append(ptr)
            HGlobalAllocator.Free(self, ptr)
        def Contains(self, ptr):
            return HGlobalAllocator.Contains(self, ptr)
    return TestAllocator()
def GetDoNothingTestAllocator(freesList):
    """Return a stub IAllocator that never allocates.

    Alloc always yields IntPtr.Zero; freed pointers are recorded in
    *freesList* but no memory is actually released.
    """
    class TestAllocator(IAllocator):
        def Alloc(self, _):
            return IntPtr.Zero
        def Free(self, ptr):
            freesList.append(ptr)
        def FreeAll(self):
            pass
        def Contains(self, ptr):
            return False
    return TestAllocator()
|
11587045
|
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.models.inception import Inception3
from warnings import warn
from torch.utils.model_zoo import load_url
class BeheadedInception3(Inception3):
    """ Like torchvision.models.inception.Inception3 but the head goes separately """
    def forward(self, x):
        """Run the Inception-v3 trunk and return three tensors:
        (pre-pool feature map for attention, pooled features, class logits).
        """
        if self.transform_input:
            # Re-normalize input channels from the default torchvision
            # normalization to what Inception expects (per-channel affine).
            x = x.clone()
            x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
        else: warn("Input isn't transformed")
        x = self.Conv2d_1a_3x3(x)
        x = self.Conv2d_2a_3x3(x)
        x = self.Conv2d_2b_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Conv2d_3b_1x1(x)
        x = self.Conv2d_4a_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)
        x = self.Mixed_6a(x)
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        x = self.Mixed_6e(x)
        x = self.Mixed_7a(x)
        x = self.Mixed_7b(x)
        # Keep the last conv feature map for attention models.
        x_for_attn = x = self.Mixed_7c(x)
        # 8 x 8 x 2048
        x = F.avg_pool2d(x, kernel_size=8)
        # 1 x 1 x 2048
        x_for_capt = x = x.view(x.size(0), -1)
        # 2048
        x = self.fc(x)
        # 1000 (num_classes)
        return x_for_attn, x_for_capt, x
def beheaded_inception_v3(transform_input=True):
    """Build a BeheadedInception3 with pretrained ImageNet weights.

    Downloads (or reads from the torch hub cache) the official Google
    Inception-v3 checkpoint and loads it into the beheaded model.
    """
    model= BeheadedInception3(transform_input=transform_input)
    inception_url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'
    model.load_state_dict(load_url(inception_url))
    return model
|
11587053
|
import logging
_LOGGER = logging.getLogger(__name__)
def decode(packet):
    """Decode an Everflourish RF packet into a command dict.

    Bit layout of packet["data"]: bits 10+ are the house code, bits 8-9 the
    unit (stored zero-based), bits 0-3 the method. Returns None when the
    packet does not look like Everflourish.

    https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolEverflourish.cpp
    """
    data = packet["data"]
    house = (data & 0xFFFC00) >> 10
    unit = ((data & 0x300) >> 8) + 1
    method_code = data & 0xF
    # _LOGGER.debug("Everflourish (data=%x, house=%d, "
    #               "unit=%d, method=%d)",
    #               data, house, unit, method_code)
    if house > 16383 or not (1 <= unit <= 4):
        # not everflourish
        return None
    method_names = {0: "turnoff", 15: "turnon", 10: "learn"}
    if method_code not in method_names:
        # not everflourish
        return None
    return dict(
        packet,
        _class="command",
        model="selflearning",
        house=house,
        unit=unit,
        method=method_names[method_code],
    )
def encode(method):
    """Encoding Everflourish packets is not implemented.

    https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolEverflourish.cpp
    """
    raise NotImplementedError()
|
11587057
|
import wx
import wx.lib.agw.hyperlink as hl
import webbrowser
from parser_filmow import Parser
from utils import delay
class Frame(wx.Frame):
    """Main window of the Filmow -> Letterboxd exporter.

    Asks for a Filmow username, scrapes the user's films via Parser and
    produces CSV files for Letterboxd's import page. All user-facing text
    is intentionally in Portuguese.
    """
    def __init__(self, *args, **kwargs):
        super(Frame, self).__init__(*args, **kwargs)
        self.MyFrame = self
        # True while a scrape is in progress; guards window close.
        self.is_running = False
        self.panel = wx.Panel(
            self,
            pos=(0, 0),
            size=(500,100),
            style=wx.CLOSE_BOX | wx.CAPTION | wx.MINIMIZE_BOX | wx.SYSTEM_MENU
        )
        self.panel.SetBackgroundColour('#ffffff')
        self.SetTitle('Filmow to Letterboxd')
        # Fixed-size window.
        self.SetMinSize((500, 300))
        self.SetMaxSize((500, 300))
        self.letterboxd_link = hl.HyperLinkCtrl(
            self.panel,
            -1,
            'letterboxd',
            URL='https://letterboxd.com/import/',
            pos=(420,240)
        )
        self.letterboxd_link.SetToolTip(wx.ToolTip('Clica só quando o programa tiver rodado e sua conta no Letterboxd tiver criada, beleza?'))
        self.coffee_link = hl.HyperLinkCtrl(
            self.panel,
            -1,
            'quer me agradecer?',
            URL='https://www.buymeacoffee.com/yanari',
            pos=(310,240)
        )
        self.coffee_link.SetToolTip(wx.ToolTip('Se tiver dado tudo certo cê pode me pagar um cafézinho, que tal?. Não é obrigatório, claro.'))
        wx.StaticText(self.panel, -1, 'Username no Filmow:', pos=(25, 54))
        self.username = wx.TextCtrl(self.panel, size=(200, 25), pos=(150, 50))
        submit_button = wx.Button(self.panel, wx.ID_SAVE, 'Submit', pos=(360, 50))
        self.Bind(wx.EVT_BUTTON, self.Submit, submit_button)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.Show(True)
    def Submit(self, event):
        """Handle the Submit button: disable it and start the scrape."""
        self.button = event.GetEventObject()
        self.button.Disable()
        # Read-only status area for progress messages.
        self.text_control = wx.TextCtrl(
            self.panel,
            -1,
            '',
            pos=(50, 120),
            size=(400, 100),
            style=wx.TE_MULTILINE | wx.TE_CENTRE | wx.TE_READONLY
            | wx.TE_NO_VSCROLL | wx.TE_AUTO_URL | wx.TE_RICH2 | wx.BORDER_NONE
        )
        self.Parse(self.MyFrame)
    @delay(1.0)
    def Parse(self, MyFrame):
        """Validate the username and run the Filmow scraper (delayed 1s)."""
        self.user = self.username.GetValue().lower().strip()
        if len(self.user) == 0:
            self.is_running = False
            self.text_control.ChangeValue('O campo não deve ficar em branco.')
            self.button.Enable()
            return
        else:
            try:
                msg = """Seus filmes estão sendo importados no plano de fundo :)\n\n
                Não feche a janela e aguarde um momento."""
                self.text_control.ChangeValue(msg)
                self.is_running = True
                # Parser does the scraping and writes the CSV file(s).
                self.parser = Parser(self.user)
            except Exception:
                self.text_control.ChangeValue('Usuário {} não encontrado. Tem certeza que digitou certo?'.format(self.user))
                self.button.Enable()
                self.is_running = False
                return
        self.ChangeMsg()
    @delay(1.0)
    def ChangeMsg(self):
        """Show the success message and make URLs in it clickable."""
        msg = """Pronto!\n\n Agora clica no link aqui embaixo pra ir pro Letterboxd,
        SELECT A FILE e selecione o(s) arquivo(s) de extensão .csv
        (tá tudo aqui nessa mesma pasta) criado(s) pelo programa."""
        self.text_control.ChangeValue(msg)
        self.Bind(wx.EVT_TEXT_URL, self.GoToLetterboxd, self.text_control)
        self.is_running = False
    def GoToLetterboxd(self, event):
        webbrowser.open('https://letterboxd.com/import/')
    def BuyMeACoffee(self, event):
        webbrowser.open('https://www.buymeacoffee.com/yanari')
    def OnClose(self, event):
        """Confirm before closing while a scrape is still running."""
        if self.is_running:
            confirm_exit = wx.MessageDialog(
                self,
                'Tem certeza que quer parar o programa?',
                'Sair',
                wx.YES_NO | wx.ICON_QUESTION
            )
            if confirm_exit.ShowModal() == wx.ID_YES:
                # NOTE(review): Destroy is called twice here (self.Destroy and
                # wx.Window.Destroy) — looks redundant; confirm before changing.
                self.Destroy()
                wx.Window.Destroy(self)
            else:
                confirm_exit.Destroy()
        else:
            event.Skip()
# Script entry point: create the wx application and the main window, then
# hand control to the GUI event loop until the window is closed.
if __name__ == '__main__':
    app = wx.App()
    Frame(None, size=(500, 300))
    app.MainLoop()
|
11587068
|
from typing import Optional
from pydantic import BaseModel
class Colors(BaseModel):
    """Color palette for a site theme; each field is a hex RGB string."""

    primary: str = "#E58325"
    accent: str = "#00457A"
    secondary: str = "#973542"
    success: str = "#43A047"
    info: str = "#1976D2"
    warning: str = "#FF6F00"
    error: str = "#EF5350"

    class Config:
        # allow constructing the model directly from ORM objects
        orm_mode = True
class SiteTheme(BaseModel):
    """A named site theme: an optional database id plus a color palette."""

    # database primary key; None for themes not yet persisted
    id: Optional[int]
    name: str = "default"
    colors: Colors = Colors()

    class Config:
        # allow constructing the model directly from ORM objects
        orm_mode = True
        # example payload shown in the generated OpenAPI docs
        schema_extra = {
            "example": {
                "name": "default",
                "colors": {
                    "primary": "#E58325",
                    "accent": "#00457A",
                    "secondary": "#973542",
                    "success": "#5AB1BB",
                    "info": "#4990BA",
                    "warning": "#FF4081",
                    "error": "#EF5350",
                },
            }
        }
|
11587101
|
import gfapy
import unittest
class TestApiGFA2Lines(unittest.TestCase):
    """Tests for GFA1 segment (S) line parsing and the coverage helpers."""

    def test_S(self):
        """Parse a GFA1 segment line; check fields, tags and validations."""
        # a canonical segment line with the standard optional tags plus a
        # custom "ab" tag
        fields=["S","1","ACGTCACANNN","RC:i:1232","LN:i:11","ab:Z:abcd",
                "FC:i:2321","KC:i:1212"]
        s="\t".join(fields)
        gfapy.Line(s) # nothing raised
        # positional fields are exposed as record_type / name / sequence
        self.assertEqual(gfapy.line.segment.GFA1, gfapy.Line(s).__class__)
        self.assertEqual(fields[0], gfapy.Line(s).record_type)
        self.assertEqual(fields[1], gfapy.Line(s).name)
        self.assertEqual(fields[2], gfapy.Line(s).sequence)
        # typed tags are parsed to their declared types
        self.assertEqual(1232, gfapy.Line(s).RC)
        self.assertEqual(11, gfapy.Line(s).LN)
        self.assertEqual(2321, gfapy.Line(s).FC)
        self.assertEqual(1212, gfapy.Line(s).KC)
        self.assertEqual("abcd", gfapy.Line(s).ab)
        # malformed lines and tag values raise format/type errors
        with self.assertRaises(gfapy.FormatError): s+gfapy.Line("\tH1")
        with self.assertRaises(gfapy.FormatError): gfapy.Line("S\tH")
        with self.assertRaises(gfapy.FormatError):
            f=fields.copy(); f[2]="!@#?"; gfapy.Line("\t".join(f),vlevel=1)
        with self.assertRaises(gfapy.TypeError):
            f=fields.copy(); f[3]="RC:Z:1232"; gfapy.Line("\t".join(f),version="gfa1")
        # LN tag inconsistent with the actual sequence length is rejected
        # at validation level 1
        f=["S","2","ACGTCACANNN","LN:i:3"]
        with self.assertRaises(gfapy.InconsistencyError):
            gfapy.Line("\t".join(f),vlevel=1, version="gfa1")
        f=["S","2","ACGTCACANNN","LN:i:11"]
        gfapy.Line("\t".join(f)) # nothing raised
        # with a placeholder sequence ("*") any LN value is acceptable
        f=["S","2","*","LN:i:3"]
        gfapy.Line("\t".join(f)) # nothing raised

    def test_coverage(self):
        """coverage() requires both a count tag (RC by default) and LN."""
        l = gfapy.Line("S\t0\t*\tRC:i:600\tLN:i:100")
        self.assertEqual(6, l.coverage())
        self.assertEqual(6, l.try_get_coverage())
        # missing LN or RC: coverage() is None, try_get raises
        l = gfapy.Line("S\t0\t*\tRC:i:600")
        self.assertEqual(None, l.coverage())
        with self.assertRaises(gfapy.NotFoundError): l.try_get_coverage()
        l = gfapy.Line("S\t0\t*\tLN:i:100")
        self.assertEqual(None, l.coverage())
        with self.assertRaises(gfapy.NotFoundError): l.try_get_coverage()
        # an alternate count tag must be requested explicitly
        l = gfapy.Line("S\t0\t*\tFC:i:600\tLN:i:100")
        self.assertEqual(None, l.coverage())
        with self.assertRaises(gfapy.NotFoundError): l.try_get_coverage()
        self.assertEqual(6, l.coverage(count_tag="FC"))
        self.assertEqual(6, l.try_get_coverage(count_tag="FC"))
|
11587149
|
import logging
from datetime import datetime
from itertools import islice
import requests
from django.conf import settings as django_settings
from django.contrib import auth
from django.db.models import Count, F, Prefetch
from django_auth_ldap.backend import _LDAPUser
from edd import utilities
from main import models
logger = logging.getLogger(__name__)
# tuple for request connection and read timeouts, respectively, in seconds
timeout = (10, 10)
class SolrException(IOError):
    """Raised when communication with the Solr service fails."""
class SolrSearch:
    """
    Base class for interfacing with Solr indices.

    Solr concepts to understand:

    - ConfigSet is the schema and configuration of searchers
    - Collection is a grouping of documents to search using a ConfigSet
    - Alias is a name used to reference a Collection
    - Core is an instance of Solr serving documents

    This class was originally written to use Cores directly. It is now using
    Collections, which contain one or more Cores using a ConfigSet. It takes
    the name of a ConfigSet, generates a Collection (and Cores) using that
    ConfigSet, and sets an Alias with the same name as the ConfigSet to point
    to the Collection.

    :param core: the name of the ConfigSet defining the search
    :param settings: (optional) settings dictionary containing the Solr URL
    :param settings_key: (optional) key used to lookup settings dictionary
        from Django setting EDD_MAIN_SOLR (default "default")
    :param url: (optional) directly set Solr URL
    """

    DEFAULT_URL = "http://localhost:8080"

    @classmethod
    def resolve_url(cls, settings, settings_key, url):
        """
        Resolve the Solr base URL, preferring in order: an explicit url
        argument, the settings dict, the Django EDD_MAIN_SOLR setting, and
        finally DEFAULT_URL.
        """
        if url is not None:
            return url
        if settings is not None:
            url = settings.get("URL", None)
            if url is not None:
                return url
        if settings_key in django_settings.EDD_MAIN_SOLR:
            url = django_settings.EDD_MAIN_SOLR[settings_key].get("URL", None)
            if url is not None:
                return url
        logger.warning("Could not resolve a URL for Solr, falling back to default")
        return cls.DEFAULT_URL

    def __init__(
        self,
        core=None,
        settings=None,
        settings_key="default",
        url=None,
        *args,
        **kwargs,
    ):
        self.core = core
        # name of a pending (created but not yet aliased) collection, if any
        self.collection = None
        self.base_url = self.resolve_url(settings, settings_key, url)
        # chop trailing slash if present
        self.base_url = self.base_url.rstrip("/")

    def __repr__(self, *args, **kwargs):
        return self.__str__()

    def __str__(self, *args, **kwargs):
        return f"SolrSearch[{self.url}]"

    def __len__(self):
        """Total number of documents in the index (a match-all count query)."""
        queryopt = self.get_queryopt("*:*", size=0)
        result = self.search(queryopt)
        # default to 0 so len() cannot receive None on a malformed response
        return result.get("response", {}).get("numFound", 0)

    def check(self):
        """Ensures that the ConfigSet for this searcher has a Collection."""
        # find the primary collection, creating one if it does not exist
        primary = self._find_alias_collection()
        if primary is None:
            primary = self.create_collection()
            self.commit_collection()
        return primary

    def clean(self):
        """
        Ensures that the ConfigSet for this searcher has only one Collection.

        This should be used carefully, and only run with guarantees of no other
        potential callers (e.g. during startup).
        """
        # find the primary collection, creating one if it does not exist
        primary = self.check()
        # discard all collections that are not the primary
        discarded = [
            self.discard_collection(collection=name)
            for name in self._find_collections()
            if name != primary
        ]
        # return all the collection names discarded
        return discarded

    def _find_alias_collection(self):
        """Return the collection name the core alias points at, or None."""
        url = f"{self.base_url}/admin/collections"
        params = {"action": "LISTALIASES"}
        try:
            response = requests.get(url, params=params, timeout=timeout)
            response.raise_for_status()
            aliases = response.json()["aliases"]
            return aliases.get(self.core, None)
        except Exception as e:
            raise SolrException(
                f"Failed to find collection for alias {self.core}"
            ) from e

    def _find_collections(self):
        """Return all collection names derived from this searcher's core."""
        url = f"{self.base_url}/admin/collections"
        params = {"action": "LIST"}
        try:
            response = requests.get(url, params=params, timeout=timeout)
            response.raise_for_status()
            collections = response.json()["collections"]
            return [name for name in collections if self.core in name]
        except Exception as e:
            raise SolrException(
                f"Failed to find collections matching {self.core}"
            ) from e

    def collection_name(self):
        """Generate a unique collection name: the core plus a hex timestamp."""
        now_int = int(datetime.utcnow().timestamp())
        # 5 bytes is big enough for the maximum handled by datetime
        now_hex = now_int.to_bytes(5, "big").hex()
        return f"{self.core}_{now_hex}"

    def create_collection(self):
        """Creates a new collection for searching."""
        url = f"{self.base_url}/admin/collections"
        name = self.collection_name()
        params = {
            "action": "CREATE",
            "collection.configName": self.core,
            "name": name,
            "numShards": 1,
        }
        try:
            response = requests.get(url, params=params, timeout=timeout)
            response.raise_for_status()
            self.collection = name
            return self.collection
        except Exception as e:
            raise SolrException(f"Failed to create collection for {self.core}") from e

    def commit_collection(self):
        """Sets a collection name to point the core name alias toward."""
        if self.collection is None:
            raise SolrException("Must call create_collection before commit_collection")
        url = f"{self.base_url}/admin/collections"
        params = {
            "action": "CREATEALIAS",
            "collections": self.collection,
            "name": self.core,
        }
        try:
            response = requests.get(url, params=params, timeout=timeout)
            response.raise_for_status()
            self.collection = None
        except Exception as e:
            raise SolrException(
                f"Failed to commit {self.collection} to {self.core}"
            ) from e

    def discard_collection(self, collection=None):
        """
        Discards a collection from search.

        :param collection: name of the collection to delete; defaults to the
            pending collection created by create_collection
        :return: the name of the discarded collection
        :raises SolrException: if there is no collection to discard, or the
            delete request fails
        """
        # BUGFIX: this previously did `collection = filter(None, (...))`,
        # which made `collection` an iterator object -- the `is None` guard
        # below could never fire, and the filter object itself was sent as
        # the DELETE name. Resolve the fallback explicitly instead.
        if collection is None:
            collection = self.collection
        # always reset the collection attribute
        self.collection = None
        if collection is None:
            raise SolrException("No collection to discard")
        url = f"{self.base_url}/admin/collections"
        params = {"action": "DELETE", "name": collection}
        try:
            response = requests.get(url, params=params, timeout=timeout)
            response.raise_for_status()
            return collection
        except Exception as e:
            # use the local name; self.collection was reset to None above
            raise SolrException(
                f"Failed to discard collection {collection}"
            ) from e

    def reindex(self):
        """Runs a full index for the search collection."""
        self.create_collection()
        try:
            queryset = self.get_queryset()
            self.update(queryset)
            self.commit_collection()
        except Exception as e:
            # roll back the half-built collection before propagating
            self.discard_collection()
            raise SolrException(f"Failed to reindex {self.core}") from e

    def get_queryset(self):
        """
        Return an iterable of items that will be sent to the search index.
        In most cases, this will be a Django QuerySet.
        """
        return []  # pragma: no cover

    def get_solr_payload(self, obj):
        """Convert an object to its Solr JSON document, if it knows how."""
        if callable(getattr(obj, "to_solr_json", None)):
            return obj.to_solr_json()
        return obj

    def get_queryopt(self, query, **kwargs):
        """
        Build the Solr request parameter dict for a query string.

        :param query: the Solr query string (the "q" parameter)
        :param kwargs: i (start offset), size (page size), sort
        """
        # do some basic bounds sanity checking
        try:
            start = int(kwargs.get("i", 0))
            start = 0 if start < 0 else start
        except Exception:
            start = 0
        try:
            rows = int(kwargs.get("size", 50))
            rows = 50 if rows < 0 else rows
        except Exception:
            rows = 50
        queryopt = {
            "indent": True,
            "q": query,
            "start": start,
            "rows": rows,
            "sort": kwargs.get("sort", None),
            "wt": "json",
            "fl": "*",
        }
        return queryopt

    def remove(self, docs):
        """
        Updates Solr with a list of objects to remove from the index.

        :param docs: an iterable of objects with an id property
        :raises SolrException: if an error occurs during the removal attempt. Note that
            removals are performed iteratively, so it's possible that some succeeded
            before the error occurred.
        """
        logger.info(f"Removing items from {self}")
        # Does no permissions checking; permissions already valid if called from
        # Study pre_delete signal, but other clients must do their own permission checks.
        url = f"{self.url}/update/json"
        headers = {"content-type": "application/json"}
        commands = ",".join(f'"delete":{{"id":"{doc.id}"}}' for doc in docs)
        try:
            response = requests.post(
                url,
                data=f'{{{commands}, "commit":{{}}}}',
                headers=headers,
                timeout=timeout,
            )
            response.raise_for_status()
        # catch / re-raise communication errors after logging some helpful
        # context re: where the error occurred
        except Exception as e:
            raise SolrException(f"Failed to remove from index {docs}") from e

    def search(self, queryopt=None):
        """
        Runs query with raw Solr parameters

        :return: a dictionary containing the Solr json response
        :raises SolrException: if an error occurs during the query attempt
        """
        if queryopt is None:
            queryopt = {"q": "*:*", "wt": "json"}
        # single character queries will never return results as smallest ngram is 2 characters
        if len(queryopt["q"]) == 1:
            queryopt["q"] = f'{queryopt["q"]}*'
        logger.debug(f"{self} searching with: {queryopt}")
        try:
            # contact Solr / raise any IOErrors that arise
            response = requests.get(
                f"{self.url}/select", params=queryopt, timeout=timeout
            )
            response.raise_for_status()
            return response.json()
        except Exception as e:
            raise SolrException(f"{self} failed search with {queryopt}") from e

    def query(self, query, **kwargs):
        """
        Runs a query against the Solr core, translating options to the Solr syntax.

        Arguments:
            query: Solr query string (default: 'is_active:true')
            i: starting index of results to fetch (default: 0)
            size: maximum fetch size (default: 50)
            sort: comma-delimited string of "field (asc|desc)" (default: None)
        Returns:
            JSON results of query:
                - responseHeader
                    - status: 0 for no errors, otherwise an error code
                    - QTime: milliseconds to complete query
                    - params: echo of parameters used in request
                - response
                    - numFound: total documents matching query
                    - start: starting index of results
                    - docs: array of results
        :raises SolrException: if an error occurs during the query attempt
            (error connecting, or HTTP error response from Solr)
        """
        queryopt = self.get_queryopt(query, **kwargs)
        return self.search(queryopt=queryopt)

    def update(self, docs):
        """
        Update Solr index from the given list of objects. Does no permissions checking;
        permissions already valid if called from Study post_save signal, but other
        clients must do own checks on permissions.

        :param docs: an iterable of objects with a to_solr_json method to update in Solr.
            Must have an id attribute.
        :raises SolrException: if an error occurs during the update attempt
        """
        logger.info(f"Sending updates to {self}")
        url = f"{self.url}/update/json"
        headers = {"content-type": "application/json"}
        payload = filter(lambda d: d is not None, map(self.get_solr_payload, docs))
        try:
            # Send updates in groups of 50
            for group in iter(lambda: list(islice(payload, 50)), []):
                ids = [item.get("id") for item in group]
                logger.debug(f"{self} updating with IDs: {ids}")
                # make an initial request to do the add / raise IOError if it occurs
                response = requests.post(
                    url,
                    data=utilities.JSONEncoder.dumps(group),
                    headers=headers,
                    timeout=timeout,
                )
                response.raise_for_status()
                # if the adds worked, send commit command
                response = requests.post(
                    url, data=r'{"commit":{}}', headers=headers, timeout=timeout
                )
                # raises HttpError (extends IOError)
                response.raise_for_status()
        except Exception as e:
            raise SolrException(f"{self} failed update") from e

    @property
    def url(self):
        """Full URL to query: a pending collection if set, else the core alias."""
        if self.collection is not None:
            return f"{self.base_url}/{self.collection}"
        return f"{self.base_url}/{self.core}"
class StudySearch(SolrSearch):
    """
    A more-or-less straight port of the StudySearch.pm module from the EDD perl code. Makes
    requests to the custom Solr schema created to search EDD studies.

    Arguments:
        ident: User object from django.contrib.auth.models
        url: Base URL for Solr instance (default: None; overrides settings value if not None)
        settings_key: connection key in settings SOLR value
    """

    def __init__(self, core="studies", ident=None, *args, **kwargs):
        super().__init__(core=core, *args, **kwargs)
        # the user whose permissions scope every query (see build_acl_filter)
        self.ident = ident

    def __str__(self, *args, **kwargs):
        return f"StudySearch[{self.url}][{self.ident}]"

    def build_acl_filter(self):
        """
        Create a fq (filter query) string based on an ident (django.contrib.auth.models.User).

        Arguments:
            ident: User object from django.contrib.auth.models
        Returns:
            tuple of (read permission filter, write permission eval)
        :raises SolrException: if no user is set on this searcher
        """
        if self.ident is None:
            raise SolrException("No user defined for query")
        # Admins get no filter on read, and a query that will always eval true for write
        if self.ident.is_superuser:
            return ("", "id:*")
        # ACL terms match the "Everyone" pseudo-group, the user, and each of
        # the user's groups
        user_acl = f'"u:{self.ident.username}"'
        acl = ['"g:__Everyone__"', user_acl] + [
            f'"g:{g.name}"' for g in self.ident.groups.all()
        ]
        return (
            " OR ".join([f"aclr:{r}" for r in acl]),
            " OR ".join([f"aclw:{w}" for w in acl]),
        )

    def get_queryset(self):
        """Queryset of studies to index, prefetching relations used by to_solr_json."""
        return (
            models.Study.objects.select_related(
                "contact",
                "updated__mod_by__userprofile",
                "created__mod_by__userprofile",
            )
            .annotate(_file_count=Count("files"), _comment_count=Count("comments"))
            .prefetch_related(
                Prefetch(
                    "userpermission_set",
                    queryset=models.UserPermission.objects.select_related("user"),
                ),
                Prefetch(
                    "grouppermission_set",
                    queryset=models.GroupPermission.objects.select_related("group"),
                ),
                "everyonepermission_set",
            )
        )

    def query(self, query="", options=None):
        """
        Run a query against the Solr index.

        Arguments:
            query: Solr query string (default: 'active:true')
            options: dict containing optional query parameters
                - edismax: boolean to run query as term in edismax query (default: False)
                - i: starting index of results to fetch (default: 0)
                - size: maximum fetch size (default: 50)
                - sort: comma-delimited string of "field (asc|desc)" (default: None)
                - showDisabled: boolean adds a filter query for active studies (default: False)
                - showMine: boolean adds a filter query for current user's studies (default:
                  False)
        Returns:
            JSON results of query:
                - responseHeader
                    - status: 0 for no errors, otherwise an error code
                    - QTime: milliseconds to complete query
                    - params: echo of parameters used in request
                - response
                    - numFound: total documents matching query
                    - start: starting index of results
                    - docs: array of results
        :raises IOError: if an error occurs during the query attempt
        """
        # Keeping the old signature to retain backward-compatibility
        if options is None:
            options = {}
        return super().query(query=query, **options)

    def get_queryopt(self, query, **kwargs):
        queryopt = super().get_queryopt(query, **kwargs)
        # reads are restricted with fq; the write check is embedded in fl so
        # every returned doc carries a computed boolean "writable" field
        (readable, writable) = self.build_acl_filter()
        fq = [readable]
        queryopt["fl"] = f"""*,score,writable:exists(query({{!v='{writable}'}},0))"""
        if kwargs.get("edismax", False):
            queryopt["defType"] = "edismax"
            # these are the query fields and boosts to use in EDisMax
            queryopt["qf"] = " ".join(
                [
                    "name^10",
                    "name_ng",
                    "description^5",
                    "description_ng",
                    "contact",
                    "contact_ng",
                    "creator_email",
                    "creator_name",
                    "creator_ng",
                    "initials",
                    "metabolite_name",
                    "protocol_name",
                    "part_name",
                ]
            )
            queryopt["q.alt"] = "*:*"
        if not kwargs.get("showDisabled", False):
            # hide disabled studies unless explicitly requested
            fq.append("active:true")
        if kwargs.get("showMine", False) and self.ident:
            # restrict to studies created by the current user
            fq.append(f"creator:{self.ident.pk}")
        queryopt["fq"] = fq
        return queryopt
class StudyAdminSearch(StudySearch):
    """StudySearch that acts as an admin user without explicitly passing one."""

    def build_acl_filter(self):
        # Admin view: no read restriction, and a write query matching every doc.
        return ("", "id:*")
class UserSearch(SolrSearch):
    """API to manage searching for users via Solr index."""

    def __init__(self, core="users", *args, **kwargs):
        super().__init__(core=core, *args, **kwargs)

    def get_queryset(self):
        """Yield every user, mirroring LDAP group membership before indexing."""
        User = auth.get_user_model()
        queryset = User.objects.select_related("userprofile").prefetch_related(
            "userprofile__institutions"
        )
        # load any LDAP backends
        backends = [b for b in auth.get_backends() if hasattr(b, "ldap")]
        # attempt to load groups from LDAP before yielding
        for user in queryset:
            for backend in backends:
                # doing this saves a database query over directly loading
                ldap_user = _LDAPUser(backend, user=user)
                try:
                    ldap_user._mirror_groups()
                except Exception:
                    # do nothing on failure to find user in backend
                    pass
            yield user

    def query(self, query="is_active:true", options=None):
        """
        Run a query against the Users Solr index.

        Arguments:
            query: Solr query string (default: 'is_active:true')
            options: dict containing optional query parameters
                - edismax: boolean to run query as term in edismax query (default: False)
                - i: starting index of results to fetch (default: 0)
                - size: maximum fetch size (default: 50)
                - sort: comma-delimited string of "field (asc|desc)" (default: None)
                - showDisabled: boolean disables the filter query for active users
                  (default: False)
        Returns:
            JSON results of query:
                - responseHeader
                    - status: 0 for no errors, otherwise an error code
                    - QTime: milliseconds to complete query
                    - params: echo of parameters used in request
                - response
                    - numFound: total documents matching query
                    - start: starting index of results
                    - docs: array of results
        :raises IOError: if an error occurs during the query attempt (error connecting,
            or HTTP error response from Solr)
        """
        # Keeping the old signature to retain backward-compatibility
        if options is None:
            options = {}
        return super().query(query=query, **options)

    def get_queryopt(self, query, **kwargs):
        queryopt = super().get_queryopt(query, **kwargs)
        if kwargs.get("edismax", False):
            queryopt["defType"] = "edismax"
            # these are the query fields and boosts to use in EDisMax
            queryopt["qf"] = " ".join(
                [
                    "name^10",
                    "name_ng^5",
                    "initial_lower^5",
                    "group_ng",
                    "institution_ng",
                ]
            )
            queryopt["q.alt"] = "*:*"
        # Filter to active accounts unless disabled users were requested.
        # BUGFIX: this previously read queryopt["fq"], which the base class
        # never sets (KeyError), and applied the active filter only when
        # showDisabled was True -- the inverse of the documented behavior
        # (compare StudySearch.get_queryopt).
        if not kwargs.get("showDisabled", False):
            fq = queryopt.get("fq", [])
            if isinstance(fq, str):
                fq = [fq]
            queryopt["fq"] = fq + ["is_active:true"]
        return queryopt
class MeasurementTypeSearch(SolrSearch):
    """API to manage searching for measurement types via Solr index"""

    def __init__(self, core="measurement", *args, **kwargs):
        super().__init__(core=core, *args, **kwargs)

    def get_queryset(self):
        """Queryset of measurement types with subtype relations preloaded."""
        return models.MeasurementType.objects.annotate(
            _source_name=F("type_source__name")
        ).select_related(
            "metabolite", "proteinidentifier", "geneidentifier", "phosphor"
        )

    def get_queryopt(self, query, **kwargs):
        queryopt = super().get_queryopt(query, **kwargs)
        queryopt["defType"] = "edismax"
        queryopt["qf"] = " ".join(
            [
                "name^10",  # put high weight on matching name
                "name_edge^5",  # half as much on matching begin/end of name
                "name_ng^2",  # smaller weight on matching substring
                "synonym^8",  # high weight on matching synonyms
                "synonym_edge^4",  # half as much on matching begin/end of synonym
                "synonym_ng^2",  # smaller weight on matching substring of synonym
                "code^10",  # high weight on matching the BIGG/SBML short name
                "m_formula",  # small weight on matching formula string
            ]
        )
        queryopt["q.alt"] = "*:*"
        if kwargs.get("family", None):
            family = kwargs["family"]
            if isinstance(family, str):
                queryopt["fq"] = f"family:{family}"
            else:
                # BUGFIX: join() is a method of the separator string, not of
                # the list; the previous `[...].join(" OR ")` raised
                # AttributeError whenever multiple families were given.
                queryopt["fq"] = " OR ".join(f"family:{f}" for f in family)
        return queryopt

    def get_solr_payload(self, obj):
        """Pick the most specific subtype record before serializing to Solr."""
        Group = models.MeasurementType.Group
        if obj.type_group == Group.METABOLITE:
            item = getattr(obj, "metabolite", obj)
        elif obj.type_group == Group.PROTEINID:
            item = getattr(obj, "proteinidentifier", obj)
        else:
            item = obj
        return item.to_solr_json()
|
11587202
|
from jyotisha.panchaanga import temporal
from jyotisha.panchaanga.spatio_temporal import City
from jyotisha.panchaanga.writer.generation_project import dump_summary
def dump_mysore_history():
    """Generate yearly panchaanga summaries for Mysore, 1740-1809."""
    city = City.get_city_from_db(name="Mysore")
    for year in range(1740, 1810):
        dump_summary(year=year, city=city)
def dump_pune_history():
    """Generate yearly panchaanga summaries for Pune, 1625-1849."""
    pune = City.get_city_from_db(name="Pune")
    for year in range(1625, 1850):
        dump_summary(year=year, city=pune)
def dump_hampi_history():
    """Generate yearly panchaanga summaries for Hampi, 1300-1624."""
    hampi = City.get_city_from_db(name="Hampi")
    for year in range(1300, 1625):
        dump_summary(year=year, city=hampi)
def dump_bengaluru_history():
    """Generate panchaanga summaries for Bengaluru (Sahakara Nagar), 2010-2022.

    Summaries are recomputed from scratch (allow_precomputed=False).
    """
    blr = City.get_city_from_db(name="sahakAra nagar, bengaLUru")
    for year in range(2010, 2023):
        dump_summary(year=year, city=blr, allow_precomputed=False)
# Script entry point: by default only the Bengaluru summaries are regenerated.
if __name__ == '__main__':
    dump_bengaluru_history()
|
11587209
|
from unittest import TestCase
import numpy as np
from SEAL.lib import compute_knot_insertion_matrix
class TestComputeKnotInsertionMatrix(TestCase):
def test_compute_knot_insertion_matrix(self):
"""
Given:
p = 2
tau = [-1, -1, -1, 0, 1, 1, 1]
t = [-1, -1, -1, -0.5, 0, 0.5, 1, 1, 1]
When:
Representing the coarse linear B spline
as a linear combination of the fine linear B splines
Then:
Knot Insertion Matrix A =
[[ 1. 0. 0. 0. ]
[ 0.5 0.5 0. 0. ]
[ 0. 0.75 0.25 0. ]
[ 0. 0.25 0.75 0. ]
[ 0. 0. 0.5 0.5 ]
[ 0. 0. 0. 1. ]]
"""
p = 2
tau = [-1, -1, -1, 0, 1, 1, 1]
t = [-1, -1, -1, -0.5, 0, 0.5, 1, 1, 1]
expected_A = np.array([[1., 0., 0., 0.],
[0.5, 0.5, 0., 0.],
[0., 0.75, 0.25, 0.],
[0., 0.25, 0.75, 0.],
[0., 0., 0.5, 0.5],
[0., 0., 0., 1.]])
computed_A = compute_knot_insertion_matrix(p, tau, t)
for e, c in zip(expected_A, computed_A):
np.testing.assert_array_almost_equal(e, c)
|
11587228
|
from ..broker import Broker
class IfGroupMemberBroker(Broker):
controller = "if_group_members"
    def index(self, **kwargs):
        """Lists the available if group members. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.

            **Inputs**

            |  ``api version min:`` 2.4
            |  ``api version max:`` 2.4
            |  ``required:`` False
            |  ``default:`` None

            :param DeviceID: The internal NetMRI identifier for the device from which this data was collected.
            :type DeviceID: Integer

            |  ``api version min:`` 2.5
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param DeviceID: The internal NetMRI identifier for the device from which this data was collected.
            :type DeviceID: Array of Integer

            |  ``api version min:`` 2.4
            |  ``api version max:`` 2.4
            |  ``required:`` False
            |  ``default:`` None

            :param GroupID: The internal NetMRI identifier for the interface group.
            :type GroupID: Integer

            |  ``api version min:`` 2.5
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param GroupID: The internal NetMRI identifier for the interface group.
            :type GroupID: Array of Integer

            |  ``api version min:`` 2.4
            |  ``api version max:`` 2.4
            |  ``required:`` False
            |  ``default:`` None

            :param IfGroupMemberID: The internal NetMRI identifier for this interface group membership record.
            :type IfGroupMemberID: Integer

            |  ``api version min:`` 2.5
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param IfGroupMemberID: The internal NetMRI identifier for this interface group membership record.
            :type IfGroupMemberID: Array of Integer

            |  ``api version min:`` 2.4
            |  ``api version max:`` 2.4
            |  ``required:`` False
            |  ``default:`` None

            :param InterfaceID: The Interface ID.
            :type InterfaceID: Integer

            |  ``api version min:`` 2.5
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param InterfaceID: The Interface ID.
            :type InterfaceID: Array of Integer

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param timestamp: The data returned will represent the if group members as of this date and time. If omitted, the result will indicate the most recently collected data.
            :type timestamp: DateTime

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param methods: A list of if group member methods. The listed methods will be called on each if group member returned and included in the output. Available methods are: interface.
            :type methods: Array of String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: interface.
            :type include: Array of String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` 0

            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
            :type start: Integer

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` 1000

            :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
            :type limit: Integer

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` IfGroupMemberID

            :param sort: The data field(s) to use for sorting the output. Default is IfGroupMemberID. Valid values are IfGroupMemberID, GroupID, DeviceID, ifIndex, InterfaceID, ifGroupMemberStartTime, ifGroupMemberEndTime, ifGroupMemberChangedCols, ifGroupMemberTimestamp, DataSourceID.
            :type sort: Array of String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` asc

            :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
            :type dir: Array of String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param select: The list of attributes to return for each IfGroupMember. Valid values are IfGroupMemberID, GroupID, DeviceID, ifIndex, InterfaceID, ifGroupMemberStartTime, ifGroupMemberEndTime, ifGroupMemberChangedCols, ifGroupMemberTimestamp, DataSourceID. If empty or omitted, all attributes will be returned.
            :type select: Array

            |  ``api version min:`` 2.8
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
            :type goto_field: String

            |  ``api version min:`` 2.8
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
            :type goto_value: String

            **Outputs**

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :return if_group_members: An array of the IfGroupMember objects that match the specified input criteria.
            :rtype if_group_members: Array of IfGroupMember
            """
        # Delegate to the generic broker list endpoint for this controller
        # ("if_group_members"); all filter/paging options pass straight through.
        return self.api_list_request(self._get_method_fullname("index"), kwargs)
    def show(self, **kwargs):
        """Shows the details for the specified if group member.

            **Inputs**

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` True
            |  ``default:`` None

            :param IfGroupMemberID: The internal NetMRI identifier for this interface group membership record.
            :type IfGroupMemberID: Integer

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param methods: A list of if group member methods. The listed methods will be called on each if group member returned and included in the output. Available methods are: interface.
            :type methods: Array of String

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: interface.
            :type include: Array of String

            **Outputs**

            |  ``api version min:`` None
            |  ``api version max:`` None
            |  ``required:`` False
            |  ``default:`` None

            :return if_group_member: The if group member identified by the specified IfGroupMemberID.
            :rtype if_group_member: IfGroupMember
            """
        # Delegate to the generic single-record broker endpoint; the required
        # IfGroupMemberID (and any options) pass straight through in kwargs.
        return self.api_request(self._get_method_fullname("show"), kwargs)
def search(self, **kwargs):
    """Search for if group members matching the input criteria.

    More flexible than ``index`` (field filters plus an optional query
    string and XML filter), but more demanding on the system and will
    not perform to the same level as ``index``.

    **Inputs** (all optional)

    Field filters (scalar values for API 2.4, arrays from 2.5 onward):
    DataSourceID, DeviceID, GroupID, IfGroupMemberID, InterfaceID,
    ifGroupMemberChangedCols, ifGroupMemberEndTime,
    ifGroupMemberStartTime, ifGroupMemberTimestamp, ifIndex.

    :param timestamp: Return data as of this date and time; defaults to
        the most recently collected data.
    :param methods: Methods to call on each returned record and include
        in the output. Available: interface.
    :param include: Associated object types to include in the output.
        Available: interface.
    :param start: First record of the selected page of data (default 0).
    :param limit: Page size; maximum 10000 (default 1000).
    :param sort: Sort field(s); default IfGroupMemberID.
    :param dir: Sort direction(s), 'asc' (default) or 'desc'.
    :param select: Attributes to return for each IfGroupMember; all
        attributes if empty or omitted.
    :param goto_field: Field name for NIOS GOTO row positioning (2.8+).
    :param goto_value: Value of goto_field for NIOS GOTO (2.8+).
    :param query: Value matched against the searchable attributes
        listed above; surround with '/' for a regular-expression search
        instead of containment.
    :param xml_filter: SetFilter XML applied after any query or field
        values but before limit/pagination (2.3+). May be costly if not
        associated with database filtering.

    **Outputs**

    :return if_group_members: An array of the IfGroupMember objects
        that match the specified input criteria.
    :rtype if_group_members: Array of IfGroupMember
    """
    # List-style request: the broker resolves the fully qualified name
    # of the "search" endpoint and pages through the results.
    endpoint = self._get_method_fullname("search")
    return self.api_list_request(endpoint, kwargs)
def find(self, **kwargs):
    """Find if group members using per-field comparison operators.

    The most flexible query mechanism of all: supports comparison
    operations other than equality, but is more complex to use and will
    not perform as efficiently as ``index`` or ``search``.

    Searchable fields: DataSourceID, DeviceID, GroupID, IfGroupMemberID,
    InterfaceID, ifGroupMemberChangedCols, ifGroupMemberEndTime,
    ifGroupMemberStartTime, ifGroupMemberTimestamp, ifIndex.

    **Inputs** (all optional)

    For each field F listed above:

    :param op_F: Operator to apply to F. Valid values: =, <>, rlike,
        not rlike, >, >=, <, <=, like, not like, is null, is not null,
        between. For ``between``, a comma-delimited value is treated as
        an Array and must contain an even number of values.
    :param val_f_F: If op_F is specified, compare F against the value of
        the *field named* in this input. Either this or val_c_F must be
        given with op_F.
    :param val_c_F: If op_F is specified, compare F against this
        explicit *constant* value. Either this or val_f_F must be given
        with op_F.

    Common listing options:

    :param timestamp: Return data as of this date and time; defaults to
        the most recently collected data.
    :param methods: Methods to call on each returned record. Available:
        interface.
    :param include: Associated object types to include in the output.
        Available: interface.
    :param start: First record of the selected page of data (default 0).
    :param limit: Page size; maximum 10000 (default 1000).
    :param sort: Sort field(s); default IfGroupMemberID.
    :param dir: Sort direction(s), 'asc' (default) or 'desc'.
    :param select: Attributes to return for each IfGroupMember; all
        attributes if empty or omitted.
    :param goto_field: Field name for NIOS GOTO row positioning (2.8+).
    :param goto_value: Value of goto_field for NIOS GOTO (2.8+).
    :param xml_filter: SetFilter XML applied after any field values but
        before limit/pagination (2.3+). May be costly if not associated
        with database filtering.

    **Outputs**

    :return if_group_members: An array of the IfGroupMember objects
        that match the specified input criteria.
    :rtype if_group_members: Array of IfGroupMember
    """
    # Same delegation pattern as index/search, against the "find"
    # endpoint resolved by the broker.
    endpoint = self._get_method_fullname("find")
    return self.api_list_request(endpoint, kwargs)
def data_source(self, **kwargs):
    """Return the collector NetMRI that collected this data record.

    :param IfGroupMemberID: The internal NetMRI identifier for this
        interface group membership record. Required.
    :type IfGroupMemberID: Integer
    :return: The collector NetMRI that collected this data record.
    :rtype: DataSource
    """
    # Single-object association lookup via the "data_source" endpoint.
    endpoint = self._get_method_fullname("data_source")
    return self.api_request(endpoint, kwargs)
def interface(self, **kwargs):
    """Return the Interface associated with this membership record.

    :param IfGroupMemberID: The internal NetMRI identifier for this
        interface group membership record. Required.
    :type IfGroupMemberID: Integer
    :return: The associated interface object.
    :rtype: Interface
    """
    # Single-object association lookup via the "interface" endpoint.
    endpoint = self._get_method_fullname("interface")
    return self.api_request(endpoint, kwargs)
|
11587254
|
from neo.Utils.VMJSONTestCase import VMJSONTestCase
import glob
import io
import os
import json
from neo.VM.tests.JsonTester import execute_test
class VMTest(VMJSONTestCase):
    def test_files(self):
        """Run every downloaded NEO-VM ``*.json`` test fixture.

        Recursively walks the directory containing SOURCE_FILENAME and
        executes each JSON test file found.
        """
        pattern = os.path.dirname(self.SOURCE_FILENAME) + "/**/*.json"
        for path in glob.glob(pattern, recursive=True):
            # The upstream fixtures carry a UTF-8 BOM, hence 'utf-8-sig'.
            with io.open(path, 'r', encoding='utf-8-sig') as handle:
                fixture = json.load(handle)
            execute_test(fixture)
|
11587265
|
from pypsi.plugins.history import HistoryCommand, HistoryPlugin
from pypsi.shell import Shell
class PluginShell(Shell):
    # Shell subclass wired with the history plugin so tests exercise
    # HistoryPlugin through a real Shell instance.
    plugin = HistoryPlugin()
class TestHistoryPlugin:
    def setup(self):
        """Create a fresh plugin-enabled shell before each test."""
        self.shell = PluginShell()
    def teardown(self):
        """Restore terminal/shell state modified by the shell."""
        self.shell.restore()
|
11587311
|
from unittest.mock import MagicMock
from cauldron.session.writing import components
def test_get_components_unknown():
    """Should return an empty component for unknown component name."""
    component = components._get_components('foo', MagicMock())
    # An unknown name yields a component with no files and no includes.
    assert not component.files
    assert not component.includes
|
11587316
|
class Solution:
    def beforeAndAfterPuzzles(self, phrases: List[str]) -> List[str]:
        """Return all distinct before-and-after merges, sorted.

        Two phrases at different indices merge when the last word of
        one equals the first word of the other; the shared word appears
        only once in the merged result.
        """
        # Index each (phrase, position) pair by the phrase's first word.
        by_first_word = defaultdict(set)
        for idx, phrase in enumerate(phrases):
            first = phrase.split(' ', 1)[0]
            by_first_word[first].add((phrase, idx))
        merged = set()
        for idx, phrase in enumerate(phrases):
            head_and_last = phrase.rsplit(' ', 1)
            # Any phrase starting with our last word can be appended,
            # as long as it sits at a different index.
            for other, other_idx in by_first_word.get(head_and_last[-1], ()):
                if other_idx != idx:
                    merged.add(' '.join(head_and_last[:-1] + other.split()))
        return sorted(merged)
|
11587341
|
from threading import Thread
from traceback import print_exc
class MatchComms(Thread):
    """Daemon thread that forwards incoming match-comms messages to the agent."""
    def __init__(self, agent):
        super().__init__(daemon=True)
        self.agent = agent
        # Truthy while the polling loop should keep running.
        self.online = 1
    def stop(self):
        # Ask run() to exit after its current blocking get().
        self.online = 0
    def run(self):
        while self.online:
            try:
                message = self.agent.matchcomms.incoming_broadcast.get()
                self.agent.handle_match_comm(message)
            except Exception:
                # Never let one bad message kill the comms thread.
                print_exc()
|
11587355
|
from changes.api.serializer import Crumbler, register
from changes.models.repository import Repository, RepositoryBackend
from changes.vcs.git import GitVcs
from changes.vcs.hg import MercurialVcs
# Default revision name for each repository backend, as reported by the
# backend's VCS implementation; unknown backends fall back to ''.
DEFAULT_BRANCHES = {
    RepositoryBackend.git: GitVcs.get_default_revision(),
    RepositoryBackend.hg: MercurialVcs.get_default_revision(),
    RepositoryBackend.unknown: ''
}
@register(Repository)
class RepositoryCrumbler(Crumbler):
    def crumble(self, instance, attrs):
        """Serialize a Repository model to a plain dict for the API layer."""
        return {
            'id': instance.id.hex,
            'url': instance.url,
            'backend': instance.backend,
            'status': instance.status,
            'dateCreated': instance.date_created,
            'defaultBranch': DEFAULT_BRANCHES[instance.backend],
        }
@register(RepositoryBackend)
class RepositoryBackendCrumbler(Crumbler):
    def crumble(self, instance, attrs):
        """Serialize a RepositoryBackend enum member for the API layer."""
        return {
            'id': instance.name,
            # NOTE(review): `unicode` is Python 2 only -- this module targets py2.
            'name': unicode(instance),
        }
|
11587373
|
import base64
import json
import zlib
from typing import Any, List
import pytest
from aws_lambda_powertools.utilities.parser import ValidationError, envelopes, event_parser
from aws_lambda_powertools.utilities.parser.models import CloudWatchLogsLogEvent, CloudWatchLogsModel
from aws_lambda_powertools.utilities.typing import LambdaContext
from tests.functional.parser.schemas import MyCloudWatchBusiness
from tests.functional.utils import load_event
@event_parser(model=MyCloudWatchBusiness, envelope=envelopes.CloudWatchLogsEnvelope)
def handle_cloudwatch_logs(event: List[MyCloudWatchBusiness], _: LambdaContext):
    # The envelope should unpack exactly one business record from the log payload.
    assert len(event) == 1
    log: MyCloudWatchBusiness = event[0]
    assert log.my_message == "hello"
    assert log.user == "test"
@event_parser(model=CloudWatchLogsModel)
def handle_cloudwatch_logs_no_envelope(event: CloudWatchLogsModel, _: LambdaContext):
    # Without an envelope the full decoded CloudWatch payload is exposed.
    assert event.awslogs.decoded_data.owner == "123456789123"
    assert event.awslogs.decoded_data.logGroup == "testLogGroup"
    assert event.awslogs.decoded_data.logStream == "testLogStream"
    assert event.awslogs.decoded_data.subscriptionFilters == ["testFilter"]
    assert event.awslogs.decoded_data.messageType == "DATA_MESSAGE"
    assert len(event.awslogs.decoded_data.logEvents) == 2
    log_record: CloudWatchLogsLogEvent = event.awslogs.decoded_data.logEvents[0]
    assert log_record.id == "eventId1"
    # Timestamps are parsed to datetime; convert back to epoch millis to compare.
    convert_time = int(round(log_record.timestamp.timestamp() * 1000))
    assert convert_time == 1440442987000
    assert log_record.message == "[ERROR] First test message"
    log_record: CloudWatchLogsLogEvent = event.awslogs.decoded_data.logEvents[1]
    assert log_record.id == "eventId2"
    convert_time = int(round(log_record.timestamp.timestamp() * 1000))
    assert convert_time == 1440442987001
    assert log_record.message == "[ERROR] Second test message"
def test_validate_event_user_model_with_envelope():
    """The envelope should base64-decode, decompress and parse the inner message."""
    payload = {"my_message": "hello", "user": "test"}
    decoded = {
        "messageType": "DATA_MESSAGE",
        "owner": "123456789123",
        "logGroup": "testLogGroup",
        "logStream": "testLogStream",
        "subscriptionFilters": ["testFilter"],
        "logEvents": [{"id": "eventId1", "timestamp": 1440442987000, "message": json.dumps(payload)}],
    }
    # Mirror the CloudWatch wire format: JSON -> zlib -> base64.
    compressed = zlib.compress(json.dumps(decoded).encode(), -1)
    event_dict = {"awslogs": {"data": base64.b64encode(compressed)}}
    handle_cloudwatch_logs(event_dict, LambdaContext())
def test_validate_event_does_not_conform_with_user_dict_model():
    # A raw CloudWatch fixture lacks the business fields, so parsing must fail.
    event_dict = load_event("cloudWatchLogEvent.json")
    with pytest.raises(ValidationError):
        handle_cloudwatch_logs(event_dict, LambdaContext())
def test_handle_cloudwatch_trigger_event_no_envelope():
    # The full CloudWatch model should accept the fixture event as-is.
    event_dict = load_event("cloudWatchLogEvent.json")
    handle_cloudwatch_logs_no_envelope(event_dict, LambdaContext())
def test_handle_invalid_cloudwatch_trigger_event_no_envelope():
    # "invalid_data" is not base64+zlib, so decompression must fail validation.
    event_dict: Any = {"awslogs": {"data": "invalid_data"}}
    with pytest.raises(ValidationError) as context:
        handle_cloudwatch_logs_no_envelope(event_dict, LambdaContext())
    assert context.value.errors()[0]["msg"] == "unable to decompress data"
def test_handle_invalid_event_with_envelope():
    # An empty event has no awslogs payload and must fail validation.
    with pytest.raises(ValidationError):
        handle_cloudwatch_logs(event={}, context=LambdaContext())
|
11587375
|
from typing import List, Tuple, Union
import torch.nn as nn
from core.networks.cnn import CNN
from core.networks.mlp import MLP
from core.networks.rl_model import RLModel
Critic = Union["ContinuousVNetwork", "CNNContinuousVNetwork"]
class ContinuousVNetwork(RLModel):
    """Simple value network with MLP.

    Args:
        observation_dim: size of the flat observation vector.
        layers_dim: hidden layer sizes for the MLP.
    """
    def __init__(self, observation_dim: int, layers_dim: List[int]):
        super(ContinuousVNetwork, self).__init__()
        # V network architecture: MLP mapping observation -> scalar value.
        self._v_mlp = MLP(observation_dim, 1, layers_dim)
    def forward(self, x):
        # Returns one value estimate per input sample.
        x = self._v_mlp(x)
        return x
class CNNContinuousVNetwork(RLModel):
    """Value network with CNN extractor.

    Args:
        observation_dim: image observation shape; indices [1] and [2] are used
            as the spatial dimensions when computing the flattened size.
        layers_dim: hidden layer sizes for the MLP head.
        layers_num_channels: output channels of each CNN layer.
    """
    def __init__(
        self,
        observation_dim: Tuple[int],
        layers_dim: List[int],
        layers_num_channels: List[int],
    ):
        super(CNNContinuousVNetwork, self).__init__()
        # NOTE(review): assumes the CNN preserves spatial size (stride/padding
        # handled inside core.networks.cnn.CNN) -- confirm against CNN impl.
        flattened_dim = (
            observation_dim[1] * observation_dim[2] * layers_num_channels[-1]
        )
        v_mlp = MLP(flattened_dim, 1, layers_dim)
        v_cnn = CNN(observation_dim, layers_num_channels, stride=2, kernel_size=3)
        # CNN features -> flatten -> MLP scalar value.
        self._v = nn.Sequential(v_cnn, nn.Flatten(), v_mlp)
    def forward(self, x):
        x = self._v(x)
        return x
|
11587398
|
from .robot import *
from .ocp import *
from .cost import *
from .constraints import *
from .solver import *
from .mpc import *
from . import utils
|
11587411
|
import synapse.tests.utils as s_test
class TransportTest(s_test.SynTest):
    """Exercise the transport:air and transport:sea model forms."""
    async def test_model_transport(self):
        async with self.getTestCore() as core:
            # Aircraft: times normalize to epoch millis, strings to lowercase.
            craft = (await core.nodes('[ transport:air:craft=* :tailnum=FF023 :type=helicopter :built=202002 :make=boeing :model=747 :serial=1234 :operator=*]'))[0]
            self.eq('helicopter', craft.get('type'))
            self.eq(1580515200000, craft.get('built'))
            self.eq('boeing', craft.get('make'))
            self.eq('747', craft.get('model'))
            self.eq('1234', craft.get('serial'))
            self.nn(craft.get('operator'))
            tailnum = (await core.nodes('transport:air:tailnum=FF023 [ :type=fighter ]'))[0]
            flightnum = (await core.nodes('[ transport:air:flightnum="ua 2437" :carrier=* :from:port=IAD :to:port=LAS :stops=(IAD,VISI,LAS) ]'))[0]
            # Flight: num and port codes are case-normalized on ingest.
            flight = (await core.nodes('''
                [ transport:air:flight=*
                    :num=UA2437
                    :scheduled:departure=20200202
                    :scheduled:arrival=20200203
                    :departed=2020020202
                    :arrived=202002020302
                    :carrier=*
                    :craft=*
                    :from:port=IAD
                    :to:port=LAS
                    :stops=(iad, visi, las)
                    :cancelled=true
                ]'''))[0]
            self.len(1, await core.nodes('transport:air:flight -> transport:air:craft'))
            self.eq('ua2437', flight.get('num'))
            self.eq(1580601600000, flight.get('scheduled:departure'))
            self.eq(1580688000000, flight.get('scheduled:arrival'))
            self.eq(1580608800000, flight.get('departed'))
            self.eq(1580612520000, flight.get('arrived'))
            self.true(flight.get('cancelled'))
            self.nn(flight.get('carrier'))
            self.eq('las', flight.get('to:port'))
            self.eq('iad', flight.get('from:port'))
            flightiden = flight.ndef[1]
            # Occupants link a contact to a seat on a specific flight.
            occup = (await core.nodes(f'[ transport:air:occupant=* :flight={flightiden} :seat=1A :contact=* ]'))[0]
            self.eq('1a', occup.get('seat'))
            self.len(1, await core.nodes('transport:air:occupant -> ps:contact'))
            self.len(1, await core.nodes('transport:air:occupant -> transport:air:flight'))
            # Air telemetry: distances normalize to millimeters.
            telem = (await core.nodes('''
                [ transport:air:telem=*
                    :flight=*
                    :latlong=(20.22, 80.1111)
                    :loc=us
                    :place=*
                    :accuracy=10m
                    :altitude=9144m
                    :altitude:accuracy=10m
                    :time=20200202
                ]'''))[0]
            self.nn(telem.get('flight'))
            self.nn(telem.get('place'))
            self.eq((20.22, 80.1111), telem.get('latlong'))
            self.eq('us', telem.get('loc'))
            self.eq(10000, telem.get('accuracy'))
            self.eq(6380152800, telem.get('altitude'))
            self.eq(10000, telem.get('altitude:accuracy'))
            self.eq(1580601600000, telem.get('time'))
            # Sea vessel: IMO numbers normalize (whitespace stripped, lowercased).
            vessel = (await core.nodes('''[
                transport:sea:vessel=*
                :mmsi=123456789
                :name="Slice of Life"
                :flag=us
                :imo="IMO 1234567"
                :built=2020
                :length=20m
                :beam=10m
                :operator=*
            ]'''))[0]
            self.eq('123456789', vessel.get('mmsi'))
            self.eq('slice of life', vessel.get('name'))
            self.eq('us', vessel.get('flag'))
            self.eq('imo1234567', vessel.get('imo'))
            self.eq(1577836800000, vessel.get('built'))
            self.eq(20000, vessel.get('length'))
            self.eq(10000, vessel.get('beam'))
            self.nn(vessel.get('operator'))
            self.len(1, await core.nodes('transport:sea:vessel:imo^="IMO 123"'))
            seatelem = (await core.nodes('''[
                transport:sea:telem=*
                :time=20200202
                :vessel=*
                :latlong=(20.22, 80.1111)
                :loc=us
                :place=*
                :accuracy=10m
                :draft=20m
                :airdraft=30m
            ]'''))[0]
            self.nn(seatelem.get('place'))
            self.eq((20.22, 80.1111), seatelem.get('latlong'))
            self.eq('us', seatelem.get('loc'))
            self.eq(10000, seatelem.get('accuracy'))
            self.eq(1580601600000, seatelem.get('time'))
            self.eq(20000, seatelem.get('draft'))
            self.eq(30000, seatelem.get('airdraft'))
            # Airport codes are the primary value and lowercase on ingest.
            airport = (await core.nodes('transport:air:port=VISI [:name="Visi Airport" :place=*]'))[0]
            self.eq('visi', airport.ndef[1])
            self.eq('visi airport', airport.get('name'))
            self.nn(airport.get('place'))
|
11587424
|
from zorro.di import has_dependencies, dependency
from .xcb import Core, Rectangle
from .commands import CommandDispatcher
class Drag(object):
    """Base class for pointer drags on a window (move/resize).

    The drag only "starts" once the pointer has travelled more than
    ``start_distance`` pixels from the press position, so plain clicks do
    not float or move the window. Subclasses implement start()/motion().
    """
    # Minimum pointer travel (Manhattan distance) before a drag begins.
    start_distance = 5
    def __init__(self, win, x, y):
        self.win = win
        # Operate on the frame when the window has one.
        if self.win.frame:
            self.win = self.win.frame
        self.drag_started = False
        self.start_x = x
        self.start_y = y
    def moved_to(self, x, y):
        """Handle pointer motion; lazily begins the drag past the threshold."""
        if self.drag_started:
            # self.x/self.y are offsets captured by the subclass's start().
            self.motion(x + self.x, y + self.y)
            self.update_hint()
        else:
            dist = abs(self.start_x - x) + abs(self.start_y - y)
            if dist > self.start_distance:
                self.drag_started = True
                # Dragging a tiled window makes it floating first.
                if not self.win.content.lprops.floating:
                    self.win.content.make_floating()
                self.hint = self.win.add_hint()
                self.start(self.start_x, self.start_y)
                self.motion(x + self.x, y + self.y)
                self.update_hint()
    def update_hint(self):
        """Refresh the on-screen geometry hint (position/size, cols/rows)."""
        sz = self.win.done.size
        txt = '{0.x}, {0.y} {0.width}x{0.height}'.format(sz)
        if hasattr(self.win, 'content'):
            hints = self.win.content.want.hints
        else:
            hints = self.win.want.hints
        if hints:
            # ICCCM size hints: show cols/rows for increment-resizable clients.
            bw = getattr(hints, 'base_width', 0)
            bh = getattr(hints, 'base_height', 0)
            if hasattr(hints, 'width_inc'):
                if hasattr(hints, 'height_inc'):
                    txt += '\n{} cols {} rows'.format(
                        (sz.width - bw)//hints.width_inc,
                        (sz.height - bh)//hints.height_inc,
                        )
                else:
                    txt += '\n{} cols'.format((sz.width - bw)//hints.width_inc)
            elif hasattr(hints, 'height_inc'):
                txt += '\n{} rows'.format((sz.height - bh)//hints.height_inc)
        self.hint.set_text(txt)
        # Center the hint within the dragged window.
        hsz = self.hint.done.size
        wsz = self.win.done.size
        self.hint.set_bounds(Rectangle(
            (wsz.width - hsz.width)//2,
            (wsz.height - hsz.height)//2,
            hsz.width, hsz.height))
    def stop(self):
        # The hint only exists if the drag actually started.
        if hasattr(self, 'hint'):
            self.hint.destroy()
class DragMove(Drag):
    """Drag that moves the window, keeping the initial grab offset."""
    def start(self, x, y):
        sz = self.win.done.size
        # Offset of the window origin from the initial pointer position.
        self.x = sz.x - x
        self.y = sz.y - y
    def motion(self, x, y):
        sz = self.win.done.size
        self.win.set_bounds(Rectangle(x, y, sz.width, sz.height))
class DragSizeBottomRight(Drag):
    """Resize from the bottom-right corner; the origin stays fixed."""
    def start(self, x, y):
        sz = self.win.done.size
        # Offsets so that the adjusted pointer position is the new width/height.
        self.x = sz.width - x
        self.y = sz.height - y
    def motion(self, x, y):
        sz = self.win.done.size
        self.win.set_bounds(Rectangle(sz.x, sz.y, x, y))
class DragSizeTopRight(Drag):
    """Resize from the top-right corner; the bottom edge stays fixed."""
    def start(self, x, y):
        sz = self.win.done.size
        self.x = sz.width - x
        self.y = sz.y - y
        # Anchor: the bottom edge must not move while the top is dragged.
        self.bottom = sz.height + sz.y
    def motion(self, x, y):
        sz = self.win.done.size
        self.win.set_bounds(Rectangle(sz.x, y, x, self.bottom - y))
class DragSizeBottomLeft(Drag):
    """Resize from the bottom-left corner; the right edge stays fixed."""
    def start(self, x, y):
        sz = self.win.done.size
        self.x = sz.x - x
        self.y = sz.height - y
        # Anchor: the right edge must not move while the left is dragged.
        self.right = sz.x + sz.width
    def motion(self, x, y):
        sz = self.win.done.size
        self.win.set_bounds(Rectangle(x, sz.y, self.right - x, y))
class DragSizeTopLeft(Drag):
    """Resize from the top-left corner; bottom and right edges stay fixed."""
    def start(self, x, y):
        sz = self.win.done.size
        self.x = sz.x - x
        self.y = sz.y - y
        # Anchors: opposite edges remain in place during the drag.
        self.bottom = sz.height + sz.y
        self.right = sz.width + sz.x
    def motion(self, x, y):
        sz = self.win.done.size
        self.win.set_bounds(Rectangle(x, y, self.right - x, self.bottom - y))
@has_dependencies
class MouseRegistry(object):
    """Grabs mod+button combinations and routes press/motion/release events
    into the appropriate Drag subclass (move on button 1, resize on 3)."""
    core = dependency(Core, 'xcore')
    commander = dependency(CommandDispatcher, 'commander')
    drag_classes = { # (is_right, is_bottom): Class
        (True, True): DragSizeBottomRight,
        (True, False): DragSizeTopRight,
        (False, False): DragSizeTopLeft,
        (False, True): DragSizeBottomLeft,
        }
    def __init__(self):
        # Currently active Drag instance, or None when no drag is in flight.
        self.drag = None
    def init_buttons(self):
        # Mod4 (super) + left/right mouse button trigger move/resize.
        self.mouse_buttons = [
            (getattr(self.core.ModMask, '4'), 1),
            (getattr(self.core.ModMask, '4'), 3),
            ]
    def init_modifiers(self):
        # TODO(tailhook) probably calculate them instead of hardcoding
        caps = self.core.ModMask.Lock  # caps lock
        num = getattr(self.core.ModMask, '2')  # mod2 is usually numlock
        mode = getattr(self.core.ModMask, '5')  # mod5 is usually mode_switch
        # Grab every combination of "noise" modifiers so the bindings work
        # regardless of caps/num/mode-switch state.
        self.extra_modifiers = [0,
            caps,
            num,
            mode,
            caps|num,
            num|mode,
            caps|num|mode,
            ]
        self.modifiers_mask = ~(caps|num|mode)
    def register_buttons(self, win):
        """Issue X11 GrabButton requests for every binding x noise-modifier."""
        self.init_modifiers()
        for mod, button in self.mouse_buttons:
            for extra in self.extra_modifiers:
                self.core.raw.GrabButton(
                    modifiers=mod|extra,
                    button=button,
                    owner_events=True,
                    grab_window=win,
                    event_mask=self.core.EventMask.ButtonRelease
                               | self.core.EventMask.PointerMotion,
                    confine_to=0,
                    keyboard_mode=self.core.GrabMode.Async,
                    pointer_mode=self.core.GrabMode.Async,
                    cursor=0,  # TODO(tailhook) make apropriate cursor
                    )
    def dispatch_button_press(self, ev):
        """Start a drag for the window currently under the pointer."""
        if 'pointer_window' not in self.commander:
            return
        win = self.commander['pointer_window']
        if win.lprops.floating:
            win.frame.restack(self.core.StackMode.TopIf)
        if ev.detail == 1:
            # Button 1: move.
            self.drag = DragMove(win, ev.root_x, ev.root_y)
        elif ev.detail == 3:
            # Button 3: resize from the quadrant the pointer is in.
            sz = win.done.size
            right = (ev.root_x - sz.x) * 2 >= sz.width
            bottom = (ev.root_y - sz.y) * 2 >= sz.height
            self.drag = self.drag_classes[right, bottom](
                win, ev.root_x, ev.root_y)
    def dispatch_button_release(self, ev):
        if not self.drag:
            return
        # Apply the final position before tearing the drag down.
        self.drag.moved_to(ev.root_x, ev.root_y)
        self.drag.stop()
        self.drag = None
    def dispatch_motion(self, ev):
        if not self.drag:
            return
        self.drag.moved_to(ev.root_x, ev.root_y)
|
11587426
|
from django.db import models
class NewsManager(models.Manager):
    def get_published_news(self):
        """Queryset of news rows whose status is 'published'."""
        return self.filter(status='published')
class CategoryManager(models.Manager):
    def get_active_category(self):
        """Queryset of categories flagged active."""
        return self.filter(active=True)
|
11587475
|
import re
from mau.lexers.base_lexer import BaseLexer, TokenTypes
class TextLexer(BaseLexer):
    """Lexer producing TEXT and LITERAL tokens for Mau inline text."""
    def _process_whitespace(self):
        # Any run of spaces collapses to a single-space TEXT token.
        if re.match(r"\ +", self._tail) is None:
            return None
        return self._create_token_and_skip(TokenTypes.TEXT, " ")
    def _process_literal(self):
        # Markup-significant characters come out one at a time as LITERALs.
        if self._current_char in '_*`{}()[]#\\"':
            return self._create_token_and_skip(TokenTypes.LITERAL, self._current_char)
        return None
    def _process_text(self):
        # Longest run of characters that are neither markup nor spaces.
        match = re.match(r'[^_*`{}()[\]#"\\ ]+', self._tail)
        if match is None:  # pragma: no cover
            return None
        return self._create_token_and_skip(TokenTypes.TEXT, match.group())
    def _process_functions(self):
        # Order matters: EOF/EOL first, then whitespace, literals, plain text.
        return [
            self._process_eof,
            self._process_eol,
            self._process_whitespace,
            self._process_literal,
            self._process_text,
        ]
|
11587493
|
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class FirstOfTagTests(SimpleTestCase):
    """Render {% firstof %} with every combination of truthy/falsy args,
    string literals, escaping, |safe, autoescape off, and the asvar form."""
    @setup({"firstof01": "{% firstof a b c %}"})
    def test_firstof01(self):
        # All falsy -> empty output.
        output = self.engine.render_to_string("firstof01", {"a": 0, "c": 0, "b": 0})
        self.assertEqual(output, "")
    @setup({"firstof02": "{% firstof a b c %}"})
    def test_firstof02(self):
        output = self.engine.render_to_string("firstof02", {"a": 1, "c": 0, "b": 0})
        self.assertEqual(output, "1")
    @setup({"firstof03": "{% firstof a b c %}"})
    def test_firstof03(self):
        output = self.engine.render_to_string("firstof03", {"a": 0, "c": 0, "b": 2})
        self.assertEqual(output, "2")
    @setup({"firstof04": "{% firstof a b c %}"})
    def test_firstof04(self):
        output = self.engine.render_to_string("firstof04", {"a": 0, "c": 3, "b": 0})
        self.assertEqual(output, "3")
    @setup({"firstof05": "{% firstof a b c %}"})
    def test_firstof05(self):
        # First truthy argument wins even when later ones are truthy too.
        output = self.engine.render_to_string("firstof05", {"a": 1, "c": 3, "b": 2})
        self.assertEqual(output, "1")
    @setup({"firstof06": "{% firstof a b c %}"})
    def test_firstof06(self):
        # Missing variables count as falsy.
        output = self.engine.render_to_string("firstof06", {"c": 3, "b": 0})
        self.assertEqual(output, "3")
    @setup({"firstof07": '{% firstof a b "c" %}'})
    def test_firstof07(self):
        output = self.engine.render_to_string("firstof07", {"a": 0})
        self.assertEqual(output, "c")
    @setup({"firstof08": '{% firstof a b "c and d" %}'})
    def test_firstof08(self):
        output = self.engine.render_to_string("firstof08", {"a": 0, "b": 0})
        self.assertEqual(output, "c and d")
    @setup({"firstof09": "{% firstof %}"})
    def test_firstof09(self):
        # Zero arguments is a template syntax error.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template("firstof09")
    @setup({"firstof10": "{% firstof a %}"})
    def test_firstof10(self):
        output = self.engine.render_to_string("firstof10", {"a": "<"})
        self.assertEqual(output, "&lt;")
    @setup({"firstof11": "{% firstof a b %}"})
    def test_firstof11(self):
        output = self.engine.render_to_string("firstof11", {"a": "<", "b": ">"})
        self.assertEqual(output, "&lt;")
    @setup({"firstof12": "{% firstof a b %}"})
    def test_firstof12(self):
        output = self.engine.render_to_string("firstof12", {"a": "", "b": ">"})
        self.assertEqual(output, "&gt;")
    @setup({"firstof13": "{% autoescape off %}{% firstof a %}{% endautoescape %}"})
    def test_firstof13(self):
        # autoescape off leaves the raw character untouched.
        output = self.engine.render_to_string("firstof13", {"a": "<"})
        self.assertEqual(output, "<")
    @setup({"firstof14": "{% firstof a|safe b %}"})
    def test_firstof14(self):
        # |safe also bypasses escaping.
        output = self.engine.render_to_string("firstof14", {"a": "<"})
        self.assertEqual(output, "<")
    @setup({"firstof15": "{% firstof a b c as myvar %}"})
    def test_firstof15(self):
        # asvar form stores the result and renders nothing.
        ctx = {"a": 0, "b": 2, "c": 3}
        output = self.engine.render_to_string("firstof15", ctx)
        self.assertEqual(ctx["myvar"], "2")
        self.assertEqual(output, "")
    @setup({"firstof16": "{% firstof a b c as myvar %}"})
    def test_all_false_arguments_asvar(self):
        ctx = {"a": 0, "b": 0, "c": 0}
        output = self.engine.render_to_string("firstof16", ctx)
        self.assertEqual(ctx["myvar"], "")
        self.assertEqual(output, "")
|
11587512
|
import hashlib
import json
import logging
import re
import pip
import pymongo
import requests
from pymongo import MongoClient
from gnews.utils.constants import AVAILABLE_LANGUAGES, AVAILABLE_COUNTRIES, GOOGLE_NEWS_REGEX
def lang_mapping(lang):
    """Look up *lang* in AVAILABLE_LANGUAGES; None when unsupported."""
    return AVAILABLE_LANGUAGES.get(lang)
def country_mapping(country):
    """Look up *country* in AVAILABLE_COUNTRIES; None when unsupported."""
    return AVAILABLE_COUNTRIES.get(country)
def import_or_install(package):
    """Import *package* by name, pip-installing it first when missing."""
    try:
        __import__(package)
    except ImportError:
        # pip >= 10 moved main() into pip._internal.
        installer = pip.main if hasattr(pip, 'main') else pip._internal.main
        installer(['install', package])
def connect_database(db_user, db_pw, db_name, collection_name):
    """Mongo DB Establish Cluster Connection

    Returns the requested collection handle, or None when the connection
    attempt raises (the error is printed, not re-raised -- callers must
    handle a None return).
    """
    # .env file Structure:
    # DB_USER="..."
    # DB_PW="..."
    # DB_NAME="..."
    # COLLECTION_NAME="..."
    # name of the mongodb cluster as well as the database name should be "gnews"
    try:
        cluster = MongoClient(
            "mongodb+srv://" +
            db_user +
            ":" +
            db_pw +
            "@gnews.stjap.mongodb.net/" +
            db_name +
            "?retryWrites=true&w=majority"
        )
        db = cluster[db_name]
        collection = db[collection_name]
        return collection
    except Exception as e:
        # NOTE(review): swallowing the exception hides auth/DNS errors from callers.
        print("Connection Error.", e)
def post_database(collection, news):
    """post unique news articles to mongodb database"""
    # _id is a content hash of the whole article dict, giving de-duplication.
    doc = {
        "_id": hashlib.sha256(str(json.dumps(news)).encode('utf-8')).hexdigest(),
        "title": news['title'],
        "description": news['description'],
        "published_date": news['published date'],
        "url": news['url'],
        "publisher": news['publisher']
    }
    try:
        # NOTE(review): the full doc is used as the update filter; with
        # upsert=True a new article is inserted and an identical one is a
        # no-op. Confirm DuplicateKeyError can still occur on this path.
        collection.update_one(doc, {'$set': doc}, upsert=True)
    except pymongo.errors.DuplicateKeyError:
        logging.error("Posting to database failed.")
def process_url(item, exclude_websites):
    """Return the article URL for *item*, or None when its source is excluded.

    Google News redirect links are resolved to the publisher URL via a
    HEAD request.
    """
    source = item.get('source').get('href')
    patterns = [f'^http(s)?://(www.)?{site.lower()}.*' for site in exclude_websites]
    # Drop items whose source host matches any excluded website.
    if any(re.match(pattern, source) for pattern in patterns):
        return
    url = item.get('link')
    if re.match(GOOGLE_NEWS_REGEX, url):
        # Follow the redirect header; fall back to the original URL.
        url = requests.head(url).headers.get('location', url)
    return url
|
11587519
|
# Code-generation template: %block_name%, %block_special_name%, %function% and
# %function_args% are placeholders substituted before this becomes valid Python.
def %block_name%(bot, update):
    global answers
    answers[bot.message.chat_id]["%block_special_name%"] = %function%(%function_args%)
|
11587528
|
import torch
import torch.nn as nn
import os
from div.download_from_url import download_from_url
# Resolve the torch cache dir; older torch versions lack torch.hub._get_torch_home.
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(
            os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
# Pretrained checkpoints are cached here by the coresnet* constructors.
default_cache_path = os.path.join(torch_cache_home, 'pretrained')
__all__ = ['CoResNet', 'coresnet50', 'coresnet101', 'coresnet152']
# Google Drive download links for the pretrained weights.
model_urls = {
    'coresnet50': 'https://drive.google.com/uc?export=download&id=1fnANwWhU6SjRpPLSHIji-nLNhdVdtBSs',
    'coresnet101': 'https://drive.google.com/uc?export=download&id=10-oJYZtPrlnM4H9aKxvI-Osjhjm5B1WL',
    'coresnet152': 'https://drive.google.com/uc?export=download&id=1dzo1gd7l6_T57wWcfyxSddGlec1foClD',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1, groups=1):
    """3x3 convolution with padding and no bias term."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
    return conv
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    conv = nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=stride, bias=False)
    return conv
class CoConv3x3_2d(nn.Module):
    """2D CoConv: parallel 3x3 convolutions at several dilations, concatenated.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (list): Number of channels for each pyramid level produced by the convolution
        coconv_dilations (list): The dilation of the kernel for each pyramid level
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``False``
        groups (int): Number of groups to split the input channels. Default: 1
    Example::
        >>> # CoConv with two pyramid levels, dilations: 1, 2
        >>> m = CoConv3x3_2d(in_channels=64, out_channels=[32, 32], coconv_dilations=[1, 2])
        >>> output = m(torch.randn(4, 64, 56, 56))
    """
    def __init__(self, in_channels, out_channels, coconv_dilations, stride=1, bias=False, groups=1):
        super(CoConv3x3_2d, self).__init__()
        assert len(out_channels) == len(coconv_dilations)
        kernel_size = 3
        levels = []
        for channels, dilation in zip(out_channels, coconv_dilations):
            # Padding grows with dilation so every level keeps the input's spatial size.
            levels.append(nn.Conv2d(
                in_channels, channels, kernel_size=kernel_size,
                stride=stride, padding=kernel_size // 2 + (dilation - 1),
                dilation=dilation, bias=bias, groups=groups))
        self.coconv_levels = nn.ModuleList(levels)
    def forward(self, x):
        # Run every pyramid level on the same input and stack along channels.
        return torch.cat([level(x) for level in self.coconv_levels], 1)
class CoConv5(nn.Module):
    """CoConv with five pyramid levels (dilations 1-5), outputs concatenated."""
    def __init__(self, inplans, planes, stride=1, groups=1):
        super(CoConv5, self).__init__()
        # Channel split: three levels at planes//4 plus two at planes//8.
        widths = [planes // 4] * 3 + [planes // 8] * 2
        for level, width in enumerate(widths, start=1):
            # padding == dilation keeps every branch at the input resolution.
            setattr(self, 'conv2_%d' % level,
                    conv3x3(inplans, width, stride=stride,
                            padding=level, dilation=level, groups=groups))
    def forward(self, x):
        outs = [getattr(self, 'conv2_%d' % i)(x) for i in range(1, 6)]
        return torch.cat(outs, dim=1)
class CoConv4(nn.Module):
    """CoConv with four pyramid levels (dilations 1-4), outputs concatenated."""
    def __init__(self, inplans, planes, stride=1, groups=1):
        super(CoConv4, self).__init__()
        # Four equal-width branches of planes//4 channels each.
        for level in range(1, 5):
            setattr(self, 'conv2_%d' % level,
                    conv3x3(inplans, planes // 4, stride=stride,
                            padding=level, dilation=level, groups=groups))
    def forward(self, x):
        outs = [getattr(self, 'conv2_%d' % i)(x) for i in range(1, 5)]
        return torch.cat(outs, dim=1)
class CoConv3(nn.Module):
    """CoConv with three pyramid levels (dilations 1-3), outputs concatenated."""
    def __init__(self, inplans, planes, stride=1, groups=1):
        super(CoConv3, self).__init__()
        # Half the channels at dilation 1, a quarter each at dilations 2 and 3.
        widths = [planes // 2, planes // 4, planes // 4]
        for level, width in enumerate(widths, start=1):
            setattr(self, 'conv2_%d' % level,
                    conv3x3(inplans, width, stride=stride,
                            padding=level, dilation=level, groups=groups))
    def forward(self, x):
        outs = [getattr(self, 'conv2_%d' % i)(x) for i in range(1, 4)]
        return torch.cat(outs, dim=1)
class CoConv2(nn.Module):
    """CoConv with two pyramid levels (dilations 1-2), outputs concatenated."""
    def __init__(self, inplans, planes, stride=1, groups=1):
        super(CoConv2, self).__init__()
        # Two equal-width branches of planes//2 channels each.
        for level in range(1, 3):
            setattr(self, 'conv2_%d' % level,
                    conv3x3(inplans, planes // 2, stride=stride,
                            padding=level, dilation=level, groups=groups))
    def forward(self, x):
        outs = [getattr(self, 'conv2_%d' % i)(x) for i in range(1, 3)]
        return torch.cat(outs, dim=1)
def create_spatial_conv(inplans, planes, pyramid_levels, stride=1, groups=1):
    """Return the spatial convolution for ``pyramid_levels`` dilation levels.

    A single level falls back to a plain 3x3 convolution; 2-5 levels use the
    corresponding CoConvN module.

    Raises:
        ValueError: if ``pyramid_levels`` is not in 1..5. (Previously the
            function silently returned None here, which crashed later when
            the stage was assembled.)
    """
    if pyramid_levels == 1:
        return conv3x3(inplans, planes, stride=stride, groups=groups)
    elif pyramid_levels == 2:
        return CoConv2(inplans, planes, stride=stride, groups=groups)
    elif pyramid_levels == 3:
        return CoConv3(inplans, planes, stride=stride, groups=groups)
    elif pyramid_levels == 4:
        return CoConv4(inplans, planes, stride=stride, groups=groups)
    elif pyramid_levels == 5:
        return CoConv5(inplans, planes, stride=stride, groups=groups)
    raise ValueError('pyramid_levels must be between 1 and 5, got %r' % (pyramid_levels,))
class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convs with an identity shortcut."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Match the residual's shape/stride before the addition.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck (1x1 -> spatial CoConv -> 1x1) with residual shortcut.

    ``pyramid_levels`` selects the CoConv variant for the spatial conv;
    ``bn_end_stage`` moves the final BN to after the residual addition.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None, groups=1, pyramid_levels=1,
                 bn_end_stage=False):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = norm_layer(planes)
        self.conv2 = create_spatial_conv(planes, planes, pyramid_levels=pyramid_levels, stride=stride, groups=groups)
        self.bn2 = norm_layer(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.bn_end_stage = bn_end_stage
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        # bn3 is applied before the addition normally, after it at stage end.
        if not self.bn_end_stage:
            out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        if self.bn_end_stage:
            out = self.bn3(out)
        out = self.relu(out)
        return out
class CoResNet(nn.Module):
    """ResNet backbone whose stages use CoConv spatial convolutions.

    Stages 1-4 use pyramid_levels 4, 3, 2, 1 respectively (more dilation
    levels at higher resolution). ``dropout_prob0`` > 0 enables dropout
    before the final linear classifier.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=None, dropout_prob0=0.0):
        super(CoResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1, norm_layer=norm_layer,
                                       pyramid_levels=4, groups_block=1, bn_end_stage=True)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer,
                                       pyramid_levels=3, groups_block=1, bn_end_stage=True)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer,
                                       pyramid_levels=2, groups_block=1, bn_end_stage=True)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer,
                                       pyramid_levels=1, groups_block=1, bn_end_stage=True)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        if dropout_prob0 > 0.0:
            self.dp = nn.Dropout(dropout_prob0, inplace=True)
            print("Using Dropout with the prob to set to 0 of: ", dropout_prob0)
        else:
            self.dp = None
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None,
                    pyramid_levels=1, groups_block=1, bn_end_stage=False):
        # Builds one stage: a (possibly downsampling) first block, blocks-2
        # middle blocks, and a final block carrying bn_end_stage.
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride=stride, downsample=downsample, norm_layer=norm_layer,
                            pyramid_levels=pyramid_levels, groups=groups_block))
        self.inplanes = planes * block.expansion
        for _ in range(1, (blocks-1)):
            layers.append(block(self.inplanes, planes, norm_layer=norm_layer,
                                pyramid_levels=pyramid_levels, groups=groups_block))
        layers.append(block(self.inplanes, planes, norm_layer=norm_layer,
                            pyramid_levels=pyramid_levels, groups=groups_block, bn_end_stage=bn_end_stage))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        if self.dp is not None:
            x = self.dp(x)
        x = self.fc(x)
        return x
def coresnet50(pretrained=False, **kwargs):
    """Constructs a CoResNet-50 model.

    Args:
        pretrained (bool): if True, download the weights into the torch
            cache (network access required) and load them.
    """
    model = CoResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        os.makedirs(default_cache_path, exist_ok=True)
        model.load_state_dict(torch.load(download_from_url(model_urls['coresnet50'],
                                                           root=default_cache_path)))
    return model
def coresnet101(pretrained=False, **kwargs):
    """Constructs a CoResNet-101 model.

    Args:
        pretrained (bool): if True, download the weights into the torch
            cache (network access required) and load them.
    """
    model = CoResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        os.makedirs(default_cache_path, exist_ok=True)
        model.load_state_dict(torch.load(download_from_url(model_urls['coresnet101'],
                                                           root=default_cache_path)))
    return model
def coresnet152(pretrained=False, **kwargs):
    """Constructs a CoResNet-152 model.

    Args:
        pretrained (bool): if True, download the weights into the torch
            cache (network access required) and load them.
    """
    model = CoResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        os.makedirs(default_cache_path, exist_ok=True)
        model.load_state_dict(torch.load(download_from_url(model_urls['coresnet152'],
                                                           root=default_cache_path)))
    return model
|
11587541
|
import math
import sys
from magpieparsers.parser_common import *
#from helper import positive_id
from magpieparsers import error as parsers_error
SCOPES = ['module','type','valuetype','struct','union','function','exception','eventtype','component','home', 'MagpieAST']
def getBasicTypenode(typename, ast):
    """Resolve *typename* to a type node, raising UnknownTypeError when absent."""
    returntype = getTypenode(typename, ast)
    if returntype is not None:
        return returntype
    raise parsers_error.UnknownTypeError(failed_name = typename)
def getTypenode(typename, ast):
    """Convenience wrapper: resolve *typename* to a node of kind 'type' (or None)."""
    return getNode(typename, ast, 'type')
def get_base_typename(typename):
    """Return the unscoped (last) component of a possibly '::'-scoped name.

    Accepts either a string ("a::b::c") or an already-split sequence.
    """
    if isinstance(typename, basestring):
        return typename.split('::')[-1]
    return typename[-1]
def get_scope_for_typename(ast, typename):
    """Return the scope node in which the base part of *typename* is looked up."""
    if isinstance(typename, basestring):
        typename = typename.split('::')
    if len(typename) <= 1:
        # Unscoped name: search relative to the supplied node.
        return ast
    # Scoped name: resolve everything but the last component to a scope.
    scope = find_scope(ast, typename[:-1])
    assert scope is not None
    return scope
def _find_in_ast(ast, node_name, node_leaf):
found = []
for child in ast.children:
if child.leaf == node_leaf and (node_name is None or child.name == node_name):
found.append(child)
return found
# Node resolution:
# Try me
# Try all my parents, breadth-first
# Try my outer scope.
# Until bored or found or no more outer scopes
def _getnode_in_the_family(ast_list, node_name, node_leaf, get_all = False):
    """
    Do a breadth-first search of the tree starting from roots at "ast_list".

    Returns the list of matches; unless get_all is set, the search stops as
    soon as any match is found.
    """
    # Queue of nodes still to be examined, in BFS order.
    candidates = ast_list
    found = []
    while candidates:
        candidate = candidates.pop(0)
        found.extend(_find_in_ast(candidate, node_name, node_leaf))
        # Stop at the first hit unless the caller asked for every match.
        if found and not get_all:
            return found
        # Queue inherited (base) types: each 'inherits' child wraps the base
        # type as its first child.
        candidates.extend( [item.children[0] for item in candidate['inherits']] )
    return found
def getNode(typename, ast, nodetype = None):
    """Resolve *typename* (possibly '::'-scoped) to a node of kind *nodetype*.

    Searches the nearest enclosing scope first (including inherited scopes,
    breadth-first), then each successive outer scope; returns None when
    nothing matches.
    """
    # Typename could be a basic type, or a scoped type. If it's a scoped type,
    # find the appropriate spot to begin searching.
    ast = get_scope_for_typename(ast, typename)
    # Remove any scope information from "typename".
    typename_base = get_base_typename(typename)
    # Walk up from AST until we get to a scope.
    while ast.name not in SCOPES:
        ast = ast.parent
    # Starting from AST, search all children for "nodetype" of type "typename_base".
    # If we don't find any, check the parent, until we can't go any higher, which
    # means we've reached the root (or our tree is broken)
    # This is complicated by inheritance rules. If in our travels we encounter a type
    # with an "inherits" attribute, also try "find_in_ast" in that type.
    while ast:
        # Regular search
        targets = _getnode_in_the_family([ast], nodetype, typename_base)
        if targets:
            return targets[0]
        # No luck, so try the enclosing scope (confusingly named "parent")
        ast = ast.parent
    return None
def getAllNodes(typename, ast, nodetype):
    """Like getNode(), but collect every match in this and all outer scopes."""
    # FIXME: This is rather similar to getNode, but the bulk of the main loop
    # is different.
    # Typename could be a basic type, or a scoped type. If it's a scoped type,
    # find the appropriate spot to begin searching.
    ast = get_scope_for_typename(ast, typename)
    # Remove any scope information from "typename".
    typename_base = get_base_typename(typename)
    # Walk up from AST until we get to a scope.
    while ast.name not in SCOPES:
        ast = ast.parent
    # Starting from AST, search all children for "nodetype" of type "typename_base".
    # If we don't find any, check the parent, until we can't go any higher, which
    # means we've reached the root (or our tree is broken)
    # This is complicated by inheritance rules. If in our travels we encounter a type
    # with an "inherits" attribute, also try "find_in_ast" in that type.
    found = []
    while ast:
        # Regular search
        found.extend(_getnode_in_the_family([ast], nodetype, typename_base, get_all = True))
        ast = ast.parent
    return found
def node_sorter(lhs, rhs):
    """Python 2 cmp-style comparator: order nodes by (source_file, source_line)."""
    return cmp( (lhs.source_file, lhs.source_line), (rhs.source_file, rhs.source_line) )
def getSingleNode(typename, ast, nodetype):
    """Resolve *typename* to exactly one node of kind *nodetype*.

    Raises SymbolDefinedTwiceError (reporting the two earliest definitions in
    source order) when the symbol is defined more than once.

    BUG FIX: the original asserted len(found) in (1, 2), so a symbol defined
    three or more times crashed with a bare AssertionError instead of the
    intended SymbolDefinedTwiceError.
    """
    found = getAllNodes(typename, ast, nodetype)
    # At least one definition must exist; callers are expected to have
    # validated the name beforehand.
    assert len(found) >= 1
    if len(found) > 1:
        # Report the two earliest definitions in source order.
        found.sort(node_sorter)
        raise parsers_error.SymbolDefinedTwiceError(typename, found[0], found[1])
    return found[0]
def can_coerce_expr(expr_from, type_to):
    """Return True when expression *expr_from* can be coerced to *type_to*.

    Coercion succeeds when the alias-resolved types are identical, when the
    source type's value range fits inside the target's, or when the
    expression carries a known constant value that fits the target's range.
    """
    type_from = expr_from.the('type')
    # If a type is None, it may mean that it came from an expression node that
    # did not contain a child Type node.
    assert type_from is not None and type_to is not None
    #print "expr from"
    #expr_from.print_tree()
    #print "type to"
    #type_to.print_tree()
    # Compare the underlying types, not aliases of them.
    type_from = find_alias_target(type_from)
    type_to = find_alias_target(type_to)
    if type_from is type_to:
        return True
    # If we can't do any further comparisons, and the types aren't strictly the same,
    # bail.
    if not type_from.has_attribute('smallest') or not type_to.has_attribute('smallest') \
            or not type_from.has_attribute('largest') or not type_to.has_attribute('largest'):
        return False
    # If "type_from" fits within "type_to" then we're ok.
    if type_from.attribute('smallest') >= type_to.attribute('smallest') \
            and type_from.attribute('largest') <= type_to.attribute('largest'):
        return True
    # If "type_from" happens to be a constant with a known value, and *that*
    # fits with in "type_to", then we're OK.
    if expr_from.attribute('value', None) is not None \
            and expr_from.attribute('value') >= type_to.attribute('smallest') \
            and expr_from.attribute('value') <= type_to.attribute('largest'):
        return True
    # The lesson is: never try.
    return False
def dump_types_n_size(node, prefix = None):
    """Recursively collect [qualified_name, size, node] for every named type.

    NOTE(review): the default prefix of None raises TypeError on
    `prefix + node.leaf` as soon as a scope or named type node is reached;
    callers presumably always pass a string prefix -- confirm before relying
    on the default.
    """
    returnlist = []
    if node.type in SCOPES:
        # Qualify nested names with the enclosing scope's name.
        prefix = prefix + node.leaf + '_'
    if node.type == 'type' and node.leaf != None:
        type_size = size(node)
        name = prefix + node.leaf
        returnlist.append([name, type_size, node])
    for child in node.children:
        child_list = dump_types_n_size(child, prefix)
        if child_list != []:
            returnlist.extend(child_list)
    return returnlist
def find_type_in_scope(scope_node, typename):
    """Find a 'type' node named *typename* directly inside *scope_node*."""
    return find_node_in_scope(scope_node, typename, 'type')
def find_node_in_scope(scope_node, typename, nodetype):
    """Find a *nodetype* node named *typename* directly inside *scope_node*.

    Does not descend into nested scopes; inherited scopes are searched
    recursively when the scope carries an 'inheritance' attribute.
    Returns None when nothing matches.
    """
    # look for type typename without descending into a node of SCOPES (i.e. a new scope)
    type_node = scope_node.find_node(nodetype,typename, SCOPES)
    if type_node != None:
        return type_node
    # else: look for typename in inherited scopes
    if scope_node.has_attribute('inheritance'):
        inher_list = scope_node.get_attribute('inheritance')
        for inher_scope_name in inher_list:
            inher_scope = find_scope(scope_node, inher_scope_name.split('::'))
            assert inher_scope is not None
            inher_result = find_node_in_scope(inher_scope, typename, nodetype)
            if inher_result != None:
                return inher_result
    return None
def find_scope(startnode, name):
    """Locate the scope node addressed by *name* (a list or '::'-string).

    A leading empty component (i.e. a name beginning with '::') anchors the
    search at the root scope; otherwise the first component is searched
    bottom-up from *startnode*.  Remaining components are then resolved by
    descending scope by scope.
    """
    if not isinstance(name, list):
        name = name.split('::')
    #first scope to be searched
    find = name[0]
    if find == '':
        # root_scope ...
        working_node = get_root_scope(startnode)
    else:
        # -> just bottom-up till a "scope"-node
        working_node = startnode
        while working_node != None and ((working_node.type not in SCOPES) or (working_node.leaf != find)):
            result_node = working_node.find_node(SCOPES, name, SCOPES)
            if result_node is not None:
                #print 'returning1: %s' %(result_node)
                return result_node
            #print '2working_node is now: %s(%s)' %(working_node.type, working_node)
            working_node = working_node.parent
    # Descend through the remaining scope components.
    for scope in name[1:]:
        working_node = working_node.find_node(SCOPES, scope, SCOPES)
        assert working_node is not None
    return working_node
def get_root_scope(startnode):
    """Walk parent links upward and return the root (parent-less) node."""
    node = startnode
    while node.parent is not None:
        node = node.parent
    return node
def get_full_scope(node):
    """Return the list of enclosing scope names of *node*, outermost first.

    The root node is never named; None yields an empty list.
    """
    if node is None:
        return []
    names = []
    ancestor = node.parent
    # Walk towards the root, prepending so the outermost scope ends up first.
    while ancestor.parent is not None:
        if ancestor.name in SCOPES:
            names.insert(0, ancestor.leaf)
        ancestor = ancestor.parent
    return names
def get_full_scope_string(node):
    """Collect the enclosing scope names of *node*, outermost first.

    NOTE(review): despite the name, this returns a *list* (exactly like
    get_full_scope) for any non-None node, and '' for None.  Confirm whether
    a '::'.join of the list was intended before changing callers.
    """
    if node is None:
        return ''
    scope_list = []
    node = node.parent
    while node.parent is not None:
        if node.name in SCOPES:
            scope_list.append(node.leaf)
        node = node.parent
    # Collected innermost-first while climbing; reverse to outermost-first.
    scope_list.reverse()
    #print scope_list
    return scope_list
def get_param_type_spec(node, side='client'):
#possible meta_types: basic, alias, string, wstring, scoped_name, pointer
meta_type = node.attribute('meta_type')
if meta_type in ['basic','alias']:
scopelist = get_full_scope(node)
scopelist.append(node.leaf)
return '_'.join(scopelist)
elif meta_type in ['string','wstring']:
return
elif meta_type == 'pointer':
return '*' + get_param_type_spec(node.get_attribute('target_type')[0])
elif meta_type == 'polymorphic':
if side == 'client':
return get_param_type_spec(node.attribute('receiver_type'))
else:
return get_param_type_spec(node.attribute('sender_type'))
elif meta_type == 'scoped_name':
target = node.the('target').the('type')
return '_'.join(get_full_scope(target).append(target.leaf))
else:
print 'meta_type unknown: ', meta_type
assert False
def basic_size(type_node):
    """Return the size of a basic type; -1 when a textual size fails to eval."""
    if isinstance(type_node.attribute('size'), str):
        # FIXME: Can this go? Do we ever get str sizes? - nfd
        try:
            # SECURITY NOTE: eval() of parser-supplied text; acceptable only
            # because the IDL input is trusted.
            return int(eval(type_node.attribute('size')))
        except Exception:
            print 'Exception in basic_size-calculation of ', type_node.attribute('size')
            return -1
    else:
        return type_node.attribute('size')
def sized_alias_size(type_node):
    """Sized aliases carry their own 'size' attribute; reuse basic_size."""
    #type_node.print_tree()
    return basic_size(type_node)
def alias_size(type_node):
    """Size of an alias type: the size of the type it targets."""
    #print 'alias_size of:'
    #type_node.print_tree()
    target_node = type_node.maybe_walk_to('target', 'type')
    if target_node == None:
        # An alias without a resolvable target means the tree is malformed.
        type_node.print_tree()
        assert False
    return size(target_node)
def union_size(type_node):
    """Size of a union: the size of its largest case member (0 if empty)."""
    cases = [case.the('type_instance')
             for case in type_node.the('members')['case']]
    biggest = 0
    for case in cases:
        case_size = size(case.get_attribute('target_type')[0])
        if case_size > biggest:
            biggest = case_size
    return biggest
def enum_size(type_node):
    """Size of an enum: treated as a signed int, per C convention."""
    # FIXME: Is this right? Enums in C should just be sizeof(int)...
    int_type = getBasicTypenode('signed int', type_node)
    return basic_size(int_type)
    # Old code:
    #length = len(type_node.get_attribute('enumeration'))
    #enumsize = math.ceil(math.log(math.ceil(math.log(length, 2)),32)) * 32
    #return enumsize
def struct_size(type_node):
    """Size of a struct: the sum of the sizes of its member target types.

    No padding/alignment is modelled.
    """
    total = 0
    for type_inst in type_node.the('members')['type_instance']:
        total += size(type_inst.the('target').the('type'))
    return total
def array_size(type_node):
    """Total size of an array type: element size times every dimension.

    Dimensions may be literal integers or named constants; unbounded arrays
    (shape is None) report 0.
    """
    shape = type_node.get_attribute('shape')
    base_size = size(type_node.the('target').the('type'))
    if shape is None:
        # Unbounded array: no storage size is known.
        return 0
    else:
        for dim in shape:
            if dim.isdigit():
                base_size *= eval(dim)
            else:
                # SECURITY NOTE: eval() of constant text looked up from the
                # AST; safe only because the IDL input is trusted.
                base_size *= eval(constant_lookup(dim))
        return base_size
def sequence_size(type_node):
    """Size of a sequence type: element size times every dimension.

    CONSISTENCY: the original body duplicated array_size() line for line;
    delegate instead so the two computations cannot drift apart.
    """
    return array_size(type_node)
def polymorphic_size(type_node):
    """Return the pair (sender_size, receiver_size) for a polymorphic type."""
    sender_size = size(type_node.attribute('sender_type'))
    receiver_size = size(type_node.attribute('receiver_type'))
    return (sender_size, receiver_size)
def pointer_size(type_node):
    """Pointers are assumed int-sized; fall back to 32 when 'int' is unknown."""
    int_node = getTypenode('int', type_node)
    if int_node != None:
        return size(int_node)
    else:
        print 'int-Node in infogripper.py -> pointer_size not found!'
        return 32
def string_size(type_node):
    """Strings are represented as pointers, so they share pointer size."""
    return pointer_size(type_node)
def word_size(type_node):
    """A machine word is sized like 'signed int'; fall back to 32 if unknown."""
    int_node = getTypenode('signed int', type_node)
    if int_node != None:
        return size(int_node)
    else:
        print 'int-Node in infogripper.py -> word_size not found!'
        return 32
def size(type_node):
    """Dispatch to the <meta_type>_size helper for *type_node*.

    e.g. a node with meta_type 'basic' is handled by basic_size(), looked up
    dynamically through globals().
    """
    if type_node.type=='target':
        # Callers may pass the 'target' wrapper directly; unwrap it.
        type_node = type_node.the('type')
    if not type_node.has_attribute('meta_type'):
        type_node.print_tree()
        assert False
    meta_type = type_node.get_attribute('meta_type')[0] + '_size'
    #print globals()[meta_type]
    #print 'meta_type = ', meta_type
    return globals()[meta_type](type_node)
def find_alias_target(ast):
    """ If "ast" is an alias type, return its target.
    If "ast" is a type instance, go to the constructing type first.
    """
    # Keep unwrapping until a complete pass makes no further progress.
    try_again = True
    while try_again:
        try_again = False
        if ast.name == 'type_instance':
            ast = ast.the('target').the('type')
            try_again = True
        # Customised types wrap their base type as the first child.
        while ast.name == 'type' and ast.the('customised'):
            ast = ast.the('customised').children[0]
            try_again = True
        # Follow alias chains through to the real type.
        while ast.name == 'type' and ast.get_attribute('meta_type') == ['alias']:
            ast = ast.the("target").children[0]
            try_again = True
    return ast
# Helpers for type info
def is_basic_type(ast):
    """True if *ast* is a 'type' node whose meta_type is 'basic'."""
    if ast.name != 'type':
        return False
    return ast.get_attribute('meta_type') == ['basic']
def is_corba_integer(ast):
    """True if *ast* resolves (through aliases) to a CORBA integer type."""
    resolved = find_alias_target(ast)
    integer_names = ('short', 'unsigned short', 'long', 'unsigned long',
                     'long long', 'unsigned long long')
    return is_basic_type(resolved) and resolved.leaf in integer_names
def is_bool(ast):
    """True if *ast* resolves (through aliases) to the basic type 'bool'."""
    resolved = find_alias_target(ast)
    return is_basic_type(resolved) and resolved.leaf == 'bool'
def is_char(ast):
    """True if *ast* resolves (through aliases) to the basic type 'char'."""
    resolved = find_alias_target(ast)
    return is_basic_type(resolved) and resolved.leaf == 'char'
def is_octet(ast):
    """True if *ast* resolves (through aliases) to the basic type 'octet'."""
    resolved = find_alias_target(ast)
    return is_basic_type(resolved) and resolved.leaf == 'octet'
def is_enum(ast):
    """True if *ast* resolves (through aliases) to an enum type."""
    resolved = find_alias_target(ast)
    return resolved.get_attribute('meta_type') == ['enum']
def is_void(ast):
    """True if *ast* resolves (through aliases) to the basic type 'void'."""
    resolved = find_alias_target(ast)
    return is_basic_type(resolved) and resolved.leaf == 'void'
|
11587550
|
from ctypes import CDLL, c_bool, c_wchar_p, c_int, c_ubyte, sizeof, c_void_p
from logging import getLogger
from os import environ
from platform import architecture
from sys import maxsize
from typing import List, Tuple, Optional
from PIL import Image
LOG = getLogger(__name__)
# LCD types (values accepted by logi_lcd_init / logi_lcd_is_connected)
TYPE_MONO = 1
TYPE_COLOR = 2
# LCD Monochrome buttons (bit flags for logi_lcd_is_button_pressed)
MONO_BUTTON_0 = 0x1
MONO_BUTTON_1 = 0x2
MONO_BUTTON_2 = 0x4
MONO_BUTTON_3 = 0x8
# LCD Color buttons (bit flags for logi_lcd_is_button_pressed)
COLOR_BUTTON_LEFT = 0x0100
COLOR_BUTTON_RIGHT = 0x0200
COLOR_BUTTON_OK = 0x0400
COLOR_BUTTON_CANCEL = 0x0800
COLOR_BUTTON_UP = 0x1000
COLOR_BUTTON_DOWN = 0x2000
COLOR_BUTTON_MENU = 0x4000
# LCD Monochrome size, in pixels
MONO_WIDTH = 160
MONO_HEIGHT = 43
# LCD Color size, in pixels
COLOR_WIDTH = 320
COLOR_HEIGHT = 240
def _init_dll() -> CDLL:
    """Load the Logitech LCD wrapper DLL matching the interpreter's bitness.

    :raises KeyError: when neither PROGRAMW6432 nor PROGRAMFILES is set
    :raises FileNotFoundError: when the SDK DLL is not installed
    """
    is_64bit = architecture()[0] == '64bit' and maxsize > 2 ** 32 and sizeof(c_void_p) > 4
    arch = 'x64' if is_64bit else 'x86'
    prog_files = environ.get('PROGRAMW6432')
    if prog_files is None:
        # 32-bit Python on 64-bit Windows, or plain 32-bit Windows.
        prog_files = environ['PROGRAMFILES']
    dll_path = f"{prog_files}\\Logitech Gaming Software\\LCDSDK_8.57.148\\Lib\\GameEnginesWrapper\\{arch}\\LogitechLcdEnginesWrapper.dll"
    return CDLL(dll_path)
# Load the SDK once at import time; every wrapper below degrades to a no-op
# (returning False/None) when LCD_DLL stays None.
try:
    LCD_DLL: Optional[CDLL] = _init_dll()
    LOG.debug('Loading of LCD SDK success')
except (KeyError, FileNotFoundError) as err:
    LOG.error(f'Loading of LCD SDK failed: {err}', exc_info=True)
    LCD_DLL = None
def logi_lcd_init(name: str, lcd_type: int) -> bool:
    """Initialise the SDK; must be called before any other wrapper.

    :param name: the name of your applet, you cant change it after initialization
    :param lcd_type: defines the type of your applet lcd target
    :return: result (False when the SDK DLL is unavailable)
    """
    if not LCD_DLL:
        return False
    func = LCD_DLL['LogiLcdInit']
    func.restype = c_bool
    func.argtypes = (c_wchar_p, c_int)
    return func(name, lcd_type)
def logi_lcd_is_connected(lcd_type: int) -> bool:
    """Report whether an LCD device of *lcd_type* is attached.

    :param lcd_type: defines the type of your applet lcd target
    :return: result (False when the SDK DLL is unavailable)
    """
    if not LCD_DLL:
        return False
    func = LCD_DLL['LogiLcdIsConnected']
    func.restype = c_bool
    func.argtypes = [c_int]
    return func(lcd_type)
def logi_lcd_is_button_pressed(button: int) -> bool:
    """Report whether the given button flag is currently pressed.

    :param button: defines the button to check on
    :return: result (False when the SDK DLL is unavailable)
    """
    if not LCD_DLL:
        return False
    func = LCD_DLL['LogiLcdIsButtonPressed']
    func.restype = c_bool
    func.argtypes = [c_int]
    return func(button)
def logi_lcd_update() -> None:
    """Flush the current frame/text buffer to the LCD display."""
    if not LCD_DLL:
        return
    func = LCD_DLL['LogiLcdUpdate']
    func.restype = None
    func()
def logi_lcd_shutdown():
    """Kill the applet and free memory used by the SDK."""
    if not LCD_DLL:
        return
    func = LCD_DLL['LogiLcdShutdown']
    func.restype = None
    func()
def logi_lcd_mono_set_background(pixels: List[int]) -> bool:
    """
    The array of pixels is organized as a rectangular area, 160 bytes wide and 43 bytes high.
    Despite the display being monochrome, 8 bits per pixel are used here for simple
    manipulation of individual pixels.
    Note: The image size must be 160x43 in order to use this function. The SDK will turn on
    the pixel on the screen if the value assigned to that byte is >= 128, it will remain off
    if the value is < 128.
    :param pixels: list of 6880 (160x43) pixels as int
    :return: result
    """
    if LCD_DLL:
        logilcdmonosetbackground = LCD_DLL['LogiLcdMonoSetBackground']
        logilcdmonosetbackground.restype = c_bool
        logilcdmonosetbackground.argtypes = [c_ubyte * (MONO_WIDTH * MONO_HEIGHT)]
        # Scale by 128 so binary 0/1 input maps to off/on.  NOTE(review):
        # c_ubyte keeps only the low byte, so PIL mode-'1' data (0/255) maps
        # to 0 / (32640 & 0xFF == 128), which still lands on the correct side
        # of the >=128 threshold — but arbitrary grey values may not; confirm
        # that callers only pass binary pixel data.
        img_bytes = [pixel * 128 for pixel in pixels]
        return logilcdmonosetbackground((c_ubyte * (MONO_WIDTH * MONO_HEIGHT))(*img_bytes))
    return False
def logi_lcd_mono_set_text(line_no: int, text: str):
    """Write *text* on one of the mono LCD's four lines.

    :param line_no: The monochrome lcd display has 4 lines, so this parameter can be any number from 0 to 3
    :param text: defines the text you want to display
    :return: result (False when the SDK DLL is unavailable)
    """
    if not LCD_DLL:
        return False
    func = LCD_DLL['LogiLcdMonoSetText']
    func.restype = c_bool
    func.argtypes = (c_int, c_wchar_p)
    return func(line_no, text)
def logi_lcd_color_set_background(pixels: List[Tuple[int, int, int, int]]) -> bool:
    """Set the full-colour background of the 320x240 LCD.

    Each pixel is a 4-byte tuple, so the flattened buffer holds
    320x240x4 = 307200 bytes.

    :param pixels: list of 307200 (320x240x4) pixels as int
    :return: result (False when the SDK DLL is unavailable)
    """
    if not LCD_DLL:
        return False
    func = LCD_DLL['LogiLcdColorSetBackground']
    func.restype = c_bool
    buffer_type = c_ubyte * (4 * COLOR_WIDTH * COLOR_HEIGHT)
    func.argtypes = [buffer_type]
    # Flatten (r, g, b, a) tuples into one byte stream.
    flat = [channel for pixel in pixels for channel in pixel]
    return func(buffer_type(*flat))
def logi_lcd_color_set_title(text: str, rgb: Tuple[int, int, int] = (255, 255, 255)):
    """Set the large first (title) line of the colour LCD; default colour is white.

    :param text: defines the text you want to display as title
    :param rgb: tuple with integer values between 0 and 255 as red, green, blue
    :return: result (False when the SDK DLL is unavailable)
    """
    if not LCD_DLL:
        return False
    func = LCD_DLL['LogiLcdColorSetTitle']
    func.restype = c_bool
    func.argtypes = (c_wchar_p, c_int, c_int, c_int)
    return func(text, *rgb)
def logi_lcd_color_set_text(line_no: int, text: str, rgb: Tuple[int, int, int] = (255, 255, 255)):
    """Write *text* on one of the colour LCD's eight text lines; default colour is white.

    :param line_no: The color lcd display has 8 lines for standard text, so this parameter can be any number from 0 to 7
    :param text: defines the text you want to display
    :param rgb: tuple with integer values between 0 and 255 as red, green, blue
    :return: result (False when the SDK DLL is unavailable)
    """
    if not LCD_DLL:
        return False
    func = LCD_DLL['LogiLcdColorSetText']
    func.restype = c_bool
    func.argtypes = (c_int, c_wchar_p, c_int, c_int, c_int)
    return func(line_no, text, *rgb)
def update_text(txt: List[str]) -> None:
    """
    Update display LCD with list of text.

    One list element per row: the mono LCD shows up to 4 rows, the color
    LCD up to 8.

    :param txt: List of strings to display, row by row
    """
    if logi_lcd_is_connected(TYPE_MONO):
        writer = logi_lcd_mono_set_text
    elif logi_lcd_is_connected(TYPE_COLOR):
        writer = logi_lcd_color_set_text
    else:
        LOG.warning('LCD is not connected')
        return
    for row_no, row in enumerate(txt):
        writer(row_no, row)
    logi_lcd_update()
def update_display(image: Image) -> None:
    """
    Update display LCD with image.

    :param image: image object from pillow library
    """
    # Try mono first, then color, matching the device detection order used
    # elsewhere in this module.
    for lcd_type, setter in ((TYPE_MONO, logi_lcd_mono_set_background),
                             (TYPE_COLOR, logi_lcd_color_set_background)):
        if logi_lcd_is_connected(lcd_type):
            setter(list(image.getdata()))
            logi_lcd_update()
            return
    LOG.warning('LCD is not connected')
def clear_display(true_clear=False) -> None:
    """
    Clear display.

    :param true_clear: when truthy, also blank every text line
    """
    if logi_lcd_is_connected(TYPE_MONO):
        _clear_mono(true_clear)
    elif logi_lcd_is_connected(TYPE_COLOR):
        _clear_color(true_clear)
    # The buffer is flushed even when no device reported as connected.
    logi_lcd_update()
def _clear_mono(true_clear):
    """
    Clear mono display.

    :param true_clear: when truthy, also blank the four text lines
    """
    blank = [0] * (MONO_WIDTH * MONO_HEIGHT)
    logi_lcd_mono_set_background(blank)
    if not true_clear:
        return
    for line_no in range(4):
        logi_lcd_mono_set_text(line_no, '')
def _clear_color(true_clear):
    """
    Clear color display.

    :param true_clear: when truthy, also blank the eight text lines
    """
    blank = [(0, 0, 0, 0)] * (COLOR_WIDTH * COLOR_HEIGHT)
    logi_lcd_color_set_background(blank)
    if not true_clear:
        return
    for line_no in range(8):
        logi_lcd_color_set_text(line_no, '')
|
11587586
|
import pytest
import os
import subprocess
import glob
import re
from shutil import copyfile
# Skip marks for tests that require external tool installations, detected
# via environment variables.
dynamorio = pytest.mark.skipif('DYNAMORIO_HOME' not in os.environ.keys(),
                               reason="DYNAMORIO_HOME not set")
ithemal = pytest.mark.skipif('ITHEMAL_HOME' not in os.environ.keys(),
                             reason="ITHEMAL_HOME not set")
@pytest.fixture(scope="module")
def db_config():
    """Module-scoped fixture: parse test_data/db_config.cfg into a dict.

    Creates the config from the bundled example file on first use.  Only
    lines of the form `key = "value"` are recognised.
    """
    if not os.path.exists('test_data/db_config.cfg'):
        copyfile('test_data/example_config.cfg', 'test_data/db_config.cfg')
    config = dict()
    # BUG FIX: the pattern was a non-raw string relying on the invalid escape
    # sequences '\-', '\.' and '\"' (deprecated in Python 3); use a raw
    # string and compile once outside the loop.
    pattern = re.compile(r'([a-zA-Z\-]+) *= *\"*([a-zA-Z0-9#\./]+)\"*')
    with open('test_data/db_config.cfg', 'r') as f:
        for line in f:
            found = pattern.search(line)
            if found:
                config[found.group(1)] = found.group(2)
    return config
|
11587613
|
from Configuration import ExecutionMode
from Configuration import Settings
from EndToEndTests.oldScripts import mainE2ELegacyTests
from DataBase import createSchema
from pynvml import nvmlInit
from Utils import Execution, init_context, init_comparators, gpuMemory
from blazingsql import DataType
from Runner import runTest
from Runner import TestSuites
import sys
import time
def E2EResults():
    """Collect and report end-to-end results.

    Outside generator mode: saves the run log, prints the maximum GPU memory
    delta observed during the run, dumps the GPU memory log, and returns
    (success, error_messages).  In generator mode there is nothing to
    compare, so the run always passes.
    """
    if Settings.execution_mode == ExecutionMode.GENERATOR:
        return True, []
    result, error_msgs = runTest.save_log(
        Settings.execution_mode == ExecutionMode.GPUCI
    )
    # Named 'max_delta' — the original used 'max', shadowing the builtin.
    max_delta = 0
    for entry in Settings.memory_list:
        if entry.delta > max_delta:
            max_delta = entry.delta
    print("MAX DELTA: " + str(max_delta))
    print(
        """***********************************************************
********************"""
    )
    gpuMemory.print_log_gpu_memory()
    return result, error_msgs
def checkErrors(result, error_msgs):
    """Report comparison failures against the previous run.

    Returns True when the E2E run failed (so the caller can exit non-zero),
    False otherwise.  Reads the module globals startTest/endTest set by the
    __main__ block.
    """
    if Settings.execution_mode != ExecutionMode.GENERATOR:
        # NOTE kahro <NAME> : here we tell to gpuci there was
        # an error comparing with historic results
        # TODO <NAME> we should try to enable and
        # use this function in the future
        if result is False:
            for error_msg in error_msgs:
                print(error_msg)

            def print_delta_time(startTest, endTest):
                """Format an elapsed interval (seconds) for humans."""
                elapsed = endTest - startTest  # in seconds
                if elapsed < 60:
                    return str(elapsed) + " seconds"
                # BUG FIX: the original used str(elapsed / 60), printing
                # fractional minutes (e.g. "1.5 minutes and 30 seconds");
                # whole minutes were clearly intended.
                return (
                    str(int(elapsed // 60))
                    + " minutes and "
                    + str(int(elapsed) % 60)
                    + " seconds"
                )

            print(
                "==>> E2E FAILED against previous run, total time was: "
                + print_delta_time(startTest, endTest)
            )
            return True
    return False
def runE2ETest(bc, dask_client, drill, spark):
    """Run the configured test-suite groups, then the legacy E2E tests."""
    runnerTest = TestSuites(bc, dask_client, drill, spark)
    runnerTest.setTargetTest(Settings.data["RunSettings"]["targetTestGroups"])
    runnerTest.runE2ETest()
    # Old-style tests that have not been migrated to TestSuites yet.
    mainE2ELegacyTests.runLegacyTest(bc, dask_client, drill, spark)
def main():
    """Entry point: parse args, init NVML/comparators/context, run the suite.

    Returns the (success, error_messages) pair from E2EResults().
    """
    print("**init end2end**")
    Execution.getArgs()
    nvmlInit()
    targetTestGroups = Settings.data["RunSettings"]["targetTestGroups"]
    # only innerJoinsTest will be with progress bar
    useProgressBar = False
    if "innerJoinsTest" in targetTestGroups:
        useProgressBar = True
    print("Using progress bar: ", useProgressBar)
    drill, spark = init_comparators()
    bc, dask_client = init_context(useProgressBar = useProgressBar)
    runE2ETest(bc, dask_client, drill, spark)
    return E2EResults()
if __name__ == "__main__":
    # startTest / endTest are module globals read by checkErrors(); the
    # original `global` statements here were no-ops at module scope and have
    # been dropped.
    startTest = time.time()  # in seconds
    result, error_msgs = main()
    endTest = time.time()  # in seconds
    if checkErrors(result, error_msgs):
        # TODO <NAME>: uncomment this line
        # when gpuci has all the env vars set
        # return error exit status to the command prompt (shell)
        sys.exit(1)
|
11587624
|
import re
from abc import abstractmethod, ABCMeta
from pycparser import c_ast as a
class AstVisitor(metaclass=ABCMeta):
    """Abstract base visitor over pycparser ASTs.

    Dispatches on the concrete node class through the table built in
    __init__; subclasses must implement one visit_* method per node kind.

    BUG FIX: the original declared `__metaclass__ = ABCMeta`, which is
    Python 2 syntax and a silent no-op under Python 3 (this module uses
    Python 3 `super()` elsewhere), so abstract-method enforcement never
    actually happened.  The Python 3 `metaclass=` keyword form restores it.
    """

    def __init__(self):
        # Maps each pycparser node class to its handler method.
        self.methods = {
            a.ArrayDecl: self.visit_ArrayDecl,
            a.ArrayRef: self.visit_ArrayRef,
            a.Assignment: self.visit_Assignment,
            a.BinaryOp: self.visit_BinaryOp,
            a.Break: self.visit_Break,
            a.Case: self.visit_Case,
            a.Cast: self.visit_Cast,
            a.Compound: self.visit_Compound,
            a.CompoundLiteral: self.visit_CompoundLiteral,
            a.Constant: self.visit_Constant,
            a.Continue: self.visit_Continue,
            a.Decl: self.visit_Decl,
            a.DeclList: self.visit_DeclList,
            a.Default: self.visit_Default,
            a.DoWhile: self.visit_DoWhile,
            a.EllipsisParam: self.visit_EllipsisParam,
            a.EmptyStatement: self.visit_EmptyStatement,
            a.Enum: self.visit_Enum,
            a.Enumerator: self.visit_Enumerator,
            a.EnumeratorList: self.visit_EnumeratorList,
            a.ExprList: self.visit_ExprList,
            a.FileAST: self.visit_FileAST,
            a.For: self.visit_For,
            a.FuncCall: self.visit_FuncCall,
            a.FuncDecl: self.visit_FuncDecl,
            a.FuncDef: self.visit_FuncDef,
            a.Goto: self.visit_Goto,
            a.ID: self.visit_ID,
            a.IdentifierType: self.visit_IdentifierType,
            a.If: self.visit_If,
            a.InitList: self.visit_InitList,
            a.Label: self.visit_Label,
            a.NamedInitializer: self.visit_NamedInitializer,
            a.ParamList: self.visit_ParamList,
            a.PtrDecl: self.visit_PtrDecl,
            a.Return: self.visit_Return,
            a.Struct: self.visit_Struct,
            a.StructRef: self.visit_StructRef,
            a.Switch: self.visit_Switch,
            a.TernaryOp: self.visit_TernaryOp,
            a.TypeDecl: self.visit_TypeDecl,
            a.Typedef: self.visit_Typedef,
            a.Typename: self.visit_Typename,
            a.UnaryOp: self.visit_UnaryOp,
            a.Union: self.visit_Union,
            a.While: self.visit_While,
            a.Pragma: self.visit_Pragma
        }

    def visit(self, item):
        """Dispatch *item* to the handler registered for its exact class."""
        return self.methods[type(item)](item)

    @abstractmethod
    def visit_ArrayDecl(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_ArrayRef(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Assignment(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_BinaryOp(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Break(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Case(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Cast(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Compound(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_CompoundLiteral(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Constant(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Continue(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Decl(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_DeclList(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Default(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_DoWhile(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_EllipsisParam(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_EmptyStatement(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Enum(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Enumerator(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_EnumeratorList(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_ExprList(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_FileAST(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_For(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_FuncCall(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_FuncDecl(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_FuncDef(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Goto(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_ID(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_IdentifierType(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_If(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_InitList(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Label(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_NamedInitializer(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_ParamList(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_PtrDecl(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Return(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Struct(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_StructRef(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Switch(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_TernaryOp(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_TypeDecl(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Typedef(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Typename(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_UnaryOp(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Union(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_While(self, item):
        raise NotImplementedError()

    @abstractmethod
    def visit_Pragma(self, item):
        raise NotImplementedError()
class DfsVisitor(AstVisitor):
def __init__(self):
    """Set up dispatch and track the function currently being traversed."""
    super().__init__()
    # FuncDef currently being traversed; set in visit_FuncDef.
    self.current_method = None
def visit_default(self, item):
    """Fallback for leaves and absent children: contribute nothing."""
    return []
def visit(self, item):
    """Route *item* to its handler; a missing (None) child uses the default."""
    if item is None:
        return self.visit_default(item)
    return super().visit(item)
def visit_ArrayDecl(self, item):
    """Visit the element type and the (optional) dimension expression."""
    return self.visit(item.type) + self.visit(item.dim)
def visit_ArrayRef(self, item):
    """Visit the array expression and its subscript."""
    return self.visit(item.name) + self.visit(item.subscript)
def visit_Assignment(self, item):
    """Visit the left-hand side, then the right-hand side."""
    return self.visit(item.lvalue) + self.visit(item.rvalue)
def visit_BinaryOp(self, item):
    """Visit both operands of a binary operator."""
    return self.visit(item.left) + self.visit(item.right)
def visit_Break(self, item):
    """Leaf statement: nothing to collect."""
    return self.visit_default(item)
def visit_Case(self, item):
    """Visit the case expression, then every statement in the case body."""
    stmt_results = flatten([self.visit(stmt) for stmt in item.stmts])
    return self.visit(item.expr) + stmt_results
def visit_Cast(self, item):
    """Visit the target type, then the expression being cast."""
    return self.visit(item.to_type) + self.visit(item.expr)
def visit_Compound(self, item):
    """Visit each statement of a block; empty blocks contribute nothing."""
    if not item.block_items:
        return []
    return flatten([self.visit(stmt) for stmt in item.block_items])
def visit_CompoundLiteral(self, item):
    """Visit the literal's type, then its initializer."""
    return self.visit(item.type) + self.visit(item.init)
def visit_Constant(self, item):
    """Leaf node: nothing to collect."""
    return self.visit_default(item)
def visit_Continue(self, item):
    """Leaf statement: nothing to collect."""
    return self.visit_default(item)
def visit_Decl(self, item):
    """Visit a declaration's type, initializer, and bit-field size."""
    return (self.visit(item.type)
            + self.visit(item.init)
            + self.visit(item.bitsize))
def visit_DeclList(self, item):
    """Visit every declaration in the list."""
    results = [self.visit(decl) for decl in item.decls]
    return flatten(results)
def visit_Default(self, item):
    """Visit every statement of a switch default branch."""
    results = [self.visit(stmt) for stmt in item.stmts]
    return flatten(results)
def visit_DoWhile(self, item):
    """Visit the loop condition, then the loop body."""
    return self.visit(item.cond) + self.visit(item.stmt)
def visit_EllipsisParam(self, item):
    """Leaf node ('...' in a parameter list): nothing to collect."""
    return self.visit_default(item)
def visit_EmptyStatement(self, item):
    """Leaf statement: nothing to collect."""
    return self.visit_default(item)
def visit_Enum(self, item):
    """Visit the enum's enumerator list."""
    return self.visit(item.values)
def visit_Enumerator(self, item):
    """Visit the enumerator's (optional) value expression."""
    return self.visit(item.value)
def visit_EnumeratorList(self, item):
    """Visit every enumerator in the list."""
    results = [self.visit(enum) for enum in item.enumerators]
    return flatten(results)
def visit_ExprList(self, item):
return flatten([self.visit(e) for e in item.exprs])
def visit_FileAST(self, item):
return flatten([self.visit(e) for e in item.ext])
def visit_For(self, item):
return flatten([
self.visit(i) for i in [item.init, item.cond, item.next, item.stmt]
])
def visit_FuncCall(self, item):
a = self.visit(item.name)
b = self.visit(item.args)
return a + b
def visit_FuncDecl(self, item):
a = self.visit(item.args)
b = self.visit(item.type)
return a + b
def visit_FuncDef(self, item):
a = self.visit(item.decl)
b = flatten([self.visit(p) for p in item.param_decls])
assert self.current_method is None
self.current_method = item
c = self.visit(item.body)
return a + b + c
    # Remaining node kinds: leaves return the default empty result, wrappers
    # forward to their single child, composites concatenate child results.

    def visit_Goto(self, item):
        return self.visit_default(item)

    def visit_ID(self, item):
        return self.visit_default(item)

    def visit_IdentifierType(self, item):
        return self.visit_default(item)

    def visit_If(self, item):
        a = self.visit(item.cond)
        b = self.visit(item.iftrue)
        c = self.visit(item.iffalse)
        return a + b + c

    def visit_InitList(self, item):
        return flatten([self.visit(e) for e in item.exprs])

    def visit_Label(self, item):
        return self.visit(item.stmt)

    def visit_NamedInitializer(self, item):
        a = flatten([self.visit(n) for n in item.name])
        b = self.visit(item.expr)
        return a + b

    def visit_ParamList(self, item):
        return flatten([self.visit(p) for p in item.params])

    def visit_PtrDecl(self, item):
        return self.visit(item.type)

    def visit_Return(self, item):
        return self.visit(item.expr)

    def visit_Struct(self, item):
        return flatten([self.visit(d) for d in item.decls])

    def visit_StructRef(self, item):
        a = self.visit(item.name)
        b = self.visit(item.field)
        return a + b

    def visit_Switch(self, item):
        a = self.visit(item.cond)
        b = self.visit(item.stmt)
        return a + b

    def visit_TernaryOp(self, item):
        a = self.visit(item.cond)
        b = self.visit(item.iftrue)
        c = self.visit(item.iffalse)
        return a + b + c

    def visit_TypeDecl(self, item):
        return self.visit(item.type)

    def visit_Typedef(self, item):
        return self.visit(item.type)

    def visit_Typename(self, item):
        return self.visit(item.type)

    def visit_UnaryOp(self, item):
        return self.visit(item.expr)

    def visit_Union(self, item):
        return flatten([self.visit(d) for d in item.decls])

    def visit_While(self, item):
        a = self.visit(item.cond)
        b = self.visit(item.stmt)
        return a + b

    def visit_Pragma(self, item):
        return self.visit_default(item)
class NondetIdentifierCollector(DfsVisitor, metaclass=ABCMeta):
    """Collects identifiers that receive nondeterministic/symbolic input.

    Bug fix: the old ``__metaclass__ = ABCMeta`` class attribute is a Python-2
    idiom that has no effect under Python 3 (this file uses Python-3-only
    ``super()``), so ``@abstractmethod`` was not enforced. The metaclass is now
    declared in the class header.
    """

    def __init__(self, pattern):
        super().__init__()
        # Maps identifier name -> {'line', 'origin file', 'scope'} of the call.
        self.nondet_identifiers = dict()
        # Stack of enclosing function names; top of stack is the current scope.
        self.scope = list()
        self.pattern = re.compile(pattern)

    @abstractmethod
    def get_var_name_from_function(self, item):
        """Return the variable name targeted by the matching nondet call."""
        raise NotImplementedError()

    def visit_FuncCall(self, item):
        func_name = get_name(item)
        if self.pattern.match(func_name):
            relevant_var = self.get_var_name_from_function(item)
            self.nondet_identifiers[relevant_var] = {
                'line': item.coord.line,
                'origin file': item.coord.file,
                'scope': self.scope[-1]
            }
        # no need to visit item.args, we don't do nested klee_make_symbolic calls
        return []

    def visit_FuncDef(self, item):
        # Push the function name as the current scope while visiting the body.
        self.scope.append(get_name(item.decl))
        self.visit(item.body)
        self.scope = self.scope[:-1]
        return []
def get_name(node):
    """Return the identifier name of the given pycparser AST node."""
    node_type = type(node)
    if node_type is a.FuncCall:
        return node.name.name
    if node_type in (a.FuncDecl, a.PtrDecl, a.Decl):
        # Unwrap nested declarators until a named node is reached.
        return get_name(node.type)
    if node_type is a.TypeDecl:
        return node.declname
    if node_type is a.Struct:
        return node.name
    raise AssertionError("Unhandled node type: " + str(node_type))
def get_type(node):
    """Return a human-readable C type string for the given pycparser node.

    Composite declarators (pointers, arrays, function pointers) are rendered
    recursively; '{}' placeholders mark where a declarator name would go.
    """
    node_type = type(node)
    name = []
    if node_type is a.IdentifierType:
        name += node.names
    elif node_type is a.Union:
        name += ['union', node.name]
    elif node_type is a.EllipsisParam:
        name += ['...']
    elif node_type is a.Struct:
        name += ['struct ' + node.name]
    elif node_type is a.Enum:
        name += ['enum ' + node.name]
    elif node_type is a.TypeDecl:
        name += [get_type(node.type)]
    elif node_type is a.Typename:
        name += [get_type(node.type)]
    elif node_type is a.Decl:
        name += [get_type(node.type)]
    elif node_type is a.PtrDecl:
        if type(node.type) is a.FuncDecl:
            # Pointer to function: rendered as 'ret(*{})(params)'.
            func_decl = node.type
            m_type = get_type(func_decl.type) + '(*{})('
            if func_decl.args:
                params = list()
                for param in func_decl.args.params:
                    params.append(get_type(param))
                m_type += ', '.join(params)
            m_type += ')'
            name += [m_type]
        else:
            name += [get_type(node.type), '*']
    elif node_type is a.ArrayDecl:
        a_type = get_type(node.type)
        name += [a_type, " {}[]"]
    elif node_type is a.FuncDecl:
        name += [get_type(node.type), node.declname + '()']
    else:
        raise AssertionError("Unhandled node type: " + str(node_type))
    try:
        # type quals can be 'const', 'volatile', 'static'
        if 'const' in node.quals and node_type is not a.Decl:
            name += ['const']
        if 'static' in node.quals:
            name = ['static'] + name
        if 'volatile' in node.quals:
            name = ['volatile'] + name
    except AttributeError:
        pass  # node kind has no 'quals' attribute
    return ' '.join(name)
class FuncDefCollector(a.NodeVisitor):
    """Collects the declaration node of every function definition in an AST."""

    def __init__(self):
        self.func_defs = []

    def visit_FuncDef(self, node):
        # The decl node carries the function's name and signature.
        self.func_defs.append(node.decl)
class FuncDeclCollector(a.NodeVisitor):
    """Collects function declarations, excluding function pointers and typedefs."""

    def __init__(self):
        self.func_decls = []

    def visit_FuncDecl(self, node):
        self.func_decls.append(node)

    def visit_PtrDecl(self, node):
        pass  # Don't go deeper so we don't collect function pointer

    def visit_Typedef(self, node):
        pass  # Don't go deeper so we don't collect typedef functions
|
11587630
|
import sys
import argparse
import datetime
from operator import itemgetter, attrgetter
from itertools import groupby
from typing import List
from humanfriendly import parse_timespan, format_timespan
import requests
from kubernetes import client, config
def parse_args(argv=None):
    """Parse command-line arguments for the node drainer.

    Args:
        argv: argument list to parse. Defaults to ``sys.argv[1:]`` evaluated at
            call time (the old default ``sys.argv[1:]`` was captured once at
            import time, which breaks if sys.argv changes later).

    Returns:
        argparse.Namespace with the ``*-after`` options converted to timedelta.
    """
    p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=(
        "Automatically cordon old worker nodes, send notifications about pending evictions and finally evict pods from "
        "cordoned old worker nodes"
    ))
    p.add_argument('--dry-run', default=False, action='store_true',
                   help="don't perform any changes, just print what would have been done")
    p.add_argument('--nodes', default=None, nargs='+', type=str, metavar='NODE',
                   help='act on the given nodes only')
    p.add_argument('--cordon-nodes-after', default='30d',
                   help='cordon nodes as old after they have been up for the given amount of time')
    p.add_argument('--notify-after', default='1d',
                   help=(
                       "send notifications about soon-to-be-drained nodes and the pods running on them after they have "
                       "been cordoned for the given amount of time; needs to be smaller than --evict-after)"))
    p.add_argument('--evict-after', default='2d',
                   help=(
                       "start evicting pods from nodes after they given amount of time after notifications have been "
                       "sent"))
    group = p.add_argument_group('slack notification settings')
    group.add_argument('--slack-webhook')
    group.add_argument('--slack-username', default=None)
    group.add_argument('--slack-icon', default=None)
    group.add_argument('--slack-default-message',
                       default=(
                           "*Heads up <!everyone>*\n"
                           "The node(s) running the following pods have been cordoned and will be gracefully drained soon. "
                           "The pods will be evicted at the given time. Make sure to define pod disruption budgets to prevent "
                           "service downtime."),
                       help='default message that will appear in slack notification')
    group.add_argument('--slack-additional-message', default=None,
                       help='will be added after the default message')
    group.add_argument('--slack-admin-contact',
                       help=(
                           "if given, this slack contact will be included in the notification messages to direct "
                           "people to the right team or person"))
    group.add_argument('--slack-target',
                       help='target for sending slack notifications about any actions performed by this program')
    group.add_argument('--slack-target-annotation',
                       help=(
                           "annotation key to look for on pods and the namespaces of pods that are running on nodes "
                           "that are to be drained soon; the value of the annotation is used as a target for sending "
                           "slack notifications about the actions performed by this program that affect the nodes that "
                           "the pods are running on"))
    args = p.parse_args(argv)

    # Convert the human-friendly timespans ('30d', '2h', ...) to timedeltas.
    args.cordon_nodes_after = datetime.timedelta(seconds=parse_timespan(args.cordon_nodes_after))
    args.notify_after = datetime.timedelta(seconds=parse_timespan(args.notify_after))
    args.evict_after = datetime.timedelta(seconds=parse_timespan(args.evict_after))

    if args.notify_after >= args.evict_after:
        # Error messages belong on stderr, not stdout.
        print('--notify-after needs to be smaller than --evict-after', file=sys.stderr)
        sys.exit(1)

    return args
def annotation(key: str):
    """Return the fully-qualified node-drainer annotation key for *key*."""
    prefix = 'node-drainer.k8s.logmein.com/'
    return prefix + key
def generate_action_plan(all_nodes: List[client.V1Node], all_namespaces: List[client.V1Namespace], all_pods, args):
    """Decide which nodes to cordon, notify about, or drain.

    Returns a dict keyed by action ('cordon'/'notify'/'drain'); each entry holds
    the node names to act on and descriptors of the pods affected by it.
    """
    actions = {
        'cordon': {
            'nodes': [],
            'affected_pods': []
        },
        'notify': {
            'nodes': [],
            'affected_pods': []
        },
        'drain': {
            'nodes': [],
            'affected_pods': []
        },
    }
    namespace_annotations = {ns.metadata.name: (ns.metadata.annotations or {}) for ns in all_namespaces}
    # Timestamps in annotations are naive UTC epoch seconds.
    now = datetime.datetime.utcnow().replace(microsecond=0)
    for node in all_nodes:
        if node.metadata.annotations.get(annotation('ignored')) is not None:
            continue
        # In dry-run mode the node is never actually marked unschedulable, so
        # rely on this program's 'cordoned' annotation instead.
        cordoned = (args.dry_run or node.spec.unschedulable) and node.metadata.annotations.get(annotation('cordoned')) is not None
        notifications_sent_at = node.metadata.annotations.get(annotation('notifications-sent'))
        if notifications_sent_at is not None:
            try:
                notifications_sent_at = datetime.datetime.fromtimestamp(int(notifications_sent_at))
            except (TypeError, ValueError, OverflowError, OSError) as exc:
                # Narrowed from a bare 'except:' (which also swallowed
                # KeyboardInterrupt/SystemExit); a corrupt annotation must not
                # abort the run, so fall back to "notify again later".
                print(f"Failed parsing timestamp of notifications-sent annotation: {exc}")
                notifications_sent_at = now + args.notify_after
        action = None
        eviction_time = None
        cordon_at = node.metadata.creation_timestamp.replace(tzinfo=None) + args.cordon_nodes_after
        if notifications_sent_at is not None:
            if notifications_sent_at + args.evict_after < now:
                action = 'drain'
                print(
                    f"Node {node.metadata.name} was already cordoned and notifications had been sent. "
                    "It will be drained now.")
        elif cordoned:
            if args.dry_run:
                cordoned_at = cordon_at
            else:
                unschedulable_taint = next(filter(
                    lambda taint: taint.key == 'node.kubernetes.io/unschedulable',
                    node.spec.taints))
                cordoned_at = unschedulable_taint.time_added.replace(tzinfo=None)
            if cordoned_at + args.notify_after < now:
                action = 'notify'
                eviction_time = f"{format_timespan(args.evict_after)} from now"
                print(
                    f"Node {node.metadata.name} was already cordoned and will be drained {eviction_time}. "
                    "Notifications will be sent now.")
        elif not node.spec.unschedulable and cordon_at < now:
            action = 'cordon'
            print(
                f"Node {node.metadata.name} is older than {args.cordon_nodes_after} and will be cordoned now. "
                f"Notifications will be sent in {format_timespan(args.notify_after)}.")
        if action is None:
            continue
        pods = [pod for pod in all_pods if pod.spec.node_name == node.metadata.name]
        if len(pods) > 0:
            print(" Pods running on this instance:")
            # group pods by namespace
            keyfunc = attrgetter('metadata.namespace')
            pods = sorted(pods, key=keyfunc)
            pods_by_namespace = groupby(pods, key=keyfunc)
            for namespace, ns_pods in pods_by_namespace:
                print(f" Namespace: {namespace}")
                for pod in ns_pods:
                    print(f" {pod.metadata.name}")
                    actions[action]['affected_pods'].append({
                        'namespace': pod.metadata.namespace,
                        'name': pod.metadata.name,
                        'annotations': {
                            # Pod annotations override namespace-level ones.
                            **namespace_annotations[pod.metadata.namespace],
                            **(pod.metadata.annotations or {})
                        },
                        'eviction_time': eviction_time
                    })
        actions[action]['nodes'].append(node.metadata.name)
    return actions
def notify(affected_pods, args):
    """
    Sends notification to the owners of the given list of affected pods.
    """
    if args.slack_webhook and (args.slack_target or args.slack_target_annotation):
        # Collect pods per slack target, taken from pod/namespace annotations.
        pods_by_slack_target = {}
        if args.slack_target_annotation:
            for pod in affected_pods:
                value = pod['annotations'].get(args.slack_target_annotation, None)
                if value is None:
                    continue
                # The annotation may name several comma-separated targets.
                targets = value.split(',')
                for target in targets:
                    if target not in pods_by_slack_target.keys():
                        pods_by_slack_target[target] = []
                    pods_by_slack_target[target].append(pod)
        if args.slack_target:
            # The global target always receives the full pod list.
            pods_by_slack_target[args.slack_target] = affected_pods
        for target, pods_for_target in pods_by_slack_target.items():
            if len(pods_for_target) == 0:
                continue
            # Build one attachment per eviction time, listing pods by namespace.
            keyfunc = itemgetter('eviction_time')
            pods_by_eviction_time = groupby(sorted(pods_for_target, key=keyfunc), key=keyfunc)
            attachments = []
            for eviction_time, pods_for_eviction_time in pods_by_eviction_time:
                keyfunc = itemgetter('namespace')
                pods_by_namespace = groupby(sorted(pods_for_eviction_time, key=keyfunc), key=keyfunc)
                text = ''
                for namespace, pods in pods_by_namespace:
                    text += f"*Namespace:* {namespace}\n"
                    for pod in pods:
                        text += f"• `{pod['name']}`\n"
                attachments.append({
                    'mrkdwn_in': ['text', 'pretext'],
                    'pretext': f"*Eviction:* {eviction_time}",
                    'text': text
                })
            text = args.slack_default_message
            if args.slack_additional_message:
                text += "\n" + args.slack_additional_message
            if args.slack_admin_contact and args.slack_admin_contact != target:
                text += f"\nQuestions regarding this message? Contact <{args.slack_admin_contact}>"
            requests.post(args.slack_webhook, json={
                'username': args.slack_username,
                'icon_emoji': args.slack_icon,
                'channel': target,
                'text': text,
                'attachments': attachments,
            })
def drain_node(v1, node: str, all_pods: list, args):
    """Evict every pod that is running on the named node.

    Bug fix (annotation only): *node* is the node NAME (a str) -- run() passes
    entries of the action plan's 'nodes' list -- not a V1Node object as the old
    ``client.V1Node`` hint claimed.
    """
    print(f"Draining node {node}")
    pods = [pod for pod in all_pods if pod.spec.node_name == node]
    for pod in pods:
        if pod.metadata.deletion_timestamp is not None:
            continue  # pod is already terminating
        print(f" Evicting pod {pod.metadata.name}")
        try:
            if not args.dry_run:
                v1.create_namespaced_pod_eviction(
                    pod.metadata.name,
                    pod.metadata.namespace,
                    {'metadata': {'name': pod.metadata.name}}
                )
        except client.rest.ApiException as exc:
            if exc.status == 429:
                # Eviction blocked by a PodDisruptionBudget; retried next run.
                print('Pod cannot be evicted right now due to PDB')
            elif exc.status == 404:
                pass  # pod is already gone
            elif exc.status == 500:
                print(f"Failed evicting pod {pod.metadata.name}; check PDB configuration")
            else:
                raise
def run():
    """Entry point: build the action plan, then notify/cordon/drain accordingly."""
    args = parse_args()
    try:
        config.load_kube_config()
    except Exception as exc:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # propagate instead of triggering the in-cluster fallback.
        print(f"Failed loading kube config ({exc}). Trying to use in-cluster config.")
        config.load_incluster_config()
    v1 = client.CoreV1Api()
    all_nodes = v1.list_node().items
    if args.nodes is not None:
        all_nodes = [node for node in all_nodes if node.metadata.name in args.nodes]
    all_namespaces = v1.list_namespace().items
    # DaemonSet pods are recreated on the node, so evicting them is pointless.
    all_pods = [
        pod for pod in v1.list_pod_for_all_namespaces().items
        if not any(owner.kind == 'DaemonSet' for owner in (pod.metadata.owner_references or []))
    ]
    for action, info in generate_action_plan(all_nodes, all_namespaces, all_pods, args).items():
        nodes = info['nodes']
        pods = info['affected_pods']
        if action == 'notify':
            notify(pods, args)
            for node in nodes:
                print(f"Notifications sent for node {node}")
                # The annotation is written even in dry-run so the state machine
                # can be simulated end to end.
                v1.patch_node(node, {'metadata': {'annotations': {annotation('notifications-sent'): str(int(datetime.datetime.utcnow().timestamp()))}}})
        if action == 'cordon':
            for node in nodes:
                print(f"Cordoning node {node}")
                patch = {'metadata': {'annotations': {annotation('cordoned'): ''}}}
                if not args.dry_run:
                    patch = {**patch, 'spec': {'unschedulable': True}}
                v1.patch_node(node, patch)
        if action == 'drain':
            for node in nodes:
                drain_node(v1, node, all_pods, args)


if __name__ == "__main__":
    run()
|
11587648
|
import os
import subprocess
import linuxcnc
import psutil
from pyudev.pyqt5 import MonitorObserver
from pyudev import Context, Monitor, Devices
from qtpy.QtCore import Slot, Property, Signal, QFile, QFileInfo, QDir, QIODevice
from qtpy.QtWidgets import QFileSystemModel, QComboBox, QTableView, QMessageBox, \
QApplication, QAbstractItemView, QInputDialog, QLineEdit
from qtpyvcp.plugins import getPlugin
from qtpyvcp.actions.program_actions import load as loadProgram
from qtpyvcp.utilities.info import Info
from qtpyvcp.utilities.logger import getLogger
from qtpyvcp.utilities.encode_utils import allEncodings
from qtpyvcp.lib.decorators import deprecated
LOG = getLogger(__name__)
IN_DESIGNER = os.getenv('DESIGNER') != None
class TableType(object):
    """File-table source kinds, exposed to Qt Designer via Q_ENUMS."""
    Local = 0
    Remote = 1
class RemovableDeviceComboBox(QComboBox):
    """ComboBox for choosing from a list of removable devices."""

    usbPresent = Signal(bool)
    currentPathChanged = Signal(str)
    currentDeviceEjectable = Signal(bool)

    def __init__(self, parent=None):
        super(RemovableDeviceComboBox, self).__init__(parent)
        # Used by showEvent to select the default location only once.
        self._first_show = True
        self.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self._file_locations = getPlugin('file_locations')
        self._file_locations.removable_devices.notify(self.onRemovableDevicesChanged)
        self._file_locations.new_device.notify(self.onNewDeviceAdded)
        self.info = Info()
        self._program_prefix = self.info.getProgramPrefix()
        self.currentTextChanged.connect(self.onCurrentTextChanged)
        # initialize device list
        self.onRemovableDevicesChanged(self._file_locations.removable_devices.value)

    def showEvent(self, event=None):
        # Select the configured default location the first time we're shown.
        if self._first_show:
            self._first_show = False
            self.setCurrentText(self._file_locations.default_location)
            data = self.currentData() or {}
            self.currentDeviceEjectable.emit(data.get('removable', False))
        super(RemovableDeviceComboBox, self).showEvent(event)

    def onCurrentTextChanged(self, text):
        # Propagate path/ejectability of the newly selected device.
        data = self.currentData()
        if data:
            self.currentPathChanged.emit(data.get('path', '/'))
            self.currentDeviceEjectable.emit(data.get('removable', False))

    def onRemovableDevicesChanged(self, devices):
        # Rebuild the item list without emitting intermediate change signals.
        self.blockSignals(True)
        self.clear()
        for label, path in list(self._file_locations.local_locations.items()):
            self.addItem(label, {'path': os.path.expanduser(path)})
        self.insertSeparator(100)
        if devices:
            for devices_node, device_data in list(devices.items()):
                self.addItem(device_data.get('label', 'Unknown'), device_data)
        self.blockSignals(False)

    def onNewDeviceAdded(self, device):
        # Auto-select a freshly plugged-in device; fall back to the default.
        if device:
            self.setCurrentText(device.get('label'))
        else:
            self.setCurrentText(self._file_locations.default_location)

    @Slot()
    def ejectDevice(self):
        data = self.currentData()
        if data:
            self._file_locations.ejectDevice(data.get('device'))
class FileSystemTable(QTableView, TableType):
if IN_DESIGNER:
from PyQt5.QtCore import Q_ENUMS
Q_ENUMS(TableType)
gcodeFileSelected = Signal(bool)
filePreviewText = Signal(str)
fileNamePreviewText = Signal(str)
transferFileRequest = Signal(str)
rootChanged = Signal(str)
atDeviceRoot = Signal(bool)
def __init__(self, parent=None):
super(FileSystemTable, self).__init__(parent)
self._table_type = TableType.Local
self._hidden_columns = ''
# This prevents doing unneeded initialization
# when QtDesginer loads the plugin.
if parent is None:
return
self.parent = parent
self.path_data = dict()
self.selected_row = None
self.clipboard = QApplication.clipboard()
self.model = QFileSystemModel()
self.model.setReadOnly(True)
self.model.setFilter(QDir.AllDirs | QDir.NoDotAndDotDot | QDir.AllEntries)
self.setModel(self.model)
self.verticalHeader().hide()
self.horizontalHeader().setStretchLastSection(True)
self.setAlternatingRowColors(True)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.selection_model = self.selectionModel()
self.selection_model.selectionChanged.connect(self.onSelectionChanged)
# open selected item on double click or enter pressed
self.activated.connect(self.openSelectedItem)
self.info = Info()
self.nc_file_editor = self.info.getEditor()
self.nc_file_dir = self.info.getProgramPrefix()
self.nc_file_exts = self.info.getProgramExtentions()
self.setRootPath(self.nc_file_dir)
self.model.rootPathChanged.connect(self.onRootPathChanged)
def showEvent(self, event=None):
root_path = self.model.rootPath()
self.rootChanged.emit(root_path)
self.atDeviceRoot.emit(os.path.ismount(root_path))
def onRootPathChanged(self, path):
self.atDeviceRoot.emit(os.path.ismount(path))
def onSelectionChanged(self, selected, deselected):
if len(selected) == 0:
return
index = selected.indexes()[0]
path = self.model.filePath(index)
if os.path.isfile(path):
self.gcodeFileSelected.emit(True)
encodings = allEncodings()
enc = None
for enc in encodings:
try:
with open(path, 'r', encoding=enc) as f:
content = f.read()
break
except Exception as e:
# LOG.debug(e)
LOG.info(f"File encoding doesn't match {enc}, trying others")
LOG.info(f"File encoding: {enc}")
self.filePreviewText.emit(content)
self.fileNamePreviewText.emit(path)
else:
self.gcodeFileSelected.emit(False)
self.filePreviewText.emit('')
self.fileNamePreviewText.emit('')
@Slot()
def openSelectedItem(self, index=None):
"""If ngc file, opens in LinuxCNC, if dir displays dir."""
if index is None:
selection = self.getSelection()
if selection is None:
return
index = selection[0]
path = self.model.filePath(self.rootIndex())
name = self.model.filePath(index)
absolute_path = os.path.join(path, name)
file_info = QFileInfo(absolute_path)
if file_info.isDir():
self.model.setRootPath(absolute_path)
self.setRootIndex(self.model.index(absolute_path))
self.rootChanged.emit(absolute_path)
elif file_info.isFile():
# if file_info.completeSuffix() not in self.nc_file_exts:
# LOG.warn("Unsuported NC program type with extention .%s",
# file_info.completeSuffix())
loadProgram(absolute_path)
@Slot()
def editSelectedFile(self):
"""Open the selected file in editor."""
selection = self.getSelection()
if selection is not None:
path = self.model.filePath(selection[0])
subprocess.Popen([self.nc_file_editor, path])
return False
@Slot()
def loadSelectedFile(self):
"""Loads the selected file into LinuxCNC."""
selection = self.getSelection()
if selection is not None:
path = self.model.filePath(selection[0])
loadProgram(path)
return True
return False
@Slot()
def selectPrevious(self):
"""Select the previous item in the view."""
selection = self.getSelection()
if selection is None:
# select last item in view
self.selectRow(self.model.rowCount(self.rootIndex()) - 1)
else:
self.selectRow(selection[0].row() - 1)
return True
@Slot()
def selectNext(self):
"""Select the next item in the view."""
selection = self.getSelection()
if selection is None:
# select first item in view
self.selectRow(0)
else:
self.selectRow(selection[-1].row() + 1)
return True
@Slot()
def rename(self):
"""renames the selected file or folder"""
index = self.selectionModel().currentIndex()
path = self.model.filePath(index)
if path:
file_info = QFileInfo(path)
if file_info.isFile():
filename = self.rename_dialog("file")
if filename:
q_file = QFile(path)
file_info.absolutePath()
new_path = os.path.join(file_info.absolutePath(), str(filename))
q_file.rename(new_path)
elif file_info.isDir():
filename = self.rename_dialog("directory")
if filename:
directory = QDir(path)
file_info.absolutePath()
new_path = os.path.join(file_info.absolutePath(), str(filename))
directory.rename(path, new_path)
@Slot()
def newFile(self):
"""Create a new empty file"""
path = self.model.filePath(self.rootIndex())
new_file_path = os.path.join(path, "New File.ngc")
count = 1
while os.path.exists(new_file_path):
new_file_path = os.path.join(path, "New File {}.ngc".format(count))
count += 1
new_file = QFile(new_file_path)
new_file.open(QIODevice.ReadWrite)
@Slot()
def newFolder(self):
path = self.model.filePath(self.rootIndex())
new_name = 'New Folder'
count = 1
while os.path.exists(os.path.join(path, new_name)):
new_name = "New Folder {}".format(count)
count += 1
directory = QDir(path)
directory.mkpath(new_name)
directory.setPath(new_name)
@Slot()
@deprecated(replaced_by='newFolder',
reason='for consistency with newFile method name')
def createDirectory(self):
self.newFolder()
@Slot()
def deleteItem(self):
"""Delete the selected item (either a file or folder)."""
# ToDo: use Move2Trash, instead of deleting the file
index = self.selectionModel().currentIndex()
path = self.model.filePath(index)
if path:
file_info = QFileInfo(path)
if file_info.isFile():
if not self.ask_dialog("Do you wan't to delete the selected file?"):
return
q_file = QFile(path)
q_file.remove()
elif file_info.isDir():
if not self.ask_dialog("Do you wan't to delete the selected directory?"):
return
directory = QDir(path)
directory.removeRecursively()
@Slot()
@deprecated(replaced_by='deleteItem',
reason='because of unclear method name')
def deleteFile(self):
self.deleteItem()
@Slot(str)
def setRootPath(self, root_path):
"""Sets the currently displayed path."""
self.rootChanged.emit(root_path)
self.model.setRootPath(root_path)
self.setRootIndex(self.model.index(root_path))
return True
@Slot()
def viewParentDirectory(self):
"""View the parent directory of the current view."""
path = self.model.filePath(self.rootIndex())
file_info = QFileInfo(path)
directory = file_info.dir()
new_path = directory.absolutePath()
if os.path.ismount(path):
return
currentRoot = self.rootIndex()
self.model.setRootPath(new_path)
self.setRootIndex(currentRoot.parent())
self.rootChanged.emit(new_path)
@Slot()
@deprecated(replaced_by='viewParentDirectory')
def goUP(self):
self.viewParentDirecotry()
@Slot()
def viewHomeDirectory(self):
self.setRootPath(os.path.expanduser('~/'))
@Slot()
def viewNCFilesDirectory(self):
# ToDo: Make preset user definable
path = os.path.expanduser(self._nc_files_dir)
self.setRootPath(path)
@Slot()
def viewPresetDirectory(self):
# ToDo: Make preset user definable
preset = os.path.expanduser(self._nc_files_dir)
self.setRootPath(preset)
@Slot()
def doFileTransfer(self):
index = self.selectionModel().currentIndex()
path = self.model.filePath(index)
self.transferFileRequest.emit(path)
@Slot(str)
def transferFile(self, src_path):
dest_path = self.model.filePath(self.rootIndex())
src_file = QFile()
src_file.setFileName(src_path)
src_file_info = QFileInfo(src_path)
dst_path = os.path.join(dest_path, src_file_info.fileName())
src_file.copy(dst_path)
@Slot()
def getSelection(self):
"""Returns list of selected indexes, or None."""
selection = self.selection_model.selectedIndexes()
if len(selection) == 0:
return None
return selection
@Slot()
def getCurrentDirectory(self):
return self.model.rootPath()
@Property(TableType)
def tableType(self):
return self._table_type
@tableType.setter
def tableType(self, table_type):
self._table_type = table_type
if table_type == TableType.Local:
self.setRootPath(self.nc_file_dir)
else:
self.setRootPath('/media/')
@Property(str)
def hiddenColumns(self):
"""String of comma separated column numbers to hide."""
return self._hidden_columns
@hiddenColumns.setter
def hiddenColumns(self, columns):
try:
col_list = [int(c) for c in columns.split(',') if c != '']
except:
return False
self._hidden_columns = columns
header = self.horizontalHeader()
for col in range(4):
if col in col_list:
header.hideSection(col)
else:
header.showSection(col)
def ask_dialog(self, message):
box = QMessageBox.question(self.parent,
'Are you sure?',
message,
QMessageBox.Yes,
QMessageBox.No)
if box == QMessageBox.Yes:
return True
else:
return False
def rename_dialog(self, data_type):
text, ok_pressed = QInputDialog.getText(self.parent, "Rename", "New {} name:".format(data_type),
QLineEdit.Normal, "")
if ok_pressed and text != '':
return text
else:
return False
|
11587708
|
from abc import ABC
from dataclasses import dataclass
from typing import List, Dict
import torch.nn as nn
from torch import Tensor
from yacs.config import CfgNode
from fandak.core.datasets import GeneralBatch
from fandak.utils.torch import GeneralDataClass
@dataclass(repr=False)
class GeneralLoss(GeneralDataClass):
    """
    I assume that there is always the `main` attribute which is the loss that is
    going to be used for backpropagation.
    """

    # Scalar loss tensor used for backpropagation (see Model.get_backprop_loss).
    main: Tensor
@dataclass(repr=False)
class GeneralForwardOut(GeneralDataClass):
    """
    The general output of the forward pass.
    """
    # Marker base class: concrete models subclass this with their own fields.
    pass
class Model(nn.Module, ABC):
    """Abstract base model coupling a forward pass with a loss computation.

    Subclasses implement forward() and loss().
    """

    def __init__(self, cfg: CfgNode):
        super().__init__()
        self.cfg = cfg  # experiment configuration (yacs CfgNode)

    def get_params(self, original_lr: float) -> List[Dict]:
        # Single parameter group at the base LR; override for per-layer LRs.
        params = [{"params": self.parameters(), "lr": original_lr}]
        return params

    # noinspection PyMethodMayBeStatic
    def get_backprop_loss(self, loss: GeneralLoss) -> Tensor:
        # By convention the 'main' component is what gets backpropagated.
        return loss.main

    def forward(self, batch: GeneralBatch) -> GeneralForwardOut:
        raise NotImplementedError

    def loss(self, batch: GeneralBatch, forward_out: GeneralForwardOut) -> GeneralLoss:
        raise NotImplementedError
|
11587726
|
class BACFConfig:
    # Hyper-parameters for a BACF-style correlation-filter tracker.
    # NOTE(review): individual parameter meanings are inferred from common
    # correlation-filter tracker configs -- confirm against the tracker code.
    cell_size=4
    cell_selection_thresh=0.75**2
    search_area_shape='square'
    search_area_scale=5
    filter_max_area=50**2
    interp_factor=0.015
    output_sigma_factor=1./16
    interpolate_response=4
    newton_iterations=5
    number_of_scales=1
    scale_step=1.01
    admm_iterations=2
    admm_lambda=0.01

    class ScaleConfig:
        # Settings for the separate scale-estimation filter.
        learning_rate_scale = 0.015
        scale_sz_window = (128, 128)

    scale_config=ScaleConfig()
|
11587730
|
from pytwitcherapi import chat
class MyIRCClient(pytwitcherapi.IRCClient):
    """IRC client that prints every private message it receives."""

    # NOTE(review): only 'chat' is imported above; confirm an
    # 'import pytwitcherapi' exists upstream, or base on chat.IRCClient.
    def on_privmsg(self, connection, event):
        super(MyIRCClient, self).on_privmsg(connection, event)
        # Bug fix: 'print x' is Python-2-only syntax; the call form below works
        # identically on Python 2 and 3 for a single argument.
        print(chat.Message3.from_event(event))
|
11587731
|
# Base URL of the ESV Bible REST web service, API version 2.
ESV_API_BASE_URL = 'http://www.esvapi.org/v2/rest'
# Endpoint for retrieving a Bible passage by reference.
ESV_API_PASSAGE_QUERY_URL = '%s/passageQuery' % ESV_API_BASE_URL
|
11587762
|
import matplotlib.pylab as plt

# NOTE(review): 'model' is not defined in this snippet -- presumably a trained
# Keras-style model is in scope; confirm against the surrounding script.
convlayer = model.layers[0].get_weights()
# First kernel of the first layer's weights, squeezed of singleton dims.
c1 = convlayer[0].squeeze()[0]
# Min-max normalise to [0, 1], e.g. for display with plt.imshow.
c1 = (c1 - c1.min())/(c1.max() - c1.min())
|
11587780
|
from __future__ import print_function
import csv
import distutils
import shutil
import pandas as pd
import os
import re
from collections import defaultdict
import six
import sys
from Bio import SeqIO
from bracerlib.bracer_func import process_chunk, find_possible_alignments, extract_blast_info
import glob
import pdb
import json
def makeOutputDir(output_dir_path):
    """Create *output_dir_path* (including parents) if it does not exist.

    Uses makedirs(exist_ok=True) to avoid the check-then-create race of the
    previous exists()/mkdir() pair and to create missing intermediate dirs.
    """
    os.makedirs(output_dir_path, exist_ok=True)
def is_exe(fpath):
    """Return True if *fpath* is an existing regular file with execute permission."""
    return os.access(fpath, os.X_OK) and os.path.isfile(fpath)
def clean_file_list(file_list):
    """Strip directories and '.Trinity.fasta' suffixes; return the sorted names."""
    trinity_re = re.compile(r"(.+)\.Trinity\.fasta")
    cleaned = []
    for path in file_list:
        base = os.path.split(path)[1]
        match = trinity_re.search(base)
        cleaned.append(match.group(1) if match else base)
    return sorted(cleaned)
def get_filename_and_locus(name):
    """Split 'cell_RECEPTOR_LOCUS...' into [cell, 'RECEPTOR_LOCUS']."""
    parts = name.split("_")
    return [parts[0], "_".join(parts[1:3])]
def sort_locus_names(dictionary_to_sort):
    """Sort each value list of the dict (by reassignment) and return the dict.

    Replaces the Python-2-era six.iteritems() with the native items() view;
    assigning to existing keys while iterating a dict is safe.
    """
    for key, value in dictionary_to_sort.items():
        dictionary_to_sort[key] = sorted(value)
    return dictionary_to_sort
def load_IMGT_seqs(file):
    """Read a FASTA file and return a dict of {record_id: sequence_string}.

    Bug fix: mode 'rU' was deprecated since Python 3 and removed in 3.11;
    plain 'r' gives the same universal-newline behavior on Python 3.
    """
    seqs = {}
    with open(file, 'r') as fh:
        for record in SeqIO.parse(fh, 'fasta'):
            seqs[record.id] = str(record.seq)
    return seqs
def parse_assembled_file(output_dir, cell_name, assembled_file):
    """Creates unique sequence names for each sequence if already assembled
    sequences are provided as input to BraCeR Assemble.

    Each FASTA header is rewritten as '>TRINITY_DN0_c0_g0_i<N>' (N counting
    from 0) and sequence lines are copied through unchanged, writing to
    <output_dir>/Trinity_output/<cell_name>.fasta. No-op if *assembled_file*
    does not exist.
    """
    if not os.path.exists(assembled_file):
        return
    outfile = "{output_dir}/Trinity_output/{cell_name}.fasta".format(
        output_dir=output_dir, cell_name=cell_name)
    seq_count = 0
    seen_header = False
    # 'infile' avoids shadowing the builtin 'input'; one 'with' manages both files.
    with open(assembled_file, "r") as infile, open(outfile, "w") as outstream:
        for line in infile:
            if line.startswith(">"):
                seen_header = True
                outstream.write(">TRINITY_DN0_c0_g0_i{}\n".format(seq_count))
                seq_count += 1
            elif seen_header and len(line) > 0:
                outstream.write(line)
def parse_IgBLAST(loci, output_dir, cell_name, raw_seq_dir, species,
                  assembled_file, max_junc_len=100):
    """Parse IgBLAST output for each BCR locus and build a cell object.

    Loads IMGT germline segment sequences from *raw_seq_dir*, reads the
    per-locus (or single, for pre-assembled input) IgBLAST output files,
    and delegates alignment reconstruction to find_possible_alignments.
    """
    IMGT_seqs = dict()
    loci_for_segments = defaultdict(list)
    for locus in loci:
        seq_files = glob.glob(os.path.join(raw_seq_dir, "BCR_{}_*.fa".format(locus)))
        for f in seq_files:
            # Segment name is the file stem, e.g. 'BCR_H_V'.
            segment_name = os.path.splitext(os.path.split(f)[1])[0]
            IMGT_seqs[segment_name] = load_IMGT_seqs(f)
            loci_for_segments[segment_name.split("_")[2]].append(locus)
    locus_names = ["_".join(["BCR",x]) for x in loci]
    all_locus_data = defaultdict(dict)
    for locus in locus_names:
        # Pre-assembled input produces a single combined IgBLAST output file.
        if assembled_file is not None:
            file = "{output_dir}/IgBLAST_output/{cell_name}.IgBLASTOut".format(
                output_dir=output_dir, cell_name=cell_name)
        else:
            file = "{output_dir}/IgBLAST_output/{cell_name}_{locus}.IgBLASTOut".format(
                output_dir=output_dir, cell_name=cell_name, locus=locus)
        if os.path.isfile(file):
            igblast_result_chunks = split_igblast_file(file)
            for chunk in igblast_result_chunks:
                (query_name, chunk_details) = process_chunk(chunk)
                if query_name is not None:
                    # Strip an optional 'cell_id=' prefix from the query name.
                    if "cell_id=" in query_name:
                        query_name = query_name.split("=")[1]
                    all_locus_data[locus][query_name] = chunk_details
        else:
            all_locus_data[locus] = None
    cell = find_possible_alignments(all_locus_data, locus_names, cell_name, IMGT_seqs,
                                    output_dir, species, loci_for_segments,
                                    loci, max_junc_len, assembled_file)
    return (cell)
def parse_BLAST(loci, output_dir, cell_name, species, assembled_file):
    """Parses BLAST output from output files and writes formatted output to BLAST
    output summary files.

    For each locus, reads the XML results in
    ``<output_dir>/BLAST_output/<cell_name>_BCR_<locus>.xml`` and writes a
    per-query C-segment summary table to
    ``<output_dir>/BLAST_output/blastsummary_<locus>.txt``. ``species`` is
    accepted for interface compatibility but not used here.
    """
    for locus in loci:
        blast_dir = "BLAST_output"
        output_file = "{outdir}/{blast_dir}/blastsummary_{locus}.txt".format(
            outdir=output_dir, blast_dir=blast_dir, locus=locus)
        input_file = "{output_dir}/{blast_dir}/{cell_name}_BCR_{locus}.xml".format(
            output_dir=output_dir, blast_dir=blast_dir, cell_name=cell_name,
            locus=locus)
        with open(output_file, 'w') as outfile:
            outfile.write("------------------\n##{}##\n------------------\n\n#BCR_{}#\n\n".format(
                cell_name, locus))
            # Split result file into chunks corresponding to results for each query sequence.
            if not os.path.isfile(input_file):
                continue
            blast_result_chunks = split_blast_file(input_file)
            for chunk in blast_result_chunks:
                message = False
                for line_x in chunk:
                    line_x = line_x.strip()
                    if line_x.startswith("<Iteration_query-def>"):
                        line = line_x.split(">")[1]
                        blast_query_name = line.split("<")[0]
                        # NOTE(review): `assembled_file == True` never matches a
                        # path string, so in practice only the space check fires
                        # here — confirm whether `if assembled_file` was intended.
                        if assembled_file == True or " " in blast_query_name:
                            blast_query_name = blast_query_name.split()[0]
                    elif line_x.startswith("<Hsp_evalue>"):
                        evalue = extract_blast_info(line_x)
                        evalue = format(float(evalue), '.0e')
                    elif line_x.startswith("<Hit_accession>"):
                        C_segment = extract_blast_info(line_x)
                        # Bug fix: the original `if "C-REGION" or "CH1" in C_segment`
                        # was always true; test each substring explicitly so only
                        # C-REGION/CH1 accessions are truncated at the underscore.
                        if "C-REGION" in C_segment or "CH1" in C_segment:
                            C_segment = C_segment.split("_")[0]
                    elif line_x.startswith("<Hsp_bit-score>"):
                        bit_score = extract_blast_info(line_x)
                    elif line_x.startswith("<Hsp_query-from>"):
                        q_start = extract_blast_info(line_x)
                    elif line_x.startswith("<Hsp_query-to>"):
                        q_end = extract_blast_info(line_x)
                    elif line_x.startswith("<Hsp_hit-from>"):
                        s_start = extract_blast_info(line_x)
                    elif line_x.startswith("<Hsp_hit-to>"):
                        s_end = extract_blast_info(line_x)
                    elif line_x.startswith("<Iteration_query-len>"):
                        query_length = extract_blast_info(line_x)
                    elif line_x.startswith("<Hsp_align-len>"):
                        align_length = extract_blast_info(line_x)
                    elif line_x.startswith("<Hsp_gaps>"):
                        gaps = extract_blast_info(line_x)
                    elif line_x.startswith("<Hsp_identity>"):
                        identity = extract_blast_info(line_x)
                    elif line_x.startswith("<Iteration_message>No hits found"):
                        message = True
                        out_string = "##{blast_query_name}##\nNo C segment found\n\n".format(
                            blast_query_name=blast_query_name)
                        outfile.write(out_string)
                    # Create output string when reaching end of BLAST
                    # iteration result (marked by </Iteration>) and write
                    # to BLAST summary file
                    elif line_x.startswith("</Iteration>") and message is not True:
                        identity_pro = float(identity)/int(align_length)*100
                        identity_pro = format(identity_pro, '.2f')
                        mismatches = int(align_length) - int(identity)
                        # Account for reversed sequences: flip the query
                        # coordinates and swap subject start/end.
                        if int(s_start) > int(s_end):
                            blast_query_name = "reversed|" + blast_query_name
                            x, y = int(q_start), int(q_end)
                            q_start = int(query_length) - y + 1
                            q_end = int(query_length) - x + 1
                            s_start, s_end = s_end, s_start
                        intro_string = "##{}##\nC segment:\t{}\n\n".format(
                            blast_query_name, C_segment)
                        header_string = ("Segment\tquery_id\tsubject_id\t% identity\talignment length\t"
                                         "mismatches\tgap opens\tgaps\tq start\tq end\ts start\ts end\t"
                                         "evalue\tbit score\n")
                        out_string = ("C\t{blast_query_name}\t{C_segment}\t{identity_pro}\t{align_length}\t{mismatches}\tNA\t{gaps}\t{q_start}\t{q_end}\t{s_start}\t{s_end}\t{evalue}\t{bit_score}\n\n").format(
                            blast_query_name=blast_query_name,
                            C_segment=C_segment, identity_pro=identity_pro, align_length=align_length,
                            evalue=evalue, mismatches=mismatches, gaps=gaps, q_start=q_start,
                            q_end=q_end, s_start=s_start, s_end=s_end, bit_score=bit_score)
                        string_to_write = intro_string + header_string + out_string
                        outfile.write(string_to_write)
def split_igblast_file(filename):
    """Split an IgBLAST result file into per-query chunks.

    A new chunk starts at every line beginning with ``# IGBLASTN``; lines
    beginning with ``Total `` are dropped entirely.  Returns a list of
    chunks, each a list of right-stripped lines (the final, possibly empty,
    chunk is always included).
    """
    token = '# IGBLASTN'
    chunks = []
    current = []
    with open(filename) as handle:
        for raw in handle:
            stripped = raw.rstrip()
            if stripped.startswith("Total "):
                # Summary lines never belong to any chunk.
                continue
            if stripped.startswith(token) and current:
                # A header line closes the previous (non-empty) chunk.
                chunks.append(current)
                current = []
            current.append(stripped)
    chunks.append(current)
    return chunks
def split_blast_file(filename):
    """Split a BLAST XML result file into per-iteration chunks.

    A new chunk starts at every line beginning with ``<Iteration>``;
    ``Total queries`` summary lines are dropped.  Returns a list of chunks,
    each a list of right-stripped lines (the final chunk is always included).
    """
    token = '<Iteration>'
    chunks = []
    current = []
    with open(filename) as handle:
        for raw in handle:
            stripped = raw.rstrip()
            if stripped.startswith(token) and current:
                # An iteration marker closes the previous (non-empty) chunk.
                chunks.append(current)
                current = []
            if not stripped.startswith("Total queries"):
                current.append(stripped)
    chunks.append(current)
    return chunks
def check_binary(name, user_path=None):
    """Locate an executable either at ``user_path`` or on the PATH.

    Returns the full path to the executable.  Raises OSError when it cannot
    be found.  Note: when ``user_path`` is given, the PATH is deliberately
    not searched as a fallback.
    """
    def _executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    if user_path:
        if _executable(user_path):
            return user_path
    else:
        for directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(directory.strip('"'), name)
            if _executable(candidate):
                return candidate
    raise OSError("Required binary not found: {name}. Please add to PATH or specify location in config file."
                  .format(name=name))
def read_colour_file(filename, return_used_list=False, receptor_name=None):
    """Read a receptor/locus colour CSV into a nested mapping.

    Each line has the form
    ``receptor,locus,productive_colour,nonproductive_colour``.  Returns
    ``{receptor: {locus: (prod, nonprod)}}``; when ``return_used_list`` is
    true, also returns the set of productive colours seen (restricted to
    ``receptor_name`` when one is given).
    """
    colour_map = dict()
    used_colours = set()
    with open(filename) as handle:
        for raw in handle:
            receptor, locus, prod, nonprod = raw.rstrip().split(",")
            colour_map.setdefault(receptor, {})[locus] = (prod, nonprod)
            # Track productive colours, optionally for one receptor only.
            if receptor_name is None or receptor == receptor_name:
                used_colours.add(prod)
    if return_used_list:
        return colour_map, used_colours
    return colour_map
def write_colour_file(filename, colour_map):
    """Write a receptor/locus colour mapping out as CSV.

    Emits one ``receptor,locus,prod,nonprod`` line per locus, with receptors
    and loci in sorted order — the format ``read_colour_file`` expects.
    """
    with open(filename, 'w') as handle:
        for receptor in sorted(colour_map):
            loci = colour_map[receptor]
            for locus in sorted(loci):
                colours = loci[locus]
                handle.write("{},{},{},{}\n".format(receptor, locus, colours[0], colours[1]))
|
11587808
|
import pickle as pkl
import gzip
import numpy
import random
import math
import pandas as pd
from datetime import datetime
from datetime import timedelta
from scipy import stats
def delay(j, day):
    """Return the ISO date string *day* days before the ISO date string *j*."""
    shifted = datetime.strptime(j, '%Y-%m-%d') - timedelta(days=day)
    return shifted.strftime('%Y-%m-%d')
class TextIterator:
    """Simple Bitext iterator.

    Builds mini-batches of (news, delay1-news, delay2-news, label,
    technical) tuples from CSV inputs.  News rows are grouped by date; for
    each label date the iterator collects the previous day's news, news from
    ``delay1``..2 days back, news from ``delay2``..``delay1``+1 days back,
    and ``delay_tech`` days of technical-indicator rows.  Words are mapped
    to indices via the pickled vocabulary (index 1 acts as the UNK token).
    """
    def __init__(self, source, label, technical,
                 dict, delay1=3, delay2=7, delay_tech=5, types='title',
                 batch_size=32,
                 n_words=-1,
                 cut_word=False, cut_news=False,
                 shuffle=True, shuffle_sentence=False): # delay means how many days over the past
        """Load the news/label/technical CSVs and the pickled vocabulary.

        `source`/`label`/`technical` are CSV paths; `dict` is a pickle path
        (the parameter name shadows the builtin, kept for interface
        compatibility).  `n_words` > 0 caps the vocabulary size; `cut_word`
        caps words per sentence and `cut_news` caps sentences per day.
        NOTE(review): assumes `source` has 'date' and `label` has 'Date'
        index columns, and `technical` has a 'Date' column with indicator
        columns from position 8 onward — confirm against the data files.
        """
        self.source = pd.read_csv(source).set_index('date')
        self.source = self.source[types].groupby(self.source.index).apply(list).apply(pd.Series).fillna(
            '') # group together
        self.label = pd.read_csv(label).set_index('Date')
        self.technical = pd.read_csv(technical)
        with open(dict, 'rb') as f:
            self.dict = pkl.load(f)
        self.batch_size = batch_size
        self.n_words = n_words
        self.shuffle = shuffle
        self.shuffle_sentence = shuffle_sentence
        self.delay1 = delay1
        self.delay2 = delay2
        self.delay_tec = delay_tech # delay_tec = 1 means one day ago
        self.types = types
        self.end_of_data = False
        self.cut_word = cut_word if cut_word else float('inf') # cut the word
        self.cut_news = cut_news if cut_news else None # cut the sentence
        self.source_buffer = []
        self.source_d1_buffer = []
        self.source_d2_buffer = []
        self.label_buffer = []
        self.technical_buffer = []
        # Buffer is refilled k = batch_size * 20 label dates at a time.
        self.k = batch_size * 20
        self.index = 0
    def __iter__(self):
        return self
    def reset(self):
        # Rewind to the first label date (buffers refill lazily in __next__).
        # self.source.seek(0)
        # self.label.seek(0)
        self.index = 0
    def __next__(self):
        """Return the next batch as (source, source_d1, source_d2, label, technical).

        Refills the internal buffers with up to ``self.k`` label dates when
        empty, optionally length-sorts and batch-shuffles them, then pops
        ``batch_size`` items.  Raises StopIteration (after resetting) when
        the data is exhausted.
        """
        if self.end_of_data:
            self.end_of_data = False
            self.reset()
            raise StopIteration
        source = []
        source_d1 = []
        source_d2 = []
        label = []
        temp = []
        tempd1 = []
        tempd2 = []
        tech_final = []
        # day = (datetime.strptime(j, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d')
        # fill buffer, if it's empty
        assert len(self.source_buffer) == len(self.label_buffer), 'Buffer size mismatch!'
        if len(self.source_buffer) == 0:
            for j, i in enumerate(self.label.index.values[self.index:self.index + self.k]): # j for count i for value
                try:
                    # Previous day's news sentences, filtered by word count.
                    ss = list(filter(lambda x: self.cut_word > len(x.split()) > 0,
                                     self.source.loc[delay(i, 1)].values[:self.cut_news]))
                    # News per day over the delay1 window (oldest..2 days back).
                    d1 = list(list(filter(lambda x: self.cut_word > len(x.split()) > 0, i[:self.cut_news])) for i in
                              self.source.loc[delay(i, self.delay1):delay(i, 1 + 1)].values)
                    # News per day over the delay2 window (delay2..delay1+1 days back).
                    d2 = list(list(filter(lambda x: self.cut_word > len(x.split()) > 0, i[:self.cut_news])) for i in
                              self.source.loc[delay(i, self.delay2):delay(i, self.delay1 + 1)].values)
                    ll = self.label.loc[i].values
                    idx = self.technical.index[self.technical['Date'] == i][0]
                    ## 8 means the index of column, T is transpose
                    tec = self.technical.iloc[idx - self.delay_tec:idx, 8:].values
                except KeyError as e: # out of length
                    # Dates missing from any table are skipped with a warning.
                    print(i + ' ' + str(e))
                    continue
                self.source_buffer.append(ss)
                self.source_d1_buffer.append(d1)
                self.source_d2_buffer.append(d2)
                self.label_buffer.append(int(ll))
                self.technical_buffer.append(tec)
            if 'j' in locals():
                self.index += j + 1
            ##TODO delete useless
            if self.shuffle:
                # sort by target buffer
                tlen = numpy.array([len(t) for t in self.source_buffer])
                tidx = tlen.argsort()
                # argsort the index from low to high
                # shuffle mini-batch
                tindex = []
                ##Todo shuffle
                # Length-sort, then shuffle whole batch-sized groups so each
                # batch holds similar-length samples.
                small_index = list(range(int(math.ceil(len(tidx) * 1. / self.batch_size))))
                random.shuffle(small_index)
                for i in small_index:
                    if (i + 1) * self.batch_size > len(tidx):
                        tindex.extend(tidx[i * self.batch_size:])
                    else:
                        tindex.extend(tidx[i * self.batch_size:(i + 1) * self.batch_size])
                tidx = tindex
                _sbuf = [self.source_buffer[i] for i in tidx]
                _d1buf = [self.source_d1_buffer[i] for i in tidx]
                _d2buf = [self.source_d2_buffer[i] for i in tidx]
                _lbuf = [self.label_buffer[i] for i in tidx]
                _tech = [self.technical_buffer[i] for i in tidx]
                self.source_buffer = _sbuf
                self.source_d1_buffer = _d1buf
                self.source_d2_buffer = _d2buf
                self.label_buffer = _lbuf
                self.technical_buffer = _tech
                ##TODO delete useless
                del _sbuf, _d1buf, _d2buf, _lbuf
            # Tokenize: sentences become word lists, per day for d1/d2 windows.
            for i, d1, d2 in zip(self.source_buffer, self.source_d1_buffer, self.source_d2_buffer):
                dd1, dd2 = list(), list()
                temp.append([j.strip().split() for j in i]) # split words and save to array
                for day in d1:
                    sentence = (j.strip().split() for j in day)
                    dd1.append(list(sentence))
                tempd1.append(dd1)
                for day in d2:
                    sentence = (j.strip().split() for j in day)
                    dd2.append(list(sentence))
                tempd2.append(dd2)
                # tempd2.append([j.strip().split() for day in d2 for j in day])
            self.source_buffer = temp
            self.source_d1_buffer = tempd1
            self.source_d2_buffer = tempd2
            ##TODO delete useless
            del temp, tempd1, tempd2
            ##TODO check if the contains enough day's new
            '''
            for j, i in enumerate(self.source_d1_buffer):
                if len(i) != self.delay1 - 1:
                    print(j)
            for j, i in enumerate(self.source_d2_buffer):
                if len(i) != self.delay2 - self.delay1:
                    print(j)
            '''
            ##TODO #check if it is a list
            '''
            if isinstance(self.source_buffer[0][0], list) is not True:
                for i in self.source_buffer:
                    temp.append([j.strip().split() for j in i])
                self.source_buffer = temp
            '''
        if len(self.source_buffer) == 0 or len(self.label_buffer) == 0:
            self.end_of_data = False
            self.reset()
            raise StopIteration
        try:
            # actual work here
            '''for i in self.source_buffer:
                source_temp = []
                for j in i: # len(source_buffer) # read from source file and map to word index
                    j.insert(0, '_BOS_')
                    j.append('_EOS_')
                    ss = [self.dict[w] if w in self.dict else 1 for w in j]
                    if self.n_words > 0:
                        ss = [w if w < self.n_words else 1 for w in ss]
                    # read label
                    source_temp.append(ss)
                source.append(source_temp)
                label.append(self.label_buffer.pop(0))
                if len(source) >= self.batch_size or len(label) >= self.batch_size:
                    break
            del self.source_buffer[0:self.batch_size]''' # doesn't make any freaky sense
            while True:
                # read from source file and map to word index
                source_temp, source_d1_temp, source_d2_temp = [], [], []
                try:
                    j = self.source_buffer.pop(0) # 1 day before
                    d1j = self.source_d1_buffer.pop(0) # delay1 day before
                    d2j = self.source_d2_buffer.pop(0) # delay2 day before
                except IndexError:
                    break
                ##TODO do shuffle
                if self.shuffle_sentence:
                    numpy.random.shuffle(j)
                for i in j: # deal with 1 day before
                    #i.insert(0, '_BOS_')
                    #i.append('_EOS_')
                    ss = [self.dict[w] if w in self.dict else 1 for w in i] # 1 means _UNK_
                    if self.n_words > 0:
                        ss = [w if w < self.n_words else 1 for w in ss] # 1 means _UNK_
                    source_temp.append(ss)
                for a in d1j: # deal with delay1
                    if self.shuffle_sentence:
                        numpy.random.shuffle(a)
                    _sd1 = []
                    for i in a:
                        #i.insert(0, '_BOS_')
                        #i.append('_EOS_')
                        ss = [self.dict[w] if w in self.dict else 1 for w in i] # 1 means _UNK_
                        if self.n_words > 0:
                            ss = [w if w < self.n_words else 1 for w in ss] # 1 means _UNK_
                        _sd1.append(ss)
                    source_d1_temp.append(_sd1)
                for a in d2j: # deal with delay2
                    if self.shuffle_sentence:
                        numpy.random.shuffle(a)
                    _sd2 = []
                    for i in a:
                        #i.insert(0, '_BOS_')
                        #i.append('_EOS_')
                        ss = [self.dict[w] if w in self.dict else 1 for w in i] # 1 means _UNK_
                        if self.n_words > 0:
                            ss = [w if w < self.n_words else 1 for w in ss] # 1 means _UNK_
                        _sd2.append(ss)
                    source_d2_temp.append(_sd2)
                # read label
                ll = self.label_buffer.pop(0)
                tech_tech = self.technical_buffer.pop(0)
                source.append(source_temp)
                source_d1.append(source_d1_temp)
                source_d2.append(source_d2_temp)
                label.append(ll)
                tech_final.append(tech_tech)
                ##TODO delete useless
                ##del source_temp, source_d1_temp, source_d2_temp
                if len(source) >= self.batch_size or len(source_d1) >= self.batch_size or len(
                        source_d2) >= self.batch_size or len(label) >= self.batch_size:
                    break
        except IOError:
            self.end_of_data = True
        if len(source) <= 0 or len(label) <= 0:
            self.end_of_data = False
            self.reset()
            raise StopIteration
        return source, source_d1, source_d2, label, numpy.array(tech_final)
def main():
    """Smoke-test TextIterator over the train/validate/test splits.

    Builds three iterators over the ding_new_1 data set and prints each
    batch's index, length and technical-indicator array shape.
    """
    # Bug fix: the original passed `delay_tec=1`, but TextIterator's keyword
    # parameter is named `delay_tech`, so every call raised TypeError.
    train = TextIterator('../ding_new_1/train.csv',
                         '../ding_new_1/train_label.csv',
                         '../ding_new_1/technical.csv',
                         dict='../ding_new_1/vocab_cased_title.pickle',
                         delay1=3,
                         delay2=7,
                         delay_tech=1,
                         types='title',
                         n_words=43920,
                         batch_size=32, cut_word=False, cut_news=False,
                         shuffle=True)  # cut word: max length of the words in sentence
    validate = TextIterator('../ding_new_1//validate.csv',
                            '../ding_new_1/validate_label.csv',
                            '../ding_new_1/technical.csv',
                            dict='../ding_new_1/vocab_cased_title.pickle',
                            delay1=3,
                            delay2=7,
                            delay_tech=1,
                            types='title',
                            n_words=43920,
                            batch_size=32, cut_word=False, cut_news=False,
                            shuffle=True)
    # NOTE(review): "test" reuses the validate split — presumably a
    # placeholder; confirm whether a real test.csv exists.
    test = TextIterator('../ding_new_1/validate.csv',
                        '../ding_new_1/validate_label.csv',
                        '../ding_new_1/technical.csv',
                        dict='../ding_new_1/vocab_cased_title.pickle',
                        delay1=3,
                        delay2=7,
                        delay_tech=1,
                        types='title',
                        n_words=43920,
                        batch_size=32, cut_word=False, cut_news=False,
                        shuffle=True)
    # cut news: max news number per day
    for i, (x, xd1, xd2, y, tech) in enumerate(train):
        print("train", i, 'length', len(x), tech.shape)
    for i, (x, xd1, xd2, y, tech) in enumerate(validate):
        print("validate", i, 'length', len(x), tech.shape)
    for i, (x, xd1, xd2, y, tech) in enumerate(test):
        print("test", i, 'length', len(x), tech.shape)


if __name__ == '__main__':
    main()
|
11587844
|
from django import forms
__all__ = ('RatingField',)
class RatingField(forms.ChoiceField):
    """Form field for rating choices; currently identical to ChoiceField."""
    pass
|
11587863
|
class herb_cleaning_config():
    """Static resource configuration for the herb cleaning script.

    Each list holds Windows-style paths to template images used for screen
    matching, grouped by the kind of UI element they represent.  Empty lists
    are placeholders kept so every category is always defined.
    (Trailing semicolons from the original were removed; values unchanged.)
    """
    SCRIPT_NAME = "HERB CLEANING SCRIPT 0.3v"
    BUTTON = []
    CHATBOX = []
    FUNCTION = []
    # Bank interface templates (close button, "bank all" button).
    INTERFACE = [
        ".\\resources\\interface\\bank\\close_bank.png",
        ".\\resources\\interface\\bank\\bank_all.png",
    ]
    # Item templates: grimy herb in bank, grimy herb and clean herb in inventory.
    ITEM = [
        ".\\resources\\item\\bank\\grimy_avantoe.png",
        ".\\resources\\item\\inventor\\grimy_avantoe.png",
        ".\\resources\\item\\inventor\\avantoe.png",
    ]
    NPC = []
    MAP = []
    OBJECT = []
|
11587893
|
from typing import Dict, Tuple
from starfish.core.codebook.codebook import Codebook
from starfish.core.intensity_table.decoded_intensity_table import DecodedIntensityTable
from starfish.core.spots.DecodeSpots.trace_builders import build_traces_sequential
from starfish.core.types import Axes, Features, SpotFindingResults
from ._base import DecodeSpotsAlgorithm
class SimpleLookupDecoder(DecodeSpotsAlgorithm):
    """
    Decode spots by assigning the target value of a spot to the corresponding target value of the
    round/ch it was found in. This method only makes sense to use in non mulitplexed sequential
    assays where each r/ch pair only has one target assigned to it.

    Parameters
    ----------
    codebook : Codebook
        Contains codes to decode IntensityTable
    """

    def __init__(self, codebook: Codebook):
        self.codebook = codebook

    def run(self, spots: SpotFindingResults, *args) -> DecodedIntensityTable:
        """
        Decode spots by looking up the associated target value for the round and ch each spot is
        in.

        Parameters
        ----------
        spots: SpotFindingResults
            A Dict of tile indices and their corresponding measured spots

        Returns
        -------
        DecodedIntensityTable :
            IntensityTable decoded and appended with Features.TARGET and values.
        """
        # Build a (round, channel) -> target lookup from the codebook; any
        # nonzero codebook entry claims the pair for that target.
        round_ch_to_target: Dict[Tuple, str] = {}
        for target in self.codebook[Features.TARGET]:
            for round_label in self.codebook[Axes.ROUND.value]:
                for ch_label in self.codebook[Axes.CH.value]:
                    if self.codebook.loc[target, round_label, ch_label]:
                        round_ch_to_target[(int(round_label), int(ch_label))] = str(target.values)
        # Stamp each tile's spots with the looked-up target ('nan' if unmapped).
        for r_ch_index, results in spots.items():
            results.spot_attrs.data[Features.TARGET] = round_ch_to_target.get(r_ch_index, 'nan')
        return DecodedIntensityTable(build_traces_sequential(spots))
|
11587905
|
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from flowket.callbacks.monte_carlo import TensorBoardWithGeneratorValidationData, \
default_wave_function_stats_callbacks_factory
from flowket.evaluation import evaluate
from flowket.layers import LogSpaceComplexNumberHistograms
from flowket.machines import ConvNetAutoregressive2D
from flowket.machines.ensemble import make_2d_obc_invariants
from flowket.operators import Ising
from flowket.optimization import VariationalMonteCarlo, loss_for_energy_minimization
from flowket.samplers import AutoregressiveSampler
# Variational Monte Carlo on a 4x4 transverse-field Ising model using an
# autoregressive conv-net wave function.
hilbert_state_shape = [4, 4]
inputs = Input(shape=hilbert_state_shape, dtype='int8')
convnet = ConvNetAutoregressive2D(inputs, depth=5, num_of_channels=32, weights_normalization=False)
predictions, conditional_log_probs = convnet.predictions, convnet.conditional_log_probs
# Two views of the same network: amplitudes for the loss, conditional
# log-probabilities for autoregressive sampling.
model = Model(inputs=inputs, outputs=predictions)
conditional_log_probs_model = Model(inputs=inputs, outputs=conditional_log_probs)
batch_size = 128
steps_per_epoch = 500
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
model.summary()
# Transverse-field Ising Hamiltonian with open boundary conditions (pbc=False).
operator = Ising(h=3.0, hilbert_state_shape=hilbert_state_shape, pbc=False)
sampler = AutoregressiveSampler(conditional_log_probs_model, batch_size)
monte_carlo_generator = VariationalMonteCarlo(model, operator, sampler)
# true_ground_state_energy is used only for the relative-error metric.
callbacks = default_wave_function_stats_callbacks_factory(monte_carlo_generator, true_ground_state_energy=-50.18662388277671)
# workers=0 / max_queue_size=0 keep sampling on the main thread.
model.fit_generator(monte_carlo_generator.to_generator(), steps_per_epoch=steps_per_epoch, epochs=2, callbacks=callbacks,
                    max_queue_size=0, workers=0)
print('evaluate normal model')
print(evaluate(monte_carlo_generator.to_generator(), steps=200, callbacks=callbacks,
               keys_to_progress_bar_mapping={'energy/energy': 'energy', 'energy/relative_error': 'relative_error'}))
print('evaluate invariant model')
# Average the wave function over lattice symmetries (open-BC invariants)
# before re-evaluating the energy.
evaluation_inputs = Input(shape=hilbert_state_shape, dtype='int8')
invariant_model = make_2d_obc_invariants(evaluation_inputs, model)
monte_carlo_generator = VariationalMonteCarlo(invariant_model, operator, sampler)
callbacks = default_wave_function_stats_callbacks_factory(monte_carlo_generator, true_ground_state_energy=-50.18662388277671)
print(evaluate(monte_carlo_generator.to_generator(), steps=200, callbacks=callbacks,
               keys_to_progress_bar_mapping={'energy/energy': 'energy', 'energy/relative_error': 'relative_error'}))
|
11587932
|
from PyQt4 import QtCore, QtGui
import sys
import gettext
#Utf-8 Encoding generated from Qt Designer
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # Bindings without QString (PyQt4 API v2): plain str is already unicode.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Fallback when UnicodeUTF8 is unavailable: translate without an encoding arg.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class MainWindow(QtGui.QMainWindow):
    """Main application window with an English/Irish language toggle."""
    def __init__(self, parent=None):
        super(MainWindow, self).__init__()
        #Calling the Slides Widget class and the MainWindow setup
        self.setupUi(self)
    def setupUi(self, MainWindow):
        """Build the widgets; ``MainWindow`` is the window being populated (here: self)."""
        MainWindow.resize(1269, 688)
        self.label_4 = QtGui.QLabel(self)
        self.label_4.setGeometry(QtCore.QRect(110, 30, 191, 21))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Lucida Sans"))
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.label_4.setFont(font)
        # Language buttons: clicking re-installs the gettext catalog and
        # re-applies all translated strings.
        self.pushButton_EN = QtGui.QPushButton(self)
        self.pushButton_EN.setGeometry(QtCore.QRect(1000, 30, 75, 23))
        self.pushButton_EN.clicked.connect(lambda: self.enTranslate(MainWindow))
        self.pushButton_IE = QtGui.QPushButton(self)
        self.pushButton_IE.setGeometry(QtCore.QRect(1100, 30, 75, 23))
        self.pushButton_IE.clicked.connect(lambda: self.ieTranslate(MainWindow))
        self.enTranslate(MainWindow)
    def enTranslate(self, MainWindow):
        """Switch the UI language to English."""
        self.translate('en', MainWindow)
    def ieTranslate(self, MainWindow):
        """Switch the UI language to Irish."""
        self.translate('ie', MainWindow)
    def translate(self, lang, MainWindow):
        """Install the gettext catalog for ``lang`` and refresh all widget text."""
        self.messages = gettext.translation('messages', localedir='locale', languages=[lang])
        self.messages.install()
        self.retranslateUi(MainWindow)
    def retranslateUi(self, MainWindow):
        """Re-apply translated strings to every user-visible widget."""
        MainWindow.setWindowTitle(_translate("MainWindow", "Erasmus", None))
        self.pushButton_EN.setText(_translate("MainWindow", _("English"), None))
        self.pushButton_IE.setText(_translate("MainWindow", _("Irish"), None))
        self.label_4.setText(_translate("MainWindow", _("Don\'t have an account?"), None))
if __name__ == "__main__":
    # Launch the Qt event loop and show the main window.
    app = QtGui.QApplication(sys.argv)
    Win = MainWindow()
    Win.show()
    sys.exit(app.exec_())
|
11587937
|
import numpy as np
import matplotlib.pyplot as plt
import os
from math import pi
# Define a data type for the simdata structure
# NOTE(review): field order/widths must match the binary record layout
# written by the reproduceKane1982_sim executable — confirm against its source.
simdata = np.dtype([('t', np.float64),
                    ('alpha', np.float64),
                    ('beta', np.float64),
                    ('gamma', np.float64),
                    ('w1', np.float64),
                    ('w2', np.float64),
                    ('w3', np.float64),
                    ('delta', np.float64),
                    ('alpha1', np.float64),
                    ('alpha2', np.float64),
                    ('alpha3', np.float64),
                    ('ke', np.float64)])
# Rebuild and run the C simulation, then load its binary output.
# os.system with fixed strings (rm/make) is POSIX-only but interpolates no
# user input.
os.system('rm -rf ./datafile.dat')
os.system('make')
os.system('./reproduceKane1982_sim') # run the simulation
data = np.fromfile('datafile.dat', dtype=simdata) # read the data
# Orientation angles, converted from radians to degrees.
plt.figure()
plt.subplot(211)
plt.plot(data['t'], data['delta']*180./pi, label=r'$\delta$')
plt.subplot(212)
plt.plot(data['t'], data['gamma']*180./pi, label=r'$\gamma$')
plt.title('Orientation')
plt.legend(loc=0)
# Body-frame angular velocity components.
plt.figure()
plt.plot(data['t'], data['w1'], label=r'$\omega_1$')
plt.plot(data['t'], data['w2'], label=r'$\omega_2$')
plt.plot(data['t'], data['w3'], label=r'$\omega_3$')
plt.title('Angular velocity')
plt.legend(loc=0)
# Angular acceleration components.
plt.figure()
plt.plot(data['t'], data['alpha1'], label=r'$\alpha_1$')
plt.plot(data['t'], data['alpha2'], label=r'$\alpha_2$')
plt.plot(data['t'], data['alpha3'], label=r'$\alpha_3$')
plt.title('Angular acceleration')
plt.legend(loc=0)
# Kinetic energy (should be conserved for a torque-free body).
plt.figure()
plt.plot(data['t'], data['ke'], label=r'ke')
plt.title('Kinetic energy')
plt.legend(loc=0)
plt.show()
|
11587971
|
from typing import Optional, List, Set
from figcli.commands.command_context import CommandContext
from figcli.models.defaults.defaults import CLIDefaults, CliCommand
from figcli.models.role import Role
from figgy.models.run_env import RunEnv
class HelpContext(CommandContext):
    """
    Contextual information for HelpCommands, including _what_ command resources were passed in. Help commands
    often don't have standard "resource" or "command" blocks, instead they may ONLY have --optional parameters
    """

    def __init__(self, resource: Optional[CliCommand], command: Optional[CliCommand],
                 options: Optional[Set[CliCommand]], run_env: Optional[RunEnv], defaults: Optional[CLIDefaults],
                 role: Optional[Role]):
        """Capture the parsed help invocation on top of the base command context."""
        # The base context tracks run environment, resource and defaults.
        super().__init__(run_env, resource, defaults=defaults)
        # Help-specific state, kept alongside the base context's copy of `resource`.
        self.role = role
        self.options = options
        self.command = command
        self.resource = resource
|
11587980
|
from datetime import datetime
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import DateTime
from sqlalchemy import Integer
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import registry
from sqlalchemy.orm import Session
from sqlalchemy.sql.functions import now
# Registry that maps the declarative class below to its table.
mapper_registry: registry = registry()
# SQLite engine; echo=True logs every emitted SQL statement.
e = create_engine("sqlite:///database.db", echo=True)
@mapper_registry.mapped
class A:
    """Minimal mapped class with two DateTime columns.

    ``date_time`` carries a ``Mapped[datetime]`` annotation while
    ``date_time2`` is unannotated, contrasting the two declaration styles.
    """
    __tablename__ = "a"
    id = Column(Integer, primary_key=True)
    date_time: Mapped[datetime] = Column(DateTime())
    date_time2 = Column(DateTime())
# Create the table, then insert one row whose timestamps are the SQL now()
# function — evaluated by the database at INSERT time, not in Python.
mapper_registry.metadata.create_all(e)
with Session(e) as s:
    a = A()
    a.date_time = now()
    a.date_time2 = now()
    s.add(a)
    s.commit()
|
11587981
|
import time
import pytest
# pytest currently explodes with monkeypatching time.time
@pytest.mark.xfail(run=False)
class TestTimeObject(object):
    """Tests for the interpreter's Time object (requires the `space` fixture)."""
    def test_now(self, space, monkeypatch):
        # Freeze time.time so Time.now.to_f is deterministic.
        monkeypatch.setattr(time, "time", lambda: 342.1)
        w_secs = space.execute("return Time.now.to_f")
        assert space.float_w(w_secs) == 342.1
    def test_subtraction(self, space, monkeypatch):
        # Successive Time.now calls see 18 then 12, so the difference is 6.
        # NOTE(review): `.next` is the Python 2 iterator method; under
        # Python 3 this would need `iter([18, 12]).__next__`.
        monkeypatch.setattr(time, "time", iter([18, 12]).next)
        w_secs = space.execute("return Time.now - Time.now")
        assert space.float_w(w_secs) == 6
|
11587994
|
import pytest
from aao.spiders import SpiderWilliamhill
pytestmark = pytest.mark.williamhill
# Known league fixtures: internal country/league keys plus the bookmaker's
# URL slug and on-page title used to verify page loads.
COMPETITIONS = [
    # country, _country, league, _league, page_name
    ['england', 'england', 'premier_league', 'English-Premier-League', 'English Premier League'],
    ['england', 'england', 'efl_championship', 'English-Championship', 'English Championship'],
    ['italy', 'italy', 'serie_a', 'Italian-Serie-A', 'Italian Serie A'],
    ['spain', 'spain', 'la_liga', 'Spanish-La-Liga-Primera', 'Spanish La Liga Primera'],
    ['france', 'france', 'ligue_1', 'French-Ligue-1', 'French Ligue 1'],
    ['france', 'france', 'ligue_2', 'French-Ligue-2', 'French Ligue 2'],
    ['germany', 'germany', 'bundesliga_1', 'German-Bundesliga', 'German Bundesliga'],
    ['germany', 'germany', 'bundesliga_2', 'German-Bundesliga-2', 'German Bundesliga 2'],
]
class TestSpider():
    """Placeholder spider-level tests."""
    def test_soccer(self):
        pass
class TestSoccer():
    """End-to-end tests for the William Hill soccer scraper.

    One browser-backed spider is shared per class via the fixture; all tests
    run against the competition selected below.
    """
    # German Bundesliga 2 (see COMPETITIONS layout above).
    competition = COMPETITIONS[7]
    @pytest.fixture(scope='class')
    def spider(self):
        # Shared spider pointed at the chosen competition; quit on teardown.
        spider = SpiderWilliamhill()
        spider.soccer.country = self.competition[0]
        spider.soccer._country = self.competition[1]
        spider.soccer.league = self.competition[2]
        spider.soccer._league = self.competition[3]
        yield spider
        spider.quit()
    def test_request_page(self, spider):
        spider.soccer._request_page()
        league = spider.browser.find_element_by_class_name('header-title')
        assert league.text == self.competition[4]
    def test_request_page_no_data_found(self, spider):
        # Unknown competition keys must raise; restore the real keys after.
        with pytest.raises(KeyError, match='No data found for'):
            spider.soccer._country = 'foo_country'
            spider.soccer._league = 'foo_league'
            spider.soccer._request_page()
        header = spider.browser.find_element_by_class_name('header-title')
        assert header.text == 'Competitions'
        spider.soccer._country = self.competition[1]
        spider.soccer._league = self.competition[3]
    def test_change_market(self, spider):
        spider.soccer._request_page()
        spider.soccer._change_market('Double Chance')
        xpath = '//div[@class="sp-o-market__title"]/b'
        market = spider.browser.find_element_by_xpath(xpath).text
        assert market == 'Double Chance'
    def test_get_rows(self, spider):
        spider.soccer._request_page()
        rows = spider.soccer._get_rows()
        assert rows
    # parse
    def test_parse_datetime(self, spider):
        spider.soccer._request_page()
        rows = spider.soccer._get_rows()
        for row in rows:
            datetime_str = str(spider.soccer._parse_datetime(row))
            if row[0][4:6].isdigit():
                assert row[0][4:6] in datetime_str
            assert row[1] in datetime_str
    def test_parse_teams(self, spider):
        spider.soccer._request_page()
        rows = spider.soccer._get_rows()
        teams = spider.soccer.teams(
            spider.soccer.country, spider.soccer.league, full=True)
        for row in rows:
            home_team, away_team = spider.soccer._parse_teams(row)
            row[2], row[3] = row[2].split(' v ')
            assert home_team in teams.values() and row[2] in teams
            assert away_team in teams.values() and row[3] in teams
    def test_parse_teams_team_not_in_table(self, spider):
        row = ['Sun 20 Jan', '14:00', 'foo_home_team v foo_away_team']
        msg = ('foo_away_team not in bookmaker teams table. '
               'foo_home_team not in bookmaker teams table. '
               'Tables need an upgrade, notify the devs.')
        with pytest.raises(KeyError, match=msg):
            spider.soccer._parse_teams(row)
    # markets
    market_funcs = ['_events_full_time_result', '_under_over',
                    '_both_teams_to_score', '_double_chance']
    @pytest.mark.parametrize('market_func', market_funcs)
    def test_market(self, spider, market_func):
        # Every market scraper must return one odds entry per event.
        spider.soccer._request_page()
        events, odds = getattr(spider.soccer, market_func)()
        assert len(events) == len(odds)
    # events + odds
    def test_events_odds(self, spider):
        events, odds = spider.soccer._events_odds()
        assert events
        assert odds
    def test_events_odds_events_only(self, spider):
        events = spider.soccer._events_odds(events_only=True)
        assert events
|
11588010
|
from typing import List
from usaspending_api.disaster.v2.views.elasticsearch_base import (
ElasticsearchDisasterBase,
ElasticsearchLoansPaginationMixin,
)
from usaspending_api.references.models import Cfda
from usaspending_api.search.v2.elasticsearch_helper import get_summed_value_as_float
class CfdaLoansViewSet(ElasticsearchLoansPaginationMixin, ElasticsearchDisasterBase):
    """
    This route takes DEF Codes and Query text and returns Loans of CFDA.
    """

    endpoint_doc = "usaspending_api/api_contracts/contracts/v2/disaster/cfda/loans.md"

    required_filters = ["def_codes", "query", "_loan_award_type_codes"]
    query_fields = ["cfda_title.contains", "cfda_number.contains"]
    agg_key = "cfda_number.keyword"
    is_loans = True

    def build_elasticsearch_result(self, info_buckets: List[dict]) -> List[dict]:
        """Turn aggregation buckets into response rows enriched with CFDA metadata."""
        # Fetch every referenced CFDA record in one query, keyed by program number.
        bucket_keys = [b.get("key") for b in info_buckets]
        cfda_by_number = {
            row["program_number"]: row
            for row in Cfda.objects.filter(program_number__in=bucket_keys).values()
        }
        rows = []
        for b in info_buckets:
            cfda = cfda_by_number.get(b.get("key"), {})
            url = cfda.get("url")
            row = {
                "id": cfda.get("id"),
                "code": cfda.get("program_number"),
                "description": cfda.get("program_title"),
                "award_count": int(b.get("doc_count", 0)),
                # "None;" is a placeholder value in the source data, not a link.
                "resource_link": url if url != "None;" else None,
                "cfda_federal_agency": cfda.get("federal_agency"),
                "cfda_objectives": cfda.get("objectives"),
                "cfda_website": cfda.get("website_address"),
                "applicant_eligibility": cfda.get("applicant_eligibility"),
                "beneficiary_eligibility": cfda.get("beneficiary_eligibility"),
            }
            # Sum columns live under nested/filtered_aggs; the loan face value
            # additionally sits one level deeper under reverse_nested.
            for column in self.sum_column_mapping:
                aggs = b.get("nested", {}).get("filtered_aggs", {})
                if column == "face_value_of_loan":
                    aggs = aggs.get("reverse_nested", {})
                row[column] = get_summed_value_as_float(aggs, self.sum_column_mapping[column])
            rows.append(row)
        return rows
|
11588052
|
import ALPHA3
import random, re
LOCAL_PATH = __path__[0]
# Single-byte filler instructions that are harmless while the named base
# register must be preserved (x86 encodes INC r32 as 0x40 + register index,
# so 0x42 = INC EDX and 0x43 = INC EBX).
NOP = {
    'eax': chr(0x43), # nop: 43 = INC EBX
    'ebx': chr(0x42), # nop: 42 = INC EDX
    'ecx': chr(0x43), # nop: 43 = INC EBX
    'edx': chr(0x43), # nop: 43 = INC EBX
    'esi': chr(0x43), # nop: 43 = INC EBX
    'edi': chr(0x43), # nop: 43 = INC EBX
};
# Byte repeated after the patcher; each executed copy increments EDX once.
COUNT = chr(0x42) # 42 = INC EDX
# Pattern for "countslide:<register>+<offset>~<nopslide size>" specs; the
# register alternation is interpolated from NOP's keys at match time.
REG = r"^countslide:(?P<R>%s)\+(?P<I>(?:0x)?[0-9A-Fa-f]+)~(?P<V>(?:0x)?[0-9A-Fa-f]+)$"
def encodeShellcode(base_address, shellcode):
    """Encode shellcode as an ASCII mixed-case "countslide".

    base_address is a string of the form "countslide:REG+offset~uncertainty"
    (see REG). The returned payload is laid out as: nopslide, patcher stub,
    count bytes, encoded shellcode. Raises Exception if base_address does not
    match the expected pattern.
    """
    base_address_parsed = re.match(REG % '|'.join(NOP.keys()), base_address, re.IGNORECASE)
    if base_address_parsed is None:
        raise Exception("Cannot parse \"%s\"." % base_address)
    base_address_register = base_address_parsed.group("R")
    # Single-byte NOP appropriate for this base register (see NOP table).
    nop = NOP[base_address_register.lower()];
    base_address_offset = ALPHA3.toInt(base_address_parsed.group("I"))
    nopslide_size = ALPHA3.toInt(base_address_parsed.group("V"))
    # Pre-built patcher stub specialised for this base register, loaded from disk.
    patcher = ALPHA3.io.ReadFile("[%s+i32] - EDX.bin" % base_address_register, LOCAL_PATH)
    patch_offset = nopslide_size + len(patcher)
    ALPHA3.PrintVerboseStatusLine("Return address", "%s+%X" % (base_address_register, base_address_offset))
    ALPHA3.PrintVerboseStatusLine("Nopslide size", "%X" % (nopslide_size,))
    ALPHA3.PrintVerboseStatusLine("Patcher size", "%X" % (len(patcher),))
    # NOTE(review): this reports nopslide_size again, duplicating the line
    # above; the assembled countslide is 2*nopslide_size + len(patcher) bytes —
    # confirm which value was intended here.
    ALPHA3.PrintVerboseStatusLine("Countslide size", "%X" % (nopslide_size,))
    ALPHA3.PrintVerboseStatusLine("Patch address", "%s+%X+%X" % (base_address_register, base_address_offset, patch_offset))
    # Both offsets are encoded into printable ASCII before being injected into
    # the patcher stub.
    base_address_offset_encoded = ALPHA3.encode.dwx_IMUL_30_XOR_dwy(
        base_address_offset, "encoded base address offset",
        ALPHA3.charsets.valid_charcodes["ascii"]["mixedcase"])
    patch_offset_encoded = ALPHA3.encode.dwx_IMUL_30_XOR_dwy(
        patch_offset, "encoded patch offset",ALPHA3.charsets.valid_charcodes["ascii"]["mixedcase"])
    patcher = ALPHA3.encode.injectCodes(patcher, base_address_offset_encoded +
        patch_offset_encoded)
    # Layout: nopslide, then the (patched) patcher, then the counting bytes.
    countslide = (nop * nopslide_size + patcher + COUNT * nopslide_size)
    return countslide + ALPHA3.x86.ascii.mixedcase.rm32.encodeShellcode("edx", shellcode);
# Self-tests consumed by the ALPHA3 framework: for each supported base
# register, build cases that place the shellcode and the initial EIP at random
# offsets inside the slide, with the register pointing at varying distances
# before the start of our memory.
tests = {};
for reg in NOP.keys():
    # Shellcode can start anywhere in the first 0x100 bytes of the heap block:
    shellcode_start_offset = random.randrange(4,0x104)
    # Code execution can start anywhere in bytes 0x100-0x200 of the heap block:
    code_execution_start_offset = random.randrange(0x104,0x204)
    # register points 0-0x100 bytes before the start of our memory
    for offset in [0, 0x80, 0x100]:
        tests["countslide:%s+0x%X~0x200" % (reg, offset)] = [
            "[$+%X]=ascii:%%shellcode%%" % shellcode_start_offset,
            "eip=$+%X" % code_execution_start_offset,
            "[$]=value:$+%X" % code_execution_start_offset,
            "%s=$-%X" % (reg, offset) # [reg+X] = [$] = code_execution_start_offset
        ];
# Encoder registration consumed by the ALPHA3 framework: the base-address
# pattern and sample strings, a display name, the encoding entry point, and
# the self-tests built above.
encoders = [{
    "base address": REG,
    "base address samples": [
        "countslide:EAX+offset~uncertainty",
        "countslide:EBX+offset~uncertainty",
        "countslide:ECX+offset~uncertainty",
        "countslide:EDX+offset~uncertainty",
        "countslide:ESI+offset~uncertainty",
        "countslide:EDI+offset~uncertainty",
    ],
    "name": r"AscMix Countslide (rm32)",
    "function": encodeShellcode,
    "tests": tests,
}]
|
11588055
|
import json
from random import sample
import os
from sklearn.metrics import precision_recall_curve, average_precision_score, accuracy_score
from api.batch_processing.postprocessing import load_api_results
from data_management.cct_json_utils import CameraTrapJsonUtils
from visualization import visualization_utils
#%% Empty and non-empty classification at image level
def empty_accuracy_image_level(gt_db_indexed, detection_res, threshold=0.5):
    """Compute image-level empty/non-empty classification accuracy.

    An image is predicted non-empty when its max_detection_conf is at or above
    `threshold`; ground truth is non-empty when any annotation on the image
    carries a 'bbox' field.

    Args:
        gt_db_indexed: IndexedJsonDb of the ground truth bbox json.
        detection_res: dict of image_id to image entry in the API output file's
            `images` field, keyed by the same image_id as the ground truth db.
        threshold: confidence below which an image is considered empty.

    Returns:
        Accuracy as computed by sklearn's accuracy_score.
    """
    labels = []
    predictions = []
    for image_id, annotations in gt_db_indexed.image_id_to_annotations.items():
        confidence = detection_res[image_id]['max_detection_conf']
        predictions.append(1 if confidence >= threshold else 0)
        labels.append(1 if any('bbox' in a for a in annotations) else 0)
    return accuracy_score(labels, predictions)
def empty_precision_recall_image_level(gt_db_indexed, detection_res):
    """
    For empty/non-empty classification based on max_detection_conf in detection entries.

    Ground truth is non-empty (1) when any annotation on an image has a 'bbox'
    field; the detector's max_detection_conf is used directly as the score.

    Args:
        gt_db_indexed: IndexedJsonDb of the ground truth bbox json.
        detection_res: dict of image_id to image entry in the API output file's `images` field. The key needs to be
            the same image_id as those in the ground truth json db.

    Returns:
        precisions, recalls, thresholds (confidence levels), average precision
    """
    labels = []
    scores = []
    for image_id, annotations in gt_db_indexed.image_id_to_annotations.items():
        scores.append(detection_res[image_id]['max_detection_conf'])
        labels.append(1 if any('bbox' in a for a in annotations) else 0)
    print('Length of gt and pred:', len(labels), len(scores))
    precisions, recalls, thresholds = precision_recall_curve(labels, scores)
    average_precision = average_precision_score(labels, scores)
    return precisions, recalls, thresholds, average_precision
#%% Empty and non-empty classification at sequence level
def is_gt_seq_non_empty(annotations, empty_category_id):
    """
    True if there are animals etc, False if empty.

    Raises Exception when the annotations carry no category information.
    """
    categories = {a['category_id'] for a in annotations}
    if not categories:
        raise Exception('No category information in annotation entry.')
    if len(categories) == 1:
        # A sequence is empty only when its sole category is the empty one.
        return categories != {empty_category_id}
    return True
def pred_seq_max_conf(detector_output_images_entries):
    """
    Surface the max_detection_conf field, include detections of all classes.
    """
    return max(entry['max_detection_conf'] for entry in detector_output_images_entries)
def get_number_empty_seq(gt_db_indexed):
    """Print how many ground-truth sequences are empty versus the total count."""
    seq_id_to_annotations = CameraTrapJsonUtils.annotations_groupby_image_field(gt_db_indexed, image_field='seq_id')
    empty_cat_id = gt_db_indexed.cat_name_to_id['empty']
    non_empty_flags = [
        is_gt_seq_non_empty(seq_annotations, empty_cat_id)
        for seq_annotations in seq_id_to_annotations.values()
    ]
    total = len(non_empty_flags)
    num_empty = total - sum(non_empty_flags)
    print('There are {} sequences, {} are empty, which is {}%'.format(total, num_empty, 100 * num_empty / total))
def empty_accuracy_seq_level(gt_db_indexed, detector_output_path, file_to_image_id,
                             threshold=0.5, visualize_wrongly_classified=False, images_dir=''):
    """ Ground truth label is empty if the fine-category label on all images in this sequence are "empty"
    Args:
        gt_db_indexed: an instance of IndexedJsonDb containing the ground truth
        detector_output_path: path to a file containing the detection results in the API output format
        file_to_image_id: see load_api_results.py - a function to convert image_id
        threshold: threshold between 0 and 1 below which an image is considered empty
        visualize_wrongly_classified: True if want to visualize 5 sequences where the predicted
            classes don't agree with gt
        images_dir: directory where the 'file' field in the detector output is rooted at. Relevant only if
            visualize_wrongly_classified is true
    Returns:
        accuracy (sequence-level), gt labels per sequence (bool non-empty), predicted max
        confidence per sequence, and the list of sequence ids that were evaluated
    """
    # TODO move detector_output_path specific code out so that this function evaluates only on classification results (confidences)
    gt_seq_id_to_annotations = CameraTrapJsonUtils.annotations_groupby_image_field(gt_db_indexed, image_field='seq_id')
    pred_seq_id_to_res = load_api_results.api_results_groupby(detector_output_path, gt_db_indexed,
                                                              file_to_image_id)
    gt_seq_level = []
    pred_seq_level = []
    empty_category_id_in_gt = gt_db_indexed.cat_name_to_id['empty']
    # evaluate on sequences that are present in both gt and the detector output file
    gt_sequences = set(gt_seq_id_to_annotations.keys())
    pred_sequences = set(pred_seq_id_to_res.keys())
    diff = gt_sequences.symmetric_difference(pred_sequences)
    print('Number of sequences not in both gt and pred: {}'.format(len(diff)))
    intersection_sequences = list(gt_sequences.intersection(pred_sequences))
    for seq_id in intersection_sequences:
        # Sequence-level prediction score is the max detection confidence over
        # all images in the sequence.
        gt_seq_level.append(is_gt_seq_non_empty(gt_seq_id_to_annotations[seq_id], empty_category_id_in_gt))
        pred_seq_level.append(pred_seq_max_conf(pred_seq_id_to_res[seq_id]))
    # Binarize the per-sequence confidence using the supplied threshold.
    pred_class = [0 if max_conf < threshold else 1 for max_conf in pred_seq_level]
    accuracy = accuracy_score(gt_seq_level, pred_class)
    if visualize_wrongly_classified:
        show_wrongly_classified_seq(pred_seq_id_to_res, intersection_sequences, gt_seq_level, pred_class, images_dir)
    return accuracy, gt_seq_level, pred_seq_level, intersection_sequences
def show_wrongly_classified_seq(pred_seq_id_to_res, seq_ids, gt_seq_level, pred_binary_seq_level, images_dir):
    """Visualize a random sample (up to 5) of sequences where prediction disagrees with ground truth.

    Args:
        pred_seq_id_to_res: dict of seq_id to list of detector output entries for that sequence
        seq_ids: sequence ids, aligned with the two label lists below
        gt_seq_level: ground truth labels per sequence
        pred_binary_seq_level: predicted binary labels per sequence
        images_dir: directory the 'file' field in the detector output is rooted at
    """
    wrongly_classified_seqs = [
        (seq_id, gt, pred)
        for seq_id, gt, pred in zip(seq_ids, gt_seq_level, pred_binary_seq_level)
        if gt != pred
    ]
    # random.sample raises ValueError when asked for more items than the
    # population holds, so cap the sample size at the number of misclassified
    # sequences (previously this crashed when there were fewer than 5).
    num_to_sample = min(5, len(wrongly_classified_seqs))
    for seq_id, gt, pred in sample(wrongly_classified_seqs, num_to_sample):
        print('Ground truth is {}, predicted class is {}, seq_id {}.'.format(gt, pred, seq_id))
        predicted_res = pred_seq_id_to_res[seq_id]
        predicted_res_files = [os.path.join(images_dir, item['file']) for item in predicted_res]
        visualization_utils.show_images_in_a_row(predicted_res_files)
#%% Utilities
def find_precision_at_recall(precision, recall, thresholds, recall_level=0.9):
    """ Returns the precision and threshold at a specified level of recall.

    Args:
        precision: List of precisions for each confidence
        recall: List of recalls for each confidence, paired with items in the `precision` list
        thresholds: List of confidence levels, paired with the two lists above
        recall_level: A float between 0 and 1.0, the level of recall to retrieve precision at.

    Returns:
        (precision, threshold) at the first point where recall drops below recall_level;
        (0.0, 0.0) when inputs are missing or recall never drops below recall_level.
    """
    if precision is None or recall is None:
        print('precision or recall is None')
        return 0.0, 0.0
    for p, r, t in zip(precision, recall, thresholds):
        if r < recall_level:
            return p, t
    # Previously this fell through and implicitly returned None, which broke
    # callers unpacking a pair; return the same sentinel as the missing-input
    # case instead.
    return 0.0, 0.0
def make_detection_res(results_path, file_prefix=''):
    """ Make the detection result into a dictionary of file : result entry
    Args:
        results_path: path to output of API containing the detection results
        file_prefix: this prefix will be taken out of the result entry 'file' field to be consistent
            with the 'file_name' field in a CCT formatted json DB.
    Returns:
        A dictionary of file : API result entry
    """
    with open(results_path) as f:
        res = json.load(f)
    detection_res = {}
    for i in res['images']:
        file_name = i['file']
        # str.split('') raises ValueError, so the default file_prefix='' used
        # to crash here; only strip when a non-empty prefix is given.
        if file_prefix:
            file_name = file_name.split(file_prefix)[1]
        # Drop the .jpg/.JPG extension (and anything after it).
        file_name = file_name.split('.jpg')[0].split('.JPG')[0]
        detection_res[file_name] = i  # all detections on that image is in this dict
    return detection_res
def get_gt_db(gt_db_path):
    """ Load the CCT formatted DB and index it.
    Args:
        gt_db_path: path to the json DB.
    Returns:
        An IndexedJsonDb object
    """
    with open(gt_db_path) as f:
        gt_db = json.load(f)
    # Bug fix: the module only imports CameraTrapJsonUtils from
    # data_management.cct_json_utils, so the bare name `cct_json_utils`
    # raised NameError here. Import the class locally instead.
    from data_management.cct_json_utils import IndexedJsonDb
    return IndexedJsonDb(gt_db)
|
11588062
|
import pickle
import argparse
from pathlib import Path
from rdkit import Chem
from molbart.tokeniser import MolEncTokeniser
from molbart.data.datasets import Chembl, MolOptDataset
# Default locations of the optional extra-token vocabulary files.
MOL_OPT_TOKENS_PATH = "mol_opt_tokens.txt"
PROP_PRED_TOKENS_PATH = "prop_pred_tokens.txt"
# Number of <UNUSED_i> placeholder tokens reserved in the vocabulary.
NUM_UNUSED_TOKENS = 200
# SMILES tokenisation regex. Declared as a raw string so backslashes reach the
# regex engine verbatim; the previous non-raw literal relied on Python leaving
# invalid escape sequences (e.g. "\[") untouched, which is deprecated. The
# string value is unchanged ("\\\\" in the old literal equals r"\\" here).
REGEX = r"\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9]"
def build_mol_dataset(args):
    """Load the Chembl molecule dataset from the path given on the command line."""
    return Chembl(args.data_path)
def build_mol_opt_dataset(args):
    """Load the molecular-optimisation dataset from the path given on the command line."""
    return MolOptDataset(args.mol_opt_data_path)
def read_extra_tokens(paths):
    """Read newline-separated tokens from each existing file in paths.

    Paths that do not point at a file are silently skipped; blank lines are
    dropped. Returns a flat list of all tokens read, in order.
    """
    collected = []
    for path in paths:
        token_file = Path(path)
        if not token_file.is_file():
            continue
        lines = token_file.read_text().split("\n")
        tokens = [line for line in lines if line != ""]
        print(f"Read {len(tokens)} tokens from {path}")
        collected.extend(tokens)
    return collected
def build_unused_tokens(num_tokens):
    """Return placeholder tokens <UNUSED_0> ... <UNUSED_{num_tokens-1}>."""
    return [f"<UNUSED_{idx}>" for idx in range(num_tokens)]
def build_tokeniser(smiles, regex, extra_tokens):
    """Construct a MolEncTokeniser from SMILES strings plus extra vocabulary tokens."""
    return MolEncTokeniser.from_smiles(smiles, regex, extra_tokens=extra_tokens)
def write_tokeniser(args, tokeniser):
    """Pickle the tokeniser to args.tokeniser_path, creating parent directories as needed."""
    write_path = Path(args.tokeniser_path)
    write_path.parent.mkdir(parents=True, exist_ok=True)
    # Use a context manager so the handle is closed even if pickling fails
    # (the previous open/close pair leaked the handle on error).
    with write_path.open("wb") as file_handle:
        pickle.dump(tokeniser, file_handle)
def main(args):
    """Build a tokeniser from the molecule dataset plus extra token files and save it."""
    print("Reading molecule dataset...")
    mol_dataset = build_mol_dataset(args)
    print("Completed reading dataset.")
    print("Reading extra tokens...")
    # Extra tokens come from optional text files; missing files are skipped.
    paths = [args.mol_opt_tokens_path, args.prop_pred_tokens_path]
    extra_tokens = read_extra_tokens(paths)
    # Reserve placeholder tokens so the vocabulary can be extended later
    # without re-tokenising.
    unused_tokens = build_unused_tokens(NUM_UNUSED_TOKENS)
    print("Completed reading extra tokens.")
    print("Constructing SMILES strings...")
    mol_smiles = [Chem.MolToSmiles(mol_dataset[idx]) for idx in range(len(mol_dataset))]
    print("Completed SMILES construction.")
    print("Building tokeniser...")
    tokeniser = build_tokeniser(mol_smiles, REGEX, extra_tokens + unused_tokens)
    print("Completed building tokeniser.")
    print("Writing tokeniser...")
    # NOTE(review): saves via save_vocab rather than the pickle-based
    # write_tokeniser helper defined above, which is otherwise unused —
    # confirm which output format is intended.
    tokeniser.save_vocab(args.tokeniser_path)
    print("Complete.")
if __name__ == "__main__":
    # Command-line entry point: build the tokeniser from a dataset and
    # optional extra-token files, then write it to --tokeniser_path.
    parser = argparse.ArgumentParser("Script for training the tokeniser on a dataset.")
    parser.add_argument("--data_path", type=str)
    parser.add_argument("--mol_opt_tokens_path", type=str, default=MOL_OPT_TOKENS_PATH)
    parser.add_argument("--prop_pred_tokens_path", type=str, default=PROP_PRED_TOKENS_PATH)
    parser.add_argument("--tokeniser_path", type=str)
    args = parser.parse_args()
    main(args)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.