max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 7 115 | max_stars_count int64 101 368k | id stringlengths 2 8 | content stringlengths 6 1.03M |
|---|---|---|---|---|
NLP/2-NaiveBayes_N-gram/language_detector/language_detector.py | excelsimon/AI | 119 | 110836 | <reponame>excelsimon/AI
# -*- coding:utf-8 -*-
import re
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
class LanguageDetector():
    """Naive Bayes language detector over character n-gram counts."""

    def __init__(self, classifier=None):
        # Create a fresh MultinomialNB per instance. The original used a
        # mutable default argument (classifier=MultinomialNB()), which is
        # evaluated once and shared across every LanguageDetector instance.
        self.classifier = MultinomialNB() if classifier is None else classifier
        self.vectorizer = CountVectorizer(
            lowercase=True,
            analyzer='char_wb',   # char n-grams bounded at word edges
            ngram_range=(1, 2),
            max_features=1000,
            preprocessor=self._remove_noise
        )

    def _remove_noise(self, document):
        """Strip URLs, @mentions and #hashtags from *document*."""
        # Raw strings avoid Python 3 invalid-escape warnings for '\@'/'\#'.
        noise_pattern = re.compile('|'.join([r'http\S+', r'\@\w+', r'\#\w+']))
        clean_text = re.sub(noise_pattern, "", document)
        return clean_text.strip()

    def features(self, x):
        """Vectorize an iterable of documents into n-gram count features."""
        return self.vectorizer.transform(x)

    def fit(self, x, y):
        """Fit the vectorizer vocabulary on *x*, then train the classifier."""
        self.vectorizer.fit(x)
        self.classifier.fit(self.features(x), y)

    def predict(self, x):
        """Predict the language label for a single document string."""
        return self.classifier.predict(self.features([x]))

    def score(self, x, y):
        """Mean accuracy on documents *x* against labels *y*."""
        return self.classifier.score(self.features(x), y)
# Each CSV line is "<sentence>,<xx>" where <xx> is a 2-letter language
# code; strip()[:-3] drops the trailing comma plus code, strip()[-2:]
# keeps just the code.
# A context manager guarantees the file handle is closed (the original
# used an explicit open()/close() pair).
with open('language_detector.csv') as data_f:
    lines = data_f.readlines()
dataset = [(line.strip()[:-3], line.strip()[-2:]) for line in lines]
x, y = zip(*dataset)  # x: all sentences, y: matching language labels
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
language_detector = LanguageDetector()
language_detector.fit(x_train, y_train)
print(language_detector.score(x_test, y_test))
print(language_detector.predict('This is an english sentence'))
"""
output:
0.977941176471
['en']
"""
|
extrabacon-2.0/improved/shellcode_8_4(6)5.py | JS-Burns/CVE-2016-6366 | 171 | 110852 | ##
## this file autogenerated
## 8.4(6)5
##
# Memory offsets for ASA version 8.4(6)5 (autogenerated).
# NOTE(review): these dotted-quad values look like anonymized/placeholder
# data rather than real address byte strings -- verify before use.
jmp_esp_offset = "192.168.3.11"
saferet_offset = "172.16.17.32"
fix_ebp = "72"
pmcheck_bounds = "0.176.88.9"
pmcheck_offset = "96.186.88.9"
pmcheck_code = "192.168.3.11"
admauth_bounds = "0.32.8.8"
admauth_offset = "240.33.8.8"
admauth_code = "172.16.31.10"
# "8.4(6)5" = ["192.168.3.11","172.16.17.32","72","0.176.88.9","172.16.58.3","192.168.3.11","0.32.8.8","240.33.8.8","172.16.31.10"], |
tests/utils/stac_io_mock.py | jisantuc/pystac | 130 | 110879 | from typing import Any, Union
from unittest.mock import Mock
import pystac
class MockStacIO(pystac.StacIO):
    """Creates a mock that records StacIO calls for testing and allows
    clients to replace StacIO functionality, all within a context scope.
    """

    def __init__(self) -> None:
        self.mock = Mock()

    def read_text(
        self, source: Union[str, pystac.Link], *args: Any, **kwargs: Any
    ) -> str:
        # Record the call on the mock, then delegate to the real default IO.
        self.mock.read_text(source)
        default_io = pystac.StacIO.default()
        return default_io.read_text(source)

    def write_text(
        self, dest: Union[str, pystac.Link], txt: str, *args: Any, **kwargs: Any
    ) -> None:
        # Record the call on the mock, then delegate to the real default IO.
        self.mock.write_text(dest, txt)
        default_io = pystac.StacIO.default()
        default_io.write_text(dest, txt)
|
entangle/http.py | radiantone/entangle | 102 | 110889 | <filename>entangle/http.py
"""
http.py - Module that provides http oriented decorators
"""
from functools import partial
import requests
def request(function=None,
            timeout=None,
            url=None,
            method='GET',
            sleep=None):
    """
    Decorator that fetches *url* over HTTP and passes the response body to
    the wrapped function in place of its keyword arguments.

    Supports use both as a bare ``@request`` and as ``@request(url=...)``.

    :param function: the wrapped function when used without arguments
    :param timeout: NOTE(review): currently unused -- confirm intent
    :param url: URL to request when the decorated function is called
    :param method: HTTP method; only 'GET' is implemented here
    :param sleep: NOTE(review): currently unused -- confirm intent
    :return: the decorated function (or the decorator itself)
    """
    def decorator(func):
        def wrapper(f_func):
            # Build http request function here, get result
            # call func with result
            def invoke_request(_func, **kwargs):
                def make_request(url, method, data):
                    # Keyword arguments of the call become query parameters.
                    if method == 'GET':
                        response = requests.get(url=url, params=data)
                        return response.content
                    # Other HTTP methods are not implemented: yields None.
                    return None

                response = make_request(url, method, kwargs)
                return _func(response)

            # partial() binds the wrapped function as the first argument.
            pfunc = partial(invoke_request, f_func)
            pfunc.__name__ = func.__name__
            return pfunc
        return wrapper(func)

    if function is not None:
        # Bare @request usage: decorate immediately.
        return decorator(function)
    # @request(...) usage: return the decorator for later application.
    return decorator
|
ikalog/utils/image_loader.py | fetus-hina/IkaLog | 285 | 110908 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import cv2
import numpy as np
from PIL import Image
def imread(filename):
    """Read an image file into a BGR ndarray, or return None if missing.

    Decodes via cv2.imdecode on raw bytes rather than cv2.imread so that
    paths containing non-ASCII characters load correctly on Windows.
    """
    if not os.path.exists(filename):
        return None
    # Context manager guarantees the handle is closed (the original used
    # an explicit open()/close() pair).
    with open(filename, 'rb') as f:
        img_bytes = f.read()
    # np.frombuffer replaces the deprecated np.fromstring.
    img = cv2.imdecode(np.frombuffer(img_bytes, dtype='uint8'), 1)
    return img
|
SimGeneral/DataMixingModule/python/supplementary/ReconstructionLocalCosmics_cff.py | ckamtsikis/cmssw | 852 | 110910 | import FWCore.ParameterSet.Config as cms
#
# tracker
#
from RecoLocalTracker.Configuration.RecoLocalTracker_Cosmics_cff import *
from RecoTracker.Configuration.RecoTrackerP5_cff import *
from RecoVertex.BeamSpotProducer.BeamSpot_cff import *
from RecoTracker.Configuration.RecoTrackerBHM_cff import *
from RecoTracker.DeDx.dedxEstimators_Cosmics_cff import *
#
# calorimeters
#
from RecoLocalCalo.Configuration.RecoLocalCalo_Cosmics_cff import *
from RecoEcal.Configuration.RecoEcalCosmics_cff import *
#
# muons
#
from RecoLocalMuon.Configuration.RecoLocalMuonCosmics_cff import *
from RecoMuon.Configuration.RecoMuonCosmics_cff import *
# primary vertex
#from RecoVertex.Configuration.RecoVertexCosmicTracks_cff import *
# local reco
# Tracker local reconstruction, seeded by the offline beam spot.
trackerCosmics = cms.Sequence(offlineBeamSpot*trackerlocalreco)
# Calorimeter local reconstruction.
caloCosmics = cms.Sequence(calolocalreco)
# Muon local reconstruction, with and without drift correction.
muonsLocalRecoCosmics = cms.Sequence(muonlocalreco+muonlocalrecoNoDrift)
# Combined local-reconstruction sequence for cosmics data.
localReconstructionCosmics = cms.Sequence(trackerCosmics*caloCosmics*muonsLocalRecoCosmics)
reconstructionCosmics = cms.Sequence(localReconstructionCosmics)
|
fusion/affineFace.py | bj80heyue/One_Shot_Face_Reenactment | 183 | 110920 | from fusion.points2heatmap import *
from fusion.calcAffine import *
from fusion.warper import warping as warp
import matplotlib.pyplot as plt
from fusion.parts2lms import parts2lms
import time
from tqdm import *
import random
import multiprocessing
import sys
def gammaTrans(img, gamma):
    """Apply gamma correction to *img* through a 256-entry lookup table."""
    levels = np.arange(256) / 255.0
    table = np.round(np.power(levels, gamma) * 255.0).astype(np.uint8)
    return cv2.LUT(img, table)
def erodeAndBlur(img, kernelSize=21, blurSize=21):
    """Erode *img* with a square kernel, then Gaussian-blur the result.

    img: float32 ndarray (mask-like image).
    """
    side = int(kernelSize)
    kernel = np.ones((side, side), np.uint8)
    eroded = cv2.erode(img, kernel)
    return cv2.GaussianBlur(eroded, (blurSize, blurSize), math.sqrt(blurSize))
def affineface(img, src_pt, dst_pt, heatmapSize=256, needImg=True):
    """Align a face with a single affine fit over the five-sense landmarks.

    src_pt/dst_pt: ndarray of [x, y] pairs in [0.0, 1.0], gaze included.
    Returns the aligned points, plus the warped image when needImg is True.
    """
    # Naive mode: one affine transform fitted over all five facial parts
    # (curve 0 is the face boundary and is excluded from the fit).
    curves_src, _ = points2curves(src_pt.copy())
    curves_dst, _ = points2curves(dst_pt.copy())
    src_stacked = np.vstack(curves_src[1:])
    dst_stacked = np.vstack(curves_dst[1:])
    affine_mat = calAffine(src_stacked, dst_stacked)
    pt_aligned = affinePts(affine_mat, src_pt * 255.0) / 255.0
    if not needImg:
        return pt_aligned
    img_aligned = affineImg(img, affine_mat)
    return pt_aligned, img_aligned
def affineface_parts(img, src_pt, dst_pt):
    """Align each facial part with its own affine transform, then warp.

    src_pt/dst_pt: ndarray of [x, y] pairs in [0.0, 1.0].
    Returns (landmarks normalized to [0.0, 1.0], warped image).
    """
    curves_src, _ = points2curves(src_pt.copy())
    curves_dst, _ = points2curves(dst_pt.copy())  # [0,255]
    parts_src = curves2parts(curves_src)
    parts_dst = curves2parts(curves_dst)  # [0,255]
    partsList = []
    # Align every part except the last two, which are kept from the source
    # unchanged.
    for i in range(len(parts_src) - 2):
        affine_mat = calAffine(parts_src[i], parts_dst[i])
        parts_aligned = affinePts(affine_mat, parts_src[i])  # [0,255]
        partsList.append(parts_aligned)
    partsList.append(parts_src[-2])
    partsList.append(parts_src[-1])
    lms = parts2lms(partsList)
    # bound: the face boundary (first 33 landmarks) follows the target pose.
    # NOTE(review): the boundary is scaled by 256 while the return value is
    # normalized by 255 -- confirm this one-off scale mismatch is intended.
    lms[:33] = dst_pt[:33] * 256
    res = warp(img, src_pt[:106] * 256, lms[:106])
    return lms / 255.0, res
def lightEye(img_ref, lms_ref, img_gen, lms_gen, ratio=0.1):
    """Blend the reference eye/gaze regions into the generated face image.

    img_ref/img_gen: HxWx3 images; lms_ref/lms_gen: [x, y] pairs in [0, 1].
    ratio: weight kept from the generated image inside the eye mask.
    Returns (blended image, eye mask scaled for visualization, warped
    reference-eye image).
    """
    # get curves
    curves_ref, _ = points2curves(lms_ref.copy())
    curves_gen, _ = points2curves(lms_gen.copy())
    parts_ref = curves2parts(curves_ref)
    parts_gen = curves2parts(curves_gen)  # [0,255]
    # get rois
    gaze_ref = curves2gaze(curves_ref)
    gaze_gen = curves2gaze(curves_gen)
    # Mask out each reference eye region (left, then right).
    #img_gazeL = np.dot(gaze_ref[0], img_ref)
    img_gazeL = multi(img_ref, gaze_ref[0])
    #img_gazeR = np.dot(gaze_ref[1] , img_ref)
    img_gazeR = multi(img_ref, gaze_ref[1])
    # Warp each reference eye onto the generated eye position; the last two
    # parts are the left/right eye parts.
    affine_mat = calAffine(parts_ref[-2], parts_gen[-2])
    img_gazeL_affined = affineImg(img_gazeL, affine_mat)
    affine_mat = calAffine(parts_ref[-1], parts_gen[-1])
    img_gazeR_affined = affineImg(img_gazeR, affine_mat)
    img_ref = img_gazeL_affined + img_gazeR_affined
    # Soften the gaze-mask edges before blending.
    mask = gaze_gen[0] + gaze_gen[1]
    mask = erodeAndBlur(mask, 5, 5)
    # Per-channel alpha blend: outside the mask keep img_gen; inside, mix
    # img_gen (weight ratio) with the warped reference eyes (1-ratio).
    R = img_gen[:,:,0] * (1-mask) + mask* (img_gen[:,:,0]*ratio + img_ref[:,:,0]*(1-ratio))
    G = img_gen[:,:,1] * (1-mask) + mask* (img_gen[:,:,1]*ratio + img_ref[:,:,1]*(1-ratio))
    B = img_gen[:,:,2] * (1-mask) + mask* (img_gen[:,:,2]*ratio + img_ref[:,:,2]*(1-ratio))
    res = np.stack([R,G,B]).transpose((1,2,0))
    # seg is the mask scaled into [0, 127] for visualization/debugging.
    seg = mask
    seg = seg * 127
    return res, seg, img_ref
def multi(img, mask):
    """Multiply every channel of an HxWx3 image by a 2-D mask.

    Broadcasting over a trailing channel axis replaces the original
    per-channel split / stack / transpose and keeps the identical dtype
    promotion and result values.
    """
    return img * mask[:, :, None]
def fusion(img_ref, lms_ref, img_gen, lms_gen, ratio=0.2):
    """Blend the reference skin into the generated face image.

    img*: ndarray(np.uint8) in [0, 255]; lms*: [x, y] pairs in [0, 1].
    ratio: weight of the generated image inside the skin mask.
    Returns (blended image, reference segmentation, generated segmentation).
    """
    #img*: ndarray(np.uint8) [0,255]
    #lms*: ndarray , [...,[x,y],...] in [0,1]
    #ratio: weight of gen
    #--------------------------------------------
    #get curves
    curves_ref, _ = points2curves(lms_ref.copy())
    curves_gen, _ = points2curves(lms_gen.copy())
    #get rois
    roi_ref = curves2segments(curves_ref)
    roi_gen = curves2segments(curves_gen)
    #get seg: sum the per-part masks and normalize to [0, 255]
    seg_ref = roi_ref.sum(0)
    seg_gen = roi_gen.sum(0)
    seg_ref = seg_ref / seg_ref.max() * 255
    seg_gen = seg_gen / seg_gen.max() * 255
    #get skin mask: whole-face region minus the facial-feature regions
    skin_src = roi_ref[0] - roi_ref[2:].max(0)
    skin_gen = roi_gen[0] - roi_gen[2:].max(0)
    #blur edge so the blend has no hard seams
    skin_src = erodeAndBlur(skin_src, 7, 7)
    skin_gen = erodeAndBlur(skin_gen, 7, 7)
    #fusion: blend only where both skin masks agree
    skin = skin_src * skin_gen
    R = img_gen[:,:,0] * (1-skin) + skin * (img_gen[:,:,0]*ratio + img_ref[:,:,0]*(1-ratio))
    G = img_gen[:,:,1] * (1-skin) + skin * (img_gen[:,:,1]*ratio + img_ref[:,:,1]*(1-ratio))
    B = img_gen[:,:,2] * (1-skin) + skin * (img_gen[:,:,2]*ratio + img_ref[:,:,2]*(1-ratio))
    res = np.stack([R,G,B]).transpose((1,2,0))
    return res, seg_ref, seg_gen
def loaddata(head, path_lms, flag=256, num=50000):
    """Load (image path, landmarks) pairs from a landmark list file.

    head: directory prefix joined onto each image name.
    path_lms: text file with alternating lines -- image name, then
        whitespace-separated landmark coordinates.
    flag: 256 means coordinates are already in [0, 255]; any other value
        subtracts a 64-pixel border offset first.
    num: maximum number of entries to load.
    Returns a list of (path, lms) with lms an (N, 2) array in [0, 1].
    """
    # Context manager guarantees the handle is closed (the original opened
    # the file and never closed it).
    with open(path_lms, 'r') as fin:
        data = fin.read().splitlines()
    res = []
    for i in tqdm(range(min(len(data) // 2, num))):
        name = data[2 * i]
        path = os.path.join(head, name)
        lms = list(map(float, data[2 * i + 1].split()))
        if flag == 256:
            lms = np.array(lms).reshape(-1, 2) / 255.0
        else:
            lms = (np.array(lms).reshape(-1, 2) - 64) / 255.0
        res.append((path, lms))
    return res
def gray2rgb(img):
    """Replicate a single-channel image into an HxWx3 uint8 image."""
    stacked = np.stack([img] * 3).transpose((1, 2, 0))
    return stacked.astype(np.uint8)
def process(index, album_ref, album_gen, album_pose):
    """Build one fused face image for *index* and write it to disk.

    album_*: lists of (image path, landmarks) as produced by loaddata().
    index selects the generated image; index // 100 selects the reference
    identity and index % 100 the pose image.
    """
    # 30ms
    img_gen = cv2.imread(album_gen[index][0])
    lms_gen = album_gen[index][1]
    # Crop the 64-pixel border so the ref/pose images are 256x256.
    img_ref = cv2.imread(album_ref[index // 100][0])[64:64 + 256, 64:64 + 256, :]
    lms_ref = album_ref[index // 100][1]
    img_pose = cv2.imread(album_pose[index % 100][0])[64:64 + 256, 64:64 + 256, :]
    lms_pose = album_pose[index % 100][1]
    # affine
    # 4ms
    lms_ref_, img_ref_ = affineface(img_ref, lms_ref, lms_gen)
    # 200ms
    lms_ref_parts, img_ref_parts = affineface_parts(img_ref, lms_ref, lms_gen)
    # fusion
    # fuse_all,seg_ref_,seg_gen = fusion(img_ref_,lms_ref_,img_gen,lms_gen,0.1)
    fuse_parts, seg_ref_parts, seg_gen = fusion(img_ref_parts, lms_ref_parts, img_gen, lms_gen, 0.1)
    fuse_eye, mask_eye, img_eye = lightEye(img_ref, lms_ref, fuse_parts, lms_gen, 0.1)
    # Side-by-side montage: reference | pose | generated | fused result.
    res = np.hstack([img_ref, img_pose, img_gen, fuse_eye])
    cv2.imwrite('proposed_wild/fuse/%d.jpg' % (index), fuse_eye)
|
tensorflow_datasets/ranking/istella/istella.py | vanshhhhh/datasets | 3,380 | 110934 | <reponame>vanshhhhh/datasets
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""istella dataset."""
import dataclasses
from typing import Optional
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.ranking.libsvm_ranking_parser import LibSVMRankingParser
_DESCRIPTION = """
The Istella datasets are three large-scale Learning-to-Rank datasets released by
Istella. Each dataset consists of query-document pairs represented as feature
vectors and corresponding relevance judgment labels.
The dataset contains three versions:
* `main` ("Istella LETOR"): Containing 10,454,629 query-document pairs.
* `s` ("Istella-S LETOR"): Containing 3,408,630 query-document pairs.
* `x` ("Istella-X LETOR"): Containing 26,791,447 query-document pairs.
You can specify whether to use the `main`, `s` or `x` version of the dataset as
follows:
```python
ds = tfds.load("istella/main")
ds = tfds.load("istella/s")
ds = tfds.load("istella/x")
```
If only `istella` is specified, the `istella/main` option is selected by
default:
```python
# This is the same as `tfds.load("istella/main")`
ds = tfds.load("istella")
```
"""
_CITATION = """
@article{10.1145/2987380,
author = {<NAME> Lucchese, <NAME>},
title = {Fast Ranking with Additive Ensembles of Oblivious and Non-Oblivious Regression Trees},
year = {2016},
publisher = {ACM},
address = {New York, NY, USA},
volume = {35},
number = {2},
issn = {1046-8188},
url = {https://doi.org/10.1145/2987380},
doi = {10.1145/2987380},
journal = {ACM Transactions on Information Systems},
articleno = {15},
numpages = {31},
}
"""
_URLS = {
"main": "http://library.istella.it/dataset/istella-letor.tar.gz",
"s": "http://library.istella.it/dataset/istella-s-letor.tar.gz",
"x": "http://quickrank.isti.cnr.it/istella-datasets-mirror/istella-X.tar.gz"
}
_FEATURE_NAMES = {n: f"feature_{n}" for n in range(1, 221)}
_LABEL_NAME = "label"
@dataclasses.dataclass
class IstellaConfig(tfds.core.BuilderConfig):
  # Whether this config ships a separate validation ("vali") split.
  has_vali: bool = False
  # Subdirectory inside the extracted archive that holds the data files,
  # or None when the files sit at the archive root.
  subdirectory: Optional[str] = None
class Istella(tfds.core.GeneratorBasedBuilder):
  """DatasetBuilder for istella dataset."""

  VERSION = tfds.core.Version("1.0.1")
  RELEASE_NOTES = {
      "1.0.0": "Initial release.",
      "1.0.1": "Fix serialization to support float64."
  }

  # pytype: disable=wrong-keyword-args
  BUILDER_CONFIGS = [
      IstellaConfig(name="main", has_vali=False, subdirectory="full"),
      IstellaConfig(name="s", has_vali=True, subdirectory="sample"),
      IstellaConfig(name="x", has_vali=True, subdirectory=None)
  ]
  # pytype: enable=wrong-keyword-args

  def _info(self) -> tfds.core.DatasetInfo:
    """Returns the dataset metadata."""
    # Every column is a variable-length float64 tensor; ZLIB encoding
    # keeps the serialized size down.
    encoding = tfds.features.Encoding.ZLIB
    features = {
        name: tfds.features.Tensor(
            shape=(None,), dtype=tf.float64, encoding=encoding)
        for name in _FEATURE_NAMES.values()
    }
    features[_LABEL_NAME] = tfds.features.Tensor(
        shape=(None,), dtype=tf.float64, encoding=encoding)
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict(features),
        homepage="http://quickrank.isti.cnr.it/istella-dataset/",
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager: tfds.download.DownloadManager):
    """Returns SplitGenerators."""
    path = dl_manager.download_and_extract(_URLS[self.builder_config.name])
    # For some dataset configs, the data is in a subdirectory.
    if self.builder_config.subdirectory is not None:
      path = path / self.builder_config.subdirectory
    splits = {
        "train": self._generate_examples(path / "train.txt"),
        "test": self._generate_examples(path / "test.txt")
    }
    # For some dataset configs, there is an additional validation split.
    if self.builder_config.has_vali:
      splits["vali"] = self._generate_examples(path / "vali.txt")
    return splits

  def _generate_examples(self, path):
    """Yields examples."""
    # Istella datasets seems to be encoded as latin1 and not utf-8, so we have
    # to read the file contents as bytes and manually decode it as latin1.
    with tf.io.gfile.GFile(path, "rb") as f:
      lines = map(lambda bytes_line: bytes_line.decode("latin1"), f)
      yield from LibSVMRankingParser(lines, _FEATURE_NAMES, _LABEL_NAME)
|
tests/basics/class2.py | LabAixBidouille/micropython | 303 | 110940 | # class with __init__
class C1:
def __init__(self):
self.x = 1
c1 = C1()
print(type(c1) == C1)
print(c1.x)
class C2:
def __init__(self, x):
self.x = x
c2 = C2(4)
print(type(c2) == C2)
print(c2.x)
|
src/azure-cli/azure/cli/command_modules/serviceconnector/__init__.py | YuanyuanNi/azure-cli | 3,287 | 110948 | <filename>src/azure-cli/azure/cli/command_modules/serviceconnector/__init__.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azure.cli.command_modules.serviceconnector._help import helps # pylint: disable=unused-import
class MicrosoftServiceConnectorCommandsLoader(AzCommandsLoader):
    """Command loader for the Service Connector ("connection") commands."""

    def __init__(self, cli_ctx=None):
        from azure.cli.core.commands import CliCommandType
        from azure.cli.command_modules.serviceconnector._client_factory import cf_connection_cl
        connection_custom = CliCommandType(
            operations_tmpl='azure.cli.command_modules.serviceconnector.custom#{}',
            client_factory=cf_connection_cl)
        # Python 3 zero-argument super() replaces the legacy
        # super(Class, self) spelling.
        super().__init__(cli_ctx=cli_ctx, custom_command_type=connection_custom)

    def load_command_table(self, args):
        """Populate and return this module's command table."""
        from azure.cli.command_modules.serviceconnector.commands import load_command_table as load_command_table_manual
        load_command_table_manual(self, args)
        return self.command_table

    def load_arguments(self, command):
        """Register argument definitions for *command*."""
        from azure.cli.command_modules.serviceconnector._params import load_arguments as load_arguments_manual
        load_arguments_manual(self, command)


COMMAND_LOADER_CLS = MicrosoftServiceConnectorCommandsLoader
|
users/migrations/0014_auto_20210801_2332.py | Manny27nyc/pythondotorg | 911 | 110974 | # Generated by Django 3.2.5 on 2021-08-01 23:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the two membership opt-in booleans nullable, so an unanswered
    question is distinct from an explicit False."""

    dependencies = [
        ('users', '0013_auto_20180705_0348'),
    ]

    operations = [
        migrations.AlterField(
            model_name='membership',
            name='psf_announcements',
            field=models.BooleanField(blank=True, null=True, verbose_name='I would like to receive occasional PSF email announcements'),
        ),
        migrations.AlterField(
            model_name='membership',
            name='psf_code_of_conduct',
            field=models.BooleanField(blank=True, null=True, verbose_name='I agree to the PSF Code of Conduct'),
        )
    ]
|
fooof/tests/__init__.py | varman-m/eeg_notebooks_doc | 154 | 110981 | """Tests for FOOOF."""
|
moonlight/structure/section_barlines.py | lithomas1/moonlight | 288 | 111035 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detects section barlines, which are much thicker than normal barlines.
Section barlines appear as connected components which span the height of the
system, and are not too thick. They may have 2 repeat dots on one or both sides
of each staff (at y positions -1 and 1), which affect the barline type.
"""
# TODO(ringw): Get repeat dots from the components and adjust the barline
# type accordingly. Currently, assume all thick barlines are END_BAR.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from moonlight.protobuf import musicscore_pb2
from moonlight.structure import barlines
from moonlight.structure import components as components_module
Bar = musicscore_pb2.StaffSystem.Bar # pylint: disable=invalid-name
COLUMNS = components_module.ConnectedComponentsColumns
class SectionBarlines(object):
  """Reads the connected components, and adds thick barlines to the page."""

  def __init__(self, structure):
    # structure bundles the page-analysis results; only the raw connected
    # components and the staff detector are needed here.
    self.components = structure.connected_components.components
    self.staff_detector = structure.staff_detector

  def apply(self, page):
    """Detects thick section barlines from the connected components.

    These should be tall components that start and end near the start and end
    of two (possibly different) staves. We use the standard barlines logic to
    assign components to the nearest start and end staff. We filter for
    candidate barlines, whose start and end are sufficiently close to the
    expected values. We then filter again by whether the component width is
    within the expected values for section barlines.

    For each staff system, we take the section barlines that match exactly that
    system's staves. Any standard barlines that are too close to a new section
    barline are removed, and we merge the existing standard barlines with the
    new section barlines.

    Args:
      page: A Page message.

    Returns:
      The same Page message, with new section barlines added.
    """
    # Horizontal center of each component, used as the barline x position.
    component_center_x = np.mean(
        self.components[:, [COLUMNS.X0, COLUMNS.X1]], axis=1).astype(int)
    # Take section barline candidates, whose start and end y values are close
    # enough to the staff start and end ys.
    component_is_candidate, candidate_start_staff, candidate_end_staff = (
        barlines.assign_barlines_to_staves(
            barline_x=component_center_x,
            barline_y0=self.components[:, COLUMNS.Y0],
            barline_y1=self.components[:, COLUMNS.Y1],
            staff_detector=self.staff_detector))
    candidates = self.components[component_is_candidate]
    candidate_center_x = component_center_x[component_is_candidate]
    del component_center_x
    # Filter again by the expected section barline width.
    component_width = candidates[:, COLUMNS.X1] - candidates[:, COLUMNS.X0]
    component_width_ok = np.logical_and(
        self._section_min_width() <= component_width,
        component_width <= self._section_max_width(candidate_start_staff))
    candidates = candidates[component_width_ok]
    candidate_center_x = candidate_center_x[component_width_ok]
    candidate_start_staff = candidate_start_staff[component_width_ok]
    candidate_end_staff = candidate_end_staff[component_width_ok]
    # For each existing staff system, consider only the candidates that match
    # exactly the system's start and end staves.
    start_staff = 0
    for system in page.system:
      staffline_distance = np.median(
          [staff.staffline_distance for staff in system.staff]).astype(int)
      candidate_covers_staff_system = np.logical_and(
          candidate_start_staff == start_staff,
          candidate_end_staff + 1 == start_staff + len(system.staff))
      # Calculate the x coordinates of all section barlines to keep.
      section_bar_x = candidate_center_x[candidate_covers_staff_system]
      # Extract the existing bar x coordinates and types for merging.
      existing_bar_type = {bar.x: bar.type for bar in system.bar}
      existing_bars = np.asarray([bar.x for bar in system.bar])
      # Merge the existing barlines and section barlines.
      if existing_bars.size and section_bar_x.size:
        # Filter the existing bars by whether they are far enough from a new
        # section barline. Section barlines override the existing standard
        # barlines.
        existing_bars_ok = np.greater(
            np.min(
                np.abs(existing_bars[:, None] - section_bar_x[None, :]),
                axis=1), staffline_distance * 4)
        existing_bars = existing_bars[existing_bars_ok]
      # Merge the existing barlines which we kept, and the new section barlines
      # (which are assumed to be type END_BAR), in sorted order.
      bars = sorted(
          [Bar(x=x, type=existing_bar_type[x]) for x in existing_bars] +
          [Bar(x=x, type=Bar.END_BAR) for x in section_bar_x],
          key=lambda bar: bar.x)
      # Update the staff system.
      system.ClearField('bar')
      system.bar.extend(bars)
      start_staff += len(system.staff)
    return page

  def _section_min_width(self):
    # Minimum plausible thickness for a section barline.
    return self.staff_detector.staffline_thickness * 3

  def _section_max_width(self, staff_index):
    # Maximum plausible thickness, scaled per staff.
    return self.staff_detector.staffline_distance[staff_index] * 2
class MergeStandardAndBeginRepeatBars(object):
  """Detects a begin repeat at the beginning of the staff system.

  Typically, a begin repeat bar on a new line will be preceded by a standard
  barline, clef, and key signature. We can override a standard bar with a
  section bar if they are close together, but this distance is typically closer
  than the two bars are in this case.

  We want the two bars to be replaced by a single begin repeat bar where we
  actually found the first bar, because we want the clef, key signature, and
  notes to be a single measure.

  Because we don't yet detect repeat dots, and all non-STANDARD barlines are
  detected as END_BAR, we accept any non-STANDARD barlines for the second bar.
  """

  def __init__(self, structure):
    self.staff_detector = structure.staff_detector

  def apply(self, page):
    """Merge each system's leading standard bar into a following repeat bar."""
    for system in page.system:
      # Look for a standard bar immediately followed by a non-standard bar.
      if (len(system.bar) > 1 and system.bar[0].type == Bar.STANDARD_BAR and
          system.bar[1].type != Bar.STANDARD_BAR):
        staffline_distance = np.median(
            [staff.staffline_distance for staff in system.staff])
        # Within 12 staffline distances, treat the pair as one bar: keep the
        # first bar's position but adopt the second bar's type.
        if system.bar[1].x - system.bar[0].x < staffline_distance * 12:
          system.bar[0].type = system.bar[1].type
          del system.bar[1]
    return page
|
importer/migrations/0003_auto_20180709_0933.py | juliecentofanti172/juliecentofanti.github.io | 134 | 111041 | # Generated by Django 2.0.7 on 2018-07-09 09:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the campaign count fields optional, defaulting to 0."""

    dependencies = [("importer", "0002_auto_20180709_0833")]

    operations = [
        migrations.AlterField(
            model_name="campaignitemassetcount",
            name="campaign_item_asset_count",
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name="campaigntaskdetails",
            name="campaign_asset_count",
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name="campaigntaskdetails",
            name="campaign_item_count",
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name="campaigntaskdetails",
            name="campaign_page_count",
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
    ]
|
tensorflow/python/util/keyword_args.py | abhaikollara/tensorflow | 848 | 111066 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keyword args functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.util import decorator_utils
def keyword_args_only(func):
  """Decorator for marking specific function accepting keyword args only.

  This decorator raises a `ValueError` if the input `func` is called with any
  non-keyword args. This prevents the caller from providing the arguments in
  wrong order.

  Args:
    func: The function or method needed to be decorated.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If `func` is not callable.
  """
  decorator_utils.validate_callable(func, "keyword_args_only")

  @functools.wraps(func)
  def new_func(*args, **kwargs):
    """Keyword args only wrapper."""
    # Guard-clause form: succeed fast on the keyword-only path.
    if not args:
      return func(**kwargs)
    raise ValueError(
        "Must use keyword args to call {}.".format(func.__name__))
  return new_func
|
hypergan/layers/residual.py | limberc/HyperGAN | 889 | 111082 | import torch.nn as nn
import hypergan as hg
class Residual(hg.Layer):
    """
    ---
    description: 'layer residual for configurable component'
    ---

    # residual layer

    `residual` adds one or more residual blocks https://paperswithcode.com/method/residual-block

    ## optional arguments

        The number of residual blocks to add

    ## input size

    Any 4-d tensor

    ## output size

    Same as input size

    ## syntax

    ```json
      "residual COUNT"
    ```

    ## examples

    ```json
      "residual 3"
    ```
    """
    def __init__(self, component, args, options):
        super(Residual, self).__init__(component, args, options)
        # Channel count is preserved so the skip connection can add the
        # block output back onto its input.
        self.size = component.current_size
        layers = []
        # args[0] is the block count; falls back to 3 when absent (or 0).
        for i in range(args[0] or 3):
            layers += [nn.Conv2d(self.size.channels, self.size.channels, 3, 1, padding = (1, 1))]
            layers += [nn.ReLU()]
            layers += [nn.Conv2d(self.size.channels, self.size.channels, 3, 1, padding = (1, 1))]
            layers += [nn.ReLU()]
        self.residual = nn.Sequential(*layers)

    def output_size(self):
        # Residual blocks never change the tensor shape.
        return self.size

    def forward(self, input, context):
        # Classic residual connection: f(x) + x.
        residual = self.residual(input)
        return input + residual
|
plugins/Operations/main.py | nmantani/FileInsight-plugins | 120 | 111087 | <reponame>nmantani/FileInsight-plugins<filename>plugins/Operations/main.py
#
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import ctypes
import os
import re
import subprocess
import sys
__version__ = "2.14"
sys.path.append("./Basic")
import basic_ops
sys.path.append("./Compression")
import compression_ops
sys.path.append("./Crypto")
import crypto_ops
sys.path.append("./Encoding")
import encoding_ops
sys.path.append("./Misc")
import misc_ops
sys.path.append("./Parsing")
import parsing_ops
sys.path.append("./Search")
import search_ops
sys.path.append("./Visualization")
import visualization_ops
sys.path.append("./XOR")
import xor_ops
class FileInsight:
    """
    Class for FileInsight built-in functions

    Wraps the host-provided global functions behind one object, and works
    around two known bugs in the host implementations (see getDocument and
    showSimpleDialog below).
    """
    def __init__(self):
        # Bind the FileInsight host-injected globals as attributes so plugin
        # code can call everything through this single object.
        self.getLength = getLength
        self.getByteAt = getByteAt
        self.setByteAt = setByteAt
        self.setBookmark = setBookmark
        self.getSelection = getSelection
        self.getSelectionOffset = getSelectionOffset
        self.getSelectionLength = getSelectionLength
        self.gotoBookmark = gotoBookmark
        self.download = download
        self.newDocument = newDocument
        self.decode = decode
        self.setDocument = setDocument
        self.getDocumentName = getDocumentName
        self.getDocumentCount = getDocumentCount
        self.getDocumentURL = getDocumentURL
        self.activateDocumentAt = activateDocumentAt

    # Workaround for the truncation bug of getDocument()
    def getDocument(self):
        """Return the whole document, fetching any truncated tail byte-wise."""
        length = getLength()
        if length == getSelectionLength():
            # Whole document selected: getSelection() is reliable.
            data = getSelection()
        else:
            data = getDocument()  # the host built-in, not this method
        if length - len(data) > 0:
            # Host returned fewer bytes than reported; fetch the rest.
            for i in range(len(data), length):
                data += getByteAt(i)
        return data

    # Workaround for the bug of showSimpleDialog()
    def showSimpleDialog(self, prompt):
        """Show an input dialog via a child process; None if it was closed."""
        # Do not show command prompt window
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW

        # Execute show_simple_dialog.py to show GUI
        # GUI portion is moved to separate process to avoid hangup of FileInsight
        p = subprocess.Popen(["py.exe", "-3", "show_simple_dialog.py", prompt], startupinfo=startupinfo, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # Get input
        stdout_data, stderr_data = p.communicate()
        ret = p.wait()

        if ret: # Dialog has been closed
            return None
        else:
            return(stdout_data.rstrip())
def find_python3():
    """Check that the py.exe launcher and a Python 3 installation exist.

    Returns:
        bool: True only if both py.exe and at least one Python 3
        installation are found; error messages are printed otherwise.
    """
    pyexe_found = False
    python3_found = False
    # py.exe lives either in C:\Windows (all-users install) or in the
    # per-user launcher location under %LOCALAPPDATA%.
    if os.path.exists("C:/Windows/py.exe") or os.path.exists(os.environ["LOCALAPPDATA"].replace("\\", "/") + "/Programs/Python/Launcher/py.exe"):
        pyexe_found = True
    if not pyexe_found:
        print("Error: py.exe is not found. You need to install Python 3 to use FileInsight-plugins.")
    else:
        # Do not show a command prompt window for the helper process.
        # FIX: this used to rely on the global "startupinfo" created only in
        # the __main__ block, raising NameError when the function was called
        # from any other context; create it locally instead.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        # List Python installation
        p = subprocess.Popen(["py.exe", "--list"], startupinfo=startupinfo, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Get py.exe output
        stdout_data, stderr_data = p.communicate()
        # Check whether Python 3 is installed (entries look like "-3.8-64").
        # FIX: escape the dot so "." no longer matches any character.
        if re.search(r"-3\.[0-9]{1,2}-(64|32)", stdout_data):
            python3_found = True
        if not python3_found:
            print("Error: no Python 3 installation is found. You need to install Python 3 to use FileInsight-plugins.")
    return pyexe_found and python3_found
if __name__ == "__main__":
    # Tuple of plugin operations.
    # NOTE: menu.py returns the selected plugin as an index into this tuple
    # (see operations[index](fi) below), so the order here must stay in sync
    # with the menu definition.
    # Basic operations
    operations = (basic_ops.copy_to_new_file,
                  basic_ops.bookmark,
                  basic_ops.cut_binary_to_clipboard,
                  basic_ops.copy_binary_to_clipboard,
                  basic_ops.paste_binary_from_clipboard,
                  basic_ops.delete_before,
                  basic_ops.delete_after,
                  basic_ops.fill,
                  basic_ops.invert,
                  basic_ops.reverse_order,
                  basic_ops.swap_nibbles,
                  basic_ops.swap_two_bytes,
                  basic_ops.to_upper_case,
                  basic_ops.to_lower_case,
                  basic_ops.swap_case)
    # Compression operations
    operations += (compression_ops.aplib_compress,
                   compression_ops.bzip2_compress,
                   compression_ops.gzip_compress,
                   compression_ops.lz4_compress,
                   compression_ops.lzma_compress,
                   compression_ops.lznt1_compress,
                   compression_ops.lzo_compress,
                   compression_ops.ppmd_compress,
                   compression_ops.quicklz_compress,
                   compression_ops.raw_deflate,
                   compression_ops.xz_compress,
                   compression_ops.zlib_compress,
                   compression_ops.zstandard_compress,
                   compression_ops.aplib_decompress,
                   compression_ops.bzip2_decompress,
                   compression_ops.gzip_decompress,
                   compression_ops.lz4_decompress,
                   compression_ops.lzma_decompress,
                   compression_ops.lznt1_decompress,
                   compression_ops.lzo_decompress,
                   compression_ops.ppmd_decompress,
                   compression_ops.quicklz_decompress,
                   compression_ops.raw_inflate,
                   compression_ops.xz_decompress,
                   compression_ops.zlib_decompress,
                   compression_ops.zstandard_decompress)
    # Crypto operations
    operations += (crypto_ops.aes_decrypt,
                   crypto_ops.arc2_decrypt,
                   crypto_ops.arc4_decrypt,
                   crypto_ops.blowfish_decrypt,
                   crypto_ops.chacha20_decrypt,
                   crypto_ops.des_decrypt,
                   crypto_ops.salsa20_decrypt,
                   crypto_ops.tea_decrypt,
                   crypto_ops.triple_des_decrypt,
                   crypto_ops.xtea_decrypt,
                   crypto_ops.aes_encrypt,
                   crypto_ops.arc2_encrypt,
                   crypto_ops.arc4_encrypt,
                   crypto_ops.blowfish_encrypt,
                   crypto_ops.chacha20_encrypt,
                   crypto_ops.des_encrypt,
                   crypto_ops.salsa20_encrypt,
                   crypto_ops.tea_encrypt,
                   crypto_ops.triple_des_encrypt,
                   crypto_ops.xtea_encrypt)
    # Encoding operations
    operations += (encoding_ops.hex_text_to_binary_data,
                   encoding_ops.decimal_text_to_binary_data,
                   encoding_ops.octal_text_to_binary_data,
                   encoding_ops.binary_text_to_binary_data,
                   encoding_ops.custom_base16_decode,
                   encoding_ops.custom_base32_decode,
                   encoding_ops.custom_base58_decode,
                   encoding_ops.custom_base64_decode,
                   encoding_ops.custom_base85_decode,
                   encoding_ops.protobuf_decode,
                   encoding_ops.from_quoted_printable,
                   encoding_ops.unicode_unescape,
                   encoding_ops.url_decode,
                   encoding_ops.binary_data_to_hex_text,
                   encoding_ops.binary_data_to_decimal_text,
                   encoding_ops.binary_data_to_octal_text,
                   encoding_ops.binary_data_to_binary_text,
                   encoding_ops.custom_base16_encode,
                   encoding_ops.custom_base32_encode,
                   encoding_ops.custom_base58_encode,
                   encoding_ops.custom_base64_encode,
                   encoding_ops.custom_base85_encode,
                   encoding_ops.rot13,
                   encoding_ops.to_quoted_printable,
                   encoding_ops.unicode_escape,
                   encoding_ops.url_encode)
    # Misc operations
    operations += (misc_ops.emulate_code,
                   misc_ops.file_comparison,
                   misc_ops.hash_values,
                   misc_ops.send_to_cli,
                   misc_ops.send_to_gui)
    # Parsing operations
    operations += (parsing_ops.binwalk_scan,
                   parsing_ops.disassemble,
                   parsing_ops.file_type,
                   parsing_ops.find_pe_file,
                   parsing_ops.parse_file_structure,
                   parsing_ops.show_metadata,
                   parsing_ops.strings)
    # Search operations
    operations += (search_ops.regex_extraction,
                   search_ops.regex_search,
                   search_ops.replace,
                   search_ops.xor_hex_search,
                   search_ops.xor_text_search,
                   search_ops.yara_scan)
    # Visualization operations
    operations += (visualization_ops.bitmap_view,
                   visualization_ops.byte_histogram,
                   visualization_ops.entropy_graph)
    # XOR operations
    operations += (xor_ops.decremental_xor,
                   xor_ops.incremental_xor,
                   xor_ops.null_preserving_xor,
                   xor_ops.xor_with_next_byte,
                   xor_ops.xor_with_next_byte_reverse,
                   xor_ops.guess_multibyte_xor_keys,
                   xor_ops.visual_decrypt,
                   xor_ops.visual_encrypt)
    # Structure for mouse cursor position (Win32 POINT)
    class _point_t(ctypes.Structure):
        _fields_ = [
            ("x", ctypes.c_long),
            ("y", ctypes.c_long),
        ]
    # Get DPI values so the menu can be positioned in 96-DPI logical units
    DEFAULT_DPI = 96
    LOGPIXELSX = 88
    LOGPIXELSY = 90
    dc = ctypes.windll.user32.GetDC(0)
    dpi_x = ctypes.windll.gdi32.GetDeviceCaps(dc, LOGPIXELSX)
    dpi_y = ctypes.windll.gdi32.GetDeviceCaps(dc, LOGPIXELSY)
    ctypes.windll.user32.ReleaseDC(0, dc)
    # Get mouse cursor position and scale from physical pixels to 96-DPI
    # coordinates.
    # NOTE(review): the scaling relies on Python 2 integer division — under
    # Python 3, "/" yields a float, which cannot be assigned to a c_long
    # field; confirm the embedded interpreter version.
    point = _point_t()
    ctypes.windll.user32.GetCursorPos(ctypes.pointer(point))
    point.x = point.x * DEFAULT_DPI / dpi_x
    point.y = point.y * DEFAULT_DPI / dpi_y
    # Do not show command prompt window
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    if find_python3():
        # Execute menu.py to show GUI
        # GUI portion is moved to menu.py to avoid hangup of FileInsight
        p = subprocess.Popen(["py.exe", "-3", "menu.py", str(point.x), str(point.y), __version__], startupinfo=startupinfo)
        index = p.wait() # Receive exit value as index of selected plugin
        if index >= 0:
            fi = FileInsight()
            operations[index](fi)
        elif index == -2: # requests is not installed
            print("requests is not installed.")
            print("Please install it with 'py.exe -3 -m pip install requests' and try again.")
            print("")
    # Workaround to avoid crash of FileInsight on Windows 7
    if "threading" in sys.modules:
        sys.modules.pop("threading")
|
src/Gon/realtime_starter_cnstock.py | majiajue/Listed-company-news-crawl-and-text-analysis | 635 | 111185 | <filename>src/Gon/realtime_starter_cnstock.py
import __init__
import time
import redis
import logging
import threading
from Kite import config
from Kite.database import Database
from Killua.denull import DeNull
from Killua.deduplication import Deduplication
from Gon.cnstockspyder import CnStockSpyder
# Register this running crawler process in Redis so it can be tracked.
redis_client = redis.StrictRedis(config.REDIS_IP,
                                 port=config.REDIS_PORT,
                                 db=config.CACHE_RECORED_OPENED_PYTHON_PROGRAM_DB_ID)
redis_client.lpush(config.CACHE_RECORED_OPENED_PYTHON_PROGRAM_VAR, "realtime_starter_cnstock.py")
# Load the already-crawled (Date, Category) pairs from MongoDB.
obj = Database()
df = obj.get_data(config.DATABASE_NAME, config.COLLECTION_NAME_CNSTOCK, keys=["Date", "Category"])
cnstock_spyder = CnStockSpyder(config.DATABASE_NAME, config.COLLECTION_NAME_CNSTOCK)
# First backfill historical data: e.g. if data has been crawled up to
# 2020-12-01 but this realtime crawler is started on 2020-12-23,
# automatically crawl the news from 2020-12-02 to 2020-12-23 first.
for url_to_be_crawled, type_chn in config.WEBSITES_LIST_TO_BE_CRAWLED_CNSTOCK.items():
    # Look up the date of the most recent stored document for this category
    latets_date_in_db = max(df[df.Category == type_chn]["Date"].to_list())
    cnstock_spyder.get_historical_news(url_to_be_crawled, category_chn=type_chn, start_date=latets_date_in_db)
# Clean up the collection after the backfill: drop duplicates and null rows.
Deduplication(config.DATABASE_NAME, config.COLLECTION_NAME_CNSTOCK).run()
DeNull(config.DATABASE_NAME, config.COLLECTION_NAME_CNSTOCK).run()
# Start realtime crawling in parallel, one thread per category URL
# (polling interval: 60 seconds).
thread_list = []
for url, type_chn in config.WEBSITES_LIST_TO_BE_CRAWLED_CNSTOCK.items():
    thread = threading.Thread(target=cnstock_spyder.get_realtime_news, args=(url, type_chn, 60))
    thread_list.append(thread)
for thread in thread_list:
    thread.start()
for thread in thread_list:
    thread.join()
tests/sfko/sfko/obj/stage.py | Public-Cloud-Projects/Zenko | 453 | 111193 | <filename>tests/sfko/sfko/obj/stage.py
from pipewrench.pipeline import Filter
from pipewrench.pipeline import Router as BaseRouter
from ..execute import (CheckPipeline, ControllerPipeline, TestPipeline,
WorkerPipeline)
from ..register import CHECKS, TESTS
from .backend import BackendsWrapper
from .scenario import SCENARIOS
from .secret import SECRETS
class OppenValues:
    """Mixin exposing the project-wide registries as class attributes."""
    # Presumably consumed by stage/router implementations defined elsewhere.
    secrets = SECRETS
    backends = BackendsWrapper
    scenarios = SCENARIOS
    tests = TESTS
    checks = CHECKS
class Stage(Filter, OppenValues):
    """A pipewrench Filter with access to the shared registries."""
    pass
class Router(BaseRouter, OppenValues):
    """Pipeline router that optionally binds a class-level pipeline.

    Subclasses declare ``_PIPELINE`` with the pipeline class they route
    for; this base class has none and falls back to the plain
    ``BaseRouter`` constructor.
    """
    _PIPELINE = None

    def __init__(self):
        if self._PIPELINE is None:
            super().__init__()
        else:
            super().__init__(self._PIPELINE)
class TestRouter(Router):
    """Router bound to the test pipeline."""
    _PIPELINE = TestPipeline
class CheckRouter(Router):
    """Router bound to the check pipeline."""
    _PIPELINE = CheckPipeline
class WorkerRouter(Router):
    """Router bound to the worker pipeline."""
    _PIPELINE = WorkerPipeline
|
davarocr/davarocr/davar_table/core/mask/structures.py | hikopensource/DAVAR-Lab-OCR | 387 | 111201 | <gh_stars>100-1000
"""
##################################################################################################
# Copyright Info : Copyright (c) <NAME> @ Hikvision Research Institute. All rights reserved.
# Filename : structures.py
# Abstract : BitmapMasks designed for LGPMA
# Current Version: 1.0.0
# Date : 2021-09-18
##################################################################################################
"""
import numpy as np
import torch
from mmcv.ops.roi_align import roi_align
from mmdet.core import BitmapMasks
class BitmapMasksTable(BitmapMasks):
    """Inherited from BitmapMasks. Modify the data type of mask to store pyramid mask.

    Unlike the upstream implementation, crop_and_resize() keeps the
    roi_align output as floats instead of binarizing to np.uint8, so the
    soft (pyramid) mask values survive.
    """
    def __init__(self, masks, height, width):
        """
        Args:
            masks (ndarray): ndarray of masks in shape (N, H, W), where N is the number of objects.
            height (int): height of masks
            width (int): width of masks
        """
        super().__init__(
            masks=masks,
            height=height,
            width=width)
    def crop_and_resize(self,
                        bboxes,
                        out_shape,
                        inds,
                        device='cpu',
                        interpolation='bilinear'):
        """The only difference from the original function is that change resized mask from np.uint8 to np.float.

        Args:
            bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
            out_shape (tuple[int]): Target (h, w) of resized mask
            inds (ndarray): Indexes to assign masks to each bbox, shape (N,)
                and values should be between [0, num_masks - 1].
            device (str): Device of bboxes
            interpolation (str): See `mmcv.imresize`
        Return:
            BitmapMasks: the cropped and resized masks.
            NOTE(review): the base BitmapMasks class is returned (not
            BitmapMasksTable), and the empty case uses np.uint8 — confirm
            this is intentional.
        """
        if len(self.masks) == 0:
            empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
            return BitmapMasks(empty_masks, *out_shape)
        # convert bboxes to tensor
        if isinstance(bboxes, np.ndarray):
            bboxes = torch.from_numpy(bboxes).to(device=device)
        if isinstance(inds, np.ndarray):
            inds = torch.from_numpy(inds).to(device=device)
        num_bbox = bboxes.shape[0]
        # Prepend a per-row batch index to form the (N, 5) RoI format that
        # roi_align expects.
        fake_inds = torch.arange(
            num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
        rois = torch.cat([fake_inds, bboxes], dim=1)  # Nx5
        rois = rois.to(device=device)
        if num_bbox > 0:
            # Select the source mask for each bbox, then crop+resize with
            # roi_align ('avg' pooling keeps fractional mask values).
            gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(
                0, inds).to(dtype=rois.dtype)
            targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
                                1.0, 0, 'avg', True).squeeze(1)
            # Floats are kept as-is here (upstream binarizes to uint8).
            resized_masks = targets.cpu().numpy()
        else:
            resized_masks = []
        return BitmapMasks(resized_masks, *out_shape)
|
xcessiv/tests/myrf.py | KhaledTo/xcessiv | 1,362 | 111217 | <reponame>KhaledTo/xcessiv
from sklearn.ensemble import RandomForestClassifier
import joblib
class MyClassifier(RandomForestClassifier):
    """RandomForestClassifier with simple joblib-based persistence."""
    def save(self, filepath):
        """Persist the (fitted) classifier to *filepath*; 3 is the joblib compression level."""
        joblib.dump(self, filepath, 3)
    @staticmethod
    def load(filepath):
        """Load a previously saved classifier from *filepath*."""
        return joblib.load(filepath)
|
dtu_test_project/core/views.py | foarsitter/django-tenant-users | 165 | 111253 |
from django.views.generic import TemplateView
class MainView(TemplateView):
    """Render the site's main landing page."""
    template_name = 'core/main.html'
|
examples/puma560_animation.py | rodosha98/FRPGitHomework | 214 | 111268 | <gh_stars>100-1000
import robopy.base.model as model
import numpy as np
def main():
    """Animate a Puma 560 robot through 500 interpolated joint stances."""
    robot = model.Puma560()
    steps = 500
    # One (steps, 1) column per joint trajectory, all starting at 1 degree.
    sweep_neg = np.asmatrix(np.linspace(1, -180, steps)).T
    sweep_pos = np.asmatrix(np.linspace(1, 180, steps)).T
    sweep_quarter = np.asmatrix(np.linspace(1, 90, steps)).T
    sweep_wide = np.asmatrix(np.linspace(1, 450, steps)).T
    zeros_col = np.asmatrix(np.zeros((steps, 1)))
    # Column order fixes which trajectory drives which of the six joints.
    stances = np.concatenate(
        (sweep_wide, sweep_pos, sweep_neg, zeros_col, sweep_quarter, sweep_wide),
        axis=1)
    robot.animate(stances=stances, frame_rate=30, unit='deg')
if __name__ == '__main__':
    # Run the animation only when executed as a script.
    main()
|
test/gyppies/make_global_settings/test.gyp | indutny/gyp.js | 188 | 111275 | <filename>test/gyppies/make_global_settings/test.gyp
{
"target_defaults": {
"make_global_settings": [
[ "CC", "echo" ],
[ "LD", "echo" ],
],
},
"targets": [{
"target_name": "test",
"type": "executable",
"sources": [
"main.c",
],
}],
}
|
data/cancer/print_tfrecords_files.py | liujh168/tensorflow_template_application | 1,131 | 111283 | <gh_stars>1000+
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
def print_tfrecords_file(input_filename, max_print_number=10):
    """Print the label/features of the first records in a TFRecord file.

    Args:
        input_filename: Path of the TFRecord file to dump.
        max_print_number: Maximum number of records to print. Previously a
            hard-coded local constant; the default of 10 preserves the
            original behavior.
    """
    print("Try to print the tfrecords file: {}".format(input_filename))
    current_print_index = 0
    for serialized_example in tf.python_io.tf_record_iterator(input_filename):
        # Parse the serialized example read from the file
        example = tf.train.Example()
        example.ParseFromString(serialized_example)
        label = example.features.feature["label"].int64_list.value
        features = example.features.feature["features"].float_list.value
        print("Index: {}, label: {}, features: {}".format(current_print_index,
                                                          label, features))
        # Return when reaching max print number
        current_print_index += 1
        if current_print_index >= max_print_number:
            return
def main():
    """Dump every *.tfrecords file found in the current working directory."""
    current_path = os.getcwd()
    for filename in os.listdir(current_path):
        # The original also tested filename.startswith(""), which is always
        # True; the suffix check alone is sufficient.
        if filename.endswith(".tfrecords"):
            tfrecords_file_path = os.path.join(current_path, filename)
            print_tfrecords_file(tfrecords_file_path)
if __name__ == "__main__":
    # Run the dump only when executed as a script.
    main()
|
Sobolev/main.py | inamori/DeepLearningImplementations | 2,010 | 111345 | <filename>Sobolev/main.py
import argparse
import sobolev_training
# Training settings: parse CLI flags and hand them to the experiment runner.
parser = argparse.ArgumentParser(description='Sobolev experiments')
# Training params
parser.add_argument('--nb_epoch', default=100, type=int, help="Number of training epochs")
parser.add_argument('--batch_size', default=8, type=int, help='Batch size')
parser.add_argument('--npts', default=20, type=int, help="Number of training points")
parser.add_argument('--learning_rate', default=1E-4, type=float, help="Learning rate")
parser.add_argument('--sobolev_weight', default=1, type=float, help="How much do we weight the Sobolev function")
# All experiment logic lives in sobolev_training; this file is CLI-only.
args = parser.parse_args()
sobolev_training.launch_experiments(args)
|
demo/run_linucb_on_maxcomponent.py | simonoso/EasyRL | 125 | 111349 | # Copyright (c) 2019 Alibaba Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import gym
from gym.spaces import Discrete, Box
import numpy as np
from easy_rl.agents import agents
from easy_rl.models import DQNModel
from easy_rl.utils.window_stat import WindowStat
# Model-specific settings: a LinUCB contextual-bandit model.
MODEL_CONFIG = dict(
    # specific
    type="LinUCB")
# Agent/replay settings. Batch and buffer sizes of 1 mean every step is
# learned from immediately, as a bandit requires.
AGENT_CONFIG = dict(
    type="Agent",
    sample_batch_size=1,
    buffer_size=1,
    learning_starts=0,
    prioritized_replay=False,
    prioritized_replay_alpha=0.6,
    prioritized_replay_beta=0.4,
    batch_size=1,
    use_gae=False,
    compute_targets=False,
)
# Fix the RNG so environment draws are reproducible across runs.
np.random.seed(0)
class MaxComponentEnv(gym.Env):
    """One-step bandit environment: the reward of pulling arm *i* is the
    i-th component of the current observation, drawn uniformly from [0, 1).
    Every episode ends after a single step.
    """

    def __init__(self, num_arms=2):
        self._num_arms = num_arms
        low = np.zeros((num_arms, ), dtype=np.float32)
        high = np.ones((num_arms, ), dtype=np.float32)
        self.observation_space = Box(low=low, high=high)
        self.action_space = Discrete(num_arms)

    def _draw_state(self):
        # One uniform sample per arm.
        return np.random.uniform(0, 1.0, size=(self._num_arms, ))

    def reset(self, **kwargs):
        self._cur_state = self._draw_state()
        return self._cur_state

    def step(self, action):
        reward = self._cur_state[action]
        self._cur_state = self._draw_state()
        return self._cur_state, reward, True, {}
def main():
    """Train a LinUCB agent on MaxComponentEnv for 100 one-step episodes."""
    env = MaxComponentEnv(num_arms=6)
    # Build the agent class named in AGENT_CONFIG ("Agent").
    agent_class = agents[AGENT_CONFIG["type"]]
    agent = agent_class(
        env.observation_space,
        env.action_space,
        AGENT_CONFIG,
        MODEL_CONFIG,
        distributed_spec={},
        export_dir="hook_dump_dir")
    # Sliding-window statistics over the last 50 episodes/updates.
    reward_window = WindowStat("reward", 50)
    length_window = WindowStat("length", 50)
    loss_window = WindowStat("loss", 50)
    # Experience accumulators handed to the agent in batches.
    obs, actions, rewards, next_obs, dones = list(), list(), list(), list(
    ), list()
    act_count = 0
    for i in range(100):
        ob = env.reset()
        done = False
        episode_reward = .0
        episode_len = 0
        # With this env, done is True after one step, so this inner loop
        # runs exactly once per episode.
        while not done:
            action, results = agent.act(
                [ob], deterministic=False, use_perturbed_action=False)
            next_ob, reward, done, info = env.step(action[0])
            act_count += 1
            obs.append(ob)
            actions.append(action[0])
            rewards.append(reward)
            next_obs.append(next_ob)
            dones.append(done)
            # Hand accumulated transitions to the agent when it asks.
            if agent.ready_to_send:
                agent.send_experience(
                    obs=obs,
                    actions=actions,
                    rewards=rewards,
                    next_obs=next_obs,
                    dones=dones)
            # Pull a training batch and update the model when available.
            if agent.ready_to_receive:
                batch_data = agent.receive_experience()
                res = agent.learn(batch_data)
                loss_window.push(res['loss'])
                # Feed TD errors back for prioritized replay (disabled in
                # AGENT_CONFIG above, kept for config flexibility).
                if AGENT_CONFIG.get("prioritized_replay", False):
                    agent.update_priorities(
                        indexes=batch_data["indexes"],
                        td_error=res["td_error"])
            ob = next_ob
            episode_reward += reward
            episode_len += 1
            if act_count % 5 == 0:
                print("timestep:", act_count, reward_window, length_window)
        agent.add_episode(1)
        reward_window.push(episode_reward)
        length_window.push(episode_len)
    # Persist the trained model as a SavedModel under export_dir.
    agent.export_saved_model()
    print("Done.")
if __name__ == "__main__":
    # Run the training loop only when executed as a script.
    main()
|
alipay/aop/api/response/AlipayCommerceLogisticsWaybillIstddetailQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 111350 | <reponame>antopen/alipay-sdk-python-all<filename>alipay/aop/api/response/AlipayCommerceLogisticsWaybillIstddetailQueryResponse.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceLogisticsWaybillIstddetailQueryResponse(AlipayResponse):
    """Response model for the logistics waybill intra-city delivery detail query.

    Follows the auto-generated SDK convention: each response field is a
    private attribute with a plain property/setter pair, populated from the
    parsed response dict by parse_response_content().
    """
    def __init__(self):
        super(AlipayCommerceLogisticsWaybillIstddetailQueryResponse, self).__init__()
        # All fields default to None until parse_response_content() runs.
        self._reach_duration = None
        self._rider_lat = None
        self._rider_lng = None
        self._rider_mobile_no = None
        self._rider_name = None
        self._status = None
    @property
    def reach_duration(self):
        return self._reach_duration
    @reach_duration.setter
    def reach_duration(self, value):
        self._reach_duration = value
    @property
    def rider_lat(self):
        return self._rider_lat
    @rider_lat.setter
    def rider_lat(self, value):
        self._rider_lat = value
    @property
    def rider_lng(self):
        return self._rider_lng
    @rider_lng.setter
    def rider_lng(self, value):
        self._rider_lng = value
    @property
    def rider_mobile_no(self):
        return self._rider_mobile_no
    @rider_mobile_no.setter
    def rider_mobile_no(self, value):
        self._rider_mobile_no = value
    @property
    def rider_name(self):
        return self._rider_name
    @rider_name.setter
    def rider_name(self, value):
        self._rider_name = value
    @property
    def status(self):
        return self._status
    @status.setter
    def status(self, value):
        self._status = value
    def parse_response_content(self, response_content):
        """Populate fields from the raw response; missing keys leave None."""
        response = super(AlipayCommerceLogisticsWaybillIstddetailQueryResponse, self).parse_response_content(response_content)
        if 'reach_duration' in response:
            self.reach_duration = response['reach_duration']
        if 'rider_lat' in response:
            self.rider_lat = response['rider_lat']
        if 'rider_lng' in response:
            self.rider_lng = response['rider_lng']
        if 'rider_mobile_no' in response:
            self.rider_mobile_no = response['rider_mobile_no']
        if 'rider_name' in response:
            self.rider_name = response['rider_name']
        if 'status' in response:
            self.status = response['status']
|
tests/components/goalzero/test_init.py | MrDelik/core | 22,481 | 111376 | """Test Goal Zero integration."""
from datetime import timedelta
from unittest.mock import patch
from goalzero import exceptions
from homeassistant.components.goalzero.const import DEFAULT_NAME, DOMAIN, MANUFACTURER
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import STATE_ON, STATE_UNAVAILABLE
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
import homeassistant.util.dt as dt_util
from . import CONF_DATA, async_init_integration, create_entry, create_mocked_yeti
from tests.common import async_fire_time_changed
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_setup_config_and_unload(hass: HomeAssistant):
    """Test Goal Zero setup and unload."""
    entry = create_entry(hass)
    # Patch the Yeti API client so setup performs no network I/O.
    mocked_yeti = await create_mocked_yeti()
    with patch("homeassistant.components.goalzero.Yeti", return_value=mocked_yeti):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    assert entry.state == ConfigEntryState.LOADED
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert entry.data == CONF_DATA
    # Unloading must clear state and domain data completely.
    assert await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()
    assert entry.state is ConfigEntryState.NOT_LOADED
    assert not hass.data.get(DOMAIN)
async def test_async_setup_entry_not_ready(hass: HomeAssistant):
    """Test that it throws ConfigEntryNotReady when exception occurs during setup."""
    entry = create_entry(hass)
    # Simulate an unreachable device during the initial connection.
    with patch(
        "homeassistant.components.goalzero.Yeti.init_connect",
        side_effect=exceptions.ConnectError,
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        # ConfigEntryNotReady puts the entry into the retry state.
        assert entry.state == ConfigEntryState.SETUP_RETRY
async def test_update_failed(
    hass: HomeAssistant,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test data update failure marks entities unavailable."""
    await async_init_integration(hass, aioclient_mock)
    # Entity is available right after a successful setup.
    assert hass.states.get(f"switch.{DEFAULT_NAME}_ac_port_status").state == STATE_ON
    # Make the next coordinator refresh fail with a connection error.
    with patch(
        "homeassistant.components.goalzero.Yeti.get_state",
        side_effect=exceptions.ConnectError,
    ) as updater:
        # Advance time past the update interval to trigger the refresh.
        next_update = dt_util.utcnow() + timedelta(seconds=30)
        async_fire_time_changed(hass, next_update)
        await hass.async_block_till_done()
        updater.assert_called_once()
        state = hass.states.get(f"switch.{DEFAULT_NAME}_ac_port_status")
        assert state.state == STATE_UNAVAILABLE
async def test_device_info(hass: HomeAssistant, aioclient_mock: AiohttpClientMocker):
    """Test device registry entry created from the mocked Yeti payload."""
    entry = await async_init_integration(hass, aioclient_mock)
    device_registry = await dr.async_get_registry(hass)
    device = device_registry.async_get_device({(DOMAIN, entry.entry_id)})
    # Values below come from the mocked API fixture used by
    # async_init_integration.
    assert device.connections == {("mac", "12:34:56:78:90:12")}
    assert device.identifiers == {(DOMAIN, entry.entry_id)}
    assert device.manufacturer == MANUFACTURER
    assert device.model == "Yeti 1400"
    assert device.name == DEFAULT_NAME
    assert device.sw_version == "1.5.7"
|
model/sequential_recommender/GRU4RecPlus.py | jasonshere/NeuRec | 978 | 111396 | <gh_stars>100-1000
"""
Paper: Recurrent Neural Networks with Top-k Gains for Session-based Recommendations
Author: <NAME>, and <NAME>
Reference: https://github.com/hidasib/GRU4Rec
https://github.com/Songweiping/GRU4Rec_TensorFlow
@author: <NAME>
"""
import numpy as np
from model.AbstractRecommender import SeqAbstractRecommender
import tensorflow as tf
from util import l2_loss
class GRU4RecPlus(SeqAbstractRecommender):
def __init__(self, sess, dataset, conf):
super(GRU4RecPlus, self).__init__(dataset, conf)
self.train_matrix = dataset.train_matrix
self.dataset = dataset
self.users_num, self.items_num = self.train_matrix.shape
self.lr = conf["lr"]
self.reg = conf["reg"]
self.layers = conf["layers"]
self.batch_size = conf["batch_size"]
self.n_sample = conf["n_sample"]
self.sample_alpha = conf["sample_alpha"]
self.epochs = conf["epochs"]
self.bpr_reg = conf["bpr_reg"]
if conf["hidden_act"] == "relu":
self.hidden_act = tf.nn.relu
elif conf["hidden_act"] == "tanh":
self.hidden_act = tf.nn.tanh
else:
raise ValueError("There is not hidden_act named '%s'." % conf["hidden_act"])
# final_act = leaky-relu
if conf["final_act"] == "relu":
self.final_act = tf.nn.relu
elif conf["final_act"] == "linear":
self.final_act = tf.identity
elif conf["final_act"] == "leaky_relu":
self.final_act = tf.nn.leaky_relu
else:
raise ValueError("There is not final_act named '%s'." % conf["final_act"])
if conf["loss"] == "bpr_max":
self.loss_fun = self._bpr_max_loss
elif conf["loss"] == "top1_max":
self.loss_fun = self._top1_max_loss
else:
raise ValueError("There is not loss named '%s'." % conf["loss"])
self.data_uit, self.offset_idx = self._init_data()
# for sampling negative items
_, pop = np.unique(self.data_uit[:, 1], return_counts=True)
pop = np.power(pop, self.sample_alpha)
pop_cumsum = np.cumsum(pop)
self.pop_cumsum = pop_cumsum / pop_cumsum[-1]
self.sess = sess
def _init_data(self):
time_dok = self.dataset.time_matrix.todok()
data_uit = [[row, col, time] for (row, col), time in time_dok.items()]
data_uit.sort(key=lambda x: (x[0], x[-1]))
data_uit = np.array(data_uit, dtype=np.int32)
_, idx = np.unique(data_uit[:, 0], return_index=True)
offset_idx = np.zeros(len(idx)+1, dtype=np.int32)
offset_idx[:-1] = idx
offset_idx[-1] = len(data_uit)
return data_uit, offset_idx
def _create_variable(self):
self.X_ph = tf.placeholder(tf.int32, [self.batch_size], name='input')
self.Y_ph = tf.placeholder(tf.int32, [self.batch_size+self.n_sample], name='output')
self.state_ph = [tf.placeholder(tf.float32, [self.batch_size, n_unit], name='layer_%d_state' % idx)
for idx, n_unit in enumerate(self.layers)]
init = tf.random.truncated_normal([self.items_num, self.layers[0]], mean=0.0, stddev=0.01)
self.input_embeddings = tf.Variable(init, dtype=tf.float32, name="input_embeddings")
init = tf.random.truncated_normal([self.items_num, self.layers[-1]], mean=0.0, stddev=0.01)
self.item_embeddings = tf.Variable(init, dtype=tf.float32, name="item_embeddings")
self.item_biases = tf.Variable(tf.zeros([self.items_num]), dtype=tf.float32, name="item_biases")
def _softmax_neg(self, logits):
# logits: (b, size_y)
hm = 1.0 - tf.eye(tf.shape(logits)[0], tf.shape(logits)[1])
logits = logits * hm
logits = logits - tf.reduce_max(logits, axis=1, keep_dims=True)
e_x = tf.exp(logits) * hm # (b, size_y)
e_x = e_x / tf.reduce_sum(e_x, axis=1, keep_dims=True)
return e_x # (b, size_y)
def _bpr_max_loss(self, logits):
# logits: (b, size_y)
softmax_scores = self._softmax_neg(logits) # (b, size_y)
pos_logits = tf.matrix_diag_part(logits) # (b,)
pos_logits = tf.reshape(pos_logits, shape=[-1, 1]) # (b, 1)
prob = tf.sigmoid((pos_logits - logits)) # (b, size_y)
prob = tf.reduce_sum(tf.multiply(prob, softmax_scores), axis=1) # (b,)
loss = -tf.log(prob + 1e-24)
reg_loss = tf.reduce_sum(tf.multiply(tf.pow(logits, 2), softmax_scores), axis=1) # (b,)
return tf.reduce_mean(loss + self.bpr_reg*reg_loss)
def _top1_max_loss(self, logits):
softmax_scores = self._softmax_neg(logits) # (b, size_y)
pos_logits = tf.matrix_diag_part(logits) # (b,)
pos_logits = tf.reshape(pos_logits, shape=[-1, 1]) # (b, 1)
prob = tf.sigmoid(-pos_logits + logits) + tf.sigmoid(tf.pow(logits, 2))
loss = tf.reduce_sum(tf.multiply(prob, softmax_scores), axis=1)
return tf.reduce_mean(loss)
def build_graph(self):
self._create_variable()
# get embedding and bias
# b: batch size
# l1: the dim of the first layer
# ln: the dim of the last layer
# size_y: the length of Y_ph, i.e., n_sample+batch_size
cells = [tf.nn.rnn_cell.GRUCell(size, activation=self.hidden_act) for size in self.layers]
drop_cell = [tf.nn.rnn_cell.DropoutWrapper(cell) for cell in cells]
stacked_cell = tf.nn.rnn_cell.MultiRNNCell(drop_cell)
inputs = tf.nn.embedding_lookup(self.input_embeddings, self.X_ph) # (b, l1)
outputs, state = stacked_cell(inputs, state=self.state_ph)
self.u_emb = outputs # outputs: (b, ln)
self.final_state = state # [(b, l1), (b, l2), ..., (b, ln)]
# for training
items_embed = tf.nn.embedding_lookup(self.item_embeddings, self.Y_ph) # (size_y, ln)
items_bias = tf.gather(self.item_biases, self.Y_ph) # (size_y,)
logits = tf.matmul(outputs, items_embed, transpose_b=True) + items_bias # (b, size_y)
logits = self.final_act(logits)
loss = self.loss_fun(logits)
# reg loss
reg_loss = l2_loss(inputs, items_embed, items_bias)
final_loss = loss + self.reg*reg_loss
self.update_opt = tf.train.AdamOptimizer(self.lr).minimize(final_loss)
def _sample_neg_items(self, size):
samples = np.searchsorted(self.pop_cumsum, np.random.rand(size))
return samples
def train_model(self):
self.logger.info(self.evaluator.metrics_info())
data_uit, offset_idx = self.data_uit, self.offset_idx
data_items = data_uit[:, 1]
for epoch in range(self.epochs):
state = [np.zeros([self.batch_size, n_unit], dtype=np.float32) for n_unit in self.layers]
user_idx = np.random.permutation(len(offset_idx) - 1)
iters = np.arange(self.batch_size, dtype=np.int32)
maxiter = iters.max()
start = offset_idx[user_idx[iters]]
end = offset_idx[user_idx[iters]+1]
finished = False
while not finished:
min_len = (end - start).min()
out_idx = data_items[start]
for i in range(min_len-1):
in_idx = out_idx
out_idx = data_items[start+i+1]
out_items = out_idx
if self.n_sample:
neg_items = self._sample_neg_items(self.n_sample)
out_items = np.hstack([out_items, neg_items])
feed = {self.X_ph: in_idx, self.Y_ph: out_items}
for l in range(len(self.layers)):
feed[self.state_ph[l]] = state[l]
_, state = self.sess.run([self.update_opt, self.final_state], feed_dict=feed)
start = start+min_len-1
mask = np.arange(len(iters))[(end - start) <= 1]
for idx in mask:
maxiter += 1
if maxiter >= len(offset_idx)-1:
finished = True
break
iters[idx] = maxiter
start[idx] = offset_idx[user_idx[maxiter]]
end[idx] = offset_idx[user_idx[maxiter]+1]
if len(mask):
for i in range(len(self.layers)):
state[i][mask] = 0
result = self.evaluate_model()
self.logger.info("epoch %d:\t%s" % (epoch, result))
def _get_user_embeddings(self):
users = np.arange(self.users_num, dtype=np.int32)
u_nnz = np.array([self.train_matrix[u].nnz for u in users], dtype=np.int32)
users = users[np.argsort(-u_nnz)]
user_embeddings = np.zeros([self.users_num, self.layers[-1]], dtype=np.float32) # saving user embedding
data_uit, offset_idx = self.data_uit, self.offset_idx
data_items = data_uit[:, 1]
state = [np.zeros([self.batch_size, n_unit], dtype=np.float32) for n_unit in self.layers]
batch_iter = np.arange(self.batch_size, dtype=np.int32)
next_iter = batch_iter.max() + 1
start = offset_idx[users[batch_iter]]
end = offset_idx[users[batch_iter] + 1] # the start index of next user
batch_mask = np.ones([self.batch_size], dtype=np.int32)
while np.sum(batch_mask) > 0:
min_len = (end - start).min()
for i in range(min_len):
cur_items = data_items[start + i]
feed = {self.X_ph: cur_items}
for l in range(len(self.layers)):
feed[self.state_ph[l]] = state[l]
u_emb, state = self.sess.run([self.u_emb, self.final_state], feed_dict=feed)
start = start + min_len
mask = np.arange(self.batch_size)[(end - start) == 0]
for idx in mask:
u = users[batch_iter[idx]]
user_embeddings[u] = u_emb[idx] # saving user embedding
if next_iter < self.users_num:
batch_iter[idx] = next_iter
start[idx] = offset_idx[users[next_iter]]
end[idx] = offset_idx[users[next_iter] + 1]
next_iter += 1
else:
batch_mask[idx] = 0
start[idx] = 0
end[idx] = offset_idx[-1]
for i, _ in enumerate(self.layers):
state[i][mask] = 0
return user_embeddings
def evaluate_model(self):
    """Refresh the cached user/item factors from the current session weights,
    then delegate scoring to the evaluator."""
    self.cur_user_embeddings = self._get_user_embeddings()
    self.cur_item_embeddings, self.cur_item_biases = self.sess.run([self.item_embeddings, self.item_biases])
    return self.evaluator.evaluate(self)
def predict(self, users, items=None):
    """Score items for the given users with the cached embeddings.

    Args:
        users: indices into the cached user-embedding table.
        items: optional per-user item index lists; when given, only those
            scores are returned (one entry per user).
    Returns:
        float32 ratings: a full users-x-items matrix, or the selected scores.
    """
    user_embeddings = self.cur_user_embeddings[users]
    all_ratings = np.matmul(user_embeddings, self.cur_item_embeddings.T) + self.cur_item_biases
    # Re-apply the network's final activation in NumPy. tf.identity (and any
    # unrecognized activation) leaves the raw scores unchanged, so those
    # branches need no code.
    if self.final_act == tf.nn.relu:
        all_ratings = np.maximum(all_ratings, 0)
    elif self.final_act == tf.nn.leaky_relu:
        # leaky-relu with the default slope of 0.2
        all_ratings = np.maximum(all_ratings, all_ratings * 0.2)
    all_ratings = np.array(all_ratings, dtype=np.float32)
    if items is not None:
        all_ratings = [all_ratings[idx][item] for idx, item in enumerate(items)]
    return all_ratings
|
common/src/stack/pylib/stack/dist.py | kmcm0/stacki | 123 | 111402 | #! /opt/stack/bin/python
#
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import os
import types
import stack.file
class Arch:
    """
    Base class that understands Linux architecture strings and nothing
    else. All distributions need this information, as does other code
    that handles rpms.
    """

    def __init__(self):
        self.arch = ''
        self.distArch = ''
        self.cpus = []
        # Canonical list of i386-family CPU names; must never be mutated.
        self.i86cpus = ['athlon', 'i686', 'i586', 'i486', 'i386']

    def getCPUs(self):
        """Return the list of CPU strings compatible with the current arch."""
        return self.cpus

    def getArch(self):
        """Return the normalized architecture string (e.g. 'i386')."""
        return self.arch

    def getDistArch(self):
        """Return the distribution's architecture label."""
        return self.distArch

    def setArch(self, arch, distArch=None):
        """
        Set the architecture and derive the compatible CPU list.

        The two architectures are to handle trends like
        the AMD64 dist arch, where the true arch is x86_64.
        NOTE: This trend does not exist with RHEL.
        """
        self.arch = arch
        if arch in self.i86cpus:
            # Bug fix: copy the list. The original assigned the reference,
            # so the extend() below corrupted self.i86cpus itself.
            self.cpus = list(self.i86cpus)
            self.arch = 'i386'
        elif arch == 'x86_64':
            self.cpus = [arch, 'ia32e']
            self.cpus.extend(self.i86cpus)
        else:
            self.cpus = [arch]
        # Source and arch-independent packages are valid everywhere.
        self.cpus.extend(['src', 'noarch'])
        self.distArch = distArch if distArch else arch
|
river/linear_model/perceptron.py | fox-ds/river | 2,184 | 111406 | from river import optim
from .log_reg import LogisticRegression
class Perceptron(LogisticRegression):
    """Perceptron classifier.

    In this implementation, the Perceptron is viewed as a special case of the logistic regression.
    The loss function that is used is the Hinge loss with a threshold set to 0, whilst the learning
    rate of the stochastic gradient descent procedure is set to 1 for both the weights and the
    intercept.

    Parameters
    ----------
    l2
        Amount of L2 regularization used to push weights towards 0.
    clip_gradient
        Clips the absolute value of each gradient value.
    initializer
        Weights initialization scheme.

    Attributes
    ----------
    weights
        The current weights.

    Examples
    --------

    >>> from river import datasets
    >>> from river import evaluate
    >>> from river import linear_model as lm
    >>> from river import metrics
    >>> from river import preprocessing as pp

    >>> dataset = datasets.Phishing()

    >>> model = pp.StandardScaler() | lm.Perceptron()

    >>> metric = metrics.Accuracy()

    >>> evaluate.progressive_val_score(dataset, model, metric)
    Accuracy: 85.84%

    """

    def __init__(
        self,
        l2=0.0,
        clip_gradient=1e12,
        initializer: optim.initializers.Initializer = None,
    ):
        # The Perceptron update rule is recovered from logistic regression by
        # pairing a Hinge loss thresholded at 0 with a constant unit learning
        # rate for both the weights (SGD(1)) and the intercept.
        super().__init__(
            optimizer=optim.SGD(1),
            intercept_lr=1,
            loss=optim.losses.Hinge(threshold=0.0),
            l2=l2,
            clip_gradient=clip_gradient,
            initializer=initializer,
        )
|
raspberryturk/embedded/__init__.py | Dzhuks/raspberryturk | 169 | 111409 | <gh_stars>100-1000
from raspberryturk import is_running_on_raspberryturk, RaspberryTurkError
# Import-time guard: this package drives Raspberry Turk hardware, so refuse
# to be imported anywhere else.
if not is_running_on_raspberryturk():
    raise RaspberryTurkError("Must be running on Raspberry Turk to use {} module.".format(__name__))
|
tests/namespaced/api/urls.py | pavanv/django-tastypie | 1,570 | 111415 | from django.conf import settings
from django.conf.urls import include, url
from tastypie.api import NamespacedApi
from namespaced.api.resources import NamespacedNoteResource, NamespacedUserResource
# Register both resources as canonical so they generate reverse URLs inside
# the 'special' namespace.
api = NamespacedApi(api_name='v1', urlconf_namespace='special')
api.register(NamespacedNoteResource(), canonical=True)
api.register(NamespacedUserResource(), canonical=True)

# Django 1.9 changed include(): the namespace moved from a keyword argument
# into a (urls, app_namespace) 2-tuple.
if settings.DJANGO_VERSION >= settings.DJANGO_19:
    included = include((api.urls, 'special'))
else:
    included = include(api.urls, namespace='special')

urlpatterns = [
    url(r'^api/', included),
]
|
presto-benchto-benchmarks/generate_schemas/generate-tpch.py | sreekanth370/presto | 9,782 | 111432 | #!/usr/bin/env python
# (new_schema, source_schema) pairs: each Hive schema is populated from the
# Presto TPC-H connector (ORC) or re-encoded from the ORC copy (TEXTFILE).
schemas = [
    ('tpch_10gb_orc', 'tpch.sf10'),
    ('tpch_100gb_orc', 'tpch.sf100'),
    ('tpch_1tb_orc', 'tpch.sf1000'),
    ('tpch_10tb_orc', 'tpch.sf10000'),
    ('tpch_10gb_text', 'hive.tpch_10gb_orc'),
    ('tpch_100gb_text', 'hive.tpch_100gb_orc'),
    ('tpch_1tb_text', 'hive.tpch_1tb_orc'),
    ('tpch_10tb_text', 'hive.tpch_10tb_orc'),
]

# The eight standard TPC-H tables.
tables = [
    'customer',
    'lineitem',
    'nation',
    'orders',
    'part',
    'partsupp',
    'region',
    'supplier',
]

for (new_schema, source_schema) in schemas:
    # Derive the Hive storage format from the schema-name suffix.
    # (renamed from `format` to avoid shadowing the builtin)
    if new_schema.endswith('_orc'):
        file_format = 'ORC'
    elif new_schema.endswith('_text'):
        file_format = 'TEXTFILE'
    else:
        raise ValueError(new_schema)
    # print() calls (not Python-2 print statements) so the script runs on
    # both Python 2 and Python 3.
    print('CREATE SCHEMA hive.%s;' % (new_schema,))
    for table in tables:
        print('CREATE TABLE "hive"."%s"."%s" WITH (format = \'%s\') AS SELECT * FROM %s."%s";' %
              (new_schema, table, file_format, source_schema, table))
|
examples/primitives.py | microprediction/simdkalman | 148 | 111459 | import numpy as np
from simdkalman.primitives import predict, update
# define model
# define model: constant-velocity system with state [position, velocity]
state_transition = np.array([[1, 1], [0, 1]])
process_noise = np.eye(2) * 0.01
# only the position component is observed
observation_model = np.array([[1, 0]])
observation_noise = np.array([[1.0]])

# initial state: mean m and covariance P
m = np.array([0, 1])
P = np.eye(2)

# predict next state
m, P = predict(m, P, state_transition, process_noise)

# first observation: correct the prediction with the measurement
y = np.array([4])
m, P = update(m, P, observation_model, observation_noise, y)

# predict second state
m, P = predict(m, P, state_transition, process_noise)

print('mean')
print(m)
print('cov')
print(P)
|
tools/utils/elf.py | arcilat-adsk/arnold-usd | 171 | 111563 | <filename>tools/utils/elf.py
# vim: filetype=python
# Copyright 2019 Autodesk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .contrib.elftools.common.exceptions import ELFError
from .contrib.elftools.common.py3compat import bytes2str
from .contrib.elftools.elf.elffile import ELFFile
from .contrib.elftools.elf.dynamic import DynamicSection
from .contrib.elftools.elf.gnuversions import GNUVerSymSection, GNUVerDefSection, GNUVerNeedSection
from .version import Version
def _get_symbol_version_info(versioninfo, nsym):
    '''
    Return a dict containing information on the symbol version
    or None if no version information is available
    '''
    symbol_version = dict.fromkeys(('index', 'name', 'filename', 'hidden'))

    if not versioninfo['versym'] or nsym >= versioninfo['versym'].num_symbols():
        return None

    symbol = versioninfo['versym'].get_symbol(nsym)
    index = symbol.entry['ndx']
    # `not in` tuple instead of `not ... in [list]` (idiomatic, no list alloc)
    if index not in ('VER_NDX_LOCAL', 'VER_NDX_GLOBAL'):
        index = int(index)

        if versioninfo['type'] == 'GNU':
            # In GNU versioning mode, the highest bit is used to
            # store whether the symbol is hidden or not
            if index & 0x8000:
                index &= ~0x8000
                symbol_version['hidden'] = True

        if versioninfo['verdef'] and index <= versioninfo['verdef'].num_versions():
            # Version defined by this object itself.
            _, verdaux_iter = versioninfo['verdef'].get_version(index)
            symbol_version['name'] = bytes2str(next(verdaux_iter).name)
        else:
            # Version needed from another object.
            verneed, vernaux = versioninfo['verneed'].get_version(index)
            symbol_version['name'] = bytes2str(vernaux.name)
            symbol_version['filename'] = bytes2str(verneed.name)

    symbol_version['index'] = index
    return symbol_version
def get_maximum_symbol_version(filename):
    '''
    Return a dict containing information about the maximum versioned symbols
    in the library: version-name prefix (e.g. 'GLIBC') -> highest Version.
    Returns an empty dict when the file has no usable version information.
    '''
    sv = {}
    with open(filename, 'rb') as file:
        try:
            versioninfo = {'versym': None, 'verdef': None, 'verneed': None, 'type': None}
            elf_file = ELFFile(file)
            for section in elf_file.iter_sections():
                if isinstance(section, GNUVerSymSection):
                    versioninfo['versym'] = section
                elif isinstance(section, GNUVerDefSection):
                    versioninfo['verdef'] = section
                elif isinstance(section, GNUVerNeedSection):
                    versioninfo['verneed'] = section
                elif isinstance(section, DynamicSection):
                    for tag in section.iter_tags():
                        if tag['d_tag'] == 'DT_VERSYM':
                            versioninfo['type'] = 'GNU'
                            break

            if not versioninfo['type'] and (versioninfo['verneed'] or versioninfo['verdef']):
                versioninfo['type'] = 'Solaris'

            if not versioninfo['type'] or not versioninfo['versym']:
                return sv

            # range() instead of Python-2-only xrange(): this module targets
            # Python 3 as well (see the py3compat import).
            for idx in range(versioninfo['versym'].num_symbols()):
                symbol_version = _get_symbol_version_info(versioninfo, idx)
                if symbol_version['index'] not in ('VER_NDX_LOCAL', 'VER_NDX_GLOBAL'):
                    # Names look like PREFIX_MAJOR.MINOR...; keep the maximum
                    # version seen for each prefix.
                    prefix, sep, suffix = symbol_version['name'].partition('_')
                    if sep and suffix:
                        version = Version(suffix)
                        if version > sv.get(prefix, Version(None)):
                            sv[prefix] = version
            return sv
        except ELFError:
            # Not a valid ELF file: report no version info rather than raising.
            return sv
|
tests/components/roku/test_remote.py | MrDelik/core | 30,023 | 111565 | """The tests for the Roku remote platform."""
from unittest.mock import MagicMock
from homeassistant.components.remote import (
ATTR_COMMAND,
DOMAIN as REMOTE_DOMAIN,
SERVICE_SEND_COMMAND,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry
from tests.components.roku import UPNP_SERIAL
MAIN_ENTITY_ID = f"{REMOTE_DOMAIN}.my_roku_3"
# pylint: disable=redefined-outer-name
async def test_setup(hass: HomeAssistant, init_integration: MockConfigEntry) -> None:
    """Test setup with basic config."""
    # The main remote entity must exist once the config entry is loaded.
    assert hass.states.get(MAIN_ENTITY_ID)
async def test_unique_id(
    hass: HomeAssistant, init_integration: MockConfigEntry
) -> None:
    """Test unique id."""
    entity_registry = er.async_get(hass)

    main = entity_registry.async_get(MAIN_ENTITY_ID)
    # The remote's unique id is the device's UPnP serial number.
    assert main.unique_id == UPNP_SERIAL
async def test_main_services(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_roku: MagicMock,
) -> None:
    """Test platform services."""
    # Each case: (service, extra service data, expected Roku keypress).
    cases = [
        (SERVICE_TURN_OFF, {}, "poweroff"),
        (SERVICE_TURN_ON, {}, "poweron"),
        (SERVICE_SEND_COMMAND, {ATTR_COMMAND: ["home"]}, "home"),
    ]

    for expected_calls, (service, extra, keypress) in enumerate(cases, start=1):
        await hass.services.async_call(
            REMOTE_DOMAIN,
            service,
            {ATTR_ENTITY_ID: MAIN_ENTITY_ID, **extra},
            blocking=True,
        )

        assert mock_roku.remote.call_count == expected_calls
        mock_roku.remote.assert_called_with(keypress)
|
core/company.py | Meliueada/Starup-Game-Python | 160 | 111574 | '''
Time: 2015.10.2
Author: Lionel
Content: Company
'''
class Company(object):
    """A company identified by a mutable display name."""

    def __init__(self, name=None):
        # The name may stay None until the player chooses one.
        self._name = name

    @property
    def name(self):
        """The company's display name."""
        return self._name

    @name.setter
    def name(self, value):
        self._name = value
|
tempest/lib/api_schema/response/compute/v2_36/quotas.py | rishabh20111990/tempest | 254 | 111611 | <filename>tempest/lib/api_schema/response/compute/v2_36/quotas.py
# Copyright 2018 ZTE Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.api_schema.response.compute.v2_1 import quotas as quotasv21
# Compute microversion 2.36 removed the network-related attributes from
# quota_set:
#   'fixed_ips', 'floating_ips', 'security_group_rules', 'security_groups'
remove_item_list = ['fixed_ips', 'floating_ips',
                    'security_group_rules', 'security_groups']


def _remove_network_quota_items(schema):
    """Return a deep copy of a v2.1 quota schema with the removed
    network-related attributes stripped from its quota_set."""
    new_schema = copy.deepcopy(schema)
    quota_set = new_schema['response_body']['properties']['quota_set']
    for item in remove_item_list:
        quota_set['properties'].pop(item)
        quota_set['required'].remove(item)
    return new_schema


update_quota_set = _remove_network_quota_items(quotasv21.update_quota_set)
get_quota_set = _remove_network_quota_items(quotasv21.get_quota_set)
get_quota_set_details = _remove_network_quota_items(quotasv21.get_quota_set_details)

# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
# to keep this schema in this file to have the generic way to select the
# right schema based on self.schema_versions_info mapping in service client.
# ****** Schemas unchanged since microversion 2.1 ***
delete_quota = copy.deepcopy(quotasv21.delete_quota)
src/main/anonymization/base_anonymization.py | BMW-InnovationLab/BMW-Anonymization-API | 108 | 111619 | <filename>src/main/anonymization/base_anonymization.py<gh_stars>100-1000
from abc import ABC, abstractmethod
class BaseAnonymization(ABC):
    """
    Base anonymization class for the detection and the semantic anonymization
    """

    @abstractmethod
    def blurring(self, image, response, degree=None, id=None, mask=None):
        """Blur the regions of *image* selected by *response*."""
        pass

    @abstractmethod
    def pixelating(self, image, response, degree=None, id=None, mask=None):
        """Pixelate the regions of *image* selected by *response*."""
        pass

    @abstractmethod
    def blackening(self, image, response, degree=None, id=None, mask=None):
        """Black out the regions of *image* selected by *response*."""
        pass
|
python/depthcharge/memory/writer.py | justinforbes/depthcharge | 133 | 111626 | # SPDX-License-Identifier: BSD-3-Clause
# Depthcharge: <https://github.com/nccgroup/depthcharge>
"""
Provides MemoryWriter base class
"""
from ..operation import Operation
class MemoryWriter(Operation):
    """
    This base class extends :py:class:`~depthcharge.Operation`
    to provide memory :py:meth:`write()` and :py:meth:`write_from_file()` methods.

    The constructor takes a single :py:class:`~depthcharge.Depthcharge` context
    object, as well as an optional *block_size* keyword argument.

    The *block_size* values can be used to override the number of bytes
    written at a time. The default value is 128, but some subclasses
    override this with a more appropriate default or do not respect
    this value. You probably don't want or need to change this.
    """

    def _setup(self, addr, data):
        """
        Subclasses should override this method to perform any necessary setup
        activities (e.g. "configure Companion device") .

        The :py:class:`~.MemoryWriter` base class implementation is a no-op.
        """
        # No-op base implementation

    def _teardown(self):
        """
        Subclasses should override this method to perform any necessary clean-up
        activities (e.g. "exit sub-prompt").

        The :py:class:`~.MemoryWriter` base class implementation is a no-op.
        """
        # No-op base implementation

    def __init__(self, ctx, **kwargs):
        # Pop block_size so it is not forwarded to the Operation constructor.
        block_size = int(kwargs.pop('block_size', 128))
        super().__init__(ctx, **kwargs)

        # Used by write() to iterate over a payload in fixed-size chunks.
        # Status progress is update in increments of this size.
        self._block_size = block_size

        # Allow the API user to override this at call-time
        self._allow_block_size_override = True

    def _describe_op(self, addr, data):
        """
        Return a string (suitable for logging) that describes the write
        operation that would be performed with the provided arguments.
        """
        s = '({:s}) Writing {:d} bytes @ 0x{:08x}'
        return s.format(self.name, len(data), addr)

    def _write(self, addr: int, data: bytes, **kwargs):
        """
        Subclasses of :py:class:`~depthcharge.memory.writer.MemoryWriter` must
        implement this method to perform the actual write operation.
        """
        raise NotImplementedError('Bug: MemoryWriter subclass does not implement _write().')

    def write(self, addr: int, data: bytes, **kwargs):
        """
        Write *data* to the specified address (*addr*).

        Specify a *show_progress=False* keyword argument to disable the progress
        bar printed during the write operation.
        """
        if self._allow_block_size_override:
            block_size = kwargs.get('block_size', self._block_size)
        else:
            block_size = self._block_size

        size = len(data)
        desc = '({:s}) Writing {:d} bytes @ 0x{:08x}'.format(self.name, size, addr)
        show = kwargs.get('show_progress', True)
        progress = self._ctx.create_progress_indicator(self, size, desc, unit='B', show=show)

        # Run any setup operations
        if not kwargs.get('suppress_setup', False):
            self._setup(addr, data)

        try:
            for offset in range(0, size, block_size):
                to_write = block_size
                if (size - offset) < block_size:
                    # Final partial chunk.
                    to_write = (size - offset)

                data_slice = data[offset:offset + to_write]
                self._write(addr + offset, data_slice, **kwargs)
                progress.update(to_write)
        finally:
            # Always tear down and close the progress indicator, even when
            # the write fails partway through.
            if not kwargs.get('suppress_teardown', False):
                self._teardown()
            self._ctx.close_progress_indicator(progress)

    def write_from_file(self, addr: int, filename: str, **kwargs):
        """
        Open the file specified via *filename* and write its contents to
        the address indicated by *addr*.

        Specify a *show_progress=False* keyword argument to disable the progress
        bar printed during the write operation.
        """
        with open(filename, 'rb') as infile:
            data = infile.read()

        self.write(addr, data, **kwargs)
class MemoryWordWriter(MemoryWriter):
    """
    A MemoryWordWriter is a specific type of :py:class:`~.MemoryWriter` that
    can only operate on byte, word, long-word, and potentially quad-word sized
    data. The constructor takes a single :py:class:`~depthcharge.Depthcharge`
    context object.

    Subclasses must implement :py:meth:`_write_word`. This parent class will
    take care of invoking this method as needed to perform arbitrary-sized
    writes.
    """

    def __init__(self, ctx, **kwargs):
        super().__init__(ctx, **kwargs)
        self._mode = self._ctx.arch.word_sizes()
        self._word_size = self._ctx.arch.word_size
        # Chunk writes by the architecture's native word size.
        self._block_size = self._word_size

        # Word writers operate strictly one word at a time; a caller-supplied
        # block_size would not be honored by _write() below.
        self._allow_block_size_override = False

    def _write_word(self, addr: int, data: bytes, **kwargs):
        """
        Subclasses of :py:class:`~depthcharge.memory.MemoryWordWriter` must
        implement this method to perform the actual word-write operation.
        """
        raise NotImplementedError('Subclass bug: _write_word() not implemented')

    def _write(self, addr: int, data: bytes, **kwargs):
        size = len(data)
        assert size <= self._block_size

        i = 0

        # Write byte-by-byte until we're word-aligned.
        while not self._ctx.arch.is_word_aligned(addr + i):
            self._write_word(addr + i, data[i:i + 1])
            i += 1

        # Then write the widest unit available for each remaining span.
        while i < size:
            remaining = size - i
            if remaining >= 8 and self._ctx.arch.supports_64bit_data:
                to_write = 8
            elif remaining >= 4:
                to_write = 4
            elif remaining >= 2:
                to_write = 2
            else:
                to_write = 1

            self._write_word(addr + i, data[i:i + to_write])
            i += to_write
|
models/SFMnet.py | jytime/Deep-SfM-Revisited | 126 | 111667 | <gh_stars>100-1000
from __future__ import print_function
import torch
import numpy as np
import cv2
import utils
import time
from models import DICL_shallow
from models.RAFT.core.raft import RAFT
from models import PSNet as PSNet
import essential_matrix
from epipolar_utils import *
from models.PoseNet import ResNet,Bottleneck, PlainPose
from lib.config import cfg, cfg_from_file, save_config_to_file
# for speed analysis: populated with per-stage timings and returned from
# SFMnet.forward() at evaluation time.
# (NOTE: `global` at module scope is a no-op; kept for byte-compatibility.)
global time_dict
time_dict = {}

try:
    autocast = torch.cuda.amp.autocast
except:
    # dummy autocast for PyTorch < 1.6: a no-op context manager with the
    # same `enabled=` constructor signature.
    class autocast:
        def __init__(self, enabled):
            pass

        def __enter__(self):
            pass

        def __exit__(self, *args):
            pass
class SFMnet(torch.nn.Module):
    """End-to-end deep SfM network: optical flow -> relative pose -> depth.

    The flow, pose and depth sub-networks are all selected via the global
    `cfg` object at construction time.
    """

    def __init__(self, nlabel=64, min_depth=0.5):
        super(SFMnet, self).__init__()

        ##### Hyperparameters #####
        self.delta = 0.001
        self.alpha = 0.0
        self.maxreps = 200
        self.min_matches = cfg.min_matches
        self.ransac_iter = cfg.ransac_iter
        self.ransac_threshold = cfg.ransac_threshold

        # number of depth hypotheses (plane-sweep labels)
        self.nlabel = nlabel
        # set minimum depth, to avoid numerical errors
        self.min_depth = min_depth

        # choose your flow estimator, default as DICL_shallow
        if cfg.FLOW_EST == 'RAFT':
            self.flow_estimator = RAFT()
        elif cfg.FLOW_EST == 'DICL':
            self.flow_estimator = DICL_shallow()
        else:
            raise NotImplementedError

        # choose your depth estimator, default as PSNet
        # (alternatives are imported lazily so unused backends add no cost)
        if cfg.DEPTH_EST == 'PSNET':
            self.depth_estimator = PSNet(nlabel, min_depth)
        elif cfg.DEPTH_EST == 'CVP':
            from models.CVPMVS import CVPMVS
            self.depth_estimator = CVPMVS()
        elif cfg.DEPTH_EST == 'PANET':
            from models.PANet import PANet
            self.depth_estimator = PANet(nlabel, min_depth)
        elif cfg.DEPTH_EST == 'REGNET':
            from models.REGNet import REGNet
            self.depth_estimator = REGNet(nlabel, min_depth)
        elif cfg.DEPTH_EST == 'REG2D':
            from models.REG2D import REG2D
            self.depth_estimator = REG2D(nlabel, min_depth)
        elif cfg.DEPTH_EST == 'DISPNET':
            from models.DISPNET import DISPNET
            self.depth_estimator = DISPNET(nlabel, min_depth)
        else:
            raise NotImplementedError

        # sift feature extraction (SURF as a fallback with more keypoints)
        self.sift = cv2.xfeatures2d.SIFT_create()
        self.surf = cv2.xfeatures2d.SURF_create()
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        self.flann = cv2.FlannBasedMatcher(index_params, search_params)

        if cfg.POSE_EST == 'POSENET':
            if cfg.POSE_NET_TYPE == 'plain':
                self.posenet = PlainPose()
            elif cfg.POSE_NET_TYPE == 'res':
                self.posenet = ResNet(Bottleneck, [3, 4, 6, 3])
            else:
                raise NotImplementedError

    def forward(self, ref, target, intrinsic, pose_gt=None, pred_pose=None, use_gt_pose=False,
                h_side=None, w_side=None, logger=None, depth_gt=None, img_path=None):
        """Run flow -> pose -> depth on a (ref, target) image pair.

        h_side/w_side crop away padding added for the flow network.
        Returns (flow, pose, depth, ...) tuples whose exact shape depends on
        training mode and cfg flags (see the return statements below).
        """
        # if TRAIN_FLOW, we only conduct flow estimation
        if self.training and cfg.TRAIN_FLOW:
            flow_outputs = self.flow_estimator(torch.cat((ref, target), dim=1))
            return flow_outputs

        intrinsic_gpu = intrinsic.float().cuda()  # Bx3x3
        intrinsic_inv_gpu = torch.inverse(intrinsic_gpu)  # Bx3x3

        # Default, if do not use ground truth poses for training
        if use_gt_pose == False:
            # if predict relative poses online, or use pre-saved poses
            if cfg.PRED_POSE_ONLINE:
                # flow estimation
                with autocast(enabled=cfg.MIXED_PREC):
                    flow_start = time.time()
                    flow_2D, conf = self.flow_estimator(torch.cat((ref, target), dim=1))
                # recover image shape, to avoid meaningless flow matches
                if h_side is not None or w_side is not None:
                    flow_2D = flow_2D[:, :, :h_side, :w_side]
                    try:
                        # some flow estimators do not provide a confidence map
                        conf = conf[:, :, :h_side, :w_side]
                    except:
                        conf = conf
                # choose how to estimate pose, by RANSAC or deep regression
                if cfg.POSE_EST == 'RANSAC':
                    # some inputs are left for possible visualization or debug, plz ignore them if not
                    # return: Pose matrix Bx3x4
                    #         Essential matrix Bx3x3
                    P_mat, E_mat = self.pose_by_ransac(flow_2D, ref, target, intrinsic_inv_gpu,
                                                       h_side, w_side, pose_gt=pose_gt, img_path=img_path)
                    rot_and_trans = None
                elif cfg.POSE_EST == 'POSENET':
                    rot_and_trans = self.posenet(flow_2D, conf, ref, target)
                    P_mat = RT2Pose(rot_and_trans)
                else:
                    raise NotImplementedError
            else:
                # use ground truth poses, for oracle experiments
                P_mat = pred_pose; E_mat = None; flow_2D = None
                # if only use gt scales, for oracle experiments
                if cfg.PRED_POSE_GT_SCALE:
                    scale = torch.norm(pose_gt[:, :3, 3], dim=1, p=2).unsqueeze(1).unsqueeze(1)
                    P_mat[:, :, -1:] = P_mat[:, :, -1:] * scale
                P_mat.unsqueeze_(1)
        else:
            E_mat = None
            P_mat = pose_gt.clone()
            if cfg.GT_POSE_NORMALIZED:
                # strip the translation scale to match scale-free prediction
                scale = torch.norm(P_mat[:, :3, 3], dim=1, p=2).unsqueeze(1).unsqueeze(1)
                P_mat[:, :, -1:] = P_mat[:, :, -1:] / scale
            P_mat.unsqueeze_(1)
            flow_2D = torch.zeros([ref.shape[0], 2, ref.shape[2], ref.shape[3]]).cuda().type_as(ref)

        if cfg.RECORD_POSE or (cfg.RECORD_POSE_EVAL and not self.training):
            # pose-only mode: skip depth estimation entirely
            return P_mat, flow_2D

        if h_side is not None or w_side is not None:
            ref = ref[:, :, :h_side, :w_side]; target = target[:, :, :h_side, :w_side]

        # depth prediction
        with autocast(enabled=cfg.MIXED_PREC):
            depth_start = time.time()
            depth_init, depth = self.depth_estimator(ref, [target], P_mat, intrinsic_gpu, intrinsic_inv_gpu, pose_gt=pose_gt, depth_gt=depth_gt, E_mat=E_mat)

        if self.training:
            # rot_and_trans is only used for pose deep regression
            # otherwise, it is None
            return flow_2D, P_mat, depth, depth_init, rot_and_trans

        return flow_2D, P_mat, depth, time_dict

    def pose_by_ransac(self, flow_2D, ref, target, intrinsic_inv_gpu,
                       h_side, w_side, pose_gt=False, img_path=None):
        """Estimate per-batch relative pose from flow correspondences.

        SIFT/SURF keypoints select which flow matches to feed the RANSAC
        five-point solver; returns (P_mat Bx3x4, E_mat Bx3x3).
        """
        b, _, h, w = flow_2D.size()
        coord1_flow_2D, coord2_flow_2D = flow2coord(flow_2D)  # Bx3x(H*W)
        coord1_flow_2D = coord1_flow_2D.view(b, 3, h, w)
        coord2_flow_2D = coord2_flow_2D.view(b, 3, h, w)

        margin = 10  # avoid corner case
        E_mat = torch.zeros(b, 3, 3).cuda()  # Bx3x3
        P_mat = torch.zeros(b, 3, 4).cuda()  # Bx3x4
        PTS1 = []; PTS2 = []  # point list

        # process the frames of each batch
        for b_cv in range(b):
            # convert images to cv2 style (HWC, BGR, [0,255])
            if h_side is not None or w_side is not None:
                ref_cv = ref[b_cv, :, :h_side, :w_side].cpu().numpy().transpose(1, 2, 0)[:, :, ::-1]
                tar_cv = target[b_cv, :, :h_side, :w_side].cpu().numpy().transpose(1, 2, 0)[:, :, ::-1]
            else:
                ref_cv = ref[b_cv].cpu().numpy().transpose(1, 2, 0)[:, :, ::-1]
                tar_cv = target[b_cv].cpu().numpy().transpose(1, 2, 0)[:, :, ::-1]
            # undo the [-1,1] normalization (assumes mean 0.5 / std 0.5 inputs)
            ref_cv = (ref_cv * 0.5 + 0.5) * 255; tar_cv = (tar_cv * 0.5 + 0.5) * 255

            # detect key points
            kp1, des1 = self.sift.detectAndCompute(ref_cv.astype(np.uint8), None)
            kp2, des2 = self.sift.detectAndCompute(tar_cv.astype(np.uint8), None)

            if len(kp1) < self.min_matches or len(kp2) < self.min_matches:
                # surf generally has more kps than sift
                kp1, des1 = self.surf.detectAndCompute(ref_cv.astype(np.uint8), None)
                kp2, des2 = self.surf.detectAndCompute(tar_cv.astype(np.uint8), None)

            try:
                # filter out some key points (Lowe's ratio test, 0.8)
                matches = self.flann.knnMatch(des1, des2, k=2)
                good = []; pts1 = []; pts2 = []
                for i, (m, n) in enumerate(matches):
                    if m.distance < 0.8 * n.distance: good.append(m); pts1.append(kp1[m.queryIdx].pt); pts2.append(kp2[m.trainIdx].pt)
                # degrade gracefully: if too few good matches, keep them all
                if len(good) < self.min_matches:
                    good = []; pts1 = []; pts2 = []
                    for i, (m, n) in enumerate(matches):
                        good.append(m); pts1.append(kp1[m.queryIdx].pt); pts2.append(kp2[m.trainIdx].pt)
                pts1 = np.array(pts1); PTS1.append(pts1); pts2 = np.array(pts2); PTS2.append(pts2)
            except:
                # if cannot find corresponding pairs, ignore this sift mask
                PTS1.append([None]); PTS2.append([None])

        assert len(PTS1) == b

        for batch in range(b):
            if cfg.SIFT_POSE:
                # if directly use SIFT matches (bypassing the flow field)
                pts1 = PTS1[batch]; pts2 = PTS2[batch]
                coord1_sift_2D = torch.FloatTensor(pts1)
                coord2_sift_2D = torch.FloatTensor(pts2)
                coord1_flow_2D_norm_i = torch.cat((coord1_sift_2D, torch.ones(len(coord1_sift_2D), 1)), dim=1).unsqueeze(0).to(coord1_flow_2D.device).permute(0, 2, 1)
                coord2_flow_2D_norm_i = torch.cat((coord2_sift_2D, torch.ones(len(coord2_sift_2D), 1)), dim=1).unsqueeze(0).to(coord1_flow_2D.device).permute(0, 2, 1)
            else:
                # check the number of matches
                if len(PTS1[batch]) < self.min_matches or len(PTS2[batch]) < self.min_matches:
                    # fall back to all flow matches inside the safety margin
                    coord1_flow_2D_norm_i = coord1_flow_2D[batch, :, margin:-margin, margin:-margin].contiguous().view(3, -1).unsqueeze(0)
                    coord2_flow_2D_norm_i = coord2_flow_2D[batch, :, margin:-margin, margin:-margin].contiguous().view(3, -1).unsqueeze(0)
                else:
                    if cfg.SAMPLE_SP:
                        # conduct interpolation (sub-pixel sampling of the flow)
                        pts1 = torch.from_numpy(PTS1[batch]).to(coord1_flow_2D.device).type_as(coord1_flow_2D)
                        B, C, H, W = coord1_flow_2D.size()
                        # normalize to [-1, 1] for grid_sample
                        pts1[:, 0] = 2.0 * pts1[:, 0] / max(W - 1, 1) - 1.0; pts1[:, 1] = 2.0 * pts1[:, 1] / max(H - 1, 1) - 1.0
                        coord1_flow_2D_norm_i = F.grid_sample(coord1_flow_2D[batch].unsqueeze(0), pts1.unsqueeze(0).unsqueeze(-2), align_corners=True).squeeze(-1)
                        coord2_flow_2D_norm_i = F.grid_sample(coord2_flow_2D[batch].unsqueeze(0), pts1.unsqueeze(0).unsqueeze(-2), align_corners=True).squeeze(-1)
                    else:
                        # default choice: nearest-pixel sampling of the flow
                        pts1 = np.int32(np.round(PTS1[batch]))
                        coord1_flow_2D_norm_i = coord1_flow_2D[batch, :, pts1[:, 1], pts1[:, 0]].unsqueeze(0)
                        coord2_flow_2D_norm_i = coord2_flow_2D[batch, :, pts1[:, 1], pts1[:, 0]].unsqueeze(0)

            intrinsic_inv_gpu_i = intrinsic_inv_gpu[batch].unsqueeze(0)
            # projection by intrinsic matrix (pixels -> normalized camera rays)
            coord1_flow_2D_norm_i = torch.bmm(intrinsic_inv_gpu_i, coord1_flow_2D_norm_i)
            coord2_flow_2D_norm_i = torch.bmm(intrinsic_inv_gpu_i, coord2_flow_2D_norm_i)
            # reshape coordinates to Nx2
            coord1_flow_2D_norm_i = coord1_flow_2D_norm_i.transpose(1, 2)[0, :, :2].contiguous()
            coord2_flow_2D_norm_i = coord2_flow_2D_norm_i.transpose(1, 2)[0, :, :2].contiguous()

            with autocast(enabled=False):
                # GPU-accelerated RANSAC five-point algorithm
                # (full precision: the solver is numerically sensitive)
                E_i, P_i, F_i, inlier_num = compute_P_matrix_ransac(coord1_flow_2D_norm_i.detach(), coord2_flow_2D_norm_i.detach(),
                                                                    intrinsic_inv_gpu[batch, :, :], self.delta, self.alpha, self.maxreps,
                                                                    len(coord1_flow_2D_norm_i), len(coord1_flow_2D_norm_i),
                                                                    self.ransac_iter, self.ransac_threshold)
                E_mat[batch, :, :] = E_i.detach(); P_mat[batch, :, :] = P_i.detach()

        return P_mat, E_mat
############################################# Utility #############################################
def check_tensor(tensor):
    """Return truthy when *tensor* contains any infinite or NaN value."""
    has_inf = torch.isinf(tensor).any()
    has_nan = torch.isnan(tensor).any()
    return has_inf or has_nan
def Pose2RT(pose_mat):
    # pose_mat [B,3,4]
    # return : (d1,d2,d3,t1,t2,t3) -- rotation angles followed by translation
    cur_angle = utils.matrix2angle(pose_mat[:, :3, :3])
    cur_trans = pose_mat[:, :3, -1]
    return torch.cat((cur_angle, cur_trans), dim=-1)
def RT2Pose(RT):
    # RT (d1,d2,d3,t1,t2,t3) -- inverse of Pose2RT
    # return : [B,3,4]
    cur_rot = utils.angle2matrix(RT[:, :3])
    cur_trans = RT[:, 3:].unsqueeze(-1)
    return torch.cat((cur_rot, cur_trans), dim=-1)
def flow2coord(flow):
    """
    Generate flat homogeneous coordinates 1 and 2 from optical flow.

    Args:
        flow: bx2xhxw, torch.float32 (any device; the outputs follow it)
    Output:
        coord1_hom: bx3x(h*w)  pixel-grid coordinates of frame 1
        coord2_hom: bx3x(h*w)  coord1 displaced by the flow
    """
    b, _, h, w = flow.size()
    # Follow the input's device instead of hard-coding .cuda(), so the
    # function also works on CPU tensors (unchanged behavior on CUDA inputs).
    device = flow.device
    coord1 = torch.zeros_like(flow)
    coord1[:, 0, :, :] += torch.arange(w, dtype=torch.float32, device=device)
    coord1[:, 1, :, :] += torch.arange(h, dtype=torch.float32, device=device)[:, None]
    coord2 = coord1 + flow
    coord1_flat = coord1.reshape(b, 2, h * w)
    coord2_flat = coord2.reshape(b, 2, h * w)

    # Append the homogeneous row of ones.
    ones = torch.ones((b, 1, h * w), dtype=torch.float32, device=device)
    coord1_hom = torch.cat((coord1_flat, ones), dim=1)
    coord2_hom = torch.cat((coord2_flat, ones), dim=1)
    return coord1_hom, coord2_hom
def coord2flow(coord1, coord2, b, h, w):
    """
    Convert flat homogeneous coordinates 1 and 2 to optical flow.

    Args:
        coord1: bx3x(h*w)
        coord2: bx3x(h*w)
    Output:
        flow: bx2xhxw, torch.float32
    """
    # Drop the homogeneous row, take the displacement, and restore the grid.
    displacement = coord2[:, :2, :] - coord1[:, :2, :]
    return displacement.reshape(b, 2, h, w)
|
datapackage/cli.py | chris48s/datapackage-py | 183 | 111672 | <reponame>chris48s/datapackage-py
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# from __future__ import unicode_literals
import click
import json
import datapackage
from . import config
click.disable_unicode_literals_warning = True
# Module API
@click.group(help='')
@click.version_option(config.VERSION, message='%(version)s')
def cli():
    """Command-line interface

    ```
    Usage: datapackage [OPTIONS] COMMAND [ARGS]...

    Options:
      --version  Show the version and exit.
      --help     Show this message and exit.

    Commands:
      infer
      validate
    ```

    """
    # Group entry point: only dispatches to subcommands, nothing to do here.
    pass
# Validate a descriptor; prints the verdict and exits 1 when invalid.
# (No docstring on purpose: click would surface it as the command help.)
@cli.command()
@click.argument('descriptor', type=click.STRING)
def validate(descriptor):
    try:
        datapackage.validate(descriptor)
        click.echo('Data package descriptor is valid')
    except datapackage.exceptions.ValidationError as exception:
        click.echo('Data package descriptor is invalid')
        # Print every collected validation error before signalling failure.
        for error in exception.errors:
            click.echo(error)
        exit(1)
# Infer a descriptor from files matching the glob pattern and print it as
# formatted JSON. (No docstring: click would surface it as the command help.)
@cli.command()
@click.argument('pattern', type=click.STRING)
def infer(pattern):
    descriptor = datapackage.infer(pattern, base_path='.')
    click.echo(json.dumps(descriptor, indent=2))
|
stp_zmq/test/test_large_messages.py | andkononykhin/plenum | 148 | 111708 | <gh_stars>100-1000
import json
import zmq
from stp_core.crypto.util import randomSeed
from stp_core.network.port_dispenser import genHa
from stp_core.test.helper import SMotor
from stp_zmq.test.helper import genKeys
from stp_zmq.simple_zstack import SimpleZStack
def testSimpleZStacksMsgs(tdir, looper):
    """Round-trip a large (~100 KB) JSON payload between two SimpleZStacks.

    Alpha greets Beta; Beta replies with the big payload; Alpha's handler
    asserts the payload arrived intact.
    """
    # Fix: the original assigned `names = ['Alpha', 'Beta']` twice in a row;
    # the duplicate re-assignment was dead code and has been removed.
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)

    aseed = randomSeed()
    bseed = randomSeed()

    size = 100000
    msg = json.dumps({'random': randomSeed(size).decode()}).encode()

    def aHandler(m):
        # Alpha receives Beta's reply; the payload must arrive unmodified.
        str_m = "{}".format(m)
        print('{} printing... {}'.format(names[0], str_m[:100]))
        d, _ = m
        print('Message size is {}'.format(len(d['random'])))
        assert len(d['random']) == size

    def bHandler(m):
        # Beta answers every incoming message with the large payload.
        # NOTE: `beta` is resolved lazily from the enclosing scope, so this
        # closure is safely defined before `beta` is assigned below.
        print(beta.msgHandler)
        a = m[1]
        try:
            beta.listener.send_multipart([a, msg],
                                         flags=zmq.NOBLOCK)
        except zmq.Again:
            # Socket not ready; signal that the send should be retried.
            return False
        str_m = "{}".format(m)
        print('{} printing... {}'.format(names[1], str_m[:100]))

    stackParams = {
        "name": names[0],
        "ha": genHa(),
        "auto": 2,
        "basedirpath": tdir
    }
    alpha = SimpleZStack(stackParams, aHandler, aseed, False)

    stackParams = {
        "name": names[1],
        "ha": genHa(),
        "auto": 2,
        "basedirpath": tdir
    }
    beta = SimpleZStack(stackParams, bHandler, bseed, True)

    amotor = SMotor(alpha)
    looper.add(amotor)

    bmotor = SMotor(beta)
    looper.add(bmotor)

    alpha.connect(name=beta.name, ha=beta.ha,
                  verKeyRaw=beta.verKeyRaw, publicKeyRaw=beta.publicKeyRaw)

    looper.runFor(0.25)
    alpha.send({'greetings': 'hi'}, beta.name)
    looper.runFor(1)
|
tests/__init__.py | joshuadavidthomas/django_coverage_plugin | 172 | 111716 | <gh_stars>100-1000
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/django_coverage_plugin/blob/master/NOTICE.txt
"""The tests for the Django Coverage Plugin."""
# Define URLs here so we can use ROOT_URLCONF="tests"
try:
from django.urls import re_path
except ImportError:
from django.conf.urls import url as re_path
def index(request):
    """Placeholder view; exists only as a target for the URL patterns below."""
# URL routes used when this test package is configured as ROOT_URLCONF.
urlpatterns = [
    re_path(r'^home$', index, name='index'),
]
|
tests/resillience_tests/process_wait.py | pycampers/zproc | 106 | 111733 | <filename>tests/resillience_tests/process_wait.py<gh_stars>100-1000
import zproc
ctx = zproc.Context()

# Spawn and reap 250 short-lived processes in sequence to stress-test
# process creation and Process.wait().
for i in range(250):

    @ctx.spawn
    def my_process(ctx):
        # Each spawned process receives its own Context and can build a State.
        assert isinstance(ctx, zproc.Context)
        state = ctx.create_state()
        assert isinstance(state, zproc.State)
        print(i)
        return i

    # The value returned by the process must round-trip through wait().
    assert my_process.wait() == i
h5Nastran/h5Nastran/h5nastran/_punch.py | ACea15/pyNastran | 293 | 111735 | from __future__ import print_function, absolute_import
from collections import OrderedDict
from ._result_base import H5NastranResultBase
from h5Nastran.post_process.result_readers.punch import PunchReader
import numpy as np
import tables
from six import iteritems
class H5NastranResultPunch(H5NastranResultBase):
def __init__(self, *args, **kwargs):
super(H5NastranResultPunch, self).__init__(*args, **kwargs)
def load_punch(self, filename):
if self._bdf is None:
raise Exception('BDF must be loaded first!')
if self._f06 is not None:
raise Exception('F06 has already been loaded. Cannot load punch file after f06.')
self._punch = filename
self._punch_subcase_ids.clear()
reader = PunchReader(filename)
reader.register_callback(self._load_punch_table)
reader.read()
self.h5f.flush()
for table in self._tables:
table.finalize()
self._tables.clear()
self._write_unsupported_tables()
self._punch_finalize()
def _punch_finalize(self):
dtype = np.dtype([('SUBCASE_ID', '<i8'), ('LOAD_FACTOR', '<f8'), ('DOMAIN_ID', '<i8')])
format = tables.descr_from_dtype(dtype)[0]
self.h5f.create_table(self.table_paths.subcase_path, self.table_paths.subcase_table, format,
'SUBCASES', expectedrows=len(self._punch_subcase_ids), createparents=True)
table = self.h5f.get_node(self.table_paths.subcase)
data = np.zeros(len(self._punch_subcase_ids), dtype=dtype)
subcase_id = data['SUBCASE_ID']
load_factor = data['LOAD_FACTOR']
domain_id = data['DOMAIN_ID']
for key, domain_id_ in iteritems(self._punch_subcase_ids):
index = domain_id_ - 1
subcase_id_, load_factor_ = key
subcase_id[index] = subcase_id_
load_factor[index] = load_factor_
domain_id[index] = domain_id_
table.append(data)
self.h5f.flush()
def _load_punch_table(self, table_data):
key = table_data.header.subcase_id_num, table_data.header.load_factor
if key not in self._punch_subcase_ids:
self._punch_subcase_ids[key] = len(self._punch_subcase_ids) + 1
results_type = table_data.header.results_type_basic
table = self._result_tables.get(results_type, None)
if table is None:
return self._unsupported_table(table_data)
table.write_punch_data(table_data)
self._tables.add(table)
|
examples/class_a.py | maxschommer/pcbdl | 117 | 111737 | #!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple Class A amplifier example.
https://www.electronics-tutorials.ws/amplifier/amp_5.html
"""
from pcbdl import *
ac_coupling_value = "1000u"
vcc, gnd = Net("vcc"), Net("gnd")
q = BJT("2n3904")
C = C_POL
q.BASE << (
C(ac_coupling_value, to=Net("vin")),
R("1k", to=vcc),
R("1k", to=gnd),
)
q.COLLECTOR << (
C(ac_coupling_value, to=Net("vout")),
R("100", to=vcc),
)
q.EMITTER << (
R("100", "Rc", to=gnd),
C("1u", "C10", to=gnd),
)
|
setup.py | Symbo1/wsltools | 412 | 111763 | <reponame>Symbo1/wsltools
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from setuptools import setup, find_packages
# The README doubles as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(name="wsltools",
      version="0.2.4",
      description="Web Scan Lazy Tools",
      long_description=long_description,
      long_description_content_type="text/markdown",
      license="MIT",
      author="CongRong",
      author_email="<EMAIL>",
      url="https://github.com/symbo1/wsltools",
      # Supports Python 2.7 and Python 3.5+ (excludes 3.0-3.4).
      python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
      packages=find_packages(),
      # Ship bundled binary data files alongside the Python packages.
      package_data = {'': ['*.bin']},
      keywords=["security","security-tools","security-scanner","security-automation","security-audit",
                "spider","spider-framework","scanner-web","security-tool","crawling-framework","web-vulnerability-scanners"],
      zip_safe=True,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: Implementation :: CPython',
          'Programming Language :: Python :: Implementation :: PyPy'
      ],
      project_urls={
          'Documentation': 'https://wsltools.readthedocs.io',
          'Source': 'https://github.com/symbo1/wsltools',
      },
      )
project/utils/logger.py | MahjongRepository/tenhou-python-bot | 201 | 111804 | import datetime
import hashlib
import logging
import os
from logging.handlers import SysLogHandler
from utils.settings_handler import settings
LOG_FORMAT = "%(asctime)s %(levelname)s: %(message)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
class ColoredFormatter(logging.Formatter):
    """
    Formatter that colorizes console log records.

    Messages starting with "id=" are rendered in green and messages starting
    with "msg=" in cyan; all other records are left uncolored.
    Apply only to the console handler.
    """

    green = "\u001b[32m"
    cyan = "\u001b[36m"
    reset = "\u001b[0m"

    def format(self, record):
        format_style = self._fmt
        if record.getMessage().startswith("id="):
            format_style = f"{ColoredFormatter.green}{format_style}{ColoredFormatter.reset}"
        if record.getMessage().startswith("msg="):
            format_style = f"{ColoredFormatter.cyan}{format_style}{ColoredFormatter.reset}"

        # Fix: propagate our datefmt to the delegate formatter.  Previously the
        # delegate was built without it, so the datefmt passed to this
        # formatter's constructor (e.g. DATE_FORMAT) was silently ignored and
        # %(asctime)s fell back to logging's default date format.
        formatter = logging.Formatter(format_style, datefmt=self.datefmt)
        return formatter.format(record)
def set_up_logging(save_to_file=True, print_to_console=True, logger_name="bot"):
    """
    Configure and return the logger used for tenhou communication and AI output.

    Depending on the flags, a colored console handler, a per-run log file
    handler and (when configured) a Papertrail syslog handler are attached.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)

    if print_to_console:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(ColoredFormatter(LOG_FORMAT, datefmt=DATE_FORMAT))
        logger.addHandler(console_handler)

    # Short identifier that distinguishes this bot's logs from other bots
    # started at the same time; derived from the user id when not configured.
    log_prefix = settings.LOG_PREFIX or hashlib.sha1(settings.USER_ID.encode("utf-8")).hexdigest()[:5]

    if save_to_file:
        logs_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "logs")
        if not os.path.exists(logs_directory):
            os.mkdir(logs_directory)

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")
        file_handler = logging.FileHandler(
            os.path.join(logs_directory, "{}_{}.log".format(log_prefix, timestamp)),
            encoding="utf-8",
        )
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(LOG_FORMAT, datefmt=DATE_FORMAT))
        logger.addHandler(file_handler)

    if settings.PAPERTRAIL_HOST_AND_PORT:
        syslog_handler = SysLogHandler(address=settings.PAPERTRAIL_HOST_AND_PORT)
        game_id = f"BOT_{log_prefix}"
        syslog_handler.setFormatter(
            ColoredFormatter(f"%(asctime)s {game_id}: %(message)s", datefmt=DATE_FORMAT)
        )
        logger.addHandler(syslog_handler)

    return logger
|
ursina/prefabs/animator.py | jtiai/ursina | 1,431 | 111844 | <filename>ursina/prefabs/animator.py
from ursina import *
class Animator():
    """Switches between named animation states, enabling exactly one at a time.

    `animations` maps a state name to an entity/animation object; setting
    `.state` enables the chosen entry, disables the rest, and calls its
    `start()` method when it has one.
    """

    def __init__(self, animations=None, start_state=''):
        # Fix: default to an empty dict instead of storing None — previously
        # Animator() crashed in the state setter with "'' in None" TypeError.
        self.animations = animations if animations is not None else {}   # dict: state name -> entity

        if not start_state and self.animations:
            # Default to the first declared state.
            start_state = list(self.animations)[0]
        self.start_state = start_state

        self._state = None
        self.state = start_state

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        if value not in self.animations:
            print(self, 'has no animation:', value)
        elif self._state != value:
            # Only show the requested state's animation and disable the rest.
            for key, anim in self.animations.items():
                if anim:
                    anim.enabled = value == key

            anim = self.animations[value]
            if hasattr(anim, 'start') and callable(anim.start):
                anim.start()

            self._state = value
if __name__ == '__main__':
    app = Ursina()
    # texture_importer.textureless=True

    anim = Animation('ursina_wink', loop=True, autoplay=False)
    # Three states: two plain cubes and one sprite animation.
    a = Animator(
        animations = {
            'lol' : Entity(model='cube', color=color.red),
            'yo' : Entity(model='cube', color=color.green, x=1),
            'help' : anim,
        }
    )
    a.state = 'yo'

    Text('press <red>1<default>, <green>2<default> or <violet>3<default> to toggle different animator states', origin=(0,-.5), y=-.4)

    # Keyboard handler; ursina calls module-level input(key) on key presses.
    def input(key):
        if key == '1':
            a.state = 'lol'
        if key == '2':
            a.state = 'yo'
        if key == '3':
            a.state = 'help'
            print(anim.enabled)

    app.run()
|
flexneuart/models/base_bert.py | gitter-badger/FlexNeuART | 101 | 111870 | #
# This code is a modified version of CEDR: https://github.com/Georgetown-IR-Lab/cedr
#
# (c) Georgetown IR lab & Carnegie Mellon University
#
# It's distributed under the MIT License
# MIT License is compatible with Apache 2 license for the code in this repo.
#
from flexneuart.models.base import BaseModel
from flexneuart.models.utils import init_model, BERT_ATTR
USE_BATCH_COEFF = True
DEFAULT_BERT_DROPOUT = 0.1
class BertBaseRanker(BaseModel):
    """
    The base class for all Transformer-based ranking models.

    We generally/broadly consider these models to be BERT-variants, hence,
    the name of the base class.
    """

    def __init__(self, bert_flavor):
        """Bert ranker constructor.

            :param bert_flavor:   the name of the underlying Transformer/BERT. Various
                                  Transformer models are possible as long as they return
                                  the object BaseModelOutputWithPoolingAndCrossAttentions.

        """
        super().__init__()
        init_model(self, bert_flavor)

    def bert_param_names(self):
        """
        :return: a set of the main BERT-parameter names. Because we assigned the main
                 BERT model to an attribute with the name BERT_ATTR, all such parameter
                 keys start with this value followed by a dot.
        """
        # Set comprehension instead of set([...]); prefix hoisted out of the loop.
        prefix = f'{BERT_ATTR}.'
        return {k for k in self.state_dict().keys() if k.startswith(prefix)}

    def tokenize_and_encode(self, text):
        """Tokenizes the text and converts tokens to respective IDs

        :param text:  input text
        :return:      an array of token IDs
        """
        toks = self.tokenizer.tokenize(text)
        return self.tokenizer.convert_tokens_to_ids(toks)

    def forward(self, **inputs):
        # Subclasses must implement the actual scoring computation.
        raise NotImplementedError
|
src/estimator_utils.py | ruohoruotsi/harmonixset | 101 | 111890 | <reponame>ruohoruotsi/harmonixset
"""
Created 10-13-19 by <NAME>
"""
# Local imports
from audio_utils import mp3_to_wav
# Third party imports
# None.
# Python standard library imports
from multiprocessing import Pool
import os
import tempfile
import traceback
from functools import wraps
import logging
def estimator(func):
    """
    Simple wrapper function around a function that analyizes a file.
    The wrapper logs the function that is analyzing the file and the
    file that is being analyzed.

    Args:
        func: function - A file analysis function that takes the filename as the first
            argument.

    Return:
        function - The wrapped function (with logging).
    """
    @wraps(func)
    def est_func(fname, *args, **kwargs):
        logging.info('Analyzing "{}" estimator for track: {}'.format(func.__name__, fname))
        try:
            # Convert the mp3 to a temporary wav, since the wrapped
            # estimators operate on wav audio.
            with tempfile.NamedTemporaryFile(mode='wb', suffix='.wav', prefix='tmp') as temp_audio_file:
                with open(fname, 'rb') as mp3_file:
                    temp_audio_file.write(mp3_to_wav(mp3_file).read())
                # NOTE(review): re-opening a NamedTemporaryFile by name while it
                # is still open is POSIX-only behavior — presumably this code
                # never runs on Windows; confirm.
                result = func(temp_audio_file.name, *args, **kwargs)

            # Pair the estimates with the original (mp3) filename, not the temp wav.
            return result[0], fname
        except Exception:
            # Best-effort analysis: log the failure and return empty estimates
            # so one bad track does not abort a batch run.
            logging.error('Failed to analyze "{}" for track: {}'.format(func.__name__, fname), exc_info=True)
            return [[], fname]

    return est_func
def process_estimator(args, estimator, output_dir, num_threads):
    """
    Run `estimator` over every argument tuple and write the results to disk.

    Args:
        args: list(tuple(str, *)) - Argument tuples passed to the estimator;
            the first element of each tuple is the mp3 filename to analyze.
        estimator: function - Takes an audio filename (plus optional extras)
            and returns (estimates, filename).
        output_dir: str - Directory receiving one text file per input, with
            the estimated values newline-separated.
        num_threads: int - Number of worker processes; 1 runs everything
            in the current process.
    """
    # Analyze, in parallel when more than one worker was requested.
    if num_threads > 1:
        pool = Pool(num_threads, maxtasksperchild=1)
        results = pool.starmap(estimator, args)
        pool.close()
        pool.join()
    else:
        results = [estimator(*arg_tuple) for arg_tuple in args]

    logging.info('Saving results for estimator: "{}"'.format(estimator.__name__))

    # One "<track-basename>.txt" per analyzed file.
    for times, src_fname in results:
        base_name = os.path.splitext(os.path.basename(src_fname))[0]
        with open(os.path.join(output_dir, base_name + '.txt'), 'w') as out_file:
            out_file.write('\n'.join(str(time_marker) for time_marker in times))
|
py2app_tests/argv_app/main.py | flupke/py2app | 193 | 111896 | import sys
import os
root = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
)
)
)
sys.argv[0] = os.path.realpath(sys.argv[0])
fp = open(os.path.join(root, "argv.txt"), "w")
fp.write(repr(sys.argv))
fp.write('\n')
fp.close()
|
rele/management/commands/runrele.py | Itsindigo/rele | 183 | 111916 | <filename>rele/management/commands/runrele.py
import logging
from django.conf import settings
from django.core.management import BaseCommand
from rele import config
from rele.management.discover import discover_subs_modules
from rele.worker import create_and_run
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that starts Relé worker threads."""

    help = "Start subscriber threads to consume messages from Relé topics."
    # Relé configuration, built once at class-definition time from settings.RELE.
    config = config.Config(settings.RELE)

    def handle(self, *args, **options):
        # Warn about persistent DB connections: each subscriber thread keeps
        # its own connection, which can exhaust database connection slots.
        # NOTE(review): this fires only when *every* database alias has a
        # truthy CONN_MAX_AGE — confirm whether `any` was intended instead.
        if all(map(lambda x: x.get("CONN_MAX_AGE"), settings.DATABASES.values())):
            self.stderr.write(
                self.style.WARNING(
                    "WARNING: settings.CONN_MAX_AGE is not set to 0. "
                    "This may result in slots for database connections to "
                    "be exhausted."
                )
            )
        # Discover subscription callables in installed apps and run the worker.
        subs = config.load_subscriptions_from_paths(
            discover_subs_modules(), self.config.sub_prefix, self.config.filter_by
        )
        self.stdout.write(f"Configuring worker with {len(subs)} " f"subscription(s)...")
        create_and_run(subs, self.config)
|
vyapp/plugins/jsonfmt.py | iogf/vy | 927 | 111983 | """
Overview
========
This plugin implements a key-command to format JSON strings. You select
the region containing the JSON then issue the key-command to format it will
print any errors on the status bar.
Key-Commands
============
Namespace: jsonfmt
Mode: EXTRA
Event: <Key-j>
Description: Format the selected JSON data then switch to NORMAL mode.
"""
from subprocess import Popen, PIPE
from vyapp.app import root
class FmtJSON:
    """vy plugin: pretty-print the selected JSON region via `python -m json.tool`."""

    def __init__(self, area, *args, **kwargs):
        self.area = area
        # Bind <Key-j> in EXTRA mode to the formatter.
        area.install('fmtjson', ('EXTRA', '<Key-j>', self.run_printer))

    def run_printer(self, event):
        """Format the selected text in place, reporting errors on the status bar."""
        start = self.area.index('sel.first')
        end = self.area.index('sel.last')

        data = self.area.get(start, end)
        # shell=1 is safe here: the command string is a constant, and the
        # selected text is passed via stdin, not the command line.
        child = Popen('python -m json.tool', encoding=self.area.charset,
                      stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=1)
        output, err = child.communicate(data)
        # Printed unconditionally; `err` is empty on success.
        print('\nJSON Errors:\n', err)

        if child.returncode:
            root.status.set_msg('JSON Errors! Check its output.')
        else:
            # Replace the selection with the formatted JSON.
            self.area.swap(output, start, end)

        self.area.chmode('NORMAL')

install = FmtJSON  # vy plugin entry point
tests/testapp/migrate/page/0001_initial.py | mjl/feincms | 325 | 111990 | <reponame>mjl/feincms<filename>tests/testapp/migrate/page/0001_initial.py
# Generated by Django 3.0.2 on 2020-01-21 15:21
import django.db.models.deletion
from django.db import migrations, models
import feincms.contrib.fields
import feincms.extensions.base
import feincms.extensions.datepublisher
import feincms.module.medialibrary.fields
import feincms.module.mixins
class Migration(migrations.Migration):
    """Auto-generated initial schema for the test ``page`` app.

    Creates the Page tree model plus four content-type tables attached to it.
    """

    initial = True

    dependencies = [
        ("sites", "0001_initial"),
        ("medialibrary", "0001_initial"),
    ]

    operations = [
        # The MPTT-based Page tree with FeinCMS extensions (datepublisher,
        # translations, SEO fields, navigation, symlinking).
        migrations.CreateModel(
            name="Page",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("active", models.BooleanField(default=True, verbose_name="active")),
                (
                    "title",
                    models.CharField(
                        help_text="This title is also used for navigation menu items.",
                        max_length=200,
                        verbose_name="title",
                    ),
                ),
                (
                    "slug",
                    models.SlugField(
                        help_text="This is used to build the URL for this page",
                        max_length=150,
                        verbose_name="slug",
                    ),
                ),
                (
                    "in_navigation",
                    models.BooleanField(default=False, verbose_name="in navigation"),
                ),
                (
                    "override_url",
                    models.CharField(
                        blank=True,
                        help_text="Override the target URL. Be sure to include slashes at the beginning and at the end if it is a local URL. This affects both the navigation and subpages' URLs.",
                        max_length=255,
                        verbose_name="override URL",
                    ),
                ),
                (
                    "redirect_to",
                    models.CharField(
                        blank=True,
                        help_text="Target URL for automatic redirects or the primary key of a page.",
                        max_length=255,
                        verbose_name="redirect to",
                    ),
                ),
                (
                    "_cached_url",
                    models.CharField(
                        blank=True,
                        db_index=True,
                        default="",
                        editable=False,
                        max_length=255,
                        verbose_name="Cached URL",
                    ),
                ),
                # MPTT tree bookkeeping columns.
                ("lft", models.PositiveIntegerField(editable=False)),
                ("rght", models.PositiveIntegerField(editable=False)),
                ("tree_id", models.PositiveIntegerField(db_index=True, editable=False)),
                ("level", models.PositiveIntegerField(editable=False)),
                (
                    "template_key",
                    models.CharField(
                        choices=[("base", "Base Template")],
                        default="base",
                        max_length=255,
                        verbose_name="template",
                    ),
                ),
                (
                    "navigation_extension",
                    models.CharField(
                        blank=True,
                        choices=[],
                        help_text="Select the module providing subpages for this page if you need to customize the navigation.",
                        max_length=200,
                        null=True,
                        verbose_name="navigation extension",
                    ),
                ),
                (
                    "language",
                    models.CharField(
                        choices=[("en", "English"), ("de", "German")],
                        default="en",
                        max_length=10,
                        verbose_name="language",
                    ),
                ),
                (
                    "publication_date",
                    models.DateTimeField(
                        default=feincms.extensions.datepublisher.granular_now,
                        verbose_name="publication date",
                    ),
                ),
                (
                    "publication_end_date",
                    models.DateTimeField(
                        blank=True,
                        help_text="Leave empty if the entry should stay active forever.",
                        null=True,
                        verbose_name="publication end date",
                    ),
                ),
                (
                    "_ct_inventory",
                    feincms.contrib.fields.JSONField(
                        blank=True,
                        editable=False,
                        null=True,
                        verbose_name="content types",
                    ),
                ),
                (
                    "meta_keywords",
                    models.TextField(
                        blank=True,
                        help_text="Keywords are ignored by most search engines.",
                        verbose_name="meta keywords",
                    ),
                ),
                (
                    "meta_description",
                    models.TextField(
                        blank=True,
                        help_text="This text is displayed on the search results page. It is however not used for the SEO ranking. Text longer than 140 characters is truncated.",
                        verbose_name="meta description",
                    ),
                ),
                (
                    "creation_date",
                    models.DateTimeField(
                        editable=False, null=True, verbose_name="creation date"
                    ),
                ),
                (
                    "modification_date",
                    models.DateTimeField(
                        editable=False, null=True, verbose_name="modification date"
                    ),
                ),
                (
                    "_content_title",
                    models.TextField(
                        blank=True,
                        help_text="The first line is the main title, the following lines are subtitles.",
                        verbose_name="content title",
                    ),
                ),
                (
                    "_page_title",
                    models.CharField(
                        blank=True,
                        help_text="Page title for browser window. Same as title by default. Must be 69 characters or fewer.",
                        max_length=69,
                        verbose_name="page title",
                    ),
                ),
                (
                    "navigation_group",
                    models.CharField(
                        blank=True,
                        choices=[("default", "Default"), ("footer", "Footer")],
                        db_index=True,
                        default="default",
                        max_length=20,
                        verbose_name="navigation group",
                    ),
                ),
                (
                    "parent",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="children",
                        to="page.Page",
                        verbose_name="Parent",
                    ),
                ),
                (
                    "site",
                    models.ForeignKey(
                        default=1,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="sites.Site",
                        verbose_name="Site",
                    ),
                ),
                (
                    "symlinked_page",
                    models.ForeignKey(
                        blank=True,
                        help_text="All content is inherited from this page if given.",
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="page_page_symlinks",
                        to="page.Page",
                        verbose_name="symlinked page",
                    ),
                ),
                (
                    "translation_of",
                    models.ForeignKey(
                        blank=True,
                        help_text="Leave this empty for entries in the primary language.",
                        limit_choices_to={"language": "en"},
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="translations",
                        to="page.Page",
                        verbose_name="translation of",
                    ),
                ),
            ],
            options={
                "verbose_name": "page",
                "verbose_name_plural": "pages",
                "ordering": ["tree_id", "lft"],
            },
            bases=(
                models.Model,
                feincms.extensions.base.ExtensionsMixin,
                feincms.module.mixins.ContentModelMixin,
            ),
        ),
        # Content-type table: renders one of a fixed set of templates.
        migrations.CreateModel(
            name="TemplateContent",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("region", models.CharField(max_length=255)),
                ("ordering", models.IntegerField(default=0, verbose_name="ordering")),
                (
                    "template",
                    models.CharField(
                        choices=[("templatecontent_1.html", "template 1")],
                        max_length=100,
                        verbose_name="template",
                    ),
                ),
                (
                    "parent",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="templatecontent_set",
                        to="page.Page",
                    ),
                ),
            ],
            options={
                "verbose_name": "template content",
                "verbose_name_plural": "template contents",
                "db_table": "page_page_templatecontent",
                "ordering": ["ordering"],
                "permissions": [],
                "abstract": False,
            },
        ),
        # Content-type table: raw text content.
        migrations.CreateModel(
            name="RawContent",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("text", models.TextField(blank=True, verbose_name="content")),
                ("region", models.CharField(max_length=255)),
                ("ordering", models.IntegerField(default=0, verbose_name="ordering")),
                (
                    "parent",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="rawcontent_set",
                        to="page.Page",
                    ),
                ),
            ],
            options={
                "verbose_name": "raw content",
                "verbose_name_plural": "raw contents",
                "db_table": "page_page_rawcontent",
                "ordering": ["ordering"],
                "permissions": [],
                "abstract": False,
            },
        ),
        # Content-type table: references a media-library file.
        migrations.CreateModel(
            name="MediaFileContent",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("region", models.CharField(max_length=255)),
                ("ordering", models.IntegerField(default=0, verbose_name="ordering")),
                (
                    "type",
                    models.CharField(
                        choices=[("default", "Default position")],
                        default="default",
                        max_length=20,
                        verbose_name="type",
                    ),
                ),
                (
                    "mediafile",
                    feincms.module.medialibrary.fields.MediaFileForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="+",
                        to="medialibrary.MediaFile",
                        verbose_name="media file",
                    ),
                ),
                (
                    "parent",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="mediafilecontent_set",
                        to="page.Page",
                    ),
                ),
            ],
            options={
                "verbose_name": "media file",
                "verbose_name_plural": "media files",
                "db_table": "page_page_mediafilecontent",
                "ordering": ["ordering"],
                "permissions": [],
                "abstract": False,
            },
        ),
        # Content-type table: embeds an application (URLconf) into a page.
        migrations.CreateModel(
            name="ApplicationContent",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "parameters",
                    feincms.contrib.fields.JSONField(editable=False, null=True),
                ),
                ("region", models.CharField(max_length=255)),
                ("ordering", models.IntegerField(default=0, verbose_name="ordering")),
                (
                    "urlconf_path",
                    models.CharField(
                        choices=[("whatever", "Test Urls")],
                        max_length=100,
                        verbose_name="application",
                    ),
                ),
                (
                    "parent",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="applicationcontent_set",
                        to="page.Page",
                    ),
                ),
            ],
            options={
                "verbose_name": "application content",
                "verbose_name_plural": "application contents",
                "db_table": "page_page_applicationcontent",
                "ordering": ["ordering"],
                "permissions": [],
                "abstract": False,
            },
        ),
    ]
|
retriever/lib/download.py | Nageshbansal/retriever | 254 | 112001 | import os
from retriever.engines import choose_engine
from retriever.lib.defaults import SCRIPT_WRITE_PATH
from retriever.lib.rdatasets import create_rdataset, update_rdataset_catalog
from retriever.lib.repository import check_for_updates
from retriever.lib.scripts import SCRIPT_LIST, name_matches
from retriever.lib.socrata import find_socrata_dataset_by_id, create_socrata_dataset
def download(dataset, path='./', quiet=False, sub_dir='', debug=False, use_cache=True):
    """Download scripts for retriever.

    `dataset` may name a regular retriever script, a Socrata dataset
    ("socrata-<id>") or an R dataset ("rdataset-<package>-<name>").
    Returns the engine used for the download.
    """
    args = {
        'dataset': dataset,
        'command': 'download',
        'path': path,
        'sub_dir': sub_dir,
        'quiet': quiet
    }
    engine = choose_engine(args)
    engine.use_cache = use_cache

    # Make sure the local script catalog exists and is populated.
    script_list = SCRIPT_LIST()
    if not script_list or not os.listdir(SCRIPT_WRITE_PATH):
        check_for_updates()
        script_list = SCRIPT_LIST()
    scripts = name_matches(script_list, args['dataset'])
    if scripts:
        # Regular retriever scripts: download each matching one.
        for script in scripts:
            print("=> Downloading", script.name)
            try:
                script.download(engine, debug=debug)
                script.engine.final_cleanup()
            except Exception as e:
                # Best-effort: report and continue unless debugging.
                print(e)
                if debug:
                    raise
    elif args['dataset'].startswith('socrata') and (scripts is None):
        # Socrata datasets are fetched by id; only tabular types are supported.
        socrata_id = args['dataset'].split('-', 1)[1]
        resource = find_socrata_dataset_by_id(socrata_id)

        if "error" in resource.keys():
            if resource["datatype"][0] == "map":
                print("{} because map type datasets are not supported".format(
                    resource["error"]))
            else:
                print("{} because it is of type {} and not tabular".format(
                    resource["error"], resource["datatype"][1]))
        elif len(resource.keys()) == 0:
            # Nothing found for this id; silently give up.
            return
        else:
            print("=> Downloading", args['dataset'])
            name = f"socrata-{socrata_id}"
            create_socrata_dataset(engine, name, resource)
    elif (scripts is None) and (args['dataset'].startswith('rdataset')):
        # R datasets: "rdataset-<package>-<dataset_name>".
        print("=> Downloading", args['dataset'])
        rdataset = args['dataset'].split('-')
        update_rdataset_catalog()
        package, dataset_name = rdataset[1], rdataset[2]
        create_rdataset(engine, package, dataset_name)
    else:
        message = "Run retriever.datasets() to see the list of currently " \
                  "available datasets."
        raise ValueError(message)
    return engine
|
pipeline/Serverless/common/dynamodb.py | Rkauff/Klayers | 1,096 | 112015 | import json
import decimal
from datetime import datetime
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that renders DynamoDB ``Decimal`` values as plain numbers.

    Fractional Decimals become floats, whole Decimals become ints.
    """

    def default(self, o):
        if isinstance(o, decimal.Decimal):
            # Fix: test the remainder's truthiness instead of `> 0`.  For
            # negative fractional values, Decimal('-1.5') % 1 == Decimal('-0.5'),
            # which is NOT > 0, so the old check truncated -1.5 to -1.
            if o % 1:
                return float(o)
            return int(o)
        return super(DecimalEncoder, self).default(o)
# Rename keys from old to new
# Rename keys from old to new
def map_keys(items):
    """
    Translate abbreviated DynamoDB attribute names to their long forms.

    Args:
        items: List of dict items to be mapped.
    Return:
        items: List of dict items whose keys have been renamed according to
            map_table below.  ``rqrmntsTxt`` values are split into a list of
            requirement lines, and ``exDt`` epoch timestamps are converted to
            ISO-8601 strings.
    """
    map_table = {
        "pckg": "package",
        "lyrVrsn": "layerVersion",
        "pckgVrsn": "packageVersion",
        "rgn": "region",
        "dplySts": "deployStatus",
        "rqrmntsTxt": "dependencies",
        "arn": "arn",
        "exDt": "expiryDate",
        "rqrmntsHsh": "requirementsHash",
    }

    new_items = []
    for item in items:
        new_item = {}
        for k in item:
            # Fix: these branches must be mutually exclusive.  With the
            # original `if`/`if`/`else`, a "rqrmntsTxt" key also matched the
            # final `else`, which overwrote the split list with the raw string.
            if k == "rqrmntsTxt":
                new_item[map_table[k]] = item[k].split("\n")
            elif k == "exDt":
                new_item[map_table[k]] = datetime.fromtimestamp(item[k]).isoformat()
            else:
                # NOTE: keys missing from map_table raise KeyError, as before.
                new_item[map_table[k]] = item[k]
        new_items.append(new_item)
    return new_items
|
descarteslabs/client/services/catalog/tests/test_catalog.py | carderne/descarteslabs-python | 167 | 112088 | <reponame>carderne/descarteslabs-python<gh_stars>100-1000
# Copyright 2018-2020 Descartes Labs.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import unittest
import os
import sys
from mock import patch
from tempfile import NamedTemporaryFile
import responses
import json
import re
from descarteslabs.client.auth import Auth
from descarteslabs.client.services.catalog import Catalog
@patch.object(Auth, "token", "token")
@patch.object(Auth, "namespace", "foo")
class TestCatalog(unittest.TestCase):
    """Unit tests for the Catalog client, with HTTP traffic mocked via `responses`."""

    instance = None

    def setUp(self):
        self.url = "http://www.example.com/metadata/v1/catalog"
        self.instance = Catalog(url=self.url)
        # Matches any request to the fake catalog service.
        self.match_url = re.compile(self.url)

    def mock_response(self, method, json, status=200, **kwargs):
        # Register a canned response for any catalog URL.
        responses.add(method, self.match_url, json=json, status=status, **kwargs)

    @staticmethod
    def validate_ndarray_callback(request):
        # Fails the test if the uploaded body is not a valid .npy payload.
        np.load(request.body)
        return (200, {}, "")

    @patch(
        "descarteslabs.client.services.catalog.Catalog._do_upload",
        return_value=(False, "upload_id", None),
    )
    def test_upload_ndarray_dtype(self, _do_upload):
        # Unsupported dtypes must raise; all supported dtypes must be accepted.
        unsupported_dtypes = ["uint64"]

        for dtype in unsupported_dtypes:
            with pytest.raises(TypeError):
                self.instance.upload_ndarray(
                    np.zeros((10, 10), dtype=dtype), "product", "key"
                )

        for dtype in Catalog.UPLOAD_NDARRAY_SUPPORTED_DTYPES:
            self.instance.upload_ndarray(
                np.zeros((10, 10), dtype=dtype), "product", "key"
            )

    def test_upload_invalid_id(self):
        with pytest.raises(TypeError):
            self.instance.upload_ndarray(
                np.zeros((10, 10)),
                # invalid product id
                {"foo": "bar"},
                "key",
            )

    @unittest.skipIf(sys.version_info.major == 3, "Test only makes sense in py2")
    def test_upload_image_deprecated_file_type(self):
        # in py2 NamedTemporaryFile produces a file object not a IOBase object
        with NamedTemporaryFile() as tmp:
            with pytest.raises(Exception):
                self.instance.upload_image(tmp, "product")

    def test_upload_image_bad_path(self):
        # A closed/deleted temp file's handle is not a usable path.
        name = None
        with NamedTemporaryFile() as tmp:
            name = tmp.file
        with pytest.raises(Exception):
            self.instance.upload_image(name, "product")

    def test_upload_image_multi_file_no_list(self):
        # multi=True requires a list of files, not a single path.
        with NamedTemporaryFile() as tmp:
            with pytest.raises(ValueError):
                self.instance.upload_image(tmp.name, "product", multi=True)

    def test_upload_image_multi_file_no_image_id(self):
        # Multi-file uploads need an explicit image id.
        with NamedTemporaryFile() as tmp:
            with pytest.raises(ValueError):
                self.instance.upload_image([tmp.name, tmp.name], "product", multi=True)

    @responses.activate
    def test_upload_image(self):
        product = "foo:product_id"
        gcs_upload_url = "https://gcs_upload_url.com"
        upload_url = (
            "http://www.example.com/metadata/v1/catalog/products/{}/images/upload/{}"
        )
        # delete=False so the file can be re-opened by name on Windows.
        with NamedTemporaryFile(delete=False) as tmp:
            try:
                tmp.write(b"foo")
                tmp.close()

                responses.add(
                    responses.POST,
                    upload_url.format(product, os.path.basename(tmp.name)),
                    body=gcs_upload_url,
                )
                responses.add(responses.PUT, gcs_upload_url)

                self.instance.upload_image(tmp.name, product)
            finally:
                # Manual cleanup required for Windows compatibility
                os.unlink(tmp.name)

    @responses.activate
    def test_upload_ndarray(self):
        product = "foo:product_id"
        gcs_upload_url = "https://gcs_upload_url.com"
        upload_url = (
            "http://www.example.com/metadata/v1/catalog/products/{}/images/upload/key"
        )
        responses.add(responses.POST, upload_url.format(product), body=gcs_upload_url)
        # The callback asserts the PUT body is a well-formed ndarray payload.
        responses.add_callback(
            responses.PUT, gcs_upload_url, callback=self.validate_ndarray_callback
        )

        self.instance.upload_ndarray(np.zeros((10, 10)), product, "key")

    # tests verifying storage state kwarg is applied correctly
    @responses.activate
    def test_add_image_default(self):
        self.mock_response(responses.POST, json={})

        self.instance.add_image("product", "fake_image_id")

        request = responses.calls[0].request
        assert json.loads(request.body.decode("utf-8"))["storage_state"] == "available"
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
malib/utils/exp_tools/tune_type.py | zbzhu99/malib | 258 | 112105 | <reponame>zbzhu99/malib<filename>malib/utils/exp_tools/tune_type.py
from abc import ABCMeta
from typing import Sequence
class TuneUnit(metaclass=ABCMeta):
    """Abstract marker base class for hyper-parameter tuning primitives.

    Concrete units (e.g. a grid of candidate values) subclass this type so
    tuning code can recognize them by ``isinstance`` checks.
    """

    def __init__(self):
        # No shared state; subclasses carry their own configuration.
        pass
class Grid(TuneUnit):
    """Tuning unit that exhaustively enumerates a fixed set of candidates.

    Parameters
    ----------
    data : Sequence
        The candidate values this grid spans.
    """

    def __init__(self, data: Sequence):
        super().__init__()
        self._values = data

    @property
    def data(self):
        """Sequence of candidate values covered by this grid."""
        return self._values
|
tests/test_decode.py | entn-at/fast-ctc-decode | 120 | 112113 | <filename>tests/test_decode.py
#!/usr/env/bin python3
import numpy as np
from unittest import TestCase, main
from fast_ctc_decode import *
class Test1DBeamSearch(TestCase):
    """Tests for ``beam_search`` over 1D (time x alphabet) probabilities."""

    def setUp(self):
        self.beam_size = 5
        self.alphabet = "NACGT"
        self.beam_cut_threshold = 0.1
        self.probs = self.get_random_data()

    def get_random_data(self, samples=100):
        """Return an L2-row-normalized random (samples, len(alphabet)) matrix."""
        x = np.random.rand(samples, len(self.alphabet)).astype(np.float32)
        return x / np.linalg.norm(x, ord=2, axis=1, keepdims=True)

    def test_beam_search(self):
        """ simple beam search test with the canonical alphabet"""
        seq, path = beam_search(self.probs, self.alphabet, self.beam_size, self.beam_cut_threshold)
        self.assertEqual(len(seq), len(path))
        self.assertEqual(len(set(seq)), len(self.alphabet) - 1)

    def test_beam_search_list(self):
        """ simple beam search test with the canonical alphabet as a list"""
        seq, path = beam_search(self.probs, list(self.alphabet), self.beam_size, self.beam_cut_threshold)
        self.assertEqual(len(seq), len(path))
        self.assertEqual(len(set(seq)), len(self.alphabet) - 1)

    def test_beam_search_tuple(self):
        """ simple beam search test with the canonical alphabet as a tuple"""
        seq, path = beam_search(self.probs, tuple(self.alphabet), self.beam_size, self.beam_cut_threshold)
        self.assertEqual(len(seq), len(path))
        self.assertEqual(len(set(seq)), len(self.alphabet) - 1)

    def test_beam_search_named_args(self):
        """ simple beam search test with named arguments"""
        seq, path = beam_search(network_output=self.probs, alphabet=self.alphabet,
                                beam_size=self.beam_size,
                                beam_cut_threshold=self.beam_cut_threshold)
        self.assertEqual(len(seq), len(path))
        self.assertEqual(len(set(seq)), len(self.alphabet) - 1)

    def test_beam_search_not_enough_args(self):
        """ simple beam search test with not enough arguments"""
        with self.assertRaises(TypeError):
            beam_search(self.probs)

    def test_beam_search_defaults(self):
        """ simple beam search test using argument defaults"""
        seq, path = beam_search(self.probs, self.alphabet)
        self.assertEqual(len(seq), len(path))
        self.assertEqual(len(set(seq)), len(self.alphabet) - 1)

    def test_beam_search_alphabet(self):
        """ simple beam search test with different alphabet"""
        seq, path = beam_search(self.probs, "NRUST", self.beam_size, self.beam_cut_threshold)
        self.assertEqual(len(seq), len(path))
        self.assertEqual(len(set(seq)), len(self.alphabet) - 1)

    def test_zero_beam_size(self):
        """ simple beam search test with zero beam size"""
        with self.assertRaises(ValueError):
            beam_search(self.probs, self.alphabet, 0, self.beam_cut_threshold)

    def test_zero_beam_cut_threshold(self):
        """ simple beam search test with beam cut threshold of 0.0"""
        seq, path = beam_search(self.probs, self.alphabet, self.beam_size, 0.0)
        self.assertEqual(len(seq), len(path))
        self.assertEqual(len(set(seq)), len(self.alphabet) - 1)

    def test_negative_beam_cut_threshold(self):
        """ simple beam search test with beam cut threshold below 0.0"""
        with self.assertRaises(ValueError):
            beam_search(self.probs, self.alphabet, self.beam_size, -0.1)

    def test_beam_cut_threshold_boundary(self):
        """ simple beam search test with beam cut threshold of 1/len(alphabet)"""
        with self.assertRaises(ValueError):
            beam_search(self.probs, self.alphabet, self.beam_size, 1.0/len(self.alphabet))

    def test_high_beam_cut_threshold(self):
        """ simple beam search test with very high beam cut threshold"""
        with self.assertRaises(ValueError):
            beam_search(self.probs, self.alphabet, self.beam_size, 1.1)

    def test_beam_search_mismatched_alphabet_short(self):
        """ simple beam search test with too few alphabet chars"""
        alphabet = "NAGC"
        with self.assertRaises(ValueError):
            beam_search(self.probs, alphabet, self.beam_size, self.beam_cut_threshold)

    def test_beam_search_mismatched_alphabet_long(self):
        """ simple beam search test with too many alphabet chars"""
        alphabet = "NAGCTX"
        with self.assertRaises(ValueError):
            beam_search(self.probs, alphabet, self.beam_size, self.beam_cut_threshold)

    def test_nans(self):
        """beam_search is passed NaN values"""
        # np.nan (not the np.NaN alias, which was removed in NumPy 2.0)
        self.probs.fill(np.nan)
        with self.assertRaisesRegex(RuntimeError, "Failed to compare values"):
            beam_search(self.probs, self.alphabet)

    def test_beam_search_short_alphabet(self):
        """ simple beam search test with short alphabet"""
        self.alphabet = "NAG"
        self.probs = self.get_random_data()
        seq, path = beam_search(self.probs, self.alphabet, self.beam_size, self.beam_cut_threshold)
        self.assertEqual(len(seq), len(path))
        self.assertEqual(len(set(seq)), len(self.alphabet) - 1)

    def test_beam_search_long_alphabet(self):
        """ simple beam search test with long alphabet"""
        self.alphabet = "NABCDEFGHIJK"
        self.probs = self.get_random_data(10000)
        seq, path = beam_search(self.probs, self.alphabet, self.beam_size, beam_cut_threshold=0.0)
        self.assertEqual(len(seq), len(path))
        self.assertEqual(len(set(seq)), len(self.alphabet) - 1)

    def test_beam_search_path(self):
        """ simple beam search with path"""
        w = 5000
        x = np.zeros((w, len(self.alphabet)), np.float32)
        x[:, 0] = 0.5  # set stay prob
        # emit a base evenly spaced along w
        emit = np.arange(0, w, len(self.alphabet) - 1)
        for base, pos in enumerate(emit):
            x[pos, base % 4 + 1] = 1.0
        seq, path = beam_search(x, self.alphabet, self.beam_size, self.beam_cut_threshold)
        np.testing.assert_array_equal(emit, path)
        self.assertEqual(len(seq), len(path))

    def test_repeat_sequence_path(self):
        """ simple beam search path test with a repeated sequence """
        w = 20
        x = np.zeros((w, len(self.alphabet)), np.float32)
        x[:, 0] = 0.5  # set stay prob
        expected_path = [6, 13, 18]
        for idx in expected_path:
            x[idx, 0] = 0.0
            x[idx, 1] = 1.0
        seq, path = beam_search(x, self.alphabet, self.beam_size, self.beam_cut_threshold)
        self.assertEqual(seq, 'AAA')
        self.assertEqual(len(seq), len(path))
        self.assertEqual(path, expected_path)

    def test_repeat_sequence_path_with_multi_char_alpha(self):
        """ simple beam search path test with a repeated sequence and multi-char alphabet """
        w = 20
        self.alphabet = ["N", "AAA", "CCC", "GGG", "TTTT"]
        x = np.zeros((w, len(self.alphabet)), np.float32)
        x[:, 0] = 0.5  # set stay prob
        alphabet_idx = 1
        expected_path = [6, 13, 18]
        for idx in expected_path:
            x[idx, 0] = 0.0
            x[idx, alphabet_idx] = 1.0
            alphabet_idx += 1
        seq, path = beam_search(x, self.alphabet, self.beam_size, self.beam_cut_threshold)
        self.assertEqual(seq, 'AAACCCGGG')
        self.assertEqual(path, expected_path)

    def test_repeat_sequence_path_with_spread_probs(self):
        """ simple beam search path test with a repeated sequence with probabilities spread"""
        w = 20
        x = np.zeros((w, len(self.alphabet)), np.float32)
        x[:, 0] = 0.5  # set stay prob
        spread = 3
        expected_path = [6, 13, 18]
        for idx in expected_path:
            x[idx:idx + spread, 0] = 0.0
            x[idx:idx + spread, 1] = 1.0
        seq, path = beam_search(x, self.alphabet, self.beam_size, self.beam_cut_threshold)
        self.assertEqual(seq, 'AAA')
        self.assertEqual(len(seq), len(path))
        self.assertEqual(path, expected_path)
class TestViterbiSearch(TestCase):
    """Tests for ``viterbi_search`` over (time x alphabet) probabilities."""

    def setUp(self):
        self.alphabet = "NACGT"
        self.probs = self.get_random_data()

    def get_random_data(self, samples=100):
        """Return an L2-row-normalized random (samples, len(alphabet)) matrix."""
        x = np.random.rand(samples, len(self.alphabet)).astype(np.float32)
        return x / np.linalg.norm(x, ord=2, axis=1, keepdims=True)

    def test_random_data(self):
        """Test viterbi search on some random data"""
        seq, path = viterbi_search(self.probs, self.alphabet)
        self.assertEqual(len(seq), len(path))
        self.assertEqual(len(set(seq)), len(self.alphabet) - 1)

    # BUG FIX: this method was also named ``test_random_data``, which silently
    # shadowed the test above so it never ran; renamed so both execute.
    def test_random_data_qstring(self):
        """Test viterbi search on some random data with qstring generation"""
        seq, path = viterbi_search(self.probs, self.alphabet, qstring=True)
        # seq holds bases followed by the quality string, hence twice the length.
        self.assertEqual(len(seq), len(path) * 2)

    def test_not_enough_args(self):
        """Not enough arguments provided"""
        with self.assertRaises(TypeError):
            viterbi_search(self.probs)

    def test_alphabet_too_small(self):
        """When the alphabet is too small, it should raise"""
        with self.assertRaises(ValueError):
            viterbi_search(self.probs, "NACG")

    def test_alphabet_too_large(self):
        """When the alphabet is too large, it should raise"""
        with self.assertRaises(ValueError):
            viterbi_search(self.probs, "NACGTR")

    def test_beam_search_path(self):
        """data with a predefined path"""
        w = 5000
        x = np.zeros((w, len(self.alphabet)), np.float32)
        x[:, 0] = 0.5  # set stay prob
        # emit a base evenly spaced along w
        emit = np.arange(0, w, len(self.alphabet) - 1)
        for base, pos in enumerate(emit):
            x[pos, base % 4 + 1] = 1.0
        seq, path = viterbi_search(x, self.alphabet)
        np.testing.assert_array_equal(emit, path)
        self.assertEqual(len(seq), len(path))

    def test_repeat_sequence_path(self):
        """test with a repeated sequence """
        w = 20
        x = np.zeros((w, len(self.alphabet)), np.float32)
        x[:, 0] = 0.5  # set stay prob
        expected_path = [6, 13, 18]
        for idx in expected_path:
            x[idx, 0] = 0.0
            x[idx, 1] = 1.0
        seq, path = viterbi_search(x, self.alphabet)
        self.assertEqual(seq, 'AAA')
        self.assertEqual(len(seq), len(path))
        self.assertEqual(path, expected_path)

    def test_repeat_sequence_path_with_qstring(self):
        """test with a repeated sequence with qstring generation """
        w = 20
        x = np.zeros((w, len(self.alphabet)), np.float32)
        x[:, 0] = 0.5  # set stay prob
        expected_path = [6, 13, 18]
        for idx in expected_path:
            x[idx, 0] = 0.0
            x[idx, 1] = 1.0
        seq, path = viterbi_search(x, self.alphabet, qstring=True)
        qual = seq[len(path):]
        seq = seq[:len(path)]
        self.assertEqual(seq, 'AAA')
        self.assertEqual(qual, 'III')
        self.assertEqual(len(seq), len(path))
        self.assertEqual(path, expected_path)

    def test_mean_qscores(self):
        """ test mean qscore generation """
        w = 20
        x = np.zeros((w, len(self.alphabet)), np.float32)
        x[:, 0] = 0.5  # set stay prob
        # Q10 = "5"
        x[3, 0] = 0.0
        x[3, 1] = 0.99
        x[4, 0] = 0.0
        x[4, 1] = 0.99
        # Q20 = "?"
        x[6, 0] = 0.0
        x[6, 2] = 0.999
        x[7, 0] = 0.0
        x[7, 2] = 0.999
        # Q5 = "&"
        x[9, 0] = 0.0
        x[9, 4] = 0.6
        x[10, 0] = 0.0
        x[10, 4] = 0.7
        x[11, 0] = 0.0
        x[11, 4] = 0.8
        # Q3 = "$"
        x[13, 0] = 0.0
        x[13, 4] = 0.4
        x[14, 0] = 0.0
        x[14, 4] = 0.5
        x[15, 0] = 0.0
        x[15, 4] = 0.6
        seq, path = viterbi_search(x, self.alphabet, qstring=True)
        qual = seq[len(path):]
        seq = seq[:len(path)]
        self.assertEqual(seq, 'ACTT')
        self.assertEqual(qual, '5?&$')
        self.assertEqual(len(seq), len(path))

    def test_repeat_sequence_path_with_multi_char_alpha(self):
        """Test that a multi-char alphabet works"""
        w = 20
        self.alphabet = ["N", "AAA", "CCC", "GGG", "TTTT"]
        x = np.zeros((w, len(self.alphabet)), np.float32)
        x[:, 0] = 0.5  # set stay prob
        alphabet_idx = 1
        expected_path = [6, 13, 18]
        for idx in expected_path:
            x[idx, 0] = 0.0
            x[idx, alphabet_idx] = 1.0
            alphabet_idx += 1
        seq, path = viterbi_search(x, self.alphabet)
        self.assertEqual(seq, 'AAACCCGGG')
        self.assertEqual(path, expected_path)

    def test_beam_off_path(self):
        """a set a probabilities where a viterbi search would produce the wrong result"""
        x = np.array([
            [0.7, 0.1, 0.2],
            [0.7, 0.1, 0.2],
            [0.2, 0.3, 0.5],
            [0.2, 0.2, 0.6],
            [0.3, 0.3, 0.4],
            [0.2, 0.2, 0.6],
            [0.2, 0.3, 0.5],
            [0.7, 0.1, 0.2],
            [0.7, 0.1, 0.2],
        ], np.float32)
        seq, path = viterbi_search(x, "NAB")
        self.assertEqual(seq, "B")
class TestDuplexBeamSearch(TestCase):
    """Tests for ``beam_search_duplex`` combining two probability matrices."""

    def setUp(self):
        self.beam_size = 5
        self.alphabet = "NACGT"
        self.beam_cut_threshold = 0.1
        self.probs_1 = self.get_random_data()
        self.probs_2 = self.get_random_data()

    def get_random_data(self, samples=100):
        """Return an L2-row-normalized random (samples, len(alphabet)) matrix."""
        x = np.random.rand(samples, len(self.alphabet)).astype(np.float32)
        return x / np.linalg.norm(x, ord=2, axis=1, keepdims=True)

    def test_nans(self):
        """beam_search_duplex is passed NaN values"""
        # np.nan (not the np.NaN alias, which was removed in NumPy 2.0)
        self.probs_1.fill(np.nan)
        with self.assertRaisesRegex(RuntimeError, "Failed to compare values"):
            beam_search_duplex(self.probs_1, self.probs_2, self.alphabet)

    def test_identical_data(self):
        """Test duplex beam search on the same data twice"""
        x = np.array([
            [0.01, 0.98, 0.01],
            [0.01, 0.98, 0.01],
            [0.01, 0.98, 0.01],
            [0.01, 0.98, 0.01],
            [0.9, 0.05, 0.05],
            [0.7, 0.05, 0.35],
            [0.9, 0.05, 0.05],
            [0.01, 0.98, 0.01],
            [0.01, 0.98, 0.01],
            [0.01, 0.98, 0.01],
            [0.01, 0.01, 0.98],
            [0.01, 0.01, 0.98],
            [0.01, 0.01, 0.98],
            [0.01, 0.01, 0.98],
        ], np.float32)
        seq = beam_search_duplex(x, x, "NAB")
        self.assertEqual("AAB", seq)

    def test_disagreeing_data(self):
        """Test duplex beam search on data that disagrees"""
        x = np.array([
            [0.01, 0.98, 0.01],
            [0.01, 0.34, 0.65],
            [0.01, 0.98, 0.01],
            [0.01, 0.01, 0.98],
        ], np.float32)
        self.assertEqual("ABAB", beam_search(x, "NAB")[0])
        y = np.array([
            [0.0, 1.0, 0.0],
            [0.0, 1.0, 0.0],
            [0.0, 1.0, 0.0],
            [0.0, 0.0, 1.0],
        ], np.float32)
        # The confident second read should override the ambiguous call in x.
        self.assertEqual("AB", beam_search_duplex(x, y, "NAB"))
# Allow running this test module directly (``main`` is ``unittest.main``,
# imported at the top of the file).
if __name__ == '__main__':
    main()
|
python/tests/setup-qfunctions.py | AdelekeBankole/libCEED | 123 | 112116 | <gh_stars>100-1000
# Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
# All Rights reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
# a collaborative effort of two U.S. Department of Energy organizations (Office
# of Science and the National Nuclear Security Administration) responsible for
# the planning and preparation of a capable exascale ecosystem, including
# software, applications, hardware, advanced system engineering and early
# testbed platforms, in support of the nation's exascale computing imperative.
import os
from distutils.core import setup, Extension
import libceed
# Locate the installed libceed package so its headers can be included when
# compiling the qfunction extension.
CEED_DIR = os.path.dirname(libceed.__file__)
# ------------------------------------------------------------------------------
# Setup
# ------------------------------------------------------------------------------
# NOTE(review): distutils is deprecated (removed from the stdlib in Python
# 3.12); consider migrating to setuptools — left unchanged here to avoid
# altering build behavior.
qf_module = Extension("libceed_qfunctions",
                      include_dirs=[os.path.join(CEED_DIR, 'include')],
                      sources=["libceed-qfunctions.c"],
                      extra_compile_args=["-O3", "-std=c99",
                                          "-Wno-unused-variable",
                                          "-Wno-unused-function"])
setup(name="libceed_qfunctions",
      description="libceed qfunction pointers",
      ext_modules=[qf_module])
# ------------------------------------------------------------------------------
|
parakeet/models/parallel_wavegan/parallel_wavegan.py | zh794390558/DeepSpeech | 501 | 112117 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import numpy as np
import paddle
from paddle import nn
from paddle.nn import functional as F
class Stretch2D(nn.Layer):
    """Upscale an image-like tensor by fixed integer factors via interpolation.

    Parameters
    ----------
    w_scale : int
        Upscaling factor along the width (last) axis.
    h_scale : int
        Upscaling factor along the height axis.
    mode : str, optional
        Interpolation mode, by default "nearest". Any mode accepted by
        `paddle.nn.functional.interpolate <https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/nn/functional/interpolate_en.html>`_
        may be used ("nearest", "bilinear", "trilinear", "bicubic", "linear",
        "area").
    """

    def __init__(self, w_scale: int, h_scale: int, mode: str="nearest"):
        super().__init__()
        self.w_scale = w_scale
        self.h_scale = h_scale
        self.mode = mode

    def forward(self, x):
        """Stretch the input.

        Parameters
        ----------
        x : Tensor
            Shape (N, C, H, W).

        Returns
        -------
        Tensor
            Shape (N, C, h_scale * H, w_scale * W), the stretched image.
        """
        factors = (self.h_scale, self.w_scale)
        return F.interpolate(x, scale_factor=factors, mode=self.mode)
class UpsampleNet(nn.Layer):
    """A Layer to upsample spectrogram by applying consecutive stretch and
    convolutions.

    Parameters
    ----------
    upsample_scales : List[int]
        Upsampling factors for each strech.
    nonlinear_activation : Optional[str], optional
        Activation after each convolution, by default None
    nonlinear_activation_params : Dict[str, Any], optional
        Parameters passed to construct the activation, by default {}
    interpolate_mode : str, optional
        Interpolation mode of the strech, by default "nearest"
    freq_axis_kernel_size : int, optional
        Convolution kernel size along the frequency axis, by default 1
    use_causal_conv : bool, optional
        Whether to use causal padding before convolution, by default False

        If True, Causal padding is used along the time axis, i.e. padding
        amount is ``receptive field - 1`` and 0 for before and after,
        respectively.

        If False, "same" padding is used along the time axis.
    """

    def __init__(self,
                 upsample_scales: List[int],
                 nonlinear_activation: Optional[str]=None,
                 nonlinear_activation_params: Dict[str, Any]={},
                 interpolate_mode: str="nearest",
                 freq_axis_kernel_size: int=1,
                 use_causal_conv: bool=False):
        super().__init__()
        self.use_causal_conv = use_causal_conv
        self.up_layers = nn.LayerList()
        for scale in upsample_scales:
            stretch = Stretch2D(scale, 1, interpolate_mode)
            assert freq_axis_kernel_size % 2 == 1
            freq_axis_padding = (freq_axis_kernel_size - 1) // 2
            kernel_size = (freq_axis_kernel_size, scale * 2 + 1)
            if use_causal_conv:
                # Extra time padding; the surplus is cut off in forward() so
                # each frame only sees past context.
                padding = (freq_axis_padding, scale * 2)
            else:
                padding = (freq_axis_padding, scale)
            conv = nn.Conv2D(
                1, 1, kernel_size, padding=padding, bias_attr=False)
            self.up_layers.extend([stretch, conv])
        if nonlinear_activation is not None:
            nonlinear = getattr(
                nn, nonlinear_activation)(**nonlinear_activation_params)
            self.up_layers.append(nonlinear)

    def forward(self, c):
        """
        Parameters
        ----------
        c : Tensor
            Shape (N, F, T), spectrogram

        Returns
        -------
        Tensor
            Shape (N, F, T'), where ``T' = upsample_factor * T``, upsampled
            spectrogram
        """
        c = c.unsqueeze(1)
        for f in self.up_layers:
            if self.use_causal_conv and isinstance(f, nn.Conv2D):
                # BUG FIX: truncate the causally-padded conv output back to
                # the pre-conv time length with a slice ``:c.shape[-1]``.
                # The previous code indexed with a bare ``c.shape[-1]``
                # (missing colon), which selects a single out-of-range
                # column and drops the time dimension entirely.
                c = f(c)[:, :, :, :c.shape[-1]]
            else:
                c = f(c)
        return c.squeeze(1)
class ConvInUpsampleNet(nn.Layer):
    """Upsample a spectrogram with a context-gathering Conv1D followed by an
    ``UpsampleNet``.

    Parameters
    ----------
    upsample_scales : List[int]
        Upsampling factors for each strech.
    nonlinear_activation : Optional[str], optional
        Activation after each convolution, by default None
    nonlinear_activation_params : Dict[str, Any], optional
        Parameters passed to construct the activation, by default {}
    interpolate_mode : str, optional
        Interpolation mode of the strech, by default "nearest"
    freq_axis_kernel_size : int, optional
        Convolution kernel size along the frequency axis, by default 1
    aux_channels : int, optional
        Feature size of the input, by default 80
    aux_context_window : int, optional
        Context window of the first 1D convolution applied to the input; it
        determines the kernel size of that convolution, by default 0.
        With causal convolution the kernel size is ``window + 1``, otherwise
        it is ``2 * window + 1``.
    use_causal_conv : bool, optional
        Whether to use causal padding before convolution, by default False.
        If True, padding along the time axis is ``receptive field - 1``
        before and 0 after; if False, "same" padding is used.
    """

    def __init__(self,
                 upsample_scales: List[int],
                 nonlinear_activation: Optional[str]=None,
                 nonlinear_activation_params: Dict[str, Any]={},
                 interpolate_mode: str="nearest",
                 freq_axis_kernel_size: int=1,
                 aux_channels: int=80,
                 aux_context_window: int=0,
                 use_causal_conv: bool=False):
        super().__init__()
        self.aux_context_window = aux_context_window
        # The causal slice in forward() only makes sense when there is
        # trailing context to trim off.
        self.use_causal_conv = use_causal_conv and aux_context_window > 0
        if use_causal_conv:
            kernel_size = aux_context_window + 1
        else:
            kernel_size = 2 * aux_context_window + 1
        self.conv_in = nn.Conv1D(
            aux_channels,
            aux_channels,
            kernel_size=kernel_size,
            bias_attr=False)
        self.upsample = UpsampleNet(
            upsample_scales=upsample_scales,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
            use_causal_conv=use_causal_conv)

    def forward(self, c):
        """
        Parameters
        ----------
        c : Tensor
            Shape (N, F, T), spectrogram

        Returns
        -------
        Tensors
            Shape (N, F, T'), where ``T' = upsample_factor * T``, upsampled
            spectrogram
        """
        hidden = self.conv_in(c)
        if self.use_causal_conv:
            # Drop the trailing frames that peek at future context.
            hidden = hidden[:, :, :-self.aux_context_window]
        return self.upsample(hidden)
class ResidualBlock(nn.Layer):
    """A gated activation unit composed of an 1D convolution, a gated tanh
    unit and parametric redidual and skip connections. For more details,
    refer to `WaveNet: A Generative Model for Raw Audio <https://arxiv.org/abs/1609.03499>`_.

    Parameters
    ----------
    kernel_size : int, optional
        Kernel size of the 1D convolution, by default 3
    residual_channels : int, optional
        Feature size of the resiaudl output(and also the input), by default 64
    gate_channels : int, optional
        Output feature size of the 1D convolution, by default 128
    skip_channels : int, optional
        Feature size of the skip output, by default 64
    aux_channels : int, optional
        Feature size of the auxiliary input (e.g. spectrogram), by default 80
    dropout : float, optional
        Probability of the dropout before the 1D convolution, by default 0.
    dilation : int, optional
        Dilation of the 1D convolution, by default 1
    bias : bool, optional
        Whether to use bias in the 1D convolution, by default True
    use_causal_conv : bool, optional
        Whether to use causal padding for the 1D convolution, by default False
    """

    def __init__(self,
                 kernel_size: int=3,
                 residual_channels: int=64,
                 gate_channels: int=128,
                 skip_channels: int=64,
                 aux_channels: int=80,
                 dropout: float=0.,
                 dilation: int=1,
                 bias: bool=True,
                 use_causal_conv: bool=False):
        super().__init__()
        self.dropout = dropout
        if use_causal_conv:
            # Pad by the full receptive field; the surplus is trimmed in
            # forward() so outputs depend on past samples only.
            padding = (kernel_size - 1) * dilation
        else:
            assert kernel_size % 2 == 1
            padding = (kernel_size - 1) // 2 * dilation
        self.use_causal_conv = use_causal_conv
        self.conv = nn.Conv1D(
            residual_channels,
            gate_channels,
            kernel_size,
            padding=padding,
            dilation=dilation,
            bias_attr=bias)
        if aux_channels is not None:
            self.conv1x1_aux = nn.Conv1D(
                aux_channels, gate_channels, kernel_size=1, bias_attr=False)
        else:
            self.conv1x1_aux = None
        gate_out_channels = gate_channels // 2
        self.conv1x1_out = nn.Conv1D(
            gate_out_channels, residual_channels, kernel_size=1, bias_attr=bias)
        self.conv1x1_skip = nn.Conv1D(
            gate_out_channels, skip_channels, kernel_size=1, bias_attr=bias)

    def forward(self, x, c):
        """
        Parameters
        ----------
        x : Tensor
            Shape (N, C_res, T), the input features.
        c : Tensor
            Shape (N, C_aux, T), the auxiliary input.

        Returns
        -------
        res : Tensor
            Shape (N, C_res, T), the residual output, which is used as the
            input of the next ResidualBlock in a stack of ResidualBlocks.
        skip : Tensor
            Shape (N, C_skip, T), the skip output, which is collected among
            each layer in a stack of ResidualBlocks.
        """
        x_input = x
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.conv(x)
        # BUG FIX: for causal convolution the padded output must be
        # truncated back to the input length with the slice
        # ``:x_input.shape[-1]``.  The previous code indexed with a bare
        # ``x_input.shape[-1]`` (missing colon), selecting a single
        # out-of-range position and collapsing the time dimension.
        x = x[:, :, :x_input.shape[-1]] if self.use_causal_conv else x
        if c is not None:
            c = self.conv1x1_aux(c)
            x += c
        # Gated tanh unit: split channels into content (a) and gate (b).
        a, b = paddle.chunk(x, 2, axis=1)
        x = paddle.tanh(a) * F.sigmoid(b)
        skip = self.conv1x1_skip(x)
        # Scale the residual sum to keep variance stable across layers.
        res = (self.conv1x1_out(x) + x_input) * math.sqrt(0.5)
        return res, skip
class PWGGenerator(nn.Layer):
    """Wave Generator for Parallel WaveGAN

    Parameters
    ----------
    in_channels : int, optional
        Number of channels of the input waveform, by default 1
    out_channels : int, optional
        Number of channels of the output waveform, by default 1
    kernel_size : int, optional
        Kernel size of the residual blocks inside, by default 3
    layers : int, optional
        Number of residual blocks inside, by default 30
    stacks : int, optional
        The number of groups to split the residual blocks into, by default 3
        Within each group, the dilation of the residual block grows
        exponentially.
    residual_channels : int, optional
        Residual channel of the residual blocks, by default 64
    gate_channels : int, optional
        Gate channel of the residual blocks, by default 128
    skip_channels : int, optional
        Skip channel of the residual blocks, by default 64
    aux_channels : int, optional
        Auxiliary channel of the residual blocks, by default 80
    aux_context_window : int, optional
        The context window size of the first convolution applied to the
        auxiliary input, by default 2
    dropout : float, optional
        Dropout of the residual blocks, by default 0.
    bias : bool, optional
        Whether to use bias in residual blocks, by default True
    use_weight_norm : bool, optional
        Whether to use weight norm in all convolutions, by default True
    use_causal_conv : bool, optional
        Whether to use causal padding in the upsample network and residual
        blocks, by default False
    upsample_scales : List[int], optional
        Upsample scales of the upsample network, by default [4, 4, 4, 4]
    nonlinear_activation : Optional[str], optional
        Non linear activation in upsample network, by default None
    nonlinear_activation_params : Dict[str, Any], optional
        Parameters passed to the linear activation in the upsample network,
        by default {}
    interpolate_mode : str, optional
        Interpolation mode of the upsample network, by default "nearest"
    freq_axis_kernel_size : int, optional
        Kernel size along the frequency axis of the upsample network, by default 1
    """

    def __init__(self,
                 in_channels: int=1,
                 out_channels: int=1,
                 kernel_size: int=3,
                 layers: int=30,
                 stacks: int=3,
                 residual_channels: int=64,
                 gate_channels: int=128,
                 skip_channels: int=64,
                 aux_channels: int=80,
                 aux_context_window: int=2,
                 dropout: float=0.,
                 bias: bool=True,
                 use_weight_norm: bool=True,
                 use_causal_conv: bool=False,
                 upsample_scales: List[int]=[4, 4, 4, 4],
                 nonlinear_activation: Optional[str]=None,
                 nonlinear_activation_params: Dict[str, Any]={},
                 interpolate_mode: str="nearest",
                 freq_axis_kernel_size: int=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aux_channels = aux_channels
        self.aux_context_window = aux_context_window
        self.layers = layers
        self.stacks = stacks
        self.kernel_size = kernel_size
        # Residual blocks are grouped into ``stacks`` groups; the dilation
        # pattern repeats once per group (see the loop below).
        assert layers % stacks == 0
        layers_per_stack = layers // stacks
        self.first_conv = nn.Conv1D(
            in_channels, residual_channels, 1, bias_attr=True)
        self.upsample_net = ConvInUpsampleNet(
            upsample_scales=upsample_scales,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
            aux_channels=aux_channels,
            aux_context_window=aux_context_window,
            use_causal_conv=use_causal_conv)
        # Total time-axis upsampling applied to the auxiliary input.
        self.upsample_factor = np.prod(upsample_scales)
        self.conv_layers = nn.LayerList()
        for layer in range(layers):
            # Dilation grows exponentially within a stack, then resets:
            # 1, 2, 4, ..., 2**(layers_per_stack-1), 1, 2, ...
            dilation = 2**(layer % layers_per_stack)
            conv = ResidualBlock(
                kernel_size=kernel_size,
                residual_channels=residual_channels,
                gate_channels=gate_channels,
                skip_channels=skip_channels,
                aux_channels=aux_channels,
                dilation=dilation,
                dropout=dropout,
                bias=bias,
                use_causal_conv=use_causal_conv)
            self.conv_layers.append(conv)
        self.last_conv_layers = nn.Sequential(nn.ReLU(),
                                              nn.Conv1D(
                                                  skip_channels,
                                                  skip_channels,
                                                  1,
                                                  bias_attr=True),
                                              nn.ReLU(),
                                              nn.Conv1D(
                                                  skip_channels,
                                                  out_channels,
                                                  1,
                                                  bias_attr=True))
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x, c):
        """Generate waveform.

        Parameters
        ----------
        x : Tensor
            Shape (N, C_in, T), The input waveform.
        c : Tensor
            Shape (N, C_aux, T'). The auxiliary input (e.g. spectrogram). It
            is upsampled to match the time resolution of the input.

        Returns
        -------
        Tensor
            Shape (N, C_out, T), the generated waveform.
        """
        c = self.upsample_net(c)
        assert c.shape[-1] == x.shape[-1]
        x = self.first_conv(x)
        skips = 0
        for f in self.conv_layers:
            x, s = f(x, c)
            skips += s
        # Normalize the accumulated skip connections to keep variance
        # independent of the number of layers.
        skips *= math.sqrt(1.0 / len(self.conv_layers))
        x = self.last_conv_layers(skips)
        return x

    def apply_weight_norm(self):
        """Recursively apply weight normalization to all the Convolution layers
        in the sublayers.
        """

        def _apply_weight_norm(layer):
            if isinstance(layer, (nn.Conv1D, nn.Conv2D)):
                nn.utils.weight_norm(layer)

        self.apply(_apply_weight_norm)

    def remove_weight_norm(self):
        """Recursively remove weight normalization from all the Convolution
        layers in the sublayers.
        """

        def _remove_weight_norm(layer):
            try:
                nn.utils.remove_weight_norm(layer)
            except ValueError:
                # Layer had no weight norm applied; skip it.
                pass

        self.apply(_remove_weight_norm)

    def inference(self, c=None):
        """Waveform generation. This function is used for single instance
        inference.

        The noise input is drawn internally from a standard gaussian; only
        the auxiliary features are supplied by the caller.

        Parameters
        ----------
        c : Tensor, optional
            Shape (T', C_aux), the auxiliary input, by default None

        Returns
        -------
        Tensor
            Shape (T, C_out), the generated waveform
        """
        # Gaussian noise with one sample per output frame.
        x = paddle.randn(
            [1, self.in_channels, paddle.shape(c)[0] * self.upsample_factor])
        c = paddle.transpose(c, [1, 0]).unsqueeze(0)  # pseudo batch
        # Replicate edge frames to provide context for the first conv.
        c = nn.Pad1D(self.aux_context_window, mode='replicate')(c)
        out = self(x, c).squeeze(0).transpose([1, 0])
        return out
class PWGDiscriminator(nn.Layer):
    """A convolutional discriminator for audio.

    Parameters
    ----------
    in_channels : int, optional
        Number of channels of the input audio, by default 1
    out_channels : int, optional
        Output feature size, by default 1
    kernel_size : int, optional
        Kernel size of convolutional sublayers, by default 3
    layers : int, optional
        Number of layers, by default 10
    conv_channels : int, optional
        Feature size of the convolutional sublayers, by default 64
    dilation_factor : int, optional
        The factor with which dilation of each convolutional sublayers grows
        exponentially if it is greater than 1, else the dilation of each
        convolutional sublayers grows linearly, by default 1
    nonlinear_activation : str, optional
        The activation after each convolutional sublayer, by default "LeakyReLU"
    nonlinear_activation_params : Dict[str, Any], optional
        The parameters passed to the activation's initializer, by default
        {"negative_slope": 0.2}
    bias : bool, optional
        Whether to use bias in convolutional sublayers, by default True
    use_weight_norm : bool, optional
        Whether to use weight normalization at all convolutional sublayers,
        by default True
    """

    def __init__(
            self,
            in_channels: int=1,
            out_channels: int=1,
            kernel_size: int=3,
            layers: int=10,
            conv_channels: int=64,
            dilation_factor: int=1,
            nonlinear_activation: str="LeakyReLU",
            nonlinear_activation_params: Dict[str, Any]={"negative_slope": 0.2},
            bias: bool=True,
            use_weight_norm: bool=True):
        super().__init__()
        assert kernel_size % 2 == 1
        assert dilation_factor > 0
        conv_layers = []
        conv_in_channels = in_channels
        for i in range(layers - 1):
            if i == 0:
                # First layer: no dilation, input channels come from the audio.
                dilation = 1
            else:
                # Dilation grows linearly (factor == 1) or exponentially;
                # from the second layer on, inputs are conv_channels wide.
                dilation = i if dilation_factor == 1 else dilation_factor**i
                conv_in_channels = conv_channels
            # "Same" padding for the dilated kernel keeps the time length.
            padding = (kernel_size - 1) // 2 * dilation
            conv_layer = nn.Conv1D(
                conv_in_channels,
                conv_channels,
                kernel_size,
                padding=padding,
                dilation=dilation,
                bias_attr=bias)
            # Activation class is looked up by name on paddle.nn.
            nonlinear = getattr(
                nn, nonlinear_activation)(**nonlinear_activation_params)
            conv_layers.append(conv_layer)
            conv_layers.append(nonlinear)
        padding = (kernel_size - 1) // 2
        # Final projection to the output feature size, without activation.
        last_conv = nn.Conv1D(
            conv_in_channels,
            out_channels,
            kernel_size,
            padding=padding,
            bias_attr=bias)
        conv_layers.append(last_conv)
        self.conv_layers = nn.Sequential(*conv_layers)
        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x):
        """
        Parameters
        ----------
        x : Tensor
            Shape (N, in_channels, num_samples), the input audio.

        Returns
        -------
        Tensor
            Shape (N, out_channels, num_samples), the predicted logits.
        """
        return self.conv_layers(x)

    def apply_weight_norm(self):
        # Apply weight normalization to every convolution sublayer.
        def _apply_weight_norm(layer):
            if isinstance(layer, (nn.Conv1D, nn.Conv2D)):
                nn.utils.weight_norm(layer)

        self.apply(_apply_weight_norm)

    def remove_weight_norm(self):
        # Strip weight normalization; layers without it are skipped.
        def _remove_weight_norm(layer):
            try:
                nn.utils.remove_weight_norm(layer)
            except ValueError:
                pass

        self.apply(_remove_weight_norm)
class ResidualPWGDiscriminator(nn.Layer):
    """A wavenet-style discriminator for audio.

    Parameters
    ----------
    in_channels : int, optional
        Number of channels of the input audio, by default 1
    out_channels : int, optional
        Output feature size, by default 1
    kernel_size : int, optional
        Kernel size of residual blocks, by default 3
    layers : int, optional
        Number of residual blocks, by default 30
    stacks : int, optional
        Number of groups of residual blocks, within which the dilation
        of each residual blocks grows exponentially, by default 3
    residual_channels : int, optional
        Residual channels of residual blocks, by default 64
    gate_channels : int, optional
        Gate channels of residual blocks, by default 128
    skip_channels : int, optional
        Skip channels of residual blocks, by default 64
    dropout : float, optional
        Dropout probability of residual blocks, by default 0.
    bias : bool, optional
        Whether to use bias in residual blocks, by default True
    use_weight_norm : bool, optional
        Whether to use weight normalization in all convolutional layers,
        by default True
    use_causal_conv : bool, optional
        Whether to use causal convolution in residual blocks, by default False
    nonlinear_activation : str, optional
        Activation after convolutions other than those in residual blocks,
        by default "LeakyReLU"
    nonlinear_activation_params : Dict[str, Any], optional
        Parameters to pass to the activation, by default {"negative_slope": 0.2}
    """

    def __init__(self,
                 in_channels: int=1,
                 out_channels: int=1,
                 kernel_size: int=3,
                 layers: int=30,
                 stacks: int=3,
                 residual_channels: int=64,
                 gate_channels: int=128,
                 skip_channels: int=64,
                 dropout: float=0.,
                 bias: bool=True,
                 use_weight_norm: bool=True,
                 use_causal_conv: bool=False,
                 nonlinear_activation: str="LeakyReLU",
                 nonlinear_activation_params: Dict[
                     str, Any]={"negative_slope": 0.2}):
        super().__init__()
        assert kernel_size % 2 == 1

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.layers = layers
        self.stacks = stacks
        self.kernel_size = kernel_size

        # Dilation resets at the start of every stack.
        assert layers % stacks == 0
        layers_per_stack = layers // stacks

        def _make_activation():
            # Fresh activation instance, looked up on paddle.nn by name.
            return getattr(nn, nonlinear_activation)(
                **nonlinear_activation_params)

        # Project the waveform into the residual channel space.
        self.first_conv = nn.Sequential(
            nn.Conv1D(in_channels, residual_channels, 1, bias_attr=True),
            _make_activation())

        self.conv_layers = nn.LayerList()
        for idx in range(layers):
            self.conv_layers.append(
                ResidualBlock(
                    kernel_size=kernel_size,
                    residual_channels=residual_channels,
                    gate_channels=gate_channels,
                    skip_channels=skip_channels,
                    aux_channels=None,  # no auxiliary input
                    dropout=dropout,
                    dilation=2**(idx % layers_per_stack),
                    bias=bias,
                    use_causal_conv=use_causal_conv))

        # Post-net mapping the accumulated skip features to logits.
        self.last_conv_layers = nn.Sequential(
            _make_activation(),
            nn.Conv1D(skip_channels, skip_channels, 1, bias_attr=True),
            _make_activation(),
            nn.Conv1D(skip_channels, out_channels, 1, bias_attr=True))

        if use_weight_norm:
            self.apply_weight_norm()

    def forward(self, x):
        """
        Parameters
        ----------
        x : Tensor
            Shape (N, in_channels, num_samples), the input audio.

        Returns
        -------
        Tensor
            Shape (N, out_channels, num_samples), the predicted logits.
        """
        x = self.first_conv(x)
        skip_sum = 0
        for block in self.conv_layers:
            x, skip = block(x, None)  # no conditioning features
            skip_sum += skip
        # Scale the accumulated skip connections by 1/sqrt(num blocks).
        skip_sum *= math.sqrt(1 / len(self.conv_layers))
        return self.last_conv_layers(skip_sum)

    def apply_weight_norm(self):
        """Recursively add weight normalization to all convolutions."""

        def _apply(layer):
            if isinstance(layer, (nn.Conv1D, nn.Conv2D)):
                nn.utils.weight_norm(layer)

        self.apply(_apply)

    def remove_weight_norm(self):
        """Recursively strip weight normalization; layers without it are
        skipped."""

        def _remove(layer):
            try:
                nn.utils.remove_weight_norm(layer)
            except ValueError:
                # Layer was never weight-normalized; nothing to do.
                pass

        self.apply(_remove)
class PWGInference(nn.Layer):
    """Bundles a feature normalizer and a Parallel WaveGAN generator into a
    single layer for mel-to-waveform inference."""

    def __init__(self, normalizer, pwg_generator):
        super().__init__()
        self.normalizer = normalizer
        self.pwg_generator = pwg_generator

    def forward(self, logmel):
        """Normalize ``logmel`` and synthesize the corresponding waveform."""
        return self.pwg_generator.inference(self.normalizer(logmel))
|
dojo/db_migrations/0114_cyclonedx_vuln_uniqu.py | mtcolman/django-DefectDojo | 1,772 | 112148 | <filename>dojo/db_migrations/0114_cyclonedx_vuln_uniqu.py<gh_stars>1000+
from django.db import migrations
from django.db.models import F
class Migration(migrations.Migration):
    """Data migration for the CycloneDX parser rework.

    The parser now reports the vulnerability identifier in
    ``vuln_id_from_tool`` instead of ``unique_id_from_tool``; this migration
    renames the test type and moves existing finding identifiers accordingly,
    and supports reversing the move.
    """

    def rename_cyclonedx_parser_vuln_uniq(apps, schema_editor):
        """
        1) rename test type to reflect changes in the parser
        2) switch vuln_id_from_tool and unique_id_from_tool in findings
        """
        # rename 'cyclonedx' to 'CycloneDX Scan'
        test_type_model = apps.get_model('dojo', 'Test_Type')
        cyclonedx_testtype = test_type_model.objects.filter(name='cyclonedx').first()
        if cyclonedx_testtype:
            cyclonedx_testtype.name = 'CycloneDX Scan'
            cyclonedx_testtype.save()
        finding_model = apps.get_model('dojo', 'Finding')
        # extra protection, only migrate the findings that have a non-null unique_id_from_tool
        findings = finding_model.objects.filter(test__test_type=cyclonedx_testtype, unique_id_from_tool__isnull=False)
        # assign value from unique_id_from_tool to vuln_id_from_tool
        findings.update(vuln_id_from_tool=F('unique_id_from_tool'))
        # reset unique_id_from_tool
        findings.update(unique_id_from_tool=None)

    def reverse_cyclonedx_parser_vuln_uniq(apps, schema_editor):
        """Reverse step: restore the old test type name and move the
        identifiers back from vuln_id_from_tool to unique_id_from_tool."""
        test_type_model = apps.get_model('dojo', 'Test_Type')
        cyclonedx_testtype = test_type_model.objects.filter(name='CycloneDX Scan').first()
        if cyclonedx_testtype:
            cyclonedx_testtype.name = 'cyclonedx'
            cyclonedx_testtype.save()
        # BUG FIX: finding_model was never defined in this function, so
        # reversing the migration raised NameError. Resolve the historical
        # model through the app registry, as the forward function does.
        finding_model = apps.get_model('dojo', 'Finding')
        findings = finding_model.objects.filter(test__test_type=cyclonedx_testtype, vuln_id_from_tool__isnull=False)
        # assign value from vuln_id_from_tool to unique_id_from_tool
        findings.update(unique_id_from_tool=F('vuln_id_from_tool'))
        # reset vuln_id_from_tool
        findings.update(vuln_id_from_tool=None)

    dependencies = [
        ('dojo', '0113_endpoint_protocol'),
    ]

    operations = [
        migrations.RunPython(rename_cyclonedx_parser_vuln_uniq, reverse_cyclonedx_parser_vuln_uniq)
    ]
|
tests/core/segment_tree_test.py | stjordanis/rlmeta | 258 | 112154 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import unittest
from math import prod
import numpy as np
import torch
from rlmeta.core.segment_tree import SumSegmentTree, MinSegmentTree
from tests.test_utils import TestCaseBase
class SumSegmentTreeTest(TestCaseBase):
    """Unit tests for SumSegmentTree: element access, point/batch updates,
    range-sum queries and pickling round trips."""
    def setUp(self) -> None:
        # Tree over `size` random values; batched queries use a 3-D index
        # tensor of shape `query_size` (2 * 3 * 4 = 24 indices).
        self.size = 100
        self.data = torch.randn(self.size)
        self.segment_tree = SumSegmentTree(self.size)
        self.segment_tree[torch.arange(self.size)] = self.data
        self.query_size = (2, 3, 4)
    def test_at(self) -> None:
        """Indexing and .at() must both return the stored leaf values."""
        index = torch.randint(self.size, self.query_size)
        value = self.segment_tree[index]
        self.assert_tensor_equal(value, self.data[index])
        value = self.segment_tree.at(index)
        self.assert_tensor_equal(value, self.data[index])
    def test_update(self) -> None:
        """Scalar and tensor writes via __setitem__ and .update()."""
        # Sample distinct indices so a batched write touches each leaf once.
        weights = torch.ones(self.size)
        index = weights.multinomial(prod(self.query_size), replacement=False)
        index = index.view(self.query_size)
        origin_value = self.segment_tree[index]
        # Scalar write via __setitem__, then restore the original leaves.
        value = np.random.randn()
        self.segment_tree[index] = value
        self.assert_tensor_equal(self.segment_tree[index],
                                 torch.full(self.query_size, value))
        self.segment_tree[index] = origin_value
        # Scalar write via update().
        value = np.random.randn()
        self.segment_tree.update(index, value)
        self.assert_tensor_equal(self.segment_tree[index],
                                 torch.full(self.query_size, value))
        self.segment_tree[index] = origin_value
        # Tensor write via __setitem__.
        value = torch.randn(self.query_size)
        self.segment_tree[index] = value
        self.assert_tensor_equal(self.segment_tree[index], value)
        self.segment_tree[index] = origin_value
        # Tensor write via update().
        value = torch.randn(self.query_size)
        self.segment_tree.update(index, value)
        self.assert_tensor_equal(self.segment_tree[index], value)
        self.segment_tree[index] = origin_value
    def test_query(self) -> None:
        """Batched range sums must match per-range slice sums of the data."""
        a = torch.randint(self.size, self.query_size)
        b = torch.randint(self.size, self.query_size)
        l = torch.minimum(a, b)
        r = torch.maximum(a, b)
        value = self.segment_tree.query(l, r)
        # Reference result: sum each half-open range [x, y) directly.
        l_list = l.view(-1).tolist()
        r_list = r.view(-1).tolist()
        ret = []
        for (x, y) in zip(l_list, r_list):
            ret.append(self.data[x:y].sum())
        ret = torch.tensor(ret).view(self.query_size)
        self.assert_tensor_close(value, ret, rtol=1e-6, atol=1e-6)
    def test_pickle(self) -> None:
        """A pickled/unpickled tree keeps its leaves and query behavior."""
        s = pickle.dumps(self.segment_tree)
        t = pickle.loads(s)
        self.assert_tensor_equal(t[torch.arange(self.size)], self.data)
        for _ in range(10):
            l = np.random.randint(self.size)
            r = np.random.randint(self.size)
            if l > r:
                l, r = r, l
            ret = t.query(l, r)
            ans = self.data[l:r].sum().item()
            self.assertAlmostEqual(ret, ans, places=5)
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
|
experiments/cifar10/train.py | vishalbelsare/tanda | 166 | 112166 | <reponame>vishalbelsare/tanda<gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from dataset import load_cifar10_data
from experiments.train_scripts import flags, select_fold, train
from experiments.tfs.image import *
from functools import partial
from itertools import chain
#####################################################################
# Experiment-specific command-line flag (parsed when the app starts).
flags.DEFINE_boolean("validation_set", True,
    "If False, use validation set as part of training set")
FLAGS = flags.FLAGS
#####################################################################
#####################################################################
# Transformation functions
# Pool of candidate TFs: small rotations, zooms, shears, swirls, hue shifts,
# contrast/brightness/color enhancements, and a horizontal flip.
tfs = list(chain.from_iterable([
    [partial(TF_rotate, angle=p) for p in [2.5, -2.5, 5, -5]],
    [partial(TF_zoom, scale=p) for p in [0.9, 1.1, 0.75, 1.25]],
    [partial(TF_shear, shear=p) for p in [0.1, -0.1, 0.25, -0.25]],
    [partial(TF_swirl, strength=p) for p in [0.1, -0.1, 0.25, -0.25]],
    [partial(TF_shift_hue, shift=p) for p in [0.1, -0.1, 0.25, -0.25]],
    [partial(TF_enhance_contrast, p=p) for p in [0.75, 1.25, 0.5, 1.5]],
    [partial(TF_enhance_brightness, p=p) for p in [0.75, 1.25, 0.5, 1.5]],
    [partial(TF_enhance_color, p=p) for p in [0.75, 1.25, 0.5, 1.5]],
    [TF_horizontal_flip]
]))
#####################################################################
if __name__ == '__main__':
    # Load CIFAR10 data
    dims = [32, 32, 3]  # CIFAR-10 image shape: 32x32 RGB
    DATA_DIR = 'experiments/cifar10/data/cifar-10-batches-py'
    X_train, Y_train, X_valid, Y_valid, X_test, Y_test = load_cifar10_data(
        DATA_DIR, validation_set=FLAGS.validation_set)
    # Optionally restrict the training data to one cross-validation fold.
    if FLAGS.n_folds > 0:
        X_train, Y_train = select_fold(X_train, Y_train)
    # Run training scripts
    train(X_train, dims, tfs, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid,
          n_classes=10)
|
sdk/python/pulumi_gcp/iot/outputs.py | sisisin/pulumi-gcp | 121 | 112172 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Public output types exported by this generated module.
__all__ = [
    'DeviceConfig',
    'DeviceCredential',
    'DeviceCredentialPublicKey',
    'DeviceGatewayConfig',
    'DeviceLastErrorStatus',
    'DeviceState',
    'RegistryCredential',
    'RegistryEventNotificationConfigItem',
]
@pulumi.output_type
class DeviceConfig(dict):
    """Generated output type for an IoT device config: binary payload plus
    version and cloud/device acknowledgement timestamps."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase key is used for dict-style access; the
        # snake_case property getter should be used instead.
        suggest = None
        if key == "binaryData":
            suggest = "binary_data"
        elif key == "cloudUpdateTime":
            suggest = "cloud_update_time"
        elif key == "deviceAckTime":
            suggest = "device_ack_time"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DeviceConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DeviceConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DeviceConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 binary_data: Optional[str] = None,
                 cloud_update_time: Optional[str] = None,
                 device_ack_time: Optional[str] = None,
                 version: Optional[str] = None):
        # Only store the fields that were actually provided.
        if binary_data is not None:
            pulumi.set(__self__, "binary_data", binary_data)
        if cloud_update_time is not None:
            pulumi.set(__self__, "cloud_update_time", cloud_update_time)
        if device_ack_time is not None:
            pulumi.set(__self__, "device_ack_time", device_ack_time)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter(name="binaryData")
    def binary_data(self) -> Optional[str]:
        return pulumi.get(self, "binary_data")

    @property
    @pulumi.getter(name="cloudUpdateTime")
    def cloud_update_time(self) -> Optional[str]:
        return pulumi.get(self, "cloud_update_time")

    @property
    @pulumi.getter(name="deviceAckTime")
    def device_ack_time(self) -> Optional[str]:
        return pulumi.get(self, "device_ack_time")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        return pulumi.get(self, "version")
@pulumi.output_type
class DeviceCredential(dict):
    """Generated output type for a device credential: a public key plus an
    optional expiration time."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase key is used for dict-style access; the
        # snake_case property getter should be used instead.
        suggest = None
        if key == "publicKey":
            suggest = "public_key"
        elif key == "expirationTime":
            suggest = "expiration_time"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DeviceCredential. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DeviceCredential.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DeviceCredential.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 public_key: 'outputs.DeviceCredentialPublicKey',
                 expiration_time: Optional[str] = None):
        """
        :param 'DeviceCredentialPublicKeyArgs' public_key: A public key used to verify the signature of JSON Web Tokens (JWTs).
               Structure is documented below.
        :param str expiration_time: The time at which this credential becomes invalid.
        """
        pulumi.set(__self__, "public_key", public_key)
        if expiration_time is not None:
            pulumi.set(__self__, "expiration_time", expiration_time)

    @property
    @pulumi.getter(name="publicKey")
    def public_key(self) -> 'outputs.DeviceCredentialPublicKey':
        """
        A public key used to verify the signature of JSON Web Tokens (JWTs).
        Structure is documented below.
        """
        return pulumi.get(self, "public_key")

    @property
    @pulumi.getter(name="expirationTime")
    def expiration_time(self) -> Optional[str]:
        """
        The time at which this credential becomes invalid.
        """
        return pulumi.get(self, "expiration_time")
@pulumi.output_type
class DeviceCredentialPublicKey(dict):
    """Generated output type for a credential's public key: key format plus
    the key data itself."""
    def __init__(__self__, *,
                 format: str,
                 key: str):
        """
        :param str format: The format of the key.
               Possible values are `RSA_PEM`, `RSA_X509_PEM`, `ES256_PEM`, and `ES256_X509_PEM`.
        :param str key: The key data.
        """
        pulumi.set(__self__, "format", format)
        pulumi.set(__self__, "key", key)

    @property
    @pulumi.getter
    def format(self) -> str:
        """
        The format of the key.
        Possible values are `RSA_PEM`, `RSA_X509_PEM`, `ES256_PEM`, and `ES256_X509_PEM`.
        """
        return pulumi.get(self, "format")

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key data.
        """
        return pulumi.get(self, "key")
@pulumi.output_type
class DeviceGatewayConfig(dict):
    """Generated output type for a device's gateway configuration, including
    the most recently accessed gateway."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase key is used for dict-style access; the
        # snake_case property getter should be used instead.
        suggest = None
        if key == "gatewayAuthMethod":
            suggest = "gateway_auth_method"
        elif key == "gatewayType":
            suggest = "gateway_type"
        elif key == "lastAccessedGatewayId":
            suggest = "last_accessed_gateway_id"
        elif key == "lastAccessedGatewayTime":
            suggest = "last_accessed_gateway_time"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DeviceGatewayConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DeviceGatewayConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DeviceGatewayConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 gateway_auth_method: Optional[str] = None,
                 gateway_type: Optional[str] = None,
                 last_accessed_gateway_id: Optional[str] = None,
                 last_accessed_gateway_time: Optional[str] = None):
        """
        :param str gateway_auth_method: Indicates whether the device is a gateway.
               Possible values are `ASSOCIATION_ONLY`, `DEVICE_AUTH_TOKEN_ONLY`, and `ASSOCIATION_AND_DEVICE_AUTH_TOKEN`.
        :param str gateway_type: Indicates whether the device is a gateway.
               Default value is `NON_GATEWAY`.
               Possible values are `GATEWAY` and `NON_GATEWAY`.
        :param str last_accessed_gateway_id: -
               The ID of the gateway the device accessed most recently.
        :param str last_accessed_gateway_time: -
               The most recent time at which the device accessed the gateway specified in last_accessed_gateway.
        """
        # Only store the fields that were actually provided.
        if gateway_auth_method is not None:
            pulumi.set(__self__, "gateway_auth_method", gateway_auth_method)
        if gateway_type is not None:
            pulumi.set(__self__, "gateway_type", gateway_type)
        if last_accessed_gateway_id is not None:
            pulumi.set(__self__, "last_accessed_gateway_id", last_accessed_gateway_id)
        if last_accessed_gateway_time is not None:
            pulumi.set(__self__, "last_accessed_gateway_time", last_accessed_gateway_time)

    @property
    @pulumi.getter(name="gatewayAuthMethod")
    def gateway_auth_method(self) -> Optional[str]:
        """
        Indicates whether the device is a gateway.
        Possible values are `ASSOCIATION_ONLY`, `DEVICE_AUTH_TOKEN_ONLY`, and `ASSOCIATION_AND_DEVICE_AUTH_TOKEN`.
        """
        return pulumi.get(self, "gateway_auth_method")

    @property
    @pulumi.getter(name="gatewayType")
    def gateway_type(self) -> Optional[str]:
        """
        Indicates whether the device is a gateway.
        Default value is `NON_GATEWAY`.
        Possible values are `GATEWAY` and `NON_GATEWAY`.
        """
        return pulumi.get(self, "gateway_type")

    @property
    @pulumi.getter(name="lastAccessedGatewayId")
    def last_accessed_gateway_id(self) -> Optional[str]:
        """
        -
        The ID of the gateway the device accessed most recently.
        """
        return pulumi.get(self, "last_accessed_gateway_id")

    @property
    @pulumi.getter(name="lastAccessedGatewayTime")
    def last_accessed_gateway_time(self) -> Optional[str]:
        """
        -
        The most recent time at which the device accessed the gateway specified in last_accessed_gateway.
        """
        return pulumi.get(self, "last_accessed_gateway_time")
@pulumi.output_type
class DeviceLastErrorStatus(dict):
    """Generated output type for a device's last error status: numeric code
    (`number`), human-readable `message` and structured `details`."""
    def __init__(__self__, *,
                 details: Optional[Sequence[Mapping[str, Any]]] = None,
                 message: Optional[str] = None,
                 number: Optional[int] = None):
        # Only store the fields that were actually provided.
        if details is not None:
            pulumi.set(__self__, "details", details)
        if message is not None:
            pulumi.set(__self__, "message", message)
        if number is not None:
            pulumi.set(__self__, "number", number)

    @property
    @pulumi.getter
    def details(self) -> Optional[Sequence[Mapping[str, Any]]]:
        return pulumi.get(self, "details")

    @property
    @pulumi.getter
    def message(self) -> Optional[str]:
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def number(self) -> Optional[int]:
        return pulumi.get(self, "number")
@pulumi.output_type
class DeviceState(dict):
    """Generated output type for a device's reported state: `binary_data`
    payload and the `update_time` it was last set."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase key is used for dict-style access; the
        # snake_case property getter should be used instead.
        suggest = None
        if key == "binaryData":
            suggest = "binary_data"
        elif key == "updateTime":
            suggest = "update_time"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DeviceState. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DeviceState.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DeviceState.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 binary_data: Optional[str] = None,
                 update_time: Optional[str] = None):
        # Only store the fields that were actually provided.
        if binary_data is not None:
            pulumi.set(__self__, "binary_data", binary_data)
        if update_time is not None:
            pulumi.set(__self__, "update_time", update_time)

    @property
    @pulumi.getter(name="binaryData")
    def binary_data(self) -> Optional[str]:
        return pulumi.get(self, "binary_data")

    @property
    @pulumi.getter(name="updateTime")
    def update_time(self) -> Optional[str]:
        return pulumi.get(self, "update_time")
@pulumi.output_type
class RegistryCredential(dict):
    """Generated output type for a registry credential, wrapping a public key
    certificate."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase key is used for dict-style access; the
        # snake_case property getter should be used instead.
        suggest = None
        if key == "publicKeyCertificate":
            suggest = "public_key_certificate"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in RegistryCredential. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        RegistryCredential.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        RegistryCredential.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 public_key_certificate: Mapping[str, Any]):
        """
        :param Mapping[str, Any] public_key_certificate: A public key certificate format and data.
        """
        pulumi.set(__self__, "public_key_certificate", public_key_certificate)

    @property
    @pulumi.getter(name="publicKeyCertificate")
    def public_key_certificate(self) -> Mapping[str, Any]:
        """
        A public key certificate format and data.
        """
        return pulumi.get(self, "public_key_certificate")
@pulumi.output_type
class RegistryEventNotificationConfigItem(dict):
    """Generated output type for one registry event-notification rule: the
    target PubSub topic and an optional subfolder match filter."""
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase key is used for dict-style access; the
        # snake_case property getter should be used instead.
        suggest = None
        if key == "pubsubTopicName":
            suggest = "pubsub_topic_name"
        elif key == "subfolderMatches":
            suggest = "subfolder_matches"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in RegistryEventNotificationConfigItem. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        RegistryEventNotificationConfigItem.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        RegistryEventNotificationConfigItem.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 pubsub_topic_name: str,
                 subfolder_matches: Optional[str] = None):
        """
        :param str pubsub_topic_name: PubSub topic name to publish device events.
        :param str subfolder_matches: If the subfolder name matches this string exactly, this
               configuration will be used. The string must not include the
               leading '/' character. If empty, all strings are matched. Empty
               value can only be used for the last `event_notification_configs`
               item.
        """
        pulumi.set(__self__, "pubsub_topic_name", pubsub_topic_name)
        if subfolder_matches is not None:
            pulumi.set(__self__, "subfolder_matches", subfolder_matches)

    @property
    @pulumi.getter(name="pubsubTopicName")
    def pubsub_topic_name(self) -> str:
        """
        PubSub topic name to publish device events.
        """
        return pulumi.get(self, "pubsub_topic_name")

    @property
    @pulumi.getter(name="subfolderMatches")
    def subfolder_matches(self) -> Optional[str]:
        """
        If the subfolder name matches this string exactly, this
        configuration will be used. The string must not include the
        leading '/' character. If empty, all strings are matched. Empty
        value can only be used for the last `event_notification_configs`
        item.
        """
        return pulumi.get(self, "subfolder_matches")
|
test/augmentation/apply/test_tf_applier.py | HazyResearch/snorkel | 2,906 | 112192 | <gh_stars>1000+
import unittest
from types import SimpleNamespace
from typing import List
import pandas as pd
from snorkel.augmentation import (
ApplyOnePolicy,
PandasTFApplier,
RandomPolicy,
TFApplier,
transformation_function,
)
from snorkel.types import DataPoint
@transformation_function()
def square(x: DataPoint) -> DataPoint:
    """Transformation function that squares a data point's ``num`` field."""
    x.num **= 2
    return x
@transformation_function()
def square_returns_none(x: DataPoint) -> DataPoint:
    """Square ``num``, but simulate a TF failure (return None) when num == 2."""
    if x.num != 2:
        x.num **= 2
        return x
    return None
@transformation_function()
def modify_in_place(x: DataPoint) -> DataPoint:
    """Zero out the ``my_key`` entry of the data point's dict field."""
    x.d.update(my_key=0)
    return x
# Base payloads used by the tests below.
DATA = [1, 2, 3]
STR_DATA = ["x", "y", "z"]
# Expected ``my_key`` values after ApplyOnePolicy(n_per_original=2,
# keep_original=True) with modify_in_place: each original value (1, 2, 3)
# followed by two zeroed copies -> [1, 0, 0, 2, 0, 0, 3, 0, 0].
DATA_IN_PLACE_EXPECTED = [(1 + i // 3) if i % 3 == 0 else 0 for i in range(9)]
def make_df(values: list, index: list, key: str = "num") -> pd.DataFrame:
    """Build a one-column DataFrame (column named ``key``) with ``index``."""
    column_data = {key: values}
    return pd.DataFrame(column_data, index=index)
# NB: reconstruct each time to avoid inplace updates
def get_data_dict(data: List[int] = DATA):
    """Return fresh single-key dicts so callers may mutate them freely."""
    return [{"my_key": value} for value in data]
class TestTFApplier(unittest.TestCase):
    """Tests for TFApplier over lists of SimpleNamespace data points."""
    def _get_x_namespace(self, data: List[int] = DATA) -> List[SimpleNamespace]:
        # Fresh namespaces each call, so tests can verify inputs were not mutated.
        return [SimpleNamespace(num=num) for num in data]
    def _get_x_namespace_dict(self, data: List[int] = DATA) -> List[SimpleNamespace]:
        # Namespaces holding fresh dicts (for the in-place mutation tests).
        return [SimpleNamespace(d=d) for d in get_data_dict(data)]
    def test_tf_applier(self) -> None:
        # square applied twice (sequence_length=2) -> num**4: [1, 16, 81].
        data = self._get_x_namespace()
        policy = RandomPolicy(
            1, sequence_length=2, n_per_original=1, keep_original=False
        )
        applier = TFApplier([square], policy)
        data_augmented = applier.apply(data, progress_bar=False)
        self.assertEqual(data_augmented, self._get_x_namespace([1, 16, 81]))
        self.assertEqual(data, self._get_x_namespace())
        # Same result expected with the progress bar enabled.
        data_augmented = applier.apply(data, progress_bar=True)
        self.assertEqual(data_augmented, self._get_x_namespace([1, 16, 81]))
        self.assertEqual(data, self._get_x_namespace())
    def test_tf_applier_keep_original(self) -> None:
        # Originals retained, two augmented copies each.
        data = self._get_x_namespace()
        policy = RandomPolicy(
            1, sequence_length=2, n_per_original=2, keep_original=True
        )
        applier = TFApplier([square], policy)
        data_augmented = applier.apply(data, progress_bar=False)
        vals = [1, 1, 1, 2, 16, 16, 3, 81, 81]
        self.assertEqual(data_augmented, self._get_x_namespace(vals))
        self.assertEqual(data, self._get_x_namespace())
    def test_tf_applier_returns_none(self) -> None:
        # The TF returns None for num == 2, so 2's augmented copies are dropped.
        data = self._get_x_namespace()
        policy = RandomPolicy(
            1, sequence_length=2, n_per_original=2, keep_original=True
        )
        applier = TFApplier([square_returns_none], policy)
        data_augmented = applier.apply(data, progress_bar=False)
        vals = [1, 1, 1, 2, 3, 81, 81]
        self.assertEqual(data_augmented, self._get_x_namespace(vals))
        self.assertEqual(data, self._get_x_namespace())
    def test_tf_applier_keep_original_modify_in_place(self) -> None:
        # In-place dict mutation must not leak back into the original data.
        data = self._get_x_namespace_dict()
        policy = ApplyOnePolicy(n_per_original=2, keep_original=True)
        applier = TFApplier([modify_in_place], policy)
        data_augmented = applier.apply(data, progress_bar=False)
        self.assertEqual(
            data_augmented, self._get_x_namespace_dict(DATA_IN_PLACE_EXPECTED)
        )
        self.assertEqual(data, self._get_x_namespace_dict())
    def test_tf_applier_generator(self) -> None:
        # Batched generator interface, originals dropped.
        data = self._get_x_namespace()
        policy = RandomPolicy(
            1, sequence_length=2, n_per_original=2, keep_original=False
        )
        applier = TFApplier([square], policy)
        batches_expected = [[1, 1, 16, 16], [81, 81]]
        gen = applier.apply_generator(data, batch_size=2)
        for batch, batch_expected in zip(gen, batches_expected):
            self.assertEqual(batch, self._get_x_namespace(batch_expected))
        self.assertEqual(data, self._get_x_namespace())
    def test_tf_applier_keep_original_generator(self) -> None:
        # Batched generator interface with originals kept.
        data = self._get_x_namespace()
        policy = RandomPolicy(
            1, sequence_length=2, n_per_original=2, keep_original=True
        )
        applier = TFApplier([square], policy)
        batches_expected = [[1, 1, 1, 2, 16, 16], [3, 81, 81]]
        gen = applier.apply_generator(data, batch_size=2)
        for batch, batch_expected in zip(gen, batches_expected):
            self.assertEqual(batch, self._get_x_namespace(batch_expected))
        self.assertEqual(data, self._get_x_namespace())
    def test_tf_applier_returns_none_generator(self) -> None:
        # Generator interface; None returns drop 2's augmented copies.
        data = self._get_x_namespace()
        policy = RandomPolicy(
            1, sequence_length=2, n_per_original=2, keep_original=True
        )
        applier = TFApplier([square_returns_none], policy)
        batches_expected = [[1, 1, 1, 2], [3, 81, 81]]
        gen = applier.apply_generator(data, batch_size=2)
        for batch, batch_expected in zip(gen, batches_expected):
            self.assertEqual(batch, self._get_x_namespace(batch_expected))
        self.assertEqual(data, self._get_x_namespace())
    def test_tf_applier_keep_original_modify_in_place_generator(self) -> None:
        # Generator interface for the in-place mutation scenario.
        data = self._get_x_namespace_dict()
        policy = ApplyOnePolicy(n_per_original=2, keep_original=True)
        applier = TFApplier([modify_in_place], policy)
        batches_expected = [DATA_IN_PLACE_EXPECTED[:6], DATA_IN_PLACE_EXPECTED[6:]]
        gen = applier.apply_generator(data, batch_size=2)
        for batch, batch_expected in zip(gen, batches_expected):
            self.assertEqual(batch, self._get_x_namespace_dict(batch_expected))
        self.assertEqual(data, self._get_x_namespace_dict())
class TestPandasTFApplier(unittest.TestCase):
def _get_x_df(self):
return pd.DataFrame(dict(num=DATA))
def _get_x_df_with_str(self):
return pd.DataFrame(dict(num=DATA, strs=STR_DATA))
def _get_x_df_dict(self):
return pd.DataFrame(dict(d=get_data_dict()))
    def test_tf_applier_pandas(self):
        # One augmented row per original (originals dropped); square applied
        # twice per row (sequence_length=2) -> num**4: [1, 16, 81].
        df = self._get_x_df_with_str()
        policy = RandomPolicy(
            1, sequence_length=2, n_per_original=1, keep_original=False
        )
        applier = PandasTFApplier([square], policy)
        df_augmented = applier.apply(df, progress_bar=False)
        df_expected = pd.DataFrame(
            dict(num=[1, 16, 81], strs=STR_DATA), index=[0, 1, 2]
        )
        self.assertEqual(df_augmented.num.dtype, "int64")
        pd.testing.assert_frame_equal(df_augmented, df_expected)
        pd.testing.assert_frame_equal(df, self._get_x_df_with_str())
        # Same result expected with the progress bar enabled.
        df_augmented = applier.apply(df, progress_bar=True)
        df_expected = pd.DataFrame(
            dict(num=[1, 16, 81], strs=STR_DATA), index=[0, 1, 2]
        )
        pd.testing.assert_frame_equal(df_augmented, df_expected)
        pd.testing.assert_frame_equal(df, self._get_x_df_with_str())
    def test_tf_applier_pandas_keep_original(self):
        # Originals retained plus two augmented rows each; index repeats 3x.
        df = self._get_x_df()
        policy = RandomPolicy(
            1, sequence_length=2, n_per_original=2, keep_original=True
        )
        applier = PandasTFApplier([square], policy)
        df_augmented = applier.apply(df, progress_bar=False)
        df_expected = pd.DataFrame(
            dict(num=[1, 1, 1, 2, 16, 16, 3, 81, 81]), index=[0, 0, 0, 1, 1, 1, 2, 2, 2]
        )
        self.assertEqual(df_augmented.num.dtype, "int64")
        pd.testing.assert_frame_equal(df_augmented, df_expected)
        pd.testing.assert_frame_equal(df, self._get_x_df())
    def test_tf_applier_returns_none(self):
        # The TF returns None for num == 2, so 2's augmented rows are dropped.
        df = self._get_x_df()
        policy = RandomPolicy(
            1, sequence_length=2, n_per_original=2, keep_original=True
        )
        applier = PandasTFApplier([square_returns_none], policy)
        df_augmented = applier.apply(df, progress_bar=False)
        df_expected = pd.DataFrame(
            dict(num=[1, 1, 1, 2, 3, 81, 81]), index=[0, 0, 0, 1, 2, 2, 2]
        )
        self.assertEqual(df_augmented.num.dtype, "int64")
        pd.testing.assert_frame_equal(df_augmented, df_expected)
        pd.testing.assert_frame_equal(df, self._get_x_df())
    def test_tf_applier_pandas_modify_in_place(self):
        # In-place dict mutation must not leak back into the original frame.
        df = self._get_x_df_dict()
        policy = ApplyOnePolicy(n_per_original=2, keep_original=True)
        applier = PandasTFApplier([modify_in_place], policy)
        df_augmented = applier.apply(df, progress_bar=False)
        idx = [0, 0, 0, 1, 1, 1, 2, 2, 2]
        df_expected = pd.DataFrame(
            dict(d=get_data_dict(DATA_IN_PLACE_EXPECTED)), index=idx
        )
        pd.testing.assert_frame_equal(df_augmented, df_expected)
        pd.testing.assert_frame_equal(df, self._get_x_df_dict())
def test_tf_applier_pandas_generator(self):
    """apply_generator yields per-batch frames holding only augmented rows."""
    frame = self._get_x_df_with_str()
    aug_policy = RandomPolicy(
        1, sequence_length=2, n_per_original=2, keep_original=False
    )
    batches = PandasTFApplier([square], aug_policy).apply_generator(
        frame, batch_size=2
    )
    expected_batches = [
        pd.DataFrame(
            {"num": [1, 1, 16, 16], "strs": ["x", "x", "y", "y"]},
            index=[0, 0, 1, 1],
        ),
        pd.DataFrame({"num": [81, 81], "strs": ["z", "z"]}, index=[2, 2]),
    ]
    for batch, expected in zip(batches, expected_batches):
        # dtype and contents are checked batch by batch.
        self.assertEqual(batch.num.dtype, "int64")
        pd.testing.assert_frame_equal(batch, expected)
    # The input frame must not be mutated by the applier.
    pd.testing.assert_frame_equal(frame, self._get_x_df_with_str())
def test_tf_applier_pandas_keep_original_generator(self):
    """Generator mode with keep_original=True interleaves originals and copies."""
    frame = self._get_x_df()
    aug_policy = RandomPolicy(
        1, sequence_length=2, n_per_original=2, keep_original=True
    )
    batches = PandasTFApplier([square], aug_policy).apply_generator(
        frame, batch_size=2
    )
    expected_batches = [
        make_df([1, 1, 1, 2, 16, 16], [0, 0, 0, 1, 1, 1]),
        make_df([3, 81, 81], [2, 2, 2]),
    ]
    for batch, expected in zip(batches, expected_batches):
        pd.testing.assert_frame_equal(batch, expected)
    # The input frame must not be mutated by the applier.
    pd.testing.assert_frame_equal(frame, self._get_x_df())
def test_tf_applier_returns_none_generator(self):
    """Generator mode: None-returning TFs drop rows batch by batch."""
    frame = self._get_x_df()
    aug_policy = RandomPolicy(
        1, sequence_length=2, n_per_original=2, keep_original=True
    )
    batches = PandasTFApplier([square_returns_none], aug_policy).apply_generator(
        frame, batch_size=2
    )
    expected_batches = [
        make_df([1, 1, 1, 2], [0, 0, 0, 1]),
        make_df([3, 81, 81], [2, 2, 2]),
    ]
    for batch, expected in zip(batches, expected_batches):
        pd.testing.assert_frame_equal(batch, expected)
    # The input frame must not be mutated by the applier.
    pd.testing.assert_frame_equal(frame, self._get_x_df())
def test_tf_applier_pandas_modify_in_place_generator(self):
    """Generator mode with an in-place TF leaves the source frame intact."""
    frame = self._get_x_df_dict()
    applier = PandasTFApplier(
        [modify_in_place], ApplyOnePolicy(n_per_original=2, keep_original=True)
    )
    batches = applier.apply_generator(frame, batch_size=2)
    row_index = [0, 0, 0, 1, 1, 1, 2, 2, 2]
    expected_batches = [
        make_df(get_data_dict(DATA_IN_PLACE_EXPECTED[:6]), row_index[:6], key="d"),
        make_df(get_data_dict(DATA_IN_PLACE_EXPECTED[6:]), row_index[6:], key="d"),
    ]
    for batch, expected in zip(batches, expected_batches):
        pd.testing.assert_frame_equal(batch, expected)
    # The in-place TF must have operated on copies, not on `frame` itself.
    pd.testing.assert_frame_equal(frame, self._get_x_df_dict())
|
redis/commands/bf/info.py | salty-horse/redis-py | 483 | 112242 | from ..helpers import nativestr
class BFInfo(object):
    """Parsed reply of the RedisBloom ``BF.INFO`` command."""

    capacity = None
    size = None
    filterNum = None
    insertedNum = None
    expansionRate = None

    def __init__(self, args):
        # The server replies with a flat [name1, value1, name2, value2, ...]
        # array; decode the names and pair them with their values.
        names = map(nativestr, args[::2])
        info = dict(zip(names, args[1::2]))
        self.capacity = info["Capacity"]
        self.size = info["Size"]
        self.filterNum = info["Number of filters"]
        self.insertedNum = info["Number of items inserted"]
        self.expansionRate = info["Expansion rate"]
class CFInfo(object):
    """Parsed reply of the RedisBloom ``CF.INFO`` (cuckoo filter) command."""

    size = None
    bucketNum = None
    filterNum = None
    insertedNum = None
    deletedNum = None
    bucketSize = None
    expansionRate = None
    maxIteration = None

    def __init__(self, args):
        # Flat [name, value, ...] reply -> {name: value} with decoded names.
        names = map(nativestr, args[::2])
        info = dict(zip(names, args[1::2]))
        self.size = info["Size"]
        self.bucketNum = info["Number of buckets"]
        self.filterNum = info["Number of filters"]
        self.insertedNum = info["Number of items inserted"]
        self.deletedNum = info["Number of items deleted"]
        self.bucketSize = info["Bucket size"]
        self.expansionRate = info["Expansion rate"]
        self.maxIteration = info["Max iterations"]
class CMSInfo(object):
    """Parsed reply of the ``CMS.INFO`` (count-min sketch) command."""

    width = None
    depth = None
    count = None

    def __init__(self, args):
        # Flat [name, value, ...] reply -> {name: value} with decoded names.
        names = map(nativestr, args[::2])
        info = dict(zip(names, args[1::2]))
        self.width = info["width"]
        self.depth = info["depth"]
        self.count = info["count"]
class TopKInfo(object):
    """Parsed reply of the ``TOPK.INFO`` command."""

    k = None
    width = None
    depth = None
    decay = None

    def __init__(self, args):
        # Flat [name, value, ...] reply -> {name: value} with decoded names.
        names = map(nativestr, args[::2])
        info = dict(zip(names, args[1::2]))
        self.k = info["k"]
        self.width = info["width"]
        self.depth = info["depth"]
        self.decay = info["decay"]
class TDigestInfo(object):
    """Parsed reply of the ``TDIGEST.INFO`` command."""

    compression = None
    capacity = None
    mergedNodes = None
    unmergedNodes = None
    mergedWeight = None
    unmergedWeight = None
    totalCompressions = None

    def __init__(self, args):
        # Flat [name, value, ...] reply -> {name: value} with decoded names.
        names = map(nativestr, args[::2])
        info = dict(zip(names, args[1::2]))
        self.compression = info["Compression"]
        self.capacity = info["Capacity"]
        self.mergedNodes = info["Merged nodes"]
        self.unmergedNodes = info["Unmerged nodes"]
        self.mergedWeight = info["Merged weight"]
        self.unmergedWeight = info["Unmerged weight"]
        self.totalCompressions = info["Total compressions"]
|
.github/release_log.py | Psycojoker/uvloop | 9,084 | 112254 | <reponame>Psycojoker/uvloop<filename>.github/release_log.py
#!/usr/bin/env python3
import argparse
import json
import requests
import re
BASE_URL = 'https://api.github.com/repos/magicstack/uvloop/compare'
def main():
    """Print a changelog for the commits between two uvloop git refs.

    Queries the GitHub compare API for ``magicstack/uvloop`` and prints one
    bullet per commit: the commit message's first line, the author, the short
    sha, and the referenced issue/PR number when the message contains one.
    """
    parser = argparse.ArgumentParser(
        description='Generate release log.')
    parser.add_argument('--to', dest='to_hash', default='master', type=str)
    parser.add_argument('--from', dest='from_hash', type=str)
    args = parser.parse_args()

    r = requests.get(f'{BASE_URL}/{args.from_hash}...{args.to_hash}')
    # Fail loudly on HTTP errors (bad refs, rate limiting) instead of
    # crashing later with a confusing KeyError on the missing 'commits' key.
    r.raise_for_status()
    data = r.json()

    for commit in data['commits']:
        message = commit['commit']['message']
        # The summary is everything before the first blank line.
        first_line = message.partition('\n\n')[0]
        if commit.get('author'):
            # GitHub account is known: link the @handle.
            username = '@{}'.format(commit['author']['login'])
        else:
            # Fall back to the git author name.
            username = commit['commit']['author']['name']
        sha = commit["sha"][:8]

        # First "#123"-style reference anywhere in the message, if any.
        m = re.search(r'\#(?P<num>\d+)\b', message)
        if m:
            issue_num = m.group('num')
        else:
            issue_num = None

        print(f'* {first_line}')
        print(f' (by {username} in {sha}', end='')
        if issue_num:
            print(f' for #{issue_num})')
        else:
            print(')')
        print()


if __name__ == '__main__':
    main()
|
replay.py | Pandinosaurus/pyLiDAR-SLAM | 130 | 112325 | <gh_stars>100-1000
# Hydra and OmegaConf
from dataclasses import MISSING, dataclass, field
import numpy as np
from pathlib import Path
from typing import Optional
from slam.common.modules import _with_viz3d
if _with_viz3d:
from viz3d.window import OpenGLWindow
from omegaconf import DictConfig, OmegaConf
# Project Imports
from tqdm import tqdm
from slam.dataset.dataset import WindowDataset
from slam.odometry.odometry_runner import SLAMRunner
from argparse import ArgumentParser
@dataclass
class ReplayArguments:
    """CLI options controlling which recorded SLAM run to replay."""
    config_path: str = ""  # The path to the SLAMRunner config (yaml)
    sequence: str = ""  # The name of the sequence to replay
    root_dir: Path = field(default_factory=lambda: Path())
    sequence_dir: Path = field(default_factory=lambda: Path())
    start_index: int = 0
    num_frames: int = -1
    show_information: bool = True  # Whether to print information about the experiment to be replayed
    overrides_path: Optional[str] = None  # The path to the yaml containing the overrides


def parse_arguments() -> ReplayArguments:
    """Build a ReplayArguments from the command line.

    Validates that the root dir, the sequence dir and the saved
    ``config.yaml`` all exist before returning.
    """
    parser = ArgumentParser()
    parser.add_argument("--root_dir", type=str, help="Path to the root of the execution", required=True)
    # default=0 (not argparse's implicit None): ReplayArguments.start_index
    # is an int and the replay code slices arrays with it, so omitting the
    # flag must not overwrite the dataclass default with None.
    parser.add_argument("--start_index", type=int, default=0,
                        help="The index at which the SLAM should start", required=False)
    parser.add_argument("--seq", type=str, help="The name of the sequence to replay", required=True)
    parser.add_argument("--info", action="store_true",
                        help="Whether to display information of the sequence prior to the replay")
    parser.add_argument("--overrides", type=str,
                        help="The path (optional) to the overrides")
    args, _ = parser.parse_known_args()

    options = ReplayArguments()
    root_dir = Path(args.root_dir)
    assert root_dir.exists(), f"The root dir {root_dir} for the execution does not exist"
    options.root_dir = root_dir
    options.sequence_dir = root_dir / args.seq
    assert options.sequence_dir.exists(), f"The sequence dir {options.sequence_dir} does not exist"
    config_path = root_dir / "config.yaml"
    assert config_path.exists(), f"The config path {config_path} does not exist"
    options.config_path = str(config_path)
    options.start_index = args.start_index
    options.sequence = args.seq
    options.show_information = args.info
    options.overrides_path = args.overrides
    return options
def replay_slam(options: ReplayArguments) -> None:
    """The main entry point to the script running the SLAM.

    Re-runs the SLAM algorithm on the sequence recorded in a previous
    execution (located via ``options``), optionally printing a summary of
    that execution and visualizing the previously saved poses in viz3d.
    """
    # Local imports keep startup cheap when this module is only imported.
    from slam.common.io import read_poses_from_disk
    import time

    # --- Display information about the previous execution -------------------
    poses: Optional[np.ndarray] = None
    poses_file_path = options.sequence_dir / f"{options.sequence}.poses.txt"
    gt_file_path = options.sequence_dir / f"{options.sequence}_gt.poses.txt"
    if poses_file_path.exists():
        poses = read_poses_from_disk(str(poses_file_path))

    if options.show_information:
        print("*" * 80)
        if poses is not None:
            print(f"[INFO]Found Pose Estimate file {poses_file_path}.")
        # A ground-truth pose file is only written when the run completed.
        if gt_file_path.exists():
            print(f"[INFO]Found GT Pose Estimate file {poses_file_path}. The algorithm run to completion.")
        else:
            if poses is not None:
                print(f"[INFO]The execution stopped after {poses.shape[0]} frames.")
        print("*" * 80)

    # --- Run the algorithm again on the same data ----------------------------
    config: DictConfig = OmegaConf.load(options.config_path)
    if options.overrides_path is not None:
        overrides_conf = OmegaConf.load(options.overrides_path)
        # Merge the overrides into the recorded config.
        config.merge_with(overrides_conf)
    config.dataset.train_sequences = [options.sequence]
    config.debug = True
    # Fresh throw-away log dir so the replay does not clobber the original run.
    config.log_dir = f"/tmp/{time.time()}"
    Path(config.log_dir).mkdir()
    runner = SLAMRunner(config)

    # Load the datasets (one (name, dataset) pair per sequence).
    datasets: list = runner.load_datasets()
    for sequence_name, dataset in datasets:
        window = None
        try:
            # Restrict the dataset to the requested [start_index, start_index+num_frames) window.
            num_frames = options.num_frames if options.num_frames > 0 else len(dataset) - options.start_index
            dataset = WindowDataset(dataset, options.start_index, num_frames)
            slam = runner.load_slam_algorithm()
            slam.init()
            elapsed = 0.0
            relative_ground_truth = runner.ground_truth(sequence_name)

            if _with_viz3d:
                window = OpenGLWindow()
                if poses is not None:
                    window.init()
                    # Re-express the saved poses relative to the first replayed frame.
                    saved_poses = poses[options.start_index:]
                    if saved_poses.shape[0] > 0:
                        saved_poses = np.einsum("ij,njk->nik", np.linalg.inv(saved_poses[0]), saved_poses)
                        window.set_poses(0, saved_poses.astype(np.float32))

            for data_dict in tqdm(dataset, desc=f"Sequence {sequence_name}", ncols=100, total=num_frames):
                start = time.time()
                # Process the next frame through the SLAM pipeline.
                slam.process_next_frame(data_dict)
                # Accumulate the time spent on the processing of the frame.
                elapsed_sec = time.time() - start
                elapsed += elapsed_sec

            if window is not None:
                window.close(True)
        except KeyboardInterrupt:
            # Ctrl-C: close the viewer cleanly and fall through to cleanup.
            if _with_viz3d and window is not None:
                window.close(True)
        # Release the SLAM algorithm (and its GPU/CPU resources) between sequences.
        del slam
# Script entry point: parse the CLI arguments, then replay the recorded run.
if __name__ == "__main__":
    options: ReplayArguments = parse_arguments()
    replay_slam(options)
|
utils/bin/add_program.py | RohitAthithya/learntosolveit | 136 | 112343 | #!/usr/bin/python
"""
BUGS:
1. make cprogramming and cprogs dir into a single dir name.
"""
import os
import sys
import time
LANGUAGE_PATH = '../../languages/'
NOW_FORMAT = '%d-%m-%Y %H:%M'
PROGRAM_NAME_TEMPLATE = 'PROGRAMNAME'
SOURCE_PATH = '../../source/'
TEMPLATE_FORMAT = '../{0}_template.rst'
INVALID_EXIT = -1
PROGRAM_DIR = os.path.abspath(os.path.dirname(__file__))
USAGE = """
add_program.py program_name
program_name should follow pattern generic_specific.extension
"""
def _now():
return time.strftime(NOW_FORMAT, time.localtime(time.time()))
def _comment_type(ext):
return {
'c': '//',
'py': '#',
'rb': '#',
'java': '//',
'scala': '//'}.get(ext, '#')
def _source_folder_name(language):
return {
'c': 'cprogramming',
'py': 'python',
'rb': 'ruby',
'scala': 'scala',
'java': 'java'}.get(language, '')
def _program_folder_name(language):
return {
'c': 'cprogs',
'py': 'python',
'rb': 'ruby',
'scala': 'scala',
'java': 'java'}.get(language, '')
def get_language_dir(language):
    """Absolute path of the folder holding *language*'s program files."""
    relative = os.path.join(PROGRAM_DIR, LANGUAGE_PATH, _program_folder_name(language))
    return os.path.abspath(relative)


def get_source_dir(language):
    """Absolute path of the folder holding *language*'s rst sources."""
    relative = os.path.join(PROGRAM_DIR, SOURCE_PATH, _source_folder_name(language))
    return os.path.abspath(relative)


def get_template_file(language):
    """Absolute path of the rst template for *language*."""
    template_path = TEMPLATE_FORMAT.format(_source_folder_name(language))
    return os.path.abspath(os.path.join(PROGRAM_DIR, template_path))
def create_program(filename):
ext = filename.split('.')[1]
with open(filename, 'w') as fh:
fh.write('{0} {1} - {2}'.format(
_comment_type(ext),
os.path.basename(filename),
_now()))
def _program_name(program):
return program.split('.')[0]
def _rst_filename(program):
return _program_name(program) + '.rst'
def create_source(template, filename, program):
with open(template) as template_file:
with open(filename, 'w') as source_file:
for line in template_file:
source_file.write(
line.replace(PROGRAM_NAME_TEMPLATE, program))
def update_index_file(filename, program):
with open(filename, 'a') as f:
f.write(' %s\n\n' % program)
def get_index_file(language):
return os.path.abspath(os.path.join(get_source_dir(language), 'index.rst'))
def exit_if_not_exists(path):
    """Abort the script (exit code -1) when *path* does not exist."""
    if os.path.exists(path):
        return
    print("{0} does not exists".format(path))
    sys.exit(-1)
def main(args):
    """Create a program file and its rst source, then register it in the index."""
    try:
        program, = args
    except ValueError:
        print(USAGE)
        sys.exit(-1)
    program_name, language = program.split('.')

    # 1. Create the (near-empty) program file in the language folder.
    language_dir = get_language_dir(language)
    exit_if_not_exists(language_dir)
    program_file = os.path.abspath(os.path.join(language_dir, program))
    create_program(program_file)
    print('Created {0}'.format(program_file))

    # 2. Create the rst source from the language template.
    source_dir = get_source_dir(language)
    exit_if_not_exists(source_dir)
    source_file = os.path.abspath(os.path.join(source_dir, _rst_filename(program)))
    create_source(
        get_template_file(language),
        source_file,
        _program_name(program))
    print('Created {0}'.format(source_file))

    # 3. Register the new page in the language index.
    index_file = get_index_file(language)
    exit_if_not_exists(index_file)
    update_index_file(index_file, _program_name(program))
    print('Updated {0}'.format(index_file))
if __name__ == '__main__':
    # Forward only the real arguments (drop the script name).
    main(sys.argv[1:])
|
cli/jobs/train/tensorflow/iris/src/main.py | denniseik/azureml-examples | 331 | 112360 | <reponame>denniseik/azureml-examples<gh_stars>100-1000
# imports
import os
import mlflow
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow_decision_forests as tfdf
# define functions
def main(args):
    """Train a TF-DF random forest on the iris CSV named in *args*."""
    # enable auto logging
    mlflow.autolog()

    # load the CSV and wrap it as a TensorFlow dataset labeled on "species"
    frame = pd.read_csv(args.iris_csv)
    dataset = tfdf.keras.pd_dataframe_to_tf_dataset(frame, label="species")

    # fit the random forest
    model = tfdf.keras.RandomForestModel().fit(dataset)
def parse_args():
    """Parse command-line arguments; only --iris-csv is recognized."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--iris-csv", type=str)
    return parser.parse_args()
# run script (the blank banner lines make the job log easier to scan)
if __name__ == "__main__":
    # add space in logs
    print("*" * 60)
    print("\n\n")

    # parse args
    args = parse_args()

    # run main function
    main(args)

    # add space in logs
    print("*" * 60)
    print("\n\n")
|
sample_creation.py | CharlesAuthier/geo-deep-learning | 121 | 112379 | <reponame>CharlesAuthier/geo-deep-learning
import argparse
from datetime import datetime
import os
import numpy as np
np.random.seed(1234) # Set random seed for reproducibility
import warnings
import rasterio
import fiona
import shutil
import time
import json
from pathlib import Path
from tqdm import tqdm
from collections import Counter
from typing import List, Union
from utils.create_dataset import create_files_and_datasets
from utils.utils import get_key_def, pad, pad_diff, add_metadata_from_raster_to_sample
from utils.geoutils import vector_to_raster, clip_raster_with_gpkg
from utils.readers import read_parameters
from utils.verifications import assert_crs_match, validate_num_classes
from rasterio.mask import mask
from rasterio.windows import Window
from rasterio.plot import reshape_as_image
def validate_class_prop_dict(actual_classes_dict, config_dict):
    """
    Populate dictionary containing class values found in vector data with values (thresholds) from sample/class_prop
    parameter in config file

    actual_classes_dict: dict
        Dictionary where each key is a class found in vector data. Value is not relevant (should be 0)
    config_dict:
        Dictionary with class ids (keys) and thresholds (values) from config file

    Returns a copy of actual_classes_dict with thresholds filled in, or None
    when config_dict is not a dictionary.
    """
    # Validation of class proportion parameters (assert types).
    if not isinstance(config_dict, dict):
        warnings.warn(f"Class_proportion parameter should be a dictionary. Got type {type(config_dict)}. "
                      f"Ignore if parameter was omitted)")
        return None

    for key, value in config_dict.items():
        try:
            assert isinstance(key, str)
            int(key)
        except (ValueError, AssertionError):
            # BUG FIX: this message used to be a bare f-string expression, a
            # no-op that silently discarded the diagnostic. Emit it as a real
            # warning so malformed keys are reported.
            warnings.warn(f"Class should be a string castable as an integer. Got {key} of type {type(key)}")
        assert isinstance(value, int), f"Class value should be an integer, got {value} of type {type(value)}"

    # Populate actual classes dictionary with values from config
    for key, value in config_dict.items():
        if int(key) in actual_classes_dict.keys():
            actual_classes_dict[int(key)] = value
        else:
            warnings.warn(f"Class {key} not found in provided vector data.")

    return actual_classes_dict.copy()
def getFeatures(gdf):
    """Function to parse features from GeoDataFrame in such a manner that rasterio wants them"""
    import json
    parsed = json.loads(gdf.to_json())
    # rasterio.mask expects a list of GeoJSON geometry dicts.
    return [parsed['features'][0]['geometry']]
def process_raster_img(rst_pth, gpkg_pth):
    """Clip the raster at *rst_pth* to the geopackage extent.

    Returns (clipped_raster_path, src) where src is the rasterio dataset of
    the ORIGINAL raster.
    """
    with rasterio.open(rst_pth) as src:
        rst_pth = clip_raster_with_gpkg(src, gpkg_pth)
    # TODO: Return clipped raster handle
    # NOTE(review): `src` is returned after the `with` block, so the context
    # manager has already closed it — presumably callers only read metadata
    # from it, not pixel data. TODO confirm against sample_prep's usage.
    return rst_pth, src
def reorder_bands(a: List[str], b: List[str]):
    """Build a 1-based band-index list mapping the names in *a* onto their
    positions in *b*; names absent from *b* are skipped.

    Note: insertion position uses a.index(band), i.e. the FIRST occurrence
    of the name in *a*, matching the original lookup semantics.
    """
    order: List[int] = []
    for band in a:
        if band not in b:
            continue
        order.insert(a.index(band) + 1, b.index(band) + 1)
    return order
def gen_img_samples(rst_pth, tile_size, dist_samples, *band_order):
    """Yield image tiles of shape (tile_size, tile_size, bands) from the raster.

    Tiles are taken on a grid with stride dist_samples (tiles overlap when
    dist_samples < tile_size). Edge tiles smaller than tile_size are padded
    with NaN. An optional first vararg gives the band read order.
    """
    with rasterio.open(rst_pth) as src:
        for row in range(0, src.height, dist_samples):
            for column in range(0, src.width, dist_samples):
                window = Window.from_slices(slice(row, row + tile_size),
                                            slice(column, column + tile_size))
                if band_order:
                    # Read the bands in the caller-requested order.
                    window_array = reshape_as_image(src.read(band_order[0], window=window))
                else:
                    window_array = reshape_as_image(src.read(window=window))

                # Pad partial tiles at the right/bottom edges up to tile_size.
                if window_array.shape[0] < tile_size or window_array.shape[1] < tile_size:
                    padding = pad_diff(window_array.shape[0], window_array.shape[1], tile_size, tile_size)
                    window_array = pad(window_array, padding, fill=np.nan)

                yield window_array
def process_vector_label(rst_pth, gpkg_pth, ids):
    """Burn the geopackage vectors into a label raster matching *rst_pth*.

    Returns the rasterized label array, or (implicitly) None when rst_pth is
    None. Pixels outside any feature get 0; classes come from the
    'Quatreclasses' attribute, restricted to *ids* when given.
    """
    if rst_pth is not None:
        with rasterio.open(rst_pth) as src:
            np_label = vector_to_raster(vector_file=gpkg_pth,
                                        input_image=src,
                                        out_shape=(src.height, src.width),
                                        attribute_name='properties/Quatreclasses',
                                        fill=0,
                                        target_ids=ids,
                                        merge_all=True,
                                        )
        return np_label
def gen_label_samples(np_label, dist_samples, tile_size):
    """Yield (tile, (row, col)) label patches on a dist_samples-stride grid.

    Edge tiles smaller than tile_size x tile_size are padded with -1 (the
    ignore value) so every yielded tile has the same shape.
    """
    height, width = np_label.shape
    for top in range(0, height, dist_samples):
        for left in range(0, width, dist_samples):
            tile = np_label[top:top + tile_size, left:left + tile_size]
            rows, cols = tile.shape[0], tile.shape[1]
            if rows < tile_size or cols < tile_size:
                # pad_diff computes the per-side padding needed to reach tile_size.
                tile = pad(tile, pad_diff(rows, cols, tile_size, tile_size), fill=-1)
            yield tile, (top, left)
def minimum_annotated_percent(target_background_percent, min_annotated_percent):
    """Accept the sample when no threshold is configured, or when the
    annotated (non-background) share reaches min_annotated_percent."""
    return (not min_annotated_percent
            or float(target_background_percent) <= 100 - min_annotated_percent)
def append_to_dataset(dataset, sample):
    """
    Append a new sample to a provided dataset. The dataset has to be expanded before we can add value to it.
    :param dataset: resizable (hdf5-style) dataset, grown along axis 0
    :param sample: data to append
    :return: Index of the newly added sample.
    """
    index = dataset.shape[0]  # samples are always appended on the first axis
    dataset.resize(index + 1, axis=0)
    dataset[index, ...] = sample
    return index
def class_proportion(target, sample_size: int, class_min_prop: dict):
    """Return True when every class in *class_min_prop* covers at least its
    required percentage of the sample's pixels.

    target: label array; negative (dontcare) values are clipped to 0 for
        pixel counting, but class presence is tested on the raw values.
    sample_size: side length of the (square) sample.
    class_min_prop: {class_value: minimum percent}; falsy skips the check.
    """
    if not class_min_prop:
        return True
    sample_total = sample_size ** 2
    # PERF: both of these are O(pixels) and were previously recomputed for
    # every threshold entry inside the loop; compute them once.
    present_classes = set(np.unique(target).tolist())
    counts = np.bincount(target.clip(min=0).flatten())
    for key, value in class_min_prop.items():
        if key not in present_classes:
            target_prop_classwise = 0
        else:
            target_prop_classwise = round((counts[key] / sample_total) * 100, 1)
        if target_prop_classwise < value:
            return False
    return True
def add_to_datasets(dataset,
                    samples_file,
                    val_percent,
                    val_sample_file,
                    data,
                    target,
                    sample_metadata,
                    metadata_idx,
                    dict_classes):
    """ Add sample to Hdf5 (trn, val or tst) and computes pixel classes(%).

    Returns True when a 'trn' sample was randomly diverted to the validation
    file (with probability ~val_percent), False otherwise. Also accumulates
    per-class pixel counts into dict_classes.
    """
    val = False
    if dataset == 'trn':
        # Random split: roughly val_percent% of training samples go to val.
        random_val = np.random.randint(1, 100)
        if random_val > val_percent:
            pass
        else:
            val = True
            samples_file = val_sample_file
    append_to_dataset(samples_file["sat_img"], data)
    append_to_dataset(samples_file["map_img"], target)
    append_to_dataset(samples_file["sample_metadata"], repr(sample_metadata))
    append_to_dataset(samples_file["meta_idx"], metadata_idx)

    # adds pixel count to pixel_classes dict for each class in the image
    # (dontcare pixels are clipped to 0 before counting)
    for key, value in enumerate(np.bincount(target.clip(min=0).flatten())):
        cls_keys = dict_classes.keys()
        if key in cls_keys:
            dict_classes[key] += value
        elif key not in cls_keys and value > 0:
            raise ValueError(f"A class value was written ({key}) that was not defined in the classes ({cls_keys}).")

    return val
def sample_prep(src, data, target, indices, gpkg_classes, sample_size, sample_type, samples_count, samples_file,
                num_classes,
                val_percent,
                val_sample_file,
                min_annot_perc=None,
                class_prop=None,
                dontcare=-1
                ):
    """Validate one (image tile, label tile) pair and append it to the hdf5.

    The sample is kept only when it passes the minimum-annotated-percent and
    per-class-proportion filters; 'trn' samples may be randomly diverted to
    validation. Returns (updated samples_count, updated num_classes,
    per-class pixel counts for this sample).
    """
    added_samples = 0
    excl_samples = 0

    # Per-class pixel counters; background (0) and dontcare always present.
    pixel_classes = {key: 0 for key in gpkg_classes}
    background_val = 0
    pixel_classes[background_val] = 0
    class_prop = validate_class_prop_dict(pixel_classes, class_prop)
    pixel_classes[dontcare] = 0

    image_metadata = add_metadata_from_raster_to_sample(sat_img_arr=data,
                                                        raster_handle=src,
                                                        meta_map={},
                                                        raster_info={})
    # Save label's per class pixel count to image metadata
    image_metadata['source_label_bincount'] = {class_num: count for class_num, count in
                                               enumerate(np.bincount(target.clip(min=0).flatten()))
                                               if count > 0}  # TODO: add this to add_metadata_from[...] function?

    if sample_type == 'trn':
        idx_samples = samples_count['trn']
        # NOTE(review): metadata is also appended to the val file here so that
        # diverted samples can reference it — confirm intended.
        append_to_dataset(val_sample_file["metadata"], repr(image_metadata))
    elif sample_type == 'tst':
        idx_samples = samples_count['tst']
    else:
        raise ValueError(f"Sample type must be trn or tst. Provided type is {sample_type}")
    idx_samples_v = samples_count['val']

    # Adds raster metadata to the dataset. All samples created by tiling below will point to that metadata by index
    metadata_idx = append_to_dataset(samples_file["metadata"], repr(image_metadata))

    # Background share of the tile (class 0), used by the annotation filter.
    u, count = np.unique(target, return_counts=True)
    target_background_percent = round(count[0] / np.sum(count) * 100 if 0 in u else 0, 1)

    sample_metadata = {'sample_indices': indices}

    val = False
    if minimum_annotated_percent(target_background_percent, min_annot_perc) and \
            class_proportion(target, sample_size, class_prop):
        val = add_to_datasets(dataset=sample_type,
                              samples_file=samples_file,
                              val_percent=val_percent,
                              val_sample_file=val_sample_file,
                              data=data,
                              target=target,
                              sample_metadata=sample_metadata,
                              metadata_idx=metadata_idx,
                              dict_classes=pixel_classes)
        if val:
            idx_samples_v += 1
        else:
            idx_samples += 1
        added_samples += 1
    else:
        excl_samples += 1

    # Track the highest class id seen so far.
    target_class_num = np.max(u)
    if num_classes < target_class_num:
        num_classes = target_class_num

    sample_type_ = 'val' if val else sample_type
    # assert added_samples > 0, "No sample added for current raster. Problems may occur with use of metadata"

    if sample_type == 'tst':
        samples_count['tst'] = idx_samples
    else:
        samples_count['trn'] = idx_samples
    samples_count['val'] = idx_samples_v

    return samples_count, num_classes, pixel_classes
def class_pixel_ratio(pixel_classes: dict, source_data: str, file_path: str):
    """Append each class's pixel percentage for *source_data* to file_path."""
    with open(file_path, 'a+') as f:
        total = sum(pixel_classes.values())
        print(f'\n****{source_data}****\n', file=f)
        for cls in pixel_classes:
            share = round((pixel_classes[cls] / total) * 100, 1) if total > 0 else 0
            print(f'{source_data}_class', cls, ':', share, '%', file=f)
        print(f'\n****{source_data}****\n', file=f)
def main(params):
"""
Dataset preparation (trn, val, tst).
:param params: (dict) Parameters found in the yaml config file.
"""
assert params['global']['task'] == 'segmentation', \
f"sample_creation.py isn't necessary when performing classification tasks"
num_classes = get_key_def('num_classes', params['global'], expected_type=int)
num_bands = get_key_def('number_of_bands', params['global'], expected_type=int)
debug = get_key_def('debug_mode', params['global'], False)
targ_ids = get_key_def('target_ids', params['sample'], None, expected_type=List)
# SET BASIC VARIABLES AND PATHS. CREATE OUTPUT FOLDERS.
val_percent = params['sample']['val_percent']
samples_size = params["global"]["samples_size"]
overlap = params["sample"]["overlap"]
dist_samples = round(samples_size * (1 - (overlap / 100)))
min_annot_perc = get_key_def('min_annotated_percent', params['sample']['sampling_method'], None, expected_type=int)
ignore_index = get_key_def('ignore_index', params['training'], -1)
meta_map = get_key_def('meta_map', params['global'], default={})
list_params = params['read_img']
source_pan = get_key_def('pan', list_params['source'], default=False, expected_type=bool)
source_mul = get_key_def('mul', list_params['source'], default=False, expected_type=bool)
mul_band_order = get_key_def('mulband', list_params['source'], default=[], expected_type=list)
prep_band = get_key_def('band', list_params['prep'], default=[], expected_type=list)
tst_set = get_key_def('benchmark', list_params, default=[], expected_type=list)
in_pth = get_key_def('input_file', list_params, default='data_file.json', expected_type=str)
sensor_lst = get_key_def('sensorID', list_params, default=['GeoEye1', 'QuickBird2' 'WV2', 'WV3', 'WV4'],
expected_type=list)
month_range = get_key_def('month_range', list_params, default=list(range(1, 12 + 1)), expected_type=list)
root_folder = Path(get_key_def('root_img_folder', list_params, default='', expected_type=str))
gpkg_status = 'all'
data_path = Path(params['global']['data_path'])
Path.mkdir(data_path, exist_ok=True, parents=True)
if not data_path.is_dir():
raise FileNotFoundError(f'Could not locate data path {data_path}')
# mlflow logging
experiment_name = get_key_def('mlflow_experiment_name', params['global'], default='gdl-training', expected_type=str)
samples_folder_name = (f'samples{samples_size}_overlap{overlap}_min-annot{min_annot_perc}_{num_bands}bands'
f'_{experiment_name}')
samples_folder = data_path.joinpath(samples_folder_name)
if samples_folder.is_dir():
if debug:
# Move existing data folder with a random suffix.
last_mod_time_suffix = datetime.fromtimestamp(samples_folder.stat().st_mtime).strftime('%Y%m%d-%H%M%S')
shutil.move(samples_folder, data_path.joinpath(f'{str(samples_folder)}_{last_mod_time_suffix}'))
else:
raise FileExistsError(f'Data path exists: {samples_folder}. Remove it or use a different experiment_name.')
Path.mkdir(samples_folder, exist_ok=False) # TODO: what if we want to append samples to existing hdf5?
trn_hdf5, val_hdf5, tst_hdf5 = create_files_and_datasets(samples_size=samples_size,
number_of_bands=num_bands,
meta_map=meta_map,
samples_folder=samples_folder,
params=params)
class_prop = get_key_def('class_proportion', params['sample']['sampling_method'], None, expected_type=dict)
dontcare = get_key_def("ignore_index", params["training"], -1)
number_samples = {'trn': 0, 'val': 0, 'tst': 0}
number_classes = 0
pixel_pan_counter = Counter()
pixel_mul_counter = Counter()
pixel_prep_counter = Counter()
filename = samples_folder.joinpath('class_distribution.txt')
with open(Path(in_pth), 'r') as fin:
dict_images = json.load(fin)
for i_dict in tqdm(dict_images['all_images'], desc=f'Writing samples to {samples_folder}'):
if i_dict['sensorID'] in sensor_lst and \
datetime.strptime(i_dict['date']['yyyy/mm/dd'], '%Y/%m/%d').month in month_range:
if source_pan:
if not len(i_dict['pan_img']) == 0 and i_dict['gpkg']:
if gpkg_status == 'all':
if 'corr' or 'prem' in i_dict['gpkg'].keys():
gpkg = root_folder.joinpath(list(i_dict['gpkg'].values())[0])
gpkg_classes = validate_num_classes(gpkg, num_classes,
'properties/Quatreclasses',
dontcare,
targ_ids)
for img_pan in i_dict['pan_img']:
img_pan = root_folder.joinpath(img_pan)
assert_crs_match(img_pan, gpkg)
rst_pth, r_ = process_raster_img(img_pan, gpkg)
np_label = process_vector_label(rst_pth, gpkg, targ_ids)
if np_label is not None:
if Path(gpkg).stem in tst_set:
sample_type = 'tst'
out_file = tst_hdf5
else:
sample_type = 'trn'
out_file = trn_hdf5
val_file = val_hdf5
src = r_
pan_label_gen = gen_label_samples(np_label, dist_samples, samples_size)
pan_img_gen = gen_img_samples(rst_pth, samples_size, dist_samples)
else:
continue
for pan_img, pan_label in zip(pan_img_gen, pan_label_gen):
number_samples, number_classes, class_pixels_pan = sample_prep(src, pan_img, pan_label[0],
pan_label[1], gpkg_classes,
samples_size, sample_type,
number_samples, out_file,
number_classes,
val_percent, val_file,
min_annot_perc,
class_prop=class_prop,
dontcare=dontcare)
pixel_pan_counter.update(class_pixels_pan)
if source_mul:
if not len(i_dict['mul_img']) == 0 and i_dict['gpkg']:
band_order = reorder_bands(i_dict['mul_band'], mul_band_order)
if gpkg_status == 'all':
if 'corr' or 'prem' in i_dict['gpkg'].keys():
gpkg = root_folder.joinpath(list(i_dict['gpkg'].values())[0])
gpkg_classes = validate_num_classes(gpkg, num_classes,
'properties/Quatreclasses',
dontcare,
targ_ids)
for img_mul in i_dict['mul_img']:
img_mul = root_folder.joinpath(img_mul)
assert_crs_match(img_mul, gpkg)
rst_pth, r_ = process_raster_img(img_mul, gpkg)
np_label = process_vector_label(rst_pth, gpkg, targ_ids)
if np_label is not None:
if Path(gpkg).stem in tst_set:
sample_type = 'tst'
out_file = tst_hdf5
else:
sample_type = 'trn'
out_file = trn_hdf5
val_file = val_hdf5
src = r_
mul_label_gen = gen_label_samples(np_label, dist_samples, samples_size)
mul_img_gen = gen_img_samples(rst_pth, samples_size, dist_samples, band_order)
else:
continue
for mul_img, mul_label in zip(mul_img_gen, mul_label_gen):
number_samples, number_classes, class_pixels_mul = sample_prep(src, mul_img, mul_label[0],
mul_label[1], gpkg_classes,
samples_size, sample_type,
number_samples, out_file,
number_classes,
val_percent, val_file,
min_annot_perc,
class_prop=class_prop,
dontcare=dontcare)
pixel_mul_counter.update(class_pixels_mul)
if prep_band:
bands_gen_list = []
if set(prep_band).issubset({'R', 'G', 'B', 'N'}):
for ib in prep_band:
if i_dict[f'{ib}_band'] and i_dict['gpkg']:
i_dict[f'{ib}_band'] = root_folder.joinpath(i_dict[f'{ib}_band'])
if gpkg_status == 'all':
if 'corr' or 'prem' in i_dict['gpkg'].keys():
gpkg = root_folder.joinpath(list(i_dict['gpkg'].values())[0])
gpkg_classes = validate_num_classes(gpkg, num_classes,
'properties/Quatreclasses',
dontcare,
targ_ids)
assert_crs_match(i_dict[f'{ib}_band'], gpkg)
rst_pth, r_ = process_raster_img(i_dict[f'{ib}_band'], gpkg)
np_label = process_vector_label(rst_pth, gpkg, targ_ids)
prep_img_gen = gen_img_samples(rst_pth, samples_size, dist_samples)
bands_gen_list.append(prep_img_gen)
if np_label is not None:
if Path(gpkg).stem in tst_set:
sample_type = 'tst'
out_file = tst_hdf5
else:
sample_type = 'trn'
out_file = trn_hdf5
val_file = val_hdf5
src = r_
prep_label_gen = gen_label_samples(np_label, dist_samples, samples_size)
if len(prep_band) and len(bands_gen_list) == 1:
for b1, prep_label in zip(bands_gen_list[0], prep_label_gen):
prep_img = b1
number_samples, number_classes, class_pixels_prep = sample_prep(src, prep_img,
prep_label[0],
prep_label[1],
gpkg_classes,
samples_size,
sample_type,
number_samples,
out_file,
number_classes,
val_percent, val_file,
min_annot_perc,
class_prop=class_prop,
dontcare=dontcare)
pixel_prep_counter.update(class_pixels_prep)
elif len(prep_band) and len(bands_gen_list) == 2:
for b1, b2, prep_label in zip(*bands_gen_list, prep_label_gen):
prep_img = np.dstack(np.array([b1, b2]))
number_samples, number_classes, class_pixels_prep = sample_prep(src, prep_img,
prep_label[0],
prep_label[1],
gpkg_classes,
samples_size,
sample_type,
number_samples,
out_file,
number_classes,
val_percent, val_file,
min_annot_perc,
class_prop=class_prop,
dontcare=dontcare)
pixel_prep_counter.update(class_pixels_prep)
elif len(prep_band) and len(bands_gen_list) == 3:
for b1, b2, b3, prep_label in zip(*bands_gen_list, prep_label_gen):
prep_img = np.dstack(np.array([b1, b2, b3]))
number_samples, number_classes, class_pixels_prep = sample_prep(src, prep_img,
prep_label[0],
prep_label[1],
gpkg_classes,
samples_size,
sample_type,
number_samples,
out_file,
number_classes,
val_percent, val_file,
min_annot_perc,
class_prop=class_prop,
dontcare=dontcare)
pixel_prep_counter.update(class_pixels_prep)
elif len(prep_band) and len(bands_gen_list) == 4:
for b1, b2, b3, b4, prep_label in zip(*bands_gen_list, prep_label_gen):
prep_img = np.dstack(np.array([b1, b2, b3, b4]))
number_samples, number_classes, class_pixels_prep = sample_prep(src, prep_img,
prep_label[0],
prep_label[1],
gpkg_classes,
samples_size,
sample_type,
number_samples,
out_file,
number_classes,
val_percent, val_file,
min_annot_perc,
class_prop=class_prop,
dontcare=dontcare)
pixel_prep_counter.update(class_pixels_prep)
else:
continue
else:
continue
trn_hdf5.close()
val_hdf5.close()
tst_hdf5.close()
class_pixel_ratio(pixel_pan_counter, 'pan_source', filename)
class_pixel_ratio(pixel_mul_counter, 'mul_source', filename)
class_pixel_ratio(pixel_prep_counter, 'prep_source', filename)
print("Number of samples created: ", number_samples, number_classes)
if __name__ == '__main__':
    # CLI entry point: the single positional argument is the yaml config path.
    parser = argparse.ArgumentParser(description='Sample preparation')
    parser.add_argument('ParamFile', metavar='DIR',
                        help='Path to training parameters stored in yaml')
    args = parser.parse_args()
    params = read_parameters(args.ParamFile)
    start_time = time.time()
    tqdm.write(f'\n\nStarting images to samples preparation with {args.ParamFile}\n\n')
    main(params)
    # Report total wall-clock time for the whole preparation run.
    print("Elapsed time:{}".format(time.time() - start_time))
|
desktop/core/ext-py/nose-1.3.7/functional_tests/support/ltfn/test_pak1/__init__.py | kokosing/hue | 5,079 | 112399 | from state import called
def setup():
    # nose package-level setup fixture: records its invocation in the
    # shared ``called`` list so the outer functional test can assert
    # fixture/test execution order.
    called.append('test_pak1.setup')
def teardown():
    # nose package-level teardown fixture, recorded for the same purpose.
    called.append('test_pak1.teardown')
def test_one_one():
    # Dummy test; only the fact (and order) of its execution matters.
    called.append('test_pak1.test_one_one')
def test_one_two():
    # Dummy test; only the fact (and order) of its execution matters.
    called.append('test_pak1.test_one_two')
|
pyvi/__init__.py | minhpqn/pyvi | 161 | 112446 | <reponame>minhpqn/pyvi<filename>pyvi/__init__.py
__author__ = 'trungtv'
|
Easy Challenges/Challenge #0007 [Easy]/solutions/solution.py | doctorBeast/challenges | 331 | 112482 | <filename>Easy Challenges/Challenge #0007 [Easy]/solutions/solution.py<gh_stars>100-1000
#Python 3.X solution for Easy Challenge #0007
#GitHub: https://github.com/Ashkore
#https://www.reddit.com/user/Ashkoree/
# Uppercase alphabet; alphabet[i] corresponds to morase[i].
alphabet = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
# International Morse code table, parallel to ``alphabet``.  The previous
# revision was missing "...-" (V), which silently shifted the codes for
# V-Y and made Z fall off the end of the list.
morase = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
def tomorase(string):
    """Encode ``string`` to Morse code.

    Each letter (case-insensitive) becomes its Morse pattern followed by a
    space, a space becomes "/ ", and any unsupported character becomes
    "? ".  Fixes two defects in the previous revision: the module table
    was missing V ("...-"), shifting the codes for V-Z, and a bare
    ``except`` masked the resulting IndexError for Z.  A local dict gives
    O(1) lookup instead of list.index().
    """
    code_by_letter = dict(zip(
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
        [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..",
         ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.",
         "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."],
    ))
    pieces = []
    for char in string:
        if char == " ":
            pieces.append("/ ")
        else:
            # Unknown characters (digits, punctuation, ...) encode as "?".
            pieces.append(code_by_letter.get(char.upper(), "?") + " ")
    return "".join(pieces)
def frommorase(string):
    """Decode a space-separated Morse string (as produced by ``tomorase``).

    Each Morse token maps back to its uppercase letter, "/" maps to a
    space, "?" maps to "_", and empty/unrecognized tokens (e.g. the one
    produced by a trailing separator) are skipped — matching the original
    behavior.  Uses a correct table including V ("...-"), which the
    previous revision's module list omitted.
    """
    letter_by_code = dict(zip(
        [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..",
         ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.",
         "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."],
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
    ))
    decoded = []
    for token in string.split(" "):
        if token == "/":
            decoded.append(" ")
        elif token == "?":
            decoded.append("_")
        elif token in letter_by_code:
            decoded.append(letter_by_code[token])
        # anything else (including "") is silently dropped
    return "".join(decoded)
# Interactive round-trip demo: encode the user's input to Morse, then
# decode it back to verify the tables are consistent.
string = input("What is your string?")
print("Morase: "+tomorase(string))
print("Normal: "+frommorase(tomorase(string)))
|
py/riscv/exception_handlers/SyncDispatcher.py | noahsherrill/force-riscv | 111 | 112492 | <gh_stars>100-1000
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from riscv.PrivilegeLevel import PrivilegeLevelRISCV
from riscv.exception_handlers.ExceptionHandlerContext import RegisterCallRole
from riscv.exception_handlers.SyncDispatcherBase import SyncDispatcherBaseRISCV
# Comprehensive Exception Handler dispatcher, to dispatch the sync exception
# handlers
class DefaultSyncDispatcher(SyncDispatcherBaseRISCV):
    """Comprehensive dispatcher that routes synchronous RISC-V exceptions
    to per-cause handlers via a PC-relative jump table."""

    def __init__(self, gen_thread, factory):
        super().__init__(gen_thread, factory)
    # Generate the necessary instructions to execute prior to dispatching to
    # exception handlers. These instructions should be generated once at the
    # top-level dispatch.
    #
    # @param aHandlerContext A source for usable registers and other
    # information useful in generating exception handler instructions.
    def processPreDispatch(self, aHandlerContext):
        """Emit the common prologue: save caller-saved registers, load the
        privilege level and exception cause, isolate the error code, branch
        into the dispatch table, then (on return) unwind and xRET."""
        caller_saved_reg_indices = aHandlerContext.getScratchRegisterIndices(
            RegisterCallRole.CALLER_SAVED
        )
        self.exceptionsStack.newStackFrame(caller_saved_reg_indices)
        # NOTE(review): scratch_reg_index is fetched but never used below —
        # confirm whether the allocation side effect is intentional.
        scratch_reg_index = aHandlerContext.getScratchRegisterIndices(
            RegisterCallRole.TEMPORARY, 1
        )
        priv_level_reg_index = aHandlerContext.getScratchRegisterIndices(
            RegisterCallRole.PRIV_LEVEL_VALUE
        )
        cause_reg_index = aHandlerContext.getScratchRegisterIndices(
            RegisterCallRole.CAUSE_VALUE
        )
        err_code_reg_index = aHandlerContext.getScratchRegisterIndices(
            RegisterCallRole.EC_VALUE
        )
        priv_level = PrivilegeLevelRISCV[self.privilegeLevel]
        self.mAssemblyHelper.genMoveImmediate(
            priv_level_reg_index, priv_level.value
        )
        # Read mcause/scause/ucause depending on the handler's privilege.
        self.mAssemblyHelper.genReadSystemRegister(
            cause_reg_index, ("%scause" % priv_level.name.lower())
        )
        # Drop the interrupt bit to isolate the error code
        self.mAssemblyHelper.genShiftLeftImmediate(
            err_code_reg_index, 1, aSrcRegIndex=cause_reg_index
        )
        self.mAssemblyHelper.genShiftRightImmediate(err_code_reg_index, 1)
        # Jump to the exception dispatch; return here afterward to unwind the
        # stack and return from the exception
        self.mAssemblyHelper.genRelativeBranchWithLink(
            (self.exceptionsStack.frameInstructionCount() + 2) * 2
        )
        self.exceptionsStack.freeStackFrame()
        self.mAssemblyHelper.genExceptionReturn(priv_level)
    # Generate instructions to dispatch to exception handlers based on the
    # specified exception code class. These instructions may be generated
    # repeatedly: once for the top-level dispatch and potentially multiple
    # times at finer levels of dispatch granularity to handle various
    # subexception codes.
    #
    # @param aHandlerContext A source for usable registers and other
    # information useful in generating exception handler instructions.
    # @param aExceptionCodeClass The class defining the possible exception
    # codes for this level of dispatch granularity.
    def processDispatch(self, aHandlerContext, aExceptionCodeClass):
        """Emit an indirect jump into a table indexed by the error code.

        NOTE(review): aExceptionCodeClass is not referenced in this body —
        presumably consumed by the base-class contract; confirm.
        """
        (
            base_reg_index,
            offset_reg_index,
        ) = aHandlerContext.getScratchRegisterIndices(
            RegisterCallRole.TEMPORARY, 2
        )
        err_code_reg_index = aHandlerContext.getScratchRegisterIndices(
            RegisterCallRole.EC_VALUE
        )
        # AUIPC materializes the current PC so the table can be addressed
        # PC-relative; +20 skips the instructions emitted up to the table.
        self.genInstruction(
            "AUIPC##RISCV", {"rd": base_reg_index, "simm20": 0}
        )
        self.mAssemblyHelper.genAddImmediate(base_reg_index, 20)
        # Use error code as word offset into the table
        self.mAssemblyHelper.genShiftLeftImmediate(
            offset_reg_index, 2, aSrcRegIndex=err_code_reg_index
        )
        self.mAssemblyHelper.genAddRegister(base_reg_index, offset_reg_index)
        # JALR with rd=x0: plain indirect jump (no link register written).
        self.genInstruction(
            "JALR##RISCV",
            {"rd": 0, "rs1": base_reg_index, "simm12": 0, "NoRestriction": 1},
        )
|
Unit 3 Reservoir Statics/functions/extrapolatepressure.py | datasolver/reservoir-engineering | 139 | 112622 | <gh_stars>100-1000
def extrapolatepressure_gas(sg, pressure, temp, delta, z=1.0):
    """Extrapolate gas-column pressure above and below a known point.

    Parameters
    ----------
    sg : float
        Gas specific gravity (air = 1).
    pressure : float
        Known pressure at the reference depth, psia.
    temp : float
        Temperature in degrees Fahrenheit (converted to Rankine internally).
    delta : float
        Vertical distance from the reference point, ft.
    z : float, optional
        Gas compressibility factor.  Defaults to 1.0 (ideal gas).  The
        previous revision referenced an undefined global ``z`` and raised
        NameError on every call; making it a defaulted parameter fixes
        that while remaining call-compatible.

    Returns
    -------
    tuple of float
        (below, above, below_exp, above_exp): pressures ``delta`` ft
        below/above using the constant-gradient formula (Eq 3.1) and the
        exponential formula (Eq 3.7).
    """
    import numpy as np
    R = 10.732  # gas constant, psia*ft3/(lbmol*degR)
    # Real-gas law density; (temp + 459) converts Fahrenheit -> Rankine
    # (original uses 459 rather than 459.67; preserved).
    rhogas = (28.97 * sg * pressure) / (z * R * (temp + 459))
    # Gas density gradient in psi/ft (144 sq in per sq ft).
    rhogas_grad = rhogas / 144
    # Eq 3.1: linear (constant-gradient) extrapolation.
    pressure_extrapolated_below = pressure + rhogas_grad * delta
    pressure_extrapolated_above = pressure - rhogas_grad * delta
    # Eq 3.7: exponential extrapolation, accounting for density varying
    # with pressure over the interval.
    pressure_extrapolated_below2 = pressure * (np.exp((0.01877 * sg * delta) / (z * (temp + 459))))
    pressure_extrapolated_above2 = pressure * (np.exp(-(0.01877 * sg * delta) / (z * (temp + 459))))
    return (pressure_extrapolated_below, pressure_extrapolated_above, pressure_extrapolated_below2, pressure_extrapolated_above2)
|
sopel.py | Gizmokid2005/sopel | 555 | 112664 | #!/usr/bin/env python3
from __future__ import generator_stop
import sys
# Different from setuptools script, because we want the one in this dir.
from sopel.cli import run
# Delegate to Sopel's CLI entry point; its return value becomes our exit status.
sys.exit(run.main())
|
Python3/327.py | rakhi2001/ecom7 | 854 | 112666 | __________________________________________________________________________________________________
sample 148 ms submission
from bisect import bisect_left, bisect_right, insort
class Solution:
    def countRangeSum(self, nums: List[int], lower: int, upper: int) -> int:
        """Count index ranges of ``nums`` whose sum lies in [lower, upper].

        Keeps previously seen prefix sums in a sorted list; for each new
        prefix sum S, every earlier prefix sum in [S - upper, S - lower]
        yields one qualifying range.
        """
        if lower > upper:
            return 0
        sorted_prefixes = [0]
        running = 0
        count = 0
        for value in nums:
            running += value
            lo_idx = bisect_left(sorted_prefixes, running - upper)
            hi_idx = bisect_right(sorted_prefixes, running - lower)
            count += hi_idx - lo_idx
            insort(sorted_prefixes, running)
        return count
__________________________________________________________________________________________________
sample 152 ms submission
class Solution:
    def countRangeSum(self, nums: List[int], lower: int, upper: int) -> int:
        """Count range sums within [lower, upper] via sorted prefix sums.

        For each prefix sum S, earlier prefix sums P satisfying
        lower <= S - P <= upper are exactly those with
        S - upper <= P <= S - lower.
        """
        prefixes = [0]
        total = 0
        matches = 0
        for item in nums:
            total += item
            hi = bisect.bisect_right(prefixes, total - lower)
            lo = bisect.bisect_left(prefixes, total - upper)
            matches += hi - lo
            bisect.insort(prefixes, total)
        return matches
__________________________________________________________________________________________________
|
external/source/meterpreter/source/bionic/libc/kernel/tools/find_headers.py | truekonrads/mirv-metasploit | 264 | 112707 | <reponame>truekonrads/mirv-metasploit
#!/usr/bin/env python
#
# this program is used to find source code that includes linux kernel headers directly
# (e.g. with #include <linux/...> or #include <asm/...>)
#
# then it lists
import sys, cpp, glob, os, re, getopt, kernel
from utils import *
from defaults import *
program_dir = find_program_dir()
wanted_archs = kernel_archs
wanted_include = os.path.normpath(program_dir + '/../original')
wanted_config = os.path.normpath(program_dir + '/../original/config')
def usage():
print """\
usage: find_headers.py [options] (file|directory|@listfile)+
options:
-d <include-dir> specify alternate kernel headers
'include' directory
('%s' by default)
-c <file> specify alternate .config file
('%s' by default)
-a <archs> used to specify an alternative list
of architectures to support
('%s' by default)
-v enable verbose mode
this program is used to find all the kernel headers that are used
by a set of source files or directories containing them. the search
is recursive to find *all* required files.
""" % ( wanted_include, wanted_config, string.join(kernel_archs,",") )
sys.exit(1)
try:
optlist, args = getopt.getopt( sys.argv[1:], 'vc:d:a:' )
except:
# unrecognized option
print "error: unrecognized option"
usage()
for opt, arg in optlist:
if opt == '-a':
wanted_archs = string.split(arg,',')
elif opt == '-d':
wanted_include = arg
elif opt == '-c':
wanted_config = arg
elif opt == '-v':
kernel.verboseSearch = 1
kernel.verboseFind = 1
verbose = 1
else:
usage()
if len(args) < 1:
usage()
kernel_root = wanted_include
if not os.path.exists(kernel_root):
sys.stderr.write( "error: directory '%s' does not exist\n" % kernel_root )
sys.exit(1)
if not os.path.isdir(kernel_root):
sys.stderr.write( "error: '%s' is not a directory\n" % kernel_root )
sys.exit(1)
if not os.path.isdir(kernel_root+"/linux"):
sys.stderr.write( "error: '%s' does not have a 'linux' directory\n" % kernel_root )
sys.exit(1)
if not os.path.exists(wanted_config):
sys.stderr.write( "error: file '%s' does not exist\n" % wanted_config )
sys.exit(1)
if not os.path.isfile(wanted_config):
sys.stderr.write( "error: '%s' is not a file\n" % wanted_config )
sys.exit(1)
# find all architectures in the kernel tree
re_asm_ = re.compile(r"asm-(\w+)")
archs = []
for dir in os.listdir(kernel_root):
m = re_asm_.match(dir)
if m:
if verbose: print ">> found kernel arch '%s'" % m.group(1)
archs.append(m.group(1))
# if we're using the 'kernel_headers' directory, there is only asm/
# and no other asm-<arch> directories (arm is assumed, which sucks)
#
in_kernel_headers = False
if len(archs) == 0:
# this can happen when we're using the 'kernel_headers' directory
if os.path.isdir(kernel_root+"/asm"):
in_kernel_headers = True
archs = [ "arm" ]
# if the user has specified some architectures with -a <archs> ensure that
# all those he wants are available from the kernel include tree
if wanted_archs != None:
if in_kernel_headers and wanted_archs != [ "arm" ]:
sys.stderr.write( "error: when parsing kernel_headers, 'arm' architecture only is supported at the moment\n" )
sys.exit(1)
missing = []
for arch in wanted_archs:
if arch not in archs:
missing.append(arch)
if len(missing) > 0:
sys.stderr.write( "error: the following requested architectures are not in the kernel tree: " )
for a in missing:
sys.stderr.write( " %s" % a )
sys.stderr.write( "\n" )
sys.exit(1)
archs = wanted_archs
# helper function used to walk the user files
def parse_file(path, parser):
    """Callback for walk_source_files(): feed one source file to ``parser``
    (a kernel.HeaderScanner) so it records the kernel headers it includes."""
    parser.parseFile(path)
# remove previous destination directory
#destdir = "/tmp/bionic-kernel-headers/"
#cleanup_dir(destdir)
# try to read the config file
try:
cparser = kernel.ConfigParser()
cparser.parseFile( wanted_config )
except:
sys.stderr.write( "error: can't parse '%s'" % wanted_config )
sys.exit(1)
kernel_config = cparser.getDefinitions()
# first, obtain the list of kernel files used by our clients
fparser = kernel.HeaderScanner()
walk_source_files( args, parse_file, fparser, excludes=["kernel_headers"] )
headers = fparser.getHeaders()
files = fparser.getFiles()
# now recursively scan the kernel headers for additionnal sub-included headers
hparser = kernel.KernelHeaderFinder(headers,archs,kernel_root,kernel_config)
headers = hparser.scanForAllArchs()
if 0: # just for debugging
dumpHeaderUsers = False
print "the following %d headers:" % len(headers)
for h in sorted(headers):
if dumpHeaderUsers:
print " %s (%s)" % (h, repr(hparser.getHeaderUsers(h)))
else:
print " %s" % h
print "are used by the following %d files:" % len(files)
for f in sorted(files):
print " %s" % f
sys.exit(0)
for h in sorted(headers):
print h
sys.exit(0)
|
plugin/RunTransfer/RunTransfer.py | iontorrent/TS | 125 | 112710 | <reponame>iontorrent/TS
#!/usr/bin/env python
# Copyright (C) 2013 Ion Torrent Systems, Inc. All Rights Reserved
# RunTransfer plugin
import os
import json
import logging
import glob
import requests
import traceback
import zipfile
from distutils.version import LooseVersion
from ion.plugin import *
from ion.utils.explogparser import parse_log
from ftplib import FTP, error_perm
from django.utils.functional import cached_property
DJANGO_FTP_PORT = 8021
PLAN_PARAMS_FILENAME = 'plan_params.json'
CHEF_SUMMARY_FILENAME = 'chef_params.json'
EXPLOG_FILENAME = 'explog.txt'
# the list of required files for the root files
PGMSTYLE_REQUIRED_FILES = ['1.wells', 'analysis.bfmask.bin', 'processParameters.txt', 'avgNukeTrace_ATCG.txt', 'avgNukeTrace_TCAG.txt', 'bfmask.stats', 'bfmask.bin', 'analysis.bfmask.stats', 'analysis_return_code.txt', 'sigproc.log']
BLOCKSTYLE_SIGPROC_ROOT_LEVEL_REQUIRED_FILES = ['avgNukeTrace_ATCG.txt', 'avgNukeTrace_TCAG.txt', 'analysis.bfmask.stats']
REQUIRED_RESULTS_FILES = [PLAN_PARAMS_FILENAME]
OPTIONAL_RESULTS_FILES = [CHEF_SUMMARY_FILENAME]
OPTIONAL_SIGNAL_FILE_PATTERNS = ['Bead_density_20.png', 'Bead_density_70.png', 'Bead_density_200.png', 'Bead_density_1000.png', 'Bead_density_raw.png', 'Bead_density_contour.png']
class RunTransfer(IonPlugin):
"""Main class definition for this plugin"""
version = '5.16.0.0'
author = "<EMAIL>"
runtypes = [RunType.FULLCHIP, RunType.COMPOSITE]
results_dir = None
raw_data_dir = None
results_dir_base = None
output_dir = None
plugin_name = None
sigproc_dir = None
analysis_dir = None
plugin_dir = None
server_ip = None
user_name = None
user_password = None
upload_path = None
is_proton = False
total_blocks = None
transferred_blocks = None
json_head = {'Content-Type': 'application/json'}
rest_auth = None
spj = dict()
    @cached_property
    def ftp_client(self):
        """Lazily created, cached FTP connection to the destination server.

        Connects to the Django FTP service on port DJANGO_FTP_PORT (8021)
        and logs in with the configured user name and password.
        """
        client = FTP()
        client.connect(host=self.server_ip, port=DJANGO_FTP_PORT)
        client.login(user=self.user_name, passwd=self.user_password)
        return client
    @cached_property
    def barcodedata(self):
        """Parsed contents of 'barcodes.json' in the plugin's working
        directory, read once and cached for the run."""
        with open('barcodes.json', 'r') as handle:
            return json.load(handle)
def get_list_of_files_common(self):
"""Gets a list of common files to transfer"""
plugin_results_dir = self.spj['runinfo']['plugin']['results_dir']
file_transfer_list = list()
for required_file in REQUIRED_RESULTS_FILES:
file_transfer_list.append((os.path.join(plugin_results_dir, required_file), self.upload_path))
for optional_file in OPTIONAL_RESULTS_FILES:
optional_path = os.path.join(plugin_results_dir, optional_file)
if os.path.exists(optional_path):
file_transfer_list.append((optional_path, self.upload_path))
return file_transfer_list
def get_list_of_files_pgmstyle(self, root_sigproc_dir):
"""This helper method will get a list of the tuples (source path, destination path) and verify that the required files are present for the pgm style"""
file_transfer_list = list()
# because the explog can be in multiple locations we are going to have to get it from there
file_transfer_list.append((self.setup_explog(), self.upload_path))
# verify the files before we attempt to copy them
for filename in PGMSTYLE_REQUIRED_FILES:
filename = os.path.join(root_sigproc_dir, filename)
if not os.path.exists(filename):
raise Exception("The required file %s does not exists and thus the run transfer cannot be completed." % filename)
file_transfer_list.append((filename, os.path.join(self.upload_path, "onboard_results", "sigproc_results")))
return file_transfer_list
def get_list_of_files_blockstyle(self, root_sigproc_dir, block_dirs):
"""This helper method will get a list of the tuples (source path, destination path) and verify that the required files are present for the block style"""
file_transfer_list = list()
# because the explog can be in multiple locations we are going to have to get it from there
file_transfer_list.append((self.setup_explog(), self.upload_path))
# iterate through all of the signal processing output directories
dst_sigproc_dir = os.path.join(self.upload_path, "onboard_results", "sigproc_results")
# now collect required files from the root of the results directory
for filename in BLOCKSTYLE_SIGPROC_ROOT_LEVEL_REQUIRED_FILES:
file_transfer_list.append((os.path.join(root_sigproc_dir, filename), dst_sigproc_dir))
for block_dir in block_dirs:
destination_directory = os.path.join(dst_sigproc_dir, os.path.basename(block_dir))
# verify the files before we attempt to copy them
for filename in PGMSTYLE_REQUIRED_FILES:
filename = os.path.join(block_dir, filename)
if not os.path.exists(filename):
raise Exception("The required file %s does not exists and thus the run transfer cannot be completed." % filename)
file_transfer_list.append((filename, destination_directory))
return file_transfer_list
def copy_files(self, file_transfer_list):
"""This helper method will copy over all of the files in the directory"""
# assuming we have all of the required files on the local system, we will now do the transfer
destination_directories = set([destination for _, destination in file_transfer_list])
for destination_directory in destination_directories:
self.create_remote_directory(destination_directory)
total_transferred = 0
total_files = len(file_transfer_list)
for source_file_path, destination_directory in file_transfer_list:
self.set_upload_status(total_transferred, total_files, source_file_path)
self.file_transport(source_file_path, destination_directory)
total_transferred += 1
def setup_explog(self):
"""This method will find the experiment log and return it's location"""
# First look in raw data directory
original = os.path.join(self.raw_data_dir, EXPLOG_FILENAME)
if not os.path.exists(original):
# Next look in parent of raw data (case:Proton data)
original = os.path.join(os.path.dirname(self.raw_data_dir), EXPLOG_FILENAME)
# Next look in the report directory
if not os.path.exists(original):
original = os.path.join(self.analysis_dir, EXPLOG_FILENAME)
# Next look in the pgm_logs.zip file
if not os.path.exists(original) and os.path.exists(os.path.join(self.results_dir, 'pgm_logs.zip')):
original = os.path.join(self.raw_data_dir, EXPLOG_FILENAME)
with zipfile.ZipFile(os.path.join(self.results_dir, 'pgm_logs.zip'), mode='r') as pgm_zip_hangle:
explog_info = pgm_zip_hangle.getinfo(EXPLOG_FILENAME)
pgm_zip_hangle.extract(explog_info, self.raw_data_dir)
# read in the exp log
with open(original, 'r') as original_handle:
explog = parse_log(original_handle.read())
# HACK ALERT! In order to make sure we don't go over the maximum length of the experiment name (currently 128 characters) by the
# appending of the _foreign string in the from_wells_analysis.py logic, we are going to have to add a check to see if it can fit
# into the data base constraints with the appending of the foreign string
if len(explog['experiment_name'] + "_foreign") > 128:
raise Exception("We cannot transfer this result due to the length of the experiment name.")
return original
    def create_remote_directory(self, directory_path):
        """Create ``directory_path`` (and all parents) on the remote server
        via FTP, equivalent to ``mkdir -p``."""
        directories = filter(None, directory_path.split('/'))
        cur_dir = '/'
        for sub_directory in directories:
            # Create remote directory
            cur_dir = os.path.join(cur_dir, sub_directory)
            try:
                self.ftp_client.mkd(cur_dir)
            except error_perm:
                # Permission error here presumably means the directory
                # already exists, so creation continues with the next level.
                pass
def file_transport(self, filename, destination_path):
"""Transfers a file across the ftp"""
# delete the old file
try:
self.ftp_client.delete(os.path.join(destination_path, os.path.basename(filename)))
except:
# don't do anything in case this fails....
pass
# push the new file
try:
self.ftp_client.cwd(destination_path)
self.ftp_client.storbinary('STOR ' + os.path.basename(filename), open(filename, 'rb'))
except error_perm as exc:
if '550' in exc.message:
print(traceback.format_exc())
print("550 Error while attempting to transfer file %s to %s" % (filename, destination_path))
print(filename + " -> " + destination_path)
raise Exception("The destination already contains the files and cannot overwrite them. This is most likely due to a previous execution of Run Transfer.")
else:
raise
def start_reanalysis(self):
"""Set the status for a reanalysis"""
self.show_standard_status("<p><h2>Status:</h2><small>Launching Analysis</small><img src=\"/site_media/jquery/colorbox/images/loading.gif\" alt=\"Running Plugin\" style=\"float:center\"></img></p>\n")
analysis_params = {'directory': self.upload_path, 'is_thumbnail': False}
response = requests.post('http://' + self.server_ip + '/rundb/api/v1/experiment/from_wells_analysis/', data=json.dumps(analysis_params), headers=self.json_head, auth=self.rest_auth)
response.raise_for_status()
return response.content
def show_standard_status(self, stat_line):
"""method to display initial status view"""
# replace new lines with line breaks
stat_line = stat_line.replace("\n", "<br />")
with open('status_block.html', "wb") as display_fs:
display_fs.write("<html><head>\n")
display_fs.write("<link href=\"/pluginMedia/RunTransfer/bootstrap.min.css\" rel=\"stylesheet\">\n")
display_fs.write("</head><body>\n")
display_fs.write("<center>\n")
display_fs.write("<img src=\"/pluginMedia/%s/images/complete/wells.png\">" % self.plugin_name)
display_fs.write("<img src=\"/pluginMedia/%s/images/complete/bfmask.png\">" % self.plugin_name)
display_fs.write("<img src=\"/pluginMedia/%s/images/complete/nuke.png\">" % self.plugin_name)
display_fs.write("<img src=\"/pluginMedia/%s/images/complete/bead.png\">" % self.plugin_name)
display_fs.write("<img src=\"/pluginMedia/%s/images/complete/explog.png\">" % self.plugin_name)
if self.is_proton and self.transferred_blocks:
display_fs.write("<p>%s blocks transferred</p>" % self.transferred_blocks)
display_fs.write("<p> %s </p>" % stat_line)
display_fs.write("</center></body></html>\n")
def set_upload_status(self, total_transfered, total_files, file_uploading):
"""Creates the update page"""
# Updates the webpage showing status of the file transfer
progress = int(float(total_transfered) / float(total_files) * 100)
with open('status_block.html', "wb") as display_fs:
display_fs.write("<html><head>\n")
display_fs.write("<link href=\"/site_media/resources/bootstrap/css/bootstrap.min.css\" rel=\"stylesheet\">\n")
display_fs.write("<meta http-equiv=\"refresh\" content=\"10\" >")
display_fs.write("</head><body><center>\n")
display_fs.write("<p><h2>Status:</h2><small>Uploading %s</small>\n" % file_uploading)
display_fs.write("<div class=\"progress\"><div class=\"bar\" style=\"width: %d%%;\"></div></div>" % progress)
display_fs.write("</center></body></html>\n")
def init_status_page(self, stat_line):
"""method to clear initial status view (if previously run, previous status is cleared)"""
stat_line = stat_line.replace("\n", "<br />")
with open('status_block.html', "wb") as display_fs:
display_fs.write("<html><head>\n")
display_fs.write("<link href=\"/site_media/resources/bootstrap/css/bootstrap.min.css\" rel=\"stylesheet\">\n")
display_fs.write("</head><body>\n")
display_fs.write("<center>\n")
display_fs.write("<p> %s </p>" % stat_line)
display_fs.write("</center></body></html>\n")
def plugin_not_configured_error(self):
"""Write out the status that the plugin is not configured"""
with open('status_block.html', "wb") as display_fs:
display_fs.write("<html><head>\n")
display_fs.write("<link href=\"/site_media/resources/bootstrap/css/bootstrap.min.css\" rel=\"stylesheet\">\n")
display_fs.write("</head><body>\n")
display_fs.write("<center>\n")
display_fs.write("<p> PLUGIN IS NOT CONFIGURED. </p>")
display_fs.write("<p> Run the global configuration for this plugin from the <a href=\"/configure/plugins\" target=\"_blank\">Plugins page</a>. </p>")
display_fs.write("</center></body></html>\n")
def check_version(self):
"""This method will check the version of the remote site to make sure that it's a compatible version"""
# test to make sure both of them must have identical versions
api_version_directory = '/rundb/api/v1/torrentsuite/version'
local_version_response = requests.get(self.spj['runinfo']['net_location'] + api_version_directory)
local_version_response.raise_for_status()
local_version = '172.16.58.3'
remote_version_response = requests.get('http://' + self.server_ip + api_version_directory)
remote_version_response.raise_for_status()
remote_version = '172.16.58.3'
if not remote_version:
raise Exception('Could not establish version of remote computer, exiting.')
if local_version != remote_version:
raise Exception("In order to transfer runs the remote torrent suite must have the identical version.")
if LooseVersion(remote_version) < LooseVersion('5.3.0.0'):
raise Exception('The remote server\'s version of Torrent Suite is not compatible.')
def check_for_localhost(self):
"""This method will make sure that we are not trying to transfer to ourselves"""
system_id_api_endpoint = "/rundb/api/v1/ionmeshnode/system_id/"
remote_system_id_response = requests.get("http://" + self.server_ip + system_id_api_endpoint, auth=self.rest_auth)
remote_system_id_response.raise_for_status()
remote_system_id = json.loads(remote_system_id_response.content)['system_id']
api_key_args = {'api_key': self.spj['runinfo']['api_key'], 'pluginresult': str(self.spj['runinfo']['pluginresult'])}
local_system_id_response = requests.get(self.spj['runinfo']['net_location'] + system_id_api_endpoint, params=api_key_args)
local_system_id_response.raise_for_status()
local_system_id = json.loads(local_system_id_response.content)['system_id']
if local_system_id == remote_system_id:
raise Exception("The remote system is the same and this one. Transferring to the same machine is not allowed.")
def launch(self, data=None):
"""main method of plugin execution"""
def find_reference_in_list(short_name, reference_genomes_list):
"""Helper method to that detects if the short name is in the list"""
for reference_genome_item in reference_genomes_list:
if reference_genome_item['short_name'] == short_name:
return True
return False
try:
# turn off the logging to prevent logging of the url's with the api keys
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# Gather variables
self.spj = self.startplugin
self.results_dir = self.spj['runinfo']['report_root_dir']
self.raw_data_dir = self.spj['runinfo']['raw_data_dir']
self.results_dir_base = os.path.basename(self.spj['runinfo']['report_root_dir'])
self.output_dir = self.spj['runinfo']['results_dir']
self.plugin_name = self.spj['runinfo']['plugin_name']
self.sigproc_dir = self.spj['runinfo']['sigproc_dir']
self.analysis_dir = self.spj['runinfo']['analysis_dir']
self.plugin_dir = self.spj['runinfo']['plugin_dir']
api_key = self.spj['runinfo']['api_key']
try:
self.server_ip = self.spj['pluginconfig']['ip']
self.user_name = self.spj['pluginconfig']['user_name']
upload_path_local = self.spj['pluginconfig']['upload_path']
if self.server_ip == "" or self.user_name == "" or upload_path_local == "":
raise Exception()
except:
# If these fail, then plugin is not configured
self.plugin_not_configured_error()
return True
self.upload_path = os.path.join(self.spj['pluginconfig']['upload_path'], self.results_dir_base + '_foreign')
# Determine Dataset Type
self.is_proton = self.spj['runinfo']['platform'].lower() in ['proton', 's5']
if self.spj['runplugin']['run_type'].lower() == 'thumbnail':
self.show_standard_status("The plugin is set to only transfer non-thumbnail data.")
return True
plan = self.spj.get('plan', dict())
chef_summary = self.spj.get('chefSummary', dict())
# this method will check version compatibility
self.check_version()
# attempt to retrieve the password from secure storage
secret_args = {
'name': 'RunTransferConfig-' + self.server_ip + "-" + self.user_name,
'api_key': api_key,
'pluginresult': str(self.spj['runinfo']['pluginresult']),
}
secret_response = requests.get(self.spj['runinfo']['net_location'] + '/security/api/v1/securestring/', params=secret_args)
secret_response.raise_for_status()
json_secret = json.loads(secret_response.content)
if 'objects' not in json_secret or len(json_secret['objects']) == 0:
raise Exception("Could not get password from secure storage.")
self.user_password = json_secret['objects'][0]['decrypted']
self.rest_auth = (self.user_name, self.user_password)
# check to make sure that we are not attempting to transfer to the exact same machine
self.check_for_localhost()
# append the TS source system id to the plan which we will get from the global config which there should always be one and only one
global_config_response = requests.get(self.spj['runinfo']['net_location'] + '/rundb/api/v1/globalconfig/', params={'api_key': api_key, 'pluginresult': str(self.spj['runinfo']['pluginresult'])})
global_config_response.raise_for_status()
global_config = json.loads(global_config_response.content)
plan['runTransferFromSource'] = global_config['objects'][0]['site_name']
# check that all of the references are available on the remote server
reference_request = requests.get('http://' + self.server_ip + '/rundb/api/v1/referencegenome/?enabled=true', auth=self.rest_auth)
reference_request.raise_for_status()
reference_genomes = json.loads(reference_request.content)['objects']
for barcode_name, barcode_data in self.barcodedata.items():
reference_short_name = barcode_data.get('reference', '')
if barcode_name == 'nomatch' or not reference_short_name:
continue
if not find_reference_in_list(reference_short_name, reference_genomes):
raise Exception("The remote execution will not be run because the remote site does not have the reference " + reference_short_name)
# Display initial status html
self.show_standard_status("Starting")
# prepare transient files
if plan:
with open(os.path.join(self.output_dir, PLAN_PARAMS_FILENAME), 'w') as plan_file_handle:
json.dump(plan, plan_file_handle)
if chef_summary:
with open(os.path.join(self.output_dir, CHEF_SUMMARY_FILENAME), 'w') as chef_file_handle:
json.dump(chef_summary, chef_file_handle)
# get a list of all of the files which will be transferred
file_transfer_list = list()
src_sigproc_dir = os.path.join(self.results_dir, 'sigproc_results')
if self.is_proton:
# generate a list of all of the block directories
block_directories = [os.path.join(src_sigproc_dir, block_dir) for block_dir in os.listdir(src_sigproc_dir) if os.path.isdir(os.path.join(src_sigproc_dir, block_dir)) and 'thumbnail' not in block_dir]
# first collect a list of all of the files to transfer from all of the block directories
file_transfer_list = self.get_list_of_files_blockstyle(src_sigproc_dir, block_directories)
else:
# first collect a list of all of the files to transfer from all of the block directories
file_transfer_list = self.get_list_of_files_pgmstyle(src_sigproc_dir)
file_transfer_list += self.get_list_of_files_common()
# now transfer the files across the transport layer
# for file_pair in file_transfer_list:
# print(file_pair[0] + "-->" + file_pair[1] + "\n")
self.copy_files(file_transfer_list)
# Start the re-analysis on the target server
self.show_standard_status(self.start_reanalysis())
return True
except requests.exceptions.ConnectionError as exc:
print(exc)
self.show_standard_status("<strong>Could not create a connection to the server %s.</strong><br />" % self.server_ip)
raise
except Exception as exc:
print(exc)
self.show_standard_status("<strong>There was an issue running the plugin</strong><br />" + str(exc))
raise
# dev use only - makes testing easier
if __name__ == "__main__":
PluginCLI(RunTransfer())
|
ants/utils/multi_label_morphology.py | xemio/ANTsPy | 338 | 112734 | """
Morphology operations on multi-label ANTsImage types
"""
__all__ = ['multi_label_morphology']
import numpy as np
def multi_label_morphology(image, operation, radius, dilation_mask=None, label_list=None, force=False):
    """
    Morphology on multi label images.

    Wraps calls to iMath binary morphology. Additionally, dilation and closing operations preserve
    pre-existing labels. The choices of operation are:

    Dilation: dilates all labels sequentially, but does not overwrite original labels.
    This reduces dependence on the intensity ordering of adjoining labels. Ordering dependence
    can still arise if two or more labels dilate into the same space - in this case, the label
    with the lowest intensity is retained. With a mask, dilated labels are multiplied by the
    mask and then added to the original label, thus restricting dilation to the mask region.

    Erosion: Erodes labels independently, equivalent to calling iMath iteratively.

    Closing: Close holes in each label sequentially, but does not overwrite original labels.

    Opening: Opens each label independently, equivalent to calling iMath iteratively.

    Arguments
    ---------
    image : ANTsImage
        Input image should contain only 0 for background and positive integers for labels.

    operation : string
        One of MD, ME, MC, MO, passed to iMath.

    radius : integer
        radius of the morphological operation.

    dilation_mask : ANTsImage
        Optional binary mask to constrain dilation only (eg dilate cortical label into WM).

    label_list : list or tuple or numpy.ndarray
        Optional list of labels, to perform operation upon. Defaults to all unique
        intensities in image.

    force : boolean
        If True, allow the operation on images with more than 200 labels.

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> img = ants.image_read(ants.get_data('r16'))
    >>> labels = ants.get_mask(img,1,150) + ants.get_mask(img,151,225) * 2
    >>> labels_dilated = ants.multi_label_morphology(labels, 'MD', 2)
    >>> # should see original label regions preserved in dilated version
    >>> # label N should have mean N and 0 variance
    >>> print(ants.label_stats(labels_dilated, labels))
    """
    if (label_list is None) or (len(label_list) == 1):
        # NOTE(review): a caller-supplied *single-label* list is discarded here
        # and replaced with every label present in the image -- confirm this is
        # intended before relying on single-label calls.
        label_list = np.sort(np.unique(image[image > 0]))

    if (len(label_list) > 200) and (not force):
        raise ValueError('More than 200 labels... Make sure the image is discrete'
                         ' and call this function again with `force=True` if you'
                         ' really want to do this.')

    # NOTE: a previously-present `image_binary` clone (thresholded to 1) was
    # computed but never used; it has been removed to avoid a wasted full-image
    # copy.

    # Erosion / opening is simply a case of looping over the input labels
    if (operation == 'ME') or (operation == 'MO'):
        output = image.clone()
        for current_label in label_list:
            output = output.iMath(operation, radius, current_label)
        return output

    if dilation_mask is not None:
        if int(dilation_mask.max()) != 1:
            raise ValueError('Mask is either empty or not binary')

    # Dilation / closing: process labels sequentially so previously placed
    # labels are never overwritten.
    output = image.clone()
    for current_label in label_list:
        current_label_region = image.threshold_image(current_label, current_label)
        other_labels = output - current_label_region
        clab_binary_morphed = current_label_region.iMath(operation, radius, 1)

        if (operation == 'MD') and (dilation_mask is not None):
            # Restrict dilation to the mask, then drop voxels already claimed
            # by other labels.
            clab_binary_morphed_nooverlap = current_label_region + dilation_mask * clab_binary_morphed - other_labels
            clab_binary_morphed_nooverlap = clab_binary_morphed_nooverlap.threshold_image(1, 2)
        else:
            clab_binary_morphed_nooverlap = clab_binary_morphed - other_labels
            clab_binary_morphed_nooverlap = clab_binary_morphed_nooverlap.threshold_image(1, 1)

        output = output + clab_binary_morphed_nooverlap * current_label

    return output
|
public-engines/sms-spam-engine/marvin_sms_spam_engine/data_handler/acquisitor_and_cleaner.py | tallandroid/incubator-marvin | 101 | 112745 | <reponame>tallandroid/incubator-marvin
#!/usr/bin/env python
# coding=utf-8
"""AcquisitorAndCleaner engine action.
Use this module to add the project main code.
"""
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.engine_base import EngineBaseDataHandler
from marvin_python_toolbox.common.data import MarvinData
import pandas as pd
__all__ = ['AcquisitorAndCleaner']
logger = get_logger('acquisitor_and_cleaner')
class AcquisitorAndCleaner(EngineBaseDataHandler):
    """Downloads the SMS spam corpus and stores a cleaned DataFrame."""

    def __init__(self, **kwargs):
        super(AcquisitorAndCleaner, self).__init__(**kwargs)

    def execute(self, params, **kwargs):
        """Fetch spam.csv, normalize its columns and set the initial dataset.

        Side effect: assigns the cleaned DataFrame to
        ``self.marvin_initial_dataset``.
        """
        csv_path = MarvinData.download_file("https://s3.amazonaws.com/marvin-engines-data/spam.csv")
        dataset = pd.read_csv(csv_path, encoding='latin-1')
        # The raw export ships three empty, unnamed trailing columns.
        dataset = dataset.drop(["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis=1)
        dataset = dataset.rename(columns={"v1": "label", "v2": "text"})
        # Numeric target column: ham -> 0, spam -> 1.
        dataset['label_num'] = dataset['label'].map({'ham': 0, 'spam': 1})
        self.marvin_initial_dataset = dataset
|
openaps/init.py | ijustlovemath/openaps | 525 | 112749 |
import os
def init(args):
    """Hand control over to the external ``git-openaps-init`` helper.

    Replaces the current process image via execvp, so this never returns.
    """
    command = ['git-openaps-init'] + list(args.args)
    os.execvp(command[0], command)
|
docs/Tutorials/NetlistParser/bookshelf_parser.py | mabrains/ALIGN-public | 119 | 112755 | <filename>docs/Tutorials/NetlistParser/bookshelf_parser.py
import re
from collections import namedtuple
from collections import OrderedDict
import json
# Token specification
COMMENT = r'(?P<COMMENT>\#.*\n)'
EOLN = r'(?P<EOLN>\n)'
NUM = r'(?P<NUM>-?\d*\.\d+|-?\d+\.?)'
SEMI = r'(?P<SEMI>;)'
LPAREN = r'(?P<LPAREN>\()'
RPAREN = r'(?P<RPAREN>\))'
LBRACE = r'(?P<LBRACE>\{)'
RBRACE = r'(?P<RBRACE>\})'
MINUS = r'(?P<MINUS>\-)'
PLUS = r'(?P<PLUS>\+)'
COLON = r'(?P<COLON>\:)'
COMMA = r'(?P<COMMA>\,)'
FSLASH = r'(?P<FSLASH>\/)'
NAME = r'(?P<NAME>[a-zA-Z_][a-zA-Z_!0-9]*)'
LIT = r'(?P<LIT>\".*?\")'
WS = r'(?P<WS>\s+)'
master_pat = re.compile('|'.join([COMMENT,EOLN,NUM,SEMI,LPAREN,RPAREN,LBRACE,RBRACE,MINUS,PLUS,COLON,COMMA,FSLASH,NAME,LIT,WS]))
# Tokenizer
Token = namedtuple('Token', ['type','value'])
def generate_tokens(text):
scanner = master_pat.scanner(text)
for m in iter(scanner.match, None):
tok = Token(m.lastgroup, m.group())
if tok.type != 'WS': # and tok.type != 'COMMENT':
yield tok
def test_tokenize_two_strings():
    """Two quoted strings tokenize to exactly two LIT tokens."""
    tokens = list(generate_tokens('"Steve" "Burns"'))
    print(tokens)
    assert len(tokens) == 2
    assert [t.type for t in tokens] == ['LIT', 'LIT']
    assert [t.value for t in tokens] == ['"Steve"', '"Burns"']
def test_tokenize_numbers():
    """A leading '--' splits into MINUS + negative NUM; '6.' and '.8' are NUMs."""
    tokens = list(generate_tokens('--6.8 6. .8'))
    print(tokens)
    assert len(tokens) == 4
    assert tokens[0].type == 'MINUS'
    assert [t.type for t in tokens[1:]] == ['NUM', 'NUM', 'NUM']
    assert [t.value for t in tokens[1:]] == ['-6.8', '6.', '.8']
class Parser:
    '''
    Base class for recursive descent parsers.

    Use the ._accept() method to test and accept the current lookahead token.
    Use the ._accept_keyword() method to test and accept the current
    lookahead token if it matches the argument.
    Use the ._expect() method to exactly match and consume the next token on
    the input (or raise a SyntaxError if it doesn't match).
    Use the ._expect_keyword() method to exactly match and consume the next token on
    the input (or raise a SyntaxError if it doesn't match).
    '''

    def parse(self,text):
        # Entry point: tokenize `text` and delegate to the subclass's whole().
        self.tokens = generate_tokens(text)
        self.tok = None # Last symbol consumed
        self.nexttok = None # Next symbol tokenized
        self._advance() # Load first lookahead token
        return self.whole()

    def _advance(self):
        'Advance one token ahead'
        # next(..., None) returns None at end of input instead of raising,
        # which the _accept/_expect helpers rely on as the end marker.
        self.tok, self.nexttok = self.nexttok, next(self.tokens, None)
        # print( self.tok, self.nexttok)

    def _accept(self,toktype):
        'Test and consume the next token if it matches toktype'
        if self.nexttok and self.nexttok.type == toktype:
            self._advance()
            return True
        else:
            return False

    def _accept_keyword(self,str):
        'Test and consume the next token if it matches the keyword str'
        # Keywords are ordinary NAME tokens with a specific spelling.
        if self.nexttok and self.nexttok.type == 'NAME' and self.nexttok.value == str:
            self._advance()
            return True
        else:
            return False

    def _expect(self,toktype):
        'Consume next token if it matches toktype or raise SyntaxError'
        if not self._accept(toktype):
            raise SyntaxError('Expected ' + toktype)

    def _expect_keyword(self,str):
        'Consume next token if it matches argument or raise SyntaxError'
        if not self._accept_keyword(str):
            raise SyntaxError('Expected keyword' + str)

    def parseint( self):
        # Consume a NUM token and return its value as an int.
        self._expect( 'NUM')
        return int(self.tok.value)
class Blank:
    """Empty attribute bag the parsers use to accumulate named results."""
    pass
class BlocksParser(Parser):
    """Parser for bookshelf .blocks files.

    Returns a dict with "blocks" (each carrying its pins, obstructions and
    a stitched-in outline) and "terminals".
    """

    def pointList( self):
        """Parse zero or more "(x, y)" pairs into a list of {"x","y"} dicts."""
        points = []
        while self._accept( 'LPAREN'):
            x = self.parseint()
            self._expect( 'COMMA')
            y = self.parseint()
            self._expect( 'RPAREN')
            points.append( { "x": x, "y": y})
        return points

    def whole(self):
        """Parse the entire token stream; invoked by Parser.parse()."""
        b = Blank()
        b.outlines = []
        b.blocks = []
        b.terminals = []
        while True:
            if self._accept( 'EOLN'):
                pass
            elif self._accept( 'COMMENT'):
                pass
            elif self._accept_keyword( 'NumSoftRectangularBlocks'):
                self._expect( 'COLON')
                b.NumSoftRectangularBlocks = self.parseint()
                self._expect( 'EOLN')
            elif self._accept_keyword( 'NumHardRectilinearBlocks'):
                self._expect( 'COLON')
                b.NumHardRectilinearBlocks = self.parseint()
                self._expect( 'EOLN')
            elif self._accept_keyword( 'NumTerminals'):
                self._expect( 'COLON')
                b.NumTerminals = self.parseint()
                self._expect( 'EOLN')
            elif self._accept_keyword( 'BLOCK'):
                # "BLOCK <name> : <n>" followed by n pin lines and any number
                # of "INT <layer> (points...)" obstruction lines.
                self._expect( 'NAME')
                block_nm = self.tok.value
                self._expect( 'COLON')
                n = self.parseint()
                self._expect( 'EOLN')
                pin_lst = []
                for i in range(n):
                    self._expect( 'NAME')
                    pin_name = self.tok.value
                    self._expect( 'NAME')
                    layer = self.tok.value
                    point_lst = self.pointList()
                    # BUG FIX: the old `pin_name not in pin_lst` check was
                    # vacuous because pin_lst holds dicts, never bare names.
                    assert all(p["pin"] != pin_name for p in pin_lst)
                    pin_lst.append( { "pin": pin_name, "layer" : layer, "points": point_lst})
                    self._expect( 'EOLN')
                obs_lst = []
                while self._accept_keyword( 'INT'):
                    self._expect( 'NAME')
                    layer = self.tok.value
                    point_lst = self.pointList()
                    # BUG FIX: obstructions were appended to point_lst itself
                    # (making it self-referential) while obs_lst stayed empty,
                    # so INT lines were silently discarded.
                    obs_lst.append( (layer, point_lst))
                    self._expect( 'EOLN')
                assert n == len(pin_lst)
                b.blocks.append( { "block": block_nm, "pins": pin_lst, "obstructions": obs_lst})
            elif self._accept( 'NAME'):
                nm = self.tok.value
                if self._accept_keyword( 'hardrectilinear'):
                    # "<block> hardrectilinear <m> (points...)" outline line.
                    m = self.parseint()
                    point_lst = self.pointList()
                    assert m == len(point_lst)
                    b.outlines.append( { "block": nm, "outline": point_lst})
                    self._expect( 'EOLN')
                else:
                    # "<net> <layer> terminal" line.
                    self._expect( 'NAME')
                    layer = self.tok.value
                    self._expect_keyword( 'terminal')
                    b.terminals.append( { "net": nm, "layer" : layer})
                    self._expect( 'EOLN')
            elif self.nexttok is None:
                break
            else:
                raise SyntaxError( 'Expected NAME')
        # Sanity checks against the counts declared in the file header.
        assert b.NumSoftRectangularBlocks == 0
        assert b.NumHardRectilinearBlocks == len( b.blocks)
        assert b.NumHardRectilinearBlocks == len( b.outlines)
        assert b.NumTerminals == len( b.terminals)
        outline_tbl = OrderedDict()
        for outline in b.outlines:
            outline_tbl[outline['block']] = outline
        # Stitch together blocks and outlines
        for block in b.blocks:
            block['outline'] = outline_tbl[block['block']]
        return { "blocks": b.blocks,
                 "terminals": b.terminals}
class NetsParser(Parser):
    """Parser for bookshelf .nets files: per-net lists of (instance, pin)."""

    def whole(self):
        # Returns header counts plus an insertion-ordered net -> pins mapping.
        b = Blank()
        b.nets = OrderedDict()
        while True:
            if self._accept( 'EOLN'):
                pass
            elif self._accept( 'COMMENT'):
                pass
            elif self._accept_keyword( 'NumNets'):
                self._expect( 'COLON')
                b.NumNets = self.parseint()
                self._expect( 'EOLN')
            elif self._accept_keyword( 'NumPins'):
                self._expect( 'COLON')
                b.NumPins = self.parseint()
                self._expect( 'EOLN')
            elif self._accept( 'NAME'):
                # "<net> : <n>" followed by n "<instance> <pin>" lines.
                nm = self.tok.value
                self._expect( 'COLON')
                n = self.parseint()
                self._expect( 'EOLN')
                b.nets[nm] = []
                for i in range(n):
                    self._expect( 'NAME')
                    instance_nm = self.tok.value
                    self._expect( 'NAME')
                    pin_nm = self.tok.value
                    b.nets[nm].append( (instance_nm, pin_nm))
                    self._expect( 'EOLN')
            elif self.nexttok is None:
                break
            else:
                raise SyntaxError( 'Expected NAME')
        return { "NumNets": b.NumNets, "NumPins": b.NumPins, "nets": b.nets}
class PlParser(Parser):
    """Parser for bookshelf .pl placement files (die box, instances, terminals)."""

    def bracePoint(self):
        # Parse a "{x, y}" pair and return it as an (x, y) tuple.
        self._expect( 'LBRACE')
        x = self.parseint()
        self._expect( 'COMMA')
        y = self.parseint()
        self._expect( 'RBRACE')
        return (x,y)

    def whole(self):
        b = Blank()
        b.instances = OrderedDict()
        b.terminals = OrderedDict()
        while True:
            if self._accept( 'EOLN'):
                pass
            elif self._accept( 'COMMENT'):
                pass
            elif self._accept_keyword( 'DIE'):
                # "DIE {llx, lly} {urx, ury}" -- the placement bounding box.
                ll = self.bracePoint()
                ur = self.bracePoint()
                b.bbox = (ll,ur)
                self._expect( 'EOLN')
            elif self._accept( 'NAME'):
                # "<name> <x> <y> [<orientation>]": with an orientation token it
                # is a placed instance, otherwise a terminal location.
                nm = self.tok.value
                x = self.parseint()
                y = self.parseint()
                if self._accept( 'NAME'):
                    t = self.tok.value
                    b.instances[nm] = (x,y,t)
                else:
                    b.terminals[nm] = (x,y)
                self._expect( 'EOLN')
            elif self.nexttok is None:
                break
            else:
                raise SyntaxError( 'Expected NAME')
        return { "bbox": b.bbox, "instances": b.instances, "terminals": b.terminals}
class ConstsParser(Parser):
    """Parser for constraint files: SymmNet, CritNet, ShieldNet, MatchBlock."""

    def parseHierName(self):
        # Parse a '/'-separated hierarchical name into a list of components.
        lst = []
        self._expect( 'NAME')
        lst.append( self.tok.value)
        while self._accept( 'FSLASH'):
            self._expect( 'NAME')
            lst.append( self.tok.value)
        return lst

    def braceTuple(self):
        # Parse "{name, name, ...}" into a list of hierarchical names.
        lst = []
        self._expect( 'LBRACE')
        lst.append( self.parseHierName())
        while self._accept( 'COMMA'):
            lst.append( self.parseHierName())
        self._expect( 'RBRACE')
        return lst

    def whole(self):
        b = Blank()
        b.symmnets = []
        b.critnets = OrderedDict()
        b.shieldnets = []
        b.matchblocks = []
        while True:
            if self._accept( 'EOLN'):
                pass
            elif self._accept( 'COMMENT'):
                pass
            elif self._accept_keyword( 'SymmNet'):
                # SymmNet ( {net,pins...} , {net,pins...} )
                self._expect( 'LPAREN')
                t0 = self.braceTuple()
                self._expect( 'COMMA')
                t1 = self.braceTuple()
                self._expect( 'RPAREN')
                b.symmnets.append( (t0,t1))
                self._expect( 'EOLN')
            elif self._accept_keyword( 'CritNet'):
                # CritNet ( <net> , <priority> )
                self._expect( 'LPAREN')
                self._expect( 'NAME')
                net_nm = self.tok.value
                self._expect( 'COMMA')
                self._expect( 'NAME')
                priority = self.tok.value
                self._expect( 'RPAREN')
                self._expect( 'EOLN')
                b.critnets[net_nm] = priority
            elif self._accept_keyword( 'ShieldNet'):
                # ShieldNet ( <net> )
                self._expect( 'LPAREN')
                self._expect( 'NAME')
                net_nm = self.tok.value
                self._expect( 'RPAREN')
                self._expect( 'EOLN')
                b.shieldnets.append( net_nm)
            elif self._accept_keyword( 'MatchBlock'):
                # MatchBlock ( <block> , <block> )
                self._expect( 'LPAREN')
                self._expect( 'NAME')
                b0 = self.tok.value
                self._expect( 'COMMA')
                self._expect( 'NAME')
                b1 = self.tok.value
                self._expect( 'RPAREN')
                self._expect( 'EOLN')
                b.matchblocks.append( (b0,b1))
            elif self.nexttok is None:
                break
            else:
                raise SyntaxError( 'Expected SymmNet, CritNet, ShieldNet, or MatchBlock keyword.')
        return { "symmnets": b.symmnets, "critnets": b.critnets, "shieldnets": b.shieldnets, "matchblocks": b.matchblocks}
def test_blocks():
    """Smoke test: parse a sample .blocks fixture and dump the result as JSON."""
    s = """#UMN blocks 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumSoftRectangularBlocks : 0
NumHardRectilinearBlocks : 3
NumTerminals : 5
L1_MM4_MM5 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
L1_MM1_MM0 hardrectilinear 4 (0, 0) (-0, 842) (648, 842) (648, 0)
L1_MM3_MM2 hardrectilinear 4 (-0, 0) (0, 789) (648, 789) (648, 0)
BLOCK L1_MM4_MM5 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (196, 748) (196, 788) (236, 788) (236, 748)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (196, 789)
INT M1 (412, 619) (412, 789) (452, 789) (412, 789)
BLOCK L1_MM1_MM0 : 5
G1 M1 (108, 684) (108, 842) (148, 842) (148, 684)
G2 M1 (504, 684) (504, 836) (544, 836) (544, 684)
D1 M1 (88, 4) (88, 146) (128, 146) (128, 4)
S M1 (236, 796) (236, 836) (412, 836) (412, 796)
D2 M1 (520, -0) (520, 146) (560, 146) (560, 0)
INT M1 (196, 612) (196, 836) (236, 836) (196, 836)
INT M1 (412, 612) (412, 836) (452, 836) (412, 836)
BLOCK L1_MM3_MM2 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (236, 749) (236, 789) (412, 789) (412, 749)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (236, 619)
INT M1 (412, 619) (412, 789) (452, 789) (452, 619)
INT M1 (89, 39) (89, 148) (125, 148) (125, 39)
INT M1 (89, 39) (89, 75) (471, 75) (471, 39)
gnd! M1 terminal
vdd! M1 terminal
net2 M1 terminal
net14 M1 terminal
net17 M1 terminal
"""
    # print( list(generate_tokens( s)))
    p = BlocksParser()
    print( json.dumps( p.parse( s), indent=2) + '\n')
def test_nets():
    """Smoke test: parse a sample .nets fixture and dump the result as JSON."""
    s = """#UMN nets 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumNets : 8
NumPins : 11
net2 : 2
L1_MM3_MM2 D1
terminal net2
net8 : 2
L1_MM4_MM5 D1
L1_MM1_MM0 D1
net10 : 2
L1_MM3_MM2 D2
L1_MM1_MM0 S
net11 : 2
L1_MM4_MM5 D2
L1_MM1_MM0 D2
net14 : 2
terminal net14
L1_MM1_MM0 G2
net17 : 2
terminal net17
L1_MM1_MM0 G1
gnd! : 2
L1_MM3_MM2 S
terminal gnd!
vdd! : 2
L1_MM4_MM5 S
terminal vdd!
"""
    # print( list(generate_tokens( s)))
    p = NetsParser()
    print( json.dumps( p.parse( s), indent=2) + '\n')
def test_pl():
    """Smoke test: parse a sample .pl placement fixture and dump it as JSON."""
    s = """# TAMU blocks 1.0
DIE {0, 0} {648, 2620}
L1_MM4_MM5 0 0 N
L1_MM1_MM0 648 889 FN
L1_MM3_MM2 0 2620 FS
net2 648 1932
net14 0 1649
net17 648 1652
gnd! 0 1851
vdd! 0 768
"""
    # print( list(generate_tokens( s)))
    p = PlParser()
    print( json.dumps( p.parse( s), indent=2) + '\n')
def test_consts():
    """Smoke test: parse SymmNet/CritNet constraints (spaced syntax variant)."""
    s = """SymmNet ( {net8,L1_MM1_MM0/D1,L1_MM4_MM5/D1} , {net11,L1_MM1_MM0/D2,L1_MM4_MM5/D2} )
SymmNet ( {net17,L1_MM1_MM0/G1,net17} , {net14,L1_MM1_MM0/G2,net14} )
CritNet ( net8 , min )
CritNet ( net10 , mid )
"""
    # print( list(generate_tokens( s)))
    p = ConstsParser()
    print( json.dumps( p.parse( s), indent=2) + '\n')
def test_consts_sc():
    """Smoke test: parse a full constraint fixture (compact, no-space syntax)."""
    s = """CritNet(net7, min)
CritNet(net23, min)
SymmNet({Voutn,L1_CC0_CC2/CN2,L0_MM7/D,I0/Voutn},{Voutp,L1_CC0_CC2/CN1,L0_MM8/S,I0/Voutp})
SymmNet({net7,L1_CC5_CC7/CP2,L1_CC0_CC2/CP2,I0/Vinp,L0_MM3/D},{net23,L1_CC5_CC7/CP1,L1_CC0_CC2/CP1,I0/Vinn,L0_MM1/S})
SymmNet({net11,L0_MM6/D,L0_MM7/S,L1_CC1_CC3/CN1},{net12,L0_MM11/S,L0_MM8/D,L1_CC1_CC3/CN2})
SymmNet({net5,L1_CC4_CC6/CP2,L1_CC1_CC3/CP1,L0_MM5/D,L0_MM3/S},{net6,L1_CC4_CC6/CP1,L1_CC1_CC3/CP2,L0_MM9/S,L0_MM1/D})
SymmNet({net4,L1_CC4_CC6/CN2,L0_MM2/D,L0_MM4/D},{net3,L1_CC4_CC6/CN1,L0_MM0/S,L0_MM10/S})
SymmNet({Vinn,L1_CC5_CC7/CN1,L0_MM2/S},{Vinp,L1_CC5_CC7/CN2,L0_MM0/D})
ShieldNet(Vinn)
ShieldNet(Vinp)
ShieldNet(net3)
ShieldNet(net4)
ShieldNet(net5)
ShieldNet(net6)
ShieldNet(net7)
ShieldNet(net23)
MatchBlock(L0_MM0,L0_MM2)
MatchBlock(L0_MM10,L0_MM4)
MatchBlock(L0_MM9,L0_MM5)
MatchBlock(L0_MM3,L0_MM1)
MatchBlock(L0_MM6,L0_MM11)
MatchBlock(L0_MM7,L0_MM8)
"""
    # print( list(generate_tokens( s)))
    p = ConstsParser()
    print( json.dumps( p.parse( s), indent=2) + '\n')
|
ghostwriter/rolodex/migrations/0017_projectobjective_position.py | bbhunter/Ghostwriter | 601 | 112766 | <filename>ghostwriter/rolodex/migrations/0017_projectobjective_position.py
# Generated by Django 3.0.10 on 2021-02-27 00:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds ProjectObjective.position (default 1) for explicit list ordering."""

    dependencies = [
        ('rolodex', '0016_auto_20210224_0645'),
    ]

    operations = [
        migrations.AddField(
            model_name='projectobjective',
            name='position',
            field=models.IntegerField(default=1, verbose_name='List Position'),
        ),
    ]
|
suplemon/modules/rstrip.py | johnmbaughman/suplemon | 912 | 112768 | # -*- encoding: utf-8
from suplemon.suplemon_module import Module
class RStrip(Module):
    """Trim whitespace from the end of lines."""

    def run(self, app, editor, args):
        # Strip trailing whitespace from every line that has a cursor on it.
        for number in editor.get_lines_with_cursors():
            current = editor.lines[number]
            current.set_data(current.data.rstrip())
# Registration descriptor picked up by Suplemon's module loader.
module = {
    "class": RStrip,
    "name": "rstrip",
}
|
components/aws/sagemaker/tests/unit_tests/tests/deploy/test_deploy_spec.py | nostro-im/pipelines | 2,860 | 112796 | from deploy.src.sagemaker_deploy_spec import SageMakerDeploySpec
import unittest
class DeploySpecTestCase(unittest.TestCase):
    """Checks SageMakerDeploySpec accepts the minimal required CLI arguments."""

    REQUIRED_ARGS = ["--region", "us-west-2", "--model_name_1", "model-test"]

    def test_minimum_required_args(self):
        # Will raise if the inputs are incorrect
        spec = SageMakerDeploySpec(self.REQUIRED_ARGS)
|
tonic/agents/__init__.py | Eyalcohenx/tonic | 350 | 112797 | <gh_stars>100-1000
from .agent import Agent
from .basic import Constant, NormalRandom, OrnsteinUhlenbeck, UniformRandom
# `__all__` must contain *strings* naming the public API; listing the class
# objects themselves makes `from tonic.agents import *` raise
# "TypeError: Item in __all__ must be str" on Python 3.
__all__ = ['Agent', 'Constant', 'NormalRandom', 'OrnsteinUhlenbeck', 'UniformRandom']
|
terrascript/mysql/__init__.py | hugovk/python-terrascript | 507 | 112819 | # terrascript/mysql/__init__.py
import terrascript
class mysql(terrascript.Provider):
    """Terraform `mysql` provider block; all behavior inherited from Provider."""
    pass
|
build/android/pylib/utils/decorators.py | zealoussnow/chromium | 14,668 | 112821 | <gh_stars>1000+
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import logging
def Memoize(f):
  """Decorator to cache return values of function.

  Results are keyed on ``repr((args, kwargs))``, which supports unhashable
  arguments at the cost of keying on their textual representation.
  """
  cache = {}

  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    cache_key = repr((args, kwargs))
    try:
      return cache[cache_key]
    except KeyError:
      cache[cache_key] = f(*args, **kwargs)
      return cache[cache_key]

  return wrapper
def NoRaiseException(default_return_value=None, exception_message=''):
  """Returns decorator that catches and logs uncaught Exceptions.

  Args:
    default_return_value: Value to return in the case of uncaught Exception.
    exception_message: Message for uncaught exceptions.
  """
  def decorator(func):
    @functools.wraps(func)
    def guarded(*args, **kwargs):
      try:
        result = func(*args, **kwargs)
      except Exception:  # pylint: disable=broad-except
        logging.exception(exception_message)
        return default_return_value
      return result
    return guarded
  return decorator
|
configs/benchmarks/classification/imagenet/imagenet_10percent/resnet50_head1_4xb64-steplr1e-2-20e_in1k-10pct.py | mitming/mmselfsup | 355 | 112880 | _base_ = 'resnet50_head1_4xb64-steplr1e-1-20e_in1k-10pct.py'
# optimizer
optimizer = dict(lr=0.01)
|
Competitions/Skillenza/Blume Fellows Challenge/IntouchApp Recruitment Test/Merge Candidates.py | cnm06/Competitive-Programming | 994 | 112891 | contacts = [['p1', 'e1'],['p2', 'e2'],['p2', 'e3'],['e3', 'p4'],['e2', 'p5'],['p3', 'e4', 'p6'],['e4'],['p6', 'e5']]
visited = [False]*len(contacts)
contacts = map(set, contacts)
def dfs(node,index, temp):
visited[index] = True
result = node
for i,item in enumerate(contacts):
if not visited[i] and not result.isdisjoint(item):
temp.append('c'+str(i+1))
result.update(dfs(item,i, temp))
return result
def merge_contacts():
result = []
ans = []
for i,node in enumerate(contacts):
if not visited[i]:
temp = ['c'+str(i+1)]
result.append(list(dfs(node,i, temp)))
ans.append(temp)
temp = []
return ans
print merge_contacts()
|
tests/trac/test-issue-0069.py | eLBati/pyxb | 123 | 112901 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
# Inline schema for trac issue 69: Country is optional and restricted by a
# pattern facet that allows either a two-letter code or the empty string.
xsd='''<?xml version="1.0" encoding="utf-8"?>
<xs:schema elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="Address">
<xs:complexType>
<xs:sequence>
<xs:element name="City" type="xs:string" />
<xs:element name="Country" minOccurs="0">
<xs:simpleType>
<xs:restriction base="xs:string">
<xs:pattern value="[A-Z]{2}|" />
</xs:restriction>
</xs:simpleType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
'''

# Generate Python bindings from the schema text and execute them in this
# module's namespace (this defines the `Address` class used by the test).
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestIssue0069 (unittest.TestCase):
    """Regression test for pyxb trac issue 69 (pattern facet with empty alternative)."""

    def testCountry (self):
        # 'USA' violates the [A-Z]{2}| pattern facet and must be rejected;
        # the two-letter 'US' must be accepted and serialized.
        address = Address()
        address.City = 'New-York'
        with self.assertRaises(SimpleFacetValueError) as cm:
            address.Country = 'USA'
        address.Country = 'US'
        xmlt = '<Address><City>New-York</City><Country>US</Country></Address>';
        xmld = xmlt.encode('utf-8');
        self.assertEqual(address.toxml('utf8', root_only=True), xmld)

if __name__ == '__main__':
    unittest.main()
|
trinity/protocol/les/managers.py | pipermerriam/py-evm | 137 | 112912 | <filename>trinity/protocol/les/managers.py
from typing import (
Any,
Dict,
Tuple,
Type,
TYPE_CHECKING,
)
from eth_typing import BlockIdentifier
from eth.rlp.headers import BlockHeader
from p2p.exceptions import (
MalformedMessage,
)
from p2p.protocol import (
Command,
)
from trinity.protocol.common.managers import (
BaseRequestManager as _BaseRequestManager,
)
from trinity.utils.les import (
gen_request_id,
)
from .commands import (
BlockHeaders,
)
from .requests import HeaderRequest
if TYPE_CHECKING:
from .peer import LESPeer # noqa: #401
# Concrete specialization of the generic request manager: a LESPeer issues a
# HeaderRequest, receives a raw message dict, and normalizes it into a tuple
# of BlockHeader objects.
BaseRequestManager = _BaseRequestManager[
    'LESPeer',
    HeaderRequest,
    Dict[str, Any],
    Tuple[BlockHeader, ...],
]
class GetBlockHeadersRequestManager(BaseRequestManager):
    """Issues GetBlockHeaders requests to a LES peer and awaits the response."""

    msg_queue_maxsize = 100

    _response_msg_type: Type[Command] = BlockHeaders

    # All `RequestManager` classes are expected to implement the `__call__`
    # method, including changing the function signature, thus the
    # `# type: ignore` here is both expected and required.
    async def __call__(self,  # type: ignore
                       block_number_or_hash: BlockIdentifier,
                       max_headers: int = None,
                       skip: int = 0,
                       reverse: bool = True,
                       timeout: int = None) -> Tuple[BlockHeader, ...]:
        # LES requests carry an explicit request id so responses can be
        # matched back to the request that produced them.
        request = HeaderRequest(
            block_number_or_hash,
            max_headers,
            skip,
            reverse,
            gen_request_id(),
        )
        return await self._request_and_wait(request, timeout)

    def _send_sub_proto_request(self, request: HeaderRequest) -> None:
        self._peer.sub_proto.send_get_block_headers(request)

    async def _normalize_response(self,
                                  msg: Dict[str, Any]
                                  ) -> Tuple[BlockHeader, ...]:
        # Validate the peer-supplied payload shape before trusting it.
        if not isinstance(msg, dict):
            raise MalformedMessage("msg must be a dictionary")
        if 'headers' not in msg:
            raise MalformedMessage("No 'headers' key found in response")
        if not all(isinstance(item, BlockHeader) for item in msg['headers']):
            raise MalformedMessage(
                "`headers` key must be a tuple of `BlockHeader` instances"
            )
        return msg['headers']

    def _get_item_count(self, msg: Dict[str, Any]) -> int:
        headers = msg['headers']
        return len(headers)
|
src/super_gradients/common/factories/losses_factory.py | Deci-AI/super-gradients | 308 | 112949 | from super_gradients.common.factories.base_factory import BaseFactory
from super_gradients.training.losses import LOSSES
class LossesFactory(BaseFactory):
    """Factory that resolves loss names/configs via the global LOSSES registry."""

    def __init__(self):
        super().__init__(LOSSES)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.