hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf989dad1cd8bb8adefffe56dea5e86d050cffe | 4,217 | py | Python | api_client/fortiauth_client.py | beibei1989/api_client | 6e8ba0f53946fc39dd769067271b03308ef9325e | [
"Apache-2.0"
] | 1 | 2018-06-14T19:20:19.000Z | 2018-06-14T19:20:19.000Z | api_client/fortiauth_client.py | beibei1989/api_client | 6e8ba0f53946fc39dd769067271b03308ef9325e | [
"Apache-2.0"
] | 1 | 2020-03-24T20:36:41.000Z | 2020-03-24T20:36:41.000Z | api_client/fortiauth_client.py | beibei1989/api_client | 6e8ba0f53946fc39dd769067271b03308ef9325e | [
"Apache-2.0"
] | 5 | 2018-01-31T00:59:33.000Z | 2020-10-29T20:02:04.000Z | # Copyright 2015 Fortinet, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from api_client import base
from api_client import constants as const
from api_client import client
from api_client import generic_request
from api_client.templates import fortiauth as templates
LOG = logging.getLogger(__name__)
DEFAULT_HTTP_TIMEOUT = const.DEFAULT_HTTP_TIMEOUT * 20
DEFAULT_RETRIES = 0
DEFAULT_REDIRECTS = 1
DEFAULT_CONCURRENT_CONNECTIONS = base.DEFAULT_CONCURRENT_CONNECTIONS
DEFAULT_CONTENT_TYPE = const.DEFAULT_HTTP_HEADERS['Content-Type']
class FortiAuthApiClient(client.ApiClient):
    """API client for FortiAuthenticator.

    FortiAuthenticator authenticates every request with HTTP Basic auth,
    so `_login` only builds the Authorization header instead of doing a
    session handshake.  Requests are rendered from the templates in
    `api_client.templates.fortiauth`.
    """
    # Sent as the User-Agent header on every request.
    user_agent = 'FortiAuth Python API Client'
    def __init__(self, api_providers, user=None, password=None,
                 concurrent_connections=DEFAULT_CONCURRENT_CONNECTIONS,
                 gen_timeout=base.GENERATION_ID_TIMEOUT,
                 use_https=True,
                 connect_timeout=base.DEFAULT_CONNECT_TIMEOUT,
                 http_timeout=DEFAULT_HTTP_TIMEOUT,
                 retries=DEFAULT_RETRIES,
                 redirects=DEFAULT_REDIRECTS,
                 auto_login=True):
        '''Constructor. Adds the following:
        :param api_providers: a list of tuples of the form: (host, port,
            is_ssl)
        :param user: username used for HTTP basic authentication
        :param password: password used for HTTP basic authentication
        :param http_timeout: how long to wait before aborting an
            unresponsive controller (and allow for retries to another
            controller in the cluster)
        :param retries: the number of http/https requests to retry.
        :param redirects: the number of redirects to follow.
        :param auto_login: whether to authenticate automatically on the
            first request.
        '''
        super(FortiAuthApiClient, self).__init__(
            api_providers, user, password,
            concurrent_connections=concurrent_connections,
            gen_timeout=gen_timeout, use_https=use_https,
            connect_timeout=connect_timeout, http_timeout=http_timeout,
            retries=retries, redirects=redirects, auto_login=auto_login)
        # Overall budget for a request across all retry attempts.
        self._request_timeout = http_timeout * retries
        self._http_timeout = http_timeout
        self._retries = retries
        self._redirects = redirects
        self._version = None
        # Holds the last rendered request (method/path/body) -- see request().
        self.message = {}
        self._user = user
        self._password = password
        self._auto_login = auto_login
        # Template module used to render named operations into HTTP requests.
        self._template = templates
    def _login(self, conn=None, headers=None):
        """ FortiAuthenticator use http basic auth, doesn't need to login,
        here reuse the name login to unify the API client process.
        :param conn: Not use here
        :param headers: Not use here
        :return: return authenticated Header
        """
        return {'Authorization': self.format_auth_basic()}
    def request(self, opt, content_type=DEFAULT_CONTENT_TYPE, **message):
        """
        Issues request to controller.

        :param opt: name of a template attribute in the fortiauth templates
            module describing the operation to perform.
        :param content_type: value for the Content-Type header.
        :param message: keyword values substituted into the template.
        :return: the (possibly post-processed) HTTP response.
        """
        # Render the named template into a concrete method/path/body dict.
        self.message = self.render(getattr(self._template, opt),
                                   content_type=content_type, **message)
        method = self.message['method']
        url = self.message['path']
        # 'body' is optional in rendered templates (e.g. for GET/DELETE).
        body = self.message['body'] if 'body' in self.message else None
        g = generic_request.GenericRequest(
            self, method, url, body, content_type, self.user_agent,
            auto_login=self._auto_login,
            http_timeout=self._http_timeout,
            retries=self._retries, redirects=self._redirects)
        response = g.start()
        return self.request_response(method, url, response)
    def request_response(self, method, url, response, **kwargs):
        # Attach the parsed body to the response object for callers;
        # a None/empty response is passed through untouched.
        if response:
            response.body = self.request_response_body(response)
        return response
| 39.411215 | 78 | 0.67465 |
acf98ab09ad4766a859aab7dab0903aa5ba0ecd3 | 959 | py | Python | tests/test_sl_eol_links.py | jar398/tryphy | 8dc0c713d3bd44126c3664e930625d641298b849 | [
"BSD-2-Clause"
] | null | null | null | tests/test_sl_eol_links.py | jar398/tryphy | 8dc0c713d3bd44126c3664e930625d641298b849 | [
"BSD-2-Clause"
] | 1 | 2018-08-27T19:19:22.000Z | 2018-08-28T14:41:08.000Z | tests/test_sl_eol_links.py | jar398/tryphy | 8dc0c713d3bd44126c3664e930625d641298b849 | [
"BSD-2-Clause"
] | null | null | null | # 10 continued. sl/eol/links - POST
# STUB
import sys, unittest, json
sys.path.append('./')
sys.path.append('../')
import webapp
from test_sl_eol_get_links import SlEolGetLinksTester
service = webapp.get_service(5004, 'sl/eol/links')
class TestSlEolLinks(SlEolGetLinksTester):
    """POST variant of the sl/eol/links service tests.

    Inherits all shared assertions from SlEolGetLinksTester and only
    overrides the service handle and HTTP method.
    """
    @classmethod
    def get_service(self):
        # NOTE(review): classmethod receives the class, conventionally
        # named `cls`; kept as `self` for byte-compatibility.
        return service
    @classmethod
    def http_method(self):
        return 'POST'
    # Insert here: edge case tests
    # Insert here: inputs out of range, leading to error or long delay
    # Insert here: error-generating conditions
    # (See ../README.md)
    def test_example_24(self):
        # Issues the canned request defined at module level and checks for
        # a successful response; payload contents are not yet validated.
        x = self.start_request_tests(example_24)
        self.assert_success(x)
        # Insert: whether result is what it should be according to docs
# Aliases so JSON-style literals can be pasted verbatim into this file.
null=None; false=False; true=True
# Canned POST request used by test_example_24 above.
example_24 = service.get_request('POST', {u'species': [u'Catopuma badia', u'Catopuma temminckii']})
if __name__ == '__main__':
    webapp.main()
| 25.918919 | 99 | 0.695516 |
acf98ab1643afa3ad92d7f87de63ca5246cf9333 | 25,630 | py | Python | all code (not organized)/Data load for SSRN.py | TaylorChris2/Virtuoso | 87a3d59141172d5daff0ae4725b843351b52fe63 | [
"Apache-2.0"
] | null | null | null | all code (not organized)/Data load for SSRN.py | TaylorChris2/Virtuoso | 87a3d59141172d5daff0ae4725b843351b52fe63 | [
"Apache-2.0"
] | null | null | null | all code (not organized)/Data load for SSRN.py | TaylorChris2/Virtuoso | 87a3d59141172d5daff0ae4725b843351b52fe63 | [
"Apache-2.0"
] | null | null | null | import sounddevice as sd
from scipy.signal import istft
from scipy.signal import stft
import librosa
import librosa.display
import midi
import skimage.transform
import numpy as np
import os
import h5py
import time
import matplotlib.pyplot as plt
import random
start_time = time.time()
def seperate_sets(midis, mels, set_size):
    """Split the parallel midi/mel lists into consecutive chunks.

    Each chunk holds at most ``set_size`` items; the final chunk may be
    shorter when the input length is not a multiple of ``set_size``.

    :param midis: sequence of midi spectrogram segments
    :param mels: sequence of mel/wav spectrogram segments, parallel to midis
    :param set_size: maximum number of items per chunk
    :return: ``(midi_sets, mel_sets)`` -- lists of chunks.  Unlike the
        original index-walking loop, empty inputs yield ``([], [])``
        instead of raising IndexError.
    """
    midi_sets = [list(midis[i:i + set_size])
                 for i in range(0, len(midis), set_size)]
    mel_sets = [list(mels[i:i + set_size])
                for i in range(0, len(mels), set_size)]
    return midi_sets, mel_sets
def save_data_set(set_, save_path, save_name):
    """Save ``set_`` as a gzip-compressed HDF5 dataset named ``all_data``.

    Overwrites any existing ``<save_name>.h5`` under ``save_path``.

    :param set_: array-like data to persist
    :param save_path: destination directory
    :param save_name: file name without the ``.h5`` extension
    """
    target = os.path.join(save_path, save_name) + ".h5"
    if os.path.exists(target):
        os.remove(target)
    # Context manager ensures the handle is flushed and closed; the
    # original left every file open, leaking handles and risking
    # truncated/locked HDF5 files.
    with h5py.File(target, "a") as hdf5_store:
        hdf5_store.create_dataset("all_data", data=set_, compression="gzip")
def split_train_val_test(set_):
    """Split a sequence into 70% train, 15% validation and 15% test slices."""
    n = len(set_)
    train_end = round(0.7 * n)
    val_end = round(0.85 * n)
    return set_[:train_end], set_[train_end:val_end], set_[val_end:]
def make_wave(freq, duration, sample_rate=22050):
    """Synthesize a cosine wave.

    :param freq: frequency in Hz
    :param duration: length in samples (truncated to int)
    :param sample_rate: samples per second (default 22050)
    :return: 1-D numpy array of ``int(duration)`` cosine samples.
        Vectorized with np.arange instead of a Python list comprehension;
        also returns an empty array for duration 0, where the original's
        ``np.stack([])`` raised ValueError.
    """
    # Phase advances by 2*pi*freq/sample_rate per sample.
    phase = np.arange(int(duration)) * (2.0 * np.pi * freq / sample_rate)
    return np.cos(phase)
def load_array(path):
    """Read and return the 'all_data' dataset stored in the HDF5 file at ``path``."""
    with h5py.File(path, 'r') as h5f:
        return h5f['all_data'][:]
def save_array(array, path):
    """Persist ``array`` to ``path`` as a gzip-compressed HDF5 dataset.

    Retries on OSError (e.g. the file is temporarily locked by another
    process), preserving the original retry-until-success behaviour, but no
    longer swallows unrelated exceptions (KeyboardInterrupt, bugs in the
    data) with a bare ``except`` -- those now propagate.

    :param array: array-like data to persist
    :param path: full destination path including extension
    """
    while True:
        try:
            if os.path.exists(path):
                os.remove(path)
            with h5py.File(path, "a") as hdf5_store:
                hdf5_store.create_dataset("all_data", data=array,
                                          compression="gzip")
            return
        except OSError:
            # File busy/locked -- back off briefly instead of spinning.
            time.sleep(0.1)
def note_number_2_duration(note_number):
    """Collapse a per-sample note roll into per-note duration records.

    :param note_number: array of shape (channels, time, >=2) where index 0
        is the pitch and index 1 is a note identifier that changes at note
        boundaries.  # assumed from the indexing below -- TODO confirm
    :return: list with one (num_notes, 3) array per channel; each row is
        [pitch, start_index, duration_in_samples].  Channels with no notes
        are dropped (np.stack of an empty list raises and is caught).
    """
    durations = []
    last_print = 0  # NOTE(review): unused; kept for byte-compatibility
    for n,channel in enumerate(note_number):
        durations.append([])
        for i,note in enumerate(channel):
            # A change in the note id relative to the previous sample marks
            # a note onset.  At i == 0 this compares against the LAST
            # sample (index -1 wraps) -- presumably intentional; verify.
            if note_number[n,i-1,1] != note[1]: ##note start
                ind = 0
                duration = 1
                # Walk forward until the note id changes again; the modulo
                # wraps the lookahead at the end of the channel.
                while True:
                    try:
                        if note_number[n,i+ind,1] != note_number[n,(i+ind+1)%(note_number.shape[1]),1]:
                            break
                        ind += 1
                        duration += 1
                    except:
                        # Out-of-range lookahead terminates the note.
                        break
                durations[n].append([note[0],i,duration])
    stacked = []
    for channel in durations:
        try:
            channel = np.stack(channel)
            stacked.append(channel)
        except Exception as e:
            # Empty channel: np.stack([]) raises; skip it.
            print(e)
            pass
    return stacked
def duration_2_wave(duration, gradient_fraction = 3, return_different_gradients = False, gradients = None):
    """Render duration records (from note_number_2_duration) into one waveform.

    :param duration: list of (num_notes, 3) arrays per channel; rows are
        [freq, start_index, duration_in_samples].  NOTE: here column 0 is
        fed directly to make_wave as a frequency in Hz, not a MIDI pitch.
    :param gradient_fraction: unused in this implementation.
    :param return_different_gradients: unused in this implementation.
    :param gradients: optional per-channel amplitude envelopes multiplied
        onto each channel before summing.
    :return: 1-D numpy array, the summed waveform of all channels.
    """
    midi_wave = []
    last = 0
    lengths = []
    # Total length = end of the last note in each channel; use the max.
    for n,channel in enumerate(duration):
        lengths.append(int(round(channel[-1,1]+channel[-1,2])))
    length = np.max(lengths)
    for n,channel in enumerate(duration):
        midi_wave.append(np.zeros(length))
        for i,note in enumerate(channel):
            if note[0]>0: ## pitch
                try:
                    if note[2] > 0: ## every note start
                        # NOTE(review): this assignment shadows the
                        # `duration` parameter and is immediately
                        # overwritten by `duration = note[2]` below --
                        # looks like leftover experimentation.
                        try:
                            duration = int(channel[i+1,1])-int(note[1])
                        except:
                            pass
                        duration = note[2]
                        wave = make_wave(note[0], duration, 22050)
                        # Copy the synthesized note into place, tracking
                        # the last written sample index.
                        for j,value in enumerate(wave):
                            midi_wave[n][int(note[1])+j]=wave[j]
                            if (int(note[1])+j) > last:
                                last = int(note[1])+j
                except Exception as e:
                    # NOTE(review): `last_start` is never defined, so this
                    # diagnostic branch itself raises NameError if reached.
                    print(e)
                    print(last_start, i)
                    cont = input("...")
    # NOTE(review): midi_wave[:][:last+1] slices the CHANNEL list, not the
    # samples -- likely intended to truncate each channel to last+1.
    midi_wave = midi_wave[:][:last+1]
    actual_wave = np.zeros(midi_wave[0].shape[0])
    for n,channel in enumerate(midi_wave):
        # Apply optional amplitude envelopes, then mix channels by summing.
        if gradients is not None:
            for gradient in gradients:
                channel*=gradient[n]
        actual_wave += channel
    return actual_wave
def load_wave(path):
    """Concatenate numbered HDF5 chunks (0.h5, 1.h5, ...) into one waveform.

    Files are read in increasing numeric order; missing leading numbers are
    skipped until the first existing file, after which the first read
    failure terminates the loop.

    :param path: directory containing the numbered .h5 chunk files
    :return: 1-D numpy array of all samples in order
    """
    complete_wave = []
    file = 0
    first = False
    while True:
        try:
            wave_array = load_array(path+"/"+str(file)+".h5")
            first = True
            for moment in wave_array:
                complete_wave.append(moment)
            file+=1
        except:
            # NOTE(review): bare except doubles as both "file N missing,
            # keep scanning" (before the first hit) and "end of sequence"
            # (after) -- it also hides genuine I/O errors.
            if first:
                break
            else:
                file+=1
    complete_wave = np.stack(complete_wave)
    return complete_wave
def load_graph(path):
    """Concatenate numbered HDF5 chunks into one per-channel gradient graph.

    Like load_wave, but each chunk is 2-D (channels x time); the chunks are
    concatenated along the time axis per channel.

    :param path: directory containing the numbered .h5 chunk files
    :return: numpy array of shape (channels, total_time)
    """
    complete_graph = []
    # One accumulator list per channel, sized from the first file present.
    for i in range(0, load_array(path+"/"+os.listdir(path)[0]).shape[0]):
        complete_graph.append([])
    file = 0
    first = False
    while True:
        try:
            array = load_array(path+"/"+str(file)+".h5")
            first = True
            for n,channel in enumerate(array):
                for moment in channel:
                    complete_graph[n].append(moment)
            file+=1
        except:
            # NOTE(review): same bare-except control flow as load_wave --
            # skips missing leading files, stops after the first gap once
            # any file has loaded; real errors are swallowed too.
            if first:
                break
            else:
                file+=1
    complete_graph = np.stack(complete_graph)
    return complete_graph
def note_number_to_wave(note_number, gradient_fraction=3, end_gradient = True, start_gradient = True, rescale_factor=1):
    """Render a MIDI-pitch note roll into audio with attack/release ramps.

    :param note_number: array (channels, time, >=2); index 0 = MIDI pitch,
        index 1 = note id changing at boundaries.  # assumed -- confirm
    :param gradient_fraction: unused here (ramp length is hard-coded 1800;
        the duplicate definition later in this file uses it instead).
    :param end_gradient: apply the fade-out envelope when mixing.
    :param start_gradient: apply the fade-in envelope when mixing.
    :param rescale_factor: time-axis rescale applied before rendering.
    :return: (normalized_mix, per_channel_waves, start_gradients,
        end_gradients).
    """
    last = 0
    # Stretch the roll along time, then round so note ids stay integral.
    rescaled_note_number = np.round(skimage.transform.rescale(note_number, (1, rescale_factor, 1)))
    midi_wave = rescaled_note_number.copy()[:,:,0]
    start_gradients = rescaled_note_number.copy()[:,:,0]
    end_gradients = rescaled_note_number.copy()[:,:,0]
    print("note number shapes:",note_number.shape,rescaled_note_number.shape)
    midi_wave[:] = 0
    start_gradients[:] = 1
    end_gradients[:] = 1
    for n,channel in enumerate(rescaled_note_number):
        for i,note in enumerate(channel):
            if note[0]>0: ## pitch
                try:
                    # Onset = note id changed vs previous sample AND is
                    # stable 500 samples ahead (debounces rescale jitter).
                    if note[1] != channel[i-1][1] and channel[i][1] == channel[i+500][1] : ## every note start
                        wave_duration = 1
                        ind = 0
                        # Scan forward to the next debounced id change (or
                        # end of channel) to get the note length.
                        while True:
                            if i+ind >= channel.shape[0]-1 or (note[1] != channel[i+ind+1][1] and channel[i+ind+1][1] == channel[i+ind+500][1]):
                                break
                            wave_duration += 1
                            ind+=1
                        # MIDI pitch -> Hz (A4 = 69 = 440 Hz); pitch sampled
                        # at the midpoint of the note.
                        freq = 440*(2**((channel[i+int(wave_duration/2)][0]-69)/12))
                        wave = make_wave(freq, wave_duration, 22050)
                        # Fixed 1800-sample linear ramp for attack/release.
                        general_gradient_amt = 1800#int(wave_duration/gradient_fraction)
                        general_gradient = []
                        for g in range(0,general_gradient_amt):
                            general_gradient.append(g/general_gradient_amt)
                        for j,value in enumerate(wave):
                            if midi_wave[n][i+j] != 0:
                                print("oof")
                            midi_wave[n][i+j]=value
                            try:
                                # Fade-in at the start, mirrored fade-out
                                # at the end of the note.
                                start_gradients[n][i+j] = general_gradient[j]
                                #if end_gradients[n][i+j] != 1:
                                #    print("oof")
                                end_gradients[n][i+(wave_duration-j)-1] = general_gradient[j]
                                #if start_gradients[n][i+(wave_duration-j)-1] != 1:
                                #    print("oof")
                            except Exception as e:
                                pass
                            if i+j > last:
                                last = i+j
                except Exception as e:
                    # NOTE(review): `last_start` is undefined; this
                    # diagnostic branch raises NameError if ever reached,
                    # and input() will block unattended runs.
                    print(i+ind)
                    print(ind)
                    print(channel.shape[0])
                    print(note[1])
                    print(channel[i+ind+1][1])
                    print(e)
                    print(last_start, i)
                    cont = input("...")
    # NOTE(review): midi_wave[:][:last+1] slices channels, not samples.
    midi_wave = midi_wave[:][:last+1]
    actual_wave = np.zeros(midi_wave[0].shape[0])
    for n,channel in enumerate(midi_wave):
        # Mix channels with the selected envelopes applied.
        if end_gradient:
            print("using end gradient")
            channel*=end_gradients[n]
        if start_gradient:
            print("using start gradient")
            channel*=start_gradients[n]
            print(start_gradients[n][0])
        actual_wave += channel
    return actual_wave/np.max(actual_wave), midi_wave, start_gradients, end_gradients
class hp:
    """Hyperparameters for the spectrogram pipeline (DC-TTS-style config)."""
    prepro = True # if True, run `python prepro.py` first before running `python train.py`.
    # signal processing
    sr = 22050 # Sampling rate.
    n_fft = 2048 # fft points (samples)
    frame_shift = 0.003125 # seconds
    frame_length = 0.0125 # seconds
    hop_length = int(sr * frame_shift) # samples. =68 at this frame_shift (stale comment said 276).
    win_length = int(sr * frame_length) # samples. =275 at this frame_length (stale comment said 1102).
    n_mels = 128 # Number of Mel banks to generate
    power = 1.5 # Exponent for amplifying the predicted magnitude
    n_iter = 100 # Number of inversion iterations
    preemphasis = .97
    max_db = 100
    ref_db = 20
    # Model
    r = 4 # Reduction factor. Do not change this.
    dropout_rate = 0.05
    e = 128 # == embedding
    d = 256 # == hidden units of Text2Mel
    c = 512 # == hidden units of SSRN
    attention_win_size = 3
    # data
    data = "/data/private/voice/LJSpeech-1.0"
    # data = "/data/private/voice/kate"
    test_data = 'harvard_sentences.txt'
    vocab = "PE abcdefghijklmnopqrstuvwxyz'.?" # P: Padding, E: EOS.
    max_N = 180 # Maximum number of characters.
    max_T = 512 # Maximum number of mel frames.
    # training scheme
    lr = 0.001 # Initial learning rate.
    logdir = "logdir/LJ01"
    sampledir = 'samples'
    B = 32 # batch size
    num_iterations = 2000000
def get_spectrograms(wave):
    '''Compute normalized mel and linear magnitude spectrograms of `wave`.
    Args:
      wave: 1-D numpy float array -- a mono waveform sampled at hp.sr.
        (Docstring previously claimed a file path; the argument is the
        raw signal itself.)
    Returns:
      mel: A 2d array of shape (T, n_mels) and dtype of float32.
      mag: A 2d array of shape (T, 1+n_fft/2) and dtype of float32.
    '''
    # Loading sound file
    y = wave
    # Trimming
    #y, _ = librosa.effects.trim(y)
    # Preemphasis
    y = np.append(y[0], y[1:] - hp.preemphasis * y[:-1])
    # stft
    linear = librosa.stft(y=y,
                          n_fft=hp.n_fft,
                          hop_length=hp.hop_length,
                          win_length=hp.win_length)
    # magnitude spectrogram
    mag = np.abs(linear)  # (1+n_fft//2, T)
    # mel spectrogram
    mel_basis = librosa.filters.mel(hp.sr, hp.n_fft, hp.n_mels)  # (n_mels, 1+n_fft//2)
    mel = np.dot(mel_basis, mag)  # (n_mels, t)
    # to decibel
    mel = 20 * np.log10(np.maximum(1e-5, mel))
    mag = 20 * np.log10(np.maximum(1e-5, mag))
    # normalize to [~0, 1] relative to ref_db/max_db
    mel = np.clip((mel - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)
    mag = np.clip((mag - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)
    # Transpose
    mel = mel.T.astype(np.float32)  # (T, n_mels)
    mag = mag.T.astype(np.float32)  # (T, 1+n_fft/2)
    return mel, mag
def load_spectrograms(wave):
    '''Compute reduced mel, full-rate mel, and magnitude spectrograms.
    Args:
      wave: 1-D numpy float array (raw waveform, not a file path).
    Returns:
      mel: time-reduced mel (every hp.r-th frame), shape (T/r, n_mels)
      inter: full-rate mel before reduction, shape (T, n_mels)
      mag: linear magnitude spectrogram, shape (T, 1+n_fft/2)
    '''
    mel, mag = get_spectrograms(wave)
    t = mel.shape[0]
    # Marginal padding for reduction shape sync.
    num_paddings = hp.r - (t % hp.r) if t % hp.r != 0 else 0
    mel = np.pad(mel, [[0, num_paddings], [0, 0]], mode="constant")
    mag = np.pad(mag, [[0, num_paddings], [0, 0]], mode="constant")
    # Reduction: keep every hp.r-th mel frame; `inter` keeps the original.
    inter = mel.copy()
    mel = mel[::hp.r, :]
    return mel, inter, mag
def invert_spectrogram(spectrogram):
    '''Applies inverse fft (ISTFT with the pipeline's hop/window settings).
    Args:
      spectrogram: [1+n_fft//2, t], complex STFT
    Returns:
      1-D real time-domain signal.
    '''
    return librosa.istft(spectrogram, hp.hop_length, win_length=hp.win_length, window="hann")
def griffin_lim(spectrogram):
    '''Reconstruct a waveform from a magnitude spectrogram via Griffin-Lim.

    Iteratively estimates phase: invert to time domain, re-STFT, keep the
    new phase, re-impose the known magnitude.

    Args:
      spectrogram: magnitude spectrogram, shape (1+n_fft//2, t)
    Returns:
      1-D real-valued time-domain signal.
    '''
    # np.copy replaces copy.deepcopy: the `copy` module was never imported
    # in this file, so the original raised NameError on first call.
    X_best = np.copy(spectrogram)
    for _ in range(hp.n_iter):
        # (Per-iteration debug print removed.)
        X_t = invert_spectrogram(X_best)
        est = librosa.stft(X_t, hp.n_fft, hp.hop_length, win_length=hp.win_length)
        phase = est / np.maximum(1e-8, np.abs(est))
        X_best = spectrogram * phase
    X_t = invert_spectrogram(X_best)
    return np.real(X_t)
def load_array(path):
    """Return the 'all_data' dataset from the HDF5 file at ``path``.

    (Duplicate definition -- shadows the identical one earlier in this file.)
    """
    with h5py.File(path, 'r') as h5_file:
        return h5_file['all_data'][:]
def make_wave(freq, duration, sample_rate=22050):
    """Synthesize a cosine wave of ``freq`` Hz lasting ``duration`` samples.

    (Duplicate definition -- shadows the identical one earlier in this file.)
    Vectorized; returns an empty array for duration 0 instead of the
    original's ``np.stack([])`` ValueError, and drops the dead commented-out
    playback code.

    :param freq: frequency in Hz
    :param duration: length in samples (truncated to int)
    :param sample_rate: samples per second
    :return: 1-D numpy array of cosine samples
    """
    phase = np.arange(int(duration)) * (2.0 * np.pi * freq / sample_rate)
    return np.cos(phase)
def note_number_to_wave(note_number, gradient_fraction=3, end_gradient = True, start_gradient = True, rescale_factor=1):
    """Render a MIDI-pitch note roll into audio with attack/release ramps.

    Duplicate definition -- shadows the earlier copy.  The only difference:
    the ramp length here is ``wave_duration / gradient_fraction`` instead of
    a fixed 1800 samples.  See the earlier copy for detailed notes.
    """
    last = 0
    rescaled_note_number = np.round(skimage.transform.rescale(note_number, (1, rescale_factor, 1)))
    midi_wave = rescaled_note_number.copy()[:,:,0]
    start_gradients = rescaled_note_number.copy()[:,:,0]
    end_gradients = rescaled_note_number.copy()[:,:,0]
    print("note number shapes:",note_number.shape,rescaled_note_number.shape)
    midi_wave[:] = 0
    start_gradients[:] = 1
    end_gradients[:] = 1
    for n,channel in enumerate(rescaled_note_number):
        for i,note in enumerate(channel):
            if note[0]>0: ## pitch
                try:
                    # Onset: note id changed and is stable 500 samples out.
                    if note[1] != channel[i-1][1] and channel[i][1] == channel[i+500][1] : ## every note start
                        wave_duration = 1
                        ind = 0
                        while True:
                            if i+ind >= channel.shape[0]-1 or (note[1] != channel[i+ind+1][1] and channel[i+ind+1][1] == channel[i+ind+500][1]):
                                break
                            wave_duration += 1
                            ind+=1
                        # MIDI pitch -> Hz, sampled at the note midpoint.
                        freq = 440*(2**((channel[i+int(wave_duration/2)][0]-69)/12))
                        wave = make_wave(freq, wave_duration, 22050)
                        # Ramp length proportional to the note duration.
                        general_gradient_amt = int(wave_duration/gradient_fraction)
                        general_gradient = []
                        for g in range(0,general_gradient_amt):
                            general_gradient.append(g/general_gradient_amt)
                        for j,value in enumerate(wave):
                            if midi_wave[n][i+j] != 0:
                                print("oof")
                            midi_wave[n][i+j]=value
                            try:
                                start_gradients[n][i+j] = general_gradient[j]
                                #if end_gradients[n][i+j] != 1:
                                #    print("oof")
                                end_gradients[n][i+(wave_duration-j)-1] = general_gradient[j]
                                #if start_gradients[n][i+(wave_duration-j)-1] != 1:
                                #    print("oof")
                            except Exception as e:
                                pass
                            if i+j > last:
                                last = i+j
                except Exception as e:
                    # NOTE(review): `last_start` is undefined here too.
                    print(i+ind)
                    print(ind)
                    print(channel.shape[0])
                    print(note[1])
                    print(channel[i+ind+1][1])
                    print(e)
                    print(last_start, i)
                    cont = input("...")
    midi_wave = midi_wave[:][:last+1]
    actual_wave = np.zeros(midi_wave[0].shape[0])
    for n,channel in enumerate(midi_wave):
        if end_gradient:
            print("using end gradient")
            channel*=end_gradients[n]
        if start_gradient:
            print("using start gradient")
            channel*=start_gradients[n]
            print(start_gradients[n][0])
        actual_wave += channel
    return actual_wave/np.max(actual_wave), midi_wave, start_gradients, end_gradients
def note_number_2_duration(note_number):
    """Collapse a per-sample note roll into per-note duration records.

    Duplicate definition -- identical to (and shadows) the earlier copy;
    see it for detailed notes.  Returns a list of (num_notes, 3) arrays of
    [pitch, start_index, duration_in_samples] per channel.
    """
    durations = []
    last_print = 0  # NOTE(review): unused; kept for byte-compatibility
    for n,channel in enumerate(note_number):
        durations.append([])
        for i,note in enumerate(channel):
            if note_number[n,i-1,1] != note[1]: ##note start
                ind = 0
                duration = 1
                while True:
                    try:
                        if note_number[n,i+ind,1] != note_number[n,(i+ind+1)%(note_number.shape[1]),1]:
                            break
                        ind += 1
                        duration += 1
                    except:
                        break
                durations[n].append([note[0],i,duration])
    stacked = []
    for channel in durations:
        try:
            channel = np.stack(channel)
            stacked.append(channel)
        except Exception as e:
            # Empty channel: np.stack([]) raises; skip it.
            print(e)
            pass
    return stacked
def spectrogram2wav(mag):
    '''# Generate wave file from linear magnitude spectrogram
    Args:
      mag: A numpy array of (T, 1+n_fft//2), normalized to [0, 1]
    Returns:
      wav: A 1-D numpy array (float32).
    '''
    # Local import fixes a NameError: only `stft`/`istft` were imported
    # from scipy.signal at module level, so the original's bare
    # `signal.lfilter` reference was undefined.
    from scipy.signal import lfilter
    # transpose -> (1+n_fft//2, T) as expected by griffin_lim
    mag = mag.T
    # de-normalize back to dB
    mag = (np.clip(mag, 0, 1) * hp.max_db) - hp.max_db + hp.ref_db
    # to amplitude
    mag = np.power(10.0, mag * 0.05)
    # wav reconstruction
    wav = griffin_lim(mag**hp.power)
    # de-preemphasis: inverse of the preemphasis filter in get_spectrograms
    wav = lfilter([1], [1, -hp.preemphasis], wav)
    # trim leading/trailing silence
    wav, _ = librosa.effects.trim(wav)
    return wav.astype(np.float32)
# --- Script configuration ----------------------------------------------------
slide_window = 64   # hop (in mel frames) between successive training windows
set_size = 2048     # examples per saved HDF5 shard
# The twelve synced recordings live in numbered subfolders 0..11.
# (Replaces twelve copy-pasted pathes.append(...) lines.)
_WAVE_ROOT = ("C:/Users/JiangQin/Documents/python/Music Composition Project/"
              "Music data/violin/synced/waveforms with gradient graphs")
pathes = [_WAVE_ROOT + "/" + str(i) for i in range(12)]
save_folder_path = "C:/Users/JiangQin/Documents/python/Music Composition Project/Music data/violin/Midis and Mels for Machine Learning SSRN 128 nmels slide 32 512 time with quarter frame"
frequency_clip_midi = 512 ##amount of frequencies to be included
frequency_clip_wav = 512 ##amount of frequencies to be included
time_split = hp.max_T ##mel frames per training window (not milliseconds)
midis = []
wavs = []
sets = 0
sets_ = []       # NOTE(review): never used below
start_index = 0  # NOTE(review): never used below
# Per-recording processing: load the waveform, compute mel/mag spectrograms,
# then cut both into overlapping windows (mel window = time_split frames,
# mag window = time_split * hp.r frames, sliding by slide_window[_mag]).
for set_num in range(0,len(pathes)):
    path = pathes[set_num]
    print(path)
    ###loading in spectrograms-----------------------------------------------------------
    y = load_wave(path+"/wavs")
    # Normalize peak amplitude to 0.1.
    y = y*0.1/np.max(y)
    # NOTE(review): wave_2d is computed but never used afterwards.
    wave_2d = np.zeros((y.shape[0]//100+1,(y.shape[0]//500)))
    for t,thing in enumerate(y):
        wave_2d[(t//100)][int(thing*wave_2d.shape[1])] = 1
    mel, inter, mag = load_spectrograms(y)
    print(mel.shape,inter.shape,mag.shape,y.shape)
    sets+=1
    timef_midi = mel
    timef_wav = mag
    print("specgram shapes:", timef_midi.shape,timef_wav.shape)
    print(np.max(timef_wav))
    print(np.min(timef_wav))
    print("Converted to spectrogram.")
    delete_last = False
    print("Split wav spectrograms.")
    # --- Window the reduced mel spectrogram ---
    index = 0
    segments = []
    start = 0
    end = time_split
    while True:
        segments.append(np.array(timef_midi[start:end]))
        start += slide_window
        end += slide_window
        if np.array(timef_midi[start:end]).shape[0] < time_split:
            break
    ##padding the ending
    # Zero-pad the last window if it is long enough to keep; otherwise drop
    # it and remember to drop the matching mag window below.
    if segments[-1].shape[0] > 1000:
        padding_amt = time_split-segments[-1].shape[0]
        padding = np.zeros((padding_amt, segments[-1].shape[1]))
        new_last = []
        for time_ in segments[-1]:
            new_last.append(time_)
        for pad in padding:
            #print("pad",pad)
            new_last.append(pad)
        segments[-1] = np.stack(new_last)
    else:
        print(segments[-1].shape)
        del segments[-1]
        delete_last = True
    for segment in segments:
        midis.append(segment)
    # --- Window the magnitude spectrogram (r-times finer time axis) ---
    time_split_mag=time_split*hp.r
    slide_window_mag=slide_window*hp.r
    print(time_split_mag,slide_window)
    index = 0
    segments = []
    start = 0
    end = time_split_mag
    while True:
        segments.append(np.array(timef_wav[start:end]))
        start += slide_window_mag
        end += slide_window_mag
        if np.array(timef_wav[start:end]).shape[0] < time_split_mag:
            break
    if not delete_last:
        padding_amt = time_split_mag-segments[-1].shape[0]
        padding = np.zeros((padding_amt, segments[-1].shape[1]))
        new_last = []
        for time_ in segments[-1]:
            new_last.append(time_)
        for pad in padding:
            new_last.append(pad)
        segments[-1] = np.stack(new_last)
    else:
        print("DELETING LAST, LESS THAN 3 SECONDS LONG")
        del segments[-1]
        delete_last = True
    for segment in segments:
        wavs.append(segment)
    print("Split midi spectrograms.")
    print("Loaded in" ,len(segments), "sets in", int((time.time() - start_time)/60), "minutes and",
          int(((time.time() - start_time) % 60)+1), "seconds.")
# Shuffle the dataset: draw a uniform random permutation of indices.
# (Replaces the original O(n^2) rejection-sampling loop that redrew
# random indices until an unused one was found, and drops the per-index
# debug prints.)
new_indexes = random.sample(range(len(midis)), len(midis))
print(new_indexes)
print(len(midis),len(wavs))
new_midis = [midis[index] for index in new_indexes]
new_wavs = [wavs[index] for index in new_indexes]
# NOTE(review): new_midis/new_wavs are never used below -- the saving code
# operates on the un-shuffled midis/wavs lists.  This looks unintentional;
# confirm before wiring the shuffled lists through.
print("Loaded in" ,len(midis),len(wavs), "sets from", sets, "folders in", int((time.time() - start_time)/60), "minutes and",
      int(((time.time() - start_time) % 60)+1), "seconds.")
# Chunk the full dataset into shards of set_size examples each.
midi_sets, wav_sets = seperate_sets(midis, wavs, set_size)
print(len(midi_sets))
start_time = time.time()
print("\nSaving loaded data in: " + save_folder_path + "...")
if not os.path.exists(save_folder_path):
    os.makedirs(save_folder_path)
# Each shard is split 70/15/15 into train/val/test and saved separately.
for n, set_ in enumerate(midi_sets):
    train_midis, val_midis, test_midis = split_train_val_test(set_)
    print(len(train_midis), len(val_midis), len(test_midis))
    save_data_set(train_midis, save_folder_path, "Train Midis "+str(n))
    save_data_set(val_midis, save_folder_path, "Val Midis "+str(n))
    save_data_set(test_midis, save_folder_path, "Test Midis "+str(n))
print("Finished saving midis. Proceeding to save wavs...")
for n, set_ in enumerate(wav_sets):
    train_wavs, val_wavs, test_wavs = split_train_val_test(set_)
    save_data_set(train_wavs, save_folder_path, "Train Wavs "+str(n))
    save_data_set(val_wavs, save_folder_path, "Val Wavs "+str(n))
    save_data_set(test_wavs, save_folder_path, "Test Wavs "+str(n))
print("Finished saving wavs.")
print("\nAll data finished saving in", int((time.time() - start_time)/60), "minutes and ",
      int(((time.time() - start_time) % 60)+1), "seconds.")
| 35.597222 | 188 | 0.547483 |
acf98b2fa7cba432b91de7f6fd03a587ea8c50ff | 12,189 | py | Python | ch15/ch15_part2.py | Business-Wizard/python-machine-learning-book-3rd-edition | 2dd7a32967bf10a4d33414c14e5ddb04370f67e6 | [
"MIT"
] | null | null | null | ch15/ch15_part2.py | Business-Wizard/python-machine-learning-book-3rd-edition | 2dd7a32967bf10a4d33414c14e5ddb04370f67e6 | [
"MIT"
] | 1 | 2022-02-07T20:25:04.000Z | 2022-02-07T20:25:04.000Z | ch15/ch15_part2.py | Business-Wizard/python-machine-learning-book-3rd-edition | 2dd7a32967bf10a4d33414c14e5ddb04370f67e6 | [
"MIT"
] | null | null | null | # coding: utf-8
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from collections import Counter
# *Python Machine Learning 3rd Edition* by [Sebastian Raschka](https://sebastianraschka.com) & [Vahid Mirjalili](http://vahidmirjalili.com), Packt Publishing Ltd. 2019
#
# Code Repository: https://github.com/rasbt/python-machine-learning-book-3rd-edition
#
# Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-3rd-edition/blob/master/LICENSE.txt)
# # Chapter 15: Classifying Images with Deep Convolutional Neural Networks (Part 2/2)
# Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
# ## Gender classification from face images using CNN
#
# ### Loading the CelebA dataset
# Download/prepare the CelebA dataset via tensorflow-datasets and pull out
# its predefined train/validation/test splits.
celeba_bldr = tfds.builder('celeb_a')
celeba_bldr.download_and_prepare()
celeba = celeba_bldr.as_dataset(shuffle_files=False)
print(celeba.keys())
celeba_train = celeba['train']
celeba_valid = celeba['validation']
celeba_test = celeba['test']
def count_items(ds):
    """Count the elements of an iterable dataset by exhausting it."""
    total = 0
    for _ in ds:
        total += 1
    return total
print('Train set: {}'.format(count_items(celeba_train)))
print('Validation: {}'.format(count_items(celeba_valid)))
print('Test set: {}'.format(count_items(celeba_test)))
celeba_train = celeba_train.take(16000)
celeba_valid = celeba_valid.take(1000)
print('Train set: {}'.format(count_items(celeba_train)))
print('Validation: {}'.format(count_items(celeba_valid)))
# ### Image transformation and data augmentation
## take 5 examples:
examples = [example['image'] for example in celeba_train.take(5)]
fig = plt.figure(figsize=(16, 8.5))
## Column 1: cropping to a bounding-box
ax = fig.add_subplot(2, 5, 1)
ax.imshow(examples[0])
ax = fig.add_subplot(2, 5, 6)
ax.set_title('Crop to a \nbounding-box', size=15)
img_cropped = tf.image.crop_to_bounding_box(
examples[0], 50, 20, 128, 128)
ax.imshow(img_cropped)
## Column 2: flipping (horizontally)
ax = fig.add_subplot(2, 5, 2)
ax.imshow(examples[1])
ax = fig.add_subplot(2, 5, 7)
ax.set_title('Flip (horizontal)', size=15)
img_flipped = tf.image.flip_left_right(examples[1])
ax.imshow(img_flipped)
## Column 3: adjust contrast
ax = fig.add_subplot(2, 5, 3)
ax.imshow(examples[2])
ax = fig.add_subplot(2, 5, 8)
ax.set_title('Adjust constrast', size=15)
img_adj_contrast = tf.image.adjust_contrast(
examples[2], contrast_factor=2)
ax.imshow(img_adj_contrast)
## Column 4: adjust brightness
ax = fig.add_subplot(2, 5, 4)
ax.imshow(examples[3])
ax = fig.add_subplot(2, 5, 9)
ax.set_title('Adjust brightness', size=15)
img_adj_brightness = tf.image.adjust_brightness(
examples[3], delta=0.3)
ax.imshow(img_adj_brightness)
## Column 5: cropping from image center
ax = fig.add_subplot(2, 5, 5)
ax.imshow(examples[4])
ax = fig.add_subplot(2, 5, 10)
ax.set_title('Centeral crop\nand resize', size=15)
img_center_crop = tf.image.central_crop(
examples[4], 0.7)
img_resized = tf.image.resize(
img_center_crop, size=(218, 178))
ax.imshow(img_resized.numpy().astype('uint8'))
# plt.savefig('figures/15_14.png', dpi=300)
plt.show()
tf.random.set_seed(1)
fig = plt.figure(figsize=(14, 12))
for i,example in enumerate(celeba_train.take(3)):
image = example['image']
ax = fig.add_subplot(3, 4, i*4+1)
ax.imshow(image)
if i == 0:
ax.set_title('Orig.', size=15)
ax = fig.add_subplot(3, 4, i*4+2)
img_crop = tf.image.random_crop(image, size=(178, 178, 3))
ax.imshow(img_crop)
if i == 0:
ax.set_title('Step 1: Random crop', size=15)
ax = fig.add_subplot(3, 4, i*4+3)
img_flip = tf.image.random_flip_left_right(img_crop)
ax.imshow(tf.cast(img_flip, tf.uint8))
if i == 0:
ax.set_title('Step 2: Random flip', size=15)
ax = fig.add_subplot(3, 4, i*4+4)
img_resize = tf.image.resize(img_flip, size=(128, 128))
ax.imshow(tf.cast(img_resize, tf.uint8))
if i == 0:
ax.set_title('Step 3: Resize', size=15)
# plt.savefig('figures/15_15.png', dpi=300)
plt.show()
def preprocess(example, size=(64, 64), mode='train'):
    """Map a CelebA example to an (image, gender_label) training pair.

    In 'train' mode applies data augmentation (random 178x178 crop, resize,
    random horizontal flip); otherwise uses a deterministic center-ish crop
    (top 20px skipped) and resize.  Pixels are scaled to [0, 1].

    :param example: tfds CelebA example dict with 'image' and
        'attributes'['Male'] entries.
    :param size: output (height, width) of the resized image.
    :param mode: 'train' for augmented output, anything else for eval.
    :return: (float image tensor, int32 label) tuple.
    """
    image = example['image']
    label = example['attributes']['Male']
    if mode == 'train':
        image_cropped = tf.image.random_crop(
            image, size=(178, 178, 3))
        image_resized = tf.image.resize(
            image_cropped, size=size)
        image_flip = tf.image.random_flip_left_right(
            image_resized)
        return (image_flip/255.0, tf.cast(label, tf.int32))
    else:
        # Deterministic eval crop: fixed 178x178 box starting 20px down.
        image_cropped = tf.image.crop_to_bounding_box(
            image, offset_height=20, offset_width=0,
            target_height=178, target_width=178)
        image_resized = tf.image.resize(
            image_cropped, size=size)
        return (image_resized/255.0, tf.cast(label, tf.int32))
## testing:
#item = next(iter(celeba_train))
#preprocess(item, mode='train')
tf.random.set_seed(1)
ds = celeba_train.shuffle(1000, reshuffle_each_iteration=False)
ds = ds.take(2).repeat(5)
ds = ds.map(lambda x:preprocess(x, size=(178, 178), mode='train'))
fig = plt.figure(figsize=(15, 6))
for j,example in enumerate(ds):
ax = fig.add_subplot(2, 5, j//2+(j%2)*5+1)
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(example[0])
#plt.savefig('figures/15_16.png', dpi=300)
plt.show()
BATCH_SIZE = 32
BUFFER_SIZE = 1000
IMAGE_SIZE = (64, 64)
steps_per_epoch = np.ceil(16000/BATCH_SIZE)
print(steps_per_epoch)
ds_train = celeba_train.map(
lambda x: preprocess(x, size=IMAGE_SIZE, mode='train'))
ds_train = ds_train.shuffle(buffer_size=BUFFER_SIZE).repeat()
ds_train = ds_train.batch(BATCH_SIZE)
ds_valid = celeba_valid.map(
lambda x: preprocess(x, size=IMAGE_SIZE, mode='eval'))
ds_valid = ds_valid.batch(BATCH_SIZE)
# ### Training a CNN gender classifier
#
# * **Global Average Pooling**
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(
32, (3, 3), padding='same', activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(rate=0.5),
tf.keras.layers.Conv2D(
64, (3, 3), padding='same', activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(rate=0.5),
tf.keras.layers.Conv2D(
128, (3, 3), padding='same', activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Conv2D(
256, (3, 3), padding='same', activation='relu'),
])
model.compute_output_shape(input_shape=(None, 64, 64, 3))
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.compute_output_shape(input_shape=(None, 64, 64, 3))
model.add(tf.keras.layers.Dense(1, activation=None))
tf.random.set_seed(1)
model.build(input_shape=(None, 64, 64, 3))
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
history = model.fit(ds_train, validation_data=ds_valid,
epochs=20, steps_per_epoch=steps_per_epoch)
hist = history.history
x_arr = np.arange(len(hist['loss'])) + 1
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1, 2, 1)
ax.plot(x_arr, hist['loss'], '-o', label='Train loss')
ax.plot(x_arr, hist['val_loss'], '--<', label='Validation loss')
ax.legend(fontsize=15)
ax.set_xlabel('Epoch', size=15)
ax.set_ylabel('Loss', size=15)
ax = fig.add_subplot(1, 2, 2)
ax.plot(x_arr, hist['accuracy'], '-o', label='Train acc.')
ax.plot(x_arr, hist['val_accuracy'], '--<', label='Validation acc.')
ax.legend(fontsize=15)
ax.set_xlabel('Epoch', size=15)
ax.set_ylabel('Accuracy', size=15)
#plt.savefig('figures/15_18.png', dpi=300)
plt.show()
ds_test = celeba_test.map(
lambda x:preprocess(x, size=IMAGE_SIZE, mode='eval')).batch(32)
results = model.evaluate(ds_test, verbose=0)
print('Test Acc: {:.2f}%'.format(results[1]*100))
history = model.fit(ds_train, validation_data=ds_valid,
epochs=30, initial_epoch=20,
steps_per_epoch=steps_per_epoch)
hist2 = history.history
x_arr = np.arange(len(hist['loss'] + hist2['loss']))
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1, 2, 1)
ax.plot(x_arr, hist['loss']+hist2['loss'],
'-o', label='Train Loss')
ax.plot(x_arr, hist['val_loss']+hist2['val_loss'],
'--<', label='Validation Loss')
ax.legend(fontsize=15)
ax = fig.add_subplot(1, 2, 2)
ax.plot(x_arr, hist['accuracy']+hist2['accuracy'],
'-o', label='Train Acc.')
ax.plot(x_arr, hist['val_accuracy']+hist2['val_accuracy'],
'--<', label='Validation Acc.')
ax.legend(fontsize=15)
plt.show()
ds_test = celeba_test.map(
lambda x:preprocess(x, size=IMAGE_SIZE, mode='eval')).batch(32)
results = model.evaluate(ds_test, verbose=0)
print('Test Acc: {:.2f}%'.format(results[1]*100))
ds = ds_test.unbatch().take(10)
pred_logits = model.predict(ds.batch(10))
probas = tf.sigmoid(pred_logits)
probas = probas.numpy().flatten()*100
fig = plt.figure(figsize=(15, 7))
for j,example in enumerate(ds):
ax = fig.add_subplot(2, 5, j+1)
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(example[0])
label = 'Male' if example[1].numpy() == 1 else 'Female'
ax.text(
0.5, -0.15,
'GT: {:s}\nPr(Male)={:.0f}%'.format(label, probas[j]),
size=16,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
#plt.savefig('figures/figures-15_19.png', dpi=300)
plt.show()
model.save('models/celeba-cnn.h5')
# ...
#
#
# ## Summary
#
# ...
#
#
# ## Appendix:
#
# ### The effect of initial shuffling
## MNIST dataset
#datasets = tfds.load(name='mnist')
mnist_bldr = tfds.builder('mnist')
mnist_bldr.download_and_prepare()
datasets = mnist_bldr.as_dataset(shuffle_files=False)
mnist_train_orig, mnist_test_orig = datasets['train'], datasets['test']
mnist_train = mnist_train_orig.map(
lambda item: (tf.cast(item['image'], tf.float32)/255.0,
tf.cast(item['label'], tf.int32)))
mnist_test = mnist_test_orig.map(
lambda item: (tf.cast(item['image'], tf.float32)/255.0,
tf.cast(item['label'], tf.int32)))
tf.random.set_seed(1)
mnist_train = mnist_train.shuffle(buffer_size=10000,
reshuffle_each_iteration=False)
mnist_valid = mnist_train.take(100)#.batch(BATCH_SIZE)
mnist_train = mnist_train.skip(100)#.batch(BATCH_SIZE)
# **Notice that count-of-labels in mnist_valid did not stay the same when the dataset is loaded with using Builder and specifying `mnist_bldr.as_dataset(shuffle_files=False)`**
def count_labels(ds):
    """Tally how many times each label value occurs in the dataset.

    ``ds`` yields (image, label) pairs where the label exposes ``.numpy()``.
    """
    return Counter(pair[1].numpy() for pair in ds)
print('Count of labels:', count_labels(mnist_valid))
print('Count of labels:', count_labels(mnist_valid))
## MNIST dataset
datasets = tfds.load(name='mnist')
#mnist_bldr = tfds.builder('mnist')
#mnist_bldr.download_and_prepare()
#datasets = mnist_bldr.as_dataset(shuffle_files=False)
mnist_train_orig, mnist_test_orig = datasets['train'], datasets['test']
mnist_train = mnist_train_orig.map(
lambda item: (tf.cast(item['image'], tf.float32)/255.0,
tf.cast(item['label'], tf.int32)))
mnist_test = mnist_test_orig.map(
lambda item: (tf.cast(item['image'], tf.float32)/255.0,
tf.cast(item['label'], tf.int32)))
tf.random.set_seed(1)
mnist_train = mnist_train.shuffle(buffer_size=10000,
reshuffle_each_iteration=False)
mnist_valid = mnist_train.take(100)#.batch(BATCH_SIZE)
mnist_train = mnist_train.skip(100)#.batch(BATCH_SIZE)
# **Notice that count-of-labels in mnist_valid did not stay the same when the dataset is loaded with `tfds.load()`**
def count_labels(ds):
    """Return a Counter mapping label value -> number of occurrences."""
    tally = Counter()
    for example in ds:
        tally[example[1].numpy()] += 1
    return tally
print('Count of labels:', count_labels(mnist_valid))
print('Count of labels:', count_labels(mnist_valid))
# ----
#
# Readers may ignore the next cell.
| 24.426854 | 176 | 0.676101 |
acf98bcca021b44b67ef55e85b412174b6af6aaf | 6,623 | py | Python | python_modules/automation/automation/docker/image_defs.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/automation/automation/docker/image_defs.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/automation/automation/docker/image_defs.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=print-call
import contextlib
import os
import shutil
from typing import Callable, Dict, Iterator, List, Optional
from automation.git import git_repo_root
import dagster._check as check
from .dagster_docker import DagsterDockerImage
def get_dagster_repo() -> str:
    """Absolute path of the dagster git repository root."""
    return git_repo_root()
@contextlib.contextmanager
def copy_directories(
    paths: List[str], cwd: str, destination: str = "build_cache"
) -> Iterator[None]:
    """Temporarily copy repo paths into a build-cache directory under ``cwd``.

    Creates ``cwd/destination``, copies each repo-relative path (file or
    directory) into it for the duration of the ``with`` block, and removes
    the whole cache directory afterwards — even on error.

    Args:
        paths: repo-root-relative paths to copy into the build context.
        cwd: directory of the image being built; must already exist.
        destination: name of the cache folder created under ``cwd``.
    """
    check.invariant(os.path.exists(cwd), "Image directory does not exist")
    build_cache_dir = os.path.join(cwd, destination)
    try:
        os.mkdir(build_cache_dir)
        # Resolve and validate every source path up front, so nothing is
        # copied at all if any of the paths is missing.
        pairs = []
        for rel_path in paths:
            src = os.path.join(git_repo_root(cwd), rel_path)
            check.invariant(
                os.path.exists(src), "Path for copying to image build does not exist"
            )
            dest = os.path.join(build_cache_dir, os.path.split(rel_path)[1])
            pairs.append((src, dest))

        for src, dest in pairs:
            print("Syncing {} to build dir {}...".format(src, dest))
            if os.path.isdir(src):
                shutil.copytree(src, dest)
            else:
                shutil.copy(src, dest)
        yield
    finally:
        # Always tear the cache down so a failed build leaves no residue.
        shutil.rmtree(build_cache_dir)
@contextlib.contextmanager
def k8s_example_cm(cwd: str) -> Iterator[None]:
    """Build context for the k8s-example image: copies the example project in."""
    with copy_directories(["examples/deploy_k8s/example_project"], cwd):
        yield
def get_core_celery_k8s_dirs() -> List[str]:
    """Repo-relative paths of the core packages every celery-k8s image needs."""
    packages = [
        "dagster",
        "libraries/dagster-postgres",
        "libraries/dagster-celery",
        "libraries/dagster-k8s",
        "libraries/dagster-celery-k8s",
    ]
    return ["python_modules/" + pkg for pkg in packages]
def get_core_k8s_dirs() -> List[str]:
    """Repo-relative paths of the core packages every plain-k8s image needs."""
    packages = ["dagster", "libraries/dagster-postgres", "libraries/dagster-k8s"]
    return ["python_modules/" + pkg for pkg in packages]
@contextlib.contextmanager
def k8s_example_editable_cm(cwd: str) -> Iterator[None]:
    """Editable k8s-example build context: core celery-k8s packages + aws +
    the example project (copied into its own ``example_project`` folder)."""
    editable_dirs = get_core_celery_k8s_dirs() + [
        "python_modules/libraries/dagster-aws",
    ]
    with copy_directories(editable_dirs, cwd):
        with copy_directories(
            ["examples/deploy_k8s/example_project"], cwd, destination="example_project"
        ):
            yield
@contextlib.contextmanager
def k8s_dagit_editable_cm(cwd: str) -> Iterator[None]:
    """Editable dagit build context: core celery-k8s packages + graphql + dagit."""
    print("!!!!! WARNING: You must call `make rebuild_dagit` after making changes to Dagit !!!!\n")
    editable_dirs = get_core_celery_k8s_dirs() + [
        "python_modules/dagster-graphql",
        "python_modules/dagit",
    ]
    with copy_directories(editable_dirs, cwd):
        yield
@contextlib.contextmanager
def k8s_dagit_example_cm(cwd: str) -> Iterator[None]:
    """Dagit example build context: celery-k8s core + aws + graphql + dagit,
    plus the example project in its own ``example_project`` folder."""
    dirs = get_core_celery_k8s_dirs() + [
        "python_modules/libraries/dagster-aws",
        "python_modules/dagster-graphql",
        "python_modules/dagit",
    ]
    with copy_directories(dirs, cwd):
        with copy_directories(
            ["examples/deploy_k8s/example_project"], cwd, destination="example_project"
        ):
            yield
@contextlib.contextmanager
def k8s_celery_worker_editable_cm(cwd: str) -> Iterator[None]:
    """Editable celery-worker build context: just the core celery-k8s packages."""
    with copy_directories(get_core_celery_k8s_dirs(), cwd):
        yield
@contextlib.contextmanager
def user_code_example_cm(cwd: str) -> Iterator[None]:
    """User-code example build context: copies the example project in."""
    with copy_directories(["examples/deploy_k8s/example_project"], cwd):
        yield
@contextlib.contextmanager
def user_code_example_editable_cm(cwd: str) -> Iterator[None]:
    """Editable user-code example build context: celery-k8s core + aws, plus
    the example project in its own ``example_project`` folder."""
    editable_dirs = get_core_celery_k8s_dirs() + [
        "python_modules/libraries/dagster-aws",
    ]
    with copy_directories(editable_dirs, cwd):
        with copy_directories(
            ["examples/deploy_k8s/example_project"], cwd, destination="example_project"
        ):
            yield
@contextlib.contextmanager
def dagster_k8s_editable_cm(cwd: str) -> Iterator[None]:
    """Editable dagster-k8s build context: plain-k8s core + graphql + dagit + aws."""
    print("!!!!! WARNING: You must call `make rebuild_dagit` after making changes to Dagit !!!!\n")
    editable_dirs = get_core_k8s_dirs() + [
        "python_modules/dagster-graphql",
        "python_modules/dagit",
        "python_modules/libraries/dagster-aws",
    ]
    with copy_directories(editable_dirs, cwd):
        yield
@contextlib.contextmanager
def dagster_celery_k8s_editable_cm(cwd: str) -> Iterator[None]:
    """Editable dagster-celery-k8s build context: celery-k8s core + graphql +
    dagit + aws."""
    print("!!!!! WARNING: You must call `make rebuild_dagit` after making changes to Dagit !!!!\n")
    editable_dirs = get_core_celery_k8s_dirs() + [
        "python_modules/dagster-graphql",
        "python_modules/dagit",
        "python_modules/libraries/dagster-aws",
    ]
    with copy_directories(editable_dirs, cwd):
        yield
# Images whose docker build needs extra files copied into the context get a
# custom contextmanager here, keyed by the image's folder name.
CUSTOM_BUILD_CONTEXTMANAGERS: Dict[str, Callable] = {
    "k8s-example": k8s_example_cm,
    "k8s-example-editable": k8s_example_editable_cm,
    "k8s-dagit-editable": k8s_dagit_editable_cm,
    "k8s-dagit-example": k8s_dagit_example_cm,
    "k8s-celery-worker-editable": k8s_celery_worker_editable_cm,
    "user-code-example": user_code_example_cm,
    "user-code-example-editable": user_code_example_editable_cm,
    "dagster-k8s-editable": dagster_k8s_editable_cm,
    "dagster-celery-k8s-editable": dagster_celery_k8s_editable_cm,
}
def list_images(images_path: Optional[str] = None) -> List[DagsterDockerImage]:
    """List all images that we manage.

    Args:
        images_path: directory whose sub-folders are image definitions;
            defaults to the ``images`` folder next to this module.

    Returns:
        List[DagsterDockerImage]: A list of all images managed by this tool.
    """
    root = images_path or os.path.join(os.path.dirname(__file__), "images")
    images = []
    for entry in os.scandir(root):
        if not entry.is_dir():
            continue
        img = DagsterDockerImage(entry.name, path=os.path.join(root, entry.name))
        # Attach the custom build context manager when one is registered.
        custom_cm = CUSTOM_BUILD_CONTEXTMANAGERS.get(entry.name)
        if custom_cm is not None:
            img = img._replace(build_cm=custom_cm)
        images.append(img)
    return images
def get_image(name: str, images_path: Optional[str] = None) -> DagsterDockerImage:
    """Retrieve the image information from the list defined above.

    Raises a check failure when no managed image has the given name.
    """
    match = next(
        (candidate for candidate in list_images(images_path=images_path)
         if candidate.image == name),
        None,
    )
    return check.not_none(match, "could not find image {}".format(name))
| 29.30531 | 99 | 0.65031 |
acf98bd4e1cb8eb740cacc5ca7c98f3930022237 | 3,129 | py | Python | MNISTtf/off_manifold/tflib/lsun_label.py | dberga/MineGAN | 36b048c2fcaeb80b22f3c03288e33d862d7e3113 | [
"MIT"
] | 76 | 2020-03-04T16:25:10.000Z | 2022-03-25T08:58:18.000Z | MNISTtf/off_manifold/tflib/lsun_label.py | dberga/MineGAN | 36b048c2fcaeb80b22f3c03288e33d862d7e3113 | [
"MIT"
] | 7 | 2020-05-24T07:02:44.000Z | 2022-02-10T01:57:40.000Z | MNISTtf/off_manifold/tflib/lsun_label.py | dberga/MineGAN | 36b048c2fcaeb80b22f3c03288e33d862d7e3113 | [
"MIT"
] | 9 | 2020-07-04T16:35:14.000Z | 2022-03-12T06:20:40.000Z |
from os import listdir
import numpy as np
import scipy.misc
import time
import pdb
# Integer class ids for the ten LSUN scene categories handled by this loader.
Label = {
    'bedroom': 0,
    'kitchen': 1,
    'dining_room': 2,
    'conference_room': 3,
    'living_room': 4,
    'bridge': 5,
    'tower': 6,
    'classroom': 7,
    'church_outdoor': 8,
    'restaurant': 9,
}
def make_generator(path, n_files, batch_size, image_size, IW=False, pharse='train'):
    """Build an epoch-generator factory over a class-structured LSUN tree.

    Expects ``path/<class_name>/<pharse>/<image files>``. The returned
    callable yields batches of NCHW int32 images shaped
    (batch_size, 3, 64, 64) together with their integer class labels.

    Args:
        path: root directory holding one sub-directory per LSUN class.
        n_files: number of files to walk in the ``IW`` (from-the-end) variant.
        batch_size: images per yielded batch.
        image_size: side length each image is resized to.
        IW: if True, return the from-the-end variant that yields
            image-only batches.
        pharse: split sub-directory name, e.g. 'train' or 'val'.
    """
    epoch_count = [1]  # mutable cell: nested generators reseed per epoch
    image_list = []
    for sub_class in listdir(path):
        sub_class_path = path + '/' + sub_class + '/' + pharse
        image_list.extend(
            [sub_class_path + '/' + name for name in listdir(sub_class_path)])

    def get_epoch():
        images = np.zeros((batch_size, 3, 64, 64), dtype='int32')
        labels = np.zeros((batch_size,), dtype='int32')
        # Fixed: shuffle() mutates in place, so a list is required --
        # shuffling a range object raises under Python 3.
        files = list(range(len(image_list)))
        random_state = np.random.RandomState(epoch_count[0])
        random_state.shuffle(files)
        epoch_count[0] += 1
        for n, i in enumerate(files):
            # NOTE(review): scipy.misc.imread/imresize were removed in
            # SciPy >= 1.3; this code needs an older SciPy (with Pillow).
            image = scipy.misc.imread("{}".format(image_list[i]))
            # Class name is taken from the third path component; this only
            # holds when ``path`` itself has exactly two components -- confirm.
            label = Label[image_list[i].split('/')[2]]
            image = scipy.misc.imresize(image, (image_size, image_size))
            images[n % batch_size] = image.transpose(2, 0, 1)
            labels[n % batch_size] = label
            if n > 0 and n % batch_size == 0:
                yield (images, labels)

    def get_epoch_from_end():
        images = np.zeros((batch_size, 3, 64, 64), dtype='int32')
        files = list(range(n_files))  # list() so shuffle works on Python 3
        random_state = np.random.RandomState(epoch_count[0])
        random_state.shuffle(files)
        epoch_count[0] += 1
        for n, i in enumerate(files):
            image = scipy.misc.imread("{}".format(path + image_list[-i - 1]))
            image = scipy.misc.imresize(image, (image_size, image_size))
            images[n % batch_size] = image.transpose(2, 0, 1)
            if n > 0 and n % batch_size == 0:
                # Fixed: the original yielded (images, labels) here, but
                # ``labels`` was never defined in this generator (NameError).
                # This variant has no labels, so yield images only.
                yield (images,)

    return get_epoch_from_end if IW else get_epoch
def load_from_end(batch_size, data_dir='/home/ishaan/data/imagenet64', image_size=64, NUM_TRAIN=7000):
    """(train, valid) generator pair using the from-the-end (IW) variant."""
    train = make_generator(data_dir + '/train/', NUM_TRAIN, batch_size, image_size, IW=True)
    valid = make_generator(data_dir + '/val/', 10000, batch_size, image_size, IW=True)
    return (train, valid)
def load(batch_size, data_dir='/home/ishaan/data/imagenet64', image_size=64, NUM_TRAIN=7000):
    """(train, valid) generator pair over the labelled LSUN directory tree."""
    return (
        make_generator(data_dir, NUM_TRAIN, batch_size, image_size, pharse='train'),
        make_generator(data_dir, 10000, batch_size, image_size, pharse='val'),
    )
if __name__ == '__main__':
    # Smoke test: time how quickly batches come off the training generator.
    train_gen, valid_gen = load(64)
    t0 = time.time()
    for i, batch in enumerate(train_gen(), start=1):
        print("{}\t{}".format(str(time.time() - t0), batch[0][0, 0, 0, 0]))
        if i == 1000:
            break
        t0 = time.time()
acf98c85a00bab53ab9f0c0c18c0477cf7d637eb | 526 | py | Python | Mail-Sender/mail.py | Tanny1810/Mail-Sender | 04dbbecfacf79a468def151b92154c90b28e4d4c | [
"MIT"
] | null | null | null | Mail-Sender/mail.py | Tanny1810/Mail-Sender | 04dbbecfacf79a468def151b92154c90b28e4d4c | [
"MIT"
] | null | null | null | Mail-Sender/mail.py | Tanny1810/Mail-Sender | 04dbbecfacf79a468def151b92154c90b28e4d4c | [
"MIT"
] | 1 | 2022-01-17T06:40:38.000Z | 2022-01-17T06:40:38.000Z | # sends mail
import json
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
def senMail(frEmail, toEmail, pwd, subj, message):
    """Send a plain-text email through Gmail's SMTP relay.

    Args:
        frEmail: sender address, also used as the SMTP login name.
        pwd: sender password (for Gmail, an app password).
        toEmail: recipient address.
        subj: subject line.
        message: plain-text message body.

    Raises:
        smtplib.SMTPException: on connection, authentication or send failure.
    """
    msg = MIMEMultipart()
    msg['From'] = frEmail
    msg['To'] = toEmail
    msg['Subject'] = subj
    msg.attach(MIMEText(message, 'plain'))

    server = smtplib.SMTP('smtp.gmail.com: 587')
    try:
        server.starttls()
        server.login(msg['From'], pwd)
        server.sendmail(msg['From'], msg['To'], msg.as_string())
    finally:
        # Fixed: always close the connection, even when login/send raises;
        # the original leaked the socket on any SMTP error.
        server.quit()
acf98ced68adb46b17392feeb1078f56b9b4194a | 15,533 | py | Python | tests/image_test.py | obkyrush/jax | 8662c5f660678b6320a1a8fc46e917e97c399b57 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-06-12T07:03:42.000Z | 2021-06-27T08:48:12.000Z | tests/image_test.py | obkyrush/jax | 8662c5f660678b6320a1a8fc46e917e97c399b57 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2022-01-31T13:20:35.000Z | 2022-02-14T13:20:49.000Z | tests/image_test.py | obkyrush/jax | 8662c5f660678b6320a1a8fc46e917e97c399b57 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import itertools
import unittest
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import image
from jax import numpy as jnp
from jax import test_util as jtu
from jax.config import config
# We use TensorFlow and PIL as reference implementations.
try:
import tensorflow as tf
except ImportError:
tf = None
try:
from PIL import Image as PIL_Image
except ImportError:
PIL_Image = None
config.parse_flags_with_absl()
float_dtypes = jtu.dtypes.all_floating
inexact_dtypes = jtu.dtypes.inexact
class ImageTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_target={}_method={}_antialias={}".format(
jtu.format_shape_dtype_string(image_shape, dtype),
jtu.format_shape_dtype_string(target_shape, dtype), method,
antialias),
"dtype": dtype, "image_shape": image_shape,
"target_shape": target_shape,
"method": method, "antialias": antialias}
for dtype in float_dtypes
for target_shape, image_shape in itertools.combinations_with_replacement(
[[2, 3, 2, 4], [2, 6, 4, 4], [2, 33, 17, 4], [2, 50, 38, 4]], 2)
for method in ["nearest", "bilinear", "lanczos3", "lanczos5", "bicubic"]
for antialias in [False, True]))
@unittest.skipIf(not tf, "Test requires TensorFlow")
def testResizeAgainstTensorFlow(self, dtype, image_shape, target_shape, method,
antialias):
# TODO(phawkins): debug this. There is a small mismatch between TF and JAX
# for some cases of non-antialiased bicubic downscaling; we would expect
# exact equality.
if method == "bicubic" and any(x < y for x, y in
zip(target_shape, image_shape)):
raise unittest.SkipTest("non-antialiased bicubic downscaling mismatch")
rng = jtu.rand_default(self.rng())
args_maker = lambda: (rng(image_shape, dtype),)
def tf_fn(x):
out = tf.image.resize(
x.astype(np.float64), tf.constant(target_shape[1:-1]),
method=method, antialias=antialias).numpy().astype(dtype)
return out
jax_fn = partial(image.resize, shape=target_shape, method=method,
antialias=antialias)
self._CheckAgainstNumpy(tf_fn, jax_fn, args_maker, check_dtypes=True,
tol={np.float16: 2e-2, jnp.bfloat16: 1e-1,
np.float32: 1e-4, np.float64: 1e-4})
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_target={}_method={}".format(
jtu.format_shape_dtype_string(image_shape, dtype),
jtu.format_shape_dtype_string(target_shape, dtype), method),
"dtype": dtype, "image_shape": image_shape,
"target_shape": target_shape,
"method": method}
for dtype in [np.float32]
for target_shape, image_shape in itertools.combinations_with_replacement(
[[3, 2], [6, 4], [33, 17], [50, 39]], 2)
for method in ["nearest", "bilinear", "lanczos3", "bicubic"]))
@unittest.skipIf(not PIL_Image, "Test requires PIL")
def testResizeAgainstPIL(self, dtype, image_shape, target_shape, method):
rng = jtu.rand_uniform(self.rng())
args_maker = lambda: (rng(image_shape, dtype),)
def pil_fn(x):
pil_methods = {
"nearest": PIL_Image.NEAREST,
"bilinear": PIL_Image.BILINEAR,
"bicubic": PIL_Image.BICUBIC,
"lanczos3": PIL_Image.LANCZOS,
}
img = PIL_Image.fromarray(x.astype(np.float32))
out = np.asarray(img.resize(target_shape[::-1], pil_methods[method]),
dtype=dtype)
return out
jax_fn = partial(image.resize, shape=target_shape, method=method,
antialias=True)
self._CheckAgainstNumpy(pil_fn, jax_fn, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_target={}_method={}".format(
jtu.format_shape_dtype_string(image_shape, dtype),
jtu.format_shape_dtype_string(target_shape, dtype), method),
"dtype": dtype, "image_shape": image_shape, "target_shape": target_shape,
"method": method}
for dtype in inexact_dtypes
for image_shape, target_shape in [
([3, 1, 2], [6, 1, 4]),
([1, 3, 2, 1], [1, 6, 4, 1]),
]
for method in ["nearest", "linear", "lanczos3", "lanczos5", "cubic"]))
def testResizeUp(self, dtype, image_shape, target_shape, method):
data = [64, 32, 32, 64, 50, 100]
expected_data = {}
expected_data["nearest"] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data["linear"] = [
64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,
36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,
87.5, 100.0
]
expected_data["lanczos3"] = [
75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,
35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,
35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413
]
expected_data["lanczos5"] = [
77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,
35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,
32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109
]
expected_data["cubic"] = [
70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,
36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,
41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284
]
x = np.array(data, dtype=dtype).reshape(image_shape)
output = image.resize(x, target_shape, method)
expected = np.array(expected_data[method], dtype=dtype).reshape(target_shape)
self.assertAllClose(output, expected, atol=1e-04)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_target={}_method={}_antialias={}".format(
jtu.format_shape_dtype_string(image_shape, dtype),
jtu.format_shape_dtype_string(target_shape, dtype), method,
antialias),
"dtype": dtype, "image_shape": image_shape,
"target_shape": target_shape,
"method": method, "antialias": antialias}
for dtype in [np.float32]
for target_shape, image_shape in itertools.combinations_with_replacement(
[[2, 3, 2, 4], [2, 6, 4, 4], [2, 33, 17, 4], [2, 50, 38, 4]], 2)
for method in ["bilinear", "lanczos3", "lanczos5", "bicubic"]
for antialias in [False, True]))
def testResizeGradients(self, dtype, image_shape, target_shape, method,
antialias):
rng = jtu.rand_default(self.rng())
args_maker = lambda: (rng(image_shape, dtype),)
jax_fn = partial(image.resize, shape=target_shape, method=method,
antialias=antialias)
jtu.check_grads(jax_fn, args_maker(), order=2, rtol=1e-2, eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_target={}_method={}".format(
jtu.format_shape_dtype_string(image_shape, dtype),
jtu.format_shape_dtype_string(target_shape, dtype), method),
"dtype": dtype, "image_shape": image_shape,
"target_shape": target_shape,
"scale": scale, "translation": translation, "method": method}
for dtype in inexact_dtypes
for image_shape, target_shape, scale, translation in [
([3, 1, 2], [6, 1, 4], [2.0, 1.0, 2.0], [1.0, 0.0, -1.0]),
([1, 3, 2, 1], [1, 6, 4, 1], [1.0, 2.0, 2.0, 1.0], [0.0, 1.0, -1.0, 0.0])]
for method in ["linear", "lanczos3", "lanczos5", "cubic"]))
def testScaleAndTranslateUp(self, dtype, image_shape, target_shape, scale,
translation, method):
data = [64, 32, 32, 64, 50, 100]
# Note zeros occur in the output because the sampling location is outside
# the boundaries of the input image.
expected_data = {}
expected_data["linear"] = [
0.0, 0.0, 0.0, 0.0, 56.0, 40.0, 32.0, 0.0, 52.0, 44.0, 40.0, 0.0, 44.0,
52.0, 56.0, 0.0, 45.625, 63.875, 73.0, 0.0, 56.875, 79.625, 91.0, 0.0
]
expected_data["lanczos3"] = [
0.0, 0.0, 0.0, 0.0, 59.6281, 38.4313, 22.23, 0.0, 52.0037, 40.6454,
31.964, 0.0, 41.0779, 47.9383, 53.1818, 0.0, 43.0769, 67.1244, 85.5045,
0.0, 56.4713, 83.5243, 104.2017, 0.0
]
expected_data["lanczos5"] = [
0.0, 0.0, 0.0, 0.0, 60.0223, 40.6694, 23.1219, 0.0, 51.2369, 39.5593,
28.9709, 0.0, 40.8875, 46.5604, 51.7041, 0.0, 43.5299, 67.7223, 89.658,
0.0, 56.784, 83.984, 108.6467, 0.0
]
expected_data["cubic"] = [
0.0, 0.0, 0.0, 0.0, 59.0252, 36.9748, 25.8547, 0.0, 53.3386, 41.4789,
35.4981, 0.0, 41.285, 51.0051, 55.9071, 0.0, 42.151, 65.8032, 77.731,
0.0, 55.823, 83.9288, 98.1026, 0.0
]
x = np.array(data, dtype=dtype).reshape(image_shape)
# Should we test different float types here?
scale_a = jnp.array(scale, dtype=jnp.float32)
translation_a = jnp.array(translation, dtype=jnp.float32)
output = image.scale_and_translate(x, target_shape, range(len(image_shape)),
scale_a, translation_a,
method)
expected = np.array(
expected_data[method], dtype=dtype).reshape(target_shape)
self.assertAllClose(output, expected, atol=2e-03)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_method={}_antialias={}".format(
jtu.dtype_str(dtype), method, antialias),
"dtype": dtype, "method": method, "antialias": antialias}
for dtype in inexact_dtypes
for method in ["linear", "lanczos3", "lanczos5", "cubic"]
for antialias in [True, False]))
def testScaleAndTranslateDown(self, dtype, method, antialias):
image_shape = [1, 6, 7, 1]
target_shape = [1, 3, 3, 1]
data = [
51, 38, 32, 89, 41, 21, 97, 51, 33, 87, 89, 34, 21, 97, 43, 25, 25, 92,
41, 11, 84, 11, 55, 111, 23, 99, 50, 83, 13, 92, 52, 43, 90, 43, 14, 89,
71, 32, 23, 23, 35, 93
]
if antialias:
expected_data = {}
expected_data["linear"] = [
43.5372, 59.3694, 53.6907, 49.3221, 56.8168, 55.4849, 0, 0, 0
]
expected_data["lanczos3"] = [
43.2884, 57.9091, 54.6439, 48.5856, 58.2427, 53.7551, 0, 0, 0
]
expected_data["lanczos5"] = [
43.9209, 57.6360, 54.9575, 48.9272, 58.1865, 53.1948, 0, 0, 0
]
expected_data["cubic"] = [
42.9935, 59.1687, 54.2138, 48.2640, 58.2678, 54.4088, 0, 0, 0
]
else:
expected_data = {}
expected_data["linear"] = [
43.6071, 89, 59, 37.1785, 27.2857, 58.3571, 0, 0, 0
]
expected_data["lanczos3"] = [
44.1390, 87.8786, 63.3111, 25.1161, 20.8795, 53.6165, 0, 0, 0
]
expected_data["lanczos5"] = [
44.8835, 85.5896, 66.7231, 16.9983, 19.8891, 47.1446, 0, 0, 0
]
expected_data["cubic"] = [
43.6426, 88.8854, 60.6638, 31.4685, 22.1204, 58.3457, 0, 0, 0
]
x = np.array(data, dtype=dtype).reshape(image_shape)
expected = np.array(
expected_data[method], dtype=dtype).reshape(target_shape)
scale_a = jnp.array([1.0, 0.35, 0.4, 1.0], dtype=jnp.float32)
translation_a = jnp.array([0.0, 0.2, 0.1, 0.0], dtype=jnp.float32)
output = image.scale_and_translate(
x, target_shape, (0,1,2,3),
scale_a, translation_a, method, antialias=antialias)
self.assertAllClose(output, expected, atol=2e-03)
# Tests that running with just a subset of dimensions that have non-trivial
# scale and translation.
output = image.scale_and_translate(
x, target_shape, (1,2),
scale_a[1:3], translation_a[1:3], method, antialias=antialias)
self.assertAllClose(output, expected, atol=2e-03)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "antialias={}".format(antialias),
"antialias": antialias}
for antialias in [True, False]))
def testScaleAndTranslateJITs(self, antialias):
image_shape = [1, 6, 7, 1]
target_shape = [1, 3, 3, 1]
data = [
51, 38, 32, 89, 41, 21, 97, 51, 33, 87, 89, 34, 21, 97, 43, 25, 25, 92,
41, 11, 84, 11, 55, 111, 23, 99, 50, 83, 13, 92, 52, 43, 90, 43, 14, 89,
71, 32, 23, 23, 35, 93
]
if antialias:
expected_data = [
43.5372, 59.3694, 53.6907, 49.3221, 56.8168, 55.4849, 0, 0, 0
]
else:
expected_data = [43.6071, 89, 59, 37.1785, 27.2857, 58.3571, 0, 0, 0]
x = jnp.array(data, dtype=jnp.float32).reshape(image_shape)
expected = jnp.array(expected_data, dtype=jnp.float32).reshape(target_shape)
scale_a = jnp.array([1.0, 0.35, 0.4, 1.0], dtype=jnp.float32)
translation_a = jnp.array([0.0, 0.2, 0.1, 0.0], dtype=jnp.float32)
def jit_fn(in_array, s, t):
return jax.image.scale_and_translate(
in_array, target_shape, (0, 1, 2, 3), s, t,
"linear", antialias, precision=jax.lax.Precision.HIGHEST)
output = jax.jit(jit_fn)(x, scale_a, translation_a)
self.assertAllClose(output, expected, atol=2e-03)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "antialias={}".format(antialias),
"antialias": antialias}
for antialias in [True, False]))
def testScaleAndTranslateGradFinite(self, antialias):
image_shape = [1, 6, 7, 1]
target_shape = [1, 3, 3, 1]
data = [
51, 38, 32, 89, 41, 21, 97, 51, 33, 87, 89, 34, 21, 97, 43, 25, 25, 92,
41, 11, 84, 11, 55, 111, 23, 99, 50, 83, 13, 92, 52, 43, 90, 43, 14, 89,
71, 32, 23, 23, 35, 93
]
x = jnp.array(data, dtype=jnp.float32).reshape(image_shape)
scale_a = jnp.array([1.0, 0.35, 0.4, 1.0], dtype=jnp.float32)
translation_a = jnp.array([0.0, 0.2, 0.1, 0.0], dtype=jnp.float32)
def scale_fn(s):
return jnp.sum(jax.image.scale_and_translate(
x, target_shape, (0, 1, 2, 3), s, translation_a, "linear", antialias,
precision=jax.lax.Precision.HIGHEST))
scale_out = jax.grad(scale_fn)(scale_a)
self.assertTrue(jnp.all(jnp.isfinite(scale_out)))
def translate_fn(t):
return jnp.sum(jax.image.scale_and_translate(
x, target_shape, (0, 1, 2, 3), scale_a, t, "linear", antialias,
precision=jax.lax.Precision.HIGHEST))
translate_out = jax.grad(translate_fn)(translation_a)
self.assertTrue(jnp.all(jnp.isfinite(translate_out)))
if __name__ == "__main__":
    # Use JAX's test loader so jtu-specific flags are honored.
    absltest.main(testLoader=jtu.JaxTestLoader())
| 42.556164 | 82 | 0.619455 |
acf98cfccc8cefe8b224fe21396546b4ebe41588 | 7,272 | py | Python | juriscraper/lib/html_utils.py | swipswaps/juriscraper | fec54f7fc53096db16345b35c73aca9a52aaecb2 | [
"BSD-2-Clause"
] | null | null | null | juriscraper/lib/html_utils.py | swipswaps/juriscraper | fec54f7fc53096db16345b35c73aca9a52aaecb2 | [
"BSD-2-Clause"
] | null | null | null | juriscraper/lib/html_utils.py | swipswaps/juriscraper | fec54f7fc53096db16345b35c73aca9a52aaecb2 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import re
import sys
from lxml import etree, html
from lxml.etree import XMLSyntaxError
from lxml.html import fromstring, html5parser, tostring
from lxml.html.clean import Cleaner
from six import text_type
from six.moves.html_parser import HTMLParser
from six.moves.urllib.parse import urlsplit, urlunsplit
try:
# Use cchardet for performance to detect the character encoding.
import cchardet as chardet
except ImportError:
import chardet
if sys.maxunicode == 65535:
from .log_tools import make_default_logger
logger = make_default_logger()
logger.warn("You are using a narrow build of Python, which is not "
"completely supported. See issue #188 for details.")
def get_xml_parsed_text(text):
    """Parse ``text`` as XML and return the lxml root element."""
    return etree.fromstring(text)
def get_html_parsed_text(text):
    """Parse ``text`` as HTML and return the lxml root element."""
    return html.fromstring(text)
def get_html5_parsed_text(text):
    """Parse possibly-faulty HTML using the forgiving html5parser.

    The html5parser is run first so irregular markup gets repaired; the
    repaired document is then serialized and re-parsed with the regular
    lxml parser, so callers get the familiar lxml API back.

    :param text: The html of the document
    :return: an lxml.HtmlElement object
    """
    repaired = html5parser.document_fromstring(text.encode('utf-8'))
    serialized = tostring(repaired, encoding='unicode')
    return fromstring(serialized)
def get_table_column_text(html, cell_num, path_base=False):
    """Return the stripped text of every table cell in column `cell_num`."""
    xpath = '//table//tr/td[%d]' % cell_num
    if path_base:
        xpath = path_base + xpath
    cells = html.xpath(xpath)
    return [cell.text_content().strip() for cell in cells]
def get_table_column_links(html, cell_num, path_base=False):
    """Return the href of every anchor inside column `cell_num`'s cells."""
    xpath = '//table//tr/td[%d]//a/@href' % cell_num
    if path_base:
        xpath = path_base + xpath
    return html.xpath(xpath)
def get_clean_body_content(content, remove_extra_tags=None):
    """Parse out the body from an html string, clean it up, and send it along.

    :param content: the HTML text to clean
    :param remove_extra_tags: optional iterable of extra tag names to strip
        in addition to the defaults (``a``, ``body``, ``font``,
        ``noscript``).  A ``None`` default replaces the previous mutable
        default list.
    :return: the cleaned HTML, or an apologetic message when lxml cannot
        parse the input at all
    """
    remove_tags = ['a', 'body', 'font', 'noscript']
    if remove_extra_tags:
        remove_tags.extend(remove_extra_tags)
    cleaner = Cleaner(style=True,
                      remove_tags=remove_tags)
    try:
        return cleaner.clean_html(content)
    except XMLSyntaxError:
        # Keep the original user-facing fallback message intact.
        return "Unable to extract the content from this file. Please try " \
               "reading the original."
def get_visible_text(html_content):
    """Return the visible text of an HTML document.

    Collects every non-empty text node except those inside <style>,
    <link>, <head> and <script>, joined with single spaces.
    """
    html_tree = html.fromstring(html_content)
    text = html_tree.xpath("""//text()[normalize-space() and not(parent::style |
                                                                 parent::link |
                                                                 parent::head |
                                                                 parent::script)]""")
    return " ".join(text)
def html_unescape(s):
    """Replace HTML entities in `s` with their unicode characters.

    NOTE(review): ``HTMLParser.unescape`` was deprecated in Python 3.4
    in favor of ``html.unescape`` — worth migrating when six is dropped.
    """
    h = HTMLParser()
    return h.unescape(s)
def set_response_encoding(request):
    """Set the encoding if it isn't set already.

    Use cchardet for added performance.

    :param request: a requests.Response-like object; its ``encoding``
        attribute is mutated in place (nothing is returned).
    """
    if request:
        # If the encoding is iso-8859-1, switch it to cp1252 (a superset)
        if request.encoding == 'ISO-8859-1':
            request.encoding = 'cp1252'
        if request.encoding is None:
            # Requests detects the encoding when the item is GET'ed using
            # HTTP headers, and then when r.text is accessed, if the encoding
            # hasn't been set by that point. By setting the encoding here, we
            # ensure that it's done by cchardet, if it hasn't been done with
            # HTTP headers. This way it is done before r.text is accessed
            # (which would do it with vanilla chardet). This is a big
            # performance boon, and can be removed once requests is upgraded
            if isinstance(request.content, text_type):
                as_bytes = request.content.encode()
                request.encoding = chardet.detect(as_bytes)['encoding']
            else:
                request.encoding = chardet.detect(request.content)['encoding']
def clean_html(text):
    """Sanitize raw markup before it is parsed into an HTML tree:

        1. strips <![CDATA[ ... ]]> delimiters (they break lxml);
        2. strips a leading <?xml ...?> declaration from unicode input;
        3. removes characters that are invalid in XML.
    """
    # CDATA delimiters cause breakage in lxml, so drop them outright.
    text = re.sub(r'<!\[CDATA\[', u'', text)
    text = re.sub(r'\]\]>', u'', text)
    # lxml raises "Unicode strings with encoding declaration are not
    # supported" when a unicode string still carries an <?xml?> header with
    # an "encoding" attribute.  Removing the declaration in all cases is
    # harmless and moves encoding detection to chardet instead of lxml.
    if isinstance(text, text_type):
        text = re.sub(r'^\s*<\?xml\s+.*?\?>', '', text)
    # Strip bytes invalid in XML (http://stackoverflow.com/questions/8733233/).
    # Narrow builds (sys.maxunicode == 65535) cannot express the
    # supplementary planes, so they use a narrower character class.
    if sys.maxunicode == 65535:
        invalid_chars = u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD]+'
    else:
        invalid_chars = (u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD'
                         u'\U00010000-\U0010FFFF]+')
    return re.sub(invalid_chars, u'', text)
def fix_links_but_keep_anchors(link):
    """Adapter for lxml's rewrite_links: normalize `link`, keeping #anchors."""
    # Wrap the function below so that we have one that can be passed to
    # lxml's rewrite_links method, which doesn't accept any parameters.
    return fix_links_in_lxml_tree(link, keep_anchors=True)
def fix_links_in_lxml_tree(link, keep_anchors=False):
    """Normalize one URL for use by lxml's ``rewrite_links``.

    Two fixes are applied:

    - leading '/..' segments, which make no sense in an absolute URL
      (e.g. https://www.appeals2.az.gov/../Decisions/CR20130096OPN.pdf),
      are stripped from the path;
    - the trailing #anchor is dropped unless ``keep_anchors`` is True.

    lxml's own ``make_links_absolute`` cannot be used here because it
    neither works around such invalid relative URLs nor removes anchors
    (a limitation of urljoin; see http://bugs.python.org/issue22118).

    :param link: the URL to normalize
    :param keep_anchors: whether to preserve a trailing #anchor
    """
    parts = urlsplit(link)
    cleaned_path = re.sub('^(/\.\.)+', '', parts.path)
    url = urlunsplit(parts[:2] + (cleaned_path,) + parts[3:])
    return url if keep_anchors else url.split('#')[0]
| 37.292308 | 85 | 0.651403 |
acf98d0f15f7f493654822751fb2619de20e5505 | 2,684 | py | Python | fluid/sequence_tagging_for_ner/infer.py | phlrain/models | 59adc0d6f38cd2351e16608d6c9d4e72dd5e7fea | [
"Apache-2.0"
] | 1 | 2018-11-23T10:29:49.000Z | 2018-11-23T10:29:49.000Z | fluid/sequence_tagging_for_ner/infer.py | phlrain/models | 59adc0d6f38cd2351e16608d6c9d4e72dd5e7fea | [
"Apache-2.0"
] | null | null | null | fluid/sequence_tagging_for_ner/infer.py | phlrain/models | 59adc0d6f38cd2351e16608d6c9d4e72dd5e7fea | [
"Apache-2.0"
] | 2 | 2018-06-14T13:59:36.000Z | 2018-11-14T12:34:47.000Z | from __future__ import print_function
import numpy as np
import six
import paddle
import paddle.fluid as fluid
from network_conf import ner_net
import reader
from utils import load_dict, load_reverse_dict
from utils_extend import to_lodtensor
def infer(model_path, batch_size, test_data_file, vocab_file, target_file,
          use_gpu):
    """Run NER inference with a saved model and print the predictions.

    Loads the inference program from ``model_path``, batches the test
    file, and prints one ``word<TAB>gold_tag<TAB>predicted_tag`` line per
    token, with a blank line between sentences.  Returns nothing.

    :param model_path: directory containing the saved inference model
    :param batch_size: number of sentences per batch
    :param test_data_file: path to the test corpus
    :param vocab_file: path to the word vocabulary file
    :param target_file: path to the tag vocabulary file
    :param use_gpu: run on CUDA device 0 when True, otherwise on CPU
    """
    word_dict = load_dict(vocab_file)
    word_reverse_dict = load_reverse_dict(vocab_file)
    label_dict = load_dict(target_file)
    label_reverse_dict = load_reverse_dict(target_file)
    test_data = paddle.batch(
        reader.data_reader(test_data_file, word_dict, label_dict),
        batch_size=batch_size)
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(model_path, exe)
        for data in test_data():
            # Each data item is (word_ids, mark_ids, target_ids).
            word = to_lodtensor([x[0] for x in data], place)
            mark = to_lodtensor([x[1] for x in data], place)
            target = to_lodtensor([x[2] for x in data], place)
            crf_decode = exe.run(
                inference_program,
                feed={"word": word,
                      "mark": mark,
                      "target": target},
                fetch_list=fetch_targets,
                return_numpy=False)
            # Level-0 LoD of the decoded tensor marks sentence boundaries
            # (one more offset than there are sentences).
            lod_info = (crf_decode[0].lod())[0]
            np_data = np.array(crf_decode[0])
            assert len(data) == len(lod_info) - 1
            for sen_index in six.moves.xrange(len(data)):
                assert len(data[sen_index][0]) == lod_info[
                    sen_index + 1] - lod_info[sen_index]
                word_index = 0
                for tag_index in six.moves.xrange(lod_info[sen_index],
                                                  lod_info[sen_index + 1]):
                    word = word_reverse_dict[data[sen_index][0][word_index]]
                    gold_tag = label_reverse_dict[data[sen_index][2][
                        word_index]]
                    tag = label_reverse_dict[np_data[tag_index][0]]
                    print(word + "\t" + gold_tag + "\t" + tag)
                    word_index += 1
                # Blank line separates sentences in the output.
                print("")
if __name__ == "__main__":
    # Demo inference against the default data/model locations, CPU-only.
    infer(
        model_path="models/params_pass_0",
        batch_size=6,
        test_data_file="data/test",
        vocab_file="data/vocab.txt",
        target_file="data/target.txt",
        use_gpu=False)
| 35.786667 | 101 | 0.598361 |
acf98e135daedaaf0030b40023a67e863df8f481 | 2,099 | py | Python | results/rabi_and_lmg_optimizations_crossingcriticalphase_20190305/insufficient_tf_optimizations/script_lmg_doublebang_neldermead_50spins_bound04.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | 1 | 2020-07-21T02:31:41.000Z | 2020-07-21T02:31:41.000Z | results/rabi_and_lmg_optimizations_crossingcriticalphase_20190305/insufficient_tf_optimizations/script_lmg_doublebang_neldermead_50spins_bound04.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | null | null | null | results/rabi_and_lmg_optimizations_crossingcriticalphase_20190305/insufficient_tf_optimizations/script_lmg_doublebang_neldermead_50spins_bound04.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
import pandas as pd
import logging
# Make the repository root importable when the script runs from its
# results subdirectory.
if '../../' not in sys.path:
    sys.path.append('../../')
import src.optimization as optimization
# ------ problem definition: LMG model, double-bang protocol
model = 'lmg'
model_parameters = dict(num_spins=50)
protocol = 'doublebang'
optimization_method = 'Nelder-Mead'
# Bounds for the protocol parameters.
parameters_constraints = [-4, 4]
task=dict(initial_intensity=0, final_intensity=2)
# ------ build and check name for output file
additional_file_name_qualifiers = '50spins'
output_file_name = (model + '_' + protocol + '_' +
                    optimization_method.replace('-', '').lower() +
                    '_bound{:02}'.format(parameters_constraints[1]))
if additional_file_name_qualifiers is not None:
    output_file_name += '_' + additional_file_name_qualifiers
# Avoid clobbering earlier runs: append (01), (02), ... until unused.
filenum = 1
_output_file_name = output_file_name
while os.path.isfile(_output_file_name + '.csv'):
    _output_file_name = output_file_name + '({:02})'.format(filenum)
    filenum += 1
output_file_name = _output_file_name + '.csv'
# ------ set up logger
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s]"
                                 "[%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
# consoleHandler = logging.StreamHandler()
# consoleHandler.setFormatter(logFormatter)
# rootLogger.addHandler(consoleHandler)
# Log beside the CSV output ('.csv' stripped, '.log' appended).
fileHandler = logging.FileHandler(output_file_name[:-4] + '.log')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(fileHandler)
logging.info('Output file name will be "{}"'.format(output_file_name))
# ------ start optimization
results = optimization.find_best_protocol(
    problem_specification=dict(
        model=model,
        model_parameters=model_parameters,
        task=task
    ),
    optimization_specs=dict(
        protocol=protocol,
        optimization_method=optimization_method,
        parameters_constraints=parameters_constraints
    ),
    other_options=dict(
        scan_times=np.linspace(0.1, 4, 200)
    )
)
# ------ save results to file
results.to_csv(output_file_name)
| 29.985714 | 70 | 0.706527 |
acf993ad7e74939f9573174cb140205127d14869 | 669 | py | Python | session3/exercise6.py | mililnm/learntocode | 59de3476d9802ee4ecc3473f0c87be4a0a8fae87 | [
"MIT"
] | 1 | 2018-07-30T07:36:36.000Z | 2018-07-30T07:36:36.000Z | session3/exercise6.py | mililnm/learntocode | 59de3476d9802ee4ecc3473f0c87be4a0a8fae87 | [
"MIT"
] | null | null | null | session3/exercise6.py | mililnm/learntocode | 59de3476d9802ee4ecc3473f0c87be4a0a8fae87 | [
"MIT"
def sum2(xs, ys):
    """Pairwise sum of xs and ys, not assuming equal lengths.

    Elements are added index-by-index; when one list is longer, its
    leftover tail is appended to the result unchanged, e.g.
    sum2([1, 2, 3, 4, 5], [1, 2, 3]) == [2, 4, 6, 4, 5].
    """
    # pairwise sum assuming that xs and ys aren't the same length
    shorter = min(len(xs), len(ys))
    result = [xs[i] + ys[i] for i in range(shorter)]
    # Append whichever tail is left over (at most one of these is non-empty).
    result.extend(xs[shorter:])
    result.extend(ys[shorter:])
    return result
def test(test_case_xs, test_case_ys, expected):
    """Run sum2 on one case and print whether it matches `expected`."""
    actual = sum2(test_case_xs, test_case_ys)
    case_label = str(test_case_xs) + ", " + str(test_case_ys)
    if actual == expected:
        print("Passed test for " + case_label)
    else:
        print("Didn't pass test for " + case_label)
        print("The result was " + str(actual) + " but it should have been " + str(expected))
# Exercise sum2 on empty, equal-length, negative, and unequal-length inputs.
test([], [], [])
test([1, 2], [3, 4], [4, 6])
test([-10, 10, 20], [10, -10, -20], [0, 0, 0])
test([1, 2, 3, 4, 5], [1, 2, 3], [2, 4, 6, 4, 5])
test([1, 2, 3], [1, 2, 3, 4, 5], [2, 4, 6, 4, 5]) | 41.8125 | 92 | 0.559043 |
acf9943375e494ba01f7fb793c20ec8c984147c3 | 264 | py | Python | toontown/friends/TTPlayerFriendsManagerUD.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | toontown/friends/TTPlayerFriendsManagerUD.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | 1 | 2021-06-08T17:16:48.000Z | 2021-06-08T17:16:48.000Z | toontown/friends/TTPlayerFriendsManagerUD.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | 4 | 2019-06-20T23:45:23.000Z | 2020-10-14T20:30:15.000Z | from direct.directnotify import DirectNotifyGlobal
from otp.friends.PlayerFriendsManagerUD import PlayerFriendsManagerUD
class TTPlayerFriendsManagerUD(PlayerFriendsManagerUD):
    """Toontown-specific UD player-friends manager.

    Only overrides the notify category so log output is attributed to
    this subclass; all behaviour comes from PlayerFriendsManagerUD.
    """
    # Dedicated notify category for this manager's log output.
    notify = DirectNotifyGlobal.directNotify.newCategory("TTPlayerFriendsManagerUD")
| 37.714286 | 84 | 0.878788 |
acf994b863134078691fff20398b090ba503f1ed | 1,130 | py | Python | brickmos/defaults.py | merschformann/brickmos | 16dbc230cce01f29f67d6c803bd54ea6cd97a233 | [
"MIT"
] | 2 | 2022-02-21T02:32:07.000Z | 2022-02-22T06:47:40.000Z | brickmos/defaults.py | merschformann/brickmos | 16dbc230cce01f29f67d6c803bd54ea6cd97a233 | [
"MIT"
] | null | null | null | brickmos/defaults.py | merschformann/brickmos | 16dbc230cce01f29f67d6c803bd54ea6cd97a233 | [
"MIT"
def get_default_colors():
    """
    Returns the default color palette as a CSV string.

    Semicolon-separated columns: RGB triple, Bricklink color name,
    Bricklink color ID, and Bricklink part ID (3024 for every row).
    The first line is the header row.
    """
    return r"""rgb;Bricklink Color Name;Bricklink Color ID;Bricklink Part ID
255,255,255;White;1;3024
175,181,199;LightBluishGray;86;3024
89,93,96;DarkBluishGray;85;3024
33,33,33;Black;11;3024
106,14,21;DarkRed;59;3024
179,0,6;Red;5;3024
88,42,18;ReddishBrown;88;3024
222,198,156;Tan;2;3024
144,116,80;DarkTan;69;3024
227,160,91;MediumNougat;150;3024
179,84,8;DarkOrange;68;3024
255,126,20;Orange;4;3024
247,186,48;BrightLightOrange;110;3024
247,209,23;Yellow;3;3024
241,225,103;BrightLightYellow;103;3024
223,238,165;YellowishGreen;158;3024
166,202,85;Lime;34;3024
127,143,86;OliveGreen;155;3024
46,85,67;DarkGreen;80;3024
0,100,46;Green;6;3024
16,203,49;BrightGreen;36;3024
0,138,128;DarkTurquoise;39;3024
20,48,68;DarkBlue;63;3024
0,87,166;Blue;7;3024
73,151,250;DarkAzure;153;3024
95,189,247;MediumAzure;156;3024
97,175,255;MediumBlue;42;3024
164,194,230;BrightLightBlue;105;3024
90,113,132;SandBlue;55;3024
95,38,131;DarkPurple;89;3024
136,94,158;MediumLavender;157;3024
200,112,128;DarkPink;47;3024
255,187,255;BrightPink;104;3024"""
| 28.974359 | 76 | 0.776106 |
acf995ba4adee5652bf497dcac8aaaa0df89b254 | 702 | py | Python | tests/test_day22.py | arcadecoffee/advent-2021 | 57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a | [
"MIT"
] | null | null | null | tests/test_day22.py | arcadecoffee/advent-2021 | 57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a | [
"MIT"
] | null | null | null | tests/test_day22.py | arcadecoffee/advent-2021 | 57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a | [
"MIT"
] | null | null | null | """
Tests for Day 22
"""
from day22.module import part_1, part_2, \
FULL_INPUT_FILE, TEST_INPUT_FILE_1, TEST_INPUT_FILE_2, TEST_INPUT_FILE_3
def test_part_1_1():
    """Part 1 on the first sample input."""
    assert part_1(TEST_INPUT_FILE_1) == 39
def test_part_1_2():
    """Part 1 on the second sample input."""
    assert part_1(TEST_INPUT_FILE_2) == 590784
def test_part_1_3():
    """Part 1 on the third sample input."""
    assert part_1(TEST_INPUT_FILE_3) == 474140
def test_part_1_full():
    """Part 1 on the full puzzle input."""
    assert part_1(FULL_INPUT_FILE) == 546724
def test_part_2():
    """Part 2 on the third sample input."""
    assert part_2(TEST_INPUT_FILE_3) == 2758514936282235
def test_part_2_full():
    """Part 2 on the full puzzle input."""
    assert part_2(FULL_INPUT_FILE) == 1346544039176841
| 18.972973 | 76 | 0.720798 |
acf99627a4a864fed58c3ead17197f0015979cec | 2,678 | py | Python | examples/webcam_and_rtmp/socket_liaison_asyncore.py | EnterStudios/astral | 4b75a8c54cc102b85ad582caefe97411e1469ec8 | [
"MIT"
] | 13 | 2015-12-03T08:30:38.000Z | 2021-04-19T13:30:00.000Z | examples/webcam_and_rtmp/socket_liaison_asyncore.py | EnterStudios/astral | 4b75a8c54cc102b85ad582caefe97411e1469ec8 | [
"MIT"
] | null | null | null | examples/webcam_and_rtmp/socket_liaison_asyncore.py | EnterStudios/astral | 4b75a8c54cc102b85ad582caefe97411e1469ec8 | [
"MIT"
] | 3 | 2016-04-18T07:27:42.000Z | 2018-07-03T04:48:58.000Z | #! /usr/bin/env python
'''
ASTRAL: TCP Socket liaison/tunnel asyncore test for RTMP streaming
http://code.activestate.com/recipes/483732/
'''
import socket,asyncore
class forwarder(asyncore.dispatcher):
    """Listening socket that tunnels each accepted client to a remote host.

    Every accepted connection gets a receiver/sender pair that shuttles
    bytes between the client and remoteip:remoteport.
    """
    def __init__(self, ip, port, remoteip,remoteport,backlog=5):
        asyncore.dispatcher.__init__(self)
        self.remoteip=remoteip
        self.remoteport=remoteport
        self.create_socket(socket.AF_INET,socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((ip,port))
        self.listen(backlog)
    def handle_accept(self):
        conn, addr = self.accept()
        # print '--- Connect --- '
        # Pair the client-side receiver with a sender dialing the remote end.
        sender(receiver(conn),self.remoteip,self.remoteport)
class receiver(asyncore.dispatcher):
    """Dispatcher wrapping the accepted client socket.

    `from_remote_buffer` accumulates bytes read from this (client) socket
    and is drained by the paired sender toward the remote host;
    `to_remote_buffer` is filled by the sender with the remote host's
    replies and written back to the client here.
    """
    def __init__(self,conn):
        asyncore.dispatcher.__init__(self,conn)
        self.from_remote_buffer=''
        self.to_remote_buffer=''
        # Back-reference installed by the paired sender's __init__.
        self.sender=None
    def handle_connect(self):
        pass
    def handle_read(self):
        read = self.recv(4096)
        # print '%04i -->'%len(read)
        self.from_remote_buffer += read
    def writable(self):
        # Only request write events while data is pending for the client.
        return (len(self.to_remote_buffer) > 0)
    def handle_write(self):
        sent = self.send(self.to_remote_buffer)
        # print '%04i <--'%sent
        self.to_remote_buffer = self.to_remote_buffer[sent:]
    def handle_close(self):
        # Tear down both halves of the tunnel together.
        self.close()
        if self.sender:
            self.sender.close()
class sender(asyncore.dispatcher):
    """Dispatcher that dials the remote host for one tunneled client.

    Pumps the paired receiver's buffers: drains
    ``receiver.from_remote_buffer`` toward the remote host and fills
    ``receiver.to_remote_buffer`` with the remote host's replies.
    """
    def __init__(self, receiver, remoteaddr,remoteport):
        asyncore.dispatcher.__init__(self)
        self.receiver=receiver
        # Link back so the receiver can close us when the client hangs up.
        receiver.sender=self
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((remoteaddr, remoteport))
    def handle_connect(self):
        pass
    def handle_read(self):
        read = self.recv(4096)
        # print '<-- %04i'%len(read)
        self.receiver.to_remote_buffer += read
    def writable(self):
        # Only request write events while client data is waiting to go out.
        return (len(self.receiver.from_remote_buffer) > 0)
    def handle_write(self):
        sent = self.send(self.receiver.from_remote_buffer)
        # print '--> %04i'%sent
        self.receiver.from_remote_buffer = self.receiver.from_remote_buffer[sent:]
    def handle_close(self):
        # Closing either side closes the whole tunnel.
        self.close()
        self.receiver.close()
if __name__=='__main__':
    # Tunnel layout: viewer connects to the liaison on port 5000, which
    # relays to the local RTMP publisher on port 1935.
    publisher_address = ('127.0.0.1', 1935)
    liaison_address = ('127.0.0.1', 5000)
    forwarder(liaison_address[0], liaison_address[1], publisher_address[0], publisher_address[1])
    print 'Liaison started: %s:%d <-> %s:%d <-> viewer' % (publisher_address[0], publisher_address[1], liaison_address[0], liaison_address[1])
    asyncore.loop()
| 29.108696 | 142 | 0.647125 |
acf996a47300031432c1c5b393b28d424dc61b66 | 1,955 | py | Python | app/db/repos/oauth.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | app/db/repos/oauth.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | app/db/repos/oauth.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | from typing import ClassVar
from sqlalchemy.dialects.postgresql import insert as pg_insert
from sqlalchemy.future import select as sa_select
from sqlalchemy.orm import joinedload
from .base import BaseRepo
from ..models import (
OAuthConnection,
User
)
from ...services.authentication.oauth.dataclasses_ import OAuthUser
__all__ = ['OAuthConnectionsRepo']
class OAuthConnectionsRepo(BaseRepo[OAuthConnection]):
    """Repository for OAuthConnection rows (PostgreSQL upsert semantics)."""
    model: ClassVar = OAuthConnection
    async def link_google_connection(
        self,
        oauth_user: OAuthUser,
        internal_user: User
    ) -> OAuthConnection:
        """Create or refresh the Google OAuth link for ``internal_user``."""
        oauth_connection_on_insert: dict[str, str | int] = {
            # OAuthConnection.user_id
            'user_id': internal_user.id,
            # OAuthConnection.google_id
            'google_id': oauth_user.id
        }
        return await self._link_connection(oauth_connection_on_insert)
    async def _link_connection(
        self,
        oauth_connection_on_insert: dict[str, str | int]
    ) -> OAuthConnection:
        """Upsert the connection row keyed on ``user_id``.

        Uses PostgreSQL INSERT ... ON CONFLICT (user_id) DO UPDATE;
        ``user_id`` itself is removed from the update set since it is
        the conflict key.
        """
        oauth_connection_on_conflict = oauth_connection_on_insert.copy()
        oauth_connection_on_conflict.pop('user_id')
        insert_stmt = (
            pg_insert(OAuthConnection)
            .values(**oauth_connection_on_insert)
        )
        update_on_conflict_stmt = (
            insert_stmt
            .on_conflict_do_update(
                index_elements=[OAuthConnection.user_id],
                set_=oauth_connection_on_conflict
            )
        )
        result = await self._return_from_statement(update_on_conflict_stmt)
        return self._get_entity_or_raise(result)
    async def fetch_by_google_id(self, google_id: str) -> OAuthConnection:
        """Load the connection for a Google account id, eagerly joining
        its user relationship."""
        stmt = (
            sa_select(OAuthConnection)
            .options(joinedload(OAuthConnection.user))
            .where(OAuthConnection.google_id == google_id)
        )
        return await self._fetch_entity(stmt)
| 31.532258 | 75 | 0.663939 |
acf996bd23e0140f58c91966f46377c3231d9f26 | 34,085 | py | Python | beetsplug/replaygain.py | stragu/beets | da46a62772ab7a88c5799c84841f744dfc0f0a20 | [
"MIT"
] | null | null | null | beetsplug/replaygain.py | stragu/beets | da46a62772ab7a88c5799c84841f744dfc0f0a20 | [
"MIT"
] | null | null | null | beetsplug/replaygain.py | stragu/beets | da46a62772ab7a88c5799c84841f744dfc0f0a20 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import subprocess
import os
import collections
import itertools
import sys
import warnings
import re
from beets import logging
from beets import ui
from beets.plugins import BeetsPlugin
from beets.util import syspath, command_output, displayable_path
# Utilities.
# NOTE(review): ReplayGainError looks intended as the recoverable,
# per-item failure and FatalReplayGainError as unrecoverable — confirm
# at the (out-of-view) call sites in the plugin.
class ReplayGainError(Exception):
    """Raised when a local (to a track or an album) error occurs in one
    of the backends.
    """
class FatalReplayGainError(Exception):
    """Raised when a fatal error occurs in one of the backends.
    """
class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
    """Raised when a fatal error occurs in the GStreamerBackend when
    loading the required plugins."""
def call(args):
    """Execute the command and return its output or raise a
    ReplayGainError on failure.

    :param args: argv list; ``args[0]`` (the executable name) is used in
        the error message.
    :raises ReplayGainError: on a nonzero exit status or when the
        arguments cannot be encoded for the platform.
    """
    try:
        return command_output(args)
    except subprocess.CalledProcessError as e:
        raise ReplayGainError(
            "{0} exited with status {1}".format(args[0], e.returncode)
        )
    except UnicodeEncodeError:
        # Due to a bug in Python 2's subprocess on Windows, Unicode
        # filenames can fail to encode on that platform. See:
        # http://code.google.com/p/beets/issues/detail?id=499
        raise ReplayGainError("argument encoding failed")
# Backend base and plumbing classes.
# Per-track result: gain adjustment and peak value as reported/derived
# from the analysis tool (the command backend normalizes peak to 0..1).
Gain = collections.namedtuple("Gain", "gain peak")
# Whole-album result: the album-level Gain plus one Gain per track.
AlbumGain = collections.namedtuple("AlbumGain", "album_gain track_gains")
class Backend(object):
    """An abstract class representing engine for calculating RG values.
    """
    def __init__(self, config, log):
        """Initialize the backend with the configuration view for the
        plugin.
        """
        self._log = log
    def compute_track_gain(self, items):
        """Compute and return per-track gain for `items` (abstract)."""
        raise NotImplementedError()
    def compute_album_gain(self, album):
        """Compute and return the album-level gain (abstract)."""
        # TODO: implement album gain in terms of track gain of the
        # individual tracks which can be used for any backend.
        raise NotImplementedError()
# bsg1770gain backend
class Bs1770gainBackend(Backend):
    """bs1770gain is a loudness scanner compliant with ITU-R BS.1770 and
    its flavors EBU R128, ATSC A/85 and Replaygain 2.0.

    This backend shells out to the ``bs1770gain`` command-line tool and
    parses its textual report.
    """
    def __init__(self, config, log):
        """Read configuration and verify that bs1770gain is runnable.

        Raises FatalReplayGainError when the tool (or the configured
        measurement method) is unavailable.
        """
        super(Bs1770gainBackend, self).__init__(config, log)
        config.add({
            'chunk_at': 5000,
            'method': 'replaygain',
        })
        # Above this many tracks the work is split into chunks to bound
        # the tool's memory use (see compute_gain).
        self.chunk_at = config['chunk_at'].as_number()
        # The configured method becomes the tool's CLI flag,
        # e.g. 'replaygain' -> '--replaygain'.
        self.method = b'--' + bytes(config['method'].get(unicode))
        cmd = b'bs1770gain'
        try:
            # Probe the executable once so a missing binary fails fast.
            call([cmd, self.method])
            self.command = cmd
        except OSError:
            raise FatalReplayGainError(
                'Is bs1770gain installed? Is your method in config correct?'
            )
        if not self.command:
            raise FatalReplayGainError(
                'no replaygain command found: install bs1770gain'
            )
    def compute_track_gain(self, items):
        """Computes the track gain of the given tracks, returns a list
        of TrackGain objects.
        """
        output = self.compute_gain(items, False)
        return output
    def compute_album_gain(self, album):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        # TODO: What should be done when not all tracks in the album are
        # supported?
        supported_items = album.items()
        output = self.compute_gain(supported_items, True)
        if not output:
            raise ReplayGainError('no output from bs1770gain')
        # The tool reports the album summary last; split it off here.
        return AlbumGain(output[-1], output[:-1])
    def isplitter(self, items, chunk_at):
        """Break an iterable into chunks of at most size `chunk_at`,
        generating lists for each chunk.
        """
        iterable = iter(items)
        while True:
            result = []
            for i in range(chunk_at):
                try:
                    a = next(iterable)
                except StopIteration:
                    break
                else:
                    result.append(a)
            if result:
                yield result
            else:
                break
    def compute_gain(self, items, is_album):
        """Computes the track or album gain of a list of items, returns
        a list of TrackGain objects.

        When computing album gain, the last TrackGain object returned is
        the album gain.
        """
        if len(items) == 0:
            return []
        albumgaintot = 0.0
        albumpeaktot = 0.0
        returnchunks = []
        # In the case of very large sets of music, we break the tracks
        # into smaller chunks and process them one at a time. This
        # avoids running out of memory.
        if len(items) > self.chunk_at:
            i = 0
            for chunk in self.isplitter(items, self.chunk_at):
                i += 1
                returnchunk = self.compute_chunk_gain(chunk, is_album)
                albumgaintot += returnchunk[-1].gain
                albumpeaktot += returnchunk[-1].peak
                returnchunks = returnchunks + returnchunk[0:-1]
            # NOTE(review): chunk results are combined by unweighted
            # averaging regardless of chunk size, and the album *peak* is
            # averaged rather than taken as the maximum — both look like
            # approximations worth revisiting.
            returnchunks.append(Gain(albumgaintot / i, albumpeaktot / i))
            return returnchunks
        else:
            return self.compute_chunk_gain(items, is_album)
    def compute_chunk_gain(self, items, is_album):
        """Compute ReplayGain values and return a list of results
        dictionaries as given by `parse_tool_output`.
        """
        # Construct shell command.
        cmd = [self.command]
        cmd = cmd + [self.method]
        cmd = cmd + [b'-it']
        # Workaround for Windows: the underlying tool fails on paths
        # with the \\?\ prefix, so we don't use it here. This
        # prevents the backend from working with long paths.
        args = cmd + [syspath(i.path, prefix=False) for i in items]
        # Invoke the command.
        self._log.debug("executing {0}", " ".join(map(displayable_path, args)))
        output = call(args)
        self._log.debug(u'analysis finished: {0}', output)
        # One extra result section is expected for the [ALBUM] summary.
        results = self.parse_tool_output(output,
                                         len(items) + is_album)
        self._log.debug(u'{0} items, {1} results', len(items), len(results))
        return results
    def parse_tool_output(self, text, num_lines):
        """Given the output from bs1770gain, parse the text and
        return a list of dictionaries
        containing information about each analyzed file.
        """
        out = []
        data = text.decode('utf8', errors='ignore')
        # Capture each per-file "[n/m] ..." section (and the "[ALBUM]"
        # summary) up to the start of the next section or "done.".
        regex = re.compile(
            ur'(\s{2,2}\[\d+\/\d+\].*?|\[ALBUM\].*?)'
            '(?=\s{2,2}\[\d+\/\d+\]|\s{2,2}\[ALBUM\]'
            ':|done\.\s)', re.DOTALL | re.UNICODE)
        results = re.findall(regex, data)
        for parts in results[0:num_lines]:
            part = parts.split(b'\n')
            if len(part) == 0:
                self._log.debug('bad tool output: {0!r}', text)
                raise ReplayGainError('bs1770gain failed')
            try:
                # Second line holds "... / <gain> LU", third "... / <peak>".
                song = {
                    'file': part[0],
                    'gain': float((part[1].split('/'))[1].split('LU')[0]),
                    'peak': float(part[2].split('/')[1]),
                }
            except IndexError:
                # Sections lacking gain/peak fields (e.g. unreadable files)
                # are skipped rather than aborting the whole run.
                self._log.info('bs1770gain reports (faulty file?): {}', parts)
                continue
            out.append(Gain(song['gain'], song['peak']))
        return out
# mpgain/aacgain CLI tool backend.
class CommandBackend(Backend):
    """Backend that shells out to the mp3gain/aacgain command-line tools."""
    def __init__(self, config, log):
        """Locate the gain tool (configured path or $PATH probe).

        Raises FatalReplayGainError when no usable executable is found.
        """
        super(CommandBackend, self).__init__(config, log)
        config.add({
            'command': u"",
            'noclip': True,
        })
        self.command = config["command"].get(unicode)
        if self.command:
            # Explicit executable path.
            if not os.path.isfile(self.command):
                raise FatalReplayGainError(
                    'replaygain command does not exist: {0}'.format(
                        self.command
                    )
                )
        else:
            # Check whether the program is in $PATH.
            # NOTE(review): the loop does not break on success, so when
            # both tools are installed the later probe (aacgain) wins.
            for cmd in (b'mp3gain', b'aacgain'):
                try:
                    call([cmd, b'-v'])
                    self.command = cmd
                except OSError:
                    pass
        if not self.command:
            raise FatalReplayGainError(
                'no replaygain command found: install mp3gain or aacgain'
            )
        self.noclip = config['noclip'].get(bool)
        target_level = config['targetlevel'].as_number()
        # Offset relative to the tools' built-in 89 dB reference level.
        self.gain_offset = int(target_level - 89)
    def compute_track_gain(self, items):
        """Computes the track gain of the given tracks, returns a list
        of TrackGain objects.
        """
        # Drop items the selected tool cannot process (see format_supported).
        supported_items = filter(self.format_supported, items)
        output = self.compute_gain(supported_items, False)
        return output
    def compute_album_gain(self, album):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        # TODO: What should be done when not all tracks in the album are
        # supported?
        supported_items = filter(self.format_supported, album.items())
        if len(supported_items) != len(album.items()):
            self._log.debug(u'tracks are of unsupported format')
            return AlbumGain(None, [])
        output = self.compute_gain(supported_items, True)
        return AlbumGain(output[-1], output[:-1])
    def format_supported(self, item):
        """Checks whether the given item is supported by the selected tool.

        mp3gain handles MP3 only; aacgain handles MP3 and AAC.
        """
        if 'mp3gain' in self.command and item.format != 'MP3':
            return False
        elif 'aacgain' in self.command and item.format not in ('MP3', 'AAC'):
            return False
        return True
    def compute_gain(self, items, is_album):
        """Computes the track or album gain of a list of items, returns
        a list of TrackGain objects.

        When computing album gain, the last TrackGain object returned is
        the album gain.
        """
        if len(items) == 0:
            self._log.debug('no supported tracks to analyze')
            return []
        """Compute ReplayGain values and return a list of results
        dictionaries as given by `parse_tool_output`.
        """
        # Construct shell command. The "-o" option makes the output
        # easily parseable (tab-delimited). "-s s" forces gain
        # recalculation even if tags are already present and disables
        # tag-writing; this turns the mp3gain/aacgain tool into a gain
        # calculator rather than a tag manipulator because we take care
        # of changing tags ourselves.
        cmd = [self.command, b'-o', b'-s', b's']
        if self.noclip:
            # Adjust to avoid clipping.
            cmd = cmd + [b'-k']
        else:
            # Disable clipping warning.
            cmd = cmd + [b'-c']
        cmd = cmd + [b'-d', bytes(self.gain_offset)]
        cmd = cmd + [syspath(i.path) for i in items]
        self._log.debug(u'analyzing {0} files', len(items))
        self._log.debug(u"executing {0}", " ".join(map(displayable_path, cmd)))
        output = call(cmd)
        self._log.debug(u'analysis finished')
        return self.parse_tool_output(output,
                                      len(items) + (1 if is_album else 0))
    def parse_tool_output(self, text, num_lines):
        """Given the tab-delimited output from an invocation of mp3gain
        or aacgain, parse the text and return a list of dictionaries
        containing information about each analyzed file.
        """
        out = []
        # Skip the header row, then read one tab-delimited row per result.
        for line in text.split(b'\n')[1:num_lines + 1]:
            parts = line.split(b'\t')
            if len(parts) != 6 or parts[0] == b'File':
                self._log.debug(u'bad tool output: {0}', text)
                raise ReplayGainError('mp3gain failed')
            d = {
                'file': parts[0],
                'mp3gain': int(parts[1]),
                'gain': float(parts[2]),
                # Normalize the 16-bit sample peak to the 0..1 range.
                'peak': float(parts[3]) / (1 << 15),
                'maxgain': int(parts[4]),
                'mingain': int(parts[5]),
            }
            out.append(Gain(d['gain'], d['peak']))
        return out
# GStreamer-based backend.
class GStreamerBackend(Backend):
    """ReplayGain backend that analyzes audio through a GStreamer
    pipeline built around the ``rganalysis`` element.
    """
    def __init__(self, config, log):
        """Build the analysis pipeline and register bus/pad callbacks.

        Raises FatalGstreamerPluginReplayGainError if any required
        GStreamer element cannot be created.
        """
        super(GStreamerBackend, self).__init__(config, log)
        self._import_gst()
        # Initialize a GStreamer pipeline of the form filesrc ->
        # decodebin -> audioconvert -> audioresample -> rganalysis ->
        # fakesink. The connection between decodebin and audioconvert is
        # handled dynamically after decodebin figures out the type of
        # the input file.
        self._src = self.Gst.ElementFactory.make("filesrc", "src")
        self._decbin = self.Gst.ElementFactory.make("decodebin", "decbin")
        self._conv = self.Gst.ElementFactory.make("audioconvert", "conv")
        self._res = self.Gst.ElementFactory.make("audioresample", "res")
        self._rg = self.Gst.ElementFactory.make("rganalysis", "rg")
        if self._src is None or self._decbin is None or self._conv is None \
           or self._res is None or self._rg is None:
            raise FatalGstreamerPluginReplayGainError(
                "Failed to load required GStreamer plugins"
            )
        # We check which files need gain ourselves, so all files given
        # to rganalysis should have their gain computed, even if it
        # already exists.
        self._rg.set_property("forced", True)
        self._rg.set_property("reference-level",
                              config["targetlevel"].as_number())
        self._sink = self.Gst.ElementFactory.make("fakesink", "sink")
        self._pipe = self.Gst.Pipeline()
        self._pipe.add(self._src)
        self._pipe.add(self._decbin)
        self._pipe.add(self._conv)
        self._pipe.add(self._res)
        self._pipe.add(self._rg)
        self._pipe.add(self._sink)
        # Static links; decodebin -> audioconvert is linked later in
        # _on_pad_added once decodebin exposes its source pad.
        self._src.link(self._decbin)
        self._conv.link(self._res)
        self._res.link(self._rg)
        self._rg.link(self._sink)
        self._bus = self._pipe.get_bus()
        self._bus.add_signal_watch()
        self._bus.connect("message::eos", self._on_eos)
        self._bus.connect("message::error", self._on_error)
        self._bus.connect("message::tag", self._on_tag)
        # Needed for handling the dynamic connection between decodebin
        # and audioconvert
        self._decbin.connect("pad-added", self._on_pad_added)
        self._decbin.connect("pad-removed", self._on_pad_removed)
        self._main_loop = self.GLib.MainLoop()
        self._files = []
    def _import_gst(self):
        """Import the necessary GObject-related modules and assign `Gst`
        and `GObject` fields on this object.
        """
        try:
            import gi
        except ImportError:
            raise FatalReplayGainError(
                "Failed to load GStreamer: python-gi not found"
            )
        try:
            gi.require_version('Gst', '1.0')
        except ValueError as e:
            raise FatalReplayGainError(
                "Failed to load GStreamer 1.0: {0}".format(e)
            )
        from gi.repository import GObject, Gst, GLib
        # Calling GObject.threads_init() is not needed for
        # PyGObject 3.10.2+
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            GObject.threads_init()
        Gst.init([sys.argv[0]])
        self.GObject = GObject
        self.GLib = GLib
        self.Gst = Gst
    def compute(self, files, album):
        """Run the pipeline over ``files``, collecting per-file tags into
        ``self._file_tags``. Blocks on the GLib main loop until every
        file has been processed or an error occurs.
        """
        self._error = None
        self._files = list(files)
        if len(self._files) == 0:
            return
        self._file_tags = collections.defaultdict(dict)
        if album:
            # Tell rganalysis how many tracks make up the album so it
            # can emit album gain after the last one.
            self._rg.set_property("num-tracks", len(self._files))
        if self._set_first_file():
            self._main_loop.run()
            if self._error is not None:
                raise self._error
    def compute_track_gain(self, items):
        """Return a list of Gain tuples, one per item, from the TRACK_GAIN
        and TRACK_PEAK tags collected during analysis.
        """
        self.compute(items, False)
        if len(self._file_tags) != len(items):
            raise ReplayGainError("Some tracks did not receive tags")
        ret = []
        for item in items:
            ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
                            self._file_tags[item]["TRACK_PEAK"]))
        return ret
    def compute_album_gain(self, album):
        """Return an AlbumGain for ``album``; the album-level values are
        taken from the tags of the last analyzed item.
        """
        items = list(album.items())
        self.compute(items, True)
        if len(self._file_tags) != len(items):
            raise ReplayGainError("Some items in album did not receive tags")
        ret = []
        for item in items:
            ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
                            self._file_tags[item]["TRACK_PEAK"]))
        # ALBUM_GAIN/ALBUM_PEAK are only emitted on the final track.
        last_tags = self._file_tags[items[-1]]
        return AlbumGain(Gain(last_tags["ALBUM_GAIN"],
                              last_tags["ALBUM_PEAK"]), ret)
    def close(self):
        # Stop listening on the pipeline bus.
        self._bus.remove_signal_watch()
    def _on_eos(self, bus, message):
        # A file finished playing in all elements of the pipeline. The
        # RG tags have already been propagated. If we don't have a next
        # file, we stop processing.
        if not self._set_next_file():
            self._pipe.set_state(self.Gst.State.NULL)
            self._main_loop.quit()
    def _on_error(self, bus, message):
        # Stop the pipeline and record the failure; ``compute`` re-raises
        # it after the main loop exits.
        self._pipe.set_state(self.Gst.State.NULL)
        self._main_loop.quit()
        err, debug = message.parse_error()
        f = self._src.get_property("location")
        # A GStreamer error, either an unsupported format or a bug.
        self._error = ReplayGainError(
            "Error {0!r} - {1!r} on file {2!r}".format(err, debug, f)
        )
    def _on_tag(self, bus, message):
        # Collect ReplayGain tags emitted by rganalysis for the file
        # currently being analyzed.
        tags = message.parse_tag()
        def handle_tag(taglist, tag, userdata):
            # The rganalysis element provides both the existing tags for
            # files and the newly computed tags. In order to ensure we
            # store the computed tags, we overwrite the RG values when
            # they are received a second time.
            if tag == self.Gst.TAG_TRACK_GAIN:
                self._file_tags[self._file]["TRACK_GAIN"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_TRACK_PEAK:
                self._file_tags[self._file]["TRACK_PEAK"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_ALBUM_GAIN:
                self._file_tags[self._file]["ALBUM_GAIN"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_ALBUM_PEAK:
                self._file_tags[self._file]["ALBUM_PEAK"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_REFERENCE_LEVEL:
                self._file_tags[self._file]["REFERENCE_LEVEL"] = \
                    taglist.get_double(tag)[1]
        tags.foreach(handle_tag, None)
    def _set_first_file(self):
        """Load the first file and start the pipeline from a cold (NULL)
        state. Returns False when there are no files to analyze.
        """
        if len(self._files) == 0:
            return False
        self._file = self._files.pop(0)
        self._pipe.set_state(self.Gst.State.NULL)
        self._src.set_property("location", syspath(self._file.path))
        self._pipe.set_state(self.Gst.State.PLAYING)
        return True
    def _set_file(self):
        """Initialize the filesrc element with the next file to be analyzed.
        """
        # No more files, we're done
        if len(self._files) == 0:
            return False
        self._file = self._files.pop(0)
        # Disconnect the decodebin element from the pipeline, set its
        # state to READY to clear it.
        self._decbin.unlink(self._conv)
        self._decbin.set_state(self.Gst.State.READY)
        # Set a new file on the filesrc element, can only be done in the
        # READY state
        self._src.set_state(self.Gst.State.READY)
        self._src.set_property("location", syspath(self._file.path))
        # Ensure the filesrc element received the paused state of the
        # pipeline in a blocking manner
        self._src.sync_state_with_parent()
        self._src.get_state(self.Gst.CLOCK_TIME_NONE)
        # Ensure the decodebin element receives the paused state of the
        # pipeline in a blocking manner
        self._decbin.sync_state_with_parent()
        self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)
        return True
    def _set_next_file(self):
        """Set the next file to be analyzed while keeping the pipeline
        in the PAUSED state so that the rganalysis element can correctly
        handle album gain.
        """
        # A blocking pause
        self._pipe.set_state(self.Gst.State.PAUSED)
        self._pipe.get_state(self.Gst.CLOCK_TIME_NONE)
        # Try setting the next file
        ret = self._set_file()
        if ret:
            # Seek to the beginning in order to clear the EOS state of the
            # various elements of the pipeline
            self._pipe.seek_simple(self.Gst.Format.TIME,
                                   self.Gst.SeekFlags.FLUSH,
                                   0)
            self._pipe.set_state(self.Gst.State.PLAYING)
        return ret
    def _on_pad_added(self, decbin, pad):
        # Link decodebin's freshly exposed source pad to audioconvert.
        sink_pad = self._conv.get_compatible_pad(pad, None)
        assert(sink_pad is not None)
        pad.link(sink_pad)
    def _on_pad_removed(self, decbin, pad):
        # Called when the decodebin element is disconnected from the
        # rest of the pipeline while switching input files
        peer = pad.get_peer()
        assert(peer is None)
class AudioToolsBackend(Backend):
    """ReplayGain backend that uses `Python Audio Tools
    <http://audiotools.sourceforge.net/>`_ and its capabilities to read more
    file formats and compute ReplayGain values using its replaygain module.
    """
    def __init__(self, config, log):
        super(AudioToolsBackend, self).__init__(config, log)
        self._import_audiotools()
    def _import_audiotools(self):
        """Check whether it's possible to import the necessary modules.
        There is no check on the file formats at runtime.
        :raises :exc:`FatalReplayGainError`: if the modules cannot be imported
        """
        try:
            import audiotools
            import audiotools.replaygain
        except ImportError:
            raise FatalReplayGainError(
                "Failed to load audiotools: audiotools not found"
            )
        self._mod_audiotools = audiotools
        self._mod_replaygain = audiotools.replaygain
    def open_audio_file(self, item):
        """Open the file to read the PCM stream from the using
        ``item.path``.
        :return: the audiofile instance
        :rtype: :class:`audiotools.AudioFile`
        :raises :exc:`ReplayGainError`: if the file is not found or the
        file format is not supported
        """
        try:
            audiofile = self._mod_audiotools.open(item.path)
        except IOError:
            raise ReplayGainError(
                "File {} was not found".format(item.path)
            )
        except self._mod_audiotools.UnsupportedFile:
            raise ReplayGainError(
                "Unsupported file type {}".format(item.format)
            )
        return audiofile
    def init_replaygain(self, audiofile, item):
        """Return an initialized :class:`audiotools.replaygain.ReplayGain`
        instance, which requires the sample rate of the song(s) on which
        the ReplayGain values will be computed. The item is passed in case
        the sample rate is invalid to log the stored item sample rate.
        :return: initialized replaygain object
        :rtype: :class:`audiotools.replaygain.ReplayGain`
        :raises: :exc:`ReplayGainError` if the sample rate is invalid
        """
        try:
            rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate())
        except ValueError:
            raise ReplayGainError(
                "Unsupported sample rate {}".format(item.samplerate)
            )
        # Bug fix: a stray bare ``return`` used to precede this line,
        # making the method always return None to callers.
        return rg
    def compute_track_gain(self, items):
        """Compute ReplayGain values for the requested items.
        :return list: list of :class:`Gain` objects
        """
        return [self._compute_track_gain(item) for item in items]
    def _title_gain(self, rg, audiofile):
        """Get the gain result pair from PyAudioTools using the `ReplayGain`
        instance `rg` for the given `audiofile`.
        Wraps `rg.title_gain(audiofile.to_pcm())` and throws a
        `ReplayGainError` when the library fails.
        """
        try:
            # The method needs an audiotools.PCMReader instance that can
            # be obtained from an audiofile instance.
            return rg.title_gain(audiofile.to_pcm())
        except ValueError as exc:
            # `audiotools.replaygain` can raise a `ValueError` if the sample
            # rate is incorrect.
            self._log.debug('error in rg.title_gain() call: {}', exc)
            raise ReplayGainError('audiotools audio data error')
    def _compute_track_gain(self, item):
        """Compute ReplayGain value for the requested item.
        :rtype: :class:`Gain`
        """
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)
        # Each call to title_gain on a ReplayGain object returns peak and gain
        # of the track. Bug fix: ``_title_gain`` is a method of this backend
        # (as used in ``compute_album_gain``), not of the ReplayGain object,
        # so it must be invoked on ``self``.
        rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile)
        self._log.debug(u'ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}',
                        item.artist, item.title, rg_track_gain, rg_track_peak)
        return Gain(gain=rg_track_gain, peak=rg_track_peak)
    def compute_album_gain(self, album):
        """Compute ReplayGain values for the requested album and its items.
        :rtype: :class:`AlbumGain`
        """
        self._log.debug(u'Analysing album {0}', album)
        # The first item is taken and opened to get the sample rate to
        # initialize the replaygain object. The object is used for all the
        # tracks in the album to get the album values.
        item = list(album.items())[0]
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)
        track_gains = []
        for item in album.items():
            audiofile = self.open_audio_file(item)
            rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile)
            track_gains.append(
                Gain(gain=rg_track_gain, peak=rg_track_peak)
            )
            self._log.debug(u'ReplayGain for track {0}: {1:.2f}, {2:.2f}',
                            item, rg_track_gain, rg_track_peak)
        # After getting the values for all tracks, it's possible to get the
        # album values.
        rg_album_gain, rg_album_peak = rg.album_gain()
        self._log.debug(u'ReplayGain for album {0}: {1:.2f}, {2:.2f}',
                        album, rg_album_gain, rg_album_peak)
        return AlbumGain(
            Gain(gain=rg_album_gain, peak=rg_album_peak),
            track_gains=track_gains
        )
# Main plugin logic.
class ReplayGainPlugin(BeetsPlugin):
    """Provides ReplayGain analysis.

    NOTE: this code targets Python 2 — it uses the ``unicode`` builtin
    and ``itertools.izip`` below.
    """
    # Mapping of config backend names to backend classes.
    backends = {
        "command": CommandBackend,
        "gstreamer": GStreamerBackend,
        "audiotools": AudioToolsBackend,
        "bs1770gain": Bs1770gainBackend
    }
    def __init__(self):
        super(ReplayGainPlugin, self).__init__()
        # default backend is 'command' for backward-compatibility.
        self.config.add({
            'overwrite': False,
            'auto': True,
            'backend': u'command',
            'targetlevel': 89,
        })
        self.overwrite = self.config['overwrite'].get(bool)
        backend_name = self.config['backend'].get(unicode)
        if backend_name not in self.backends:
            raise ui.UserError(
                u"Selected ReplayGain backend {0} is not supported. "
                u"Please select one of: {1}".format(
                    backend_name,
                    u', '.join(self.backends.keys())
                )
            )
        # On-import analysis.
        if self.config['auto']:
            self.import_stages = [self.imported]
        try:
            self.backend_instance = self.backends[backend_name](
                self.config, self._log
            )
        except (ReplayGainError, FatalReplayGainError) as e:
            raise ui.UserError(
                'replaygain initialization failed: {0}'.format(e)
            )
    def track_requires_gain(self, item):
        # True when overwrite is set or either RG field is falsy.
        # NOTE(review): a stored gain/peak of exactly 0 is treated as
        # "missing" by this truthiness test.
        return self.overwrite or \
            (not item.rg_track_gain or not item.rg_track_peak)
    def album_requires_gain(self, album):
        # Skip calculating gain only when *all* files don't need
        # recalculation. This way, if any file among an album's tracks
        # needs recalculation, we still get an accurate album gain
        # value.
        return self.overwrite or \
            any([not item.rg_album_gain or not item.rg_album_peak
                 for item in album.items()])
    def store_track_gain(self, item, track_gain):
        # Persist the computed track gain/peak on the item.
        item.rg_track_gain = track_gain.gain
        item.rg_track_peak = track_gain.peak
        item.store()
        self._log.debug(u'applied track gain {0}, peak {1}',
                        item.rg_track_gain, item.rg_track_peak)
    def store_album_gain(self, album, album_gain):
        # Persist the computed album gain/peak on the album.
        album.rg_album_gain = album_gain.gain
        album.rg_album_peak = album_gain.peak
        album.store()
        self._log.debug(u'applied album gain {0}, peak {1}',
                        album.rg_album_gain, album.rg_album_peak)
    def handle_album(self, album, write):
        """Compute album and track replay gain store it in all of the
        album's items.
        If ``write`` is truthy then ``item.write()`` is called for each
        item. If replay gain information is already present in all
        items, nothing is done.
        """
        if not self.album_requires_gain(album):
            self._log.info(u'Skipping album {0}', album)
            return
        self._log.info(u'analyzing {0}', album)
        try:
            album_gain = self.backend_instance.compute_album_gain(album)
            if len(album_gain.track_gains) != len(album.items()):
                raise ReplayGainError(
                    u"ReplayGain backend failed "
                    u"for some tracks in album {0}".format(album)
                )
            self.store_album_gain(album, album_gain.album_gain)
            for item, track_gain in itertools.izip(album.items(),
                                                   album_gain.track_gains):
                self.store_track_gain(item, track_gain)
                if write:
                    item.try_write()
        except ReplayGainError as e:
            # Per-album analysis failures are logged, not fatal.
            self._log.info(u"ReplayGain error: {0}", e)
        except FatalReplayGainError as e:
            raise ui.UserError(
                u"Fatal replay gain error: {0}".format(e)
            )
    def handle_track(self, item, write):
        """Compute track replay gain and store it in the item.
        If ``write`` is truthy then ``item.write()`` is called to write
        the data to disk. If replay gain information is already present
        in the item, nothing is done.
        """
        if not self.track_requires_gain(item):
            self._log.info(u'Skipping track {0}', item)
            return
        self._log.info(u'analyzing {0}', item)
        try:
            track_gains = self.backend_instance.compute_track_gain([item])
            if len(track_gains) != 1:
                raise ReplayGainError(
                    u"ReplayGain backend failed for track {0}".format(item)
                )
            self.store_track_gain(item, track_gains[0])
            if write:
                item.try_write()
        except ReplayGainError as e:
            # Per-track analysis failures are logged, not fatal.
            self._log.info(u"ReplayGain error: {0}", e)
        except FatalReplayGainError as e:
            raise ui.UserError(
                u"Fatal replay gain error: {0}".format(e)
            )
    def imported(self, session, task):
        """Add replay gain info to items or albums of ``task``.
        """
        if task.is_album:
            self.handle_album(task.album, False)
        else:
            self.handle_track(task.item, False)
    def commands(self):
        """Return the "replaygain" ui subcommand.
        """
        def func(lib, opts, args):
            self._log.setLevel(logging.INFO)
            write = ui.should_write()
            if opts.album:
                for album in lib.albums(ui.decargs(args)):
                    self.handle_album(album, write)
            else:
                for item in lib.items(ui.decargs(args)):
                    self.handle_track(item, write)
        cmd = ui.Subcommand('replaygain', help='analyze for ReplayGain')
        cmd.parser.add_album_option()
        cmd.func = func
        return [cmd]
| 35.992608 | 79 | 0.590876 |
acf996ca7c4d8ea40434609126478a1b94d64736 | 3,534 | py | Python | testinfra/backend/paramiko.py | NTTDATA-UK/testinfra | 47c0dc2e1e1ef23ccbbc5ece39528f9e066c69f2 | [
"Apache-2.0"
] | 1 | 2020-03-09T17:32:39.000Z | 2020-03-09T17:32:39.000Z | testinfra/backend/paramiko.py | NTTDATA-UK/testinfra | 47c0dc2e1e1ef23ccbbc5ece39528f9e066c69f2 | [
"Apache-2.0"
] | null | null | null | testinfra/backend/paramiko.py | NTTDATA-UK/testinfra | 47c0dc2e1e1ef23ccbbc5ece39528f9e066c69f2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import os
try:
import paramiko
except ImportError:
raise RuntimeError((
"You must install paramiko package (pip install paramiko) "
"to use the paramiko backend"))
import paramiko.ssh_exception
from testinfra.backend import base
class IgnorePolicy(paramiko.MissingHostKeyPolicy):
    """Host key policy that silently accepts unknown host keys."""
    def missing_host_key(self, client, hostname, key):
        # Deliberately ignore the missing key: no warning, no storage.
        return None
class ParamikoBackend(base.BaseBackend):
    """Testinfra backend that runs commands over SSH using paramiko."""
    NAME = "paramiko"
    def __init__(self, hostspec, ssh_config=None, *args, **kwargs):
        # hostspec is parsed into host, user and port; the SSH
        # connection itself is created lazily by the `client` property.
        self.host, self.user, self.port = self.parse_hostspec(hostspec)
        self.ssh_config = ssh_config
        self._client = None
        super(ParamikoBackend, self).__init__(self.host, *args, **kwargs)
    @property
    def client(self):
        """Lazily create and cache a connected paramiko SSHClient.

        When an ssh_config file was given, hostname/user/port/identity
        settings for this host are read from it and override the
        hostspec values; ``StrictHostKeyChecking no`` switches the
        missing-host-key policy to IgnorePolicy.
        """
        if self._client is None:
            client = paramiko.SSHClient()
            client.set_missing_host_key_policy(paramiko.WarningPolicy())
            cfg = {
                "hostname": self.host,
                "port": int(self.port) if self.port else 22,
                "username": self.user,
            }
            if self.ssh_config:
                ssh_config = paramiko.SSHConfig()
                with open(os.path.expanduser(self.ssh_config)) as f:
                    ssh_config.parse(f)
                for key, value in ssh_config.lookup(self.host).items():
                    if key == "hostname":
                        cfg[key] = value
                    elif key == "user":
                        cfg["username"] = value
                    elif key == "port":
                        cfg[key] = int(value)
                    elif key == "identityfile":
                        # Only the first listed identity file is used.
                        cfg["key_filename"] = os.path.expanduser(value[0])
                    elif key == "stricthostkeychecking" and value == "no":
                        client.set_missing_host_key_policy(IgnorePolicy())
            client.connect(**cfg)
            self._client = client
        return self._client
    def _exec_command(self, command):
        """Run ``command`` on a fresh SSH channel.

        Returns (exit_status, stdout_bytes, stderr_bytes).
        """
        chan = self.client.get_transport().open_session()
        chan.exec_command(command)
        rc = chan.recv_exit_status()
        stdout = b''.join(chan.makefile('rb'))
        stderr = b''.join(chan.makefile_stderr('rb'))
        return rc, stdout, stderr
    def run(self, command, *args, **kwargs):
        """Execute a command remotely and return a CommandResult.

        If the transport went stale (SSHException with an inactive
        transport), the cached client is dropped and the command is
        retried once on a fresh connection.
        """
        command = self.get_command(command, *args)
        command = self.encode(command)
        try:
            rc, stdout, stderr = self._exec_command(command)
        except paramiko.ssh_exception.SSHException:
            if not self.client.get_transport().is_active():
                # try to reinit connection (once)
                self._client = None
                rc, stdout, stderr = self._exec_command(command)
            else:
                raise
        return self.result(rc, command, stdout, stderr)
| 35.69697 | 74 | 0.605263 |
acf997ced11c9653762a418ec1c467a03dd61672 | 43 | py | Python | sql_faker/sql_faker/random_data/__init__.py | lkmc2/python-sql-faker | a68ac9a011b75b23f20d961fa1da08597ebe9445 | [
"MIT"
] | 12 | 2018-10-12T14:22:35.000Z | 2021-05-04T08:39:12.000Z | sql_faker/sql_faker/random_data/__init__.py | lkmc2/python-sql-faker | a68ac9a011b75b23f20d961fa1da08597ebe9445 | [
"MIT"
] | null | null | null | sql_faker/sql_faker/random_data/__init__.py | lkmc2/python-sql-faker | a68ac9a011b75b23f20d961fa1da08597ebe9445 | [
"MIT"
] | 2 | 2019-09-11T13:11:32.000Z | 2020-12-17T03:43:14.000Z | # coding=utf-8
"""
该模块用于存放随机值生成器
"""
| 6.142857 | 17 | 0.534884 |
acf99a44fc055b7532d01d4231008d36d40110d7 | 144 | py | Python | tensorstock/__init__.py | Hourout/tensorstock | 7c7fa3a47bfd4b8eb505368d018a2a493cb734b6 | [
"Apache-2.0"
] | null | null | null | tensorstock/__init__.py | Hourout/tensorstock | 7c7fa3a47bfd4b8eb505368d018a2a493cb734b6 | [
"Apache-2.0"
] | null | null | null | tensorstock/__init__.py | Hourout/tensorstock | 7c7fa3a47bfd4b8eb505368d018a2a493cb734b6 | [
"Apache-2.0"
] | null | null | null | from tensorstock import chart
from tensorstock import feature
from tensorstock import metrics
__version__ = '0.1.0'
__author__ = 'JinQing Lee' | 20.571429 | 31 | 0.805556 |
acf99b16735919f2fa01bf50bc4e4be9aea749c8 | 1,441 | py | Python | src/kde_crime/kde_test.py | ras9841/UP-STAT-2018 | cad06bfac3c12b4cb14c3b703e23c52cc391383a | [
"MIT"
] | null | null | null | src/kde_crime/kde_test.py | ras9841/UP-STAT-2018 | cad06bfac3c12b4cb14c3b703e23c52cc391383a | [
"MIT"
] | 1 | 2018-05-08T12:16:50.000Z | 2018-05-08T21:28:40.000Z | src/kde_crime/kde_test.py | ras9841/UP-STAT-2018 | cad06bfac3c12b4cb14c3b703e23c52cc391383a | [
"MIT"
] | null | null | null | from spatial_kde import *
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt
data_loc = "../../data/RPD_crime2011toNow.csv"
data = process_RPD_data(data_loc)
print("Loaded data")
Y = data[["class"]]
X = data[["X", "Y"]]
print("Starting Predictions")
n_trials = 25
results = np.zeros([2,n_trials])
for test in range(2):
print("Running test #%d"%(test+1))
for i in range(n_trials):
print("\nRunning trial %d/%d"%(i+1, n_trials))
# Setup Data
if test == 0:
X_tr, X_te, Y_tr, Y_te = train_test_split(X, Y, test_size=0.30)
else:
X_tr, X_te, Y_tr, Y_te = train_test_split(X, Y, test_size=0.30,\
stratify=Y)
train_df = pd.concat([X_tr, Y_tr], axis=1)
y = Y_te.values.reshape(Y_te.shape[0],)
print("Starting KDE")
kde = KDE()
kde.train(train_df)
print("Making predictions")
yhat = kde.predict(X_te)
results[test, i] = compute_accuracy(y, yhat)*100
print("Accuracy: %d%%"%(results[test,i]))
results = results.T
print("NS Accuracy: (%.3f +/- %.3f)%%"%(results[:,0].mean(),\
results[:,0].std()))
print("STRAT Accuracy: (%.3f +/- %.3f)%%"%(results[:,1].mean(),\
results[:,1].std()))
results_df = pd.DataFrame(results, columns=["Random", "Stratified"])
results_df.boxplot()
plt.grid(False)
plt.ylabel("Accuracy (%)")
plt.show()
| 29.408163 | 76 | 0.605135 |
acf99bb656ad7b09715e59005e07c573e3674483 | 3,956 | py | Python | api/healthy2_check.py | qq2380912466/17wanxiaoCheckin | 5db21ec31c35a4e01fa7e405933b5ed1bb7911f1 | [
"MIT"
] | 175 | 2020-07-08T00:56:55.000Z | 2021-03-06T07:32:25.000Z | api/healthy2_check.py | qq2380912466/17wanxiaoCheckin | 5db21ec31c35a4e01fa7e405933b5ed1bb7911f1 | [
"MIT"
] | 39 | 2020-07-19T03:23:12.000Z | 2021-02-03T15:20:02.000Z | api/healthy2_check.py | qq2380912466/17wanxiaoCheckin | 5db21ec31c35a4e01fa7e405933b5ed1bb7911f1 | [
"MIT"
] | 898 | 2020-07-09T02:14:15.000Z | 2021-03-06T07:29:52.000Z | """
第二类健康打卡相关函数
@create:2021/03/10
@filename:healthy2_check.py
@author:ReaJason
@email_addr:reajason@163.com
@blog_website:https://reajason.top
@last_modify:2021/04/24
"""
import time
import requests
from setting import log
def get_healthy2_check_posh_json(token):
    """Fetch the payload data for the type-2 health check-in.

    The recall endpoint is tried up to three times, sleeping one second
    between attempts on network/JSON errors.

    :param token: user token
    :return: the previously reported check-in data (dict) on success,
        otherwise None
    """
    for _ in range(3):
        try:
            res = requests.post(
                url="https://reportedh5.17wanxiao.com/api/reported/recall",
                data={"token": token},
                timeout=10,
            ).json()
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt and
            # SystemExit still propagate; request/JSON errors retry.
            log.warning("完美校园第二类健康打卡post参数获取失败,正在重试...")
            time.sleep(1)
            continue
        if res["code"] == 0:
            log.info("完美校园第二类健康打卡post参数获取成功")
            return res["data"]
        else:
            log.warning(f"完美校园第二类健康打卡post参数获取失败,{res}")
            return None
    # All three attempts raised: give up (previously an implicit None).
    return None
def healthy2_check_in(token, custom_id, post_dict):
    """Submit the type-2 health check-in.

    :param token: user token
    :param custom_id: check-in customer id (only used in the referer header)
    :param post_dict: check-in data previously recalled from the server
    :return: dict with ``status`` 1 and response/payload details on
        success, or ``status`` 0 and an ``errmsg`` on failure
    """
    # No "whereabouts" field means this account uses the other (type-1)
    # check-in flow, so bail out with an explanatory error.
    if not post_dict.get("whereabouts"):
        errmsg = f"完美校园第二类健康打卡方式错误,请选第一类健康打卡"
        log.warning(errmsg)
        return {'status': 0, 'errmsg': errmsg}
    # Rebuild the form payload from the last reported values.
    check_json = {
        "userId": post_dict["userId"],
        "name": post_dict["name"],
        "stuNo": post_dict["stuNo"],
        "whereabouts": post_dict["whereabouts"],
        "familyWhereabouts": "",
        "beenToWuhan": post_dict["beenToWuhan"],
        "contactWithPatients": post_dict["contactWithPatients"],
        "symptom": post_dict["symptom"],
        "fever": post_dict["fever"],
        "cough": post_dict["cough"],
        "soreThroat": post_dict["soreThroat"],
        "debilitation": post_dict["debilitation"],
        "diarrhea": post_dict["diarrhea"],
        "cold": post_dict["cold"],
        "staySchool": post_dict["staySchool"],
        "contacts": post_dict["contacts"],
        "emergencyPhone": post_dict["emergencyPhone"],
        "address": post_dict["address"],
        "familyForAddress": "",
        "collegeId": post_dict["collegeId"],
        "majorId": post_dict["majorId"],
        "classId": post_dict["classId"],
        "classDescribe": post_dict["classDescribeAll"],
        "temperature": post_dict["temperature"],
        "confirmed": post_dict["confirmed"],
        "isolated": post_dict["isolated"],
        "passingWuhan": post_dict["passingWuhan"],
        "passingHubei": post_dict["passingHubei"],
        "patientSide": post_dict["patientSide"],
        "patientContact": post_dict["patientContact"],
        "mentalHealth": post_dict["mentalHealth"],
        "wayToSchool": post_dict["wayToSchool"],
        "backToSchool": post_dict["backToSchool"],
        "haveBroadband": post_dict["haveBroadband"],
        "emergencyContactName": post_dict["emergencyContactName"],
        "helpInfo": "",
        "passingCity": "",
        "longitude": post_dict["longitude"],
        "latitude": post_dict["latitude"],
        "token": token,
    }
    # The endpoint checks the referer, which embeds token and custom_id.
    headers = {
        "referer": f"https://reportedh5.17wanxiao.com/nCovReport/index.html?token={token}&customerId={custom_id}",
        "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
    }
    try:
        res = requests.post(
            "https://reportedh5.17wanxiao.com/api/reported/receive",
            headers=headers,
            data=check_json,
        ).json()
        log.info(res)
        return {
            'status': 1,
            'res': res,
            'post_dict': {
                'name': post_dict["name"],
                "updatainfo_detail": post_dict,
                'checkbox': [{'description': key, 'value': value} for key, value in check_json.items()]
            },
            'check_json': check_json,
            'type': "healthy2",
        }
    # NOTE(review): bare except swallows KeyboardInterrupt/SystemExit
    # too — consider narrowing to ``except Exception``.
    except:
        errmsg = f"完美校园第二类健康打卡打卡请求出错"
        log.warning(errmsg)
        return {'status': 0, 'errmsg': errmsg}
acf99c7c8a9f184d2ce884dc02e51cf9b0494425 | 17,169 | py | Python | unit_tests.py | propelwise/sarle-labeler | 8cdb3d494b46df2bc820592e14c9c8e23d08fa07 | [
"MIT"
] | 2 | 2020-11-24T00:53:28.000Z | 2020-11-24T02:05:39.000Z | unit_tests.py | propelwise/sarle-labeler | 8cdb3d494b46df2bc820592e14c9c8e23d08fa07 | [
"MIT"
] | null | null | null | unit_tests.py | propelwise/sarle-labeler | 8cdb3d494b46df2bc820592e14c9c8e23d08fa07 | [
"MIT"
] | 2 | 2021-03-17T16:36:35.000Z | 2022-01-10T08:20:52.000Z | #unit_tests.py
#Copyright (c) 2020 Rachel Lea Ballantyne Draelos
#MIT License
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE
import os
import re
import copy
import string
import shutil
import pandas as pd
import numpy as np
import load
import term_search
from vocab import gr1cm
from vocab import vocabulary_ct
from rules import rule_functions
#Note: this module originally contained over 1,400 lines of unit testing
#code. However, we decided to keep most of this code private, because it
#is based on CT report data that 'looks real' and we do not want to create
#the impression that any real report data was made public through unit tests
#without permission. The CT reports cannot be made public at this time due to
#patient privacy concerns.
#We have made public all unit tests which do not subjectively appear to reveal
#real report data.
############################
# Testing vocabulary_ct.py #----------------------------------------------------
############################
def test_nodulegr1cm_handling():
    """Exercise nodulegr1cm_handling on sentences with and without a
    nodule larger than 1 cm (expected label: 1 = present, 0 = absent)."""
    cases = [
        (' 0.2 cm left lower lobe pulmonary nodule series 3 image 33 is newly noted ', 0),
        (' 2 mm left upper lobe juxtapleural nodule series 3 image 59 stable ', 0),
        # made up sentence
        (' 2 cm left upper lobe juxtapleural nodule series 3 image 59 stable ', 1),
        (' a few left lower lobe pulmonary nodules are visualized ', 0),
        (' again seen in the right lower lobe is a solid pulmonary nodule measuring approximately 1.2 x 1.2 cm series 4 image 340 increased from prior measurement of 0.8 x 0.8 cm ', 1),
        ('within the left upper lobe there is a round nodule which measures 2.2 x 1.6 cm which is most likely within a pre existing cavity', 1),
    ]
    for sentence, expected in cases:
        assert gr1cm.nodulegr1cm_handling(sentence) == expected
    print('Passed test_nodulegr1cm_handling()')
def test_lymphadenopathy_handling():
    """Exercise lymphadenopathy_handling on sentences with and without
    significant lymphadenopathy (expected label: 1 = present, 0 = absent)."""
    cases = [
        (' 0.9 cm prevascular lymph node is unchanged ', 0),
        (' 1 cm low right paratracheal lymph node is nonspecific and may be reactive in nature ', 0),
        (' while this may represent a small lymph node a small esophageal diverticulum may have a similar appearance ', 0),
        (' no severe change in 1.4 cm right paratracheal lymph node series 3 image 18 ', 1),
        (' an enlarged subcarinal lymph node is seen measuring up to 1.3 cm ', 1),
        (' stable right precarinal lymph node that measures 1.2 cm in short axis ', 1),
        # made up sentence
        ('severe lymphadenopathy', 1),
    ]
    for sentence, expected in cases:
        assert gr1cm.lymphadenopathy_handling(sentence) == expected
    print('Passed test_lymphadenopathy_handling()')
#############################
# Testing rule_functions.py #---------------------------------------------------
#############################
def test_delete_mainword():
    """Check that delete_mainword removes only the main word, leaving the
    rest of the sentence intact."""
    # (input sentence, mainword to delete, expected cleaned sentence)
    cases = [
        (' visualized upper abdomen demonstrates calcific atherosclerosis of the aorta otherwise unremarkable ',
         ' otherwise unremarkable',
         ' visualized upper abdomen demonstrates calcific atherosclerosis of the aorta '),
        (' near complete resolution of air fluid level in the left upper lobe ',
         ' near complete resolution of',
         ' air fluid level in the left upper lobe '),
        (' near resolution of a previously seen 5 mm right upper lobe nodule likely reflecting resolving infection or inflammation ',
         ' near resolution of',
         ' a previously seen 5 mm right upper lobe nodule likely reflecting resolving infection or inflammation '),
    ]
    for sentence, mainword, expected in cases:
        _, cleaned = rule_functions.delete_mainword(sentence=sentence, mainword=mainword)
        assert cleaned == expected
    print('Passed test_delete_mainword()')
def test_delete_part():
    """Verify rule_functions.delete_part removes everything before/after the mainword, per the delete_part argument."""
    #Example for ' within normal limits'
    x1 = ' main pulmonary artery within normal limits in size '
    _, o1 = rule_functions.delete_part(sentence=x1,delete_part='before',mainword=' within normal limits')
    c1 = ' in size '
    assert o1==c1
    #Example for ' normal in'
    x2 = ' the remainder of the airways including the trachea bronchus intermedius right middle and lower lobe bronchi and left upper and lower lobe bronchi appear normal in caliber and are clear '
    _, o2 = rule_functions.delete_part(sentence=x2,delete_part='before',mainword=' normal in')
    c2 = ' caliber and are clear '
    assert o2==c2
    #Example for ' normal size'
    x3 = ' there are patent internal iliac arteries and the bilateral external iliac arteries common femoral proximal sfa and profunda are all normal size and caliber without atherosclerotic disease '
    _, o3 = rule_functions.delete_part(sentence=x3,delete_part='before',mainword=' normal size')
    c3 = ' and caliber without atherosclerotic disease '
    assert o3==c3
    #Example for ' without'
    x4 = ' there are patent internal iliac arteries and the bilateral external iliac arteries common femoral proximal sfa and profunda are all normal size and caliber without atherosclerotic disease '
    _, o4 = rule_functions.delete_part(sentence=x4,delete_part='after',mainword=' without')
    c4 = ' there are patent internal iliac arteries and the bilateral external iliac arteries common femoral proximal sfa and profunda are all normal size and caliber'
    assert o4==c4
    #Example for ' resolution of'
    x5 = ' interval resolution of previously described small groundglass nodules '
    _, o5 = rule_functions.delete_part(sentence=x5,delete_part='after',mainword=' resolution of')
    c5 = ' interval'
    assert o5==c5
    #Example for ' removal of'
    x6 = ' interval removal of a surgical drain in the left aspect of the clamshell sternotomy '
    _, o6 = rule_functions.delete_part(sentence=x6,delete_part='after',mainword=' removal of')
    c6 = ' interval'
    assert o6==c6
    #Example for ' removed'
    x7 = ' previously noted left pleural pigtail catheter appears to have been removed '
    _, o7 = rule_functions.delete_part(sentence=x7,delete_part='before',mainword=' removed')
    c7 = ' ' #space remains when we delete everything before the word
    assert o7==c7
    #Example for ' free of' (made up example)
    x8 = ' free of consolidation or signs of infection'
    _, o8 = rule_functions.delete_part(sentence=x8,delete_part='after',mainword=' free of')
    c8 = '' #no space when we delete everything after the word
    assert o8==c8
    print('Passed test_delete_part()')
def test_delete_part_until():
    """Verify rule_functions.delete_part_until deletes words after/before the mainword only until a word in until_hit is reached."""
    #Example for ' no '
    x1 = 'otherwise no significant change in findings on ct examination of the chest with partial atelectasis of the right upper lobe and a right hilar mass as well as mediastinal lymphadenopathy and multiple pulmonary nodules'
    _, o1 = rule_functions.delete_part_until(x1, 'after', ' no ', until_hit=['and','change'])
    c1 = 'otherwise change in findings on ct examination of the chest with partial atelectasis of the right upper lobe and a right hilar mass as well as mediastinal lymphadenopathy and multiple pulmonary nodules'
    assert o1==c1
    x2 = ' there is a an oblong focus of consolidation within the posterior medial right base on image 89 series 4 adjacent to the pleural effusion this contains no air bronchograms and appears to obliterate some posterior basilar subsegmental bronchi '
    _, o2 = rule_functions.delete_part_until(x2, 'after', ' no ', until_hit=['and','change'])
    c2 = ' there is a an oblong focus of consolidation within the posterior medial right base on image 89 series 4 adjacent to the pleural effusion this contains and appears to obliterate some posterior basilar subsegmental bronchi '
    assert o2==c2
    x3 = ' there is no axillary adenopathy and there are scattered mediastinal nodes and a normal size main pulmonary artery with severely enlarged left atrium and left atrial appendage'
    _, o3 = rule_functions.delete_part_until(x3, 'after', ' no ', until_hit=['and','change'])
    c3 = ' there is and there are scattered mediastinal nodes and a normal size main pulmonary artery with severely enlarged left atrium and left atrial appendage'
    assert o3==c3
    #Examples made up to test 'before'
    x4 = ' this is a made up sentence no to test the function '
    _, o4 = rule_functions.delete_part_until(x4, 'before', ' test', until_hit=['made','up'])
    c4 = ' this is a made up the function '
    assert o4==c4
    print('Passed test_delete_part_until()')
def test_delete_entire_unless_immediate():
    """Verify rule_functions.delete_entire_unless_immediate blanks the sentence unless a word from unless_in appears within wrange words of the mainword."""
    #Example for ' not '
    x1 = ' immediately posterior to the sternomanubrial junction is a small fluid collection with an air fluid level also favored to represent postoperative change although an abscess or phlegmon is not entirely excluded '
    _, o1 = rule_functions.delete_entire_unless_immediate(sentence=x1,mainword=' not',position='after',wrange=2,unless_in=['exclude','change'])
    c1 = ' immediately posterior to the sternomanubrial junction is a small fluid collection with an air fluid level also favored to represent postoperative change although an abscess or phlegmon is not entirely excluded '
    assert o1==c1
    x2 = ' the main pulmonary artery is not dilated '
    _, o2 = rule_functions.delete_entire_unless_immediate(sentence=x2,mainword=' not',position='after',wrange=2,unless_in=['exclude','change'])
    c2 = ''
    assert o2==c2
    #Example for ' resolved'
    x3 = ' previously described anterior loculated components have resolved '
    _, o3 = rule_functions.delete_entire_unless_immediate(x3,mainword=' resolved',position='before',wrange=1,unless_in=['almost','near','partial','large','essential'])
    c3 = ''
    assert o3==c3
    x4 = ' compared to most recent prior examination from january diffuse bilateral consolidative and ground glass opacities are essentially resolved as are bilateral effusions '
    _, o4 = rule_functions.delete_entire_unless_immediate(x4,mainword=' resolved',position='before',wrange=1,unless_in=['almost','near','partial','large','essential'])
    c4 = ' compared to most recent prior examination from january diffuse bilateral consolidative and ground glass opacities are essentially resolved as are bilateral effusions '
    assert o4==c4
    print('Passed test_delete_entire_unless_immediate()')
def test_delete():
    """Verify rule_functions.delete blanks the whole sentence whenever the mainword occurs in it."""
    #Example for ' normal'
    x1 = ' the remainder of the airways including the trachea bronchus intermedius right middle and lower lobe bronchi and left upper and lower lobe bronchi appear normal in caliber and are clear '
    _, o1 = rule_functions.delete(x1,' normal')
    assert o1==''
    #Example for ' unremarkable'
    x2 = ' the upper abdomen is unremarkable '
    _, o2 = rule_functions.delete(x2,' unremarkable')
    assert o2==''
    #Example for ' negative for'
    x3 = ' negative for malignancy ' #made up
    _, o3 = rule_functions.delete(x3, ' negative for')
    assert o3==''
    print('Passed test_delete()')
def test_delete_if_first_word():
    """Check that delete_if_first_word blanks a sentence whose first word is the trigger word."""
    sentence = 'please refer to the concurrent ct abdomen pelvis report for additional details'
    # A leading-space variant must be handled the same as the bare sentence.
    for candidate in (sentence, ' ' + sentence):
        _, cleaned = rule_functions.delete_if_first_word(candidate, 'please')
        assert cleaned == ''
    print('Passed test_delete_if_first_word()')
def test_non_handling():
    """Verify rule_functions.non_handling drops 'non <word>' and 'non<word>' phrases while keeping the rest of the sentence."""
    x1 = ' 8mm non calcified nodule right lower lobe nodule is unchanged '
    _, o1 = rule_functions.non_handling(x1, 'non')
    c1 =' 8mm nodule right lower lobe nodule is unchanged '
    assert o1==c1
    x2 = ' a lytic lesion of the posterior right 6th rib is seen which may now contain a non displaced fracture '
    _, o2 = rule_functions.non_handling(x2, 'non')
    c2 = ' a lytic lesion of the posterior right 6th rib is seen which may now contain a fracture '
    assert o2==c2
    x3 = ' 1 cm low right paratracheal lymph node is nonspecific and may be reactive in nature '
    _, o3 = rule_functions.non_handling(x3, 'non')
    c3 = ' 1 cm low right paratracheal lymph node is and may be reactive in nature '
    assert o3==c3
    print('Passed test_non_handling()')
def test_patent_handling():
    """Verify rule_functions.patent_handling removes the normal-finding 'patent ...' clause and keeps any trailing abnormal findings."""
    x1 = ' tracheobronchial tree is patent '
    _, o1 = rule_functions.patent_handling(x1, ' patent')
    c1 = ' '
    assert o1==c1
    x2 = 'patent bronchial anastomoses '
    _, o2 = rule_functions.patent_handling(x2, ' patent')
    c2 = ' '
    assert o2==c2
    x3 = ' the bronchial anastomoses are patent and intact and the central bronchi are patent and perhaps slightly dilated '
    _, o3 = rule_functions.patent_handling(x3, ' patent')
    c3 = ' and perhaps slightly dilated '
    assert o3==c3
    x4 = ' central airways are patent with some groundglass upper lobe opacities apically that have developed favor radiation changes '
    _, o4 = rule_functions.patent_handling(x4, ' patent')
    c4 = ' with some groundglass upper lobe opacities apically that have developed favor radiation changes '
    assert o4==c4
    x5 = ' patent central airways status post bilateral lung transplantation.bilateral chest tubes remains in place '
    _, o5 = rule_functions.patent_handling(x5, ' patent')
    c5 = ' status post bilateral lung transplantation.bilateral chest tubes remains in place '
    assert o5==c5
    x6 = ' patent central airways with debris within the trachea '
    _, o6 = rule_functions.patent_handling(x6, ' patent')
    c6 = ' with debris within the trachea '
    assert o6==c6
    print('Passed test_patent_handling()')
def test_clear_handling():
    """Verify rule_functions.clear_handling removes the normal-finding 'clear' clause and keeps any remaining clinically relevant text."""
    x1 = ' the right lung remains clear '
    _, o1 = rule_functions.clear_handling(x1, ' clear')
    c1 = ' '
    assert c1==o1
    x2 = ' the central airways are clear status post bilateral lung transplantation '
    _, o2 = rule_functions.clear_handling(x2, ' clear')
    c2 = ' status post bilateral lung transplantation '
    assert c2==o2
    x3 = ' central airways are clear with normal caliber of the left bronchial anastomosis status post solitary left lung transplant '
    _, o3 = rule_functions.clear_handling(x3,' clear')
    c3 = ' status post solitary left lung transplant '
    assert c3==o3
    print('Passed test_clear_handling()')
def test_subcentimeter_handling():
    """Verify rule_functions.subcentimeter_handling removes 'subcentimeter ... node(s)' phrases but leaves other subcentimeter findings alone."""
    #Don't change the sentence if the word 'node' is not present after the word 'subcentimeter'
    x1 = ' enlarged lymph node and subcentimeter nodules in the left lung ' #made up
    _, o1 = rule_functions.subcentimeter_handling(x1, ' subcentimeter')
    c1 = ' enlarged lymph node and subcentimeter nodules in the left lung '
    assert o1==c1
    x2 = '1.3 cm pretracheal lymph node unchanged there are a few other subcentimeter lymph nodes which are not changed from prior'
    _, o2 = rule_functions.subcentimeter_handling(x2, ' subcentimeter')
    c2 = '1.3 cm pretracheal lymph node unchanged there are a few others which are not changed from prior'
    assert o2==c2
    x3 = '1.5 cm mediastinal lymph node and a subcentimeter lymph node' #made up
    _, o3 = rule_functions.subcentimeter_handling(x3, ' subcentimeter')
    c3 = '1.5 cm mediastinal lymph node and a'
    assert o3==c3
    print('Passed test_subcentimeter_handling()')
if __name__=='__main__':
    # Run every unit test in order; each prints its own pass message and
    # raises AssertionError on the first failure.
    _all_tests = (
        test_nodulegr1cm_handling,
        test_lymphadenopathy_handling,
        test_delete_mainword,
        test_delete_part,
        test_delete_part_until,
        test_delete_entire_unless_immediate,
        test_delete,
        test_delete_if_first_word,
        test_non_handling,
        test_patent_handling,
        test_clear_handling,
        test_subcentimeter_handling,
    )
    for _run_test in _all_tests:
        _run_test()
| 50.946588 | 253 | 0.719611 |
acf99ca3c2bcf55b77903114ff7cfaaf1269c6cd | 1,763 | py | Python | python/oneflow/compatible/single_client/eager/op_infer_util.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 3,285 | 2020-07-31T05:51:22.000Z | 2022-03-31T15:20:16.000Z | python/oneflow/compatible/single_client/eager/op_infer_util.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 2,417 | 2020-07-31T06:28:58.000Z | 2022-03-31T23:04:14.000Z | python/oneflow/compatible/single_client/eager/op_infer_util.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 520 | 2020-07-31T05:52:42.000Z | 2022-03-29T02:38:11.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from google.protobuf import text_format
from oneflow._oneflow_internal.oneflow.core.operator import (
op_node_signature as op_node_signature_cfg,
)
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.framework import c_api_util as c_api_util
from oneflow.core.operator import op_node_signature_pb2 as op_node_signature_pb
def Infer(op_conf, ibn2blob_object, scope_symbol_id=None):
    """Infer the output attributes of ``op_conf`` from its input blob objects.

    Falls back to the symbol id of the current scope when ``scope_symbol_id``
    is not supplied. Mutates ``op_conf`` by setting its ``scope_symbol_id``.
    """
    if scope_symbol_id is None:
        scope_symbol_id = flow.current_scope().symbol_id
    op_conf.scope_symbol_id = scope_symbol_id
    upstream_signature = MakeUpstreamSignature(ibn2blob_object)
    return c_api_util.InferOpConf(op_conf, upstream_signature)
def MakeUpstreamSignature(ibn2blob_object):
    """Build an ``OpNodeSignature`` protobuf from input-blob-name -> blob-object pairs.

    Each blob object dumps its blob-attribute and parallel-attribute signatures
    into a cfg message, which is then serialized to text and re-parsed into the
    protobuf representation expected by the C API.
    """
    upstream_signature_cfg = op_node_signature_cfg.OpNodeSignature()
    for (ibn, blob_object) in ibn2blob_object.items():
        blob_object.op_arg_blob_attr.DumpToOpNodeSignature(ibn, upstream_signature_cfg)
        blob_object.op_arg_parallel_attr.DumpToOpNodeSignature(
            ibn, upstream_signature_cfg
        )
    return text_format.Parse(
        str(upstream_signature_cfg), op_node_signature_pb.OpNodeSignature()
    )
| 40.068182 | 87 | 0.79637 |
acf99ce05799abd3ef84bdc15d40310482811345 | 3,389 | py | Python | sample.py | alexcdot/gen-MA-BC | ef0cb71f461ed7241fd2961c3605a91caa13d07b | [
"MIT"
] | null | null | null | sample.py | alexcdot/gen-MA-BC | ef0cb71f461ed7241fd2961c3605a91caa13d07b | [
"MIT"
] | null | null | null | sample.py | alexcdot/gen-MA-BC | ef0cb71f461ed7241fd2961c3605a91caa13d07b | [
"MIT"
] | null | null | null | import argparse
import os
import pickle
import torch
import torch.nn as nn
from torch.autograd import Variable
from model import *
from bball_data import BBallData
# Command-line interface: which saved trial to load and how to sample from it.
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--trial', type=int, required=True, help='trial')
parser.add_argument('-n', '--n_samples', type=int, default=5, required=False, help='number of samples')
parser.add_argument('-b', '--burn_in', type=int, default=0, required=False, help='burn-in period')
parser.add_argument('-l', '--seq_len', type=int, default=0, required=False, help='length of sequence')
parser.add_argument('-m', '--model', type=str, default='best', required=False, help='which saved model to sample from')
parser.add_argument('-s', '--seqs_per_sample', type=int, default=1, required=False, help='number of sequences per sample')
parser.add_argument('-f', '--filedesc', type=str, default='', required=False, help='descriptor to add to end of filename')
parser.add_argument('--shuffle', action='store_true', default=False, help='shuffle ground-truth burn-in from test set')
args = parser.parse_args()
trial = args.trial
save_path = 'saved/%03d/' % trial
# NOTE(review): files opened inline for pickle here and below are never
# explicitly closed; relies on interpreter cleanup.
params = pickle.load(open(save_path+'params.p', 'rb'))
if not torch.cuda.is_available():
    params['cuda'] = False
# make samples folder
if not os.path.exists(save_path+'samples/'):
    os.makedirs(save_path+'samples/')
# load the model
if params['cuda']:
    state_dict = torch.load(save_path+'model/'+params['model']+'_state_dict_'
                            +args.model+'.pth')
else:
    state_dict = torch.load(save_path+'model/'+params['model']+'_state_dict_'
                            +args.model+'.pth', map_location='cpu')
# NOTE(review): eval() instantiates the model class by name; assumes
# params['model'] comes from a trusted params.p written by training.
model = eval(params['model'])(params)
if params['cuda']:
    model.cuda()
model.load_state_dict(state_dict)
# set the burn-in (and save for plotting)
# TODO: need a better way to save different burn-ins for different sets of samples
params['burn_in'] = args.burn_in
params['seqs_per_sample'] = args.seqs_per_sample
pickle.dump(params, open(save_path+'params.p', 'wb'), protocol=2)
print(params)
# set up the file name
file_desc = '' if len(args.filedesc) == 0 else '_'+args.filedesc
# sample for a fixed sequence length
if args.seq_len > 0:
    file_desc += '_len'+str(args.seq_len)
# load ground-truth burn-ins
test_loader = torch.utils.data.DataLoader(
    BBallData(train=False, preprocess=True, subsample=params['subsample'],
              params=params),
    batch_size=args.n_samples, shuffle=args.shuffle)
data, macro_goals = next(iter(test_loader))
if params['cuda']:
    data, macro_goals = data.cuda(), macro_goals.cuda()
# Reshape to time-major [seq_len, batch, dim] as the model's sample() expects.
data = Variable(data.squeeze().transpose(0, 1))
macro_goals = Variable(macro_goals.squeeze().transpose(0, 1))
# generate samples
if params.get('genMacro'):
    samples, macro_samples = model.sample(data, macro_goals, burn_in=params['burn_in'],
                                          seq_len=args.seq_len, seqs_per_sample=args.seqs_per_sample)
    # save macro-goals
    if hasattr(macro_samples.data, "cpu"):
        macro_samples = macro_samples.data.cpu().numpy()
    pickle.dump(macro_samples, open(save_path+'samples/macro_goals'+file_desc+'.p', 'wb'))
else:
    samples = model.sample(data, macro_goals, burn_in=params['burn_in'],
                           seq_len=args.seq_len, seqs_per_sample=args.seqs_per_sample)
# save samples
if hasattr(samples.data, "cpu"):
    samples = samples.data.cpu().numpy()
pickle.dump(samples, open(save_path+'samples/samples'+file_desc+'.p', 'wb'))
| 36.836957 | 122 | 0.727353 |
acf99eaa7e8adff801e310ffbd58516d0406664b | 1,173 | py | Python | BIGAN/efficient_gan.py | yusukekyokawa/BiGAN | 858c8417ccced44d5eb92178dee9c413567e20d9 | [
"MIT"
] | null | null | null | BIGAN/efficient_gan.py | yusukekyokawa/BiGAN | 858c8417ccced44d5eb92178dee9c413567e20d9 | [
"MIT"
] | null | null | null | BIGAN/efficient_gan.py | yusukekyokawa/BiGAN | 858c8417ccced44d5eb92178dee9c413567e20d9 | [
"MIT"
] | null | null | null | import numpy as np
from keras.models import Model
from keras.layers import Input, Dense
import keras.backend as K
def sum_of_residual(y_true, y_pred):
    """L1 reconstruction loss: sum of absolute differences between target and prediction."""
    residual = y_true - y_pred
    return K.sum(K.abs(residual))
class EfficientGAN(object):
    """Anomaly detector built around a frozen, pre-trained generator.

    A trainable dense layer is placed in front of the (frozen) generator so
    that a latent code can be optimized to reconstruct a query sample; the
    final reconstruction loss serves as the anomaly score.
    """
    def __init__(self, input_dim, g):
        # input_dim: dimensionality of the latent vector fed to the generator
        # g: pre-trained Keras generator model; its weights are frozen here
        self.input_dim = input_dim
        self.g = g
        g.trainable = False
        # Input layer can't be trained. Add new layer as same size & same distribution
        anogan_in = Input(shape=(input_dim,))
        g_in = Dense((input_dim), activation='tanh', trainable=True)(anogan_in)
        g_out = g(g_in)
        self.model = Model(inputs=anogan_in, outputs=g_out)
        self.model_weight = None
    def compile(self, optim):
        """Compile with the L1 residual loss; learning phase 0 keeps layers in inference mode."""
        self.model.compile(loss=sum_of_residual, optimizer=optim)
        K.set_learning_phase(0)
    def compute_anomaly_score(self, x, iterations=300):
        """Fit a random latent vector so the generator reproduces ``x``.

        Returns (final reconstruction loss, generated lookalike sample).
        """
        z = np.random.uniform(-1, 1, size=(1, self.input_dim))
        # learning for changing latent
        loss = self.model.fit(z, x, batch_size=1, epochs=iterations, verbose=0)
        loss = loss.history['loss'][-1]
        similar_data = self.model.predict_on_batch(z)
        return loss, similar_data
acf9a021b4a957992f66891bc334b15afb009a67 | 718 | py | Python | probe/probetest.py | rfrsilva/TeaStore | 84273ffdf4dd2a06d2c48acd124333ddb330c4e0 | [
"Apache-2.0"
] | null | null | null | probe/probetest.py | rfrsilva/TeaStore | 84273ffdf4dd2a06d2c48acd124333ddb330c4e0 | [
"Apache-2.0"
] | null | null | null | probe/probetest.py | rfrsilva/TeaStore | 84273ffdf4dd2a06d2c48acd124333ddb330c4e0 | [
"Apache-2.0"
] | 2 | 2021-07-17T15:00:42.000Z | 2021-07-17T15:36:27.000Z | import os
import time
import datetime
"""
Based on the amazing guide
http://www.dabeaz.com/generators/Generators.pdf
Works as tail -f
:param file_obj:
:return
"""
# Follow the log file like `tail -f` for 20 minutes (Python 2 script).
file_obj = open("mylogfile.log", "r")
file_obj.seek(0, os.SEEK_END) # End-of-file
count = 0
stoptime = datetime.datetime.now() + datetime.timedelta(minutes=20)
while datetime.datetime.now() < stoptime:
    line = file_obj.readline()
    if len(line) != 0:
        if line[-1] != '\n':
            # Writer is mid-line: wait and poll again.
            # NOTE(review): the partial text just read is discarded; the next
            # readline() returns only the remainder of that line, so split
            # lines may be processed incompletely -- TODO confirm acceptable.
            time.sleep(0.1) # Sleep briefly
            continue
        print "\n"
        print "Linha:" + line
        # Parse the comma-separated line into stripped fields.
        line = [x.strip() for x in line.split(',')]
        print "Tamanho da linha:" + str(len(line))
        if len(line) > 2:
            print line[1]
acf9a0ad0dc33214e0d0b2f48e0bbfe2df63f052 | 11,929 | py | Python | chatterbot/trainers.py | tigerTech888/ChatterBot-master | 62f4cfdf9f13830f3d60138375573a9b7256e601 | [
"BSD-3-Clause"
] | null | null | null | chatterbot/trainers.py | tigerTech888/ChatterBot-master | 62f4cfdf9f13830f3d60138375573a9b7256e601 | [
"BSD-3-Clause"
] | null | null | null | chatterbot/trainers.py | tigerTech888/ChatterBot-master | 62f4cfdf9f13830f3d60138375573a9b7256e601 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import csv
import time
from dateutil import parser as date_parser
from chatterbot.conversation import Statement
from chatterbot.tagging import PosLemmaTagger
from chatterbot import utils
class Trainer(object):
"""
Base class for all other trainer classes.
:param boolean show_training_progress: Show progress indicators for the
trainer. The environment variable ``CHATTERBOT_SHOW_TRAINING_PROGRESS``
can also be set to control this. ``show_training_progress`` will override
the environment variable if it is set.
"""
def __init__(self, chatbot, **kwargs):
self.chatbot = chatbot
environment_default = os.getenv('CHATTERBOT_SHOW_TRAINING_PROGRESS', True)
self.show_training_progress = kwargs.get(
'show_training_progress',
environment_default
)
def get_preprocessed_statement(self, input_statement):
"""
Preprocess the input statement.
"""
for preprocessor in self.chatbot.preprocessors:
input_statement = preprocessor(input_statement)
return input_statement
def train(self, *args, **kwargs):
"""
This method must be overridden by a child class.
"""
raise self.TrainerInitializationException()
class TrainerInitializationException(Exception):
"""
Exception raised when a base class has not overridden
the required methods on the Trainer base class.
"""
def __init__(self, message=None):
default = (
'A training class must be specified before calling train(). '
'See http://chatterbot.readthedocs.io/en/stable/training.html'
)
super().__init__(message or default)
def _generate_export_data(self):
result = []
for statement in self.chatbot.storage.filter():
if statement.in_response_to:
result.append([statement.in_response_to, statement.text])
return result
def export_for_training(self, file_path='./export.json'):
"""
Create a file from the database that can be used to
train other chat bots.
"""
import json
export = {'conversations': self._generate_export_data()}
with open(file_path, 'w+') as jsonfile:
json.dump(export, jsonfile, ensure_ascii=False)
class ListTrainer(Trainer):
    """
    Allows a chat bot to be trained using a list of strings
    where the list represents a conversation.
    """

    def train(self, conversation):
        """
        Train the chat bot based on the provided list of
        statements that represents a single conversation.
        """
        previous_statement_text = None
        previous_statement_search_text = ''

        statements_to_create = []

        for conversation_count, text in enumerate(conversation):
            if self.show_training_progress:
                utils.print_progress_bar(
                    'List Trainer',
                    conversation_count + 1, len(conversation)
                )

            statement_search_text = self.chatbot.storage.tagger.get_text_index_string(text)

            # Each statement is linked to the one before it in the list.
            statement = self.get_preprocessed_statement(
                Statement(
                    text=text,
                    search_text=statement_search_text,
                    in_response_to=previous_statement_text,
                    search_in_response_to=previous_statement_search_text,
                    conversation='training'
                )
            )

            previous_statement_text = statement.text
            previous_statement_search_text = statement_search_text

            statements_to_create.append(statement)

        # Persist all statements in one bulk operation.
        self.chatbot.storage.create_many(statements_to_create)
class ChatterBotCorpusTrainer(Trainer):
    """
    Allows the chat bot to be trained using data from the
    ChatterBot dialog corpus.
    """

    def train(self, *corpus_paths):
        """Train from one or more corpus files or directories of corpus files."""
        from chatterbot.corpus import load_corpus, list_corpus_files

        data_file_paths = []

        # Get the paths to each file the bot will be trained with
        for corpus_path in corpus_paths:
            data_file_paths.extend(list_corpus_files(corpus_path))

        for corpus, categories, file_path in load_corpus(*data_file_paths):

            statements_to_create = []

            # Train the chat bot with each statement and response pair
            for conversation_count, conversation in enumerate(corpus):

                if self.show_training_progress:
                    utils.print_progress_bar(
                        'Training ' + str(os.path.basename(file_path)),
                        conversation_count + 1,
                        len(corpus)
                    )

                previous_statement_text = None
                previous_statement_search_text = ''

                for text in conversation:

                    statement_search_text = self.chatbot.storage.tagger.get_text_index_string(text)

                    # Link each statement to the previous one in the conversation
                    # and tag it with the corpus categories.
                    statement = Statement(
                        text=text,
                        search_text=statement_search_text,
                        in_response_to=previous_statement_text,
                        search_in_response_to=previous_statement_search_text,
                        conversation='training'
                    )

                    statement.add_tags(*categories)

                    statement = self.get_preprocessed_statement(statement)

                    previous_statement_text = statement.text
                    previous_statement_search_text = statement_search_text

                    statements_to_create.append(statement)

            # Bulk-insert all statements gathered from this corpus file.
            self.chatbot.storage.create_many(statements_to_create)
class UbuntuCorpusTrainer(Trainer):
    """
    Allow chatbots to be trained with the data from the Ubuntu Dialog Corpus.
    """

    def __init__(self, chatbot, **kwargs):
        super().__init__(chatbot, **kwargs)
        home_directory = os.path.expanduser('~')

        # URL of the corpus tarball; overridable via keyword argument.
        self.data_download_url = kwargs.get(
            'ubuntu_corpus_data_download_url',
            'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz'
        )

        self.data_directory = kwargs.get(
            'ubuntu_corpus_data_directory',
            os.path.join(home_directory, 'ubuntu_data')
        )

        self.extracted_data_directory = os.path.join(
            self.data_directory, 'ubuntu_dialogs'
        )

        # Create the data directory if it does not already exist
        if not os.path.exists(self.data_directory):
            os.makedirs(self.data_directory)

    def is_downloaded(self, file_path):
        """
        Check if the data file is already downloaded.
        """
        if os.path.exists(file_path):
            self.chatbot.logger.info('File is already downloaded')
            return True

        return False

    def is_extracted(self, file_path):
        """
        Check if the data file is already extracted.
        """
        if os.path.isdir(file_path):
            self.chatbot.logger.info('File is already extracted')
            return True
        return False

    def download(self, url, show_status=True):
        """
        Download a file from the given url.
        Show a progress indicator for the download status.
        Based on: http://stackoverflow.com/a/15645088/1547223
        """
        import requests

        file_name = url.split('/')[-1]
        file_path = os.path.join(self.data_directory, file_name)

        # Do not download the data if it already exists
        if self.is_downloaded(file_path):
            return file_path

        with open(file_path, 'wb') as open_file:
            print('Downloading %s' % url)
            response = requests.get(url, stream=True)
            total_length = response.headers.get('content-length')

            if total_length is None:
                # No content length header
                open_file.write(response.content)
            else:
                # Stream the body in chunks, drawing a 50-character progress bar.
                download = 0
                total_length = int(total_length)
                for data in response.iter_content(chunk_size=4096):
                    download += len(data)
                    open_file.write(data)
                    if show_status:
                        done = int(50 * download / total_length)
                        sys.stdout.write('\r[%s%s]' % ('=' * done, ' ' * (50 - done)))
                        sys.stdout.flush()

            # Add a new line after the download bar
            sys.stdout.write('\n')

        print('Download location: %s' % file_path)
        return file_path

    def extract(self, file_path):
        """
        Extract a tar file at the specified file path.
        """
        import tarfile

        print('Extracting {}'.format(file_path))

        if not os.path.exists(self.extracted_data_directory):
            os.makedirs(self.extracted_data_directory)

        def track_progress(members):
            # Print a dot once when iteration starts; yields members unchanged.
            sys.stdout.write('.')
            for member in members:
                # This will be the current file being extracted
                yield member

        # NOTE(review): extractall() without member path validation is
        # vulnerable to path traversal if the archive is malicious
        # (see the tarfile documentation's extraction warning).
        with tarfile.open(file_path) as tar:
            tar.extractall(path=self.extracted_data_directory, members=track_progress(tar))

        self.chatbot.logger.info('File extracted to {}'.format(self.extracted_data_directory))

        return True

    def train(self):
        """Download, extract and ingest the corpus, batching 10000 TSV files per bulk insert."""
        import glob

        tagger = PosLemmaTagger(language=self.chatbot.storage.tagger.language)

        # Download and extract the Ubuntu dialog corpus if needed
        corpus_download_path = self.download(self.data_download_url)

        # Extract if the directory does not already exist
        if not self.is_extracted(self.extracted_data_directory):
            self.extract(corpus_download_path)

        extracted_corpus_path = os.path.join(
            self.extracted_data_directory,
            '**', '**', '*.tsv'
        )

        def chunks(items, items_per_chunk):
            # Yield successive fixed-size slices of the list.
            for start_index in range(0, len(items), items_per_chunk):
                end_index = start_index + items_per_chunk
                yield items[start_index:end_index]

        file_list = glob.glob(extracted_corpus_path)

        file_groups = tuple(chunks(file_list, 10000))

        start_time = time.time()

        for tsv_files in file_groups:

            statements_from_file = []

            for tsv_file in tsv_files:
                with open(tsv_file, 'r', encoding='utf-8') as tsv:
                    reader = csv.reader(tsv, delimiter='\t')

                    previous_statement_text = None
                    previous_statement_search_text = ''

                    for row in reader:
                        if len(row) > 0:
                            # Rows are: timestamp, speaker, addressee, text.
                            statement = Statement(
                                text=row[3],
                                in_response_to=previous_statement_text,
                                conversation='training',
                                created_at=date_parser.parse(row[0]),
                                persona=row[1]
                            )

                            for preprocessor in self.chatbot.preprocessors:
                                statement = preprocessor(statement)

                            statement.search_text = tagger.get_text_index_string(statement.text)
                            statement.search_in_response_to = previous_statement_search_text

                            previous_statement_text = statement.text
                            previous_statement_search_text = statement.search_text

                            statements_from_file.append(statement)

            self.chatbot.storage.create_many(statements_from_file)

        print('Training took', time.time() - start_time, 'seconds.')
| 34.082857 | 99 | 0.592087 |
acf9a14654988cebe1e2cbe8bb10779d1ceb02bb | 2,041 | py | Python | parser/module/biaffine.py | danielhers/hlt-suda-ucca-parser | 107229d585c337bef538385848f27fc13daa81c0 | [
"MIT"
] | 21 | 2019-03-14T03:33:01.000Z | 2020-11-17T04:12:51.000Z | parser/module/biaffine.py | LucasMoncuit/ucca-parser | 1886012f85afa9fa60284a3b276e8649ed63288e | [
"MIT"
] | 4 | 2019-06-16T14:31:43.000Z | 2020-10-14T07:18:09.000Z | parser/module/biaffine.py | LucasMoncuit/ucca-parser | 1886012f85afa9fa60284a3b276e8649ed63288e | [
"MIT"
] | 9 | 2019-06-13T12:40:57.000Z | 2020-09-14T11:11:05.000Z | import torch
import torch.nn as nn
class Biaffine(nn.Module):
    """
    BiAffine Attention layer from https://arxiv.org/abs/1611.01734

    Expects inputs as batch-first sequences [batch_size, seq_length, dim].
    Returns score matrices as [batch_size, dim, dim] for arc attention
    (out_channels=1), and score as [batch_size, out_channels, dim, dim]
    for label attention (where out_channels=#labels).
    """

    def __init__(self, in_dim, out_channels, bias_head=True, bias_dep=True):
        """
        :param in_dim: dimensionality of head/dependent representations
        :param out_channels: number of score channels (1 for arcs, #labels for labels)
        :param bias_head: append a bias column of ones to the head input
        :param bias_dep: append a bias column of ones to the dependent input
        """
        super(Biaffine, self).__init__()
        self.bias_head = bias_head
        self.bias_dep = bias_dep
        self.U = nn.Parameter(torch.Tensor(out_channels,
                                           in_dim + int(bias_head),
                                           in_dim + int(bias_dep)))
        self.reset_parameters()

    def reset_parameters(self):
        # Zero initialization: all pairs start with equal scores.
        self.U.data.zero_()

    def forward(self, Rh, Rd):
        """
        Returns S = (Rh @ U @ Rd.T) with dims [batchsize, n_channels, t, t]
        S[b, c, i, j] = Score sample b Label c Head i Dep j
        """
        if self.bias_head:
            Rh = self.add_ones_col(Rh)
        if self.bias_dep:
            Rd = self.add_ones_col(Rd)
        # Add dimension to Rh and Rd for batch matrix products,
        # shape [batch, t, d] -> [batch, 1, t, d]
        Rh = Rh.unsqueeze(1)
        Rd = Rd.unsqueeze(1)
        S = Rh @ self.U @ torch.transpose(Rd, -1, -2)
        # If out_channels == 1, squeeze [batch, 1, t, t] -> [batch, t, t]
        return S.squeeze(1)

    @staticmethod
    def add_ones_col(X):
        """
        Add a column of ones to each matrix in batch (acts as a bias feature).
        """
        # Renamed the middle dimension: the original unpacked into ``len``,
        # shadowing the builtin.
        batch_size, seq_len, dim = X.size()
        b = X.new_ones((batch_size, seq_len, 1), requires_grad=True)
        return torch.cat([X, b], -1)

    def __repr__(self):
        tmpstr = self.__class__.__name__
        tmpstr += '(\n (U): {}\n)'.format(self.U.size())
        return tmpstr
acf9a172877ecdeebeba511499f3cdd8629d609b | 5,975 | py | Python | Contest/ABC170/e/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/ABC170/e/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/ABC170/e/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
from bisect import bisect_left, bisect_right, insort_right
class SquareSkipList:
    """A two-level (sqrt-decomposition style) skip list.

    Something like a skip list with the number of layers fixed at two;
    usable as a substitute for C++'s std::multiset.  layer1 holds the
    "express lane" (promoted elements plus an ``inf`` sentinel), layer0[i]
    holds the elements strictly between consecutive layer1 entries.
    """
    def __init__(self, values = None, sorted_ = False, square = 1000, seed = 42, inf = float("inf")):
        # values: list of initial values
        # sorted_: whether the initial values are already sorted
        # square: square root of the maximum number of elements
        # seed: seed for the xorshift random generator
        # inf: sentinel value (use (float("inf"), float("inf")) when elements are tuples)
        self.square = square
        if values is None:
            self.rand_y = seed
            self.layer1 = [inf]
            self.layer0 = [[]]
        else:
            self.layer1 = layer1 = []
            self.layer0 = layer0 = []
            if not sorted_:
                values.sort()
            y = seed
            l0 = []
            for v in values:
                # xorshift step; roughly 1/square of elements get promoted
                y ^= (y & 0x7ffff) << 13
                y ^= y >> 17
                y ^= (y & 0x7ffffff) << 5
                if y % square == 0:
                    layer0.append(l0)
                    l0 = []
                    layer1.append(v)
                else:
                    l0.append(v)
            layer1.append(inf)
            layer0.append(l0)
            self.rand_y = y
    def add(self, x):  # insert an element  # O(sqrt(n))
        # xorshift
        y = self.rand_y
        y ^= (y & 0x7ffff) << 13
        y ^= y >> 17
        y ^= (y & 0x7ffffff) << 5
        self.rand_y = y
        if y % self.square == 0:
            # promote x into layer1 and split the bucket it lands in
            layer1, layer0 = self.layer1, self.layer0
            idx1 = bisect_right(layer1, x)
            layer1.insert(idx1, x)
            layer0_idx1 = layer0[idx1]
            idx0 = bisect_right(layer0_idx1, x)
            layer0.insert(idx1 + 1, layer0_idx1[idx0:])
            del layer0_idx1[idx0:]
        else:
            idx1 = bisect_right(self.layer1, x)
            insort_right(self.layer0[idx1], x)
    def remove(self, x):  # delete an element  # O(sqrt(n))
        # if x is not present, the smallest element >= x is removed instead
        idx1 = bisect_left(self.layer1, x)
        layer0_idx1 = self.layer0[idx1]
        idx0 = bisect_left(layer0_idx1, x)
        if idx0 == len(layer0_idx1):
            # x sits in layer1: merge the two adjacent buckets
            del self.layer1[idx1]
            self.layer0[idx1] += self.layer0.pop(idx1 + 1)
        else:
            del layer0_idx1[idx0]
    def search_higher_equal(self, x):  # smallest value >= x,  O(log(n))
        idx1 = bisect_left(self.layer1, x)
        layer0_idx1 = self.layer0[idx1]
        idx0 = bisect_left(layer0_idx1, x)
        if idx0 == len(layer0_idx1):
            return self.layer1[idx1]
        return layer0_idx1[idx0]
    def search_higher(self, x):  # smallest value > x,  O(log(n))
        idx1 = bisect_right(self.layer1, x)
        layer0_idx1 = self.layer0[idx1]
        idx0 = bisect_right(layer0_idx1, x)
        if idx0 == len(layer0_idx1):
            return self.layer1[idx1]
        return layer0_idx1[idx0]
    def search_lower(self, x):  # largest value < x,  O(log(n))
        idx1 = bisect_left(self.layer1, x)
        layer0_idx1 = self.layer0[idx1]
        idx0 = bisect_left(layer0_idx1, x)
        if idx0 == 0:  # layer0_idx1 is empty, or all of its values are >= x
            return self.layer1[idx1 - 1]
        return layer0_idx1[idx0 - 1]
    def pop(self, idx):
        # remove and return the idx-th smallest element (0-indexed)
        # O(sqrt(n))
        # heavy because of the linear scan; use a larger `square` if called often
        layer0 = self.layer0
        s = -1
        for i, l0 in enumerate(layer0):
            s += len(l0) + 1
            if s >= idx:
                break
        if s == idx:
            # the target is the layer1 separator after bucket i
            layer0[i] += layer0.pop(i + 1)
            return self.layer1.pop(i)
        else:
            return layer0[i].pop(idx - s)
    def pop_max(self):
        # remove and return the maximum,  O(1)
        # raises if the container is empty
        if self.layer0[-1]:
            return self.layer0[-1].pop()
        else:
            del self.layer0[-1]
            return self.layer1.pop(-2)
    def __getitem__(self, item):
        # return the item-th smallest element (0-indexed),  O(sqrt(N))
        layer0 = self.layer0
        s = -1
        for i, l0 in enumerate(layer0):
            s += len(l0) + 1
            if s >= item:
                break
        if s == item:
            return self.layer1[i]
        else:
            return layer0[i][item - s]
    def min(self):  # return the minimum; the inf sentinel if empty,  O(1)
        return self.layer0[0][0] if self.layer0[0] else self.layer1[0]
    def max(self):  # return the maximum; inf if empty,  O(1)
        # FIXME(review): the trailing bare ``inf`` is not defined in this
        # scope (it was only an __init__ argument), so the empty-structure
        # branch raises NameError -- confirm and store the sentinel if needed.
        return self.layer0[-1][-1] if self.layer0[-1] else self.layer1[-2] if len(self.layer0) >= 2 else inf
    def merge(self, r):  # append container r (assumed >= all our values),  O(sqrt(n))
        self.layer0[-1] += r.layer0[0]
        self.layer0 += r.layer0[1:]
        del self.layer1[-1]
        self.layer1 += r.layer1
    def split(self, k):  # split off and return the part >= k,  O(sqrt(n))
        idx1 = bisect_left(self.layer1, k)
        layer0_idx1 = self.layer0[idx1]
        idx0 = bisect_left(layer0_idx1, k)
        r = SquareSkipList(square = self.square, seed = self.rand_y)
        r.layer1 = self.layer1[idx1:]
        r.layer0 = [layer0_idx1[idx0:]] + self.layer0[idx1 + 1:]
        del self.layer1[idx1:-1], layer0_idx1[idx0:], self.layer0[idx1 + 1:]
        return r
    def print(self):
        # debug dump of both layers (note: shadows the builtin ``print``
        # only as a method name, which is harmless)
        print(self.layer1)
        print(self.layer0)
    def __iter__(self):
        # in-order traversal interleaving buckets and layer1 separators;
        # stops before yielding the trailing inf sentinel
        layer1 = self.layer1
        layer0 = self.layer0
        idx1 = idx0 = 0
        layer0_idx1 = layer0[idx1]
        while True:
            if len(layer0_idx1) == idx0:
                if len(layer1) - 1 == idx1:
                    return
                yield layer1[idx1]
                idx1 += 1
                layer0_idx1 = layer0[idx1]
                idx0 = 0
            else:
                yield layer0_idx1[idx0]
                idx0 += 1
INF = float("inf")
(n, q), *D = [[*map(int, o.split())] for o in open(0)]
K = [[] for _ in [None] * 200001]
R, P = [INF], [INF]
for a, b in D[:n]:
R += a,
P += b,
K[b] += a,
K = [SquareSkipList(k) for k in K]
maxes = []
for c, d in D[n:]:
r = R[c]
before = K[P[c]]
#solving | 31.951872 | 108 | 0.50477 |
acf9a219e557e90d70b1f73ee59b7735fb277db3 | 13,642 | py | Python | solve.py | cdkrot/pace2020-sat-dp-solver | bf6bedaa42af57f0ef96bd8faf88c8a826cc4e36 | [
"MIT"
] | null | null | null | solve.py | cdkrot/pace2020-sat-dp-solver | bf6bedaa42af57f0ef96bd8faf88c8a826cc4e36 | [
"MIT"
] | null | null | null | solve.py | cdkrot/pace2020-sat-dp-solver | bf6bedaa42af57f0ef96bd8faf88c8a826cc4e36 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from typing import List, Set
import sys, itertools
from pysat.solvers import Solver
import os
########### Instance and Result
class Instance:
    """An undirected graph: vertex count, edge count and adjacency lists."""

    def __init__(self, n: int, m: int, adj):
        self._n = n
        self._m = m
        self._adj = adj

    def vertex_number(self) -> int:
        return self._n

    def edge_number(self) -> int:
        return self._m

    def adj(self, v) -> List[int]:
        return self._adj[v]

    def set_adj(self, v, new_adj):
        self._adj[v] = new_adj

    def edges(self):
        """Yield each undirected edge exactly once as (v, u) with v < u."""
        for v, neighbours in enumerate(self._adj):
            for u in neighbours:
                if v < u:
                    yield (v, u)

    def vertex_set(self):
        return range(self._n)

    def clone(self):
        """Return a deep copy (fresh adjacency lists)."""
        copied_adj = [list(neighbours) for neighbours in self._adj]
        return Instance(self._n, self._m, copied_adj)

    def __repr__(self):
        return "Instance({}, {})".format(self.vertex_number(), list(self.edges()))
class Result:
    """A treedepth decomposition: depth plus one parent pointer per vertex.

    A parent of -1 marks a root of the decomposition forest.
    """

    def __init__(self, depth: int, parents: List[int]):
        self._depth = depth
        self._parents = parents

    def depth(self):
        return self._depth

    def roots(self):
        """Yield every vertex whose parent is -1."""
        for vertex, parent in enumerate(self._parents):
            if parent == -1:
                yield vertex

    def parent(self, i: int) -> int:
        return self._parents[i]

    def __repr__(self):
        return "Result({}, {})".format(self._depth, self._parents)
def read_instance(fp) -> Instance:
    """Parse a PACE-format graph from an iterable of lines.

    Lines starting with 'c' (comments) and blank lines are skipped;
    the 'p tdp n m' header allocates the adjacency lists; every other
    line is a 1-based edge added in both directions.
    """
    n = -1
    m = -1
    adj: List[List[int]] = None
    for raw in fp:
        stripped = raw.strip()
        if not stripped or stripped.startswith("c"):
            continue
        fields = stripped.split()
        if stripped.startswith("p"):
            n, m = int(fields[2]), int(fields[3])
            adj = [[] for _ in range(n)]
        else:
            a = int(fields[0]) - 1
            b = int(fields[1]) - 1
            adj[a].append(b)
            adj[b].append(a)
    return Instance(n, m, adj)
def read_instance_from_args() -> Instance:
    """Parse an instance passed on the command line: argv[1] holds the whole
    input file contents, which are split into lines here."""
    return read_instance(sys.argv[1].split('\n'))
def write_instance(instance: Instance, fl):
    """Write *instance* to file object *fl* in PACE format (1-based vertices)."""
    header = "p tdp {} {}".format(instance.vertex_number(), instance.edge_number())
    print(header, file=fl)
    for v, u in instance.edges():
        print("{} {}".format(v + 1, u + 1), file=fl)
def print_result(out, instance: "Instance", result: "Result"):
    """Write *result* for *instance* to the *out* stream in PACE format.

    First line is the treedepth, then one 1-based parent id per vertex
    (a root prints 0, since parent(-1) + 1 == 0).

    Fixes: the ``out`` parameter was previously ignored (output always went
    to stdout); the int guard now uses isinstance and a meaningful message.

    Raises:
        ValueError: if *result* is a bare int instead of a Result object.
    """
    if isinstance(result, int):
        raise ValueError("expected a Result object, got a bare int")
    print(result.depth(), file=out)
    for i in range(instance.vertex_number()):
        print(result.parent(i) + 1, file=out)
# ###### Cover
# def get_cover_pulp(instance: Instance):
# from pulp import LpProblem, LpVariable, lpSum, LpMinimize
# print("x")
# prob = LpProblem("", LpMinimize)
# I = instance.vertex_set()
# x = [LpVariable(str(i), cat='Binary') for i in I]
# prob += lpSum(x) # objective
# for v in I:
# for u in instance.adj(v):
# if v < u:
# prob += (x[v] + x[u] >= 1)
# prob.solve()
# result = [x[i].value() >= 0.99 for i in I]
# # print("VC IS", sum(result), file=sys.stderr)
# return result
# def get_cover(instance: Instance):
# from ortools.linear_solver import pywraplp
# solver = pywraplp.Solver('',
# pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
# I = instance.vertex_set()
# x = [solver.IntVar(0, solver.infinity(), str(i)) for i in I]
# objective = solver.Minimize(sum(x))
# for v in I:
# for u in instance.adj(v):
# if v < u:
# solver.Add(x[v] + x[u] >= 1)
# solver.set_time_limit(20 * 1000)
# solver.Solve()
# result = [x[i].solution_value() >= 0.99 for i in I]
# # print("VC IS", sum(result), file=sys.stderr)
# return result
#### SAT-based solving
def make_solver():
    """Create a fresh pysat SAT solver backed by Glucose 4."""
    return Solver(name='Glucose4')
def solve_limited_with_sat(instance: Instance, mi: int) -> Result:
    """Decide via SAT whether *instance* has treedepth <= mi.

    Encoding: variable (v, u, i) is true iff vertices v and u are in the same
    block of partition P[i]; P grows from all-singletons-empty at step 0 to
    the full vertex set at step mi, adding at most one vertex per step, so a
    satisfying assignment corresponds to an elimination tree of depth <= mi.

    Returns a zero-argument callable that reconstructs a Result lazily, or
    None if unsatisfiable.
    NOTE(review): the annotated return type ``Result`` is inaccurate -- the
    actual return is ``Optional[Callable[[], Result]]``.
    """
    if instance.vertex_number() == 0:
        return lambda: Result(0, [])
    solver: Solver = make_solver()
    # We are going to have N x N x length vars
    n: int = instance.vertex_number()
    length: int = mi + 1
    def flat_var(a: int, b: int, c: int) -> int:
        # 1-based DIMACS variable id for the (a, b, c) triple (a <= b assumed
        # by callers via min/max).
        return 1 + ((a + b * n) * length + c)
    # basic relations: same-block is transitive at every step
    for v in range(n):
        for u in range(n):
            for w in range(n):
                for i in range(1, length):
                    # (v, u, i), (u, w, i) => (v, w, i)
                    # (v, w, i) or not (v, u, i) or not (u, w, i)
                    solver.add_clause([-flat_var(min(v, u), max(v, u), i),
                                       -flat_var(min(u, w), max(u, w), i),
                                       +flat_var(min(v, w), max(v, w), i)])
    # Constraint D1: P[0] is empty and P[length - 1] is full
    for v in range(n):
        for u in range(v, n):
            solver.add_clause([-flat_var(v, u, 0)])
            solver.add_clause([flat_var(v, u, length - 1)])
    # Constraint D2: P[i + 1] is refinement of P[i]
    for v in range(n):
        for u in range(v, n):
            for i in range(1, length):
                solver.add_clause([-flat_var(v, u, i - 1), flat_var(v, u, i)])
    # Constraint D3: there is at most one new vertex each time
    for v in range(n):
        for u in range(v + 1, n):
            for i in range(1, length):
                solver.add_clause([-flat_var(v, u, i), flat_var(v, v, i - 1), flat_var(u, u, i - 1)])
    # Constraint D4 each edge spanned by the tree:
    for (v, u) in instance.edges():
        assert v < u
        for i in range(1, length):
            solver.add_clause([-flat_var(v, v, i), -flat_var(u, u, i),
                               flat_var(v, v, i - 1), flat_var(v, u, i)])
            solver.add_clause([-flat_var(v, v, i), -flat_var(u, u, i),
                               flat_var(u, u, i - 1), flat_var(v, u, i)])
    if not solver.solve():
        return None
    # Keep only the variables assigned true in the model.
    true_set = set(filter(lambda x: x > 0, solver.get_model()))
    def recover():
        # Rebuild parent pointers: a vertex's parent is the earliest-added
        # vertex it shares a block with at its own first appearance time.
        length = mi + 1
        first_time = [-1 for i in range(n)]
        for i in range(length - 1, 0, -1):
            for v in range(n):
                if flat_var(v, v, i) in true_set:
                    first_time[v] = i
        parents = [-1 for i in range(n)]
        for (tm, v) in sorted(zip(first_time, itertools.count())):
            for u in range(n):
                if u != v and parents[u] == -1 and flat_var(min(v, u), max(v, u), tm) in true_set:
                    parents[u] = v
        # we assume here, that there was only one connected comp.
        assert parents.count(-1) == 1
        return Result(mi, parents)
    return recover
###### Kernelize
# def kernelize_add_edges(instance: Instance, p_was, VC: List[bool], td: int):
# for v in range(instance.vertex_number()):
# neigh = set(instance.adj(v))
# for u in range(instance.vertex_number()):
# if v != u and (VC[v] or VC[u]) and u not in neigh:
# count = 0
# for x in instance.adj(u):
# if x in neigh:
# count += 1
# if count >= td:
# instance.adj(v).append(u)
# neigh.add(u)
# instance.adj(u).append(v)
# p_was[0] = True
# def kernelize_remove_vertices(instance_orig: Instance, p_was, p_recover, VC, td: int):
# instance: Instance = instance_orig.clone()
# removed_mark = [False for v in range(instance.vertex_number())]
# for v in instance.vertex_set():
# a = instance.adj(v)
# is_good = True
# for vert in a:
# if len(instance.adj(vert)) <= td:
# is_good = False
# if not is_good:
# continue
# for i in range(len(a)):
# adj_set = set(instance.adj(a[i]))
# for j in range(i):
# if a[j] not in adj_set:
# is_good = False
# break
# if not is_good:
# break
# if is_good:
# removed_mark[v] = True
# for u in a:
# instance.adj(u).remove(v)
# instance.set_adj(v, [])
# # # May happen only for one vertex in c.c.
# # # Let's not get rid of it
# # for v in instance.vertex_set():
# # if len(instance.adj(v)) == 0:
# # removed_mark[v] = True
# if sum(removed_mark) == 0:
# return (instance_orig, VC)
# p_was[0] = True
# # print("DZING", file=sys.stderr)
# new_n = 0
# new_m = 0
# new_vs = [-1 for i in instance.vertex_set()]
# vs_old = []
# newVC = []
# for v in instance.vertex_set():
# if not removed_mark[v]:
# new_vs[v] = new_n
# vs_old.append(v)
# newVC.append(VC[v])
# new_n += 1
# adj = [[] for i in range(new_n)]
# new_m = 0
# for (v, u) in instance.edges():
# adj[new_vs[v]].append(new_vs[u])
# adj[new_vs[u]].append(new_vs[v])
# new_m += 1
# oldrecover = p_recover[0]
# def recover(result: Result) -> Result:
# #print("was: ", Instance(new_n, new_m, adj), result, td)
# newarr = [None for i in instance.vertex_set()]
# depth = [None for i in instance.vertex_set()]
# for v in instance.vertex_set():
# if new_vs[v] != -1:
# tmp = result.parent(new_vs[v])
# if tmp == -1:
# newarr[v] = tmp
# else:
# newarr[v] = vs_old[tmp]
# def calc_height(vert):
# if depth[vert] is not None:
# return
# if newarr[vert] == -1:
# depth[vert] = 1
# return
# calc_height(newarr[vert])
# depth[vert] = 1 + depth[newarr[vert]]
# for v in instance.vertex_set():
# if not newarr[v] is None:
# calc_height(v)
# #print("depth was", depth)
# #print("newarr", newarr)
# #print("removed_mark", removed_mark)
# for v in reversed(instance.vertex_set()):
# if removed_mark[v]:
# bottom_most = (-1, -1)
# #print(instance_orig.adj(v))
# for u in instance_orig.adj(v):
# # print("for", v, "considering", u, depth[u])
# if v < u or not removed_mark[u]:
# bottom_most = max(bottom_most, (depth[u], u))
# depth[v] = bottom_most[0] + 1
# newarr[v] = bottom_most[1]
# #print("to: ", instance_orig, Result(max(depth), newarr))
# return oldrecover(Result(max(depth), newarr))
# p_recover[0] = recover
# # print(td, removed_mark, instance_orig, '->', Instance(new_n, new_m, adj))
# return (Instance(new_n, new_m, adj), newVC)
# def kernelize(instance: Instance, td: int, VC):
# p_was = [True]
# p_recover = [lambda x: x]
# while p_was[0]:
# p_was[0] = False
# kernelize_add_edges(instance, p_was, VC, td)
# (instance, VC) = kernelize_remove_vertices(instance, p_was, p_recover, VC, td)
# return (instance, p_recover[0])
###### Core Skeleton
def solve_limited_with_kernels(instance: Instance, mi: int, VC):
    """Kernelize the instance, solve the kernel via SAT, lift the answer back.

    NOTE(review): ``kernelize`` is commented out above in this file, so
    calling this function currently raises NameError.  It is bypassed by
    solve_limited() and kept only for reference.
    """
    (instance_prime, goback) = kernelize(instance.clone(), mi, VC)
    res = solve_limited_with_sat(instance_prime, mi)
    # res2 = solve_limited_with_sat(instance, mi)
    #
    # b1 = res == None
    # b2 = res2 == None
    # if b1 != b2:
    #     print(instance, instance_prime)
    #     raise
    if not res:
        return res
    # Wrap the recovery thunk so the kernel's solution is mapped back onto
    # the original instance.
    return lambda: goback(res())
def solve_limited(instance: Instance, mi: int, VC):
    """Decide treedepth <= mi.  VC is currently unused because the
    kernelization path is disabled (see commented call below)."""
    return solve_limited_with_sat(instance, mi)
    # return solve_limited_with_kernels(instance, mi, VC)
def transfer_to_cpp(instance: Instance, VC):
    """Hand the instance to the native C++ solver via cffi.

    Loads ./cppsolve.so from the working directory and calls its entry
    point with each edge encoded as ``v * n + u``.  VC is not forwarded
    (NULL is passed in its place).
    """
    import cffi
    ffi = cffi.FFI()
    ffi.cdef("void python_enter_point(int n, int m, int* edges, int* vc);")
    lib = ffi.dlopen("./cppsolve.so")
    lib.python_enter_point(instance.vertex_number(),
                           instance.edge_number(),
                           [instance.vertex_number() * a + b for (a, b) in instance.edges()],
                           ffi.NULL)
def solve(instance: Instance) -> Result:
    """Compute an optimal treedepth decomposition of *instance*.

    Small instances (<= 34 vertices) are delegated to the C++ solver;
    otherwise the minimal feasible depth is located by exponential search
    for an upper bound followed by binary search, reusing the last
    successful recovery thunk.
    """
    VC = None  # VC = get_cover(instance)
    if instance.vertex_number() <= 34:
        transfer_to_cpp(instance, VC)
        # NOTE(review): presumably the C++ solver prints and exits, since
        # the SAT search below would otherwise also run -- confirm.
    lo: int = 0
    hi: int = 1
    recover = None
    # Exponential search: double hi until depth hi is feasible.
    while True:
        recover = solve_limited(instance, hi, VC)
        if recover:
            break
        lo = hi
        hi *= 2
    # Binary search the minimal feasible depth in (lo, hi].
    while hi - lo > 1:
        mi: int = lo + (hi - lo) // 2
        rs = solve_limited(instance, mi, VC)
        if rs:
            hi = mi
            recover = rs
        else:
            lo = mi
    print("recovering", file=sys.stderr)
    print("hi was", hi, file=sys.stderr)
    return recover()
def main():
    """Entry point: read an instance, solve it, print the decomposition."""
    instance: Instance = None
    # NOTE(review): the argv branch is disabled by the ``False and`` guard,
    # so input is always read from stdin.
    if False and len(sys.argv) > 1:
        instance = read_instance_from_args()
    else:
        instance = read_instance(sys.stdin)
    result: Result = solve(instance)
    print_result(sys.stdout, instance, result)
# Run as a script: instance on stdin, decomposition on stdout.
if __name__ == '__main__':
    main()
| 29.025532 | 101 | 0.50887 |
acf9a312f198bdf02e6119865ea1a3def0659d1f | 7,226 | py | Python | tests/unit/utils/warnings_test.py | sunbenxin/salt | b821f6a174e67a3e1def1ba7fa16885cd985bb0c | [
"Apache-2.0"
] | 1 | 2016-03-13T09:05:15.000Z | 2016-03-13T09:05:15.000Z | tests/unit/utils/warnings_test.py | sunbenxin/salt | b821f6a174e67a3e1def1ba7fa16885cd985bb0c | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/warnings_test.py | sunbenxin/salt | b821f6a174e67a3e1def1ba7fa16885cd985bb0c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details
:license: Apache 2.0, see LICENSE for more details.
tests.unit.utils.warnings_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test ``salt.utils.warn_until`` and ``salt.utils.kwargs_warn_until``
'''
# Import python libs
import sys
import warnings
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import warn_until, kwargs_warn_until
from salt.version import SaltStackVersion
class WarnUntilTestCase(TestCase):
    """Tests for ``salt.utils.warn_until`` and ``salt.utils.kwargs_warn_until``.

    NOTE(review): this module targets Python-2-era Salt -- it uses
    ``sys.maxint`` (Python 2 only) and the deprecated ``assertRaisesRegexp``
    alias.
    """
    def test_warn_until_warning_raised(self):
        # We *always* want *all* warnings thrown on this module
        warnings.filterwarnings('always', '', DeprecationWarning, __name__)

        def raise_warning(_version_info_=(0, 16, 0)):
            warn_until(
                (0, 17), 'Deprecation Message!',
                _version_info_=_version_info_
            )

        def raise_named_version_warning(_version_info_=(0, 16, 0)):
            warn_until(
                'Hydrogen', 'Deprecation Message!',
                _version_info_=_version_info_
            )

        # raise_warning should show warning until version info is >= (0, 17)
        with warnings.catch_warnings(record=True) as recorded_warnings:
            raise_warning()
            self.assertEqual(
                'Deprecation Message!', str(recorded_warnings[0].message)
            )

        # raise_warning should show warning until version info is >= (0, 17)
        with warnings.catch_warnings(record=True) as recorded_warnings:
            raise_named_version_warning()
            self.assertEqual(
                'Deprecation Message!', str(recorded_warnings[0].message)
            )

        # the deprecation warning is not issued because we passed
        # _dont_call_warning
        with warnings.catch_warnings(record=True) as recorded_warnings:
            warn_until(
                (0, 17), 'Foo', _dont_call_warnings=True,
                _version_info_=(0, 16)
            )
            self.assertEqual(0, len(recorded_warnings))

        # Let's set version info to (0, 17), a RuntimeError should be raised
        with self.assertRaisesRegexp(
                RuntimeError,
                r'The warning triggered on filename \'(.*)warnings_test.py\', '
                r'line number ([\d]+), is supposed to be shown until version '
                r'\'0.17.0\' is released. Current version is now \'0.17.0\'. '
                r'Please remove the warning.'):
            raise_warning(_version_info_=(0, 17, 0))

        # Let's set version info to (0, 17), a RuntimeError should be raised
        # (sys.maxint is Python 2 only)
        with self.assertRaisesRegexp(
                RuntimeError,
                r'The warning triggered on filename \'(.*)warnings_test.py\', '
                r'line number ([\d]+), is supposed to be shown until version '
                r'\'Hydrogen((.*))\' is released. Current version is now '
                r'\'([\d.]+)\'. Please remove the warning.'):
            raise_named_version_warning(_version_info_=(sys.maxint, 16, 0))

        # Even though we're calling warn_until, we pass _dont_call_warnings
        # because we're only after the RuntimeError
        with self.assertRaisesRegexp(
                RuntimeError,
                r'The warning triggered on filename \'(.*)warnings_test.py\', '
                r'line number ([\d]+), is supposed to be shown until version '
                r'\'0.17.0\' is released. Current version is now \'0.17.0\'. '
                r'Please remove the warning.'):
            warn_until(
                (0, 17), 'Foo', _dont_call_warnings=True
            )

        with self.assertRaisesRegexp(
                RuntimeError,
                r'The warning triggered on filename \'(.*)warnings_test.py\', '
                r'line number ([\d]+), is supposed to be shown until version '
                r'\'Hydrogen((.*))\' is released. Current version is now '
                r'\'([\d.]+)\'. Please remove the warning.'):
            warn_until(
                'Hydrogen', 'Foo', _dont_call_warnings=True,
                _version_info_=(sys.maxint, 16, 0)
            )

        # version on the deprecation message gets properly formatted
        with warnings.catch_warnings(record=True) as recorded_warnings:
            vrs = SaltStackVersion.from_name('Helium')
            warn_until(
                'Helium', 'Deprecation Message until {version}!',
                _version_info_=(vrs.major - 1, 0)
            )
            self.assertEqual(
                'Deprecation Message until {0}!'.format(vrs.formatted_version),
                str(recorded_warnings[0].message)
            )

    def test_kwargs_warn_until_warning_raised(self):
        # We *always* want *all* warnings thrown on this module
        warnings.filterwarnings('always', '', DeprecationWarning, __name__)

        def raise_warning(**kwargs):
            _version_info_ = kwargs.pop('_version_info_', (0, 16, 0))
            kwargs_warn_until(
                kwargs,
                (0, 17),
                _version_info_=_version_info_
            )

        # raise_warning({...}) should show warning until version info is >= (0, 17)
        with warnings.catch_warnings(record=True) as recorded_warnings:
            raise_warning(foo=42)  # with a kwarg
            self.assertEqual(
                'The following parameter(s) have been deprecated and '
                'will be removed in \'0.17.0\': \'foo\'.',
                str(recorded_warnings[0].message)
            )

        # With no **kwargs, should not show warning until version info is >= (0, 17)
        with warnings.catch_warnings(record=True) as recorded_warnings:
            kwargs_warn_until(
                {},  # no kwargs
                (0, 17),
                _version_info_=(0, 16, 0)
            )
            self.assertEqual(0, len(recorded_warnings))

        # Let's set version info to (0, 17), a RuntimeError should be raised
        # regardless of whether or not we pass any **kwargs.
        with self.assertRaisesRegexp(
                RuntimeError,
                r'The warning triggered on filename \'(.*)warnings_test.py\', '
                r'line number ([\d]+), is supposed to be shown until version '
                r'\'0.17.0\' is released. Current version is now \'0.17.0\'. '
                r'Please remove the warning.'):
            raise_warning(_version_info_=(0, 17))  # no kwargs

        with self.assertRaisesRegexp(
                RuntimeError,
                r'The warning triggered on filename \'(.*)warnings_test.py\', '
                r'line number ([\d]+), is supposed to be shown until version '
                r'\'0.17.0\' is released. Current version is now \'0.17.0\'. '
                r'Please remove the warning.'):
            raise_warning(bar='baz', qux='quux', _version_info_=(0, 17))  # some kwargs
# Allow running this test module directly through the Salt Testing harness.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(WarnUntilTestCase, needs_daemon=False)
| 41.528736 | 87 | 0.580819 |
acf9a374486990cda4e6ce63e941836844b1ed8b | 31,469 | py | Python | cognigraph/nodes/processors.py | cognigraphtravis/cognigraph | cfed2a32a4b22b15687b13b40a52e54fdbed703a | [
"MIT"
] | null | null | null | cognigraph/nodes/processors.py | cognigraphtravis/cognigraph | cfed2a32a4b22b15687b13b40a52e54fdbed703a | [
"MIT"
] | null | null | null | cognigraph/nodes/processors.py | cognigraphtravis/cognigraph | cfed2a32a4b22b15687b13b40a52e54fdbed703a | [
"MIT"
] | null | null | null | import time
from typing import Tuple
import math
from vendor.nfb.pynfb.protocols.ssd.topomap_selector_ica import ICADialog
import numpy as np
import mne
from numpy.linalg import svd
from scipy.optimize import linprog
from sklearn.preprocessing import normalize
from mne.preprocessing import find_outliers
from mne.minimum_norm import apply_inverse_raw # , make_inverse_operator
from mne.minimum_norm import make_inverse_operator as mne_make_inverse_operator
from mne.beamformer import apply_lcmv_raw
from ..helpers.make_lcmv import make_lcmv
from .node import ProcessorNode
from ..helpers.matrix_functions import (make_time_dimension_second,
put_time_dimension_back_from_second,
last_sample)
from ..helpers.inverse_model import (get_default_forward_file,
get_clean_forward,
make_inverse_operator,
matrix_from_inverse_operator)
from ..helpers.pynfb import (pynfb_ndarray_function_wrapper,
ExponentialMatrixSmoother)
from ..helpers.channels import channel_labels_saver
from ..helpers.aux_tools import nostdout
from .. import TIME_AXIS
from vendor.nfb.pynfb.signal_processing import filters
class Preprocessing(ProcessorNode):
    """Pass-through node that accumulates per-channel statistics for
    ``collect_for_x_seconds`` of data and then flags outlier channels by
    their standard deviation (handling of the flagged channels is still a
    TODO -- see _update)."""
    CHANGES_IN_THESE_REQUIRE_RESET = ('collect_for_x_seconds', )
    UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info', )
    SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info': channel_labels_saver}
    def __init__(self, collect_for_x_seconds: int=60):
        super().__init__()
        self.collect_for_x_seconds = collect_for_x_seconds  # type: int
        self._samples_collected = None  # type: int
        self._samples_to_be_collected = None  # type: int
        self._enough_collected = None  # type: bool
        self._means = None  # type: np.ndarray
        self._mean_sums_of_squares = None  # type: np.ndarray
        self._bad_channel_indices = None  # type: List[int]
        self._interpolation_matrix = None  # type: np.ndarray
        self._reset_statistics()
    def _initialize(self):
        """Cache upstream mne_info and convert the collection window from
        seconds to a sample count using the stream's sampling rate."""
        self.mne_info = self.traverse_back_and_find('mne_info')
        frequency = self.mne_info['sfreq']
        self._samples_to_be_collected = int(math.ceil(
            self.collect_for_x_seconds * frequency))
    def _update(self):
        """Accumulate statistics until enough samples are seen, then (once)
        detect outlier channels; the input is always forwarded unchanged."""
        # Have we collected enough samples without the new input?
        enough_collected = self._samples_collected >=\
            self._samples_to_be_collected
        if not enough_collected:
            if self.input_node.output is not None and\
                    self.input_node.output.shape[TIME_AXIS] > 0:
                self._update_statistics()
        elif not self._enough_collected:  # We just got enough samples
            self._enough_collected = True
            standard_deviations = self._calculate_standard_deviations()
            self._bad_channel_indices = find_outliers(standard_deviations)
            if any(self._bad_channel_indices):
                # message = Message(there_has_been_a_change=True,
                #                   output_history_is_no_longer_valid=True)
                # self._deliver_a_message_to_receivers(message)
                # self.mne_info['bads'].append(self._bad_channel_indices)
                # self.mne_info['bads'] = self._bad_channel_indices
                # TODO: handle emergent bad channels on the go
                pass
        self.output = self.input_node.output
    def _reset(self) -> bool:
        """Drop accumulated statistics; downstream history becomes invalid."""
        self._reset_statistics()
        self._input_history_is_no_longer_valid = True
        return self._input_history_is_no_longer_valid
    def _reset_statistics(self):
        self._samples_collected = 0
        self._enough_collected = False
        self._means = 0
        self._mean_sums_of_squares = 0
        self._bad_channel_indices = []
    def _update_statistics(self):
        """Fold the new chunk into the running means and mean squares."""
        input_array = self.input_node.output.astype(np.dtype('float64'))
        # Using float64 is necessary because otherwise rounding error
        # in recursive formula accumulate
        n = self._samples_collected
        m = input_array.shape[TIME_AXIS]  # number of new samples
        self._samples_collected += m
        self._means = (
            self._means * n + np.sum(input_array, axis=TIME_AXIS)) / (n + m)
        self._mean_sums_of_squares = (
            self._mean_sums_of_squares * n +
            np.sum(input_array ** 2, axis=TIME_AXIS)) / (n + m)
    def _calculate_standard_deviations(self):
        # Unbiased (n-1) sample standard deviation from the running moments.
        n = self._samples_collected
        return np.sqrt(
            n / (n - 1) * (self._mean_sums_of_squares - self._means ** 2))
    def _on_input_history_invalidation(self):
        self._reset_statistics()
    def _check_value(self, key, value):
        # No validation needed for this node's settings.
        pass
class InverseModel(ProcessorNode):
    """Projects sensor-space data to source space with a precomputed linear
    inverse operator (MNE / dSPM / sLORETA)."""
    SUPPORTED_METHODS = ['MNE', 'dSPM', 'sLORETA']
    UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info', )
    # NOTE(review): 'mne_inverse_model_file_path' is listed here but no such
    # attribute is defined on this class -- confirm against ProcessorNode.
    CHANGES_IN_THESE_REQUIRE_RESET = ('mne_inverse_model_file_path',
                                      'mne_forward_model_file_path',
                                      'snr', 'method')
    SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info': channel_labels_saver}
    def __init__(self, forward_model_path=None, snr=1.0, method='MNE'):
        super().__init__()
        self.snr = snr
        self._user_provided_forward_model_file_path = forward_model_path
        self._default_forward_model_file_path = None
        self.mne_info = None
        self.fwd = None
        self._inverse_model_matrix = None
        self.method = method
    def _initialize(self):
        """Load/clean the forward model, build the inverse matrix, and
        publish a source-space mne_info (one 'vertex #i' channel per source)."""
        mne_info = self.traverse_back_and_find('mne_info')
        self._bad_channels = mne_info['bads']
        if self._user_provided_forward_model_file_path is None:
            self._default_forward_model_file_path =\
                get_default_forward_file(mne_info)
        self.fwd, missing_ch_names = get_clean_forward(
            self.mne_forward_model_file_path, mne_info)
        # Channels absent from the forward model are marked bad upstream.
        mne_info['bads'] = list(set(mne_info['bads'] + missing_ch_names))
        inverse_operator = make_inverse_operator(self.fwd, mne_info)
        self._inverse_model_matrix = matrix_from_inverse_operator(
            inverse_operator=inverse_operator, mne_info=mne_info,
            snr=self.snr, method=self.method)
        frequency = mne_info['sfreq']
        # channel_count = self._inverse_model_matrix.shape[0]
        channel_count = self.fwd['nsource']
        channel_labels = ['vertex #{}'.format(i + 1)
                          for i in range(channel_count)]
        self.mne_info = mne.create_info(channel_labels, frequency)
    def _update(self):
        """Apply the inverse matrix to the current chunk, rebuilding the
        matrix first if the upstream bad-channel list has changed."""
        mne_info = self.traverse_back_and_find('mne_info')
        bads = mne_info['bads']
        if bads != self._bad_channels:
            inverse_operator = make_inverse_operator(self.fwd, mne_info)
            self._inverse_model_matrix = matrix_from_inverse_operator(
                inverse_operator=inverse_operator, mne_info=mne_info,
                snr=self.snr, method=self.method)
            self._bad_channels = bads
        input_array = self.input_node.output
        # Route through RawArray so bad channels are excluded consistently.
        raw_array = mne.io.RawArray(input_array, mne_info, verbose='ERROR')
        raw_array.pick_types(eeg=True, meg=False, stim=False, exclude='bads')
        data = raw_array.get_data()
        self.output = self._apply_inverse_model_matrix(data)
    def _on_input_history_invalidation(self):
        # The methods implemented in this node do not rely on past inputs
        pass
    def _check_value(self, key, value):
        """Validate 'method' against SUPPORTED_METHODS and require snr > 0."""
        if key == 'method':
            if value not in self.SUPPORTED_METHODS:
                raise ValueError(
                    'Method {} is not supported.'.format(value) +
                    ' Use one of: {}'.format(self.SUPPORTED_METHODS))
        if key == 'snr':
            if value <= 0:
                raise ValueError(
                    'snr (signal-to-noise ratio) must be a positive number.')
    def _reset(self):
        # Full re-initialization; all downstream history becomes invalid.
        self._should_reinitialize = True
        self.initialize()
        output_history_is_no_longer_valid = True
        return output_history_is_no_longer_valid
    @property
    def mne_forward_model_file_path(self):
        # User-provided path wins over the auto-detected default.
        return self._user_provided_forward_model_file_path or\
            self._default_forward_model_file_path
    @mne_forward_model_file_path.setter
    def mne_forward_model_file_path(self, value):
        # This setter is for public use, hence the "user_provided"
        self._user_provided_forward_model_file_path = value
    def _apply_inverse_model_matrix(self, input_array: np.ndarray):
        """Multiply W (VERTICES x CHANNELS) by the chunk, keeping the
        node's time-axis convention."""
        W = self._inverse_model_matrix  # VERTICES x CHANNELS
        output_array = W.dot(make_time_dimension_second(input_array))
        return put_time_dimension_back_from_second(output_array)
class LinearFilter(ProcessorNode):
    """Band-pass Butterworth filter node; either cutoff may be None to
    disable that edge, and with both None the input passes through."""
    UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info', )
    CHANGES_IN_THESE_REQUIRE_RESET = ('lower_cutoff', 'upper_cutoff')
    SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info':
                                           lambda info: (info['nchan'], )}

    def __init__(self, lower_cutoff, upper_cutoff):
        super().__init__()
        self.lower_cutoff = lower_cutoff
        self.upper_cutoff = upper_cutoff
        self._linear_filter = None  # type: filters.ButterFilter

    def _initialize(self):
        """Build the Butterworth filter for the upstream stream's sampling
        rate and channel count (or None when both cutoffs are unset)."""
        mne_info = self.traverse_back_and_find('mne_info')
        frequency = mne_info['sfreq']
        channel_count = mne_info['nchan']
        if not (self.lower_cutoff is None and self.upper_cutoff is None):
            band = (self.lower_cutoff, self.upper_cutoff)
            self._linear_filter = filters.ButterFilter(
                band, fs=frequency, n_channels=channel_count)
            self._linear_filter.apply = pynfb_ndarray_function_wrapper(
                self._linear_filter.apply)
        else:
            self._linear_filter = None

    def _update(self):
        # Pass through unchanged when no filter is configured.
        input_data = self.input_node.output
        if self._linear_filter is not None:
            self.output = self._linear_filter.apply(input_data)
        else:
            self.output = input_data

    def _check_value(self, key, value):
        """Validate cutoffs: non-negative, and lower <= upper when both set."""
        if value is None:
            pass
        elif key == 'lower_cutoff':
            if (hasattr(self, 'upper_cutoff') and
                    self.upper_cutoff is not None and
                    value > self.upper_cutoff):
                raise ValueError(
                    'Lower cutoff can`t be set higher that the upper cutoff')
            if value < 0:
                raise ValueError('Lower cutoff must be a positive number')
        elif key == 'upper_cutoff':
            # BUG FIX: the guard used to be hasattr(self, 'upper_cutoff'),
            # which is False while upper_cutoff is first being assigned, so
            # the lower-vs-upper cross-check never ran (and referencing
            # self.lower_cutoff could raise AttributeError).  Guard on the
            # attribute we actually read:
            if (hasattr(self, 'lower_cutoff') and
                    self.lower_cutoff is not None and
                    value < self.lower_cutoff):
                raise ValueError(
                    'Upper cutoff can`t be set lower that the lower cutoff')
            if value < 0:
                raise ValueError('Upper cutoff must be a positive number')

    def _on_input_history_invalidation(self):
        # Clear the IIR filter's internal state.
        if self._linear_filter is not None:
            self._linear_filter.reset()

    def _reset(self):
        self._should_reinitialize = True
        self.initialize()
        output_history_is_no_longer_valid = True
        return output_history_is_no_longer_valid
class EnvelopeExtractor(ProcessorNode):
    """Extracts a smoothed amplitude envelope: abs(input) followed by
    per-channel exponential smoothing with the given factor."""

    def __init__(self, factor=0.9):
        super().__init__()
        self.method = 'Exponential smoothing'
        self.factor = factor
        self._envelope_extractor = None  # type: ExponentialMatrixSmoother

    def _initialize(self):
        """Create the smoother sized to the upstream channel count."""
        channel_count = self.traverse_back_and_find('mne_info')['nchan']
        self._envelope_extractor = ExponentialMatrixSmoother(
            factor=self.factor, column_count=channel_count)
        self._envelope_extractor.apply = pynfb_ndarray_function_wrapper(
            self._envelope_extractor.apply)

    def _update(self):
        # Rectify, then smooth.
        input_data = self.input_node.output
        self.output = self._envelope_extractor.apply(np.abs(input_data))

    def _check_value(self, key, value):
        """Require 0 < factor < 1 and a supported smoothing method."""
        if key == 'factor':
            if value <= 0 or value >= 1:
                raise ValueError('Factor must be a number between 0 and 1')
        if key == 'method':
            if value not in self.SUPPORTED_METHODS:
                # BUG FIX: .format() previously bound only to the second
                # string literal, so the message never contained the value;
                # format each part separately (matches InverseModel).
                raise ValueError(
                    'Method {} is not supported.'.format(value) +
                    ' Use one of: {}'.format(self.SUPPORTED_METHODS))

    def _reset(self):
        self._should_reinitialize = True
        self.initialize()
        output_history_is_no_longer_valid = True
        return output_history_is_no_longer_valid

    def _on_input_history_invalidation(self):
        # Drop the smoother's internal state.
        self._envelope_extractor.reset()

    # Node configuration (kept after the methods, as in the original).
    UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info', )
    CHANGES_IN_THESE_REQUIRE_RESET = ('method', 'factor')
    SUPPORTED_METHODS = ('Exponential smoothing', )
    SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info':
                                           lambda info: (info['nchan'],)}
class Beamformer(ProcessorNode):
    """Source-space estimation with an LCMV beamformer.

    Builds a spatial filter from a forward model and applies it to the
    sensor data. In non-adaptive mode the filter is built once from the
    data-independent surrogate covariance G @ G.T; in adaptive mode the
    data covariance is updated recursively (exponential forgetting) and
    the filter is rebuilt on every `_update` call.
    """
    SUPPORTED_OUTPUT_TYPES = ('power', 'activation')
    UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info',)
    CHANGES_IN_THESE_REQUIRE_RESET = ('snr', 'output_type', 'is_adaptive',
                                      'fixed_orientation',
                                      'mne_forward_model_file_path')
    SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info': channel_labels_saver}

    def __init__(self, snr: float=1.0, output_type: str='power',
                 is_adaptive: bool=False, fixed_orientation: bool=True,
                 forward_model_path: str=None,
                 forgetting_factor_per_second: float=0.99):
        super().__init__()
        self.snr = snr  # type: float
        self._user_provided_forward_model_file_path = forward_model_path
        self._default_forward_model_file_path = None  # type: str
        self.mne_info = None  # type: mne.Info
        self.output_type = output_type  # type: np.dtype
        self.is_adaptive = is_adaptive  # type: bool
        self._initialized_as_adaptive = None  # type: bool
        self.fixed_orientation = fixed_orientation  # type: bool
        self._initialized_as_fixed = None  # type: bool
        self._channel_indices = None  # type: list
        self._gain_matrix = None  # type: np.ndarray
        self._Rxx = None  # type: np.ndarray
        self.forgetting_factor_per_second = forgetting_factor_per_second
        self._forgetting_factor_per_sample = None  # type: float

    def _initialize(self):
        """Load and clean the forward model and build the initial filter."""
        mne_info = self.traverse_back_and_find('mne_info')
        if self._user_provided_forward_model_file_path is None:
            self._default_forward_model_file_path = get_default_forward_file(
                mne_info)
        try:
            fwd, missing_ch_names = get_clean_forward(
                self.mne_forward_model_file_path, mne_info)
        except ValueError:
            raise Exception('BAD FORWARD + DATA COMBINATION!')
        mne_info['bads'] = list(set(mne_info['bads'] + missing_ch_names))
        self._gain_matrix = fwd['sol']['data']
        G = self._gain_matrix
        # Non-adaptive: data-independent surrogate covariance G G^T.
        # Adaptive: start from zeros; _update_covariance_matrix fills it.
        if self.is_adaptive is False:
            Rxx = G.dot(G.T)
        elif self.is_adaptive is True:
            Rxx = np.zeros([G.shape[0], G.shape[0]])  # G.dot(G.T)
        goods = mne.pick_types(mne_info, eeg=True, meg=False, exclude='bads')
        ch_names = [mne_info['ch_names'][i] for i in goods]
        self._Rxx = mne.Covariance(Rxx, ch_names, mne_info['bads'],
                                   mne_info['projs'], nfree=1)
        self._mne_info = mne_info
        frequency = mne_info['sfreq']
        # Convert the per-second forgetting factor to per-sample so that
        # decay per second is independent of the sampling rate.
        self._forgetting_factor_per_sample = np.power(
            self.forgetting_factor_per_second, 1 / frequency)
        n_vert = fwd['nsource']
        channel_labels = ['vertex #{}'.format(i + 1) for i in range(n_vert)]
        # Downstream nodes see one "channel" per source vertex.
        self.mne_info = mne.create_info(channel_labels, frequency)
        self._initialized_as_adaptive = self.is_adaptive
        self._initialized_as_fixed = self.fixed_orientation
        self.fwd_surf = mne.convert_forward_solution(
            fwd, surf_ori=True, force_fixed=False)
        if not self.is_adaptive:
            # NOTE(review): reg=0.05 here vs reg=0.5 in the adaptive rebuild
            # inside _update -- looks inconsistent, confirm which is intended.
            self._filters = make_lcmv(
                info=self._mne_info, forward=self.fwd_surf,
                data_cov=self._Rxx, reg=0.05, pick_ori='max-power',
                weight_norm='unit-noise-gain', reduce_rank=False)
        else:
            self._filters = None

    def _update(self):
        """Apply the (possibly re-estimated) LCMV filter to new data."""
        t1 = time.time()
        input_array = self.input_node.output
        raw_array = mne.io.RawArray(
            input_array, self._mne_info, verbose='ERROR')
        raw_array.pick_types(eeg=True, meg=False, stim=False, exclude='bads')
        raw_array.set_eeg_reference(ref_channels='average', projection=True)
        t2 = time.time()
        self.logger.debug('Prepare arrays in {:.1f} ms'.format(
            (t2 - t1) * 1000))
        if self.is_adaptive:
            # Refresh the running covariance and rebuild the filter on it.
            self._update_covariance_matrix(input_array)
            t1 = time.time()
            self._filters = make_lcmv(info=self._mne_info,
                                      forward=self.fwd_surf,
                                      data_cov=self._Rxx, reg=0.5,
                                      pick_ori='max-power',
                                      weight_norm='unit-noise-gain',
                                      reduce_rank=False)
            t2 = time.time()
            self.logger.debug('Assembled lcmv instance in {:.1f} ms'.format(
                (t2 - t1) * 1000))
            self._filters['source_nn'] = []
        t1 = time.time()
        stc = apply_lcmv_raw(raw=raw_array, filters=self._filters,
                             max_ori_out='signed')
        t2 = time.time()
        self.logger.debug('Applied lcmv inverse in {:.1f} ms'.format(
            (t2 - t1) * 1000))
        output = stc.data
        t1 = time.time()
        if self.fixed_orientation is True:
            if self.output_type == 'power':
                output = output ** 2
        else:
            # Free orientation: combine the three dipole components per
            # vertex into a single power value.
            vertex_count = self.fwd_surf['nsource']
            output = np.sum(
                np.power(output, 2).reshape((vertex_count, 3, -1)), axis=1)
            if self.output_type == 'activation':
                output = np.sqrt(output)
        self.output = output
        t2 = time.time()
        self.logger.debug(
            'Finalized in {:.1f} ms'.format(
                (t2 - t1) * 1000))

    @property
    def mne_forward_model_file_path(self):
        # TODO: fix this
        # User-provided path wins over the auto-detected default.
        return (self._user_provided_forward_model_file_path or
                self._default_forward_model_file_path)

    @mne_forward_model_file_path.setter
    def mne_forward_model_file_path(self, value):
        # This setter is for public use, hence the "user_provided"
        self._user_provided_forward_model_file_path = value

    def _reset(self) -> bool:
        # Only change adaptiveness or fixed_orientation requires reinit
        # if (self._initialized_as_adaptive is not self.is_adaptive
        #         or self._initialized_as_fixed is not self.fixed_orientation):
        self._should_reinitialize = True
        self.initialize()
        output_history_is_no_longer_valid = True
        return output_history_is_no_longer_valid

    def _on_input_history_invalidation(self):
        # Only adaptive version relies on history
        if self._initialized_as_adaptive is True:
            self._should_reinitialize = True
            self.initialize()

    def _check_value(self, key, value):
        """Validate public attribute assignments."""
        if key == 'output_type':
            if value not in self.SUPPORTED_OUTPUT_TYPES:
                # BUG FIX: the original concatenated an unformatted literal
                # with '+', so the first '{}' was never filled in.
                raise ValueError(
                    'Method {} is not supported. Use one of: {}'.format(
                        value, self.SUPPORTED_OUTPUT_TYPES))
        if key == 'snr':
            if value <= 0:
                raise ValueError(
                    'snr (signal-to-noise ratio) must be a positive number')
        if key == 'is_adaptive':
            if not isinstance(value, bool):
                raise ValueError(
                    'Beamformer type (adaptive vs nonadaptive) is not set')

    def _update_covariance_matrix(self, input_array):
        """Recursively update the data covariance with exponential
        forgetting: Rxx <- a*Rxx + (1-a)*X X^T over the new samples."""
        t1 = time.time()
        alpha = self._forgetting_factor_per_sample
        sample_count = input_array.shape[TIME_AXIS]
        self.logger.debug('Number of samples: {}'.format(sample_count))
        new_Rxx_data = self._Rxx.data
        raw_array = mne.io.RawArray(
            input_array, self._mne_info, verbose='ERROR')
        raw_array.pick_types(eeg=True, meg=False, stim=False, exclude='bads')
        raw_array.set_eeg_reference(ref_channels='average', projection=True)
        input_array_nobads = raw_array.get_data()
        t2 = time.time()
        self.logger.debug(
            'Prepared covariance update in {:.2f} ms'.format((t2 - t1) * 1000))
        samples = make_time_dimension_second(input_array_nobads).T
        new_Rxx_data = (alpha * new_Rxx_data +
                        (1 - alpha) * samples.T.dot(samples))
        t3 = time.time()
        self.logger.debug(
            'Updated matrix data in {:.2f} ms'.format((t3 - t2) * 1000))
        self._Rxx = mne.Covariance(new_Rxx_data, self._Rxx.ch_names,
                                   raw_array.info['bads'],
                                   raw_array.info['projs'], nfree=1)
        t4 = time.time()
        # BUG FIX: the original logged (t4 - t4), which always printed 0.00 ms.
        self.logger.debug('Created instance of covariance' +
                          ' in {:.2f} ms'.format((t4 - t3) * 1000))
# TODO: implement this function
def pynfb_filter_based_processor_class(pynfb_filter_class):
    """Wrap a pynfb filter class into a ProcessorNode subclass.

    pynfb_filter_class: subclass of pynfb.signal_processing.filters.BaseFilter

    Sample usage 1:
    LinearFilter = pynfb_filter_based_processor_class(filters.ButterFilter)
    linear_filter = LinearFilter(band, fs, n_channels, order)

    Sample usage 2 (a possible alternative implementation):
    LinearFilter = pynfb_filter_based_processor_class(filters.ButterFilter)
    linear_filter = LinearFilter(band, order)
    where LinearFilter would supply fs and n_channels to
    filters.ButterFilter automatically.
    """
    class PynfbFilterBasedProcessorClass(ProcessorNode):
        # Stub: every hook is intentionally unimplemented for now.
        def __init__(self):
            pass

        def _initialize(self):
            pass

        def _update(self):
            pass

        def _reset(self):
            pass

        def _on_input_history_invalidation(self):
            pass

        def _check_value(self, key, value):
            pass

        @property
        def UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION(self) -> Tuple[str]:
            pass

        @property
        def CHANGES_IN_THESE_REQUIRE_RESET(self) -> Tuple[str]:
            pass

    return PynfbFilterBasedProcessorClass
class MCE(ProcessorNode):
    """Minimum Current Estimate (MCE) inverse-solution node.

    Estimates source currents by solving a linear program (L1-type
    minimization via scipy's linprog) over a rank-truncated forward
    operator; dipole orientations are taken from an MNE solution on the
    latest sample.
    """
    # NOTE(review): class-level mutable attributes shared by all instances;
    # presumably overwritten per instance by the ProcessorNode machinery --
    # confirm, otherwise instances would alias the same lists.
    input = []
    output = []
    UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ()
    CHANGES_IN_THESE_REQUIRE_RESET = ('mne_forward_model_file_path', 'snr')
    def __init__(self, snr=1.0, forward_model_path=None, n_comp=40):
        # snr: signal-to-noise ratio (validated > 0 in _check_value).
        # forward_model_path: path to the MNE forward-solution file.
        # n_comp: number of SVD components kept from the forward operator.
        super().__init__()
        self.snr = snr
        self.mne_forward_model_file_path = forward_model_path
        self.n_comp = n_comp
        self.mne_info = None
        # pass
    def _initialize(self):
        """Load the forward model, truncate its SVD and build the MNE
        inverse operator used to obtain dipole orientations."""
        print('INITIALIZING MCE NODE ...')
        mne_info = self.traverse_back_and_find('mne_info')
        # mne_info['custom_ref_applied'] = True
        # -------- truncated svd for fwd_opr operator -------- #
        fwd, missing_ch_names = get_clean_forward(
            self.mne_forward_model_file_path, mne_info)
        mne_info['bads'] = list(set(mne_info['bads'] + missing_ch_names))
        fwd_fix = mne.convert_forward_solution(
            fwd, surf_ori=True, force_fixed=False)
        self._gain_matrix = fwd_fix['sol']['data']
        print('MCE: COMPUTING SVD OF THE FORWARD OPERATOR')
        U, S, V = svd(self._gain_matrix)
        # Keep only the n_comp largest singular values/vectors.
        Sn = np.zeros([self.n_comp, V.shape[0]])
        Sn[:self.n_comp, :self.n_comp] = np.diag(S[:self.n_comp])
        self.Un = U[:, :self.n_comp]
        self.A_non_ori = Sn @ V
        # ---------------------------------------------------- #
        # -------- leadfield dims -------- #
        N_SEN = self._gain_matrix.shape[0]
        # -------------------------------- #
        # ------------------------ noise-covariance ------------------------ #
        # Identity noise covariance over the EEG sensors.
        cov_data = np.identity(N_SEN)
        ch_names = np.array(mne_info['ch_names'])[mne.pick_types(mne_info,
                                                  eeg=True,
                                                  meg=False)]
        ch_names = list(ch_names)
        noise_cov = mne.Covariance(
            cov_data, ch_names, mne_info['bads'],
            mne_info['projs'], nfree=1)
        # ------------------------------------------------------------------ #
        self.mne_inv = mne_make_inverse_operator(
            mne_info, fwd_fix, noise_cov, depth=0.8,
            loose=1, fixed=False, verbose='ERROR')
        self.mne_info = mne_info
        self.Sn = Sn
        self.V = V
    def _update(self):
        """Solve the MCE linear program for the most recent sample and
        broadcast the solution across all time points of the chunk."""
        input_array = self.input_node.output
        last_slice = last_sample(input_array)
        n_src = self.mne_inv['nsource']
        n_times = input_array.shape[1]
        output_mce = np.empty([n_src, n_times])
        raw_slice = mne.io.RawArray(np.expand_dims(last_slice, axis=1),
                                    self.mne_info, verbose='ERROR')
        raw_slice.pick_types(eeg=True, meg=False, stim=False, exclude='bads')
        raw_slice.set_eeg_reference(ref_channels='average', projection=True)
        # ------------------- get dipole orientations --------------------- #
        stc_slice = apply_inverse_raw(raw_slice, self.mne_inv,
                                      pick_ori='vector',
                                      method='MNE', lambda2=1, verbose='ERROR')
        Q = normalize(stc_slice.data[:, :, 0])  # dipole orientations
        # ----------------------------------------------------------------- #
        # -------- setup linprog params -------- #
        # Collapse each source's 3 orientation columns into one using Q.
        n_sen = self.A_non_ori.shape[0]
        A_eq = np.empty([n_sen, n_src])
        for i in range(n_src):
            A_eq[:, i] = self.A_non_ori[:, i * 3: (i + 1) * 3] @ Q[i, :].T
        data_slice = raw_slice.get_data()[:, 0]
        b_eq = self.Un.T @ data_slice
        c = np.ones(A_eq.shape[1])
        # -------------------------------------- #
        # Suppress solver chatter while solving the L1 problem.
        with nostdout():
            sol = linprog(c, A_eq=A_eq, b_eq=b_eq,
                          method='interior-point', bounds=(0, None),
                          options={'disp': False})
        # Same solution is replicated over every time point of the chunk.
        output_mce[:, :] = sol.x[:, np.newaxis]
        self.output = output_mce
        self.sol = sol
        return Q, A_eq, data_slice, b_eq, c
    def _on_input_history_invalidation(self):
        # The methods implemented in this node do not rely on past inputs
        pass
    def _reset(self):
        self._should_reinitialize = True
        self.initialize()
        output_history_is_no_longer_valid = True
        return output_history_is_no_longer_valid
    def _check_value(self, key, value):
        # Only snr is validated; other attributes are accepted as-is.
        if key == 'snr':
            if value <= 0:
                raise ValueError(
                    'snr (signal-to-noise ratio) must be a positive number.')
class ICARejection(ProcessorNode):
    """Collects a fixed-length buffer of filtered data, lets the user pick
    ICA components to reject via an interactive ICADialog, and afterwards
    applies the resulting rejection matrix to every incoming chunk."""
    def __init__(self, collect_for_x_seconds: int=60):
        # collect_for_x_seconds: how much data to accumulate before the
        # ICA dialog is shown.
        super().__init__()
        self.collect_for_x_seconds = collect_for_x_seconds  # type: int
        self._samples_collected = None  # type: int
        self._samples_to_be_collected = None  # type: int
        self._enough_collected = None  # type: bool
        self._reset_statistics()
        self._ica_rejector = None
    def _on_input_history_invalidation(self):
        self._reset_statistics()
    def _check_value(self, key, value):
        # No validation needed for this node's attributes.
        pass
    CHANGES_IN_THESE_REQUIRE_RESET = ('collect_for_x_seconds', )
    def _initialize(self):
        """Cache channel info and pre-allocate the collection buffer and
        the 1-200 Hz Butterworth pre-filter."""
        self._mne_info = self.traverse_back_and_find('mne_info')
        self._frequency = self._mne_info['sfreq']
        self._good_ch_inds = mne.pick_types(self._mne_info, eeg=True,
                                            meg=False, stim=False,
                                            exclude='bads')
        channels = self._mne_info['chs']
        self._ch_locs = np.array([ch['loc'] for ch in channels])
        n_ch = len(self._good_ch_inds)
        self._samples_to_be_collected = int(math.ceil(
            self.collect_for_x_seconds * self._frequency))
        self._collected_timeseries = np.zeros(
            [n_ch, self._samples_to_be_collected])
        self._linear_filter = filters.ButterFilter(
            [1, 200], fs=self._frequency,
            n_channels=len(self._good_ch_inds))
        self._linear_filter.apply = pynfb_ndarray_function_wrapper(
            self._linear_filter.apply)
    def _reset(self) -> bool:
        self._reset_statistics()
        self._input_history_is_no_longer_valid = True
        return self._input_history_is_no_longer_valid
    def _reset_statistics(self):
        self._samples_collected = 0
        self._enough_collected = False
    def _update(self):
        # Have we collected enough samples without the new input?
        # NOTE: self.output aliases the upstream array; the rejection
        # branch below writes into it in place.
        self.output = self.input_node.output
        enough_collected = self._samples_collected >=\
            self._samples_to_be_collected
        if not enough_collected:
            if self.input_node.output is not None and\
                    self.input_node.output.shape[TIME_AXIS] > 0:
                self._update_statistics()
        elif not self._enough_collected:  # We just got enough samples
            # Blocking interactive dialog: the user selects components
            # to reject; its transposed rejection matrix is kept.
            self._enough_collected = True
            print('COLLECTED ENOUGH SAMPLES')
            ica = ICADialog(
                self._collected_timeseries.T,
                list(np.array(self._mne_info['ch_names'])[self._good_ch_inds]),
                self._ch_locs[self._good_ch_inds, :], self._frequency)
            ica.exec_()
            self._ica_rejector = ica.rejection.val.T
        else:
            # Steady state: project the good channels through the rejector.
            self.output[self._good_ch_inds, :] = np.dot(
                self._ica_rejector,
                self.input_node.output[self._good_ch_inds, :])
    def _update_statistics(self):
        """Append the band-pass-filtered new chunk to the collection buffer."""
        input_array = self.input_node.output.astype(np.dtype('float64'))
        n = self._samples_collected
        m = input_array.shape[TIME_AXIS]  # number of new samples
        self._samples_collected += m
        # NOTE(review): if n + m exceeds the buffer length the slice on the
        # left is shorter than the m columns on the right -- looks like the
        # final chunk could raise a broadcast error; confirm chunk sizes.
        self._collected_timeseries[:, n:n + m] = self._linear_filter.apply(
            input_array[self._good_ch_inds, :])
        # Using float64 is necessary because otherwise rounding error
        # in recursive formula accumulate
        pass
    UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info', )
    SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info': channel_labels_saver}
| 39.238155 | 83 | 0.611236 |
acf9a37fae71a5ee76aede3e3fbb4671d29b4b7f | 803 | py | Python | contextual-repr-analysis/contexteval/models/__init__.py | Albert-Ma/bert-fine-tuned-gain | f752c1182f1c800f5f56998e13fd6115929df655 | [
"Apache-2.0"
] | 2 | 2020-10-29T01:26:43.000Z | 2021-12-12T12:05:26.000Z | contextual-repr-analysis/contexteval/models/__init__.py | Albert-Ma/bert-fine-tuned-gain | f752c1182f1c800f5f56998e13fd6115929df655 | [
"Apache-2.0"
] | null | null | null | contextual-repr-analysis/contexteval/models/__init__.py | Albert-Ma/bert-fine-tuned-gain | f752c1182f1c800f5f56998e13fd6115929df655 | [
"Apache-2.0"
] | null | null | null | from contexteval.models.pairwise_tagger import PairwiseTagger
from contexteval.models.selective_regressor import SelectiveRegressor
from contexteval.models.selective_tagger import SelectiveTagger
from contexteval.models.tagger import Tagger
from contexteval.models.word_conditional_majority_pairwise_tagger import (
WordConditionalMajorityPairwiseTagger)
from contexteval.models.word_conditional_majority_selective_tagger import (
WordConditionalMajoritySelectiveTagger)
from contexteval.models.word_conditional_majority_tagger import WordConditionalMajorityTagger
__all__ = ["PairwiseTagger", "SelectiveRegressor", "SelectiveTagger", "Tagger",
"WordConditionalMajorityPairwiseTagger",
"WordConditionalMajoritySelectiveTagger",
"WordConditionalMajorityTagger"]
| 53.533333 | 93 | 0.84807 |
acf9a3998ce9f7d79ec2d3dea7757a691efccda7 | 1,354 | py | Python | Server_Client/Scripts/client.py | SRFG-MAT/RoboGen-DeepSpeechServices | eab5ee4bf8b9a4bc0758c6173447f2c5bb15d171 | [
"MIT"
] | null | null | null | Server_Client/Scripts/client.py | SRFG-MAT/RoboGen-DeepSpeechServices | eab5ee4bf8b9a4bc0758c6173447f2c5bb15d171 | [
"MIT"
] | null | null | null | Server_Client/Scripts/client.py | SRFG-MAT/RoboGen-DeepSpeechServices | eab5ee4bf8b9a4bc0758c6173447f2c5bb15d171 | [
"MIT"
] | null | null | null | import requests
import argparse
def writeBytesToFile(dataToWrite):
    """Write the given bytes to ./clientAudio.mp3 in the working directory.

    Uses a context manager so the file handle is always closed, even if
    the write raises (the original leaked the handle on error).
    """
    # NOTE(review): output path is hard-coded; saveReceivedAudioFile
    # relies on this exact location.
    with open("./clientAudio.mp3", "wb") as audio_file:
        audio_file.write(dataToWrite)
def saveReceivedAudioFile(resp):
    """Persist the audio payload of a successful response to disk; for
    any non-200 status print the server's error message instead."""
    if resp.status_code == 200:
        payload = bytearray(resp.json()['data'])
        writeBytesToFile(payload)
        print("Audio file saved")
    else:
        print(f"{resp.status_code}: {resp.json()['Message']}!")
def getAudioRequest(hostName, port, text, language):
    """GET /audio from the TTS server, passing the text to synthesize
    and the language as query parameters; returns the Response."""
    endpoint = f'http://{hostName}:{port}/audio'
    query = {'text': text, 'language': language}
    return requests.get(endpoint, params=query)
if __name__ == "__main__":
    # CLI entry point: parse arguments, request synthesized audio from
    # the server and save the result to ./clientAudio.mp3.
    argsparser = argparse.ArgumentParser(description="Started Client")
    argsparser.add_argument("-host", "--hostname",
                            required=True, help="Hostname is required!")
    argsparser.add_argument(
        "-p", "--port", help="Port is optional!", default=5000)
    argsparser.add_argument("-t", "--text", required=True,
                            help="Text to translate is required!")
    argsparser.add_argument("-l", "--language",
                            help="Language of speaker is in default German! Supported --> [en | de]!", default="de")
    args = argsparser.parse_args()
    response = getAudioRequest(
        args.hostname, args.port, args.text, args.language)
    saveReceivedAudioFile(response)
| 32.238095 | 116 | 0.645495 |
acf9a410b16c8946c7bb1b57da3cf3522ee5caa1 | 36,310 | py | Python | detectron/core/test.py | gbegkas/Detectron | 8d53dcdc2d1282938636f8dd45859101214730ff | [
"Apache-2.0"
] | null | null | null | detectron/core/test.py | gbegkas/Detectron | 8d53dcdc2d1282938636f8dd45859101214730ff | [
"Apache-2.0"
] | null | null | null | detectron/core/test.py | gbegkas/Detectron | 8d53dcdc2d1282938636f8dd45859101214730ff | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Inference functionality for most Detectron models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import cv2
import logging
import numpy as np
import multiprocessing
from caffe2.python import core
from caffe2.python import workspace
import pycocotools.mask as mask_util
from detectron.core.config import cfg
from detectron.utils.timer import Timer
import detectron.core.test_retinanet as test_retinanet
import detectron.modeling.FPN as fpn
import detectron.utils.blob as blob_utils
import detectron.utils.boxes as box_utils
import detectron.utils.image as image_utils
import detectron.utils.keypoints as keypoint_utils
from joblib import Parallel, delayed
logger = logging.getLogger(__name__)
def im_detect_all(model, im, box_proposals, timers=None):
    """Run full inference (boxes, optional masks, optional keypoints) on
    a single image.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im (ndarray): BGR image to test
        box_proposals (ndarray): precomputed proposals, or None when the
            model generates its own (Faster R-CNN / RPN)
        timers (dict): optional defaultdict(Timer) used for profiling

    Returns:
        cls_boxes, cls_segms, cls_keyps: per-class results; the last two
        are None when the corresponding head is disabled or no boxes
        survive NMS.
    """
    if timers is None:
        timers = defaultdict(Timer)
    # Handle RetinaNet testing separately for now
    if cfg.RETINANET.RETINANET_ON:
        cls_boxes = test_retinanet.im_detect_bbox(model, im, timers)
        return cls_boxes, None, None
    timers['im_detect_bbox'].tic()
    if cfg.TEST.BBOX_AUG.ENABLED:
        scores, boxes, im_scale = im_detect_bbox_aug(model, im, box_proposals)
    else:
        scores, boxes, im_scale = im_detect_bbox(
            model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
        )
    timers['im_detect_bbox'].toc()
    # score and boxes are from the whole image after score thresholding and nms
    # (they are not separated by class)
    # cls_boxes boxes and scores are separated by class and in the format used
    # for evaluating results
    timers['misc_bbox'].tic()
    scores, boxes, cls_boxes = box_results_with_nms_and_limit(scores, boxes)
    timers['misc_bbox'].toc()
    if cfg.MODEL.MASK_ON and boxes.shape[0] > 0:
        timers['im_detect_mask'].tic()
        if cfg.TEST.MASK_AUG.ENABLED:
            masks = im_detect_mask_aug(model, im, boxes)
        else:
            # Reuses the blobs already fed by im_detect_bbox above.
            masks = im_detect_mask(model, im_scale, boxes)
        timers['im_detect_mask'].toc()
        timers['misc_mask'].tic()
        cls_segms = segm_results(
            cls_boxes, masks, boxes, im.shape[0], im.shape[1], timers
        )
        timers['misc_mask'].toc()
    else:
        cls_segms = None
    if cfg.MODEL.KEYPOINTS_ON and boxes.shape[0] > 0:
        timers['im_detect_keypoints'].tic()
        if cfg.TEST.KPS_AUG.ENABLED:
            heatmaps = im_detect_keypoints_aug(model, im, boxes)
        else:
            heatmaps = im_detect_keypoints(model, im_scale, boxes)
        timers['im_detect_keypoints'].toc()
        timers['misc_keypoints'].tic()
        cls_keyps = keypoint_results(cls_boxes, heatmaps, boxes)
        timers['misc_keypoints'].toc()
    else:
        cls_keyps = None
    return cls_boxes, cls_segms, cls_keyps
def im_conv_body_only(model, im, target_scale, target_max_size):
    """Run only the backbone (`model.conv_body_net`) on image `im` and
    return the scale factor that was applied to the image blob."""
    blob, scale, _ = blob_utils.get_image_blob(
        im, target_scale, target_max_size)
    workspace.FeedBlob(core.ScopedName('data'), blob)
    workspace.RunNet(model.conv_body_net.Proto().name)
    return scale
def im_detect_bbox(model, im, target_scale, target_max_size, boxes=None):
    """Bounding box object detection for an image with given box proposals.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im (ndarray): color image to test (in BGR order)
        boxes (ndarray): R x 4 array of object proposals in 0-indexed
            [x1, y1, x2, y2] format, or None if using RPN

    Returns:
        scores (ndarray): R x K array of object class scores for K classes
            (K includes background as object category 0)
        boxes (ndarray): R x 4*K array of predicted bounding boxes
        im_scale (float): scale factor applied to the input image blob
            (for use with im_detect_mask, etc.)
    """
    inputs, im_scale = _get_blobs(im, boxes, target_scale, target_max_size)
    # When mapping from image ROIs to feature map ROIs, there's some aliasing
    # (some distinct image ROIs get mapped to the same feature ROI).
    # Here, we identify duplicate feature ROIs, so we only compute features
    # on the unique subset.
    if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
        v = np.array([1, 1e3, 1e6, 1e9, 1e12])
        hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v)
        _, index, inv_index = np.unique(
            hashes, return_index=True, return_inverse=True
        )
        inputs['rois'] = inputs['rois'][index, :]
        boxes = boxes[index, :]
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS and not cfg.MODEL.FASTER_RCNN:
        _add_multilevel_rois_for_test(inputs, 'rois')
    # Feed all blobs and run the full detection net.
    for k, v in inputs.items():
        workspace.FeedBlob(core.ScopedName(k), v)
    workspace.RunNet(model.net.Proto().name)
    # Read out blobs
    if cfg.MODEL.FASTER_RCNN:
        rois = workspace.FetchBlob(core.ScopedName('rois'))
        # unscale back to raw image space
        boxes = rois[:, 1:5] / im_scale
    # Softmax class probabilities
    scores = workspace.FetchBlob(core.ScopedName('cls_prob')).squeeze()
    # In case there is 1 proposal
    scores = scores.reshape([-1, scores.shape[-1]])
    if cfg.TEST.BBOX_REG:
        # Apply bounding-box regression deltas
        box_deltas = workspace.FetchBlob(core.ScopedName('bbox_pred')).squeeze()
        # In case there is 1 proposal
        box_deltas = box_deltas.reshape([-1, box_deltas.shape[-1]])
        if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
            # Remove predictions for bg class (compat with MSRA code)
            box_deltas = box_deltas[:, -4:]
        pred_boxes = box_utils.bbox_transform(
            boxes, box_deltas, cfg.MODEL.BBOX_REG_WEIGHTS
        )
        pred_boxes = box_utils.clip_tiled_boxes(pred_boxes, im.shape)
        if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
            pred_boxes = np.tile(pred_boxes, (1, scores.shape[1]))
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes = np.tile(boxes, (1, scores.shape[1]))
    if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
        # Map scores and predictions back to the original set of boxes
        scores = scores[inv_index, :]
        pred_boxes = pred_boxes[inv_index, :]
    return scores, pred_boxes, im_scale
def im_detect_bbox_aug(model, im, box_proposals=None):
    """Performs bbox detection with test-time augmentations.

    Runs detection on the horizontal flip, extra scales and aspect
    ratios configured in cfg.TEST.BBOX_AUG, then combines scores and
    coordinates with the configured heuristics (ID / AVG / UNION).
    Function signature is the same as for im_detect_bbox.
    """
    assert not cfg.TEST.BBOX_AUG.SCALE_SIZE_DEP, \
        'Size dependent scaling not implemented'
    assert not cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION' or \
        cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION', \
        'Coord heuristic must be union whenever score heuristic is union'
    assert not cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION' or \
        cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
        'Score heuristic must be union whenever coord heuristic is union'
    assert not cfg.MODEL.FASTER_RCNN or \
        cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
        'Union heuristic must be used to combine Faster RCNN predictions'
    # Collect detections computed under different transformations
    scores_ts = []
    boxes_ts = []
    def add_preds_t(scores_t, boxes_t):
        scores_ts.append(scores_t)
        boxes_ts.append(boxes_t)
    # Perform detection on the horizontally flipped image
    if cfg.TEST.BBOX_AUG.H_FLIP:
        scores_hf, boxes_hf, _ = im_detect_bbox_hflip(
            model,
            im,
            cfg.TEST.SCALE,
            cfg.TEST.MAX_SIZE,
            box_proposals=box_proposals
        )
        add_preds_t(scores_hf, boxes_hf)
    # Compute detections at different scales
    for scale in cfg.TEST.BBOX_AUG.SCALES:
        max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
        scores_scl, boxes_scl = im_detect_bbox_scale(
            model, im, scale, max_size, box_proposals
        )
        add_preds_t(scores_scl, boxes_scl)
        if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
            scores_scl_hf, boxes_scl_hf = im_detect_bbox_scale(
                model, im, scale, max_size, box_proposals, hflip=True
            )
            add_preds_t(scores_scl_hf, boxes_scl_hf)
    # Perform detection at different aspect ratios
    for aspect_ratio in cfg.TEST.BBOX_AUG.ASPECT_RATIOS:
        scores_ar, boxes_ar = im_detect_bbox_aspect_ratio(
            model, im, aspect_ratio, box_proposals
        )
        add_preds_t(scores_ar, boxes_ar)
        if cfg.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP:
            scores_ar_hf, boxes_ar_hf = im_detect_bbox_aspect_ratio(
                model, im, aspect_ratio, box_proposals, hflip=True
            )
            add_preds_t(scores_ar_hf, boxes_ar_hf)
    # Compute detections for the original image (identity transform) last to
    # ensure that the Caffe2 workspace is populated with blobs corresponding
    # to the original image on return (postcondition of im_detect_bbox)
    scores_i, boxes_i, im_scale_i = im_detect_bbox(
        model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
    )
    add_preds_t(scores_i, boxes_i)
    # Combine the predicted scores
    if cfg.TEST.BBOX_AUG.SCORE_HEUR == 'ID':
        scores_c = scores_i
    elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'AVG':
        scores_c = np.mean(scores_ts, axis=0)
    elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION':
        scores_c = np.vstack(scores_ts)
    else:
        raise NotImplementedError(
            'Score heur {} not supported'.format(cfg.TEST.BBOX_AUG.SCORE_HEUR)
        )
    # Combine the predicted boxes
    if cfg.TEST.BBOX_AUG.COORD_HEUR == 'ID':
        boxes_c = boxes_i
    elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'AVG':
        boxes_c = np.mean(boxes_ts, axis=0)
    elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION':
        boxes_c = np.vstack(boxes_ts)
    else:
        raise NotImplementedError(
            'Coord heur {} not supported'.format(cfg.TEST.BBOX_AUG.COORD_HEUR)
        )
    return scores_c, boxes_c, im_scale_i
def im_detect_bbox_hflip(
    model, im, target_scale, target_max_size, box_proposals=None
):
    """Detect boxes on the horizontally mirrored image and map the
    detections back into the original image frame.

    Same signature and return values as im_detect_bbox.
    """
    width = im.shape[1]
    flipped = im[:, ::-1, :]
    # External proposals exist only outside the end-to-end Faster R-CNN
    # configuration; mirror them so they match the flipped image.
    proposals_hf = (
        None if cfg.MODEL.FASTER_RCNN
        else box_utils.flip_boxes(box_proposals, width)
    )
    scores_hf, boxes_hf, im_scale = im_detect_bbox(
        model, flipped, target_scale, target_max_size, boxes=proposals_hf
    )
    # Un-mirror the predicted coordinates.
    return scores_hf, box_utils.flip_boxes(boxes_hf, width), im_scale
def im_detect_bbox_scale(
    model, im, target_scale, target_max_size, box_proposals=None, hflip=False
):
    """Compute bbox detections at one specific scale (optionally on the
    horizontal flip); predictions are in the original image space."""
    if hflip:
        scores, boxes, _ = im_detect_bbox_hflip(
            model, im, target_scale, target_max_size,
            box_proposals=box_proposals
        )
        return scores, boxes
    scores, boxes, _ = im_detect_bbox(
        model, im, target_scale, target_max_size, boxes=box_proposals
    )
    return scores, boxes
def im_detect_bbox_aspect_ratio(
    model, im, aspect_ratio, box_proposals=None, hflip=False
):
    """Compute bbox detections at the given width-relative aspect ratio;
    results are mapped back to the original image space."""
    # Stretch the image (and any external proposals) horizontally.
    im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio)
    proposals_ar = (
        None if cfg.MODEL.FASTER_RCNN
        else box_utils.aspect_ratio(box_proposals, aspect_ratio)
    )
    if hflip:
        scores_ar, boxes_ar, _ = im_detect_bbox_hflip(
            model,
            im_ar,
            cfg.TEST.SCALE,
            cfg.TEST.MAX_SIZE,
            box_proposals=proposals_ar
        )
    else:
        scores_ar, boxes_ar, _ = im_detect_bbox(
            model,
            im_ar,
            cfg.TEST.SCALE,
            cfg.TEST.MAX_SIZE,
            boxes=proposals_ar
        )
    # Undo the aspect-ratio transform on the detected coordinates.
    return scores_ar, box_utils.aspect_ratio(boxes_ar, 1.0 / aspect_ratio)
def im_detect_mask(model, im_scale, boxes):
    """Infer instance segmentation masks. This function must be called after
    im_detect_bbox as it assumes that the Caffe2 workspace is already populated
    with the necessary blobs.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (float): image blob scale as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)

    Returns:
        pred_masks (ndarray): R x K x M x M array of class specific soft masks
            output by the network (must be processed by segm_results to convert
            into hard masks in the original image coordinate space)
    """
    M = cfg.MRCNN.RESOLUTION
    if boxes.shape[0] == 0:
        pred_masks = np.zeros((0, M, M), np.float32)
        return pred_masks
    inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'mask_rois')
    for k, v in inputs.items():
        workspace.FeedBlob(core.ScopedName(k), v)
    workspace.RunNet(model.mask_net.Proto().name)
    # Fetch masks
    pred_masks = workspace.FetchBlob(
        core.ScopedName('mask_fcn_probs')
    ).squeeze()
    # One mask per class when class-specific; a single shared mask otherwise.
    if cfg.MRCNN.CLS_SPECIFIC_MASK:
        pred_masks = pred_masks.reshape([-1, cfg.MODEL.NUM_CLASSES, M, M])
    else:
        pred_masks = pred_masks.reshape([-1, 1, M, M])
    return pred_masks
def im_detect_mask_aug(model, im, boxes):
    """Performs mask detection with test-time augmentations.

    Soft masks are predicted for the identity transform, the horizontal
    flip, and the scales / aspect ratios configured in cfg.TEST.MASK_AUG,
    then fused with the configured heuristic.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im (ndarray): BGR image to test
        boxes (ndarray): R x 4 array of bounding boxes

    Returns:
        masks (ndarray): R x K x M x M array of class specific soft masks
    """
    assert not cfg.TEST.MASK_AUG.SCALE_SIZE_DEP, \
        'Size dependent scaling not implemented'
    # Collect masks computed under different transformations
    masks_ts = []
    # Compute masks for the original image (identity transform)
    im_scale_i = im_conv_body_only(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
    masks_i = im_detect_mask(model, im_scale_i, boxes)
    masks_ts.append(masks_i)
    # Perform mask detection on the horizontally flipped image
    if cfg.TEST.MASK_AUG.H_FLIP:
        masks_hf = im_detect_mask_hflip(
            model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
        )
        masks_ts.append(masks_hf)
    # Compute detections at different scales
    for scale in cfg.TEST.MASK_AUG.SCALES:
        max_size = cfg.TEST.MASK_AUG.MAX_SIZE
        masks_scl = im_detect_mask_scale(model, im, scale, max_size, boxes)
        masks_ts.append(masks_scl)
        if cfg.TEST.MASK_AUG.SCALE_H_FLIP:
            masks_scl_hf = im_detect_mask_scale(
                model, im, scale, max_size, boxes, hflip=True
            )
            masks_ts.append(masks_scl_hf)
    # Compute masks at different aspect ratios
    for aspect_ratio in cfg.TEST.MASK_AUG.ASPECT_RATIOS:
        masks_ar = im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes)
        masks_ts.append(masks_ar)
        if cfg.TEST.MASK_AUG.ASPECT_RATIO_H_FLIP:
            masks_ar_hf = im_detect_mask_aspect_ratio(
                model, im, aspect_ratio, boxes, hflip=True
            )
            masks_ts.append(masks_ar_hf)
    # Combine the predicted soft masks
    if cfg.TEST.MASK_AUG.HEUR == 'SOFT_AVG':
        masks_c = np.mean(masks_ts, axis=0)
    elif cfg.TEST.MASK_AUG.HEUR == 'SOFT_MAX':
        masks_c = np.amax(masks_ts, axis=0)
    elif cfg.TEST.MASK_AUG.HEUR == 'LOGIT_AVG':
        # Average in logit space; the 1e-20 clamp avoids division by zero
        # for probabilities that are exactly 0.
        def logit(y):
            return -1.0 * np.log((1.0 - y) / np.maximum(y, 1e-20))
        logit_masks = [logit(y) for y in masks_ts]
        logit_masks = np.mean(logit_masks, axis=0)
        masks_c = 1.0 / (1.0 + np.exp(-logit_masks))
    else:
        raise NotImplementedError(
            'Heuristic {} not supported'.format(cfg.TEST.MASK_AUG.HEUR)
        )
    return masks_c
def im_detect_mask_hflip(model, im, target_scale, target_max_size, boxes):
    """Performs mask detection on the horizontally flipped image.
    Function signature is the same as for im_detect_mask_aug.
    """
    # Run mask detection on the mirrored image with mirrored boxes
    flipped_im = im[:, ::-1, :]
    flipped_boxes = box_utils.flip_boxes(boxes, im.shape[1])
    im_scale = im_conv_body_only(model, flipped_im, target_scale, target_max_size)
    flipped_masks = im_detect_mask(model, im_scale, flipped_boxes)
    # Mirror the soft masks back so they align with the original image
    return flipped_masks[:, :, :, ::-1]
def im_detect_mask_scale(
        model, im, target_scale, target_max_size, boxes, hflip=False
):
    """Computes masks at the given scale, optionally on the flipped image."""
    if hflip:
        # Delegate to the flip helper, which un-flips the result
        return im_detect_mask_hflip(model, im, target_scale, target_max_size, boxes)
    im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
    return im_detect_mask(model, im_scale, boxes)
def im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes, hflip=False):
    """Computes mask detections at the given width-relative aspect ratio."""
    # Warp both the image and the boxes to the requested aspect ratio
    warped_im = image_utils.aspect_ratio_rel(im, aspect_ratio)
    warped_boxes = box_utils.aspect_ratio(boxes, aspect_ratio)
    if hflip:
        return im_detect_mask_hflip(
            model, warped_im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, warped_boxes
        )
    im_scale = im_conv_body_only(
        model, warped_im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
    )
    return im_detect_mask(model, im_scale, warped_boxes)
def im_detect_keypoints(model, im_scale, boxes):
    """Infer instance keypoint poses. This function must be called after
    im_detect_bbox as it assumes that the Caffe2 workspace is already populated
    with the necessary blobs.
    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (list): image blob scales as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)
    Returns:
        pred_heatmaps (ndarray): R x J x M x M array of keypoint location
            logits (softmax inputs) for each of the J keypoint types output
            by the network (must be processed by keypoint_results to convert
            into point predictions in the original image coordinate space)
    """
    M = cfg.KRCNN.HEATMAP_SIZE
    # Nothing to infer: return an empty, correctly-shaped heatmap array
    if boxes.shape[0] == 0:
        return np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
    inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'keypoint_rois')
    for blob_name, blob_value in inputs.items():
        workspace.FeedBlob(core.ScopedName(blob_name), blob_value)
    workspace.RunNet(model.keypoint_net.Proto().name)
    pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
    # squeeze() drops the batch axis when there is a single box; restore it
    if pred_heatmaps.ndim == 3:
        pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
    return pred_heatmaps
def im_detect_keypoints_aug(model, im, boxes):
    """Computes keypoint predictions with test-time augmentations.
    Arguments:
        model (DetectionModelHelper): the detection model to use
        im (ndarray): BGR image to test
        boxes (ndarray): R x 4 array of bounding boxes
    Returns:
        heatmaps (ndarray): R x J x M x M array of keypoint location logits
    """
    # Collect heatmaps predicted under different transformations
    heatmaps_ts = []
    # Tag predictions computed under downscaling and upscaling transformations.
    # These three lists must stay index-aligned: entry i of ds_ts/us_ts tags
    # entry i of heatmaps_ts, and combine_heatmaps_size_dep relies on that.
    ds_ts = []
    us_ts = []
    def add_heatmaps_t(heatmaps_t, ds_t=False, us_t=False):
        # Append a prediction together with its down/up-scaling tags so the
        # three lists above grow in lockstep.
        heatmaps_ts.append(heatmaps_t)
        ds_ts.append(ds_t)
        us_ts.append(us_t)
    # Compute the heatmaps for the original image (identity transform)
    im_scale = im_conv_body_only(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
    heatmaps_i = im_detect_keypoints(model, im_scale, boxes)
    add_heatmaps_t(heatmaps_i)
    # Perform keypoints detection on the horizontally flipped image
    if cfg.TEST.KPS_AUG.H_FLIP:
        heatmaps_hf = im_detect_keypoints_hflip(
            model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
        )
        add_heatmaps_t(heatmaps_hf)
    # Compute detections at different scales
    for scale in cfg.TEST.KPS_AUG.SCALES:
        # Tag whether this scale shrinks or enlarges relative to the test scale
        ds_scl = scale < cfg.TEST.SCALE
        us_scl = scale > cfg.TEST.SCALE
        heatmaps_scl = im_detect_keypoints_scale(
            model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes
        )
        add_heatmaps_t(heatmaps_scl, ds_scl, us_scl)
        if cfg.TEST.KPS_AUG.SCALE_H_FLIP:
            heatmaps_scl_hf = im_detect_keypoints_scale(
                model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes, hflip=True
            )
            add_heatmaps_t(heatmaps_scl_hf, ds_scl, us_scl)
    # Compute keypoints at different aspect ratios
    for aspect_ratio in cfg.TEST.KPS_AUG.ASPECT_RATIOS:
        heatmaps_ar = im_detect_keypoints_aspect_ratio(
            model, im, aspect_ratio, boxes
        )
        add_heatmaps_t(heatmaps_ar)
        if cfg.TEST.KPS_AUG.ASPECT_RATIO_H_FLIP:
            heatmaps_ar_hf = im_detect_keypoints_aspect_ratio(
                model, im, aspect_ratio, boxes, hflip=True
            )
            add_heatmaps_t(heatmaps_ar_hf)
    # Select the heuristic function for combining the heatmaps
    if cfg.TEST.KPS_AUG.HEUR == 'HM_AVG':
        np_f = np.mean
    elif cfg.TEST.KPS_AUG.HEUR == 'HM_MAX':
        np_f = np.amax
    else:
        raise NotImplementedError(
            'Heuristic {} not supported'.format(cfg.TEST.KPS_AUG.HEUR)
        )
    def heur_f(hms_ts):
        # Reduce a list of heatmaps along the transformation axis
        return np_f(hms_ts, axis=0)
    # Combine the heatmaps; the size-dependent path filters predictions per
    # object using the ds/us tags collected above
    if cfg.TEST.KPS_AUG.SCALE_SIZE_DEP:
        heatmaps_c = combine_heatmaps_size_dep(
            heatmaps_ts, ds_ts, us_ts, boxes, heur_f
        )
    else:
        heatmaps_c = heur_f(heatmaps_ts)
    return heatmaps_c
def im_detect_keypoints_hflip(model, im, target_scale, target_max_size, boxes):
    """Computes keypoint predictions on the horizontally flipped image.
    Function signature is the same as for im_detect_keypoints_aug.
    """
    # Run keypoint detection on the mirrored image with mirrored boxes
    flipped_im = im[:, ::-1, :]
    flipped_boxes = box_utils.flip_boxes(boxes, im.shape[1])
    im_scale = im_conv_body_only(model, flipped_im, target_scale, target_max_size)
    flipped_heatmaps = im_detect_keypoints(model, im_scale, flipped_boxes)
    # Flip the heatmaps back (also swaps left/right keypoint channels)
    return keypoint_utils.flip_heatmaps(flipped_heatmaps)
def im_detect_keypoints_scale(
        model, im, target_scale, target_max_size, boxes, hflip=False
):
    """Computes keypoint predictions at the given scale."""
    if hflip:
        # Flip helper handles the un-flipping of the resulting heatmaps
        return im_detect_keypoints_hflip(
            model, im, target_scale, target_max_size, boxes
        )
    im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
    return im_detect_keypoints(model, im_scale, boxes)
def im_detect_keypoints_aspect_ratio(
        model, im, aspect_ratio, boxes, hflip=False
):
    """Detects keypoints at the given width-relative aspect ratio."""
    # Warp both the image and the boxes to the requested aspect ratio
    warped_im = image_utils.aspect_ratio_rel(im, aspect_ratio)
    warped_boxes = box_utils.aspect_ratio(boxes, aspect_ratio)
    if hflip:
        return im_detect_keypoints_hflip(
            model, warped_im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, warped_boxes
        )
    im_scale = im_conv_body_only(
        model, warped_im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
    )
    return im_detect_keypoints(model, im_scale, warped_boxes)
def combine_heatmaps_size_dep(hms_ts, ds_ts, us_ts, boxes, heur_f):
    """Combines heatmaps while taking object sizes into account."""
    assert len(hms_ts) == len(ds_ts) and len(ds_ts) == len(us_ts), \
        'All sets of hms must be tagged with downscaling and upscaling flags'
    # Classify objects into small+medium vs large by box area
    areas = box_utils.boxes_area(boxes)
    is_small = areas < cfg.TEST.KPS_AUG.AREA_TH
    is_large = areas >= cfg.TEST.KPS_AUG.AREA_TH
    # Combine per object, skipping transformations that hurt its size class:
    # downscaled predictions for small/medium objects and upscaled
    # predictions for large objects are discarded
    combined = np.zeros_like(hms_ts[0])
    for obj_idx in range(combined.shape[0]):
        selected = [
            hms[obj_idx]
            for hms, was_ds, was_us in zip(hms_ts, ds_ts, us_ts)
            if not (is_small[obj_idx] and was_ds)
            and not (is_large[obj_idx] and was_us)
        ]
        combined[obj_idx] = heur_f(selected)
    return combined
def box_results_with_nms_and_limit(scores, boxes):
    """Returns bounding-box detection results by thresholding on scores and
    applying non-maximum suppression (NMS).
    `boxes` has shape (#detections, 4 * #classes), where each row represents
    a list of predicted bounding boxes for each of the object classes in the
    dataset (including the background class). The detections in each row
    originate from the same object proposal.
    `scores` has shape (#detection, #classes), where each row represents a list
    of object detection confidence scores for each of the object classes in the
    dataset (including the background class). `scores[i, j]`` corresponds to the
    box at `boxes[i, j * 4:(j + 1) * 4]`.
    Returns a tuple (scores, boxes, cls_boxes) where scores/boxes are the
    surviving detections stacked over all foreground classes and cls_boxes[j]
    is an N_j x 5 array of [x1, y1, x2, y2, score] for class j.
    """
    num_classes = cfg.MODEL.NUM_CLASSES
    cls_boxes = [[] for _ in range(num_classes)]
    # Apply threshold on detection probabilities and apply NMS
    # Skip j = 0, because it's the background class
    for j in range(1, num_classes):
        inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]
        scores_j = scores[inds, j]
        boxes_j = boxes[inds, j * 4:(j + 1) * 4]
        # dets_j rows are [x1, y1, x2, y2, score] as expected by the NMS ops
        dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(
            np.float32, copy=False
        )
        if cfg.TEST.SOFT_NMS.ENABLED:
            nms_dets, _ = box_utils.soft_nms(
                dets_j,
                sigma=cfg.TEST.SOFT_NMS.SIGMA,
                overlap_thresh=cfg.TEST.NMS,
                score_thresh=0.0001,
                method=cfg.TEST.SOFT_NMS.METHOD
            )
        else:
            keep = box_utils.nms(dets_j, cfg.TEST.NMS)
            nms_dets = dets_j[keep, :]
        # Refine the post-NMS boxes using bounding-box voting
        # (voting uses all pre-NMS dets_j, not just the survivors)
        if cfg.TEST.BBOX_VOTE.ENABLED:
            nms_dets = box_utils.box_voting(
                nms_dets,
                dets_j,
                cfg.TEST.BBOX_VOTE.VOTE_TH,
                scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD
            )
        cls_boxes[j] = nms_dets
    # Limit to max_per_image detections **over all classes**
    if cfg.TEST.DETECTIONS_PER_IM > 0:
        image_scores = np.hstack(
            [cls_boxes[j][:, -1] for j in range(1, num_classes)]
        )
        if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
            # Keep everything scoring at least as high as the k-th best score
            image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
            for j in range(1, num_classes):
                keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]
                cls_boxes[j] = cls_boxes[j][keep, :]
    im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
    boxes = im_results[:, :-1]
    scores = im_results[:, -1]
    return scores, boxes, cls_boxes
def segm_results(cls_boxes, masks, ref_boxes, im_h, im_w, timers):
    """Convert predicted soft masks into COCO-style RLE segmentations.

    Arguments:
        cls_boxes (list): per-class detections as produced by
            box_results_with_nms_and_limit; cls_boxes[j].shape[0] is the
            number of detections for class j
        masks (ndarray): R x K x M x M soft masks, ordered class-by-class to
            match the concatenation of cls_boxes[1:]
        ref_boxes (ndarray): R x 4 reference boxes for the masks
        im_h, im_w (int): original image height and width
        timers: kept for interface compatibility; unused

    Returns:
        cls_segms (list): cls_segms[j] is a list of RLE-encoded masks for
            class j

    NOTE: this restores the correct sequential implementation. The previous
    joblib-parallel version indexed `masks` with `j - 1 + k`, which ignores
    detections of earlier classes, shared one mutable `padded_mask` buffer
    across workers, and mis-tracked `mask_ind` — all of which corrupt results
    whenever more than one foreground class has detections.
    """
    num_classes = cfg.MODEL.NUM_CLASSES
    cls_segms = [[] for _ in range(num_classes)]
    # Global index into `masks` / `ref_boxes`, running over all classes
    mask_ind = 0
    # To work around an issue with cv2.resize (it seems to automatically pad
    # with repeated border values), we manually zero-pad the masks by 1 pixel
    # prior to resizing back to the original image resolution. This prevents
    # "top hat" artifacts. We therefore need to expand the reference boxes by
    # an appropriate factor.
    M = cfg.MRCNN.RESOLUTION
    scale = (M + 2.0) / M
    ref_boxes = box_utils.expand_boxes(ref_boxes, scale)
    ref_boxes = ref_boxes.astype(np.int32)
    padded_mask = np.zeros((M + 2, M + 2), dtype=np.float32)
    # skip j = 0, because it's the background class
    for j in range(1, num_classes):
        segms = []
        for _ in range(cls_boxes[j].shape[0]):
            if cfg.MRCNN.CLS_SPECIFIC_MASK:
                padded_mask[1:-1, 1:-1] = masks[mask_ind, j, :, :]
            else:
                padded_mask[1:-1, 1:-1] = masks[mask_ind, 0, :, :]
            ref_box = ref_boxes[mask_ind, :]
            w = np.maximum(ref_box[2] - ref_box[0] + 1, 1)
            h = np.maximum(ref_box[3] - ref_box[1] + 1, 1)
            # Resize the padded soft mask to box size and binarize
            mask = cv2.resize(padded_mask, (w, h))
            mask = np.array(mask > cfg.MRCNN.THRESH_BINARIZE, dtype=np.uint8)
            im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
            # Paste the box-sized mask into the full image, clipped to bounds
            x_0 = max(ref_box[0], 0)
            x_1 = min(ref_box[2] + 1, im_w)
            y_0 = max(ref_box[1], 0)
            y_1 = min(ref_box[3] + 1, im_h)
            im_mask[y_0:y_1, x_0:x_1] = mask[
                (y_0 - ref_box[1]):(y_1 - ref_box[1]),
                (x_0 - ref_box[0]):(x_1 - ref_box[0])
            ]
            # Get RLE encoding used by the COCO evaluation API
            rle = mask_util.encode(
                np.array(im_mask[:, :, np.newaxis], order='F')
            )[0]
            segms.append(rle)
            mask_ind += 1
        cls_segms[j] = segms
    assert mask_ind == masks.shape[0]
    return cls_segms
def test(k, im_w, im_h, ref_boxes, padded_mask, masks, j):
    """Resize the mask for detection k of class j back to image space and
    RLE-encode it; returns [rle, mask_ind].

    NOTE(review): `mask_ind = j - 1 + k` indexes `masks` without accounting
    for detections of earlier classes, which looks wrong whenever more than
    one foreground class has detections — confirm against the sequential
    implementation in segm_results. Also mutates the caller-provided
    `padded_mask` buffer in place, which is unsafe if workers share it.
    """
    # timers['maskResize'].tic()
    # segms = []
    mask_ind = j - 1 + k
    if cfg.MRCNN.CLS_SPECIFIC_MASK:
        padded_mask[1:-1, 1:-1] = masks[mask_ind, j, :, :]
    else:
        padded_mask[1:-1, 1:-1] = masks[mask_ind, 0, :, :]
    ref_box = ref_boxes[mask_ind, :]
    w = ref_box[2] - ref_box[0] + 1
    h = ref_box[3] - ref_box[1] + 1
    w = np.maximum(w, 1)
    h = np.maximum(h, 1)
    # Resize the 1px-zero-padded soft mask to box size and binarize
    mask = cv2.resize(padded_mask, (w, h))
    mask = np.array(mask > cfg.MRCNN.THRESH_BINARIZE, dtype=np.uint8)
    im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
    # Paste the mask into the full image, clipped to the image bounds
    x_0 = max(ref_box[0], 0)
    x_1 = min(ref_box[2] + 1, im_w)
    y_0 = max(ref_box[1], 0)
    y_1 = min(ref_box[3] + 1, im_h)
    im_mask[y_0:y_1, x_0:x_1] = mask[
        (y_0 - ref_box[1]):(y_1 - ref_box[1]),
        (x_0 - ref_box[0]):(x_1 - ref_box[0])
    ]
    # timers['maskResize'].toc()
    # timers['maskRLE'].tic()
    # Get RLE encoding used by the COCO evaluation API
    rle = mask_util.encode(
        np.array(im_mask[:, :, np.newaxis], order='F')
    )[0]
    return [rle, mask_ind]
    # timers['maskRLE'].toc()
def keypoint_results(cls_boxes, pred_heatmaps, ref_boxes):
    """Convert keypoint heatmaps into per-person keypoint predictions,
    optionally suppressing duplicates with OKS-based NMS.
    """
    num_classes = cfg.MODEL.NUM_CLASSES
    cls_keyps = [[] for _ in range(num_classes)]
    person_idx = keypoint_utils.get_person_class_index()
    xy_preds = keypoint_utils.heatmaps_to_keypoints(pred_heatmaps, ref_boxes)
    # NMS based on object keypoint similarity (OKS)
    if cfg.KRCNN.NMS_OKS:
        keep = keypoint_utils.nms_oks(xy_preds, ref_boxes, 0.3)
        xy_preds = xy_preds[keep, :, :]
        ref_boxes = ref_boxes[keep, :]
        pred_heatmaps = pred_heatmaps[keep, :, :, :]
        # Keep the person boxes in sync with the surviving keypoints
        cls_boxes[person_idx] = cls_boxes[person_idx][keep, :]
    # One entry per surviving person instance
    cls_keyps[person_idx] = list(xy_preds)
    return cls_keyps
def _get_rois_blob(im_rois, im_scale):
    """Converts RoIs into network inputs.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale (float): scale factor as returned by _get_image_blob
    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns
            [level, x1, y1, x2, y2]
    """
    projected_rois, levels = _project_im_rois(im_rois, im_scale)
    # Prepend the pyramid level to each projected RoI
    blob = np.hstack((levels, projected_rois))
    return blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (ndarray): image pyramid levels used by each projected RoI
"""
rois = im_rois.astype(np.float, copy=False) * scales
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
return rois, levels
def _add_multilevel_rois_for_test(blobs, name):
    """Distributes a set of RoIs across FPN pyramid levels by creating new level
    specific RoI blobs.
    Arguments:
        blobs (dict): dictionary of blobs
        name (str): a key in 'blobs' identifying the source RoI blob
    Returns:
        [by ref] blobs (dict): new keys named by `name + 'fpn' + level`
        are added to dict each with a value that's an R_level x 5 ndarray of
        RoIs (see _get_rois_blob for format)
    """
    lvl_min = cfg.FPN.ROI_MIN_LEVEL
    lvl_max = cfg.FPN.ROI_MAX_LEVEL
    # Columns 1:5 are the box coordinates (column 0 is the pyramid level)
    lvls = fpn.map_rois_to_fpn_levels(blobs[name][:, 1:5], lvl_min, lvl_max)
    fpn.add_multilevel_roi_blobs(
        blobs, name, blobs[name], lvls, lvl_min, lvl_max
    )
def _get_blobs(im, rois, target_scale, target_max_size):
    """Convert an image and RoIs within that image into network inputs."""
    data, im_scale, im_info = blob_utils.get_image_blob(
        im, target_scale, target_max_size)
    blobs = {'data': data, 'im_info': im_info}
    # RoIs are optional (e.g. when proposals come from an RPN)
    if rois is not None:
        blobs['rois'] = _get_rois_blob(rois, im_scale)
    return blobs, im_scale
| 36.382766 | 166 | 0.651529 |
acf9a448ea0ffad46dac32e92bbc61e8015c7f25 | 389 | py | Python | maniacal-moths/newsly/newsly/asgi.py | Kushagra-0801/summer-code-jam-2020 | aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0 | [
"MIT"
] | null | null | null | maniacal-moths/newsly/newsly/asgi.py | Kushagra-0801/summer-code-jam-2020 | aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0 | [
"MIT"
] | null | null | null | maniacal-moths/newsly/newsly/asgi.py | Kushagra-0801/summer-code-jam-2020 | aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0 | [
"MIT"
] | 1 | 2020-08-04T05:44:34.000Z | 2020-08-04T05:44:34.000Z | """
ASGI config for newsly project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'newsly.settings')
application = get_asgi_application()
| 22.882353 | 78 | 0.784062 |
acf9a47de7c3b209ab23005ea638a121664f70b6 | 22,472 | py | Python | timm/models/crossvit.py | Robert-JunWang/pytorch-image-models | 7c67d6aca992f039eece0af5f7c29a43d48c00e4 | [
"Apache-2.0"
] | 38 | 2022-02-09T07:58:33.000Z | 2022-03-31T08:26:37.000Z | timm/models/crossvit.py | Robert-JunWang/pytorch-image-models | 7c67d6aca992f039eece0af5f7c29a43d48c00e4 | [
"Apache-2.0"
] | 9 | 2022-02-15T22:23:48.000Z | 2022-03-24T08:19:37.000Z | timm/models/crossvit.py | Robert-JunWang/pytorch-image-models | 7c67d6aca992f039eece0af5f7c29a43d48c00e4 | [
"Apache-2.0"
] | 11 | 2022-02-11T08:05:53.000Z | 2022-03-29T12:22:49.000Z | """ CrossViT Model
@inproceedings{
chen2021crossvit,
title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}},
author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda},
booktitle={International Conference on Computer Vision (ICCV)},
year={2021}
}
Paper link: https://arxiv.org/abs/2103.14899
Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py
NOTE: model names have been renamed from originals to represent actual input res all *_224 -> *_240 and *_384 -> *_408
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright IBM All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Modifed from Timm. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.hub
from functools import partial
from typing import List
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .fx_features import register_notrace_function
from .helpers import build_model_with_cfg
from .layers import DropPath, to_2tuple, trunc_normal_, _assert
from .registry import register_model
from .vision_transformer import Mlp, Block
def _cfg(url='', **kwargs):
    """Build a default pretrained-config dict; kwargs override the defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 240, 240),
        'pool_size': None,
        'crop_pct': 0.875,
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'fixed_input_size': True,
        'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'),
        'classifier': ('head.0', 'head.1'),
    }
    cfg.update(kwargs)
    return cfg
# Pretrained checkpoint URLs and input configs for each CrossViT variant.
# The *_dagger_* variants use a multi-conv patch embedding, hence the
# different first_conv layer paths; *_408 variants use 408x408 inputs.
default_cfgs = {
    'crossvit_15_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_224.pth'),
    'crossvit_15_dagger_240': _cfg(
        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_224.pth',
        first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
    ),
    'crossvit_15_dagger_408': _cfg(
        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_384.pth',
        input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
    ),
    'crossvit_18_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_224.pth'),
    'crossvit_18_dagger_240': _cfg(
        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_224.pth',
        first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
    ),
    'crossvit_18_dagger_408': _cfg(
        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_384.pth',
        input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
    ),
    'crossvit_9_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_224.pth'),
    'crossvit_9_dagger_240': _cfg(
        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_dagger_224.pth',
        first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
    ),
    'crossvit_base_240': _cfg(
        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_base_224.pth'),
    'crossvit_small_240': _cfg(
        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_small_224.pth'),
    'crossvit_tiny_240': _cfg(
        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_tiny_224.pth'),
}
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding.

    Projects an image into a sequence of patch tokens, either with a single
    strided convolution or (multi_conv) a small conv stack.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        if not multi_conv:
            # Single strided conv: one projection per non-overlapping patch
            self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        elif patch_size[0] == 12:
            self.proj = nn.Sequential(
                nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3),
                nn.ReLU(inplace=True),
                nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0),
                nn.ReLU(inplace=True),
                nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1),
            )
        elif patch_size[0] == 16:
            self.proj = nn.Sequential(
                nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3),
                nn.ReLU(inplace=True),
                nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1),
            )
        # NOTE: multi_conv with other patch sizes leaves self.proj unset,
        # matching the original behavior (fails at forward time).

    def forward(self, x):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        _assert(H == self.img_size[0],
                f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
        _assert(W == self.img_size[1],
                f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
        # BCHW -> B x num_patches x embed_dim
        return self.proj(x).flatten(2).transpose(1, 2)
class CrossAttention(nn.Module):
    """Attention in which only the CLS token (position 0) queries the full
    token sequence; output has sequence length 1."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5
        # Module creation order preserved so seeded init matches checkpoints
        self.wq = nn.Linear(dim, dim, bias=qkv_bias)
        self.wk = nn.Linear(dim, dim, bias=qkv_bias)
        self.wv = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        head_dim = C // self.num_heads
        cls_token = x[:, 0:1, ...]
        # Query from CLS only: B1C -> B1H(C/H) -> BH1(C/H)
        q = self.wq(cls_token).reshape(B, 1, self.num_heads, head_dim).permute(0, 2, 1, 3)
        # Keys/values from all tokens: BNC -> BNH(C/H) -> BHN(C/H)
        k = self.wk(x).reshape(B, N, self.num_heads, head_dim).permute(0, 2, 1, 3)
        v = self.wv(x).reshape(B, N, self.num_heads, head_dim).permute(0, 2, 1, 3)
        # BH1(C/H) @ BH(C/H)N -> BH1N
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attn_drop(attn.softmax(dim=-1))
        # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C
        out = (attn @ v).transpose(1, 2).reshape(B, 1, C)
        return self.proj_drop(self.proj(out))
class CrossAttentionBlock(nn.Module):
    """Residual cross-attention block updating only the CLS token."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = CrossAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        # Residual connection on the CLS token only; output length is 1
        cls_token = x[:, 0:1, ...]
        return cls_token + self.drop_path(self.attn(self.norm1(x)))
class MultiScaleBlock(nn.Module):
    """One CrossViT stage: per-branch transformer blocks followed by cross-branch
    CLS-token fusion. Branch indexing below is cyclic: branch d exchanges its
    CLS token with branch (d + 1) % num_branches.
    """

    def __init__(self, dim, patches, depth, num_heads, mlp_ratio, qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        num_branches = len(dim)
        self.num_branches = num_branches
        # different branch could have different embedding size, the first one is the base
        self.blocks = nn.ModuleList()
        for d in range(num_branches):
            tmp = []
            for i in range(depth[d]):
                tmp.append(Block(
                    dim=dim[d], num_heads=num_heads[d], mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias,
                    drop=drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer))
            if len(tmp) != 0:
                self.blocks.append(nn.Sequential(*tmp))
        if len(self.blocks) == 0:
            self.blocks = None
        # Projections mapping each branch's CLS token into the *next* branch's dim.
        # NOTE: the `and False` keeps the Linear path even for equal dims,
        # matching released checkpoints.
        self.projs = nn.ModuleList()
        for d in range(num_branches):
            if dim[d] == dim[(d + 1) % num_branches] and False:
                tmp = [nn.Identity()]
            else:
                tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])]
            self.projs.append(nn.Sequential(*tmp))
        # Cross-attention fusion blocks, one per branch pair
        self.fusion = nn.ModuleList()
        for d in range(num_branches):
            d_ = (d + 1) % num_branches
            nh = num_heads[d_]
            if depth[-1] == 0:  # backward capability:
                self.fusion.append(
                    CrossAttentionBlock(
                        dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias,
                        drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer))
            else:
                tmp = []
                for _ in range(depth[-1]):
                    tmp.append(CrossAttentionBlock(
                        dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias,
                        drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer))
                self.fusion.append(nn.Sequential(*tmp))
        # Projections mapping the fused CLS token back to this branch's dim
        self.revert_projs = nn.ModuleList()
        for d in range(num_branches):
            if dim[(d + 1) % num_branches] == dim[d] and False:
                tmp = [nn.Identity()]
            else:
                tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(),
                       nn.Linear(dim[(d + 1) % num_branches], dim[d])]
            self.revert_projs.append(nn.Sequential(*tmp))

    def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
        # Per-branch transformer blocks
        outs_b = []
        for i, block in enumerate(self.blocks):
            outs_b.append(block(x[i]))
        # only take the cls token out
        proj_cls_token = torch.jit.annotate(List[torch.Tensor], [])
        for i, proj in enumerate(self.projs):
            proj_cls_token.append(proj(outs_b[i][:, 0:1, ...]))
        # cross attention: branch i's CLS attends over branch (i+1)'s patches,
        # then the fused CLS is projected back and re-attached to branch i
        outs = []
        for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)):
            tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1)
            tmp = fusion(tmp)
            reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...])
            tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1)
            outs.append(tmp)
        return outs
def _compute_num_patches(img_size, patches):
return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)]
@register_notrace_function
def scale_image(x, ss: Tuple[int, int], crop_scale: bool = False):  # annotations for torchscript
    """
    Pulled out of CrossViT.forward_features to bury conditional logic in a leaf node for FX tracing.
    Args:
        x (Tensor): input image
        ss (tuple[int, int]): height and width to scale to
        crop_scale (bool): whether to crop instead of interpolate to achieve the desired scale. Defaults to False
    Returns:
        Tensor: the "scaled" image batch tensor
    """
    H, W = x.shape[-2:]
    if H == ss[0] and W == ss[1]:
        # Already at the target size; nothing to do
        return x
    if crop_scale and ss[0] <= H and ss[1] <= W:
        # Center crop to the target size
        top = int(round((H - ss[0]) / 2.))
        left = int(round((W - ss[1]) / 2.))
        return x[:, :, top:top + ss[0], left:left + ss[1]]
    return torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False)
class CrossViT(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage.

    Two (or more) branches process the image at different scales/patch sizes;
    MultiScaleBlock stages exchange CLS tokens between branches. Per-branch
    pos_embed/cls_token are stored as individually named attributes
    (pos_embed_0, cls_token_0, ...) because torchscript cannot index a
    ParameterList by a loop variable.
    """

    def __init__(
            self, img_size=224, img_scale=(1.0, 1.0), patch_size=(8, 16), in_chans=3, num_classes=1000,
            embed_dim=(192, 384), depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)), num_heads=(6, 12), mlp_ratio=(2., 2., 4.),
            qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
            norm_layer=partial(nn.LayerNorm, eps=1e-6), multi_conv=False, crop_scale=False,
    ):
        super().__init__()
        self.num_classes = num_classes
        self.img_size = to_2tuple(img_size)
        img_scale = to_2tuple(img_scale)
        # Per-branch input resolutions obtained by scaling the base size
        self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale]
        self.crop_scale = crop_scale  # crop instead of interpolate for scale
        num_patches = _compute_num_patches(self.img_size_scaled, patch_size)
        self.num_branches = len(patch_size)
        self.embed_dim = embed_dim
        self.num_features = embed_dim[0]  # to pass the tests
        self.patch_embed = nn.ModuleList()
        # hard-coded for torch jit script
        for i in range(self.num_branches):
            setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i])))
            setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i])))
        for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim):
            self.patch_embed.append(
                PatchEmbed(img_size=im_s, patch_size=p, in_chans=in_chans, embed_dim=d, multi_conv=multi_conv))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Total transformer depth across all stages (last two entries of each
        # stage config), used to build the stochastic depth schedule
        total_depth = sum([sum(x[-2:]) for x in depth])
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)]  # stochastic depth decay rule
        dpr_ptr = 0
        self.blocks = nn.ModuleList()
        for idx, block_cfg in enumerate(depth):
            curr_depth = max(block_cfg[:-1]) + block_cfg[-1]
            dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth]
            blk = MultiScaleBlock(
                embed_dim, num_patches, block_cfg, num_heads=num_heads, mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr_, norm_layer=norm_layer)
            dpr_ptr += curr_depth
            self.blocks.append(blk)
        self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)])
        # One classification head per branch; logits are averaged in forward()
        self.head = nn.ModuleList([
            nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity()
            for i in range(self.num_branches)])
        for i in range(self.num_branches):
            trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02)
            trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Standard ViT init: trunc-normal linear weights, unit LayerNorm
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Exclude per-branch cls tokens and (trainable) pos embeds from decay
        out = set()
        for i in range(self.num_branches):
            out.add(f'cls_token_{i}')
            pe = getattr(self, f'pos_embed_{i}', None)
            if pe is not None and pe.requires_grad:
                out.add(f'pos_embed_{i}')
        return out

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        # Rebuild one head per branch for the new class count
        self.num_classes = num_classes
        self.head = nn.ModuleList(
            [nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in
             range(self.num_branches)])

    def forward_features(self, x):
        B = x.shape[0]
        xs = []
        for i, patch_embed in enumerate(self.patch_embed):
            x_ = x
            ss = self.img_size_scaled[i]
            # Resize (or crop) the input to this branch's resolution
            x_ = scale_image(x_, ss, self.crop_scale)
            x_ = patch_embed(x_)
            cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1  # hard-coded for torch jit script
            cls_tokens = cls_tokens.expand(B, -1, -1)
            x_ = torch.cat((cls_tokens, x_), dim=1)
            pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1  # hard-coded for torch jit script
            x_ = x_ + pos_embed
            x_ = self.pos_drop(x_)
            xs.append(x_)
        for i, blk in enumerate(self.blocks):
            xs = blk(xs)
        # NOTE: was before branch token section, move to here to assure all branch token are before layer norm
        xs = [norm(xs[i]) for i, norm in enumerate(self.norm)]
        # Return the CLS token of each branch
        return [xo[:, 0] for xo in xs]

    def forward(self, x):
        xs = self.forward_features(x)
        ce_logits = [head(xs[i]) for i, head in enumerate(self.head)]
        # Average the per-branch logits (skipped when heads are Identity)
        if not isinstance(self.head[0], nn.Identity):
            ce_logits = torch.mean(torch.stack(ce_logits, dim=0), dim=0)
        return ce_logits
def _create_crossvit(variant, pretrained=False, **kwargs):
    """Instantiate a CrossViT *variant*, optionally loading pretrained weights.

    Raises RuntimeError for ``features_only`` requests, which this model
    family does not support.
    """
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')

    def pretrained_filter_fn(state_dict):
        # Checkpoints store these as dotted names; the model registers them
        # as flat attributes (e.g. 'pos_embed.0' -> 'pos_embed_0').
        return {
            (key.replace(".", "_") if 'pos_embed' in key or 'cls_token' in key else key): value
            for key, value in state_dict.items()
        }

    return build_model_with_cfg(
        CrossViT, variant, pretrained,
        default_cfg=default_cfgs[variant],
        pretrained_filter_fn=pretrained_filter_fn,
        **kwargs)
@register_model
def crossvit_tiny_240(pretrained=False, **kwargs):
    """CrossViT-Tiny at 240x240 input."""
    return _create_crossvit(
        variant='crossvit_tiny_240', pretrained=pretrained,
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[96, 192],
        depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], num_heads=[3, 3],
        mlp_ratio=[4, 4, 1], **kwargs)
@register_model
def crossvit_small_240(pretrained=False, **kwargs):
    """CrossViT-Small at 240x240 input."""
    return _create_crossvit(
        variant='crossvit_small_240', pretrained=pretrained,
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[192, 384],
        depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], num_heads=[6, 6],
        mlp_ratio=[4, 4, 1], **kwargs)
@register_model
def crossvit_base_240(pretrained=False, **kwargs):
    """CrossViT-Base at 240x240 input."""
    return _create_crossvit(
        variant='crossvit_base_240', pretrained=pretrained,
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[384, 768],
        depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], num_heads=[12, 12],
        mlp_ratio=[4, 4, 1], **kwargs)
@register_model
def crossvit_9_240(pretrained=False, **kwargs):
    """CrossViT-9 at 240x240 input."""
    return _create_crossvit(
        variant='crossvit_9_240', pretrained=pretrained,
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256],
        depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], num_heads=[4, 4],
        mlp_ratio=[3, 3, 1], **kwargs)
@register_model
def crossvit_15_240(pretrained=False, **kwargs):
    """CrossViT-15 at 240x240 input."""
    return _create_crossvit(
        variant='crossvit_15_240', pretrained=pretrained,
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[192, 384],
        depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], num_heads=[6, 6],
        mlp_ratio=[3, 3, 1], **kwargs)
@register_model
def crossvit_18_240(pretrained=False, **kwargs):
    """CrossViT-18 at 240x240 input."""
    return _create_crossvit(
        variant='crossvit_18_240', pretrained=pretrained,
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448],
        depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], num_heads=[7, 7],
        mlp_ratio=[3, 3, 1], **kwargs)
@register_model
def crossvit_9_dagger_240(pretrained=False, **kwargs):
    """CrossViT-9† (multi-conv patch embedding) at 240x240 input."""
    return _create_crossvit(
        variant='crossvit_9_dagger_240', pretrained=pretrained,
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256],
        depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], num_heads=[4, 4],
        mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs)
@register_model
def crossvit_15_dagger_240(pretrained=False, **kwargs):
    """CrossViT-15† (multi-conv patch embedding) at 240x240 input."""
    return _create_crossvit(
        variant='crossvit_15_dagger_240', pretrained=pretrained,
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[192, 384],
        depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], num_heads=[6, 6],
        mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs)
@register_model
def crossvit_15_dagger_408(pretrained=False, **kwargs):
    """CrossViT-15† (multi-conv patch embedding) at 408x408 input."""
    return _create_crossvit(
        variant='crossvit_15_dagger_408', pretrained=pretrained,
        img_scale=(1.0, 384 / 408), patch_size=[12, 16], embed_dim=[192, 384],
        depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], num_heads=[6, 6],
        mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs)
@register_model
def crossvit_18_dagger_240(pretrained=False, **kwargs):
    """CrossViT-18† (multi-conv patch embedding) at 240x240 input."""
    return _create_crossvit(
        variant='crossvit_18_dagger_240', pretrained=pretrained,
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448],
        depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], num_heads=[7, 7],
        mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs)
@register_model
def crossvit_18_dagger_408(pretrained=False, **kwargs):
    """CrossViT-18† (multi-conv patch embedding) at 408x408 input."""
    return _create_crossvit(
        variant='crossvit_18_dagger_408', pretrained=pretrained,
        img_scale=(1.0, 384 / 408), patch_size=[12, 16], embed_dim=[224, 448],
        depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], num_heads=[7, 7],
        mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs)
| 43.215385 | 119 | 0.618503 |
acf9a488aba3c0bd5cc20ddbdb3e4173323d066d | 3,214 | py | Python | typos.py | succa/adversarial-ml-text-classification | 1efce8e198c2825dea2f50148e83864a1b6a6fd1 | [
"MIT"
] | 101 | 2017-05-02T11:20:33.000Z | 2021-12-16T11:05:26.000Z | typos.py | succa/adversarial-ml-text-classification | 1efce8e198c2825dea2f50148e83864a1b6a6fd1 | [
"MIT"
] | 4 | 2018-05-18T17:57:56.000Z | 2020-01-29T07:53:32.000Z | typos.py | succa/adversarial-ml-text-classification | 1efce8e198c2825dea2f50148e83864a1b6a6fd1 | [
"MIT"
] | 35 | 2017-05-03T13:48:35.000Z | 2021-11-19T16:56:49.000Z | # Based on https://github.com/Woorank/tipo/
# Map from each lowercase key on a QWERTY keyboard to the keys physically
# adjacent to it, used to generate plausible "fat finger" substitution typos.
KEY_MISHITS_MAP = {
  '1': [ '2', 'q' ],
  '2': [ '1', 'q', 'w', '3' ],
  '3': [ '2', 'w', 'e', '4' ],
  '4': [ '3', 'e', 'r', '5' ],
  '5': [ '4', 'r', 't', '6' ],
  '6': [ '5', 't', 'y', '7' ],
  '7': [ '6', 'y', 'u', '8' ],
  '8': [ '7', 'u', 'i', '9' ],
  '9': [ '8', 'i', 'o', '0' ],
  '0': [ '9', 'o', 'p', '-' ],
  '-': [ '0', 'p' ],
  'q': [ '1', '2', 'w', 'a' ],
  'w': [ 'q', 'a', 's', 'e', '3', '2' ],
  'e': [ 'w', 's', 'd', 'r', '4', '3' ],
  'r': [ 'e', 'd', 'f', 't', '5', '4' ],
  't': [ 'r', 'f', 'g', 'y', '6', '5' ],
  'y': [ 't', 'g', 'h', 'u', '7', '6' ],
  'u': [ 'y', 'h', 'j', 'i', '8', '7' ],
  'i': [ 'u', 'j', 'k', 'o', '9', '8' ],
  'o': [ 'i', 'k', 'l', 'p', '0', '9' ],
  'p': [ 'o', 'l', '-', '0' ],
  'a': [ 'z', 's', 'w', 'q' ],
  's': [ 'a', 'z', 'x', 'd', 'e', 'w' ],
  'd': [ 's', 'x', 'c', 'f', 'r', 'e' ],
  'f': [ 'd', 'c', 'v', 'g', 't', 'r' ],
  'g': [ 'f', 'v', 'b', 'h', 'y', 't' ],
  'h': [ 'g', 'b', 'n', 'j', 'u', 'y' ],
  'j': [ 'h', 'n', 'm', 'k', 'i', 'u' ],
  'k': [ 'j', 'm', 'l', 'o', 'i' ],
  'l': [ 'k', 'p', 'o' ],
  'z': [ 'x', 's', 'a' ],
  'x': [ 'z', 'c', 'd', 's' ],
  'c': [ 'x', 'v', 'f', 'd' ],
  'v': [ 'c', 'b', 'g', 'f' ],
  'b': [ 'v', 'n', 'h', 'g' ],
  'n': [ 'b', 'm', 'j', 'h' ],
  'm': [ 'n', 'k', 'j' ]
}
def get_keyboard_miss_typos(word):
    '''
    Typos made by hitting a physically adjacent key instead of the intended
    one; the original letter's case is preserved.

    >>> get_keyboard_miss_typos('cat') == { \
           'xat', 'vat', 'fat', 'dat', 'czt', 'cst', 'cwt', \
           'cqt', 'car', 'caf', 'cag', 'cay', 'ca6', 'ca5' \
        }
    True
    >>> get_keyboard_miss_typos('Cat') == { \
           'Xat', 'Vat', 'Fat', 'Dat', 'Czt', 'Cst', 'Cwt', \
           'Cqt', 'Car', 'Caf', 'Cag', 'Cay', 'Ca6', 'Ca5' \
        }
    True
    '''
    typos = set()
    for pos, char in enumerate(word):
        for neighbour in KEY_MISHITS_MAP.get(char.lower()) or []:
            if char.isupper():
                neighbour = neighbour.upper()
            typos.add(word[:pos] + neighbour + word[pos + 1:])
    return typos
def get_missing_letter_typos(word):
    '''
    Typos made by dropping a single letter.

    >>> get_missing_letter_typos('cat') == {'at', 'ct', 'ca'}
    True
    '''
    return {word[:i] + word[i + 1:] for i in range(len(word))}
def get_mixed_letter_typos(word):
    '''
    Typos made by transposing two adjacent letters (identical-letter swaps,
    which reproduce the word itself, are excluded).

    >>> get_mixed_letter_typos('cat') == {'act', 'cta'}
    True
    '''
    swaps = (word[:i] + word[i + 1] + word[i] + word[i + 2:]
             for i in range(len(word) - 1))
    return {swapped for swapped in swaps if swapped != word}
def get_double_letter_typos(word):
    '''
    Typos made by doubling a single letter.

    >>> get_double_letter_typos('cat') == {'ccat', 'caat', 'catt'}
    True
    '''
    # word[:i + 1] + word[i:] repeats the character at index i.
    return {word[:i + 1] + word[i:] for i in range(len(word))}
def typos(word):
    '''
    Return every one-edit typo of *word*: adjacent-key substitutions,
    adjacent transpositions, doubled letters and dropped letters.

    >>> isinstance(typos('cat'), set)
    True
    >>> len(typos('cat')) > 0
    True
    '''
    # The original doctests had no expected-output lines, so doctest
    # reported the printed "True" values as failures; the "True" lines
    # above fix that.
    sets = [get_keyboard_miss_typos(word),
            get_mixed_letter_typos(word),
            get_double_letter_typos(word),
            get_missing_letter_typos(word)]
    return set.union(*sets)
| 27.947826 | 66 | 0.375856 |
acf9a4b10257ce6d9a353031da7c25204cda5401 | 4,601 | py | Python | save_beta_rabbit.py | nateGeorge/Google-foobar-challenges | 72e0f66132a616303f39c63cb234196572ecbc72 | [
"MIT"
] | null | null | null | save_beta_rabbit.py | nateGeorge/Google-foobar-challenges | 72e0f66132a616303f39c63cb234196572ecbc72 | [
"MIT"
] | null | null | null | save_beta_rabbit.py | nateGeorge/Google-foobar-challenges | 72e0f66132a616303f39c63cb234196572ecbc72 | [
"MIT"
] | null | null | null | """
Save Beta Rabbit
================
Oh no! The mad Professor Boolean has trapped Beta Rabbit in an NxN grid of rooms. In the center of each room (except for the top left room) is a hungry zombie. In order to be freed, and to avoid being eaten, Beta Rabbit must move through this grid and feed the zombies.
Beta Rabbit starts at the top left room of the grid. For each room in the grid, there is a door to the room above, below, left, and right. There is no door in cases where there is no room in that direction. However, the doors are locked in such a way that Beta Rabbit can only ever move to the room below or to the right. Once Beta Rabbit enters a room, the zombie immediately starts crawling towards him, and he must feed the zombie until it is full to ward it off. Thankfully, Beta Rabbit took a class about zombies and knows how many units of food each zombie needs be full.
To be freed, Beta Rabbit needs to make his way to the bottom right room (which also has a hungry zombie) and have used most of the limited food he has. He decides to take the path through the grid such that he ends up with as little food as possible at the end.
Write a function answer(food, grid) that returns the number of units of food Beta Rabbit will have at the end, given that he takes a route using up as much food as possible without him being eaten, and ends at the bottom right room. If there does not exist a route in which Beta Rabbit will not be eaten, then return -1.
food is the amount of food Beta Rabbit starts with, and will be a positive integer no larger than 200.
grid will be a list of N elements. Each element of grid will itself be a list of N integers each, denoting a single row of N rooms. The first element of grid will be the list denoting the top row, the second element will be the list denoting second row from the top, and so on until the last element, which is the list denoting the bottom row. In the list denoting a single row, the first element will be the amount of food the zombie in the left-most room in that row needs, the second element will be the amount the zombie in the room to its immediate right needs and so on. The top left room will always contain the integer 0, to indicate that there is no zombie there.
The number of rows N will not exceed 20, and the amount of food each zombie requires will be a positive integer not exceeding 10.
Languages
=========
To provide a Python solution, edit solution.py
To provide a Java solution, edit solution.java
Test cases
==========
Inputs:
(int) food = 7
(int) grid = [[0, 2, 5], [1, 1, 3], [2, 1, 1]]
Output:
(int) 0
Inputs:
(int) food = 12
(int) grid = [[0, 2, 5], [1, 1, 3], [2, 1, 1]]
Output:
(int) 1
Use verify [file] to test your solution and see how it does. When you are finished editing your code, use submit [file] to submit your answer. If your solution passes the test cases, it will be removed from your home folder.
"""
# help from:
# http://garethrees.org/2013/06/11/tabular/
# http://codereview.stackexchange.com/questions/91317/google-foobar-challenge-save-beta-rabbit-in-python?newreg=793f3386300e42f6901129a4f412ed51
from functools import wraps
def memoized(table=None):
    """Return a memoizing decorator for functions of one hashable argument.

    The optional *table* argument seeds (and is mutated as) the cache
    mapping arguments to results.
    """
    cache = {} if table is None else table

    def memoizer(f):
        @wraps(f)
        def wrapper(arg):
            if arg in cache:
                return cache[arg]
            return cache.setdefault(arg, f(arg))
        return wrapper
    return memoizer
def answer(food, grid):
    """Return the least food Beta Rabbit can have left after reaching the
    bottom-right room, or -1 if no route avoids being eaten.

    He starts top-left with *food* units, may only move down or right, and
    must pay grid[i][j] on entering room (i, j); grid[0][0] == 0.

    food -- positive integer (problem bound: <= 200)
    grid -- N x N list of rows of non-negative integers
    """
    # The original used Python-2-only tuple parameter unpacking
    # (``def r((t, i, j))``, removed by PEP 3113) plus a hand-rolled
    # memoizer; lru_cache provides the same memoization portably.
    from functools import lru_cache

    n = len(grid)

    @lru_cache(maxsize=None)
    def remainder(t, i, j):
        # Smallest remainder of t after a down/right path from the top
        # left to (i, j), or food + 1 as a sentinel when every path
        # overspends (or we walked off the grid).
        t -= grid[i][j]
        if i < 0 or j < 0 or t < 0:
            return food + 1
        if i == 0 and j == 0:
            return t
        return min(remainder(t, i - 1, j), remainder(t, i, j - 1))

    best = remainder(food, n - 1, n - 1)
    return best if best <= food else -1
if __name__ == "__main__":
    # Smoke tests mirroring the examples in the problem statement.
    # (The original used Python-2 print statements, a SyntaxError on
    # Python 3; print() calls are valid on both.)
    food = 7
    grid = [[0, 2, 5], [1, 1, 3], [2, 1, 1]]
    print(answer(food, grid))  # should be 0
    food = 12
    grid = [[0, 2, 5], [1, 1, 3], [2, 1, 1]]
    print(answer(food, grid))  # should be 1
    food = 12
    grid = [[0, 2, 5], [11, 11, 11], [2, 3, 3]]
    print(answer(food, grid))  # should be -1
| 47.43299 | 672 | 0.678113 |
acf9a554b2617550b4b32fef8280ca880fc317db | 15,628 | py | Python | src/sage/quadratic_forms/quadratic_form__mass__Siegel_densities.py | bopopescu/classic_diff_geom | 2b1d88becbc8cb30962e0995cc78e429e0f5589f | [
"BSL-1.0"
] | null | null | null | src/sage/quadratic_forms/quadratic_form__mass__Siegel_densities.py | bopopescu/classic_diff_geom | 2b1d88becbc8cb30962e0995cc78e429e0f5589f | [
"BSL-1.0"
] | null | null | null | src/sage/quadratic_forms/quadratic_form__mass__Siegel_densities.py | bopopescu/classic_diff_geom | 2b1d88becbc8cb30962e0995cc78e429e0f5589f | [
"BSL-1.0"
] | 1 | 2020-07-24T12:08:30.000Z | 2020-07-24T12:08:30.000Z | """
Local Masses and Siegel Densities
"""
######################################################################################################
## Computes the local masses (rep'n densities of a form by itself) for a quadratic forms over ZZ
## using the papers of Pall [PSPUM VIII (1965), pp95--105] for p>2, and Watson [Mathematika
## 23, no. 1, (1976), pp 94--106] for p=2. These formulas will also work for any local field
## which is unramified at p=2.
##
## Copyright by Jonathan Hanke 2007 <jonhanke@gmail.com>
######################################################################################################
import copy
from sage.misc.misc import prod
from sage.misc.mrange import mrange
from sage.functions.all import floor
from sage.rings.integer_ring import ZZ
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing
from sage.rings.rational_field import QQ
from sage.rings.arith import legendre_symbol, kronecker, prime_divisors
from sage.functions.all import sgn
from sage.quadratic_forms.special_values import gamma__exact, zeta__exact, quadratic_L_function__exact
from sage.misc.functional import squarefree_part
from sage.symbolic.constants import pi
from sage.matrix.matrix_space import MatrixSpace
def mass__by_Siegel_densities(self, odd_algorithm="Pall", even_algorithm="Watson"):
    """
    Gives the mass of transformations (det 1 and -1).

    WARNING: THIS IS BROKEN RIGHT NOW... =(

    Optional Arguments:

    - When p > 2 -- odd_algorithm = "Pall" (only one choice for now)
    - When p = 2 -- even_algorithm = "Kitaoka" or "Watson"

    REFERENCES:

    - Nipp's Book "Tables of Quaternary Quadratic Forms".
    - Papers of Pall (only for p>2) and Watson (for `p=2` -- tricky!).
    - Siegel, Milnor-Hussemoller, Conway-Sloane Paper IV, Kitoaka (all of which
      have problems...)

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1,1])
        sage: Q.mass__by_Siegel_densities()
        1/384
        sage: Q.mass__by_Siegel_densities() - (2^Q.dim() * factorial(Q.dim()))^(-1)
        0

    ::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1])
        sage: Q.mass__by_Siegel_densities()
        1/48
        sage: Q.mass__by_Siegel_densities() - (2^Q.dim() * factorial(Q.dim()))^(-1)
        0
    """
    ## Setup
    n = self.dim()
    s = floor((n-1)/2)
    if n % 2 != 0:
        char_d = squarefree_part(2*self.det())   ## Accounts for the det as a QF
    else:
        char_d = squarefree_part(self.det())
    ## Form the generic (archimedean/global) factor of the mass formula:
    ## 2 * pi^(-n(n+1)/4) * det^((n+1)/2) * prod Gamma(j/2) * prod zeta(2j),
    ## times a quadratic L-value when n is even.
    generic_prod = ZZ(2) * (pi)**(-ZZ(n) * (n+1) / 4)
    ##########################################
    generic_prod *= (self.det())**(ZZ(n+1)/2)  ## ***** This uses the Hessian Determinant ********
    ##########################################
    generic_prod *= prod([gamma__exact(ZZ(j)/2) for j in range(1,n+1)])
    generic_prod *= prod([zeta__exact(ZZ(j)) for j in range(2, 2*s+1, 2)])
    if (n % 2 == 0):
        generic_prod *= ZZ(1) * quadratic_L_function__exact(n/2, (-1)**(n/2) * char_d)
    ## Determine the adjustment factors (local corrections at the bad primes)
    adj_prod = 1
    for p in prime_divisors(2 * self.det()):
        ## Cancel out the generic local factors at p...
        p_adjustment = prod([1 - ZZ(p)**(-j) for j in range(2, 2*s+1, 2)])
        if (n % 2 == 0):
            p_adjustment *= ZZ(1) * (1 - kronecker((-1)**(n/2) * char_d, p) * ZZ(p)**(-n/2))
        ## ...and insert the true local mass factors instead.
        if p == 2:
            if even_algorithm == "Kitaoka":
                p_adjustment = p_adjustment / self.Kitaoka_mass_at_2()
            elif even_algorithm == "Watson":
                p_adjustment = p_adjustment / self.Watson_mass_at_2()
            else:
                raise TypeError("There is a problem -- your even_algorithm argument is invalid. Try again. =(")
        else:
            if odd_algorithm == "Pall":
                p_adjustment = p_adjustment / self.Pall_mass_density_at_odd_prime(p)
            else:
                raise TypeError("There is a problem -- your optional arguments are invalid. Try again. =(")
        ## Put them together (cumulatively)
        adj_prod *= p_adjustment
    ## Extra adjustment for the case of a 2-dimensional form.
    #if (n == 2):
    #    generic_prod *= 2
    ## Return the mass
    mass = generic_prod * adj_prod
    return mass
def Pall_mass_density_at_odd_prime(self, p):
    """
    Returns the local representation density of a form (for
    representing itself) defined over `ZZ`, at some prime `p>2`.

    REFERENCES:

        Pall's article "The Weight of a Genus of Positive n-ary Quadratic Forms"
        appearing in Proc. Symp. Pure Math. VIII (1965), pp95--105.

    INPUT:

        `p` -- a prime number > 2.

    OUTPUT:

        a rational number.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, [1,0,0,1,0,1])
        sage: Q.Pall_mass_density_at_odd_prime(3)
        8/9
    """
    # Fixes vs. the original: a leftover Python-2 debug ``print`` statement
    # (a SyntaxError under Python 3) was removed -- its output had even
    # leaked into the documented doctest, which is corrected above.
    ## Check that p is a positive prime -- unnecessary since it's done implicitly in the next step. =)
    if p <= 2:
        raise TypeError("Oops! We need p to be a prime > 2.")
    ## Step 1: Obtain a p-adic (diagonal) local normal form, and
    ## compute the invariants for each Jordan block.
    jordan_list = self.jordan_blocks_by_scale_and_unimodular(p)
    modified_jordan_list = [(a, Q.dim(), Q.det()) for (a,Q) in jordan_list]  ## List of triples (scale, dim, det)
    ## Step 2: Compute the list of local masses for each Jordan block
    jordan_mass_list = []
    for (s,n,d) in modified_jordan_list:
        generic_factor = prod([1 - p**(-2*j) for j in range(1, floor((n-1)/2)+1)])
        if (n % 2 == 0):
            m = n/2
            generic_factor *= (1 + legendre_symbol(((-1)**m) * d, p) * p**(-m))
        jordan_mass_list = jordan_mass_list + [generic_factor]
    ## Step 3: Compute the local mass $\al_p$ at p.
    MJL = modified_jordan_list
    s = len(modified_jordan_list)
    M = [sum([MJL[j][1] for j in range(i, s)]) for i in range(s-1)]  ## Note: It's s-1 since we don't need the last M.
    nu = sum([M[i] * MJL[i][0] * MJL[i][1] for i in range(s-1)]) - ZZ(sum([J[0] * J[1] * (J[1]-1) for J in MJL]))/ZZ(2)
    p_mass = prod(jordan_mass_list)
    p_mass *= 2**(s-1) * p**nu
    ## Return the result
    return p_mass
def Watson_mass_at_2(self):
    """
    Returns the local mass of the quadratic form when `p=2`, according
    to Watson's Theorem 1 of "The 2-adic density of a quadratic form"
    in Mathematika 23 (1976), pp 94--106.

    INPUT:

        none

    OUTPUT:

        a rational number

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1])
        sage: Q.Watson_mass_at_2() ## WARNING: WE NEED TO CHECK THIS CAREFULLY!
        384
    """
    ## Make a 0-dim'l quadratic form (for initialization purposes)
    Null_Form = copy.deepcopy(self)
    Null_Form.__init__(ZZ, 0)
    ## Step 0: Compute Jordan blocks and bounds of the scales to keep track of
    Jordan_Blocks = self.jordan_blocks_by_scale_and_unimodular(2)
    scale_list = [B[0] for B in Jordan_Blocks]
    s_min = min(scale_list)
    s_max = max(scale_list)
    ## Step 1: Compute dictionaries of the diagonal block and 2x2 block for each scale
    diag_dict = dict((i, Null_Form) for i in range(s_min-2, s_max + 4))   ## Initialize with the zero form
    dim2_dict = dict((i, Null_Form) for i in range(s_min, s_max + 4))     ## Initialize with the zero form
    for (s,L) in Jordan_Blocks:
        i = 0
        while (i < L.dim()-1) and (L[i,i+1] == 0):   ## Find where the 2x2 blocks start
            i = i + 1
        if i < (L.dim() - 1):
            diag_dict[s] = L.extract_variables(range(i))                ## Diagonal Form
            dim2_dict[s+1] = L.extract_variables(range(i, L.dim()))     ## Non-diagonal Form
        else:
            diag_dict[s] = L
    ## Step 2: Compute three dictionaries of invariants (for n_j, m_j, nu_j)
    n_dict = dict((j,0) for j in range(s_min+1, s_max+2))
    m_dict = dict((j,0) for j in range(s_min, s_max+4))
    for (s,L) in Jordan_Blocks:
        n_dict[s+1] = L.dim()
        if diag_dict[s].dim() == 0:
            m_dict[s+1] = ZZ(1)/ZZ(2) * L.dim()
        else:
            m_dict[s+1] = floor(ZZ(L.dim() - 1) / ZZ(2))
    nu_dict = dict((j,n_dict[j+1] - 2*m_dict[j+1]) for j in range(s_min, s_max+1))
    nu_dict[s_max+1] = 0
    ## Step 3: Compute the e_j dictionary
    ## (eps_j is the sign comparing the counts of solutions of the scaled
    ## form that are 0 mod 4 versus 2 mod 4, found by brute-force counting)
    eps_dict = {}
    for j in range(s_min, s_max+3):
        two_form = (diag_dict[j-2] + diag_dict[j] + dim2_dict[j]).scale_by_factor(2)
        j_form = (two_form + diag_dict[j-1]).base_change_to(IntegerModRing(4))
        if j_form.dim() == 0:
            eps_dict[j] = 1
        else:
            iter_vec = [4] * j_form.dim()
            alpha = sum([True for x in mrange(iter_vec) if j_form(x) == 0])
            beta = sum([True for x in mrange(iter_vec) if j_form(x) == 2])
            if alpha > beta:
                eps_dict[j] = 1
            elif alpha == beta:
                eps_dict[j] = 0
            else:
                eps_dict[j] = -1
    ## Step 4: Compute the quantities nu, q, P, E for the local mass at 2
    nu = sum([j * n_dict[j] * (ZZ(n_dict[j] + 1) / ZZ(2) + \
              sum([n_dict[r] for r in range(j+1, s_max+2)])) for j in range(s_min+1, s_max+2)])
    q = sum([sgn(nu_dict[j-1] * (n_dict[j] + sgn(nu_dict[j]))) for j in range(s_min+1, s_max+2)])
    P = prod([ prod([1 - QQ(4)**(-i) for i in range(1, m_dict[j]+1)]) for j in range(s_min+1, s_max+2)])
    E = prod([ZZ(1)/ZZ(2) * (1 + eps_dict[j] * QQ(2)**(-m_dict[j])) for j in range(s_min, s_max+3)])
    ## Step 5: Compute the local mass for the prime 2.
    mass_at_2 = QQ(2)**(nu - q) * P / E
    return mass_at_2
def Kitaoka_mass_at_2(self):
    """
    Returns the local mass of the quadratic form when `p=2`, according
    to Theorem 5.6.3 on pp108--9 of Kitaoka's Book "The Arithmetic of
    Quadratic Forms".

    INPUT:

        none

    OUTPUT:

        a rational number > 0

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1])
        sage: Q.Kitaoka_mass_at_2() ## WARNING: WE NEED TO CHECK THIS CAREFULLY!
        1/2
    """
    # Fix vs. the original: the P_j product below iterated ``for j`` while
    # its body used an undefined ``k`` (and, under Python 2, the
    # comprehension would also have clobbered the outer loop variable j).
    # It now iterates ``for k`` and uses exact rationals QQ(4)**(-k),
    # matching the analogous product in Watson_mass_at_2.
    ## Make a 0-dim'l quadratic form (for initialization purposes)
    Null_Form = copy.deepcopy(self)
    Null_Form.__init__(ZZ, 0)
    ## Step 0: Compute Jordan blocks and bounds of the scales to keep track of
    Jordan_Blocks = self.jordan_blocks_by_scale_and_unimodular(2)
    scale_list = [B[0] for B in Jordan_Blocks]
    s_min = min(scale_list)
    s_max = max(scale_list)
    ## Step 1: Compute dictionaries of the diagonal block and 2x2 block for each scale
    diag_dict = dict((i, Null_Form) for i in range(s_min-2, s_max + 4))   ## Initialize with the zero form
    dim2_dict = dict((i, Null_Form) for i in range(s_min, s_max + 4))     ## Initialize with the zero form
    for (s,L) in Jordan_Blocks:
        i = 0
        while (i < L.dim()-1) and (L[i,i+1] == 0):   ## Find where the 2x2 blocks start
            i = i + 1
        if i < (L.dim() - 1):
            diag_dict[s] = L.extract_variables(range(i))                ## Diagonal Form
            dim2_dict[s+1] = L.extract_variables(range(i, L.dim()))     ## Non-diagonal Form
        else:
            diag_dict[s] = L
    ## Compute q := sum of the q_j
    ## NOTE(review): indexing Jordan_Blocks by the scale j below assumes the
    ## scales are exactly 0..len(Jordan_Blocks)-1 -- verify this invariant.
    q = 0
    for j in range(s_min, s_max + 1):
        if diag_dict[j].dim() > 0:               ## Check that N_j is odd (i.e. rep'ns an odd #)
            if diag_dict[j+1].dim() == 0:
                q += Jordan_Blocks[j][1].dim()        ## When N_{j+1} is "even", add n_j
            else:
                q += Jordan_Blocks[j][1].dim() + 1    ## When N_{j+1} is "odd", add n_j + 1
    ## Compute P = product of the P_j
    P = QQ(1)
    for j in range(s_min, s_max + 1):
        tmp_m = dim2_dict[j].dim() / 2
        P *= prod([QQ(1) - QQ(4)**(-k) for k in range(1, tmp_m + 1)])
    ## Compute the product E := prod_j (1 / E_j)
    E = QQ(1)
    for j in range(s_min - 1, s_max + 2):
        if (diag_dict[j-1].dim() == 0) and (diag_dict[j+1].dim() == 0) and \
           ((diag_dict[j].dim() != 2) or (((diag_dict[j][0,0] - diag_dict[j][1,1]) % 4) != 0)):
            ## Deal with the complicated case:
            tmp_m = dim2_dict[j].dim() / 2
            if dim2_dict[j].is_hyperbolic(2):
                E *= 2 / (1 + 2**(-tmp_m))
            else:
                E *= 2 / (1 - 2**(-tmp_m))
        else:
            E *= 2
    ## Compute the exponent w
    w = QQ(0)
    for j in range(s_min, s_max+1):
        n_j = Jordan_Blocks[j][1].dim()
        for k in range(j+1, s_max+1):
            n_k = Jordan_Blocks[k][1].dim()
            w += j * n_j * (n_k + QQ(n_j + 1) / 2)
    ## Step 5: Compute the local mass for the prime 2.
    mass_at_2 = (QQ(2)**(w - q)) * P * E
    return mass_at_2
def mass_at_two_by_counting_mod_power(self, k):
    """
    Computes the local mass at `p=2` assuming that it's stable `(mod 2^k)`.

    Note: This is **way** too slow to be useful, even when k=1!!!

    TO DO: Remove this routine, or try to compile it!

    INPUT:

        k -- an integer >= 1

    OUTPUT:

        a rational number

    EXAMPLE::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1])
        sage: Q.mass_at_two_by_counting_mod_power(1)
        4
    """
    ring = IntegerModRing(2**k)
    Q1 = self.base_change_to(ring)
    n = self.dim()
    MS = MatrixSpace(ring, n)
    ## Brute force: count the matrices mod 2^k that transform Q1 to itself.
    count = sum(1 for x in mrange([2**k] * (n**2)) if Q1(MS(x)) == Q1)
    return ZZ(1)/2 * (ZZ(count) / ZZ(2)**(k*n*(n-1)/2))
| 35.599089 | 122 | 0.561748 |
acf9a8d5e6eecb6189b9c5104dc562c6b1ffbfae | 11,787 | py | Python | qiskit/transpiler/passes/optimize_1q_gates.py | ismaila-at-za-ibm/qiskit-terra | 08303ec98ac7b33fde55266dc3a74466fbdcae95 | [
"Apache-2.0"
] | 1 | 2020-09-03T12:28:44.000Z | 2020-09-03T12:28:44.000Z | qiskit/transpiler/passes/optimize_1q_gates.py | ismaila-at-za-ibm/qiskit-terra | 08303ec98ac7b33fde55266dc3a74466fbdcae95 | [
"Apache-2.0"
] | null | null | null | qiskit/transpiler/passes/optimize_1q_gates.py | ismaila-at-za-ibm/qiskit-terra | 08303ec98ac7b33fde55266dc3a74466fbdcae95 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Transpiler pass to optimize chains of single-qubit u1, u2, u3 gates by combining them into
a single gate.
"""
import networkx as nx
import numpy as np
import sympy
from sympy import Number as N
from qiskit.mapper import MapperError
from qiskit.extensions.standard.u1 import U1Gate
from qiskit.extensions.standard.u2 import U2Gate
from qiskit.extensions.standard.u3 import U3Gate
from qiskit.circuit.instruction import Instruction
from qiskit.transpiler._basepasses import TransformationPass
from qiskit.quantum_info.operators.quaternion import quaternion_from_euler
from qiskit.transpiler.passes.mapping.unroller import Unroller
# Angles with absolute value below this are rounded to exactly zero when
# converting between Euler-angle conventions (see Optimize1qGates.yzy_to_zyz).
_CHOP_THRESHOLD = 1e-15
class Optimize1qGates(TransformationPass):
"""Simplify runs of single qubit gates in the ["u1", "u2", "u3", "cx", "id"] basis."""
def __init__(self):
super().__init__()
self.requires.append(Unroller(["u1", "u2", "u3", "cx", "id"]))
def run(self, dag):
"""Return a new circuit that has been optimized."""
runs = dag.collect_runs(["u1", "u2", "u3", "id"])
for run in runs:
run_qarg = dag.multi_graph.node[run[0]]["qargs"][0]
right_name = "u1"
right_parameters = (N(0), N(0), N(0)) # (theta, phi, lambda)
for current_node in run:
node = dag.multi_graph.node[current_node]
left_name = node["name"]
if (node["condition"] is not None
or len(node["qargs"]) != 1
or node["qargs"][0] != run_qarg
or left_name not in ["u1", "u2", "u3", "id"]):
raise MapperError("internal error")
if left_name == "u1":
left_parameters = (N(0), N(0), node["op"].params[0])
elif left_name == "u2":
left_parameters = (sympy.pi / 2, node["op"].params[0], node["op"].params[1])
elif left_name == "u3":
left_parameters = tuple(node["op"].params)
else:
left_name = "u1" # replace id with u1
left_parameters = (N(0), N(0), N(0))
# Compose gates
name_tuple = (left_name, right_name)
if name_tuple == ("u1", "u1"):
# u1(lambda1) * u1(lambda2) = u1(lambda1 + lambda2)
right_parameters = (N(0), N(0), right_parameters[2] +
left_parameters[2])
elif name_tuple == ("u1", "u2"):
# u1(lambda1) * u2(phi2, lambda2) = u2(phi2 + lambda1, lambda2)
right_parameters = (sympy.pi / 2, right_parameters[1] +
left_parameters[2], right_parameters[2])
elif name_tuple == ("u2", "u1"):
# u2(phi1, lambda1) * u1(lambda2) = u2(phi1, lambda1 + lambda2)
right_name = "u2"
right_parameters = (sympy.pi / 2, left_parameters[1],
right_parameters[2] + left_parameters[2])
elif name_tuple == ("u1", "u3"):
# u1(lambda1) * u3(theta2, phi2, lambda2) =
# u3(theta2, phi2 + lambda1, lambda2)
right_parameters = (right_parameters[0], right_parameters[1] +
left_parameters[2], right_parameters[2])
elif name_tuple == ("u3", "u1"):
# u3(theta1, phi1, lambda1) * u1(lambda2) =
# u3(theta1, phi1, lambda1 + lambda2)
right_name = "u3"
right_parameters = (left_parameters[0], left_parameters[1],
right_parameters[2] + left_parameters[2])
elif name_tuple == ("u2", "u2"):
# Using Ry(pi/2).Rz(2*lambda).Ry(pi/2) =
# Rz(pi/2).Ry(pi-2*lambda).Rz(pi/2),
# u2(phi1, lambda1) * u2(phi2, lambda2) =
# u3(pi - lambda1 - phi2, phi1 + pi/2, lambda2 + pi/2)
right_name = "u3"
right_parameters = (sympy.pi - left_parameters[2] -
right_parameters[1], left_parameters[1] +
sympy.pi / 2, right_parameters[2] +
sympy.pi / 2)
elif name_tuple[1] == "nop":
right_name = left_name
right_parameters = left_parameters
else:
# For composing u3's or u2's with u3's, use
# u2(phi, lambda) = u3(pi/2, phi, lambda)
# together with the qiskit.mapper.compose_u3 method.
right_name = "u3"
# Evaluate the symbolic expressions for efficiency
left_parameters = tuple(map(lambda x: x.evalf(), list(left_parameters)))
right_parameters = tuple(map(lambda x: x.evalf(), list(right_parameters)))
right_parameters = Optimize1qGates.compose_u3(left_parameters[0],
left_parameters[1],
left_parameters[2],
right_parameters[0],
right_parameters[1],
right_parameters[2])
# Why evalf()? This program:
# OPENQASM 2.0;
# include "qelib1.inc";
# qreg q[2];
# creg c[2];
# u3(0.518016983430947*pi,1.37051598592907*pi,1.36816383603222*pi) q[0];
# u3(1.69867232277986*pi,0.371448347747471*pi,0.461117217930936*pi) q[0];
# u3(0.294319836336836*pi,0.450325871124225*pi,1.46804720442555*pi) q[0];
# measure q -> c;
# took >630 seconds (did not complete) to optimize without
# calling evalf() at all, 19 seconds to optimize calling
# evalf() AFTER compose_u3, and 1 second to optimize
# calling evalf() BEFORE compose_u3.
# 1. Here down, when we simplify, we add f(theta) to lambda to
# correct the global phase when f(theta) is 2*pi. This isn't
# necessary but the other steps preserve the global phase, so
# we continue in that manner.
# 2. The final step will remove Z rotations by 2*pi.
# 3. Note that is_zero is true only if the expression is exactly
# zero. If the input expressions have already been evaluated
# then these final simplifications will not occur.
# TODO After we refactor, we should have separate passes for
# exact and approximate rewriting.
# Y rotation is 0 mod 2*pi, so the gate is a u1
if (right_parameters[0] % (2 * sympy.pi)).is_zero \
and right_name != "u1":
right_name = "u1"
right_parameters = (0, 0, right_parameters[1] +
right_parameters[2] +
right_parameters[0])
# Y rotation is pi/2 or -pi/2 mod 2*pi, so the gate is a u2
if right_name == "u3":
# theta = pi/2 + 2*k*pi
if ((right_parameters[0] - sympy.pi / 2) % (2 * sympy.pi)).is_zero:
right_name = "u2"
right_parameters = (sympy.pi / 2, right_parameters[1],
right_parameters[2] +
(right_parameters[0] - sympy.pi / 2))
# theta = -pi/2 + 2*k*pi
if ((right_parameters[0] + sympy.pi / 2) % (2 * sympy.pi)).is_zero:
right_name = "u2"
right_parameters = (sympy.pi / 2, right_parameters[1] +
sympy.pi, right_parameters[2] -
sympy.pi + (right_parameters[0] +
sympy.pi / 2))
# u1 and lambda is 0 mod 2*pi so gate is nop (up to a global phase)
if right_name == "u1" and (right_parameters[2] % (2 * sympy.pi)).is_zero:
right_name = "nop"
# Simplify the symbolic parameters
right_parameters = tuple(map(sympy.simplify, list(right_parameters)))
# Replace the data of the first node in the run
new_op = Instruction("", [], [], [])
if right_name == "u1":
new_op = U1Gate(right_parameters[2], run_qarg)
if right_name == "u2":
new_op = U2Gate(right_parameters[1], right_parameters[2], run_qarg)
if right_name == "u3":
new_op = U3Gate(*right_parameters, run_qarg)
nx.set_node_attributes(dag.multi_graph, name='name',
values={run[0]: right_name})
nx.set_node_attributes(dag.multi_graph, name='op',
values={run[0]: new_op})
# Delete the other nodes in the run
for current_node in run[1:]:
dag._remove_op_node(current_node)
if right_name == "nop":
dag._remove_op_node(run[0])
return dag
@staticmethod
def compose_u3(theta1, phi1, lambda1, theta2, phi2, lambda2):
"""Return a triple theta, phi, lambda for the product.
u3(theta, phi, lambda)
= u3(theta1, phi1, lambda1).u3(theta2, phi2, lambda2)
= Rz(phi1).Ry(theta1).Rz(lambda1+phi2).Ry(theta2).Rz(lambda2)
= Rz(phi1).Rz(phi').Ry(theta').Rz(lambda').Rz(lambda2)
= u3(theta', phi1 + phi', lambda2 + lambda')
Return theta, phi, lambda.
"""
# Careful with the factor of two in yzy_to_zyz
thetap, phip, lambdap = Optimize1qGates.yzy_to_zyz((lambda1 + phi2), theta1, theta2)
(theta, phi, lamb) = (thetap, phi1 + phip, lambda2 + lambdap)
return (theta, phi, lamb)
    @staticmethod
    def yzy_to_zyz(xi, theta1, theta2, eps=1e-9):  # pylint: disable=invalid-name
        """Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.

        Solves Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda)
        for theta, phi and lambda, and returns (theta, phi, lambda).
        """
        # Build the quaternion for the YZY rotation, then re-extract its
        # ZYZ Euler decomposition.
        quaternion_yzy = quaternion_from_euler([theta1, xi, theta2], 'yzy')
        euler = quaternion_yzy.to_zyz()
        quaternion_zyz = quaternion_from_euler(euler, 'zyz')
        # output order different than rotation order
        out_angles = (euler[1], euler[0], euler[2])
        # |<q_zyz, q_yzy>| must be 1 (up to sign) if both quaternions
        # represent the same rotation.
        abs_inner = abs(quaternion_zyz.data.dot(quaternion_yzy.data))
        # NOTE(review): np.allclose's third positional argument is `rtol`
        # (a relative tolerance), not an absolute one — confirm `eps` is
        # intended as a relative tolerance here.
        if not np.allclose(abs_inner, 1, eps):
            raise MapperError('YZY and ZYZ angles do not give same rotation matrix.')
        # Snap numerically-tiny angles to exactly zero so later exact
        # simplifications can fire.
        out_angles = tuple(0 if np.abs(angle) < _CHOP_THRESHOLD else angle
                           for angle in out_angles)
        return out_angles
| 51.247826 | 96 | 0.502503 |
acf9aa608a7533cc0065e25930779bfb1a30a2ed | 4,545 | py | Python | app/main/views.py | GLouisG/BlogPoint | 4203982eb9b3eb8962c238e88e59f8be44554ba1 | [
"MIT"
] | null | null | null | app/main/views.py | GLouisG/BlogPoint | 4203982eb9b3eb8962c238e88e59f8be44554ba1 | [
"MIT"
] | null | null | null | app/main/views.py | GLouisG/BlogPoint | 4203982eb9b3eb8962c238e88e59f8be44554ba1 | [
"MIT"
] | null | null | null | from app.requests import find_quotes
from . import main
from flask import render_template, request, redirect, url_for, abort,flash
from ..models import User, Blog, Comment, Sub
from flask_login import login_required, current_user
from .forms import BlogForm, UpdateProfile, BlogUpdate, CommentForm
from .. import db, photos
from ..email import mail_message
@main.route('/')
def index():
    """Landing page: show up to five blog posts and a quote of the day."""
    quote_of_the_day = find_quotes()
    recent_blogs = Blog.query.limit(5).all()
    return render_template(
        "index.html", blogs=recent_blogs, thequote=quote_of_the_day)
@main.route('/create_new',methods = ['GET','POST'])
@login_required
def new_blog():
    """Create a new blog post and notify the author's subscribers.

    GET renders the empty form; a valid POST saves the post, emails every
    subscriber of the current user, then redirects to the index.
    """
    form = BlogForm()
    if form.validate_on_submit():
        title = form.title.data
        content = form.content.data
        user_id = current_user._get_current_object().id
        new_blog_obj = Blog(content=content, title=title, user_id=user_id)
        new_blog_obj.save_blog()
        # BUG FIX: this notification loop used to sit *after* the redirect
        # below, so it never ran on a successful submission (and ran on
        # every plain GET instead).
        followers = Sub.query.filter_by(writer=current_user.username).all()
        for subscriber in followers:
            # BUG FIX: Sub stores the address in `email_add` (see
            # subscription()), not `email`.
            mail_message("New post!", "email/subscriber",
                         subscriber.email_add, user=subscriber)
        return redirect(url_for('main.index'))
    return render_template('new_blog.html', form=form)
@main.route('/comment/<int:blog_id>', methods = ['POST','GET'])
@login_required
def comment(blog_id):
    """Show a blog post with its comments and accept a new comment on it."""
    blog = Blog.query.get(blog_id)
    if blog is None:
        # Guard: previously an unknown id fell through to the template with
        # blog=None instead of a clean 404.
        abort(404)
    form = CommentForm()
    all_comments = Comment.query.filter_by(blog_id=blog_id).all()
    if form.validate_on_submit():
        content = form.content.data
        user_id = current_user._get_current_object().id
        new_comment = Comment(blog_id=blog_id, content=content, user_id=user_id)
        new_comment.save_comment()
        # Redirect-after-POST so a browser refresh does not resubmit.
        return redirect(url_for('.comment', blog_id=blog_id))
    # (removed stray debug `print(blog)` left over from development)
    return render_template('comment.html', form=form, blog=blog,
                           all_comments=all_comments)
@main.route('/index/<int:id>/delete',methods = ['GET','POST'])
@login_required
def delete(id):
    """Delete a blog post; only its author may do so (404 otherwise)."""
    post = Blog.query.filter_by(id=id).first()
    if post.user != current_user:
        abort(404)
    db.session.delete(post)
    db.session.commit()
    return redirect(url_for('.index'))
@main.route('/comment/<int:id>/delcomm',methods = ['GET','POST'])
@login_required
def delete_comm(id):
    """Delete a comment; only its author may do so (404 otherwise)."""
    target = Comment.query.filter_by(id=id).first()
    if target.user != current_user:
        abort(404)
    db.session.delete(target)
    db.session.commit()
    return redirect(url_for('.index'))
@main.route('/user/<uname>')
def profile(uname):
    """Show a user's profile page with their posts and author status."""
    user = User.query.filter_by(username=uname).first()
    if user is None:
        # Check before touching the user so unknown names 404 cleanly
        # (previously this check ran last, after the user was already used).
        abort(404)
    # BUG FIX: this view queried current_user's blogs and status, so every
    # profile page showed the *viewer's* posts — and crashed for anonymous
    # visitors, since this route has no @login_required. Use the profiled
    # user instead.
    blogs = Blog.query.filter_by(user_id=user.id).all()
    status = 'Author' if user.blog else 'Dedicated Reader'
    return render_template("profile/profile.html", user=user, blogs=blogs,
                           status=status)
@main.route('/blogs/<uname>/updateprofile', methods = ['GET','POST'])
@login_required
def update_profile(uname):
    """Edit the bio of the user identified by *uname*."""
    user = User.query.filter_by(username=uname).first()
    if user is None:
        abort(404)
    form = UpdateProfile()
    if form.validate_on_submit():
        user.bio = form.bio.data
        db.session.add(user)
        db.session.commit()
        return redirect(url_for('.profile', uname=user.username))
    return render_template('profile/update.html', form=form)
@main.route('/update/<int:id>',methods = ['GET','POST'])
@login_required
def blog_updater(id):
    """Let a post's author edit its title and content (404 for anyone else)."""
    post = Blog.query.filter_by(id=id).first()
    if post.user != current_user:
        abort(404)
    form = BlogUpdate()
    if form.validate_on_submit():
        post.title = form.title.data
        post.content = form.content.data
        db.session.add(post)
        db.session.commit()
        return redirect(url_for('main.index'))
    return render_template('update_blog.html', form=form)
@main.route('/subscription/<author>', methods = ['POST','GET'])
@login_required
def subscription(author):
    """Toggle the current user's subscription to *author*'s posts."""
    # BUG FIX: the lookup previously matched on the subscriber's email only,
    # so clicking "follow" on author B cancelled an existing subscription to
    # a *different* author A. Match on both email and writer.
    existing = Sub.query.filter_by(
        email_add=current_user.email, writer=author).first()
    if existing:
        db.session.delete(existing)
        db.session.commit()
    else:
        email = current_user._get_current_object().email
        new_sub_object = Sub(email_add=email, writer=author)
        new_sub_object.save_sub()
    # (removed an unreachable duplicate redirect that followed the if/else)
    return redirect(url_for('.index'))
| 34.172932 | 99 | 0.679428 |
acf9aae3f2f7dcd19e80f5bde5f99e4b2094e2cc | 191 | py | Python | scripts/de_dup_phylogeny.py | emilydolson/phylodiversity-metrics-in-EC-GPTP-2021 | 5c8c5ad703757724d2a13329347103deb7da3dc1 | [
"MIT"
] | null | null | null | scripts/de_dup_phylogeny.py | emilydolson/phylodiversity-metrics-in-EC-GPTP-2021 | 5c8c5ad703757724d2a13329347103deb7da3dc1 | [
"MIT"
] | null | null | null | scripts/de_dup_phylogeny.py | emilydolson/phylodiversity-metrics-in-EC-GPTP-2021 | 5c8c5ad703757724d2a13329347103deb7da3dc1 | [
"MIT"
] | null | null | null | import pandas as pd
import sys
import numpy as np
# De-duplicate phylogeny rows: one row per id, keeping each column's maximum.
df = pd.read_csv(sys.argv[1])
# Map +inf to NaN so infinities are ignored by the per-group max below.
# (NOTE(review): -inf, if present, is left untouched — confirm intended.)
df.replace(float("inf"), np.nan, inplace=True)
# BUG FIX/modernization: .aggregate(max) with the Python builtin is
# deprecated in recent pandas; .max() is the equivalent (NaN-skipping)
# cythonized reduction.
df = df.groupby("id").max()
df.to_csv("phylogeny.csv")
acf9ad3ee94efbf1b8752ab1fe893844f31f7279 | 1,079 | py | Python | glance/test_glance-controller-node.py | cyberxml/testinfra-openstack-tests | 8b57ff2901463deeaa4d58486bb6d14f65ba3d24 | [
"MIT"
] | null | null | null | glance/test_glance-controller-node.py | cyberxml/testinfra-openstack-tests | 8b57ff2901463deeaa4d58486bb6d14f65ba3d24 | [
"MIT"
] | null | null | null | glance/test_glance-controller-node.py | cyberxml/testinfra-openstack-tests | 8b57ff2901463deeaa4d58486bb6d14f65ba3d24 | [
"MIT"
] | null | null | null | import pytest
@pytest.mark.parametrize("name", [
# ("openstack-utils"),
# ("python-glance-store"),
# ("python-glanceclient"),
("openstack-glance"),
# ("python-glance"),
])
def test_packages(host, name):
pkg = host.package(name)
assert pkg.is_installed
def test_listening_interfaces(host):
sckt = host.socket("tcp://0.0.0.0:9292")
assert sckt.is_listening
@pytest.mark.parametrize("process,enabled", [
("openstack-glance-api", True),
("openstack-glance-registry", True),
])
def test_services(host, process, enabled):
svc = host.service(process)
assert svc.is_running
if enabled:
assert svc.is_enabled
@pytest.mark.parametrize("service,conf_file", [
("glance", "glance-api.conf"),
("glance", "glance-cache.conf"),
("glance", "glance-registry.conf"),
("glance", "glance-scrubber.conf"),
("glance", "schema-image.json"),
#("glance", "policy.json"),
])
def test_main_services_files(host, service, conf_file):
_file = host.file("/etc/" + service + "/" + conf_file)
assert _file.exists
| 27.666667 | 58 | 0.647822 |
acf9ad659b573ce6884c02e796ba59f84f069d8e | 714 | py | Python | article2word.py | hail-linda/transcribe | 4c9f503c55af7a90d4df92fa42483e07a6a56f6c | [
"MIT"
] | 3 | 2020-03-05T16:32:42.000Z | 2020-06-09T08:56:51.000Z | article2word.py | hail-linda/transcribe | 4c9f503c55af7a90d4df92fa42483e07a6a56f6c | [
"MIT"
] | null | null | null | article2word.py | hail-linda/transcribe | 4c9f503c55af7a90d4df92fa42483e07a6a56f6c | [
"MIT"
] | null | null | null | import string
# Python 2 script: extract unique lowercase words (length > 1) from
# article.txt, printing each new word and appending it to words.txt.
fin = open("article.txt")
output = open("words.txt","w")
# Words already seen (list membership check is O(n) per word; fine for
# small articles).
words = []
count = 0
for line in fin:
	# Split hyphenated compounds into separate words.
	line = line.replace('-',' ')
	for word in line.split():
		# Strip quoting/punctuation characters embedded in the token.
		word = word.replace('\"','')
		word = word.replace('\t','')
		word = word.replace(' ','')
		word = word.replace('\'','')
		word = word.replace('!','')
		word = word.replace('?','')
		word = word.strip()
		# Remove all digits 0-9.
		for i in range(10):
			word = word.replace(str(i),'')
		word = word.strip(string.punctuation + string.whitespace)
		word = word.lower()
		if word not in words and len(word)>1:
			word = word.replace(' ','')
			words.append(word)
			count = count + 1
			print count , ' ' , word
			print >> output, word
fin.close()
output.close()
| 22.3125 | 59 | 0.581232 |
acf9ad7f1df1974cf222e1b6c7412f1d72c37725 | 3,739 | py | Python | source/codegen/generate_service.py | ni/grpc-device | 9713da936ba712930554bdd8f8c7452be509e900 | [
"MIT"
] | 24 | 2021-03-25T18:37:59.000Z | 2022-03-03T16:33:56.000Z | source/codegen/generate_service.py | ni/grpc-device | 9713da936ba712930554bdd8f8c7452be509e900 | [
"MIT"
] | 129 | 2021-04-03T15:16:04.000Z | 2022-03-25T21:48:18.000Z | source/codegen/generate_service.py | ni/grpc-device | 9713da936ba712930554bdd8f8c7452be509e900 | [
"MIT"
] | 24 | 2021-03-31T12:36:14.000Z | 2022-02-25T03:01:25.000Z | import os
import argparse
import metadata_mutation
import metadata_validation
from mako.lookup import TemplateLookup
import common_helpers
from template_helpers import instantiate_mako_template, load_metadata, write_if_changed
def generate_service_file(metadata, template_file_name, generated_file_suffix, gen_dir):
    """Render one Mako template for the API described by *metadata*.

    Output lands in <gen_dir>/<module_name>/<module_name><suffix>, and the
    file is only rewritten when its content actually changed.
    """
    module_name = metadata["config"]["module_name"]
    target_dir = os.path.join(gen_dir, module_name)
    os.makedirs(target_dir, exist_ok=True)
    target_path = os.path.join(target_dir, module_name + generated_file_suffix)
    rendered = instantiate_mako_template(template_file_name).render(data=metadata)
    write_if_changed(target_path, rendered)
def mutate_metadata(metadata: dict):
    """Run all in-place metadata normalization passes over every function.

    NOTE(review): the passes appear order-sensitive (e.g. size/non-proto
    marking before gRPC type population) — confirm before reordering.
    """
    config = metadata["config"]
    attribute_expander = metadata_mutation.AttributeAccessorExpander(metadata)
    for function_name in metadata["functions"]:
        function = metadata["functions"][function_name]
        parameters = function["parameters"]
        # Per-parameter normalization passes.
        metadata_mutation.sanitize_names(parameters)
        metadata_mutation.set_var_args_types(parameters, config)
        metadata_mutation.mark_size_params(parameters)
        metadata_mutation.mark_non_proto_params(parameters)
        metadata_mutation.mark_mapped_enum_params(
            parameters, metadata["enums"])
        metadata_mutation.populate_grpc_types(parameters, config)
        # Attribute accessor expansion works on the whole function entry.
        attribute_expander.expand_attribute_value_params(function)
        attribute_expander.patch_attribute_enum_type(function_name, function)
def generate_all(metadata_dir: str, gen_dir: str, validate_only: bool):
    """Validate the API metadata and, unless *validate_only*, render every
    generated artifact (proto, service, registrar, library and client files).
    """
    metadata = load_metadata(metadata_dir)
    metadata_validation.validate_metadata(metadata)
    if validate_only:
        return
    metadata["lookup"] = TemplateLookup(directories=metadata_dir)
    mutate_metadata(metadata)
    # (template file, generated-file suffix) pairs, rendered in this order.
    artifacts = [
        ("proto.mako", ".proto"),
        ("service.h.mako", "_service.h"),
        ("service.cpp.mako", "_service.cpp"),
        ("service_registrar.h.mako", "_service_registrar.h"),
        ("service_registrar.cpp.mako", "_service_registrar.cpp"),
        ("library_interface.h.mako", "_library_interface.h"),
        ("library.cpp.mako", "_library.cpp"),
        ("library.h.mako", "_library.h"),
        ("mock_library.h.mako", "_mock_library.h"),
        ("client.h.mako", "_client.h"),
        ("client.cpp.mako", "_client.cpp"),
    ]
    for template_name, suffix in artifacts:
        generate_service_file(metadata, template_name, suffix, gen_dir)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the generator.
    arg_parser = argparse.ArgumentParser(
        description="Generate files for specified NI driver API gRPC service.")
    arg_parser.add_argument(
        "metadata", help="The path to the directory containing the metadata for the API being generated.")
    arg_parser.add_argument(
        "--output", "-o", default=".",
        help="The path to the top-level directory to save the generated files. The API-specific sub-directories will be automatically created.")
    arg_parser.add_argument(
        "--validate", "-v", dest="validate", action="store_true",
        help="Just validate the metadata and don't generate any files",
    )
    cli_args = arg_parser.parse_args()
    generate_all(cli_args.metadata, cli_args.output, cli_args.validate)
acf9ade8e020182ed156c8c53d8bff3594cabeda | 953 | py | Python | servequnit/network.py | bnkr/selenit | bdbedd930a5d324ddfbebcc0be3998d7d517eced | [
"MIT"
] | 1 | 2015-03-04T22:45:52.000Z | 2015-03-04T22:45:52.000Z | servequnit/network.py | bnkr/selenit | bdbedd930a5d324ddfbebcc0be3998d7d517eced | [
"MIT"
] | null | null | null | servequnit/network.py | bnkr/selenit | bdbedd930a5d324ddfbebcc0be3998d7d517eced | [
"MIT"
] | null | null | null | import random, socket, os
def get_external_address(routable=True):
    """Return an IP address of this machine that other hosts can reach.

    Opens a UDP socket "towards" a public address and reads back the local
    address the OS chose for that route (UDP connect() sends no packets).
    Slow and not particularly reliable, but avoids the trap where the
    machine's hostname resolves to a loopback address.

    NOTE(review): the ``routable`` parameter is accepted but never used;
    kept for backward compatibility — TODO: honor or remove it.

    Raises:
        Exception: if no non-loopback address could be determined.
    """
    maybe_external = ['8.8.8.8', ]
    for host in maybe_external:
        # BUG FIX: the socket used to be created *inside* the try block, so
        # a failure in socket() itself made the finally clause close an
        # unbound (or stale, previous-iteration) ``sock``.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            sock.connect((host, 80))
            name_or_ip = sock.getsockname()[0]
            if name_or_ip.startswith('127.'):
                continue
            elif name_or_ip == 'localhost':
                continue
            else:
                return name_or_ip
        except socket.error:
            pass
        finally:
            sock.close()
    raise Exception("could not find an external address")
def get_random_port():
    """Return a random unprivileged port number in [1025, 65535].

    The port is not reserved, so a collision with a port already in use is
    still possible.
    """
    # BUG FIX: the upper bound was pow(2, 16) == 65536, which is not a valid
    # port number (the maximum is 65535, and randint's bounds are inclusive).
    return random.randint(1025, 65535)
| 29.78125 | 80 | 0.594963 |
acf9ae2ec9832be7854af2eeff15c281bf56ace7 | 12,042 | py | Python | openff/interchange/tests/energy_tests/test_energies.py | umesh-timalsina/openff-system | 20c90d7ad1b2dfc80315172c5b0061938178d854 | [
"MIT"
] | null | null | null | openff/interchange/tests/energy_tests/test_energies.py | umesh-timalsina/openff-system | 20c90d7ad1b2dfc80315172c5b0061938178d854 | [
"MIT"
] | null | null | null | openff/interchange/tests/energy_tests/test_energies.py | umesh-timalsina/openff-system | 20c90d7ad1b2dfc80315172c5b0061938178d854 | [
"MIT"
] | null | null | null | from copy import deepcopy
import mdtraj as md
import numpy as np
import pytest
from openff.toolkit.topology import Molecule, Topology
from openff.units import unit
from openff.utilities.testing import skip_if_missing
from simtk import openmm
from simtk import unit as simtk_unit
from simtk.openmm import app
from openff.interchange.components.mdtraj import OFFBioTop
from openff.interchange.drivers.openmm import _get_openmm_energies, get_openmm_energies
from openff.interchange.drivers.report import EnergyError, EnergyReport
from openff.interchange.stubs import ForceField
from openff.interchange.tests.utils import HAS_GROMACS, HAS_LAMMPS, needs_gmx, needs_lmp
from openff.interchange.utils import get_test_file_path
if HAS_GROMACS:
from openff.interchange.drivers.gromacs import (
_get_mdp_file,
_run_gmx_energy,
get_gromacs_energies,
)
if HAS_LAMMPS:
from openff.interchange.drivers.lammps import get_lammps_energies
def test_energy_report():
    """Both out-of-tolerance keys must be captured in a single EnergyError."""
    kj_mol = unit.kilojoule / unit.mol

    def _report(a_val, flag_val, key_val):
        return EnergyReport(
            energies={
                "a": a_val * kj_mol,
                "_FLAG": flag_val * kj_mol,
                "KEY_": key_val * kj_mol,
            }
        )

    report_a = _report(1, 2, 1.2)
    report_b = _report(-1, -2, -0.1)
    tolerances = {key: 1 * kj_mol for key in ("a", "_FLAG", "KEY_")}
    # The regex requires _FLAG and KEY_ to both appear in the error message.
    with pytest.raises(EnergyError, match=r"_FLAG[\s\S]*KEY_"):
        report_a.compare(report_b, custom_tolerances=tolerances)
@skip_if_missing("mbuild")
@needs_gmx
@needs_lmp
@pytest.mark.xfail
@pytest.mark.slow
@pytest.mark.parametrize("constrained", [True, False])
@pytest.mark.parametrize("mol_smi", ["C"]) # ["C", "CC"]
def test_energies_single_mol(constrained, mol_smi):
    """Cross-check single-molecule energies between the OpenMM reference,
    the GROMACS writer and (for unconstrained force fields) LAMMPS."""
    import mbuild as mb
    mol = Molecule.from_smiles(mol_smi)
    mol.generate_conformers(n_conformers=1)
    mol.name = "FOO"
    top = mol.to_topology()
    top.box_vectors = None # [10, 10, 10] * simtk_unit.nanometer
    if constrained:
        parsley = ForceField("openff-1.0.0.offxml")
    else:
        parsley = ForceField("openff_unconstrained-1.0.0.offxml")
    off_sys = parsley.create_openff_interchange(top)
    off_sys.handlers["Electrostatics"].method = "cutoff"
    # Round-trip the conformer through mbuild to place it in a 10 nm box.
    mol.to_file("out.xyz", file_format="xyz")
    compound: mb.Compound = mb.load("out.xyz")
    packed_box: mb.Compound = mb.fill_box(
        compound=compound, n_compounds=1, box=mb.Box(lengths=[10, 10, 10])
    )
    positions = packed_box.xyz * unit.nanometer
    off_sys.positions = positions
    # Compare directly to toolkit's reference implementation
    omm_energies = get_openmm_energies(off_sys, round_positions=8)
    omm_reference = parsley.create_openmm_system(top)
    reference_energies = _get_openmm_energies(
        omm_sys=omm_reference,
        box_vectors=off_sys.box,
        positions=off_sys.positions,
        round_positions=8,
    )
    omm_energies.compare(reference_energies)
    mdp = "cutoff_hbonds" if constrained else "auto"
    # Compare GROMACS writer and OpenMM export
    gmx_energies = get_gromacs_energies(off_sys, mdp=mdp)
    custom_tolerances = {
        "Bond": 2e-5 * simtk_unit.kilojoule_per_mole,
        "Electrostatics": 2 * simtk_unit.kilojoule_per_mole,
        "vdW": 2 * simtk_unit.kilojoule_per_mole,
        "Nonbonded": 2 * simtk_unit.kilojoule_per_mole,
        "Angle": 1e-4 * simtk_unit.kilojoule_per_mole,
    }
    gmx_energies.compare(
        omm_energies,
        custom_tolerances=custom_tolerances,
    )
    # LAMMPS comparison only for the unconstrained force field.
    if not constrained:
        other_energies = get_openmm_energies(
            off_sys,
            round_positions=8,
            hard_cutoff=True,
            electrostatics=True,
        )
        lmp_energies = get_lammps_energies(off_sys)
        custom_tolerances = {
            "vdW": 5.0 * simtk_unit.kilojoule_per_mole,
            "Electrostatics": 5.0 * simtk_unit.kilojoule_per_mole,
        }
        lmp_energies.compare(other_energies, custom_tolerances=custom_tolerances)
@needs_gmx
@needs_lmp
@pytest.mark.slow
def test_liquid_argon():
    """Compare liquid-argon single-point energies across OpenMM, the
    internal GROMACS writer and LAMMPS."""
    argon = Molecule.from_smiles("[#18]")
    pdbfile = app.PDBFile(get_test_file_path("packed-argon.pdb"))
    top = Topology.from_openmm(pdbfile.topology, unique_molecules=[argon])
    argon_ff = ForceField(get_test_file_path("argon.offxml"))
    out = argon_ff.create_openff_interchange(top)
    out.positions = pdbfile.positions
    omm_energies = get_openmm_energies(out)
    gmx_energies = get_gromacs_energies(
        out,
        mdp="auto",
        writer="internal",
    )
    omm_energies.compare(
        gmx_energies,
        custom_tolerances={
            "vdW": 0.008 * simtk_unit.kilojoule_per_mole,
        },
    )
    # Zero out the vdW switching width before the LAMMPS comparison —
    # presumably because the LAMMPS export path does not reproduce the
    # switching function (TODO confirm).
    argon_ff_no_switch = deepcopy(argon_ff)
    argon_ff_no_switch["vdW"].switch_width *= 0
    out_no_switch = argon_ff_no_switch.create_openff_interchange(top)
    out_no_switch.positions = pdbfile.positions
    lmp_energies = get_lammps_energies(out_no_switch)
    omm_energies.compare(
        lmp_energies,
        custom_tolerances={
            "vdW": 10.5 * simtk_unit.kilojoule_per_mole,
        },
    )
@needs_gmx
@pytest.mark.skip("Skip until residues are matched between gro and top")
@pytest.mark.parametrize(
    "toolkit_file_path",
    [
        # ("systems/test_systems/1_cyclohexane_1_ethanol.pdb", 18.165),
        "systems/packmol_boxes/cyclohexane_ethanol_0.4_0.6.pdb",
    ],
)
def test_packmol_boxes(toolkit_file_path):
    """Compare packed-box energies against the toolkit's OpenMM export and
    the GROMACS writer (currently skipped, see the skip reason above)."""
    # TODO: Isolate a set of systems here instead of using toolkit data
    # TODO: Fix nonbonded energy differences
    from openff.toolkit.utils import get_data_file_path
    pdb_file_path = get_data_file_path(toolkit_file_path)
    pdbfile = openmm.app.PDBFile(pdb_file_path)
    ethanol = Molecule.from_smiles("CCO")
    cyclohexane = Molecule.from_smiles("C1CCCCC1")
    omm_topology = pdbfile.topology
    off_topology = OFFBioTop.from_openmm(
        omm_topology, unique_molecules=[ethanol, cyclohexane]
    )
    off_topology.mdtop = md.Topology.from_openmm(omm_topology)
    parsley = ForceField("openff_unconstrained-1.0.0.offxml")
    off_sys = parsley.create_openff_interchange(off_topology)
    # Box vectors taken from the PDB, converted to nm.
    off_sys.box = np.asarray(
        pdbfile.topology.getPeriodicBoxVectors().value_in_unit(simtk_unit.nanometer)
    )
    off_sys.positions = pdbfile.positions
    sys_from_toolkit = parsley.create_openmm_system(off_topology)
    omm_energies = get_openmm_energies(off_sys, hard_cutoff=True, electrostatics=False)
    reference = _get_openmm_energies(
        sys_from_toolkit,
        off_sys.box,
        off_sys.positions,
        hard_cutoff=True,
        electrostatics=False,
    )
    omm_energies.compare(
        reference,
        custom_tolerances={
            "Electrostatics": 2e-2 * simtk_unit.kilojoule_per_mole,
        },
    )
    # custom_tolerances={"HarmonicBondForce": 1.0}
    # Compare GROMACS writer and OpenMM export
    gmx_energies = get_gromacs_energies(off_sys, electrostatics=False)
    omm_energies_rounded = get_openmm_energies(
        off_sys,
        round_positions=8,
        hard_cutoff=True,
        electrostatics=False,
    )
    omm_energies_rounded.compare(
        other=gmx_energies,
        custom_tolerances={
            "Angle": 1e-2 * simtk_unit.kilojoule_per_mole,
            "Torsion": 1e-2 * simtk_unit.kilojoule_per_mole,
            "Electrostatics": 3200 * simtk_unit.kilojoule_per_mole,
        },
    )
@needs_lmp
@pytest.mark.slow
def test_water_dimer():
    """Compare TIP3P water-dimer energies between the Interchange OpenMM
    export, the toolkit's reference system, GROMACS and LAMMPS."""
    tip3p = ForceField(get_test_file_path("tip3p.offxml"))
    water = Molecule.from_smiles("O")
    top = Topology.from_molecules(2 * [water])
    top.mdtop = md.Topology.from_openmm(top.to_openmm())
    pdbfile = openmm.app.PDBFile(get_test_file_path("water-dimer.pdb"))
    positions = pdbfile.positions
    openff_sys = tip3p.create_openff_interchange(top)
    openff_sys.positions = positions
    openff_sys.box = [10, 10, 10] * unit.nanometer
    omm_energies = get_openmm_energies(
        openff_sys,
        hard_cutoff=True,
        electrostatics=False,
    )
    toolkit_energies = _get_openmm_energies(
        tip3p.create_openmm_system(top),
        openff_sys.box,
        openff_sys.positions,
        hard_cutoff=True,
        electrostatics=False,
    )
    omm_energies.compare(toolkit_energies)
    # TODO: Fix GROMACS energies by handling SETTLE constraints
    # gmx_energies, _ = get_gromacs_energies(openff_sys)
    # compare_gromacs_openmm(omm_energies=omm_energies, gmx_energies=gmx_energies)
    openff_sys["Electrostatics"].method = "cutoff"
    # NOTE(review): despite the name, this variable holds *GROMACS* energies.
    omm_energies_cutoff = get_gromacs_energies(openff_sys)
    lmp_energies = get_lammps_energies(openff_sys)
    lmp_energies.compare(omm_energies_cutoff)
@needs_gmx
@skip_if_missing("foyer")
@skip_if_missing("mbuild")
@pytest.mark.slow
def test_process_rb_torsions():
    """Test that the GROMACS driver reports Ryckaert-Bellemans torsions"""
    import foyer
    import mbuild as mb
    oplsaa = foyer.Forcefield(name="oplsaa")
    ethanol = Molecule.from_smiles("CCO")
    ethanol.generate_conformers(n_conformers=1)
    ethanol.generate_unique_atom_names()
    # Run this OFFMol through MoSDeF infrastructure and OPLS-AA
    from openff.interchange.components.mbuild import offmol_to_compound
    my_compound = offmol_to_compound(ethanol)
    my_compound.box = mb.Box(lengths=[4, 4, 4])
    # NOTE(review): this re-creates the same force field as above; the
    # second assignment appears redundant.
    oplsaa = foyer.Forcefield(name="oplsaa")
    struct = oplsaa.apply(my_compound)
    struct.save("eth.top", overwrite=True)
    struct.save("eth.gro", overwrite=True)
    # Get single-point energies using GROMACS
    oplsaa_energies = _run_gmx_energy(
        top_file="eth.top", gro_file="eth.gro", mdp_file=_get_mdp_file("default")
    )
    assert oplsaa_energies.energies["Torsion"].m != 0.0
@needs_gmx
def test_gmx_14_energies_exist():
    """The GROMACS driver must report non-zero 1-4 nonbonded energies."""
    # TODO: Make sure 1-4 energies are accurate, not just existent
    # Use a molecule with only one 1-4 interaction, and
    # make it between heavy atoms because H-H 1-4 are weak
    mol = Molecule.from_smiles("ClC#CCl")
    mol.name = "HPER"
    mol.generate_conformers(n_conformers=1)
    parsley = ForceField("openff-1.0.0.offxml")
    out = parsley.create_openff_interchange(topology=mol.to_topology())
    out.positions = mol.conformers[0]
    # Put this molecule in a large box with cut-off electrostatics
    # to prevent it from interacting with images of itself
    out.box = [40, 40, 40]
    out["Electrostatics"].method = "cutoff"
    gmx_energies = get_gromacs_energies(out)
    # The only possible non-bonded interactions should be from 1-4 intramolecular interactions
    assert gmx_energies.energies["vdW"].m != 0.0
    assert gmx_energies.energies["Electrostatics"].m != 0.0
    # TODO: It would be best to save the 1-4 interactions, split off into vdW and Electrostatics
    # in the energies. This might be tricky/intractable to do for engines that are not GROMACS
@needs_gmx
@needs_lmp
@pytest.mark.xfail
@pytest.mark.slow
def test_cutoff_electrostatics():
    """GROMACS and LAMMPS cutoff electrostatics must agree for a Li+/Cl-
    pair scanned over several separations."""
    ion_ff = ForceField(get_test_file_path("ions.offxml"))
    ions = Topology.from_molecules(
        [
            Molecule.from_smiles("[#3]"),
            Molecule.from_smiles("[#17]"),
        ]
    )
    out = ion_ff.create_openff_interchange(ions)
    out.box = [4, 4, 4] * unit.nanometer
    gmx = []
    lmp = []
    # Scan the inter-ion distance from 0.75 to 0.95 nm.
    for d in np.linspace(0.75, 0.95, 5):
        positions = np.zeros((2, 3)) * unit.nanometer
        positions[1, 0] = d * unit.nanometer
        out.positions = positions
        out["Electrostatics"].method = "cutoff"
        gmx.append(get_gromacs_energies(out, mdp="auto").energies["Electrostatics"].m)
        lmp.append(
            get_lammps_energies(out)
            .energies["Electrostatics"]
            .m_as(unit.kilojoule / unit.mol)
        )
    # Sum of absolute differences across all separations must be tiny.
    assert np.sum(np.sqrt(np.square(np.asarray(lmp) - np.asarray(gmx)))) < 1e-3
acf9ae683f679a30cab4ee8baae6af05a6172802 | 7,684 | py | Python | trch-bl0/genisr.py | cimes-isi/hpsc-baremetal | f4c8097f72a348e3a69db7051c9118ebd4b1b0f3 | [
"BSD-3-Clause"
] | null | null | null | trch-bl0/genisr.py | cimes-isi/hpsc-baremetal | f4c8097f72a348e3a69db7051c9118ebd4b1b0f3 | [
"BSD-3-Clause"
] | null | null | null | trch-bl0/genisr.py | cimes-isi/hpsc-baremetal | f4c8097f72a348e3a69db7051c9118ebd4b1b0f3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python2
import argparse
import re
import sys
import os
def parse_defs(fname, incpaths):
    """Parse simple ``#define NAME VALUE`` lines from *fname*.

    The file is searched for in the current directory first, then in each
    entry of *incpaths*; only the first file found is parsed. Lines that do
    not match ``#define <word> <word>`` are ignored.

    Returns:
        dict mapping macro name -> value (both strings); empty if no file
        was found.
    """
    defs = {}
    for incpath in ['.'] + incpaths:
        try:
            # BUG FIX: the file handle used to be leaked
            # (`for line in open(...)`); close it deterministically.
            with open(os.path.join(incpath, fname)) as header:
                for line in header:
                    line = line.strip()
                    m = re.match(r'^#define\s+(\w+)\s+(\w+)', line)
                    if m:
                        defs[m.group(1)] = m.group(2)
            break  # first found file only
        except IOError:
            # Not present in this search path; try the next one.
            pass
    return defs
def expand_macros(defs, s):
    """Replace each whole-word occurrence of a macro name in *s* with its value.

    Substitutions are applied one macro at a time, so a value that contains
    another macro's name may itself be expanded by a later pass.
    """
    expanded = s
    for name in defs:
        expanded = re.sub(r'\b%s\b' % name, defs[name], expanded)
    return expanded
def parse_irqmap(fname, defs, incpaths):
    """Parse an IRQ-to-ISR map file written in a small C-preprocessor-like
    dialect (#if/#ifdef/#else/#endif/#error/#info/#include plus macro
    expansion) and return a dict mapping IRQ number -> ISR name (or None
    for "generate a stub ISR").

    NOTE(review): this function is Python-2-only as written (`map(...)` is
    indexed below) and uses `eval` on expanded expressions, so the map file
    must be trusted input. The file handle is also never closed — TODO.
    """
    d = {}
    ifdef = [True] # stack, each bool element indicates if enabled
    linenum = 0
    for line in open(fname):
        linenum += 1
        line = line.strip()
        # Skip blank lines and // comments.
        if len(line) == 0 or line.startswith('//'):
            continue
        m = re.match(r'^#else(\s+//.*)?', line)
        if m:
            # Flip the innermost conditional state.
            ifdef[-1] = not ifdef[-1]
            continue
        m = re.match(r'^#endif(\s+//.*)?', line)
        if m:
            # Pop the innermost conditional state.
            ifdef = ifdef[:-1]
            continue
        # Everything below is ignored while inside a disabled branch.
        if not ifdef[-1]:
            continue
        m = re.match(r'^#if (.+)', line)
        if m:
            expr = m.group(1)
            # Expand macros, then evaluate the expression as Python.
            cond = bool(eval(expand_macros(defs, expr)))
            ifdef += [cond]
            continue
        m = re.match(r'^#ifdef ([A-Za-z0-9_]+)', line)
        if m:
            macro = m.group(1)
            ifdef += [macro in defs]
            continue
        m = re.match(r'^#error (.*)', line)
        if m:
            msg = m.group(1)
            raise Exception("error on line %u: %s" % (linenum, msg))
            continue
        m = re.match(r'^#info (.*)', line)
        if m:
            msg = m.group(1)
            print(msg)
            continue
        m = re.match(r'^#include ["<]([^">]+)[>"]', line)
        if m:
            incfile = m.group(1)
            # Pull #define values from the included header.
            defs.update(parse_defs(incfile, incpaths))
            continue
        m = re.match(r'^#.*', line)
        if m:
            raise Exception(("parse error on line %u: "
                            "invalid preprocessor directive") % linenum)
        line = expand_macros(defs, line)
        p = line
        if ':' in p: # explicitly named C ISR
            kv = [s.strip() for s in p.split(':')]
            # The IRQ number may itself be an expression after expansion.
            irq = int(eval(kv[0]))
            if irq in d:
                raise Exception("line %u: IRQ %u redefined" % (linenum, irq))
            d[irq] = kv[1]
        else: # create an ISR stub
            if '-' in p:
                # Range form "A-B"; NOTE(review): range() excludes B, so
                # the upper bound appears to be exclusive — confirm.
                r = map(int, p.split('-'))
                irq_nums = range(r[0], r[1])
            else:
                irq_nums = [int(p)]
            for n in irq_nums:
                d[n] = None
    return d
def dict_entry(s):
    """Parse a ``NAME[=VALUE]`` macro definition into a one-entry dict.

    A missing ``=VALUE`` part yields an empty-string value; surrounding
    whitespace is stripped from both parts. Raises Exception if the name
    part is empty.
    """
    name_part, _sep, value_part = s.partition('=')
    name = name_part.strip()
    value = value_part.strip()
    if len(name) == 0:
        raise Exception("Invalid dict entry string: '%s'" % s)
    return { name: value }
# Command-line interface: configure IRQ counts, the map file, include
# search paths, -D macro definitions and the two output files.
parser = argparse.ArgumentParser(
    description="Generate assembly source for vector table")
parser.add_argument('--internal-irqs', type=int, default=16,
                    help='Number internal IRQs')
parser.add_argument('--external-irqs', type=int, default=240,
                    help='Number external IRQs')
parser.add_argument('--irqmap',
                    help='IRQ to ISR handler map file')
parser.add_argument('--include-dir', '-I', action='append', default=['.'],
                    help='Add path where to look for included files')
parser.add_argument('--define', '-D', action='append',
                    type=dict_entry, default=[],
                    help='Define a macro, format: NAME[=VALUE]')
parser.add_argument('--verbose', '-v', action='store_true',
                    help='Print IRQ map')
# BUG FIX: the help strings for out_asm and out_c were swapped — the code
# below writes the assembly vector table to out_asm and the C stubs to out_c.
parser.add_argument('out_asm',
                    help='Output file with generated assembly source')
parser.add_argument('out_c',
                    help='Output file with generated C source')
args = parser.parse_args()
# Merge all -D definitions into a single macro dict.
defs = {}
for d in args.define:
    defs.update(d)
irqmap = parse_irqmap(args.irqmap, defs, args.include_dir)
if args.verbose:
    for irq in irqmap:
        print("%4u: %s" % (irq, irqmap[irq]))
# NOTE(review): parse_irqmap always returns a dict, so this fallback
# appears to be dead code.
if irqmap is None:
    irqmap = range(0, 240)
def external(irq):
    """Map an absolute vector number to its external-IRQ index
    (external IRQs start after the internal exception vectors)."""
    return irq - args.internal_irqs
def is_internal(irq):
    """Return True if *irq* is one of the internal exception vectors.

    BUG FIX: the threshold was hard-coded to 16; use the configurable
    --internal-irqs value (default 16) so a non-default setting stays
    consistent with external() above.
    """
    return irq < args.internal_irqs
# NVIC register constants: base address of the NVIC block and the offset of
# the Interrupt Clear-Pending Registers (ICPR), used below when emitting
# the pending-flag-clearing code in each ISR stub.
NVIC_BASE = 0xe000e000
NVIC_ICPR = 0x280
# ISR handlers for each vector number
# The rest of the vectors (not in this dict) get default handler
DEFAULT_ISR = "hang"
# A None handler means "emit no .word for this vector here" — vector 0 is
# the initial stack pointer, already emitted via `.word __stacktop`.
isr = {
    0: None,
    1: "reset",
    11: "svc",
    15: "systick",
}
# --- Emit the assembly vector table and the core exception handlers. ---
# NOTE(review): `f` is never explicitly closed; the script relies on
# interpreter exit to flush it — TODO: close (or use a with-statement).
f = open(args.out_asm, "w")
f.write(
"""/* This file was automatically generated by genisr.py. */
.cpu cortex-m4
.thumb
.global __entry
.word __stacktop
"""
)
# One .word entry per vector: named internal vectors use their handler from
# `isr`, mapped external IRQs get isrN stubs, unnamed internal vectors get
# excN stubs, everything else falls back to DEFAULT_ISR. Entries listed in
# `isr` with a None handler emit nothing.
for i in range(0, args.internal_irqs + args.external_irqs):
    handler = None
    if i in isr:
        if isr[i] is not None:
            handler = isr[i]
    elif external(i) in irqmap:
        handler = "isr%u" % external(i)
    elif is_internal(i):
        handler = "exc%u" % i
    else:
        handler = DEFAULT_ISR
    if handler is not None:
        f.write(".word %s\n" % handler)
f.write("\n")
# Fixed handlers: reset/startup (.bss zeroing then _main), SVC, SysTick and
# the `hang` catch-all.
f.write(
"""
__entry: /* same as 'reset', but must not be marked with .thumb_func */
.thumb_func
reset:
b crt_init
b hang
b hang
.thumb_func
svc:
mov r0, #0
sub r0, #7 // 0xfffffff9: priveledged Thread mode with main stack
bx r0
.thumb_func
systick:
push {r0, r1, lr}
bl systick_isr
/* Clear Pending flag */
ldr r0, icsr_addr
mov r1, #1
lsl r1, #25 /* PENDSTCLR */
str r1, [r0]
pop {r0, r1, pc}
.align 2
icsr_addr: /* re-useable by across exc handlers */
.word 0xe000ed04
.thumb_func
hang: b .
b hang
.thumb_func
crt_init:
// Zero-initialize .bss
ldr r0, =__bss_start
ldr r1, =__bss_end
mov r2, #0
bss_zero_loop:
str r2, [r0]
add r0, #4
cmp r0, r1
bne bss_zero_loop
bl _main
b hang
"""
 + "\n");
# Self-branching stubs for internal exception vectors with no named ISR.
for irq in range(args.internal_irqs):
    if not irq in isr:
        f.write(("""
.thumb_func
exc%u:
b exc%u
""") % (irq, irq))
# --- Per-IRQ assembly stubs: print the IRQ number, call the C handler,
# then clear the pending bit in the matching NVIC ICPR register.
# NOTE(review): this loop rebinds the name `isr` (previously the handler
# dict) to a string; harmless since the dict is no longer needed after the
# loops above, but worth renaming.
for irq in irqmap:
    nvic_icpr_addr = NVIC_BASE + NVIC_ICPR + (irq // 32) * 4
    nvic_icpr_shift = irq % 32
    if irqmap[irq] is not None:
        isr = irqmap[irq]
    else:
        isr = "c_isr%u" % irq
    f.write(("""
.thumb_func
isr%u:
push {r0, r1, lr}
mov r1, #%u
ldr r0, isr%u_fmt_str_addr
bl printf
bl %s
/* Clear Pending flag */
ldr r0, isr%u_icpr_addr
mov r1, #1
lsl r1, #%u
str r1, [r0]
pop {r0, r1, pc}
.align 2
isr%u_icpr_addr:
.word 0x%08x
isr%u_fmt_str_addr:
.word isr_fmt_str
""") % (irq, irq, irq, isr, irq, nvic_icpr_shift, irq, nvic_icpr_addr, irq))
# Shared printf format string, only needed when at least one stub exists.
if len(irqmap) > 0:
    f.write("""
isr_fmt_str:
.string "IRQ #%u\\r\\n"
""")
# Generate C source for stub IRQ handlers (ISRs)
# NOTE(review): the assembly file handle above is not closed before `f` is
# rebound here; both rely on interpreter exit to flush — TODO: close them.
f = open(args.out_c, "w")
f.write(
"""
/* This file was automatically generated by genisr.py.
 *
 * The following define stub functions that are called by the IRQ handlers
 * defined in assembly in vectors.s (generated by genvec.py).
 */
""")
f.write(
"""
#include "printf.h"
""")
# Create stub ISRs for IRQs for which no ISR func was named
for irq in irqmap:
    if irqmap[irq] is None:
        f.write(
"""
int c_isr%u (void) {
static unsigned num_invoc = 0;
void *p = 0x0;
asm ("mov %%0, lr\\n" : "=r" (p));
printf("IRQ %u (%%lu): LR %%p\\r\\n", num_invoc, p);
num_invoc++;
return(0);
}
""" % (irq, irq))
| 23.144578 | 77 | 0.539823 |
acf9af8e55670a4cbbf8db57ba11faa06a4371fe | 135 | py | Python | routes/ui/__init__.py | timb-machine-mirrors/pcf | d697a531da8c4206a6d874e689312a359446f8da | [
"MIT"
] | 2 | 2021-05-08T22:40:31.000Z | 2021-05-09T19:16:28.000Z | routes/ui/__init__.py | timb-machine-mirrors/pcf | d697a531da8c4206a6d874e689312a359446f8da | [
"MIT"
] | null | null | null | routes/ui/__init__.py | timb-machine-mirrors/pcf | d697a531da8c4206a6d874e689312a359446f8da | [
"MIT"
] | 3 | 2021-08-12T06:40:57.000Z | 2021-12-19T11:23:03.000Z | from flask import Blueprint
routes = Blueprint('routes', __name__)
from .project import *
from .struct import *
from .tools import *
| 16.875 | 38 | 0.748148 |
acf9afca133d5b5dd92ff7ffff02c324345fe7ac | 1,740 | py | Python | colour/plotting/tests/test_characterisation.py | JGoldstone/colour | 6829b363d5f0682bff0f4826995e7ceac189ff28 | [
"BSD-3-Clause"
] | null | null | null | colour/plotting/tests/test_characterisation.py | JGoldstone/colour | 6829b363d5f0682bff0f4826995e7ceac189ff28 | [
"BSD-3-Clause"
] | null | null | null | colour/plotting/tests/test_characterisation.py | JGoldstone/colour | 6829b363d5f0682bff0f4826995e7ceac189ff28 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.plotting.characterisation` module.
"""
import unittest
from matplotlib.pyplot import Axes, Figure
from colour.plotting import (
plot_single_colour_checker,
plot_multi_colour_checkers,
)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestPlotSingleColourChecker',
'TestPlotMultiColourCheckers',
]
class TestPlotSingleColourChecker(unittest.TestCase):
    """
    Unit tests for
    :func:`colour.plotting.characterisation.plot_single_colour_checker`.
    """

    def test_plot_single_colour_checker(self):
        """
        Verifies the definition returns a Matplotlib ``(Figure, Axes)`` pair.
        """
        result = plot_single_colour_checker()
        fig, ax = result[0], result[1]
        self.assertIsInstance(fig, Figure)
        self.assertIsInstance(ax, Axes)
class TestPlotMultiColourCheckers(unittest.TestCase):
    """
    Unit tests for
    :func:`colour.plotting.characterisation.plot_multi_colour_checkers`.
    """

    def test_plot_multi_colour_checkers(self):
        """
        Verifies the definition returns a Matplotlib ``(Figure, Axes)`` pair.
        """
        checkers = ['ColorChecker 1976', 'ColorChecker 2005']
        fig, ax = plot_multi_colour_checkers(checkers)
        self.assertIsInstance(fig, Figure)
        self.assertIsInstance(ax, Axes)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 26.363636 | 79 | 0.716667 |
acf9afdcc6cb6882ee07f3d96df86beac2a3f67a | 1,302 | py | Python | new_corpus/_matplotlib_marker.py | obrmmk/multiese-1 | 137f050c40553ce907c985421e0d76b51ca351f7 | [
"MIT"
] | null | null | null | new_corpus/_matplotlib_marker.py | obrmmk/multiese-1 | 137f050c40553ce907c985421e0d76b51ca351f7 | [
"MIT"
] | null | null | null | new_corpus/_matplotlib_marker.py | obrmmk/multiese-1 | 137f050c40553ce907c985421e0d76b51ca351f7 | [
"MIT"
] | 9 | 2021-11-30T02:41:05.000Z | 2022-03-17T14:55:42.000Z | import matplotlib.pyplot as plt
データ列 = [1, 2, 3]
データ列2 = [2, 3, 4]
__X__ = '.'
marker = __X__
'''
@X('.';'o';'^';'v';'<';'>';'x';'X';'s';'D';'*')
@Y(ポイント;丸;[[|上]三角|▲|△];[下三角|▽|▼];左三角;右三角;[バツ|クロス];大バツ;四角;[ダイアモンド|菱形];星)
@alt(マーカー|印)
<オプション>マーカーを__Y__に変更する
<オプション>__Y__マーカーを[使う|加える]
<オプション>__Y__マーカーを描画する
'''
plt.plot(データ列, データ列2, marker=__X__)
'''
折れ線グラフに__Y__マーカーを[使う|加える]
折れ線グラフのマーカーを__Y__[|印]にする
[データ列を|]折れ線グラフに描画して、マーカーを__Y__[印|]にする
'''
plt.plot(データ列, データ列2, marker=__X__, markerfacecolor='r')
'''
{折れ線グラフに|赤い__Y__マーカーを}描画する
折れ線グラフの__Y__マーカーを[赤くする|赤色にする]
'''
plt.plot(データ列, データ列2, marker=__X__, markerfacecolor='b')
'''
{折れ線グラフに|青い__Y__マーカーを}描画する
折れ線グラフの__Y__マーカーを[青くする|青色にする]
'''
plt.plot(データ列, データ列2, marker=__X__, markerfacecolor='k')
'''
{折れ線グラフに|黒い__Y__マーカーを}描画する
折れ線グラフの__Y__マーカーを[黒くする|黒色にする]
'''
plt.plot(データ列, データ列2, marker=__X__, markerfacecolor='y')
'''
{折れ線グラフに|黄色い__Y__マーカーを}描画する
折れ線グラフの__Y__マーカーを[黄色くする|黄色にする]
'''
plt.plot(データ列, データ列2, marker=__X__, markerfacecolor='g')
'''
{折れ線グラフに|緑色の__Y__マーカーを}描画する
折れ線グラフの__Y__マーカーを緑色にする
'''
plt.plot(データ列, データ列2, marker=__X__, markersize=n)
'''
折れ線グラフに[大きさ|サイズ]nの__Y__マーカーを描画する
折れ線グラフの__Y__マーカーの[大きさ|サイズ]をnに設定する
'''
plt.plot(データ列, データ列2, marker=__X__, markeredgewidth=n)
'''
{折れ線グラフに|線幅nの__Y__マーカーを}描画する
折れ線グラフの__Y__マーカーの線幅をnに設定する
'''
| 19.147059 | 71 | 0.705837 |
acf9b0323ad5c75b1dae73bdf8c3207c01685946 | 93 | py | Python | gui/hooks/hook-webrtcvad.py | nlpsuge/ffsubsync | 6e4b90aea72ffc0d4cc5b48a3063f30e6dc012ff | [
"MIT"
] | 4,533 | 2019-02-25T13:30:32.000Z | 2020-05-10T20:44:17.000Z | gui/hooks/hook-webrtcvad.py | nlpsuge/ffsubsync | 6e4b90aea72ffc0d4cc5b48a3063f30e6dc012ff | [
"MIT"
] | 83 | 2020-05-11T01:08:09.000Z | 2022-03-07T02:23:47.000Z | gui/hooks/hook-webrtcvad.py | nlpsuge/ffsubsync | 6e4b90aea72ffc0d4cc5b48a3063f30e6dc012ff | [
"MIT"
] | 172 | 2019-02-25T20:52:48.000Z | 2020-05-08T17:34:50.000Z | from PyInstaller.utils.hooks import copy_metadata
datas = copy_metadata('webrtcvad-wheels')
| 23.25 | 49 | 0.827957 |
acf9b0a8b4788d2c621e8dca74fc551d37e02a57 | 7,975 | py | Python | dashboard/dashboard/services/issue_tracker_service_test.py | ncalexan/catapult | d21a98f0ee0bc0394eb93922d0b274fd6ac281d5 | [
"BSD-3-Clause"
] | 1 | 2019-01-04T10:08:58.000Z | 2019-01-04T10:08:58.000Z | dashboard/dashboard/services/issue_tracker_service_test.py | Saloni-prsd/catapult | a923c2a6de79f0f209157ab09849d695a98f4470 | [
"BSD-3-Clause"
] | null | null | null | dashboard/dashboard/services/issue_tracker_service_test.py | Saloni-prsd/catapult | a923c2a6de79f0f209157ab09849d695a98f4470 | [
"BSD-3-Clause"
] | 1 | 2019-04-21T23:48:15.000Z | 2019-04-21T23:48:15.000Z | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import httplib
import json
import mock
import unittest
from apiclient import errors
from dashboard.common import testing_common
from dashboard.services import issue_tracker_service
# NOTE(review): the patch target 'services.issue_tracker_service' does not
# match the import path used in this file
# ('dashboard.services.issue_tracker_service').  Presumably the test runner
# puts the 'dashboard' directory on sys.path so that 'services' is importable
# on its own -- confirm, otherwise every test errors when the patch applies.
@mock.patch('services.issue_tracker_service.discovery.build', mock.MagicMock())
class IssueTrackerServiceTest(testing_common.TestCase):
  """Unit tests for IssueTrackerService.

  The Google API client (discovery.build) is patched out above, and the
  request helpers (_MakeCommentRequest / _ExecuteRequest) are replaced by
  mocks in each test, so no real HTTP traffic occurs.
  """

  def testAddBugComment_Basic(self):
    # A plain comment issues exactly one request with empty 'updates'.
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._MakeCommentRequest = mock.Mock()
    self.assertTrue(service.AddBugComment(12345, 'The comment'))
    self.assertEqual(1, service._MakeCommentRequest.call_count)
    service._MakeCommentRequest.assert_called_with(
        12345, {'updates': {}, 'content': 'The comment'}, send_email=True)

  def testAddBugComment_WithNoBug_ReturnsFalse(self):
    # Invalid bug ids (None or negative) are rejected without any request.
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._MakeCommentRequest = mock.Mock()
    self.assertFalse(service.AddBugComment(None, 'Some comment'))
    self.assertFalse(service.AddBugComment(-1, 'Some comment'))

  def testAddBugComment_WithOptionalParameters(self):
    # status/labels/cc_list are forwarded inside the 'updates' dict.
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._MakeCommentRequest = mock.Mock()
    self.assertTrue(service.AddBugComment(
        12345, 'Some other comment', status='Fixed',
        labels=['Foo'], cc_list=['someone@chromium.org']))
    self.assertEqual(1, service._MakeCommentRequest.call_count)
    service._MakeCommentRequest.assert_called_with(
        12345,
        {
            'updates': {
                'status': 'Fixed',
                'cc': ['someone@chromium.org'],
                'labels': ['Foo'],
            },
            'content': 'Some other comment'
        },
        send_email=True)

  def testAddBugComment_MergeBug(self):
    # merge_issue marks the bug Duplicate and records the merge target.
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._MakeCommentRequest = mock.Mock()
    self.assertTrue(service.AddBugComment(12345, 'Dupe', merge_issue=54321))
    self.assertEqual(1, service._MakeCommentRequest.call_count)
    service._MakeCommentRequest.assert_called_with(
        12345,
        {
            'updates': {
                'status': 'Duplicate',
                'mergedInto': 54321,
            },
            'content': 'Dupe'
        },
        send_email=True)

  @mock.patch('logging.error')
  def testAddBugComment_Error(self, mock_logging_error):
    # A failed request (None response) is reported as False and logged.
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._ExecuteRequest = mock.Mock(return_value=None)
    self.assertFalse(service.AddBugComment(12345, 'My bug comment'))
    self.assertEqual(1, service._ExecuteRequest.call_count)
    self.assertEqual(1, mock_logging_error.call_count)

  def testNewBug_Success_NewBugReturnsId(self):
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._ExecuteRequest = mock.Mock(return_value={'id': 333})
    response = service.NewBug('Bug title', 'body', owner='someone@chromium.org')
    bug_id = response['bug_id']
    self.assertEqual(1, service._ExecuteRequest.call_count)
    self.assertEqual(333, bug_id)

  def testNewBug_Failure_HTTPException(self):
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._ExecuteRequest = mock.Mock(
        side_effect=httplib.HTTPException('reason'))
    response = service.NewBug('Bug title', 'body', owner='someone@chromium.org')
    self.assertEqual(1, service._ExecuteRequest.call_count)
    self.assertIn('error', response)

  def testNewBug_Failure_NewBugReturnsError(self):
    # An empty response dict (no 'id') is treated as an error.
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._ExecuteRequest = mock.Mock(return_value={})
    response = service.NewBug('Bug title', 'body', owner='someone@chromium.org')
    self.assertEqual(1, service._ExecuteRequest.call_count)
    self.assertTrue('error' in response)

  def testNewBug_HttpError_NewBugReturnsError(self):
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    error_content = {
        'error': {'message': 'The user does not exist: test@chromium.org',
                  'code': 404}
    }
    service._ExecuteRequest = mock.Mock(side_effect=errors.HttpError(
        mock.Mock(return_value={'status': 404}), json.dumps(error_content)))
    response = service.NewBug('Bug title', 'body', owner='someone@chromium.org')
    self.assertEqual(1, service._ExecuteRequest.call_count)
    self.assertTrue('error' in response)

  def testNewBug_UsesExpectedParams(self):
    # With an owner the new issue starts in 'Assigned' status.
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._MakeCreateRequest = mock.Mock()
    service.NewBug('Bug title', 'body', owner='someone@chromium.org',
                   cc='somebody@chromium.org, nobody@chromium.org')
    service._MakeCreateRequest.assert_called_with(
        {
            'title': 'Bug title',
            'summary': 'Bug title',
            'description': 'body',
            'labels': [],
            'components': [],
            'status': 'Assigned',
            'projectId': 'chromium',
            'owner': {'name': 'someone@chromium.org'},
            'cc': [{'name': 'somebody@chromium.org'},
                   {'name': 'nobody@chromium.org'}],
        })

  def testNewBug_UsesExpectedParamsSansOwner(self):
    # Without an owner the new issue starts in 'Unconfirmed' status.
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    service._MakeCreateRequest = mock.Mock()
    service.NewBug('Bug title', 'body',
                   cc='somebody@chromium.org,nobody@chromium.org')
    service._MakeCreateRequest.assert_called_with(
        {
            'title': 'Bug title',
            'summary': 'Bug title',
            'description': 'body',
            'labels': [],
            'components': [],
            'status': 'Unconfirmed',
            'projectId': 'chromium',
            'cc': [{'name': 'somebody@chromium.org'},
                   {'name': 'nobody@chromium.org'}],
        })

  def testMakeCommentRequest_UserCantOwn_RetryMakeCommentRequest(self):
    # A 400 'owner must be a project member' triggers one retry.
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    error_content = {
        'error': {'message': 'Issue owner must be a project member',
                  'code': 400}
    }
    service._ExecuteRequest = mock.Mock(side_effect=errors.HttpError(
        mock.Mock(return_value={'status': 404}), json.dumps(error_content)))
    service.AddBugComment(12345, 'The comment', owner=['test@chromium.org'])
    self.assertEqual(2, service._ExecuteRequest.call_count)

  def testMakeCommentRequest_UserDoesNotExist_RetryMakeCommentRequest(self):
    # A 404 'user does not exist' also triggers one retry.
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    error_content = {
        'error': {'message': 'The user does not exist: test@chromium.org',
                  'code': 404}
    }
    service._ExecuteRequest = mock.Mock(side_effect=errors.HttpError(
        mock.Mock(return_value={'status': 404}), json.dumps(error_content)))
    service.AddBugComment(12345, 'The comment', cc_list=['test@chromium.org'],
                          owner=['test@chromium.org'])
    self.assertEqual(2, service._ExecuteRequest.call_count)

  def testMakeCommentRequest_IssueDeleted_ReturnsTrue(self):
    # A 403 on a deleted/hidden issue is treated as success (no retry).
    service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
    error_content = {
        'error': {'message': 'User is not allowed to view this issue 12345',
                  'code': 403}
    }
    service._ExecuteRequest = mock.Mock(side_effect=errors.HttpError(
        mock.Mock(return_value={'status': 403}), json.dumps(error_content)))
    comment_posted = service.AddBugComment(12345, 'The comment',
                                           owner='test@chromium.org')
    self.assertEqual(1, service._ExecuteRequest.call_count)
    self.assertEqual(True, comment_posted)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 42.420213 | 80 | 0.68 |
acf9b149f23fa27125913c1e9515e53035ab5bc6 | 4,766 | py | Python | echovr_api/player.py | egret85/echovr-api | e135f25fb5b188e2931133d04c47c5e66e83a6c5 | [
"MIT"
] | 7 | 2018-11-02T18:12:18.000Z | 2021-03-08T10:47:59.000Z | echovr_api/player.py | egret85/echovr-api | e135f25fb5b188e2931133d04c47c5e66e83a6c5 | [
"MIT"
] | null | null | null | echovr_api/player.py | egret85/echovr-api | e135f25fb5b188e2931133d04c47c5e66e83a6c5 | [
"MIT"
] | 4 | 2018-11-02T18:12:08.000Z | 2020-06-19T19:42:39.000Z | from typing import List
from echovr_api.stats import Stats
from echovr_api.geometry import Vector3D
class Player():
    """Represents the state of a single player in the current game.

    Instances are built directly from Echo VR API data; see the Echo VR API
    documentation for full details on each field:

    https://github.com/Ajedi32/echovr_api_docs#teamsplayers

    :param name: The username of the player.
    :param playerid: ID of the player within the current game session.
    :param userid: Unique ID of the player across all game sessions.
    :param level: The player's experience "level" (1-50).
    :param number: The number the player chose in the customization room.
    :param possession: Whether this player currently holds the disk.
    :param stunned: Whether the player is currently stunned.
    :param blocking: Whether the player is currently blocking.
    :param invulnerable: Whether the player is currently immune to stuns.
    :param position: Position of the player's head within the arena.
    :param velocity: Speed and direction of movement of the player.
    :param lhand: Position of the player's left hand.
    :param rhand: Position of the player's right hand.
    :param forward: Direction the player's head is facing.
    :param left: Direction the left side of the player's head is facing.
    :param up: Direction the top of the player's head is facing.
    :param stats: Dict used to instantiate the player's current stats.
    """

    def __init__(self, name: str = "", playerid: int = None, userid: int = None,
                 level: int = 0, number: int = 0,
                 possession: bool = False, stunned: bool = False,
                 blocking: bool = False, invulnerable: bool = False,
                 position: List[float] = None,
                 velocity: List[float] = None, lhand: List[float] = None,
                 rhand: List[float] = None, forward: List[float] = None,
                 left: List[float] = None, up: List[float] = None,
                 stats: dict = {}):
        # Identity and progression.
        self.name = name          #: username of the player
        self.playerid = playerid  #: id within the current game session
        self.userid = userid      #: unique id across all game sessions
        self.level = level        #: experience "level" (1-50)
        self.number = number      #: number chosen in the customization room

        # Boolean state flags.
        self.possession = possession      #: currently holds the disk
        self.stunned = stunned            #: currently stunned
        self.blocking = blocking          #: currently blocking
        self.invulnerable = invulnerable  #: currently immune to stuns

        # Spatial state: wrap each raw component triple in a Vector3D.
        spatial = {
            'position': position,  # head position in the arena
            'velocity': velocity,  # speed + direction of movement
            'lhand': lhand,        # left hand position
            'rhand': rhand,        # right hand position
            'forward': forward,    # facing direction of the head
            'left': left,          # direction of the head's left side
            'up': up,              # direction of the head's top side
        }
        for attr, components in spatial.items():
            setattr(self, attr, Vector3D(*components))

        #: The :class:`~.Stats` object for this player.
        self.stats = Stats(**stats)

    @property
    def username(self):
        """The username of the player (alias of :attr:`name`)."""
        return self.name
| 38.128 | 80 | 0.629878 |
acf9b19400aa46fd9c24d3ba152e6de754ebffb1 | 5,800 | py | Python | weibo.py | Drelf2018/weibo_detector | a5cabf14219ece3b3aa60ac0850772c105c0f958 | [
"MIT"
] | 1 | 2022-01-04T12:22:52.000Z | 2022-01-04T12:22:52.000Z | weibo.py | Drelf2018/weibo_detector | a5cabf14219ece3b3aa60ac0850772c105c0f958 | [
"MIT"
] | null | null | null | weibo.py | Drelf2018/weibo_detector | a5cabf14219ece3b3aa60ac0850772c105c0f958 | [
"MIT"
] | null | null | null | import re
import requests
from lxml import etree
# Request headers for weibo.cn -- fill in the cookie for your own account
# (set later by Weibo.__init__ via headers.update).
headers = {
    'Connection': 'keep-alive',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36',
}
class Weibo():
    """
    Scraper for weibo.cn.

    Args:
        uid (list) : the blogger's user uid
            (NOTE(review): despite the declared type, it is used as an
            int/str -- int(uid) or string concatenation; confirm callers)
        cookies (str) : the user's cookies
    """
    def __init__(self, uid, cookies):
        # Numeric uid -> /u/<uid> profile URL; otherwise treat it as a
        # screen name and use the /n/<name> URL.
        try:
            self.uid = int(uid)
            self.url = 'https://weibo.cn/u/'+str(uid)
        except Exception:
            self.url = 'https://weibo.cn/n/'+uid
        headers.update({'cookie': cookies})
        self.resp = None  # last HTTP response for the profile page
        self.data = None  # parsed lxml tree of the profile page
        self.comt = []    # links collected by comment()
        self.update()
    def get_headers(self):
        return headers
    def update(self):
        # Re-fetch the profile page and re-parse it.
        if not self.url:
            raise Exception('URL is Empty')
        else:
            try:
                self.resp = requests.get(self.url, headers=headers)
            except Exception as e:
                raise e
        self.data = etree.HTML(self.resp.text.encode('utf-8'))
    def get_user_info(self):
        # Fetch the blogger's current profile information from the m.weibo.cn API.
        resp = requests.get('https://m.weibo.cn/api/container/getIndex?type=uid&value=%d' % self.uid, headers=headers)
        data = resp.json()['data']['userInfo']
        info = {
            'name': data['screen_name'], # nickname
            'face': data['toolbar_menus'][0]['userInfo']['avatar_hd'], # avatar
            'desc': data['description'], # bio / signature
            'foll': data['follow_count'], # following count (str)
            'foer': data['followers_count'] # follower count (str)
        }
        return info
    def get_post(self, n: int):
        """
        Scrape the post at the given position.

        Args:
            n (int) : the n-th post from the top /* pinned post included */

        Returns:
            Post info dict.
        """
        if self.data is None:
            raise Exception('Update First')
        try:
            post = self.data.xpath('//div[@class="c"][{}]'.format(n))[0]
        except Exception as e:
            raise Exception('Error happened when n = %d %s' % (n, e))
        info = {
            'Top': 1 if post.xpath('.//span[@class="kt"]') else 0, # whether the post is pinned
            'Mid': post.xpath('.//@id')[0][2:], # this post's mid -- unique per post
            'repo': ''.join(post.xpath('./div/span[@class="cmt" and contains(text(), "转发理由:")]/../text()')).replace('\xa0', '')
        }
        def get_content_text(span):
            # Render the content span to text: inline emoji <img> alts,
            # strip <a> markup, then trim trailing spaces.
            text = etree.tostring(span, encoding='utf-8').decode('utf-8')
            for _img in span.xpath('./span[@class="url-icon"]/img'):
                alt, src = _img.xpath('./@alt')[0], _img.xpath('./@src')[0]
                text = text.replace(
                    f'<span class="url-icon"><img alt="{alt}" src="{src}" style="width:1em; height:1em;" /></span>',
                    alt
                )
            for _a in span.xpath('.//a'):
                href = _a.xpath('./@href')[0].replace('&amp;', '&')
                atext = _a.xpath('./text()')[0]
                text = text.replace(f'<a href="{href}">{atext}</a>', atext)
            text = text.replace('<br />', '\n').replace('<span class="ctt">', '').replace('</span>', '')
            # Drop trailing spaces left by the HTML serialization.
            dot = len(text)
            for i in range(dot, 0, -1):
                if not text[i-1] == ' ':
                    dot = i
                    break
            return text[:dot]
        # Post is truncated ("full text" link present): fetch the dedicated
        # full-text page instead of the snippet on the profile page.
        murl = post.xpath('.//a[contains(text(), "全文")]/@href')
        if murl:
            resp = requests.get('https://weibo.cn'+murl[0], headers=headers)
            data = etree.HTML(resp.text.encode('utf-8'))
            span = data.xpath('//div[@class="c" and @id="M_"]/div/span')[0]
            info['text'] = get_content_text(span)[1:]
            if info['repo']:
                info['text'] = f'转发了 {span.xpath("../a/text()")[0]} 的微博:\n' + info['text']
        else:
            span = post.xpath('./div/span[@class="ctt"]')[0]
            info['text'] = get_content_text(span)
            if info['repo']:
                info['text'] = ''.join(span.xpath('../span[@class="cmt"][1]//text()')) + '\n' + info['text']
        # Scrape images attached to the post (photo-set page or single
        # original-image link).
        pics = re.findall(r'组图共\d张', '/'.join(post.xpath('.//text()')))
        if pics:
            info['text'] = info['text'][:-1]
            turl = post.xpath(f'.//a[contains(text(), "{pics[0]}")]/@href')[0]
            resp = requests.get(turl, headers=headers)
            data = etree.HTML(resp.text.encode('utf-8'))
            info['PicAll'] = [('https://weibo.cn/' + url) for url in data.xpath('.//a[contains(text(), "原图")]/@href')]
        else:
            opic = post.xpath('.//a[contains(text(), "原图")]/@href')
            if opic:
                info['PicOri'] = opic[0]
        # Separate the remaining metadata (timestamp line) from the body.
        info['Time'] = post.xpath('./div/span[@class="ct"]/text()')[0]
        return info
    def comment(self, mid, count):
        # Unused: collects links found in the comments of post `mid`;
        # a negative `count` means "no limit".
        self.comt = []
        total = 0
        url = 'https://weibo.cn/comment/' + mid
        params = {'page': 1}
        while total < count or count < 0:
            resp = requests.get(url, headers=headers, params=params)
            data = etree.HTML(resp.text.encode('utf-8'))
            comts = data.xpath('//div[@class="c" and @id and not(@id="M_")]')
            for comt in comts:
                for a in comt.xpath('./span[@class="ctt"]/a'):
                    href = a.xpath("./@href")[0]
                    if re.search(r'[a-zA-z]+://[^\s]*', href):
                        self.comt.append(href)
                total += 1
            params['page'] += 1
| 36.942675 | 129 | 0.483448 |
acf9b329991600670ddf7ca39b99072fd2627248 | 46,432 | py | Python | pyNastran/bdf/cards/test/test_shells.py | JohannesSeidel/pyNastran | 91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf | [
"BSD-3-Clause"
] | null | null | null | pyNastran/bdf/cards/test/test_shells.py | JohannesSeidel/pyNastran | 91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf | [
"BSD-3-Clause"
] | null | null | null | pyNastran/bdf/cards/test/test_shells.py | JohannesSeidel/pyNastran | 91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf | [
"BSD-3-Clause"
] | null | null | null | """defines various shell element tests"""
import os
from io import StringIO
import unittest
import numpy as np
from numpy import array
from cpylog import get_logger
from pyNastran.bdf.bdf import PCOMP, MAT1, BDF
from pyNastran.bdf.cards.materials import get_mat_props_S
from pyNastran.bdf.cards.test.utils import save_load_deck
from pyNastran.bdf.mesh_utils.mass_properties import mass_properties_nsm
# matplotlib (and the lamina-plotting helper that depends on it) is optional;
# IS_MATPLOTLIB gates the tests that exercise plotting.
try:
    import matplotlib
    IS_MATPLOTLIB = True
    from pyNastran.bdf.cards.elements.plot import plot_equivalent_lamina_vs_theta
except ImportError:
    IS_MATPLOTLIB = False
class TestShells(unittest.TestCase):
def test_pshell(self):
log = get_logger(level='warning')
model = BDF(log=log)
pid = 10
pshell = model.add_pshell(pid, mid1=1, mid2=2, mid3=3, mid4=4, tst=3.14)
assert ' 3.14' in pshell.rstrip(), pshell.rstrip()
    def _make_cquad4(self, model, rho, nu, G, E, t, nsm):
        """Builds a one-element CQUAD4/PSHELL/MAT1 model into ``model`` and
        checks mass, area, thickness and offsets.

        The 2x1 rectangular quad gives A=2.0; expected mass is
        A * (t * rho + nsm).
        """
        eid = 10
        pid = 20
        mid = 30
        n1 = 1
        n2 = 2
        n3 = 3
        n4 = 4
        A = 2.
        z0_elem = 0.1
        mid2 = mid3 = mid4 = theta_mcid = twelveIt3 = tst = z1 = z2 = None
        #z0_prop = None
        mass = A * (t * rho + nsm)
        cards = [
            ['grid', n1, 0, 0., 0., 0.],
            ['grid', n2, 0, 2., 0., 0.],
            ['grid', n3, 0, 2., 1., 0.],
            ['grid', n4, 0, 0., 1., 0.],
            ['cquad4', eid, pid, n1, n2, n3, n4, theta_mcid, z0_elem],
            ['pshell', pid, mid, t, mid2, twelveIt3, mid3, tst, nsm, z1, z2],
            ['mat1', mid, E, G, nu, rho],
        ]
        for fields in cards:
            model.add_card(fields, fields[0], is_list=True)
        model.validate()
        # verify/mass-properties both before and after cross-referencing
        model._verify_bdf(xref=False)
        model.mass_properties_no_xref()
        model.cross_reference()
        model.mass_properties()
        model._verify_bdf(xref=True)
        cquad4 = model.Element(eid)
        cquad4.get_edge_axes()
        cquad4.center_of_mass()
        pshell = model.Property(pid)
        node_ids = cquad4.node_ids
        assert node_ids == [n1, n2, n3, n4], node_ids
        # cquad4 / pshell
        self.assertEqual(cquad4.eid, eid)
        self.assertEqual(cquad4.Pid(), pid)
        self.assertEqual(cquad4.Mid(), mid)
        self.assertEqual(cquad4.Nsm(), nsm)
        self.assertEqual(cquad4.Mass(), mass)
        self.assertAlmostEqual(cquad4.MassPerArea(), mass / A)
        self.assertEqual(cquad4.Area(), A)
        self.assertEqual(cquad4.Thickness(), t)
        self.assertEqual(cquad4.zoffset, z0_elem)
        self.assertEqual(pshell.z1, -t/2.)
        #self.assertEqual(cquad4.Rho(), rho)  # removed because of PCOMP
    def _make_ctria3(self, model, rho, nu, G, E, t, nsm):
        """Builds two CTRIA3s into ``model`` -- one on a PSHELL, one on a
        4-ply PCOMP (plies t, 2t, 3t, 4t at 0/30/60/90 deg) -- and checks
        mass, area, thickness and per-ply PCOMP accessors (including the
        IndexError behavior for out-of-range ply indices).

        The right triangle (4 x 1 legs) gives A = 2.0.
        """
        eid = 10
        pid = 20
        mid = 30
        n1 = 1
        n2 = 2
        n3 = 3
        mid2 = mid3 = mid4 = theta_mcid = twelveIt3 = tst = z1 = z2 = None
        z0_elem = 0.1
        z0_prop = sb = ft = tref = ge = lam = None
        sout = None
        theta0 = 0.
        theta1 = 30.
        theta2 = 60.
        theta3 = 90.
        A = 2.
        cards = [
            ['grid', n1, 0, 0., 0., 0.],
            ['grid', n2, 0, 4., 0., 0.],
            ['grid', n3, 0, 4., 1., 0.],
            ['ctria3', eid, pid, n1, n2, n3, theta_mcid, z0_elem], # A = 1/2 * 4 * 1 = 2.
            ['pshell', pid, mid, t, mid2, twelveIt3, mid3, tst, nsm, z1, z2, mid4],
            ['ctria3', eid + 1, pid + 1, n1, n2, n3, theta_mcid, z0_elem], # A = 1/2 * 4 * 1 = 2.
            [
                'pcomp', pid + 1, z0_prop, nsm, sb, ft, tref, ge, lam,
                mid, t, theta0, sout,
                mid, 2 * t, theta1, sout,
                mid, 3 * t, theta2, sout,
                mid, 4 * t, theta3, sout,
            ],
            ['mat1', mid, E, G, nu, rho],
        ]
        for fields in cards:
            model.add_card(fields, fields[0], is_list=True)
        model.validate()
        model._verify_bdf(xref=False)
        model.cross_reference()
        model._verify_bdf(xref=True)
        # ctria3 / pshell
        ctria3 = model.Element(eid)
        ctria3.get_edge_axes()
        ctria3.center_of_mass()
        node_ids = ctria3.node_ids
        assert node_ids == [n1, n2, n3], node_ids
        mass = A * (t * rho + nsm)
        self.assertEqual(ctria3.eid, eid)
        self.assertEqual(ctria3.Pid(), pid)
        self.assertEqual(ctria3.Mid(), mid)
        self.assertEqual(ctria3.Nsm(), nsm)
        self.assertEqual(ctria3.Mass(), mass)
        self.assertAlmostEqual(ctria3.MassPerArea(), mass / A)
        self.assertEqual(ctria3.Area(), A)
        self.assertEqual(ctria3.Thickness(), t)
        self.assertEqual(ctria3.MassPerArea(), mass / A)
        self.assertEqual(ctria3.zoffset, z0_elem)
        ctria3.raw_fields()
        # removed because of PCOMP
        # also no E, G, J, Nu, for the same reason
        # what about Mid
        #self.assertEqual(ctria3.Rho(), rho)
        # pshell
        pshell = model.Property(pid)
        self.assertEqual(pshell.Pid(), pid)
        self.assertEqual(pshell.Mid(), mid)
        self.assertEqual(pshell.Nsm(), nsm)
        self.assertEqual(pshell.Thickness(), t)
        self.assertEqual(pshell.Rho(), rho)
        self.assertEqual(pshell.z1, -t / 2.)
        self.assertEqual(pshell.z2, t / 2.)
        # ctria3 / pcomp -- total laminate thickness is t+2t+3t+4t = 10t
        ctria3 = model.Element(eid + 1)
        mass = A * (10 * t * rho + nsm)
        self.assertEqual(ctria3.eid, eid + 1)
        self.assertEqual(ctria3.Pid(), pid + 1)
        #self.assertEqual(ctria3.Mid(), mid)
        self.assertEqual(ctria3.Nsm(), nsm)
        self.assertAlmostEqual(ctria3.Mass(), mass)
        self.assertAlmostEqual(ctria3.MassPerArea(), mass / A)
        self.assertEqual(ctria3.Area(), A)
        self.assertEqual(ctria3.Thickness(), 10 * t)
        #self.assertEqual(ctria3.Rho(), rho)
        # pcomp -- per-ply accessors; out-of-range indices raise IndexError
        pcomp = model.Property(pid + 1)
        self.assertEqual(pcomp.Pid(), pid + 1)
        self.assertEqual(pcomp.nplies, 4)
        self.assertEqual(pcomp.Mid(0), mid)
        self.assertEqual(pcomp.Nsm(), nsm)
        with self.assertRaises(IndexError):
            self.assertEqual(pcomp.Mid(-1), mid)
        self.assertEqual(pcomp.Mids(), [mid] * 4)
        self.assertEqual(pcomp.Mid(0), mid)
        self.assertEqual(pcomp.Mid(1), mid)
        self.assertEqual(pcomp.Mid(2), mid)
        self.assertEqual(pcomp.Mid(3), mid)
        with self.assertRaises(IndexError):
            self.assertEqual(pcomp.Mid(4), mid)
        with self.assertRaises(IndexError):
            self.assertEqual(pcomp.Thickness(-1), t)
        self.assertEqual(pcomp.Thickness(), 10 * t)
        self.assertEqual(pcomp.Thickness(0), t)
        self.assertEqual(pcomp.Thickness(1), 2 * t)
        self.assertAlmostEqual(pcomp.Thickness(2), 3 * t, places=8)  # 0.3
        self.assertEqual(pcomp.Thickness(3), 4 * t)
        with self.assertRaises(IndexError):
            self.assertEqual(pcomp.Thickness(4), 5*t)
        with self.assertRaises(IndexError):
            self.assertEqual(pcomp.Rho(-1), rho)
        self.assertEqual(pcomp.Rho(0), rho)
        self.assertEqual(pcomp.Rho(1), rho)
        self.assertEqual(pcomp.Rho(2), rho)
        self.assertEqual(pcomp.Rho(3), rho)
        with self.assertRaises(IndexError):
            self.assertEqual(pcomp.Rho(4), rho)
        with self.assertRaises(IndexError):
            self.assertEqual(pcomp.Theta(-1), 0.)
        self.assertEqual(pcomp.Theta(0), 0.)
        self.assertEqual(pcomp.Theta(1), 30.)
        self.assertEqual(pcomp.Theta(2), 60.)
        self.assertEqual(pcomp.Theta(3), 90.)
        with self.assertRaises(IndexError):
            self.assertEqual(pcomp.Theta(4), rho)
        self.assertEqual(pcomp.z0, -10*t/2.)
def test_pshell_01(self):
"""tests a CQUAD4 and a PSHELL"""
rho = 0.1
nu = 0.3
G = None
E = 1e7
t = 0.3
nsm = 0.0
model = BDF(debug=False)
self._make_cquad4(model, rho, nu, G, E, t, nsm)
model = BDF(debug=False)
self._make_ctria3(model, rho, nu, G, E, t, nsm)
nsm = 1.0
model = BDF(debug=False)
self._make_cquad4(model, rho, nu, G, E, t, nsm)
model = BDF(debug=False)
self._make_ctria3(model, rho, nu, G, E, t, nsm)
def test_cquad4_01(self):
log = get_logger(level='warning')
model = BDF(log=log)
eid = 10
pid = 20
mid = 30
n1 = 1
n2 = 2
n3 = 3
n4 = 4
n5 = 5
n6 = 6
#A = 2.
t = rho = nsm = E = G = nu = 0.1
mid2 = mid3 = mid4 = twelveIt3 = tst = z1 = z2 = None
#mass = A * (t * rho + nsm)
cards = [
['grid', n1, 0, 0., 0., 0.],
['grid', n2, 0, 2., 0., 0.],
['grid', n3, 0, 2., 1., 0.],
['grid', n4, 0, 0., 1., 0.],
['grid', n5, 0, 0., 0., 0.],
['grid', n6, 0, 2., 0., 0.],
['cquad4', eid, pid, n1, n2, n3, n4],
['cquad4', eid+1, pid, n5, n6, n3, n4],
['pshell', pid, mid, t, mid2, twelveIt3, mid3, tst, nsm, z1, z2],
['mat1', mid, E, G, nu, rho],
]
for fields in cards:
model.add_card(fields, fields[0], is_list=True)
# get node IDs without cross referencing
eids = [10]
nids = model.get_node_ids_with_elements(eids)
assert nids == {1, 2, 3, 4}, nids
eids = [11]
nids = model.get_node_ids_with_elements(eids)
assert nids == {3, 4, 5, 6}, nids
eids = [10, 11]
nids = model.get_node_ids_with_elements(eids)
assert nids == {1, 2, 3, 4, 5, 6}, nids
params = [
('T', 1.0),
(6, 2.0), # 12I/T3
(8, 3.0), # 'TST'
]
make_dvprel_optimization(model, params, 'PSHELL', pid)
# get node IDs with cross referencing
model.cross_reference()
model.update_model_by_desvars(xref=True)
eids = [10]
nids = model.get_node_ids_with_elements(eids)
assert nids == {1, 2, 3, 4}, nids
eids = [11]
nids = model.get_node_ids_with_elements(eids)
assert nids == {3, 4, 5, 6}, nids
eids = [10, 11]
nids = model.get_node_ids_with_elements(eids)
assert nids == {1, 2, 3, 4, 5, 6}, nids
save_load_deck(model)
def test_pcomp_01(self):
    """asymmetrical PCOMP: ply accessors, Rho/MassPerArea and z-locations with nsm=0.0 and nsm=1.0"""
    # op2 data layout consumed by PCOMP.add_op2_data:
    #self.pid = data[0]
    #self.z0 = data[1]
    #self.nsm = data[2]
    #self.sb = data[3]
    #self.ft = data[4]
    #self.tref = data[5]
    #self.ge = data[6]
    #self.lam = data[7]
    #Mid = data[8]
    #T = data[9]
    #Theta = data[10]
    #Sout = data[11]
    pid = 1
    z0 = 0.
    nsm = 0.
    sb = 0.
    ft = 0.
    tref = 0.
    ge = 0.
    lam = 'NO'  # is_symmetrical YES/NO
    Mid = [1, 2, 3]
    theta = [0., 10., 20.]
    T = [.1, .2, .3]
    sout = [1, 1, 0]  # 0-NO, 1-YES
    data = [pid, z0, nsm, sb, ft, tref, ge, lam, Mid, T, theta, sout]
    p = PCOMP.add_op2_data(data)
    self.assertFalse(p.is_symmetrical())
    self.assertEqual(p.nplies, 3)

    # per-ply thickness; indexing past the last ply must raise
    self.assertAlmostEqual(p.Thickness(), 0.6)
    self.assertAlmostEqual(p.Thickness(0), 0.1)
    self.assertAlmostEqual(p.Thickness(1), 0.2)
    self.assertAlmostEqual(p.Thickness(2), 0.3)
    with self.assertRaises(IndexError):
        p.Thickness(3)

    # per-ply orientation angle
    self.assertAlmostEqual(p.Theta(0), 0.)
    self.assertAlmostEqual(p.Theta(1), 10.)
    self.assertAlmostEqual(p.Theta(2), 20.)
    with self.assertRaises(IndexError):
        p.Theta(3)

    # per-ply material id
    self.assertEqual(p.Mid(0), 1)
    self.assertEqual(p.Mid(1), 2)
    self.assertEqual(p.Mid(2), 3)
    with self.assertRaises(IndexError):
        p.Mid(3)
    self.assertEqual(p.Mids(), [1, 2, 3])

    # per-ply stress/strain output flags (1 -> 'YES', 0 -> 'NO')
    self.assertEqual(p.sout(0), 'YES')
    self.assertEqual(p.sout(1), 'YES')
    self.assertEqual(p.sout(2), 'NO')
    with self.assertRaises(IndexError):
        p.sout(3)

    # material...
    # op2 data layout consumed by MAT1.add_op2_data:
    #self.mid = data[0]
    #self.e = data[1]
    #self.g = data[2]
    #self.nu = data[3]
    #self.rho = data[4]
    #self.a = data[5]
    #self.tref = data[6]
    #self.ge = data[7]
    #self.St = data[8]
    #self.Sc = data[9]
    #self.Ss = data[10]
    #self.mcsid = data[11]
    mid = 1
    E = None
    G = None
    nu = None
    rho = 1.0
    a = None
    St = None
    Sc = None
    Ss = None
    mcsid = None
    # E, G and nu all None is an invalid MAT1 and must raise
    mat1 = [mid, E, G, nu, rho, a, tref, ge, St, Sc, Ss, mcsid]
    with self.assertRaises(ValueError):
        m = MAT1.add_op2_data(mat1)
    G = 42.
    mat1 = [mid, E, G, nu, rho, a, tref, ge, St, Sc, Ss, mcsid]
    m = MAT1.add_op2_data(mat1)

    # manually cross-reference each ply to the MAT1 so Rho()/MassPerArea() work
    for iply in range(len(p.plies)):
        mid = p.plies[iply][0]
        p.mids[iply] = m  # MAT1
        #p.mids = [m, m, m]
    p.mids_ref = p.mids

    #Rho
    self.assertAlmostEqual(p.Rho(0), 1.0)
    self.assertAlmostEqual(p.Rho(1), 1.0)
    self.assertAlmostEqual(p.Rho(2), 1.0)
    with self.assertRaises(IndexError):
        p.Rho(3)

    # MassPerArea
    self.assertAlmostEqual(p.MassPerArea(), 0.6)
    self.assertAlmostEqual(p.MassPerArea(0), 0.1)
    self.assertAlmostEqual(p.MassPerArea(1), 0.2)
    self.assertAlmostEqual(p.MassPerArea(2), 0.3)
    with self.assertRaises(IndexError):
        p.MassPerArea(3)

    #----------------------
    # change the nsm to 1.0
    p.nsm = 1.0
    self.assertEqual(p.Nsm(), 1.0)
    # MassPerArea with each supported NSM distribution method
    self.assertAlmostEqual(p.MassPerArea(), 1.6)
    self.assertAlmostEqual(p.MassPerArea(0, method='nplies'), 0.1+1/3.)
    self.assertAlmostEqual(p.MassPerArea(1, method='nplies'), 0.2+1/3.)
    self.assertAlmostEqual(p.MassPerArea(2, method='nplies'), 0.3+1/3.)
    self.assertAlmostEqual(p.MassPerArea(0, method='rho*t'), 0.1+1/6.)
    self.assertAlmostEqual(p.MassPerArea(1, method='rho*t'), 0.2+2/6.)
    self.assertAlmostEqual(p.MassPerArea(2, method='rho*t'), 0.3+3/6.)
    self.assertAlmostEqual(p.MassPerArea(0, method='t'), 0.1+1/6.)
    self.assertAlmostEqual(p.MassPerArea(1, method='t'), 0.2+2/6.)
    self.assertAlmostEqual(p.MassPerArea(2, method='t'), 0.3+3/6.)
    with self.assertRaises(IndexError):
        p.MassPerArea(3, method='nplies')

    # ply interface z-locations are cumulative thicknesses measured from z0
    z = p.get_z_locations()
    z_expected = array([0., T[0], T[0]+T[1], T[0]+T[1]+T[2]])
    for za, ze in zip(z, z_expected):
        self.assertAlmostEqual(za, ze)

    #z0 =
    # shifting z0 shifts every interface location by the same amount
    p.z0 = 1.0
    z_expected = 1.0 + z_expected
    z = p.get_z_locations()
    for za, ze in zip(z, z_expected):
        self.assertAlmostEqual(za, ze)
def test_pcomp_02(self):
    """symmetrical PCOMP (lam='SYM'): mirrored ply accessors with nsm=0.0 and nsm=1.0"""
    pid = 1
    z0 = 0.
    nsm = 0.
    sb = 0.
    ft = 0.
    tref = 0.
    ge = 0.
    lam = 'SYM'  # is_symmetrical SYM
    Mid = [1, 2, 3]
    theta = [0., 10., 20.]
    T = [.1, .2, .3]
    sout = [1, 1, 0]  # 0-NO, 1-YES
    data = [pid, z0, nsm, sb, ft, tref, ge, lam, Mid, T, theta, sout]
    p = PCOMP.add_op2_data(data)
    self.assertTrue(p.is_symmetrical())
    # 3 defined plies mirror into 6 effective plies
    self.assertEqual(p.nplies, 6)

    # thicknesses mirror about the midplane: [.1, .2, .3, .3, .2, .1]
    self.assertAlmostEqual(p.Thickness(), 1.2)
    self.assertAlmostEqual(p.Thickness(0), 0.1)
    self.assertAlmostEqual(p.Thickness(1), 0.2)
    self.assertAlmostEqual(p.Thickness(2), 0.3)
    self.assertAlmostEqual(p.Thickness(3), 0.3)
    self.assertAlmostEqual(p.Thickness(4), 0.2)
    self.assertAlmostEqual(p.Thickness(5), 0.1)
    with self.assertRaises(IndexError):
        p.Thickness(6)

    # angles mirror the same way
    self.assertAlmostEqual(p.Theta(0), 0.)
    self.assertAlmostEqual(p.Theta(1), 10.)
    self.assertAlmostEqual(p.Theta(2), 20.)
    self.assertAlmostEqual(p.Theta(3), 20.)
    self.assertAlmostEqual(p.Theta(4), 10.)
    self.assertAlmostEqual(p.Theta(5), 0.)
    with self.assertRaises(IndexError):
        p.Theta(6)

    # material ids mirror
    self.assertEqual(p.Mid(0), 1)
    self.assertEqual(p.Mid(1), 2)
    self.assertEqual(p.Mid(2), 3)
    self.assertEqual(p.Mid(3), 3)
    self.assertEqual(p.Mid(4), 2)
    self.assertEqual(p.Mid(5), 1)
    with self.assertRaises(IndexError):
        p.Mid(6)
    self.assertEqual(p.Mids(), [1, 2, 3, 3, 2, 1])

    # sout flags mirror
    self.assertEqual(p.sout(0), 'YES')
    self.assertEqual(p.sout(1), 'YES')
    self.assertEqual(p.sout(2), 'NO')
    self.assertEqual(p.sout(3), 'NO')
    self.assertEqual(p.sout(4), 'YES')
    self.assertEqual(p.sout(5), 'YES')
    with self.assertRaises(IndexError):
        p.sout(6)

    mid = 1
    E = None
    G = None
    nu = None
    rho = 1.0
    a = None
    St = None
    Sc = None
    Ss = None
    mcsid = None
    # E, G and nu all None is an invalid MAT1 and must raise
    mat1 = [mid, E, G, nu, rho, a, tref, ge, St, Sc, Ss, mcsid]
    with self.assertRaises(ValueError):
        m = MAT1.add_op2_data(mat1)
    G = 42.
    mat1 = [mid, E, G, nu, rho, a, tref, ge, St, Sc, Ss, mcsid]
    m = MAT1.add_op2_data(mat1)

    # manually cross-reference each ply to the MAT1 so Rho()/MassPerArea() work
    for iply in range(len(p.plies)):
        mid = p.plies[iply][0]
        p.mids[iply] = m  # MAT1
    p.mids_ref = p.mids

    #Rho
    self.assertAlmostEqual(p.Rho(0), 1.0)
    self.assertAlmostEqual(p.Rho(1), 1.0)
    self.assertAlmostEqual(p.Rho(2), 1.0)
    self.assertAlmostEqual(p.Rho(3), 1.0)
    self.assertAlmostEqual(p.Rho(4), 1.0)
    self.assertAlmostEqual(p.Rho(5), 1.0)
    with self.assertRaises(IndexError):
        p.Rho(6)

    # MassPerArea
    self.assertAlmostEqual(p.MassPerArea(), 1.2)
    self.assertAlmostEqual(p.MassPerArea(0), 0.1)
    self.assertAlmostEqual(p.MassPerArea(1), 0.2)
    self.assertAlmostEqual(p.MassPerArea(2), 0.3)
    self.assertAlmostEqual(p.MassPerArea(3), 0.3)
    self.assertAlmostEqual(p.MassPerArea(4), 0.2)
    self.assertAlmostEqual(p.MassPerArea(5), 0.1)
    with self.assertRaises(IndexError):
        p.MassPerArea(6)
    self.assertEqual(p.Nsm(), 0.0)

    #----------------------
    # change the nsm to 1.0
    p.nsm = 1.0
    self.assertEqual(p.Nsm(), 1.0)
    # MassPerArea; nsm is spread evenly over the 6 effective plies
    self.assertAlmostEqual(p.MassPerArea(), 2.2)
    self.assertAlmostEqual(p.MassPerArea(0, method='nplies'), 0.1+1/6.)
    self.assertAlmostEqual(p.MassPerArea(1, method='nplies'), 0.2+1/6.)
    self.assertAlmostEqual(p.MassPerArea(2, method='nplies'), 0.3+1/6.)
    self.assertAlmostEqual(p.MassPerArea(3, method='nplies'), 0.3+1/6.)
    self.assertAlmostEqual(p.MassPerArea(4, method='nplies'), 0.2+1/6.)
    self.assertAlmostEqual(p.MassPerArea(5, method='nplies'), 0.1+1/6.)
    with self.assertRaises(IndexError):
        p.MassPerArea(6)
def test_cshear(self):
    """tests a PSHEAR/CSHEAR"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    # unit square of grid points for the shear panel
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    eid = 10
    pid = 20
    mid = 30
    t = 0.1
    nids = [1, 2, 3, 4]
    cshear = model.add_cshear(eid, pid, nids, comment='cshear')
    pshear = model.add_pshear(pid, mid, t, nsm=0., f1=0., f2=0., comment='')

    # optimization setup: one DVPREL1 driving the PSHEAR thickness plus its DESVAR
    dvids = [1]
    coeffs = 1.0
    model.add_dvprel1(1, 'PSHEAR', pid, 'T', dvids, coeffs,
                      p_min=None, p_max=1e20,
                      c0=0.0, validate=True,
                      comment='')
    model.add_desvar(1, 'T', 10.0)

    E = 30.e7
    G = None  # derived from E and nu
    nu = 0.3
    mat1 = model.add_mat1(mid, E, G, nu, rho=0.1, comment='mat1')
    model.pop_parse_errors()
    model.validate()

    # exercise card echo/write paths in all field widths
    cshear.raw_fields()
    cshear.write_card(size=8)
    pshear.raw_fields()
    pshear.write_card(size=8)
    pshear.write_card(size=16)
    pshear.write_card(size=16, is_double=True)

    # verify before and after cross-referencing
    model.validate()
    model._verify_bdf(xref=False)
    model.cross_reference()
    model._verify_bdf(xref=True)
    model.mass_properties()

    cshear.write_card(size=8)
    pshear.write_card(size=8)
    # apply the desvar value and round-trip the deck
    model.update_model_by_desvars()
    save_load_deck(model)
def test_shells(self):
    """tests a CTRIA3/CQUAD4/PSHELL and CTRIA6/CQUAD8/CQUAD/PCOMP"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    # unit square: corners 1-4, midside nodes 5-8, center node 9
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    model.add_grid(5, [.5, 0., 0.])
    model.add_grid(6, [1., 0.5, 0.])
    model.add_grid(7, [.5, 1., 0.])
    model.add_grid(8, [0., .5, 0.])
    model.add_grid(9, [.5, .5, 0.])
    E = 30.e7
    G = None  # derived from E and nu
    nu = 0.3
    model.add_mat1(1, E, G, nu, rho=0.1)
    model.add_mat1(2, E, G, nu, rho=0.1)
    model.add_mat1(3, E, G, nu, rho=0.1)

    # first-order shells on a PSHELL
    pid = 1
    nids = [1, 2, 3]
    model.add_ctria3(1, pid, nids)
    nids = [1, 2, 3, 4]
    model.add_cquad4(2, pid, nids)
    model.add_pshell(pid, mid1=2, t=0.1)

    # higher-order shells on a PCOMP
    pid = 2
    nids = [1, 2, 3, 5, 6, 9]
    ctria6 = model.add_ctria6(3, pid, nids, comment='ctria6')
    nids = [1, 2, 3, 4, 5, 6, 7, 8]
    cquad8 = model.add_cquad8(4, pid, nids, comment='cquad8')
    nids = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    cquad = model.add_cquad(5, pid, nids, comment='cquad')

    mids = [1, 2, 3]
    thicknesses = [0.1, 0.2, 0.3]
    pcomp = model.add_pcomp(pid, mids, thicknesses)
    assert pcomp.Thickness() == sum(thicknesses), thicknesses
    assert np.allclose(pcomp.get_thicknesses(), [0.1, 0.2, 0.3]), pcomp.get_thicknesses()
    assert np.allclose(pcomp.get_thetas(), [0., 0., 0.]), pcomp.get_thetas()

    # switching to a symmetric layup doubles the thickness and mirrors the plies
    pcomp.lam = 'SYM'
    assert pcomp.Thickness() == sum(thicknesses)*2, thicknesses
    assert np.allclose(pcomp.get_thicknesses(), [0.1, 0.2, 0.3, 0.3, 0.2, 0.1]), pcomp.get_thicknesses()
    assert np.allclose(pcomp.get_thetas(), [0., 0., 0., 0., 0., 0.]), pcomp.get_thetas()

    #---------------------------------------------------
    model.validate()
    # exercise card echo/write paths before cross-referencing
    ctria6.raw_fields()
    ctria6.write_card(size=8)
    cquad8.raw_fields()
    cquad8.write_card(size=8)
    cquad.raw_fields()
    cquad.write_card(size=8)
    pcomp.raw_fields()
    pcomp.write_card(size=8)
    pcomp.write_card(size=16)
    pcomp.write_card(size=16, is_double=True)
    model._verify_bdf(xref=False)

    # optimization: one DVPREL1/DESVAR per updatable PCOMP field
    params = [('T1', 1.0), ('THETA1', 2.0), ('Z0', 3.0), ('SB', 4.0),
              ('TREF', 0.0), ('GE', 0.1)]
    make_dvprel_optimization(model, params, 'PCOMP', pid)

    #--------------------------------
    model.cross_reference()
    model._verify_bdf(xref=True)
    model.get_area_breakdown(property_ids=None, stop_if_no_area=True)
    model.get_mass_breakdown(property_ids=None, stop_if_no_mass=True, detailed=False)
    model.get_mass_breakdown(property_ids=None, stop_if_no_mass=True, detailed=True)
    model.get_volume_breakdown(property_ids=None, stop_if_no_volume=True)

    model.update_model_by_desvars(xref=True)
    # re-exercise card echo/write paths after cross-referencing
    ctria6.raw_fields()
    ctria6.write_card(size=8)
    cquad8.raw_fields()
    cquad8.write_card(size=8)
    cquad.raw_fields()
    cquad.write_card(size=8)
    pcomp.raw_fields()
    pcomp.write_card(size=8)
    pcomp.write_card(size=16)
    pcomp.write_card(size=16, is_double=True)
    model._verify_bdf(xref=False)
def test_trax(self):
    """tests a CTRAX3/CTRAX6 axisymmetric triangle with PSOLID/PLSOLID"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    # unit square: corners 1-4, midside nodes 5-8, center node 9
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    model.add_grid(5, [.5, 0., 0.])
    model.add_grid(6, [1., 0.5, 0.])
    model.add_grid(7, [.5, 1., 0.])
    model.add_grid(8, [0., .5, 0.])
    model.add_grid(9, [.5, .5, 0.])
    mid1 = 1
    E = 30.e7
    G = None  # derived from E and nu
    nu = 0.3
    model.add_mat1(mid1, E, G, nu, rho=0.1)
    #model.add_mat1(2, E, G, nu, rho=0.1)
    #model.add_mat1(3, E, G, nu, rho=0.1)

    # linear axisymmetric triangle on a PSOLID
    pid = 1
    nids = [1, 2, 3]
    ctrax3 = model.add_ctrax3(1, pid, nids, theta=0., comment='ctrax3')
    #model.add_pshell(pid, mid1=2, t=0.1)
    psolid = model.add_psolid(pid, mid1, cordm=0, integ=None, stress=None,
                              isop=None, fctn='SMECH', comment='psolid')

    # quadratic axisymmetric triangle on a hyperelastic PLSOLID/MATHP
    pid = 2
    nids = [1, 2, 3, 5, 6, 9]
    ctrax6 = model.add_ctrax6(2, pid, nids, theta=0., comment='ctrax6')
    plsolid = model.add_plsolid(pid, mid1, stress_strain='GRID', ge=0.,
                                comment='plsolid')
    mathp = model.add_mathp(mid1)
    #assert pcomp.Thickness() == sum(thicknesses), thicknesses
    #pcomp.lam = 'SYM'
    #assert pcomp.Thickness() == sum(thicknesses)*2, thicknesses

    model.validate()
    # exercise card echo/write paths before cross-referencing
    ctrax6.raw_fields()
    ctrax6.write_card(size=8)
    psolid.raw_fields()
    psolid.write_card(size=8)
    #psolid.write_card(size=16)
    #psolid.write_card(size=16, is_double=True)
    plsolid.raw_fields()
    plsolid.write_card(size=8)
    #plsolid.write_card(size=16)
    #plsolid.write_card(size=16, is_double=True)
    model._verify_bdf(xref=False)

    #--------------------------------
    model.cross_reference()
    model._verify_bdf(xref=True)
    # re-exercise the element write paths after cross-referencing
    ctrax3.raw_fields()
    ctrax3.write_card(size=8)
    ctrax6.raw_fields()
    ctrax6.write_card(size=8)
    #pcomp.raw_fields()
    #pcomp.write_card(size=8)
    #pcomp.write_card(size=16)
    #pcomp.write_card(size=16, is_double=True)
    save_load_deck(model, run_convert=False)
def test_ctriar_cquadr(self):
    """tests a CTRIAR/CQUADR/PSHELL/MAT8"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    # unit square of grid points
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    eid = 6
    pid = 13
    nids = [1, 2, 3]
    ctriar = model.add_ctriar(eid, pid, nids, comment='ctriar')
    ctriar.raw_fields()
    ctriar.write_card(size=8, is_double=False)
    ctriar.write_card(size=16, is_double=False)
    ctriar.flip_normal()

    eid = 8
    nids = [1, 2, 3, 4]
    cquadr = model.add_cquadr(eid, pid, nids, comment='cquadr')
    cquadr.raw_fields()
    cquadr.write_card(size=8, is_double=False)
    cquadr.write_card(size=16, is_double=False)
    cquadr.flip_normal()

    # orthotropic shell material
    mid = 42
    model.add_pshell(pid, mid1=mid, t=0.2)
    e11 = 1e7
    e22 = 1e6
    nu12 = 0.3
    model.add_mat8(mid, e11, e22, nu12)

    # verify, cross-reference both ways, then run the mass/area/volume breakdowns
    model.validate()
    model._verify_bdf(xref=False)
    model.cross_reference()
    model._verify_bdf(xref=True)
    model.uncross_reference()
    model.safe_cross_reference()
    model.get_area_breakdown(property_ids=None, stop_if_no_area=True)
    model.get_mass_breakdown(property_ids=None, stop_if_no_mass=True, detailed=False)
    model.get_mass_breakdown(property_ids=None, stop_if_no_mass=True, detailed=True)
    model.get_volume_breakdown(property_ids=None, stop_if_no_volume=True)
    save_load_deck(model)
def test_cplstn34(self):
    """tests plane-strain CPLSTN3/CPLSTN4 elements with a PPLANE/MAT1"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    # unit square of grid points
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    pid = 4
    eid = 3
    nids = [1, 2, 3, 4]
    cplstn4 = model.add_cplstn4(eid, pid, nids, comment='cplstn4')
    cplstn4.flip_normal()

    eid = 5
    nids = [1, 2, 3]
    mid = 10
    cplstn3 = model.add_cplstn3(eid, pid, nids, comment='cplstn3')
    cplstn3.flip_normal()
    pplane = model.add_pplane(pid, mid, t=0.1, nsm=0.,
                              formulation_option=0, comment='pplane')
    E = 1e7
    G = None  # derived from E and nu
    nu = 0.3
    model.add_mat1(mid, E, G, nu)

    # exercise the card echo paths
    cplstn3.repr_fields()
    cplstn4.repr_fields()
    cplstn3.raw_fields()
    cplstn4.raw_fields()
    pplane.raw_fields()
    model.validate()
    model._verify_bdf(xref=False)
    cplstn3.write_card(size=8)
    cplstn4.write_card(size=8)
    pplane.write_card(size=8)

    # cross-reference both ways and round-trip the deck
    model.cross_reference()
    model.pop_xref_errors()
    #cplstn3.write_card(size=8)
    #cplstn4.write_card(size=8)
    model.uncross_reference()
    model.safe_cross_reference()
    save_load_deck(model)
def test_cplstn68(self):
    """tests plane-strain CPLSTN6/CPLSTN8 elements with a PPLANE/MAT1"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    # unit square: corners 1-4 interleaved with midside nodes 5-8
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(5, [.5, 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(6, [1., .5, 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(7, [.5, 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    model.add_grid(8, [0., .5, 0.])
    pid = 4
    eid = 3
    nids = [1, 2, 3, 4, 5, 6, 7, 8]
    cplstn8 = model.add_cplstn8(eid, pid, nids, comment='cplstn8')

    eid = 5
    nids = [1, 2, 3, 4, 5, 6]
    mid = 10
    cplstn6 = model.add_cplstn6(eid, pid, nids, comment='cplstn6')
    pplane = model.add_pplane(pid, mid, t=0.1, nsm=0.,
                              formulation_option=0, comment='pplane')
    E = 1e7
    G = None  # derived from E and nu
    nu = 0.3
    mat1 = model.add_mat1(mid, E, G, nu)

    # exercise the card echo/write paths
    cplstn6.raw_fields()
    cplstn8.raw_fields()
    pplane.raw_fields()
    model.validate()
    model._verify_bdf(xref=False)
    cplstn6.write_card(size=8)
    cplstn8.write_card(size=8)
    pplane.write_card(size=8)

    # cross-reference both ways and round-trip the deck
    model.cross_reference()
    model.pop_xref_errors()
    #cplstn3.write_card(size=8)
    #cplstn4.write_card(size=8)
    model.uncross_reference()
    model.safe_cross_reference()
    save_load_deck(model)
def test_ctrishell68(self):
    """tests higher-order shells (CTRIA6/CQUAD8) referencing a PPLANE/MAT1"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    # unit square: corners 1-4 interleaved with midside nodes 5-8
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(5, [.5, 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(6, [1., .5, 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(7, [.5, 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    model.add_grid(8, [0., .5, 0.])
    pid = 4
    eid = 3
    nids = [1, 2, 3, 4, 5, 6, 7, 8]
    cquad8 = model.add_cquad8(eid, pid, nids, comment='cquad8')

    eid = 5
    nids = [1, 2, 3, 4, 5, 6]
    mid = 10
    ctria6 = model.add_ctria6(eid, pid, nids, comment='ctria6')
    pplane = model.add_pplane(pid, mid, t=0.1, nsm=0.,
                              formulation_option=0, comment='pplane')
    E = 1e7
    G = None  # derived from E and nu
    nu = 0.3
    mat1 = model.add_mat1(mid, E, G, nu)

    # exercise the card echo/write paths
    ctria6.raw_fields()
    cquad8.raw_fields()
    pplane.raw_fields()
    model.validate()
    model._verify_bdf(xref=False)
    ctria6.write_card(size=8)
    cquad8.write_card(size=8)
    pplane.write_card(size=8)

    # cross-reference both ways and round-trip the deck
    model.cross_reference()
    model.pop_xref_errors()
    model.uncross_reference()
    model.safe_cross_reference()
    save_load_deck(model, run_test_bdf=False)
    #model.mass_properties()
def test_shear(self):
    """tests a CSHEAR/PSHEAR mass against an equivalent CQUAD4/PSHELL"""
    pid = 10
    pid_pshell = 11
    mid = 100
    log = get_logger(level='warning')
    model = BDF(log=log)
    # unit square -> element area is exactly 1.0
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    nsm = 10.0
    t = 1.0
    rho = 1.0
    # same 4 nodes, one CSHEAR and one CQUAD4, with matching t/nsm/rho
    cshear = model.add_cshear(10, pid, [1, 2, 3, 4],
                              comment='cshear')
    cquad4 = model.add_cquad4(14, pid_pshell, [1, 2, 3, 4],
                              comment='cquad4')
    model.add_pshear(pid, mid, t=t,
                     nsm=nsm, f1=0., f2=0., comment='pshear')
    model.add_pshell(pid_pshell, mid1=mid, t=t, mid2=None, twelveIt3=1.0,
                     mid3=None, tst=0.833333,
                     nsm=nsm, z1=None, z2=None,
                     mid4=None, comment='')
    E = 3.0e7
    G = None  # derived from E and nu
    nu = 0.3
    model.add_mat1(mid, E, G, nu, rho=rho)
    model.validate()
    model.cross_reference()
    model.pop_xref_errors()

    # m/A = rho*t + nsm; both elements should contribute the same mass
    area = 1.0
    mass_expected = area * (rho * t + nsm)
    mass = model.mass_properties()[0]
    assert np.allclose(mass, mass_expected*2), 'mass_properties all: mass=%s mass_expected=%s' % (mass, mass_expected*2)
    mass = model.mass_properties(element_ids=10)[0]
    assert np.allclose(mass, mass_expected), 'mass_properties reduced: mass=%s mass_expected=%s' % (mass, mass_expected)

    # the nsm-aware mass routine must agree
    mass = mass_properties_nsm(model)[0]
    assert np.allclose(mass, mass_expected*2), 'mass_properties_nsm all: mass=%s mass_expected=%s' % (mass, mass_expected*2)
    mass = mass_properties_nsm(model, element_ids=10)[0]
    assert np.allclose(mass, mass_expected), 'mass_properties_nsm reduced: mass=%s mass_expected=%s' % (mass, mass_expected)

    bdf_file = StringIO()
    model.write_bdf(bdf_file)
    model.uncross_reference()
    model.cross_reference()
    model.pop_xref_errors()
    model.get_area_breakdown(property_ids=None, stop_if_no_area=True)
    model.get_mass_breakdown(property_ids=None, stop_if_no_mass=True, detailed=False)
    model.get_mass_breakdown(property_ids=None, stop_if_no_mass=True, detailed=True)
    model.get_volume_breakdown(property_ids=None, stop_if_no_volume=True)

    assert np.allclose(cshear.Mass(), mass_expected), cshear.Mass()
    model.uncross_reference()
    model.safe_cross_reference()
    model.uncross_reference()
    #bdf_file = model.write_bdf(bdf_file)
    save_load_deck(model)
def test_cquadx4(self):
    """tests a CQUADX4 axisymmetric quad with a PSOLID/MAT1"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    eid = 1
    pid = 2
    mid = 3
    # unit square of grid points
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    cquadx4 = model.add_cquadx4(eid, pid, [1, 2, 3, 4], theta=0., comment='cquadx4')
    psolid = model.add_psolid(pid, mid, cordm=0, integ=None, stress=None,
                              isop=None, fctn='SMECH', comment='psolid')
    E = 3.0e7
    G = None  # derived from E and nu
    nu = 0.3
    mat1 = model.add_mat1(mid, E, G, nu)
    model.cross_reference()
    model.pop_xref_errors()

    # MAT1 has no rho, so the model mass should be zero
    mass = model.mass_properties()[0]
    assert np.allclose(mass, 0.0), mass  # TODO: not sure

    model.uncross_reference()
    model.safe_cross_reference()
    model.uncross_reference()
    #bdf_file = model.write_bdf(bdf_file)
    save_load_deck(model)
def test_ctria6_cquad8_cquad9(self):
    """tests CTRIA3/CTRIA6/CQUAD4/CQUAD8/CQUAD on a PSHELL, including flip_normal and mass"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    eid = 1
    pid = 10
    mid = 100
    # unit square: corners 1-4, midside nodes 5-8, center node 9
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(5, [.5, 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(6, [1., .5, 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(7, [.5, 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    model.add_grid(8, [0., .5, 0.])
    model.add_grid(9, [.5, .5, 0.])
    nids = [1, 2, 3, 4, 5, 6, 7, 8]
    cquad8 = model.add_cquad8(eid, pid, nids, theta_mcid=0., comment='cquad8')
    cquad8.flip_normal()

    eid = 2
    nids = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    cquad = model.add_cquad(eid, pid, nids, theta_mcid=0., comment='cquad')
    model.add_pshell(pid, mid1=mid, t=1.0)

    eid = 3
    nids = [1, 2, 3, 5, 6, 9]
    ctria6 = model.add_ctria6(eid, pid, nids, theta_mcid=0., comment='ctria6')
    ctria6.flip_normal()

    # first-order elements, with and without corner thicknesses (tflag=1)
    eid = 4
    cquad4 = model.add_cquad4(eid, pid, [1, 2, 3, 4])
    cquad4.flip_normal()
    str(cquad4)
    eid = 5
    cquad4 = model.add_cquad4(eid, pid, [1, 2, 3, 4],
                              tflag=1, T1=2., T2=2., T3=2., T4=2.)
    str(cquad4)
    eid = 6
    ctria3 = model.add_ctria3(eid, pid, [1, 2, 3])
    ctria3.flip_normal()
    str(ctria3)
    eid = 7
    ctria3 = model.add_ctria3(eid, pid, [1, 2, 3],
                              tflag=1, T1=2., T2=2., T3=2.)
    str(ctria3)
    str(ctria3)

    E = 3.0e7
    G = None  # derived from E and nu
    nu = 0.3
    model.add_mat1(mid, E, G, nu, rho=0.1)
    model.cross_reference()
    model.pop_xref_errors()

    # flip_normal must also work on cross-referenced elements
    ctria3.flip_normal()
    cquad4.flip_normal()
    ctria6.flip_normal()
    cquad8.flip_normal()

    # centroid/mass checks: rho*t*A -> 0.1 for the quads (A=1), 0.05 for the triangle
    assert len(ctria6.Centroid()) == 3, ctria6.Centroid()
    assert len(ctria6.center_of_mass()) == 3, ctria6.center_of_mass()
    assert np.allclose(cquad8.Mass(), 0.1), cquad8.Mass()
    assert np.allclose(cquad.Mass(), 0.1), cquad.Mass()
    assert np.allclose(ctria6.Mass(), 0.05), ctria6.Mass()

    model.get_area_breakdown(property_ids=None, stop_if_no_area=True)
    model.get_mass_breakdown(property_ids=None, stop_if_no_mass=True, detailed=False)
    model.get_mass_breakdown(property_ids=None, stop_if_no_mass=True, detailed=True)
    model.get_volume_breakdown(property_ids=None, stop_if_no_volume=True)
    save_load_deck(model)
def test_cquadx8(self):
    """tests a CQUADX8, CTRIAX, CTRIAX6 (axisymmetric elements)"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    eid = 1
    pid = 10
    mid = 100
    # unit square: corners 1-4, midside nodes 5-8, center node 9
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(5, [.5, 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(6, [1., .5, 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(7, [.5, 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    model.add_grid(8, [0., .5, 0.])
    model.add_grid(9, [.5, .5, 0.])
    nids = [1, 2, 3, 4, 5, 6, 7, 8]
    model.add_cquadx8(eid, pid, nids, theta=0., comment='cquadx8')

    eid = 2
    # 4---7---3
    # |     / |
    # 8   9   6
    # | /     |
    # 1---5---2
    nids = [1, 2, 3, 5, 6, 9]
    model.add_ctriax(eid, pid, nids, theta_mcid=0., comment='ctriax')

    # NOTE: CTRIAX6 references the material id directly, not a property
    eid = 3
    nids = [1, 5, 2, 6, 3, 9]
    model.add_ctriax6(eid, mid, nids, theta=0., comment='ctriax6')

    model.add_psolid(pid, mid)
    E = 3.0e7
    G = None  # derived from E and nu
    nu = 0.3
    model.add_mat1(mid, E, G, nu)
    model.cross_reference()
    model.pop_xref_errors()
    save_load_deck(model, run_test_bdf=False)
def test_shell_mcid(self):
    """tests that mcids=0 are correctly identified as not 0.0 and thus not dropped"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    # unit square in the y-z plane
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(2, [0., 1., 0.])
    model.add_grid(3, [0., 1., 1.])
    model.add_grid(4, [0., 0., 1.])
    eid = 10
    pid = 100
    mid = 1000
    # theta_mcid=0 (integer) is a coordinate system id, NOT a 0.0 angle;
    # it must survive in the written card
    model.add_ctria3(eid, pid, [1, 2, 3], zoffset=0., theta_mcid=0, tflag=0,
                     T1=None, T2=None, T3=None,
                     comment='')
    eid = 11
    model.add_cquad4(eid, pid, [1, 2, 3, 4], theta_mcid=0, zoffset=0., tflag=0,
                     T1=None, T2=None, T3=None, T4=None, comment='')
    model.add_pshell(pid, mid1=mid, t=0.1, mid2=mid, twelveIt3=1.0,
                     mid3=None, tst=0.833333,
                     nsm=0.0, z1=None, z2=None,
                     mid4=None, comment='')
    E = 3.0e7
    G = None  # derived from E and nu
    nu = 0.3
    model.add_mat1(mid, E, G, nu)
    #print(model.elements[11])

    # the trailing '0' is the mcid field; it must be written at every
    # cross-reference state (unreferenced, xref'd, unxref'd, safe-xref'd)
    assert model.elements[10].rstrip() == 'CTRIA3 10 100 1 2 3 0'
    assert model.elements[11].rstrip() == 'CQUAD4 11 100 1 2 3 4 0'
    assert model.elements[10].write_card().rstrip() == 'CTRIA3 10 100 1 2 3 0'
    model.cross_reference()
    assert model.elements[10].rstrip() == 'CTRIA3 10 100 1 2 3 0'
    assert model.elements[11].rstrip() == 'CQUAD4 11 100 1 2 3 4 0'
    model.uncross_reference()
    assert model.elements[10].rstrip() == 'CTRIA3 10 100 1 2 3 0'
    assert model.elements[11].rstrip() == 'CQUAD4 11 100 1 2 3 4 0'
    model.safe_cross_reference()
    model.uncross_reference()
    assert model.elements[10].rstrip() == 'CTRIA3 10 100 1 2 3 0'
    assert model.elements[11].rstrip() == 'CQUAD4 11 100 1 2 3 4 0'

    # the mcid must also survive a save/load round trip
    model2 = save_load_deck(model)
    model2.elements[10].comment = ''
    assert model2.elements[10].rstrip() == 'CTRIA3 10 100 1 2 3 0'
    assert model2.elements[11].rstrip() == 'CQUAD4 11 100 1 2 3 4 0'
def test_abd(self):
    """tests some ABD matrix functionality for a PCOMP"""
    log = get_logger(level='warning')
    model = BDF(log=log)
    # unit square of grid points
    model.add_grid(1, [0., 0., 0.])
    model.add_grid(2, [1., 0., 0.])
    model.add_grid(3, [1., 1., 0.])
    model.add_grid(4, [0., 1., 0.])
    nids = [1, 2, 3, 4]
    eid = 1
    pid = 10
    mid = 20
    model.add_cquad4(eid, pid, nids, theta_mcid=0.0, zoffset=0.,
                     tflag=0, T1=None, T2=None, T3=None, T4=None, comment='')

    # isotropic [0/45/90] laminate on MAT1
    thetas = [0., 45., 90.]
    thicknesses = [0.1] * 3
    mids = len(thicknesses) * [mid]
    pcomp = model.add_pcomp(pid, mids, thicknesses, thetas=None,
                            souts=None, nsm=0., sb=0., ft=None, tref=0., ge=0.,
                            lam=None, z0=0., comment='')
    E = 3.0e7
    G = None  # derived from E and nu
    nu = 0.3
    model.add_mat1(mid, E, G, nu)

    #--------------------------
    # orthotropic single-ply laminate on MAT8, defined by stiffness ratios
    #e1_e2 = 40.
    #g12_e2 = 0.5
    #nu12 = 0.25
    #e22 = 30e6
    e1_e2 = 3.
    g12_e2 = 0.5
    nu12 = 0.25
    e22 = 1.
    e11 = e1_e2 * e22
    g12 = g12_e2 * e22
    mid8 = 8
    mat8 = model.add_mat8(
        mid8, e11, e22, nu12, g12=g12, g1z=1e8, g2z=1e8, rho=0.,
        a1=0., a2=0., tref=0.,
        Xt=0., Xc=None, Yt=0., Yc=None, S=0., ge=0.,
        F12=0., strn=0., comment='')
    S = get_mat_props_S(mat8)
    pid8 = 8
    pcomp8 = model.add_pcomp(pid8, [mid8], [1.], thetas=[0.],
                             souts=None, nsm=0., sb=0., ft=None, tref=0., ge=0.,
                             lam=None, z0=0., comment='')
    model.pop_parse_errors()
    model.cross_reference()
    model.pop_xref_errors()

    # classical lamination theory stiffness matrices
    ABD = pcomp.get_ABD_matrices()

    # plot equivalent lamina properties vs. ply angle, then clean up the pngs
    thetad = np.linspace(0., 90., num=91)
    if IS_MATPLOTLIB:
        plot_equivalent_lamina_vs_theta(
            pcomp8, mat8, thetad, plot=True, show=False, close=True,
            png_filename='lamina.png')
        os.remove('lamina_stiffness.png')
        os.remove('lamina_nu.png')
def make_dvcrel_optimization(model, params, element_type, eid, i=1):
    """creates a DVCREL1/DESVAR pair per (name, value) in params; returns the next free id"""
    opt_id = i
    for offset, (field_name, desvar_value) in enumerate(params):
        opt_id = i + offset
        # one DVCREL1 tied 1:1 (coefficient 1.0) to its own DESVAR
        model.add_dvcrel1(opt_id, element_type, eid, field_name,
                          [opt_id], [1.0],
                          cp_min=None, cp_max=1e20,
                          c0=0.0, validate=True,
                          comment='')
        model.add_desvar(opt_id, 'v%s' % field_name, desvar_value)
    return opt_id + 1
def make_dvprel_optimization(model, params, prop_type, pid, i=1):
    """creates a DVPREL1/DESVAR pair per (name, value) in params; returns the next free id"""
    opt_id = i
    for offset, (prop_name, desvar_value) in enumerate(params):
        opt_id = i + offset
        # one DVPREL1 tied 1:1 (coefficient 1.0) to its own DESVAR
        model.add_dvprel1(opt_id, prop_type, pid, prop_name,
                          [opt_id], [1.0],
                          p_min=None, p_max=1e20,
                          c0=0.0, validate=True,
                          comment='')
        model.add_desvar(opt_id, 'v%s' % prop_name, desvar_value)
    return opt_id + 1
def make_dvmrel_optimization(model, params, material_type, mid, i=1):
    """creates a DVMREL1/DESVAR pair per (name, value) in params; returns the next free id"""
    opt_id = i
    for offset, (mat_name, desvar_value) in enumerate(params):
        opt_id = i + offset
        # one DVMREL1 tied 1:1 (coefficient 1.0) to its own DESVAR
        model.add_dvmrel1(opt_id, material_type, mid, mat_name,
                          [opt_id], [1.0],
                          mp_min=None, mp_max=1e20,
                          c0=0.0, validate=True,
                          comment='')
        model.add_desvar(opt_id, 'v%s' % mat_name, desvar_value)
    return opt_id + 1
# allow running this test module directly
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
| 33.670776 | 128 | 0.529721 |
acf9b4710e72044563b2bdecffe46118125ced75 | 28,055 | py | Python | cmd2/parsing.py | korygill/cmd2 | 81cbc40b5dfa6f615a621ed42c6ed437faabb4da | [
"MIT"
] | 2 | 2021-04-01T08:46:05.000Z | 2021-04-01T08:46:07.000Z | cmd2/parsing.py | korygill/cmd2 | 81cbc40b5dfa6f615a621ed42c6ed437faabb4da | [
"MIT"
] | 9 | 2021-04-12T13:44:34.000Z | 2021-04-13T16:50:08.000Z | cmd2/parsing.py | korygill/cmd2 | 81cbc40b5dfa6f615a621ed42c6ed437faabb4da | [
"MIT"
] | 1 | 2021-03-31T10:11:02.000Z | 2021-03-31T10:11:02.000Z | #
# -*- coding: utf-8 -*-
"""Statement parsing classes for cmd2"""
import re
import shlex
from typing import (
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
)
import attr
from . import (
constants,
utils,
)
from .exceptions import (
Cmd2ShlexError,
)
def shlex_split(str_to_split: str) -> List[str]:
    """
    Split a string into tokens the same way StatementParser does.

    A thin wrapper around shlex.split() using cmd2's preferred settings:
    comment processing disabled, and non-POSIX mode so quote characters
    are kept in the tokens.

    :param str_to_split: the string being split
    :return: A list of tokens
    """
    split_settings = {'comments': False, 'posix': False}
    return shlex.split(str_to_split, **split_settings)
@attr.s(frozen=True)
class MacroArg:
    """
    Information used to replace or unescape arguments in a macro value when the macro is resolved
    Normal argument syntax: {5}
    Escaped argument syntax: {{5}}
    """

    # The starting index of this argument in the macro value
    start_index = attr.ib(validator=attr.validators.instance_of(int))

    # The number string that appears between the braces
    # This is a string instead of an int because we support unicode digits and must be able
    # to reproduce this string later
    number_str = attr.ib(validator=attr.validators.instance_of(str))

    # Tells if this argument is escaped and therefore needs to be unescaped
    # (escaped args are written back as normal args instead of being replaced)
    is_escaped = attr.ib(validator=attr.validators.instance_of(bool))
# Module-level regexes used when scanning a macro value for arguments.

# Pattern used to find a normal argument
# Digits surrounded by exactly 1 brace on a side and 1 or more braces on the opposite side
# Match strings like: {5}, {{{{{4}, {2}}}}}
macro_normal_arg_pattern = re.compile(r'(?<!{){\d+}|{\d+}(?!})')

# Pattern used to find escaped arguments
# Digits surrounded by 2 or more braces on both sides
# Match strings like: {{5}}, {{{{{4}}, {{2}}}}}
macro_escaped_arg_pattern = re.compile(r'{{2}\d+}{2}')

# Finds a string of digits (the argument number inside the braces)
digit_pattern = re.compile(r'\d+')
@attr.s(frozen=True)
class Macro:
    """Defines a cmd2 macro"""

    # Name of the macro (what the user types to invoke it)
    name = attr.ib(validator=attr.validators.instance_of(str))

    # The string the macro resolves to
    value = attr.ib(validator=attr.validators.instance_of(str))

    # The minimum number of args the user has to pass to this macro
    minimum_arg_count = attr.ib(validator=attr.validators.instance_of(int))

    # MacroArg placeholders used to fill in arguments when the macro is resolved
    arg_list = attr.ib(default=attr.Factory(list), validator=attr.validators.instance_of(list))
@attr.s(frozen=True)
class Statement(str):
    """String subclass with additional attributes to store the results of parsing.

    The ``cmd`` module in the standard library passes commands around as a
    string. To retain backwards compatibility, ``cmd2`` does the same. However,
    we need a place to capture the additional output of the command parsing, so
    we add our own attributes to this subclass.

    Instances of this class should not be created by anything other than the
    :meth:`cmd2.parsing.StatementParser.parse` method, nor should any of the
    attributes be modified once the object is created.

    The string portion of the class contains the arguments, but not the
    command, nor the output redirection clauses.

    Tips:

    1. `argparse <https://docs.python.org/3/library/argparse.html>`_ is your
       friend for anything complex. ``cmd2`` has the decorator
       (:func:`~cmd2.decorators.with_argparser`) which you can
       use to make your command method receive a namespace of parsed arguments,
       whether positional or denoted with switches.

    2. For commands with simple positional arguments, use
       :attr:`~cmd2.Statement.args` or :attr:`~cmd2.Statement.arg_list`

    3. If you don't want to have to worry about quoted arguments, see
       :attr:`argv` for a trick which strips quotes off for you.
    """

    # the arguments, but not the command, nor the output redirection clauses.
    args = attr.ib(default='', validator=attr.validators.instance_of(str))

    # string containing exactly what we input by the user
    raw = attr.ib(default='', validator=attr.validators.instance_of(str))

    # the command, i.e. the first whitespace delimited word
    command = attr.ib(default='', validator=attr.validators.instance_of(str))

    # list of arguments to the command, not including any output redirection or terminators; quoted args remain quoted
    arg_list = attr.ib(default=attr.Factory(list), validator=attr.validators.instance_of(list))

    # if the command is a multiline command, the name of the command, otherwise empty
    multiline_command = attr.ib(default='', validator=attr.validators.instance_of(str))

    # the character which terminated the multiline command, if there was one
    terminator = attr.ib(default='', validator=attr.validators.instance_of(str))

    # characters appearing after the terminator but before output redirection, if any
    suffix = attr.ib(default='', validator=attr.validators.instance_of(str))

    # if output was piped to a shell command, the shell command as a string
    pipe_to = attr.ib(default='', validator=attr.validators.instance_of(str))

    # if output was redirected, the redirection token, i.e. '>>'
    output = attr.ib(default='', validator=attr.validators.instance_of(str))

    # if output was redirected, the destination file token (quotes preserved)
    output_to = attr.ib(default='', validator=attr.validators.instance_of(str))

    def __new__(cls, value: object, *pos_args, **kw_args):
        """Create a new instance of Statement.

        ``str`` is immutable and its constructor takes a different argument
        list than Statement's attrs-generated ``__init__``, so we override
        ``__new__`` to build the string portion from ``value`` alone. attrs
        then initializes the remaining members in the ``__init__`` it
        generates.
        """
        return super().__new__(cls, value)

    @property
    def command_and_args(self) -> str:
        """Combine command and args with a space separating them.

        Quoted arguments remain quoted. Output redirection and piping are
        excluded, as are any command terminators.
        """
        if not self.command:
            return ''
        if not self.args:
            # there were no arguments to the command
            return self.command
        return '{} {}'.format(self.command, self.args)

    @property
    def post_command(self) -> str:
        """A string containing any ending terminator, suffix, and redirection chars"""
        pieces = []
        if self.terminator:
            pieces.append(self.terminator)
        if self.suffix:
            pieces.append(' ' + self.suffix)
        if self.pipe_to:
            pieces.append(' | ' + self.pipe_to)
        if self.output:
            pieces.append(' ' + self.output)
        if self.output_to:
            pieces.append(' ' + self.output_to)
        return ''.join(pieces)

    @property
    def expanded_command_line(self) -> str:
        """Concatenate :meth:`~cmd2.Statement.command_and_args`
        and :meth:`~cmd2.Statement.post_command`"""
        return self.command_and_args + self.post_command

    @property
    def argv(self) -> List[str]:
        """a list of arguments a-la ``sys.argv``.

        The first element of the list is the command after shortcut and macro
        expansion. Subsequent elements of the list contain any additional
        arguments, with quotes removed, just like bash would. This is very
        useful if you are going to use ``argparse.parse_args()``.

        If you want to strip quotes from the input, you can use ``argv[1:]``.
        """
        if not self.command:
            return []
        return [utils.strip_quotes(token)
                for token in [self.command] + self.arg_list]
class StatementParser:
    """Parse user input as a string into discrete command components."""
    def __init__(self,
                 terminators: Optional[Iterable[str]] = None,
                 multiline_commands: Optional[Iterable[str]] = None,
                 aliases: Optional[Dict[str, str]] = None,
                 shortcuts: Optional[Dict[str, str]] = None) -> None:
        """Initialize an instance of StatementParser.
        The following will get converted to an immutable tuple before storing internally:
        terminators, multiline commands, and shortcuts.
        :param terminators: iterable containing strings which should terminate commands
        :param multiline_commands: iterable containing the names of commands that accept multiline input
        :param aliases: dictionary containing aliases
        :param shortcuts: dictionary containing shortcuts
        """
        # store terminators as an immutable tuple so parser state can't
        # be mutated after construction
        if terminators is None:
            self.terminators = (constants.MULTILINE_TERMINATOR,)
        else:
            self.terminators = tuple(terminators)
        if multiline_commands is None:
            self.multiline_commands = tuple()
        else:
            self.multiline_commands = tuple(multiline_commands)
        if aliases is None:
            self.aliases = dict()
        else:
            self.aliases = aliases
        if shortcuts is None:
            shortcuts = constants.DEFAULT_SHORTCUTS
        # Sort the shortcuts in descending order by name length because the longest match
        # should take precedence (e.g., @@file should match '@@' and not '@').
        self.shortcuts = tuple(sorted(shortcuts.items(), key=lambda x: len(x[0]), reverse=True))
        # commands have to be a word, so make a regular expression
        # that matches the first word in the line. This regex has three
        # parts:
        #     - the '\A\s*' matches the beginning of the string (even
        #       if contains multiple lines) and gobbles up any leading
        #       whitespace
        #     - the first parenthesis enclosed group matches one
        #       or more non-whitespace characters with a non-greedy match
        #       (that's what the '+?' part does). The non-greedy match
        #       ensures that this first group doesn't include anything
        #       matched by the second group
        #     - the second parenthesis group must be dynamically created
        #       because it needs to match either whitespace, something in
        #       REDIRECTION_CHARS, one of the terminators, or the end of
        #       the string (\Z matches the end of the string even if it
        #       contains multiple lines)
        #
        invalid_command_chars = []
        invalid_command_chars.extend(constants.QUOTES)
        invalid_command_chars.extend(constants.REDIRECTION_CHARS)
        invalid_command_chars.extend(self.terminators)
        # escape each item so it will for sure get treated as a literal
        second_group_items = [re.escape(x) for x in invalid_command_chars]
        # add the whitespace and end of string, not escaped because they
        # are not literals
        second_group_items.extend([r'\s', r'\Z'])
        # join them up with a pipe
        second_group = '|'.join(second_group_items)
        # build the regular expression
        expr = r'\A\s*(\S*?)({})'.format(second_group)
        self._command_pattern = re.compile(expr)
    def is_valid_command(self, word: str, *, is_subcommand: bool = False) -> Tuple[bool, str]:
        """Determine whether a word is a valid name for a command.
        Commands can not include redirection characters, whitespace,
        or termination characters. They also cannot start with a
        shortcut.
        :param word: the word to check as a command
        :param is_subcommand: Flag whether this command name is a subcommand name
        :return: a tuple of a boolean and an error string
        If word is not a valid command, return ``False`` and an error string
        suitable for inclusion in an error message of your choice::
            checkit = '>'
            valid, errmsg = statement_parser.is_valid_command(checkit)
            if not valid:
                errmsg = "alias: {}".format(errmsg)
        """
        valid = False
        if not isinstance(word, str):
            return False, 'must be a string. Received {} instead'.format(str(type(word)))
        if not word:
            return False, 'cannot be an empty string'
        if word.startswith(constants.COMMENT_CHAR):
            return False, 'cannot start with the comment character'
        # subcommands may legitimately start with a shortcut character, so
        # only top-level command names are checked against the shortcut list
        if not is_subcommand:
            for (shortcut, _) in self.shortcuts:
                if word.startswith(shortcut):
                    # Build an error string with all shortcuts listed
                    errmsg = 'cannot start with a shortcut: '
                    errmsg += ', '.join(shortcut for (shortcut, _) in self.shortcuts)
                    return False, errmsg
        # pre-build the error message; it is cleared below if the word turns
        # out to match the command pattern in full
        errmsg = 'cannot contain: whitespace, quotes, '
        errchars = []
        errchars.extend(constants.REDIRECTION_CHARS)
        errchars.extend(self.terminators)
        errmsg += ', '.join([shlex.quote(x) for x in errchars])
        # valid only if the entire word is consumed by the command group
        match = self._command_pattern.search(word)
        if match:
            if word == match.group(1):
                valid = True
                errmsg = ''
        return valid, errmsg
def tokenize(self, line: str) -> List[str]:
"""
Lex a string into a list of tokens. Shortcuts and aliases are expanded and
comments are removed.
:param line: the command line being lexed
:return: A list of tokens
:raises: Cmd2ShlexError if a shlex error occurs (e.g. No closing quotation)
"""
# expand shortcuts and aliases
line = self._expand(line)
# check if this line is a comment
if line.lstrip().startswith(constants.COMMENT_CHAR):
return []
# split on whitespace
try:
tokens = shlex_split(line)
except ValueError as ex:
raise Cmd2ShlexError(ex)
# custom lexing
tokens = self.split_on_punctuation(tokens)
return tokens
    def parse(self, line: str) -> Statement:
        """
        Tokenize the input and parse it into a :class:`~cmd2.Statement` object,
        stripping comments, expanding aliases and shortcuts, and extracting output
        redirection directives.
        :param line: the command line being parsed
        :return: a new :class:`~cmd2.Statement` object
        :raises: Cmd2ShlexError if a shlex error occurs (e.g. No closing quotation)
        """
        # handle the special case/hardcoded terminator of a blank line
        # we have to do this before we tokenize because tokenizing
        # destroys all unquoted whitespace in the input
        terminator = ''
        if line[-1:] == constants.LINE_FEED:
            terminator = constants.LINE_FEED
        command = ''
        args = ''
        arg_list = []
        # lex the input into a list of tokens
        tokens = self.tokenize(line)
        # of the valid terminators, find the first one to occur in the input
        # (a position past the end of the list means "not found")
        terminator_pos = len(tokens) + 1
        for pos, cur_token in enumerate(tokens):
            for test_terminator in self.terminators:
                if cur_token.startswith(test_terminator):
                    terminator_pos = pos
                    terminator = test_terminator
                    # break the inner loop, and we want to break the
                    # outer loop too
                    break
            else:
                # this else clause is only run if the inner loop
                # didn't execute a break. If it didn't, then
                # continue to the next iteration of the outer loop
                continue
            # inner loop was broken, break the outer
            break
        if terminator:
            if terminator == constants.LINE_FEED:
                # the LINE_FEED terminator was detected before tokenizing, so
                # no token corresponds to it; treat every token as pre-terminator
                terminator_pos = len(tokens) + 1
            # everything before the first terminator is the command and the args
            (command, args) = self._command_and_args(tokens[:terminator_pos])
            arg_list = tokens[1:terminator_pos]
            # we will set the suffix later
            # remove all the tokens before and including the terminator
            tokens = tokens[terminator_pos + 1:]
        else:
            (testcommand, testargs) = self._command_and_args(tokens)
            if testcommand in self.multiline_commands:
                # no terminator on this line but we have a multiline command
                # everything else on the line is part of the args
                # because redirectors can only be after a terminator
                command = testcommand
                args = testargs
                arg_list = tokens[1:]
                tokens = []
        pipe_to = ''
        output = ''
        output_to = ''
        # Find which redirector character appears first in the command
        # (an index equal to len(tokens) means "not present")
        try:
            pipe_index = tokens.index(constants.REDIRECTION_PIPE)
        except ValueError:
            pipe_index = len(tokens)
        try:
            redir_index = tokens.index(constants.REDIRECTION_OUTPUT)
        except ValueError:
            redir_index = len(tokens)
        try:
            append_index = tokens.index(constants.REDIRECTION_APPEND)
        except ValueError:
            append_index = len(tokens)
        # Check if output should be piped to a shell command
        if pipe_index < redir_index and pipe_index < append_index:
            # Get the tokens for the pipe command and expand ~ where needed
            pipe_to_tokens = tokens[pipe_index + 1:]
            utils.expand_user_in_tokens(pipe_to_tokens)
            # Build the pipe command line string
            pipe_to = ' '.join(pipe_to_tokens)
            # remove all the tokens after the pipe
            tokens = tokens[:pipe_index]
        # Check for output redirect/append
        elif redir_index != append_index:
            if redir_index < append_index:
                output = constants.REDIRECTION_OUTPUT
                output_index = redir_index
            else:
                output = constants.REDIRECTION_APPEND
                output_index = append_index
            # Check if we are redirecting to a file
            if len(tokens) > output_index + 1:
                unquoted_path = utils.strip_quotes(tokens[output_index + 1])
                if unquoted_path:
                    output_to = utils.expand_user(tokens[output_index + 1])
            # remove all the tokens after the output redirect
            tokens = tokens[:output_index]
        if terminator:
            # whatever is left is the suffix
            suffix = ' '.join(tokens)
        else:
            # no terminator, so whatever is left is the command and the args
            suffix = ''
            if not command:
                # command could already have been set, if so, don't set it again
                (command, args) = self._command_and_args(tokens)
                arg_list = tokens[1:]
        # set multiline
        if command in self.multiline_commands:
            multiline_command = command
        else:
            multiline_command = ''
        # build the statement
        statement = Statement(args,
                              raw=line,
                              command=command,
                              arg_list=arg_list,
                              multiline_command=multiline_command,
                              terminator=terminator,
                              suffix=suffix,
                              pipe_to=pipe_to,
                              output=output,
                              output_to=output_to)
        return statement
    def parse_command_only(self, rawinput: str) -> Statement:
        """Partially parse input into a :class:`~cmd2.Statement` object.
        The command is identified, and shortcuts and aliases are expanded.
        Multiline commands are identified, but terminators and output
        redirection are not parsed.
        This method is used by tab completion code and therefore must not
        generate an exception if there are unclosed quotes.
        The :class:`~cmd2.Statement` object returned by this method can at most
        contain values in the following attributes:
        :attr:`~cmd2.Statement.args`, :attr:`~cmd2.Statement.raw`,
        :attr:`~cmd2.Statement.command`,
        :attr:`~cmd2.Statement.multiline_command`
        :attr:`~cmd2.Statement.args` will include all output redirection
        clauses and command terminators.
        Different from :meth:`~cmd2.parsing.StatementParser.parse` this method
        does not remove redundant whitespace within args. However, it does
        ensure args has no leading or trailing whitespace.
        :param rawinput: the command line as entered by the user
        :return: a new :class:`~cmd2.Statement` object
        """
        # expand shortcuts and aliases
        line = self._expand(rawinput)
        command = ''
        args = ''
        # regex (not shlex) is used here so unclosed quotes can't raise
        match = self._command_pattern.search(line)
        if match:
            # we got a match, extract the command
            command = match.group(1)
            # take everything from the end of the first match group to
            # the end of the line as the arguments (stripping leading
            # and trailing spaces)
            args = line[match.end(1):].strip()
            # if the command is empty that means the input was either empty
            # or something weird like '>'. args should be empty if we couldn't
            # parse a command
            if not command or not args:
                args = ''
        # set multiline
        if command in self.multiline_commands:
            multiline_command = command
        else:
            multiline_command = ''
        # build the statement
        statement = Statement(args,
                              raw=rawinput,
                              command=command,
                              multiline_command=multiline_command)
        return statement
def get_command_arg_list(self, command_name: str, to_parse: Union[Statement, str],
preserve_quotes: bool) -> Tuple[Statement, List[str]]:
"""
Convenience method used by the argument parsing decorators.
Retrieves just the arguments being passed to their ``do_*`` methods as a list.
:param command_name: name of the command being run
:param to_parse: what is being passed to the ``do_*`` method. It can be one of two types:
1. An already parsed :class:`~cmd2.Statement`
2. An argument string in cases where a ``do_*`` method is
explicitly called. Calling ``do_help('alias create')`` would
cause ``to_parse`` to be 'alias create'.
In this case, the string will be converted to a
:class:`~cmd2.Statement` and returned along with
the argument list.
:param preserve_quotes: if ``True``, then quotes will not be stripped from
the arguments
:return: A tuple containing the :class:`~cmd2.Statement` and a list of
strings representing the arguments
"""
# Check if to_parse needs to be converted to a Statement
if not isinstance(to_parse, Statement):
to_parse = self.parse(command_name + ' ' + to_parse)
if preserve_quotes:
return to_parse, to_parse.arg_list
else:
return to_parse, to_parse.argv[1:]
    def _expand(self, line: str) -> str:
        """Expand aliases and shortcuts.

        :param line: the raw input line
        :return: the line with the first word replaced by any matching alias
                 expansion(s), and a leading shortcut replaced by its expansion
        """
        # Make a copy of aliases so we can keep track of what aliases have been resolved to avoid an infinite loop
        remaining_aliases = list(self.aliases.keys())
        keep_expanding = bool(remaining_aliases)
        while keep_expanding:
            keep_expanding = False
            # apply our regex to line
            match = self._command_pattern.search(line)
            if match:
                # we got a match, extract the command
                command = match.group(1)
                # Check if this command matches an alias that wasn't already processed
                if command in remaining_aliases:
                    # rebuild line with the expanded alias
                    line = self.aliases[command] + match.group(2) + line[match.end(2):]
                    # each alias is expanded at most once, so chains of
                    # aliases resolve but cycles terminate
                    remaining_aliases.remove(command)
                    keep_expanding = bool(remaining_aliases)
        # expand shortcuts
        for (shortcut, expansion) in self.shortcuts:
            if line.startswith(shortcut):
                # If the next character after the shortcut isn't a space, then insert one
                shortcut_len = len(shortcut)
                if len(line) == shortcut_len or line[shortcut_len] != ' ':
                    expansion += ' '
                # Expand the shortcut
                line = line.replace(shortcut, expansion, 1)
                break
        return line
@staticmethod
def _command_and_args(tokens: List[str]) -> Tuple[str, str]:
"""Given a list of tokens, return a tuple of the command
and the args as a string.
"""
command = ''
args = ''
if tokens:
command = tokens[0]
if len(tokens) > 1:
args = ' '.join(tokens[1:])
return command, args
    def split_on_punctuation(self, tokens: List[str]) -> List[str]:
        """Further splits tokens from a command line using punctuation characters.
        Punctuation characters are treated as word breaks when they are in
        unquoted strings. Each run of punctuation characters is treated as a
        single token.
        :param tokens: the tokens as parsed by shlex
        :return: a new list of tokens, further split using punctuation
        """
        # punctuation is everything that can terminate or redirect a command
        punctuation = []
        punctuation.extend(self.terminators)
        punctuation.extend(constants.REDIRECTION_CHARS)
        punctuated_tokens = []
        for cur_initial_token in tokens:
            # Save tokens up to 1 character in length or quoted tokens. No need to parse these.
            if len(cur_initial_token) <= 1 or cur_initial_token[0] in constants.QUOTES:
                punctuated_tokens.append(cur_initial_token)
                continue
            # Iterate over each character in this token
            cur_index = 0
            cur_char = cur_initial_token[cur_index]
            # Keep track of the token we are building
            new_token = ''
            # alternate between consuming a run of non-punctuation characters
            # and a run of one repeated punctuation character; each run becomes
            # its own output token
            while True:
                if cur_char not in punctuation:
                    # Keep appending to new_token until we hit a punctuation char
                    while cur_char not in punctuation:
                        new_token += cur_char
                        cur_index += 1
                        if cur_index < len(cur_initial_token):
                            cur_char = cur_initial_token[cur_index]
                        else:
                            break
                else:
                    cur_punc = cur_char
                    # Keep appending to new_token until we hit something other than cur_punc
                    while cur_char == cur_punc:
                        new_token += cur_char
                        cur_index += 1
                        if cur_index < len(cur_initial_token):
                            cur_char = cur_initial_token[cur_index]
                        else:
                            break
                # Save the new token
                punctuated_tokens.append(new_token)
                new_token = ''
                # Check if we've viewed all characters
                if cur_index >= len(cur_initial_token):
                    break
        return punctuated_tokens
| 38.911234 | 118 | 0.605061 |
acf9b4909659ca7a663d851420220bac650e185b | 14,271 | py | Python | custom_components/xiaomi_cloud_map_extractor/xiaomi/map_data_parser.py | GuyKh/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | 65e0a905fdb6048facdb34cbec40b7ece4fef991 | [
"MIT"
] | 697 | 2020-09-30T08:35:58.000Z | 2022-03-31T17:14:20.000Z | custom_components/xiaomi_cloud_map_extractor/xiaomi/map_data_parser.py | Neonox31/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | 7bc868278f74fdaba475987dd5fdf485e430fe53 | [
"MIT"
] | 216 | 2020-10-01T12:05:24.000Z | 2022-03-31T11:35:46.000Z | custom_components/xiaomi_cloud_map_extractor/xiaomi/map_data_parser.py | Neonox31/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | 7bc868278f74fdaba475987dd5fdf485e430fe53 | [
"MIT"
] | 92 | 2020-09-30T18:10:19.000Z | 2022-03-24T12:15:18.000Z | import logging
from custom_components.xiaomi_cloud_map_extractor.common.map_data import *
from custom_components.xiaomi_cloud_map_extractor.common.map_data_parser import MapDataParser
from custom_components.xiaomi_cloud_map_extractor.xiaomi.image_handler import ImageHandlerXiaomi
_LOGGER = logging.getLogger(__name__)
class MapDataParserXiaomi(MapDataParser):
    """Parser for the binary map blob produced by Xiaomi (Roborock) vacuums."""
    # Block type identifiers found in each block header of the map blob.
    CHARGER = 1
    IMAGE = 2
    PATH = 3
    GOTO_PATH = 4
    GOTO_PREDICTED_PATH = 5
    CURRENTLY_CLEANED_ZONES = 6
    GOTO_TARGET = 7
    ROBOT_POSITION = 8
    NO_GO_AREAS = 9
    VIRTUAL_WALLS = 10
    BLOCKS = 11
    NO_MOPPING_AREAS = 12
    OBSTACLES = 13
    IGNORED_OBSTACLES = 14
    OBSTACLES_WITH_PHOTO = 15
    IGNORED_OBSTACLES_WITH_PHOTO = 16
    CARPET_MAP = 17
    # DIGEST marks the final (checksum) block of a valid map.
    DIGEST = 1024
    SIZE = 1024
    # Maps the numeric obstacle type from OBSTACLES* blocks to a
    # human-readable description (values are emitted as attributes, so
    # they must stay byte-for-byte stable).
    KNOWN_OBSTACLE_TYPES = {
        0: 'cable',
        2: 'shoes',
        3: 'poop',
        5: 'extension cord',
        9: 'weighting scale',
        10: 'clothes'
    }
    @staticmethod
    def parse(raw: bytes, colors, drawables, texts, sizes, image_config) -> MapData:
        """Parse a raw Xiaomi map blob into a MapData object.

        Reads the map header, then walks the sequence of typed blocks that
        follow it, dispatching each block to the matching parser. Finally
        draws the configured elements onto the decoded image.
        """
        map_data = MapData(25500, 1000)
        # header layout: int16 header length at 0x02, versions at 0x08/0x0A,
        # map index/sequence at 0x0C/0x10
        map_header_length = MapDataParserXiaomi.get_int16(raw, 0x02)
        map_data.major_version = MapDataParserXiaomi.get_int16(raw, 0x08)
        map_data.minor_version = MapDataParserXiaomi.get_int16(raw, 0x0A)
        map_data.map_index = MapDataParserXiaomi.get_int32(raw, 0x0C)
        map_data.map_sequence = MapDataParserXiaomi.get_int32(raw, 0x10)
        block_start_position = map_header_length
        # remembered so the room under the vacuum can be looked up later
        img_start = None
        while block_start_position < len(raw):
            # each block: int16 type at 0x00, int16 header length at 0x02,
            # int32 data length at 0x04, data follows the header
            block_header_length = MapDataParserXiaomi.get_int16(raw, block_start_position + 0x02)
            header = MapDataParserXiaomi.get_bytes(raw, block_start_position, block_header_length)
            block_type = MapDataParserXiaomi.get_int16(header, 0x00)
            block_data_length = MapDataParserXiaomi.get_int32(header, 0x04)
            block_data_start = block_start_position + block_header_length
            data = MapDataParserXiaomi.get_bytes(raw, block_data_start, block_data_length)
            if block_type == MapDataParserXiaomi.CHARGER:
                map_data.charger = MapDataParserXiaomi.parse_charger(block_start_position, raw)
            elif block_type == MapDataParserXiaomi.IMAGE:
                img_start = block_start_position
                image, rooms = MapDataParserXiaomi.parse_image(block_data_length, block_header_length, data, header,
                                                               colors, image_config)
                map_data.image = image
                map_data.rooms = rooms
            elif block_type == MapDataParserXiaomi.ROBOT_POSITION:
                map_data.vacuum_position = MapDataParserXiaomi.parse_vacuum_position(block_data_length, data)
            elif block_type == MapDataParserXiaomi.PATH:
                map_data.path = MapDataParserXiaomi.parse_path(block_start_position, header, raw)
            elif block_type == MapDataParserXiaomi.GOTO_PATH:
                map_data.goto_path = MapDataParserXiaomi.parse_path(block_start_position, header, raw)
            elif block_type == MapDataParserXiaomi.GOTO_PREDICTED_PATH:
                map_data.predicted_path = MapDataParserXiaomi.parse_path(block_start_position, header, raw)
            elif block_type == MapDataParserXiaomi.CURRENTLY_CLEANED_ZONES:
                map_data.zones = MapDataParserXiaomi.parse_zones(data, header)
            elif block_type == MapDataParserXiaomi.GOTO_TARGET:
                map_data.goto = MapDataParserXiaomi.parse_goto_target(data)
            elif block_type == MapDataParserXiaomi.DIGEST:
                # a digest block marks the blob as complete/valid
                map_data.is_valid = True
            elif block_type == MapDataParserXiaomi.VIRTUAL_WALLS:
                map_data.walls = MapDataParserXiaomi.parse_walls(data, header)
            elif block_type == MapDataParserXiaomi.NO_GO_AREAS:
                map_data.no_go_areas = MapDataParserXiaomi.parse_area(header, data)
            elif block_type == MapDataParserXiaomi.NO_MOPPING_AREAS:
                map_data.no_mopping_areas = MapDataParserXiaomi.parse_area(header, data)
            elif block_type == MapDataParserXiaomi.OBSTACLES:
                map_data.obstacles = MapDataParserXiaomi.parse_obstacles(data, header)
            elif block_type == MapDataParserXiaomi.IGNORED_OBSTACLES:
                map_data.ignored_obstacles = MapDataParserXiaomi.parse_obstacles(data, header)
            elif block_type == MapDataParserXiaomi.OBSTACLES_WITH_PHOTO:
                map_data.obstacles_with_photo = MapDataParserXiaomi.parse_obstacles(data, header)
            elif block_type == MapDataParserXiaomi.IGNORED_OBSTACLES_WITH_PHOTO:
                map_data.ignored_obstacles_with_photo = MapDataParserXiaomi.parse_obstacles(data, header)
            elif block_type == MapDataParserXiaomi.BLOCKS:
                block_pairs = MapDataParserXiaomi.get_int16(header, 0x08)
                map_data.blocks = MapDataParserXiaomi.get_bytes(data, 0, block_pairs)
            # advance to the next block; the int8 at header offset 2 is added
            # here as in the upstream format handling
            block_start_position = block_start_position + block_data_length + MapDataParserXiaomi.get_int8(header, 2)
        if not map_data.image.is_empty:
            MapDataParserXiaomi.draw_elements(colors, drawables, sizes, map_data, image_config)
            if len(map_data.rooms) > 0 and map_data.vacuum_position is not None:
                map_data.vacuum_room = MapDataParserXiaomi.get_current_vacuum_room(img_start, raw,
                                                                                   map_data.vacuum_position)
            ImageHandlerXiaomi.rotate(map_data.image)
            ImageHandlerXiaomi.draw_texts(map_data.image, texts)
        return map_data
    @staticmethod
    def map_to_image(p: Point):
        """Convert a point from map coordinates (mm) to image pixel coordinates."""
        return Point(p.x / MM, p.y / MM)
    @staticmethod
    def image_to_map(x):
        """Convert an image pixel coordinate back to map coordinates (mm)."""
        return x * MM
    @staticmethod
    def get_current_vacuum_room(block_start_position, raw, vacuum_position):
        """Return the room number at the vacuum's position.

        Re-reads the IMAGE block (``block_start_position`` is its start in
        ``raw``) and looks up the pixel under the vacuum.
        """
        block_header_length = MapDataParserXiaomi.get_int16(raw, block_start_position + 0x02)
        header = MapDataParserXiaomi.get_bytes(raw, block_start_position, block_header_length)
        block_data_length = MapDataParserXiaomi.get_int32(header, 0x04)
        block_data_start = block_start_position + block_header_length
        data = MapDataParserXiaomi.get_bytes(raw, block_data_start, block_data_length)
        # image geometry is stored at fixed offsets from the end of the header
        image_top = MapDataParserXiaomi.get_int32(header, block_header_length - 16)
        image_left = MapDataParserXiaomi.get_int32(header, block_header_length - 12)
        image_width = MapDataParserXiaomi.get_int32(header, block_header_length - 4)
        # translate the mm position into pixel coordinates within the image
        p = MapDataParserXiaomi.map_to_image(vacuum_position)
        room = ImageHandlerXiaomi.get_room_at_pixel(data, image_width, round(p.x - image_left), round(p.y - image_top))
        return room
    @staticmethod
    def parse_image(block_data_length, block_header_length, data, header, colors, image_config):
        """Decode the IMAGE block into an ImageData object plus room bounds.

        Returns a tuple of (ImageData, dict of room number -> Room with
        bounding box converted to map coordinates).
        """
        image_size = block_data_length
        # image geometry is stored at fixed offsets from the end of the header
        image_top = MapDataParserXiaomi.get_int32(header, block_header_length - 16)
        image_left = MapDataParserXiaomi.get_int32(header, block_header_length - 12)
        image_height = MapDataParserXiaomi.get_int32(header, block_header_length - 8)
        image_width = MapDataParserXiaomi.get_int32(header, block_header_length - 4)
        # disable trimming (mutating the shared image_config) if it would
        # leave the image smaller than the allowed minimum
        if image_width \
                - image_width * (image_config[CONF_TRIM][CONF_LEFT] + image_config[CONF_TRIM][CONF_RIGHT]) / 100 \
                < MINIMAL_IMAGE_WIDTH:
            image_config[CONF_TRIM][CONF_LEFT] = 0
            image_config[CONF_TRIM][CONF_RIGHT] = 0
        if image_height \
                - image_height * (image_config[CONF_TRIM][CONF_TOP] + image_config[CONF_TRIM][CONF_BOTTOM]) / 100 \
                < MINIMAL_IMAGE_HEIGHT:
            image_config[CONF_TRIM][CONF_TOP] = 0
            image_config[CONF_TRIM][CONF_BOTTOM] = 0
        image, rooms_raw = ImageHandlerXiaomi.parse(data, image_width, image_height, colors, image_config)
        # room bounds come back in pixel coordinates; convert to map (mm)
        rooms = {}
        for number, room in rooms_raw.items():
            rooms[number] = Room(number, MapDataParserXiaomi.image_to_map(room[0] + image_left),
                                 MapDataParserXiaomi.image_to_map(room[1] + image_top),
                                 MapDataParserXiaomi.image_to_map(room[2] + image_left),
                                 MapDataParserXiaomi.image_to_map(room[3] + image_top))
        return ImageData(image_size,
                         image_top,
                         image_left,
                         image_height,
                         image_width,
                         image_config,
                         image, MapDataParserXiaomi.map_to_image), rooms
    @staticmethod
    def parse_goto_target(data):
        """Decode the GOTO_TARGET block into a Point (two int16 coordinates)."""
        x = MapDataParserXiaomi.get_int16(data, 0x00)
        y = MapDataParserXiaomi.get_int16(data, 0x02)
        return Point(x, y)
    @staticmethod
    def parse_vacuum_position(block_data_length, data):
        """Decode the ROBOT_POSITION block into a Point.

        The angle is only present when the block carries more than 8 bytes.
        """
        x = MapDataParserXiaomi.get_int32(data, 0x00)
        y = MapDataParserXiaomi.get_int32(data, 0x04)
        a = None
        if block_data_length > 8:
            a = MapDataParserXiaomi.get_int32(data, 0x08)
        return Point(x, y, a)
    @staticmethod
    def parse_charger(block_start_position, raw):
        """Decode the CHARGER block: two int32 coordinates inside the block header."""
        x = MapDataParserXiaomi.get_int32(raw, block_start_position + 0x08)
        y = MapDataParserXiaomi.get_int32(raw, block_start_position + 0x0C)
        return Point(x, y)
@staticmethod
def parse_walls(data, header):
wall_pairs = MapDataParserXiaomi.get_int16(header, 0x08)
walls = []
for wall_start in range(0, wall_pairs * 8, 8):
x0 = MapDataParserXiaomi.get_int16(data, wall_start + 0)
y0 = MapDataParserXiaomi.get_int16(data, wall_start + 2)
x1 = MapDataParserXiaomi.get_int16(data, wall_start + 4)
y1 = MapDataParserXiaomi.get_int16(data, wall_start + 6)
walls.append(Wall(x0, y0, x1, y1))
return walls
    @staticmethod
    def parse_obstacles(data, header):
        """Decode an OBSTACLES* block into a list of Obstacle objects.

        Record size is inferred from the payload length, since the layout
        varies between firmware versions (position only, +type, +confidence,
        +photo name).
        """
        obstacle_pairs = MapDataParserXiaomi.get_int16(header, 0x08)
        obstacles = []
        if obstacle_pairs == 0:
            return obstacles
        obstacle_size = int(len(data) / obstacle_pairs)
        for obstacle_start in range(0, obstacle_pairs * obstacle_size, obstacle_size):
            x = MapDataParserXiaomi.get_int16(data, obstacle_start + 0)
            y = MapDataParserXiaomi.get_int16(data, obstacle_start + 2)
            details = {}
            # 6+ byte records carry a numeric obstacle type
            if obstacle_size >= 6:
                details[ATTR_TYPE] = MapDataParserXiaomi.get_int16(data, obstacle_start + 4)
                if details[ATTR_TYPE] in MapDataParserXiaomi.KNOWN_OBSTACLE_TYPES:
                    details[ATTR_DESCRIPTION] = MapDataParserXiaomi.KNOWN_OBSTACLE_TYPES[details[ATTR_TYPE]]
            # 10+ byte records carry two values forming a confidence ratio
            if obstacle_size >= 10:
                u1 = MapDataParserXiaomi.get_int16(data, obstacle_start + 6)
                u2 = MapDataParserXiaomi.get_int16(data, obstacle_start + 8)
                details[ATTR_CONFIDENCE_LEVEL] = 0 if u2 == 0 else u1 * 10.0 / u2
            # 28-byte records embed a 16-byte ASCII photo file name
            if obstacle_size == 28 and (data[obstacle_start + 12] & 0xFF) > 0:
                txt = MapDataParserXiaomi.get_bytes(data, obstacle_start + 12, 16)
                details[ATTR_PHOTO_NAME] = txt.decode('ascii')
            obstacles.append(Obstacle(x, y, details))
        return obstacles
@staticmethod
def parse_zones(data, header):
zone_pairs = MapDataParserXiaomi.get_int16(header, 0x08)
zones = []
for zone_start in range(0, zone_pairs * 8, 8):
x0 = MapDataParserXiaomi.get_int16(data, zone_start + 0)
y0 = MapDataParserXiaomi.get_int16(data, zone_start + 2)
x1 = MapDataParserXiaomi.get_int16(data, zone_start + 4)
y1 = MapDataParserXiaomi.get_int16(data, zone_start + 6)
zones.append(Zone(x0, y0, x1, y1))
return zones
    @staticmethod
    def parse_path(block_start_position, header, raw):
        """Decode a PATH / GOTO_PATH / GOTO_PREDICTED_PATH block into a Path.

        Point data (int16 x,y pairs) starts 0x14 bytes after the block start
        and spans the number of bytes given by the header's length field.
        """
        path_points = []
        end_pos = MapDataParserXiaomi.get_int32(header, 0x04)
        point_length = MapDataParserXiaomi.get_int32(header, 0x08)
        point_size = MapDataParserXiaomi.get_int32(header, 0x0C)
        angle = MapDataParserXiaomi.get_int32(header, 0x10)
        start_pos = block_start_position + 0x14
        for pos in range(start_pos, start_pos + end_pos, 4):
            x = MapDataParserXiaomi.get_int16(raw, pos)
            y = MapDataParserXiaomi.get_int16(raw, pos + 2)
            path_points.append(Point(x, y))
        return Path(point_length, point_size, angle, path_points)
    @staticmethod
    def parse_area(header, data):
        """Decode a NO_GO_AREAS / NO_MOPPING_AREAS block into a list of Areas.

        Each 16-byte record is four corner points (x, y) as little-endian int16.
        """
        area_pairs = MapDataParserXiaomi.get_int16(header, 0x08)
        areas = []
        for area_start in range(0, area_pairs * 16, 16):
            x0 = MapDataParserXiaomi.get_int16(data, area_start + 0)
            y0 = MapDataParserXiaomi.get_int16(data, area_start + 2)
            x1 = MapDataParserXiaomi.get_int16(data, area_start + 4)
            y1 = MapDataParserXiaomi.get_int16(data, area_start + 6)
            x2 = MapDataParserXiaomi.get_int16(data, area_start + 8)
            y2 = MapDataParserXiaomi.get_int16(data, area_start + 10)
            x3 = MapDataParserXiaomi.get_int16(data, area_start + 12)
            y3 = MapDataParserXiaomi.get_int16(data, area_start + 14)
            areas.append(Area(x0, y0, x1, y1, x2, y2, x3, y3))
        return areas
@staticmethod
def get_bytes(data: bytes, start_index: int, size: int):
return data[start_index: start_index + size]
@staticmethod
def get_int8(data: bytes, address: int):
return data[address] & 0xFF
@staticmethod
def get_int16(data: bytes, address: int):
return \
((data[address + 0] << 0) & 0xFF) | \
((data[address + 1] << 8) & 0xFFFF)
@staticmethod
def get_int32(data: bytes, address: int):
return \
((data[address + 0] << 0) & 0xFF) | \
((data[address + 1] << 8) & 0xFFFF) | \
((data[address + 2] << 16) & 0xFFFFFF) | \
((data[address + 3] << 24) & 0xFFFFFFFF)
| 50.967857 | 119 | 0.656156 |
acf9b5b76bc3cb4643d6d173645f8929867c0c68 | 1,963 | py | Python | app/cpuprofile/chrome.py | DeepInThought/flamescope | 870eebe405b95e3425613f623c776295ddc6b7f6 | [
"Apache-2.0"
] | 1 | 2019-06-10T19:49:33.000Z | 2019-06-10T19:49:33.000Z | app/cpuprofile/chrome.py | applevoice/flamescope | 010a8bc9300bcceefc5b0eff36ffe898574bfd76 | [
"Apache-2.0"
] | 87 | 2019-05-29T11:51:26.000Z | 2021-06-25T15:20:37.000Z | app/cpuprofile/chrome.py | hgzhu-stuff/flamescope | 91bc199f7a8ade7c2ec2f27532246b4c910ddd2f | [
"Apache-2.0"
] | null | null | null | # This file is part of FlameScope, a performance analysis tool created by the
# Netflix cloud performance team. See:
#
# https://github.com/Netflix/flamescope
#
# Copyright 2018 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_cpuprofiles(chrome_profile):
    """Extract CPU profile objects from a list of Chrome trace events.

    Supports both the legacy single-event format (an instant 'CpuProfile'
    event carrying the whole profile) and the newer chunked format (a
    'Profile' metadata event followed by 'ProfileChunk' events whose
    nodes/samples/timeDeltas are merged into one profile object).

    :param chrome_profile: iterable of trace event dicts (each with 'ph',
        'name', and 'args' keys)
    :return: list of cpuProfile dicts
    """
    profile_events = []
    open_chunked_profile = None
    for row in chrome_profile:
        if row['ph'] == 'I' and row['name'] == 'CpuProfile':
            # older chrome profiles embed the entire profile in one event
            profile_events.append(row['args']['data']['cpuProfile'])
        elif row['ph'] == 'P' and row['name'] == 'Profile':
            # a new chunked profile starts; flush any previously open one
            if open_chunked_profile is not None:
                profile_events.append(open_chunked_profile)
            open_chunked_profile = {
                'nodes': [],
                'samples': [],
                'timeDeltas': [],
                'startTime': row['args']['data']['startTime']
            }
        elif row['ph'] == 'P' and row['name'] == 'ProfileChunk':
            if open_chunked_profile is None:
                # chunk seen before its 'Profile' header; skip rather than crash
                continue
            data = row['args']['data']
            cpu_profile = data.get('cpuProfile', {})
            # any of these keys may be absent in a given chunk, so default to
            # empty lists instead of raising KeyError
            open_chunked_profile['nodes'].extend(cpu_profile.get('nodes', []))
            open_chunked_profile['samples'].extend(cpu_profile.get('samples', []))
            open_chunked_profile['timeDeltas'].extend(data.get('timeDeltas', []))
    # flush the last open chunked profile, if any
    if open_chunked_profile is not None:
        profile_events.append(open_chunked_profile)
    return profile_events
| 40.895833 | 96 | 0.631177 |
acf9b5e2171bb2c080559b9eab9f78c88a640e83 | 3,099 | py | Python | python/sfxcollectd/config.py | manang-splunk/signalfx-agent | 079998171a9e770383fffef9b60c24c80081e1a4 | [
"Apache-2.0"
] | null | null | null | python/sfxcollectd/config.py | manang-splunk/signalfx-agent | 079998171a9e770383fffef9b60c24c80081e1a4 | [
"Apache-2.0"
] | 55 | 2022-01-24T11:40:41.000Z | 2022-03-31T11:31:33.000Z | python/sfxcollectd/config.py | manang-splunk/signalfx-agent | 079998171a9e770383fffef9b60c24c80081e1a4 | [
"Apache-2.0"
] | null | null | null | """
Logic for converting from the agent config format to the Collectd-python config
object format
"""
import logging
logger = logging.getLogger(__name__)
class Config(object):  # pylint: disable=too-few-public-methods
    """
    Dummy class that we use to put config that conforms to the collectd-python
    Config class

    See https://collectd.org/documentation/manpages/collectd-python.5.shtml#config
    """

    def __init__(self, root=None, key=None, values=None, children=None):
        self.root = root          # parent Config node (None for the root)
        self.key = key            # option name
        self.values = values      # sequence of option values
        self.children = children  # nested Config nodes

    # pylint:disable=too-many-branches
    @classmethod
    def from_monitor_config(cls, monitor_plugin_config):
        """
        Converts config as expressed in the monitor to the Collectd Config
        interface.

        Empty or None values are dropped with a debug message; values of an
        unconvertible type are logged as errors and skipped.  Returns the
        root Config node whose children are the converted options.
        """
        assert isinstance(monitor_plugin_config, dict)

        conf = cls(root=None)
        conf.children = []
        for key, val in monitor_plugin_config.items():
            values = None
            children = None
            if val is None:
                # Use the module-level logger (defined at the top of this
                # file) instead of the root-logger helpers so records carry
                # this module's name and honour its configuration.
                logger.debug("dropping configuration %s because its value is None", key)
                continue
            if isinstance(val, (tuple, list)):
                if not val:
                    logger.debug("dropping configuration %s because its value is an empty list or tuple", key)
                    continue
                values = val
            elif isinstance(val, str):  # pylint: disable=undefined-variable
                if not val:
                    logger.debug("dropping configuration %s because its value is an empty string", key)
                    continue
                values = (val,)
            elif isinstance(val, bytes):
                if not val:
                    logger.debug("dropping configuration %s because its value is an empty string", key)
                    continue
                values = (val.decode("utf-8"),)
            elif isinstance(val, (int, float, bool)):
                values = (val,)
            elif isinstance(val, dict):
                if not val:
                    logger.debug("dropping configuration %s because its value is an empty dictionary", key)
                    continue
                if "#flatten" in val and "values" in val:
                    # Special form: repeat the same key once per listed value
                    # instead of nesting a block.
                    conf.children += [
                        cls(root=conf, key=key, values=item if isinstance(item, (list, tuple)) else [item], children=[])
                        for item in val.get("values") or []
                        if item is not None
                    ]
                    continue
                dict_conf = cls.from_monitor_config(val)
                children = dict_conf.children
                values = dict_conf.values
            else:
                logger.error(
                    "Cannot convert monitor config to collectd config: %s: %s (type: %s)", key, val, type(val)
                )
                continue
            conf.children.append(cls(root=conf, key=key, values=values, children=children))
        return conf
| 37.792683 | 120 | 0.549532 |
acf9b5ecf870e80317db2c489c33fee1d144f9ab | 13,542 | py | Python | py3status/modules/volume_status.py | weberval/py3status | 77751cfe777d3ceeff24e4a8554be439b94d515c | [
"BSD-3-Clause"
] | 876 | 2015-01-02T17:34:09.000Z | 2022-03-31T06:25:29.000Z | py3status/modules/volume_status.py | weberval/py3status | 77751cfe777d3ceeff24e4a8554be439b94d515c | [
"BSD-3-Clause"
] | 1,832 | 2015-01-04T18:02:33.000Z | 2022-03-31T14:07:56.000Z | py3status/modules/volume_status.py | weberval/py3status | 77751cfe777d3ceeff24e4a8554be439b94d515c | [
"BSD-3-Clause"
] | 402 | 2015-01-13T19:54:23.000Z | 2022-03-14T16:13:30.000Z | """
Volume control.
Configuration parameters:
blocks: a string, where each character represents a volume level
(default "_▁▂▃▄▅▆▇█")
button_down: button to decrease volume (default 5)
button_mute: button to toggle mute (default 1)
button_up: button to increase volume (default 4)
cache_timeout: how often we refresh this module in seconds.
(default 10)
card: Card to use. amixer supports this. (default None)
channel: channel to track. Default value is backend dependent.
(default None)
command: Choose between "amixer", "pamixer" or "pactl".
If None, try to guess based on available commands.
(default None)
device: Device to use. Defaults value is backend dependent.
"aplay -L", "pactl list sinks short", "pamixer --list-sinks"
(default None)
format: Format of the output.
(default '[\\?if=is_input 😮|♪]: {percentage}%')
format_muted: Format of the output when the volume is muted.
(default '[\\?if=is_input 😶|♪]: muted')
is_input: Is this an input device or an output device?
(default False)
max_volume: Allow the volume to be increased past 100% if available.
pactl and pamixer supports this. (default 120)
thresholds: Threshold for percent volume.
(default [(0, 'bad'), (20, 'degraded'), (50, 'good')])
volume_delta: Percentage amount that the volume is increased or
decreased by when volume buttons pressed.
(default 5)
Format placeholders:
{icon} Character representing the volume level,
as defined by the 'blocks'
{percentage} Percentage volume
Color options:
color_muted: Volume is muted, if not supplied color_bad is used
if set to `None` then the threshold color will be used.
Requires:
alsa-utils: an alternative implementation of linux sound support
pamixer: pulseaudio command-line mixer like amixer
Notes:
If you are changing volume state by external scripts etc and
want to refresh the module quicker than the i3status interval,
send a USR1 signal to py3status in the keybinding.
Example: killall -s USR1 py3status
Examples:
```
# Set thresholds to rainbow colors
volume_status {
thresholds = [
(0, "#FF0000"),
(10, "#E2571E"),
(20, "#FF7F00"),
(30, "#FFFF00"),
(40, "#00FF00"),
(50, "#96BF33"),
(60, "#0000FF"),
(70, "#4B0082"),
(80, "#8B00FF"),
(90, "#FFFFFF")
]
}
```
@author <Jan T> <jans.tuomi@gmail.com>
@license BSD
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': u'\u266a: 95%'}
mute
{'color': '#FF0000', 'full_text': u'\u266a: muted'}
"""
import re
import math
from py3status.exceptions import CommandError
# User-facing error messages raised from Py3status.post_config_hook().
STRING_ERROR = "invalid command `{}`"
STRING_NOT_AVAILABLE = "no available binary"
COMMAND_NOT_INSTALLED = "command `{}` not installed"
class Audio:
    """Common base for the amixer/pamixer/pactl backends.

    Copies the relevant configuration off the owning Py3status module and
    exposes thin wrappers around its ``py3`` command helpers.
    """

    def __init__(self, parent):
        self.parent = parent
        # mirror the module configuration the backends care about
        for attr in ("card", "channel", "device", "is_input", "max_volume"):
            setattr(self, attr, getattr(parent, attr))
        self.setup(parent)

    def setup(self, parent):
        """Backend-specific initialisation; concrete backends must override."""
        raise NotImplementedError

    def run_cmd(self, cmd):
        """Run *cmd* via the py3 helper, returning its exit status."""
        return self.parent.py3.command_run(cmd)

    def command_output(self, cmd):
        """Run *cmd* via the py3 helper and return its captured output."""
        return self.parent.py3.command_output(cmd)
class Amixer(Audio):
    """ALSA backend driven by the ``amixer`` command line tool."""

    def setup(self, parent):
        """Fill in ALSA defaults and pre-build the amixer command lines."""
        # ALSA defaults: card 0, the "default" device, and the conventional
        # capture/playback channel names.
        if self.card is None:
            self.card = "0"
        if self.channel is None:
            self.channel = "Capture" if self.is_input else "Master"
        if self.device is None:
            self.device = "default"
        selector = ["-c", self.card, "-D", self.device]
        self.cmd = ["amixer", "-q"] + selector + ["sset", self.channel]
        self.get_volume_cmd = ["amixer", "-M"] + selector + ["sget", self.channel]

    def get_volume(self):
        """Return (percentage-string, muted-bool) parsed from amixer output."""
        output = self.command_output(self.get_volume_cmd)
        # amixer prints e.g. "... [42%] ... [on]"
        perc, switch = re.search(r"\[(\d{1,3})%\].*\[(\w{2,3})\]", output).groups()
        # the switch state should be "on" or "off"; anything else means unmuted
        muted = (switch == "off") if switch in ("on", "off") else False
        return perc, muted

    def volume_up(self, delta):
        """Raise the volume by *delta* percent."""
        self.run_cmd(self.cmd + [f"{delta}%+"])

    def volume_down(self, delta):
        """Lower the volume by *delta* percent."""
        self.run_cmd(self.cmd + [f"{delta}%-"])

    def toggle_mute(self):
        """Flip the mute switch on the channel."""
        self.run_cmd(self.cmd + ["toggle"])
class Pamixer(Audio):
    """PulseAudio backend driven by the ``pamixer`` command line tool."""

    def setup(self, parent):
        """Build the base pamixer command for the configured device."""
        direction = "--source" if self.is_input else "--sink"
        self.cmd = ["pamixer", "--allow-boost", direction, self.device or "0"]

    def get_volume(self):
        """Return (percentage-string, muted-bool); (None, None) on parse failure."""
        try:
            output = self.command_output(self.cmd + ["--get-mute", "--get-volume"])
        except CommandError as err:
            # pamixer exits non-zero at zero percent; its output is still
            # usable (see issue #1135).
            output = err.output
        fields = output.split()
        if len(fields) == 2:
            muted = fields[0] == "true"
            perc = fields[1]
        else:
            perc, muted = None, None
        return perc, muted

    def volume_up(self, delta):
        """Raise the volume by *delta* percent, clamped to max_volume."""
        perc, _ = self.get_volume()
        # int(None) raises TypeError here, which on_click() deliberately swallows
        if int(perc) + delta >= self.max_volume:
            extra = ["--set-volume", str(self.max_volume)]
        else:
            extra = ["--increase", str(delta)]
        self.run_cmd(self.cmd + extra)

    def volume_down(self, delta):
        """Lower the volume by *delta* percent."""
        self.run_cmd(self.cmd + ["--decrease", str(delta)])

    def toggle_mute(self):
        """Flip the mute state."""
        self.run_cmd(self.cmd + ["--toggle-mute"])
class Pactl(Audio):
    """PulseAudio backend driven directly by ``pactl``."""

    def setup(self, parent):
        """Resolve the device to track and pre-compile the volume regex."""
        # get available device number if not specified
        self.detected_devices = {}
        self.device_type = "source" if self.is_input else "sink"
        self.device_type_pl = self.device_type + "s"
        self.device_type_cap = self.device_type[0].upper() + self.device_type[1:]
        self.use_default_device = self.device is None
        if self.use_default_device:
            self.device = self.get_default_device()
        else:
            # if a device name was present but is used to match multiple
            # possible devices sharing the same name pattern we allow ourselves
            # to override the device name
            self.set_selected_device()
        self.update_device()

    def update_device(self):
        """Rebuild the regex that extracts Mute/Volume for self.device.

        The device may appear in `pactl list` either by number ("#N") or by
        name ("Name: ..."); the pattern matches both forms.
        """
        self.re_volume = re.compile(
            r"{} (?:#{}|.*?Name: {}).*?Mute: (\w{{2,3}}).*?Volume:.*?(\d{{1,3}})%".format(
                self.device_type_cap, self.device, self.device
            ),
            re.M | re.DOTALL,
        )

    def get_default_device(self):
        """Return the numeric id of the default sink/source.

        Raises RuntimeError when no default device can be resolved.
        """
        device_id = None

        # Find the default device for the device type
        default_dev_pattern = re.compile(fr"^Default {self.device_type_cap}: (.*)$")
        output = self.command_output(["pactl", "info"])
        for info_line in output.splitlines():
            default_dev_match = default_dev_pattern.match(info_line)
            if default_dev_match is not None:
                device_id = default_dev_match.groups()[0]
                break

        # with the long gross id, find the associated number
        if device_id is not None:
            for d_number, d_id in self.get_current_devices().items():
                if d_id == device_id:
                    return d_number

        raise RuntimeError(
            "Failed to find default {} device. Looked for {}".format(
                "input" if self.is_input else "output", device_id
            )
        )

    def set_selected_device(self):
        """Replace a partial device name with the full matching device name."""
        current_devices = self.get_current_devices()
        # exact match: nothing to do
        if self.device in current_devices.values():
            return
        # otherwise take the first device whose name contains the pattern
        for device_name in current_devices.values():
            if self.device in device_name:
                self.parent.py3.log(f"device {self.device} detected as {device_name}")
                self.device = device_name
                break

    def get_current_devices(self):
        """Return {device-number: device-name} from `pactl list short`.

        Logs the device list whenever it changes from the last call.
        """
        current_devices = {}
        output = self.command_output(["pactl", "list", "short", self.device_type_pl])
        for line in output.splitlines():
            parts = line.split()
            if len(parts) < 2:
                continue
            current_devices[parts[0]] = parts[1]
        if current_devices != self.detected_devices:
            self.detected_devices = current_devices
            self.parent.py3.log(f"available {self.device_type_pl}: {current_devices}")
        return current_devices

    def get_volume(self):
        """Return (percentage-string, muted-bool); (None, None) if not found."""
        output = self.command_output(["pactl", "list", self.device_type_pl]).strip()
        if self.use_default_device:
            # the default device may have changed since setup(); re-resolve it
            self.device = self.get_default_device()
            self.update_device()
        try:
            muted, perc = self.re_volume.search(output).groups()
            muted = muted == "yes"
        except AttributeError:
            # search() returned None: device not present in the listing
            muted, perc = None, None
        return perc, muted

    def volume_up(self, delta):
        """Raise the volume by *delta* percent, clamped to max_volume."""
        perc, muted = self.get_volume()
        # int(None) raises TypeError here, which on_click() swallows
        if int(perc) + delta >= self.max_volume:
            change = f"{self.max_volume}%"
        else:
            change = f"+{delta}%"
        self.run_cmd(
            ["pactl", "--", f"set-{self.device_type}-volume", self.device, change]
        )

    def volume_down(self, delta):
        """Lower the volume by *delta* percent."""
        self.run_cmd(
            [
                "pactl",
                "--",
                f"set-{self.device_type}-volume",
                self.device,
                f"-{delta}%",
            ]
        )

    def toggle_mute(self):
        """Flip the mute state of the tracked device."""
        self.run_cmd(["pactl", f"set-{self.device_type}-mute", self.device, "toggle"])
class Py3status:
    """"""

    # available configuration parameters
    blocks = "_▁▂▃▄▅▆▇█"
    button_down = 5
    button_mute = 1
    button_up = 4
    cache_timeout = 10
    card = None
    channel = None
    command = None
    device = None
    format = r"[\?if=is_input 😮|♪]: {percentage}%"
    format_muted = r"[\?if=is_input 😶|♪]: muted"
    is_input = False
    max_volume = 120
    thresholds = [(0, "bad"), (20, "degraded"), (50, "good")]
    volume_delta = 5

    class Meta:
        # py3status deprecation machinery: converts the old
        # threshold_bad/threshold_degraded parameters into `thresholds`.
        def deprecate_function(config):
            # support old thresholds
            return {
                "thresholds": [
                    (0, "bad"),
                    (config.get("threshold_bad", 20), "degraded"),
                    (config.get("threshold_degraded", 50), "good"),
                ]
            }

        deprecated = {
            "function": [{"function": deprecate_function}],
            "remove": [
                {
                    "param": "threshold_bad",
                    "msg": "obsolete set using thresholds parameter",
                },
                {
                    "param": "threshold_degraded",
                    "msg": "obsolete set using thresholds parameter",
                },
            ],
        }

    def post_config_hook(self):
        """Pick a mixer backend and validate the configuration.

        Raises Exception for an unknown or uninstalled `command`, or when
        no usable binary can be found at all.
        """
        if not self.command:
            commands = ["pamixer", "pactl", "amixer"]
            # pamixer, pactl requires pulseaudio to work
            if not self.py3.check_commands(["pulseaudio", "pipewire"]):
                commands = ["amixer"]
            self.command = self.py3.check_commands(commands)
        elif self.command not in ["amixer", "pamixer", "pactl"]:
            raise Exception(STRING_ERROR.format(self.command))
        elif not self.py3.check_commands(self.command):
            raise Exception(COMMAND_NOT_INSTALLED.format(self.command))
        if not self.command:
            raise Exception(STRING_NOT_AVAILABLE)

        # turn integers to strings
        if self.card is not None:
            self.card = str(self.card)
        if self.device is not None:
            self.device = str(self.device)

        # instantiate the matching backend class (Amixer/Pamixer/Pactl)
        self.backend = globals()[self.command.capitalize()](self)
        self.color_muted = self.py3.COLOR_MUTED or self.py3.COLOR_BAD

    def volume_status(self):
        """py3status callback: build the bar segment for the current volume."""
        perc, muted = self.backend.get_volume()
        color = None
        icon = None
        new_format = self.format

        if perc is None:
            # backend could not determine the volume
            perc = "?"
        elif muted:
            color = self.color_muted
            new_format = self.format_muted
        else:
            color = self.py3.threshold_get_color(perc)
            # map the percentage onto the `blocks` scale (clamped to the end)
            icon = self.blocks[
                min(
                    len(self.blocks) - 1,
                    math.ceil(int(perc) / 100 * (len(self.blocks) - 1)),
                )
            ]

        volume_data = {"icon": icon, "percentage": perc}
        return {
            "cached_until": self.py3.time_in(self.cache_timeout),
            "full_text": self.py3.safe_format(new_format, volume_data),
            "color": color,
        }

    def on_click(self, event):
        """py3status callback: route mouse buttons to the backend."""
        button = event["button"]
        if button == self.button_up:
            try:
                self.backend.volume_up(self.volume_delta)
            except TypeError:
                # raised via int(None) when the current volume is unknown
                pass
        elif button == self.button_down:
            self.backend.volume_down(self.volume_delta)
        elif button == self.button_mute:
            self.backend.toggle_mute()
# Allow running this file directly for a quick interactive test of the module.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
| 31.863529 | 90 | 0.564466 |
acf9b6eefa304d3d7ded0c3a762313bfb0e9bb74 | 5,349 | py | Python | docs/source/conf.py | aurelienline/scikit-extremes | 90be86f8212b7cd293492d15cefdf4fd48121739 | [
"MIT"
] | 37 | 2017-10-13T15:26:30.000Z | 2022-03-14T16:09:02.000Z | docs/source/conf.py | aurelienline/scikit-extremes | 90be86f8212b7cd293492d15cefdf4fd48121739 | [
"MIT"
] | 10 | 2017-09-21T06:31:16.000Z | 2022-01-14T18:55:47.000Z | docs/source/conf.py | aurelienline/scikit-extremes | 90be86f8212b7cd293492d15cefdf4fd48121739 | [
"MIT"
] | 12 | 2017-07-05T01:57:25.000Z | 2021-08-21T11:23:30.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'skextremes'
copyright = '2019, Kiko Correoso'
author = 'Kiko Correoso'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx_rtd_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'skextremesdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'skextremes.tex', 'skextremes Documentation',
'Kiko Correoso', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'skextremes', 'skextremes Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'skextremes', 'skextremes Documentation',
author, 'skextremes', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 29.552486 | 79 | 0.648345 |
acf9b7dbdb031abfb3fecfff63949cf9489df213 | 13,966 | py | Python | src/seqLister/__init__.py | jrowellfx/seqLister | 95ae958d590cb343efdaca52f7c6b31e6aedc21f | [
"BSD-3-Clause"
] | null | null | null | src/seqLister/__init__.py | jrowellfx/seqLister | 95ae958d590cb343efdaca52f7c6b31e6aedc21f | [
"BSD-3-Clause"
] | null | null | null | src/seqLister/__init__.py | jrowellfx/seqLister | 95ae958d590cb343efdaca52f7c6b31e6aedc21f | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License
#
# Copyright (c) 2008-2021, James Philip Rowell,
# Alpha Eleven Incorporated
# www.alpha-eleven.com
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# - Neither the name of "Alpha Eleven, Inc." nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# seqLister module - used for expanding and condensing ranges of
# frame numbers to/from a common format to describe such ranges.
# Expands the argument 'seqList' into a list of integers.
# 'seqList' may be a single string with the following format
# (see description below), or a list of integers and/or
# strings of the following format:
#
# individual frame numbers: [1, "4", 10, 15]
# yields -> [1, 4, 10, 15]
# sequences of successive frame numbers: ["1-4", "10-15"]
# yields -> [1, 2, 3, 4, 10, 11, 12, 13, 14, 15]
# sequences of skipped frame numbers: ["1-10x2", "20-60x10"]
# yields -> [1, 3, 5, 7, 9, 20, 30, 40, 50, 60]
# reverse sequences work too: ["5-1"]
# yields -> [5, 4, 3, 2, 1]
# as do negative numbers: ["-10--3"]
# yields -> [-10, -9, -8, -7, -6, -5, -4, -3]
#
# These formats may be listed in any order, but if a number has
# been listed once, it will not be listed again.
#
# Eg. ["0-16x8", "0-16x2"]
# yields -> [0, 8, 16, 2, 4, 6, 10, 12, 14]
#
# Anything that is not of the above format is ignored for
# the purposes of building the list of integers and the ignored
# item is appended to the optional argument "nonSeqList".
#
# The returned list of integers is NOT sorted.
#
def expandSeq(seqList, nonSeqList=None) :
    """Expand seqList into a list of integers.

    seqList may be a single int/str or a list of ints and strings using
    the frame-range notation described in the comment above, e.g.::

        expandSeq(["1-4", "10-15"]) -> [1, 2, 3, 4, 10, 11, 12, 13, 14, 15]
        expandSeq(["1-10x2"])       -> [1, 3, 5, 7, 9]
        expandSeq(["5-1"])          -> [5, 4, 3, 2, 1]

    Each number is listed only once, in the order first encountered (the
    result is NOT sorted).  Items that cannot be parsed are appended to
    nonSeqList (pass in a list if you want to collect them).
    """
    # The default is None (not []) so the collector list is not shared
    # between calls - the classic mutable-default-argument bug.
    if nonSeqList is None :
        nonSeqList = []
    if not isinstance(seqList, list) :
        seqList = [seqList]

    resultList = []
    for seqItem in seqList :
        origItem = seqItem
        if not isinstance(seqItem, (int, str)) :
            # Discard item and continue to next one.
            nonSeqList.append(origItem)
            continue
        if isinstance(seqItem, int) :
            if seqItem not in resultList :
                resultList.append(seqItem)
            continue

        stepValue = 1
        # Strip ALL whitespace (spaces, tabs, non-breaking spaces, ...).
        seqItem = "".join(seqItem.split())
        # No stepping by negative numbers - step back by reversing start/end.
        seqItem = seqItem.replace("x-", "x")
        seqItemList = seqItem.split("-") # might be range or neg number
        if "x" in seqItemList[-1] :
            lastItem = seqItemList[-1].split("x")
            if len(lastItem) != 2 :
                nonSeqList.append(origItem)
                continue
            if not lastItem[1].isdigit() :
                nonSeqList.append(origItem)
                continue
            stepValue = int(lastItem[1])
            seqItemList[-1] = lastItem[0] # Stick back in list minus "xN" part.

        if seqItemList[0] == "" : # Means there was a leading minus sign.
            seqItemList.pop(0)
            if len(seqItemList) == 0:
                nonSeqList.append(origItem)
                continue
            if not seqItemList[0].isdigit() :
                nonSeqList.append(origItem)
                continue
            seqItemList[0] = -1 * int(seqItemList[0]) # Replace first entry...
        elif seqItemList[0].isdigit() :
            seqItemList[0] = int(seqItemList[0]) # ...with an integer.
        else :
            nonSeqList.append(origItem)
            continue

        if len(seqItemList) == 1 : # Was just a string with one number in it.
            if seqItemList[0] not in resultList :
                resultList.append(seqItemList[0])
            continue

        if seqItemList[1] == "" : # Same as above for the second entry.
            seqItemList.pop(1)
            if len(seqItemList) == 1:
                nonSeqList.append(origItem)
                continue
            if not seqItemList[1].isdigit() :
                nonSeqList.append(origItem)
                continue
            seqItemList[1] = -1 * int(seqItemList[1])
        elif seqItemList[1].isdigit() :
            seqItemList[1] = int(seqItemList[1])
        else :
            nonSeqList.append(origItem)
            continue

        # Should be exactly two entries at this point.
        if len(seqItemList) != 2 :
            nonSeqList.append(origItem)
            continue

        if seqItemList[0] == seqItemList[1] :
            # Degenerate range n-n, i.e. a single number.
            if seqItemList[0] not in resultList :
                resultList.append(seqItemList[0])
        elif seqItemList[0] < seqItemList[1] : # Counting up.
            frameNum = seqItemList[0]
            while frameNum <= seqItemList[1] :
                if frameNum not in resultList :
                    resultList.append(frameNum)
                frameNum = frameNum + stepValue
        else : # Counting down.
            frameNum = seqItemList[0]
            while frameNum >= seqItemList[1] :
                if frameNum not in resultList :
                    resultList.append(frameNum)
                frameNum = frameNum - stepValue

    return resultList
class _gapRun :
    """A run of frame numbers separated by one constant gap size.

    Used by condenseSeq() as it groups the sorted frame list into runs.
    """

    def __init__(self, seqLen, startInd, gapSize, isCorrected=False) :
        self.seqLen = seqLen            # how many frames the run covers
        self.startInd = startInd        # index into seqList where the run starts
        self.gapSize = gapSize          # constant difference between frames
        self.isCorrected = isCorrected  # True once neighbours have been adjusted

    def __str__(self) :
        parts = (
            "seqLen = " + str(self.seqLen),
            "startInd = " + str(self.startInd),
            "gapSize = " + str(self.gapSize),
            "isCorrected = " + str(self.isCorrected),
        )
        return "[" + " ".join(parts) + "]"
# "__" at the start of function nane indicated private in module.
#
def __debugPrintList(li) :
    """Debug helper: print each number zero-padded to 2 digits, then a newline."""
    for l in li :
        # print "%02d" % l,
        # NOTE(review): the commented-out py2 print above ended with "," (a
        # trailing space between numbers); this py3 port uses end='' so the
        # numbers run together -- confirm which output was intended.
        print("%02d" % l, end='')
    # print ""
    print()
# Takes a list of numbers and condenses it into the most minimal
# form using the notation described in 'expandSeq()' above.
#
# This [2, 1, 3, 7, 8, 4, 5, 6, 9, 10]
# yields -> ['1-10']
# and this [0, 8, 16, 2, 4, 6, 10, 12, 14]
# yields -> ['0-16x2']
#
# and it tries to keep runs of condensed frame lists as
# long as possible while also trying to keep random smatterings
# of frame numbers, simply as numbers and not strange sequences.
#
# Eg. condenseSeq(expandSeq(["0-100x2", 51]))
# yields -> ['0-50x2', '51', '52-100x2']
# and [1, 5, 13]
# yields -> ['1', '5', '13']
#
# and other examples:
# [1, 1, 1, 3, 3, 5, 5, 5] -> ['1-5x2']
# [1, 2, 3, 4, 6, 8, 10] -> ['1-4', '6-10x2']
# [1, 2, 3, 4, 6, 8] -> ['1-4', '6', '8']
#
# condenseSeq(expandSeq(["2-50x2", "3-50x3", "5-50x5", "7-50x7", "11-50x11", "13-50x13", "17-50x17", "19-50x19", "23-50x23"]))
# yields -> ['2-28', '30', '32-36', '38-40', '42', '44-46', '48-50']
#
def condenseSeq(seqList, pad=1) :
    """Condense a list of frame numbers into minimal range notation.

    Numbers are zero-padded to `pad` digits.  The algorithm groups the
    sorted, de-duplicated frames into runs of constant gap, then lets the
    longest runs "steal" boundary frames from shorter neighbouring runs so
    that long stepped ranges stay intact.
    """
    # Turn seqList into all integers and throw away invalid entries
    #
    tmpSeqList = seqList
    seqList = []
    for n in tmpSeqList :
        if isinstance(n, int) :
            seqList.append(int(n))
        if isinstance(n, str) :
            if n.isdigit() :
                seqList.append(int(n))
            # NOTE(review): n[0] raises IndexError when n is "" -- confirm
            # empty strings can never reach here, or guard with startswith().
            elif n[0] == "-" and n[1:].isdigit() :
                seqList.append(-1 * int(n))

    if len(seqList) == 0 : # Take care of 1st trivial case
        return []

    # Remove duplicates
    #
    seqList.sort()
    tmpSeqList = seqList
    seqList = []
    seqList.append(tmpSeqList[0])
    tmpSeqList.pop(0)
    for n in tmpSeqList :
        if n != seqList[-1] :
            seqList.append(n)

    formatStr = "%0" + str(pad) + "d"

    if len(seqList) == 1 : # Take care of second trivial case.
        return [formatStr % seqList[0]]

    # At this point - guaranteed that len(seqList) > 1

    gapList = []
    i = 1
    while i < len(seqList) : # Record gaps between frame #'s
        gapList.append(seqList[i] - seqList[i-1])
        i += 1

    # Count lengths of similar "gaps".
    i = 0
    currentGap = 0 # Impossible - good starting point.
    gapRunList = []
    while i < len(gapList) :
        if gapList[i] != currentGap :
            currentGap = gapList[i]
            gapRunList.append(_gapRun(2, i, currentGap))
        else :
            gapRunList[-1].seqLen += 1
        i += 1
    gapRunList.append(_gapRun(0, i, 0)) # Add entry for last number in seqList (note zero gapSize)

    # The largest runs steals from the prior and next runs last and first frame (respectively)
    # if possible, working our way to smaller and smaller runs.
    #
    while True :
        # Find largest run with smallest gapSize.
        #
        runInd = len(gapRunList) - 1 # This will contain index to desired run
        maxSeqLen = 0
        maxSeqLenGapSize = 0
        i = 0
        for run in gapRunList :
            if not run.isCorrected :
                if run.seqLen > maxSeqLen :
                    runInd = i
                    maxSeqLen = run.seqLen
                    maxSeqLenGapSize = run.gapSize
                elif run.seqLen == maxSeqLen and run.gapSize < maxSeqLenGapSize :
                    runInd = i
                    maxSeqLenGapSize = run.gapSize
            i += 1
        # Only the zero-length sentinel run remains: all runs processed.
        if runInd == len(gapRunList) - 1 :
            break

        gapRunList[runInd].isCorrected = True
        if gapRunList[runInd].seqLen == 0 :
            continue

        # Correct prior sequence if possible.
        if runInd > 0 :
            if not gapRunList[runInd-1].isCorrected :
                gapRunList[runInd-1].seqLen -= 1

        # Also correct next sequence if possible.
        if runInd < len(gapRunList) - 1 :
            if not gapRunList[runInd+1].isCorrected : # Means it was bigger than this one and we can't steal from it.
                gapRunList[runInd+1].seqLen -= 1
                gapRunList[runInd+1].startInd += 1

    # Emit each surviving run as a single number, a pair, or a range.
    condensedList = []
    for run in gapRunList :
        if run.seqLen <= 0 :
            continue
        if run.seqLen == 1 :
            condensedList.append(formatStr % seqList[run.startInd])
            continue

        # Don't print out this case as a range, but as two separate entries.
        #
        if run.seqLen == 2 and run.gapSize > 1:
            condensedList.append(formatStr % seqList[run.startInd])
            condensedList.append(formatStr % seqList[run.startInd+1])
            continue

        firstFrame = seqList[run.startInd]
        lastFrame = seqList[run.startInd + run.seqLen - 1]
        gap = run.gapSize
        condensedList.append(formatStr % firstFrame +"-"+ formatStr % lastFrame)
        if gap > 1 :
            condensedList[-1] = condensedList[-1] + "x" + str(gap)

    return condensedList
# Takes a list of numbers and condenses it into the most minimal
# form with the restriction that sequences are compressed
# to a range (A-B) if and only if the numbers are successive.
#
# This [2, 1, 3, 7, 8, 4, 5, 6, 9, 10]
# yields -> ['1-10']
# and this [0, 8, 16, 2, 4, 6, 10, 12, 14]
# yields -> [0, 2, 4, 6, 8, 10, 12, 14, 16]
#
def condenseSeqOnes(seqList, pad=1) :
# Turn seqList into all integers and throw away invalid entries
#
tmpSeqList = seqList
seqList = []
for n in tmpSeqList :
if isinstance(n, int) :
seqList.append(int(n))
if isinstance(n, str) :
if n.isdigit() :
seqList.append(int(n))
elif n[0] == "-" and n[1:].isdigit() :
seqList.append(-1 * int(n))
if len(seqList) == 0 : # Take care of 1st trivial case
return []
# Remove duplicates
#
seqList.sort()
tmpSeqList = seqList
seqList = []
seqList.append(tmpSeqList[0])
tmpSeqList.pop(0)
for n in tmpSeqList :
if n != seqList[-1] :
seqList.append(n)
formatStr = "%0" + str(pad) + "d"
if len(seqList) == 1 : # Take care of second trivial case.
return [formatStr % seqList[0]]
# At this point - guaranteed that len(seqList) > 1
condensedList = []
firstFrame = seqList[0]
lastFrame = seqList[0]
seqList.pop(0)
for f in seqList :
if f == lastFrame + 1 : # Sequence is on ones.
lastFrame = f
else :
if firstFrame == lastFrame : # Last one was a single entry.
condensedList.append(formatStr % firstFrame)
else : # Had a range.
condensedList.append(formatStr % firstFrame +"-"+ formatStr % lastFrame)
firstFrame = f
lastFrame = f
if firstFrame == lastFrame :
condensedList.append(formatStr % firstFrame)
else :
condensedList.append(formatStr % firstFrame +"-"+ formatStr % lastFrame)
return condensedList
| 34.569307 | 126 | 0.585565 |
acf9b81ec0174b522cf1114b06347bc1421b1657 | 22,780 | py | Python | traceback2/__init__.py | jelmer/traceback2 | 8d28d1d25780fa68204b73017d5398148e4df0a6 | [
"PSF-2.0"
] | 4 | 2015-03-30T08:02:35.000Z | 2021-06-24T23:06:31.000Z | traceback2/__init__.py | jelmer/traceback2 | 8d28d1d25780fa68204b73017d5398148e4df0a6 | [
"PSF-2.0"
] | 16 | 2015-04-03T23:48:11.000Z | 2021-12-26T07:16:59.000Z | traceback2/__init__.py | jelmer/traceback2 | 8d28d1d25780fa68204b73017d5398148e4df0a6 | [
"PSF-2.0"
] | 11 | 2015-04-24T07:43:14.000Z | 2022-02-14T20:26:54.000Z | """Extract, format and print information about Python stack traces."""
import sys
import operator
import linecache2 as linecache
from six import u, PY2
__all__ = ['extract_stack', 'extract_tb', 'format_exception',
'format_exception_only', 'format_list', 'format_stack',
'format_tb', 'print_exc', 'format_exc', 'print_exception',
'print_last', 'print_stack', 'print_tb',
'clear_frames']
#
# Formatting and printing lists of traceback lines.
#
def print_list(extracted_list, file=None):
    """Print the list of tuples as returned by extract_tb() or
    extract_stack() as a formatted stack trace to the given file."""
    # Default to stderr, matching the stdlib traceback module.
    out = sys.stderr if file is None else file
    summary = StackSummary.from_list(extracted_list)
    for formatted in summary.format():
        out.write(formatted)
def format_list(extracted_list):
    """Format a list of traceback entry tuples for printing.

    Each returned string corresponds to the entry at the same index in
    'extracted_list', ends with a newline, and may contain internal
    newlines for entries that carry source text.
    """
    summary = StackSummary.from_list(extracted_list)
    return summary.format()
#
# Printing and Extracting Tracebacks.
#
def print_tb(tb, limit=None, file=None):
    """Print up to 'limit' stack trace entries from traceback 'tb'.

    If 'limit' is omitted or None, all entries are printed.  If 'file'
    is omitted or None, output goes to sys.stderr; otherwise it must be
    an open file-like object with a write() method.
    """
    entries = extract_tb(tb, limit=limit)
    print_list(entries, file=file)
def format_tb(tb, limit=None):
    """Return the entries of extract_tb(tb, limit) formatted as strings."""
    summary = extract_tb(tb, limit=limit)
    return summary.format()
def extract_tb(tb, limit=None):
    """Return a StackSummary of up to 'limit' entries from traceback 'tb'.

    Useful for alternate formatting of stack traces.  Each entry behaves
    like a (filename, line number, function name, text) quadruple, where
    'text' is the stripped source line, or None if unavailable.
    """
    frames = walk_tb(tb)
    return StackSummary.extract(frames, limit=limit)
#
# Exception formatting and output.
#
# Banner lines printed between chained exceptions; these mirror the text
# the built-in Python 3 traceback machinery emits for __cause__ and
# __context__ chains.
_cause_message = (
    "\nThe above exception was the direct cause "
    "of the following exception:\n\n")
_context_message = (
    "\nDuring handling of the above exception, "
    "another exception occurred:\n\n")
def print_exception(etype, value, tb, limit=None, file=None, chain=True):
    """Print exception information and up to 'limit' stack trace entries
    from 'tb' to 'file'.

    Unlike print_tb(), this (1) prints a "Traceback (most recent call
    last):" header when a traceback is present; (2) prints the exception
    type and value after the stack trace; and (3) for SyntaxError, shows
    the offending line with a caret indicating the approximate position
    of the error.
    """
    # 'etype' is deliberately unused: code such as cgitb passes bogus
    # values here, so the type is always derived from 'value' itself
    # (the new TracebackException API never looked at it either).
    out = sys.stderr if file is None else file
    exc = TracebackException(type(value), value, tb, limit=limit)
    for line in exc.format(chain=chain):
        out.write(line)
def format_exception(etype, value, tb, limit=None, chain=True):
    """Format a stack trace and the exception information.

    Takes the same arguments as print_exception() and returns the same
    text as a list of newline-terminated strings (some containing
    internal newlines) instead of printing it.
    """
    # 'etype' is intentionally ignored for compatibility with callers
    # (e.g. cgitb) that pass bogus values; the type comes from 'value'.
    exc = TracebackException(type(value), value, tb, limit=limit)
    return list(exc.format(chain=chain))
def format_exception_only(etype, value):
    """Format the exception part of a traceback.

    'etype' and 'value' are an exception type and value such as
    sys.last_type and sys.last_value.  Returns a list of
    newline-terminated strings; normally a single string, but several
    for SyntaxError, detailing where the syntax error occurred.  The
    line naming the exception is always last.
    """
    exc = TracebackException(etype, value, None)
    return list(exc.format_exception_only())
# -- not official API but folk probably use these two functions.
def _format_final_exc_line(etype, value):
    """Return the final '<etype>: <value>' line of a formatted exception.

    *value* may be the already-stringified exception value (see
    TracebackException, which stores str(exc_value) in _str).
    """
    valuestr = _some_str(value)
    # NOTE(review): comparing against the *string* 'None' looks odd, but
    # since callers may pass a pre-stringified value, str(None) must
    # suppress the ': value' suffix exactly like a real None does.
    if value == 'None' or value is None or not valuestr:
        line = u("%s\n") % etype
    else:
        line = u("%s: %s\n") % (etype, valuestr)
    return line
def _some_str(value):
    """Best-effort conversion of *value* to a printable string.

    Never raises: falls back to a placeholder when stringification fails.
    The bare excepts are deliberate -- a broken __str__/__unicode__ must
    not abort traceback rendering.
    """
    try:
        if PY2:
            # If there is a working __unicode__, great.
            # Otherwise see if we can get a bytestring...
            # Otherwise we fallback to unprintable.
            try:
                return unicode(value)
            except:
                return "b%s" % repr(str(value))
        else:
            # For Python3, bytestrings don't implicit decode, so its trivial.
            return str(value)
    except:
        return '<unprintable %s object>' % type(value).__name__
# --
def _some_fs_str(value):
"""_some_str, but for filesystem paths."""
if value is None:
return None
try:
if type(value) is bytes:
return value.decode(sys.getfilesystemencoding())
except:
pass
return _some_str(value)
def print_exc(limit=None, file=None, chain=True):
    """Shorthand for 'print_exception(*sys.exc_info(), limit, file)'."""
    etype, value, tb = sys.exc_info()
    print_exception(etype, value, tb, limit=limit, file=file, chain=chain)
def format_exc(limit=None, chain=True):
    """Like print_exc() but return a string."""
    etype, value, tb = sys.exc_info()
    lines = format_exception(etype, value, tb, limit=limit, chain=chain)
    return "".join(lines)
def print_last(limit=None, file=None, chain=True):
    """Shorthand for printing sys.last_type / sys.last_value /
    sys.last_traceback via print_exception()."""
    if hasattr(sys, "last_type"):
        print_exception(sys.last_type, sys.last_value, sys.last_traceback,
                        limit, file, chain)
    else:
        raise ValueError("no last exception")
#
# Printing and Extracting Stacks.
#
def print_stack(f=None, limit=None, file=None):
    """Print a stack trace from its invocation point.

    'f' optionally selects an alternate starting frame; 'limit' and
    'file' behave as for print_exception().
    """
    stack = extract_stack(f, limit=limit)
    print_list(stack, file=file)
def format_stack(f=None, limit=None):
    """Shorthand for 'format_list(extract_stack(f, limit))'."""
    stack = extract_stack(f, limit=limit)
    return format_list(stack)
def extract_stack(f=None, limit=None):
    """Extract the raw traceback from the current stack frame.

    The result has the same shape as for extract_tb(), ordered from
    oldest to newest frame.  'f' and 'limit' behave as for
    print_stack().
    """
    frames = walk_stack(f)
    stack = StackSummary.extract(frames, limit=limit)
    stack.reverse()
    return stack
# No-op fallback used when a frame object has no clear() method
# (frame.clear() only exists on Python 3.4+).
_identity = lambda:None

def clear_frames(tb):
    "Clear all references to local variables in the frames of a traceback."
    current = tb
    while current is not None:
        clear = getattr(current.tb_frame, 'clear', _identity)
        try:
            clear()
        except RuntimeError:
            # Frame is still executing; leave its locals alone.
            pass
        current = current.tb_next
class FrameSummary:
    """A single frame from a traceback.

    - :attr:`filename` The filename for the frame.
    - :attr:`lineno` The line within filename for the frame that was
      active when the frame was captured.
    - :attr:`name` The name of the function or method that was executing
      when the frame was captured.
    - :attr:`line` The text from the linecache module for the line of
      code that was running when the frame was captured.
    - :attr:`locals` Either None if locals were not supplied, or a dict
      mapping the name to the repr() of the variable.
    """
    __slots__ = ('filename', 'lineno', 'name', '_line', 'locals')
    def __init__(self, filename, lineno, name, lookup_line=True,
            locals=None, line=None):
        """Construct a FrameSummary.

        :param lookup_line: If True, `linecache` is consulted for the source
            code line. Otherwise, the line will be looked up when first needed.
        :param locals: If supplied the frame locals, which will be captured as
            object representations.
        :param line: If provided, use this instead of looking up the line in
            the linecache.
        """
        self.filename = filename
        self.lineno = lineno
        self.name = name
        self._line = line
        if lookup_line:
            # Touch the property so the source line is fetched and cached now.
            self.line
        # Store reprs rather than live objects so no references are kept.
        self.locals = \
            dict((k, repr(v)) for k, v in locals.items()) if locals else None
    def __eq__(self, other):
        # Robustness fix: previously comparing against anything without the
        # FrameSummary attributes raised AttributeError.  Support comparison
        # with the legacy 4-tuple form as well, mirroring CPython's
        # traceback.FrameSummary.  Note: 'line' is excluded from
        # FrameSummary-to-FrameSummary equality, as before.
        if isinstance(other, FrameSummary):
            return (self.filename == other.filename and
                    self.lineno == other.lineno and
                    self.name == other.name and
                    self.locals == other.locals)
        if isinstance(other, tuple):
            return (self.filename, self.lineno, self.name, self.line) == other
        return NotImplemented
    def __getitem__(self, pos):
        # Backwards compatibility with the old (filename, lineno, name, line)
        # tuple API.
        return (self.filename, self.lineno, self.name, self.line)[pos]
    def __iter__(self):
        return iter([self.filename, self.lineno, self.name, self.line])
    def __repr__(self):
        # Bug fix: the format string previously hard-coded "(unknown)" and
        # never interpolated the 'filename' argument it was passed.
        return "<FrameSummary file {filename}, line {lineno} in {name}>".format(
            filename=self.filename, lineno=self.lineno, name=self.name)
    @property
    def line(self):
        # Lazily fetch (and then cache) the stripped source line.
        if self._line is None:
            self._line = linecache.getline(self.filename, self.lineno).strip()
        return self._line
def walk_stack(f):
    """Walk a stack yielding the frame and line number for each frame.

    This will follow f.f_back from the given frame. If no frame is given, the
    current stack is used. Usually used with StackSummary.extract.
    """
    if f is None:
        # Skip this generator's own frame and walk_stack's caller.
        f = sys._getframe().f_back.f_back
    frame = f
    while frame is not None:
        yield frame, frame.f_lineno
        frame = frame.f_back
def walk_tb(tb):
    """Walk a traceback yielding the frame and line number for each frame.

    This will follow tb.tb_next (and thus is in the opposite order to
    walk_stack). Usually used with StackSummary.extract.
    """
    current = tb
    while current is not None:
        yield current.tb_frame, current.tb_lineno
        current = current.tb_next
class StackSummary(list):
    """A stack of frames.

    Subclasses list so the legacy "list of 4-tuples" API keeps working;
    the elements are FrameSummary objects.
    """
    @classmethod
    def extract(klass, frame_gen, limit=None, lookup_lines=True,
            capture_locals=False):
        """Create a StackSummary from a traceback or stack object.
        :param frame_gen: A generator that yields (frame, lineno) tuples to
            include in the stack.
        :param limit: None to include all frames or the number of frames to
            include.
        :param lookup_lines: If True, lookup lines for each frame immediately,
            otherwise lookup is deferred until the frame is rendered.
        :param capture_locals: If True, the local variables from each frame will
            be captured as object representations into the FrameSummary.
        """
        if limit is None:
            # Honour sys.tracebacklimit the way the stdlib module does.
            limit = getattr(sys, 'tracebacklimit', None)
        result = klass()
        fnames = set()
        for pos, (f, lineno) in enumerate(frame_gen):
            if limit is not None and pos >= limit:
                break
            co = f.f_code
            filename = co.co_filename
            name = co.co_name
            fnames.add(filename)
            # Register module globals so linecache can later resolve source
            # via the module loader if the file is not on disk.
            linecache.lazycache(filename, f.f_globals)
            # Must defer line lookups until we have called checkcache.
            if capture_locals:
                f_locals = f.f_locals
            else:
                f_locals = None
            result.append(FrameSummary(
                filename, lineno, name, lookup_line=False, locals=f_locals))
        # Drop stale cache entries before any (possibly deferred) lookups.
        for filename in fnames:
            linecache.checkcache(filename)
        # If immediate lookup was desired, trigger lookups now.
        if lookup_lines:
            for f in result:
                f.line
        return result
    @classmethod
    def from_list(klass, a_list):
        """Create a StackSummary from a simple list of tuples.
        This method supports the older Python API. Each tuple should be a
        4-tuple with (filename, lineno, name, line) elements.
        """
        # An existing StackSummary is simply copied; FrameSummary elements
        # also unpack as 4-tuples, so both shapes are accepted.
        if isinstance(a_list, StackSummary):
            return StackSummary(a_list)
        result = StackSummary()
        for filename, lineno, name, line in a_list:
            result.append(FrameSummary(filename, lineno, name, line=line))
        return result
    def format(self):
        """Format the stack ready for printing.
        Returns a list of strings ready for printing. Each string in the
        resulting list corresponds to a single frame from the stack.
        Each string ends in a newline; the strings may contain internal
        newlines as well, for those items with source text lines.
        """
        result = []
        for frame in self:
            row = []
            row.append(u(' File "{0}", line {1}, in {2}\n').format(
                _some_fs_str(frame.filename), frame.lineno, frame.name))
            if frame.line:
                row.append(u(' {0}\n').format(frame.line.strip()))
            if frame.locals:
                # Captured locals (already repr()'d) are listed sorted by name.
                for name, value in sorted(frame.locals.items()):
                    row.append(u(' {name} = {value}\n').format(name=name, value=value))
            result.append(u('').join(row))
        return result
class TracebackException:
    """An exception ready for rendering.
    The traceback module captures enough attributes from the original exception
    to this intermediary form to ensure that no references are held, while
    still being able to fully print or format it.
    Use `from_exception` to create TracebackException instances from exception
    objects, or the constructor to create TracebackException instances from
    individual components.
    - :attr:`__cause__` A TracebackException of the original *__cause__*.
    - :attr:`__context__` A TracebackException of the original *__context__*.
    - :attr:`__suppress_context__` The *__suppress_context__* value from the
      original exception.
    - :attr:`stack` A `StackSummary` representing the traceback.
    - :attr:`exc_type` The class of the original traceback.
    - :attr:`filename` For syntax errors - the filename where the error
      occurred.
    - :attr:`lineno` For syntax errors - the linenumber where the error
      occurred.
    - :attr:`text` For syntax errors - the text where the error
      occurred.
    - :attr:`offset` For syntax errors - the offset into the text where the
      error occurred.
    - :attr:`msg` For syntax errors - the compiler error message.
    """
    def __init__(self, exc_type, exc_value, exc_traceback, limit=None,
            lookup_lines=True, capture_locals=False, _seen=None):
        # NB: we need to accept exc_traceback, exc_value, exc_traceback to
        # permit backwards compat with the existing API, otherwise we
        # need stub thunk objects just to glue it together.
        # Handle loops in __cause__ or __context__: _seen records the id()s
        # of exceptions already captured so a cycle in the cause/context
        # graph cannot recurse forever.
        if _seen is None:
            _seen = set()
        _seen.add(id(exc_value))
        # Gracefully handle (the way Python 2.4 and earlier did) the case of
        # being called with no type or value (None, None, None).
        if (exc_value and getattr(exc_value, '__cause__', None) is not None
            and id(exc_value.__cause__) not in _seen):
            cause = TracebackException(
                type(exc_value.__cause__),
                exc_value.__cause__,
                exc_value.__cause__.__traceback__,
                limit=limit,
                lookup_lines=False,
                capture_locals=capture_locals,
                _seen=_seen)
        else:
            cause = None
        if (exc_value and getattr(exc_value, '__context__', None) is not None
            and id(exc_value.__context__) not in _seen):
            context = TracebackException(
                type(exc_value.__context__),
                exc_value.__context__,
                exc_value.__context__.__traceback__,
                limit=limit,
                lookup_lines=False,
                capture_locals=capture_locals,
                _seen=_seen)
        else:
            context = None
        # Kept only so format() knows whether to emit the "Traceback
        # (most recent call last):" header.
        self.exc_traceback = exc_traceback
        self.__cause__ = cause
        self.__context__ = context
        self.__suppress_context__ = \
            getattr(exc_value, '__suppress_context__', False) if exc_value else False
        # TODO: locals.
        self.stack = StackSummary.extract(
            walk_tb(exc_traceback), limit=limit, lookup_lines=lookup_lines,
            capture_locals=capture_locals)
        self.exc_type = exc_type
        # Capture now to permit freeing resources: only complication is in the
        # unofficial API _format_final_exc_line
        self._str = _some_str(exc_value)
        if exc_type and issubclass(exc_type, SyntaxError):
            # Handle SyntaxError's specially
            self.filename = exc_value.filename
            self.lineno = str(exc_value.lineno)
            self.text = exc_value.text
            self.offset = exc_value.offset
            self.msg = exc_value.msg
        if lookup_lines:
            self._load_lines()
    @classmethod
    def from_exception(cls, exc, *args, **kwargs):
        """Create a TracebackException from an exception.
        Only useful in Python 3 specific code.
        """
        return cls(type(exc), exc, exc.__traceback__, *args, **kwargs)
    def _load_lines(self):
        """Private API. force all lines in the stack to be loaded."""
        for frame in self.stack:
            frame.line
        # Recurse into the chained exceptions as well.
        if self.__context__:
            self.__context__._load_lines()
        if self.__cause__:
            self.__cause__._load_lines()
    def __eq__(self, other):
        return self.__dict__ == other.__dict__
    def __str__(self):
        return self._str
    def format_exception_only(self):
        """Format the exception part of the traceback.
        The return value is a generator of strings, each ending in a newline.
        Normally, the generator emits a single string; however, for
        SyntaxError exceptions, it emits several lines that (when
        printed) display detailed information about where the syntax
        error occurred.
        The message indicating which exception occurred is always the last
        string in the output.
        """
        if self.exc_type is None:
            yield _format_final_exc_line(None, self._str)
            return
        stype = getattr(self.exc_type, '__qualname__', self.exc_type.__name__)
        smod = u(self.exc_type.__module__)
        # Qualify the type name with its module unless it is a builtin
        # ("exceptions" is the Python 2 builtin exception module).
        if smod not in ("__main__", "builtins", "exceptions"):
            stype = smod + u('.') + stype
        if not issubclass(self.exc_type, SyntaxError):
            yield _format_final_exc_line(stype, self._str)
            return
        # It was a syntax error; show exactly where the problem was found.
        filename = _some_fs_str(self.filename) or u("<string>")
        lineno = str(self.lineno) or u('?')
        yield u(' File "{0}", line {1}\n').format(filename, lineno)
        badline = None
        if self.text is not None:
            if type(self.text) is bytes:
                # Not decoded - get the line via linecache which will decode
                # for us.
                if self.lineno:
                    badline = linecache.getline(filename, int(lineno))
                if not badline:
                    # But we can't for some reason, so fallback to attempting a
                    # u cast.
                    badline = u(self.text)
            else:
                badline = self.text
        offset = self.offset
        if badline is not None:
            yield u(' {0}\n').format(badline.strip())
            if offset is not None:
                # Draw a caret under the (approximate) error position.
                caretspace = badline.rstrip('\n')
                offset = min(len(caretspace), offset) - 1
                caretspace = caretspace[:offset].lstrip()
                # non-space whitespace (likes tabs) must be kept for alignment
                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
                yield u(' {0}^\n').format(''.join(caretspace))
        msg = self.msg or u("<no detail available>")
        yield u("{0}: {1}\n").format(stype, msg)
    def format(self, chain=True):
        """Format the exception.
        If chain is not *True*, *__cause__* and *__context__* will not be formatted.
        The return value is a generator of strings, each ending in a newline and
        some containing internal newlines. `print_exception` is a wrapper around
        this method which just prints the lines to a file.
        The message indicating which exception occurred is always the last
        string in the output.
        """
        if chain:
            # Emit any chained exception first, followed by the matching
            # "direct cause" / "during handling" banner; an explicit
            # __cause__ takes precedence over the implicit __context__.
            if self.__cause__ is not None:
                for line in self.__cause__.format(chain=chain):
                    yield line
                yield _cause_message
            elif (self.__context__ is not None and
                not self.__suppress_context__):
                for line in self.__context__.format(chain=chain):
                    yield line
                yield _context_message
        if self.exc_traceback is not None:
            yield u('Traceback (most recent call last):\n')
        for line in self.stack.format():
            yield line
        for line in self.format_exception_only():
            yield line
| 38.03005 | 90 | 0.638191 |
acf9b902d1dbbdcf5c80dd65bd8cdc628e5380a1 | 20,964 | py | Python | fhirclient/r4models/measure.py | Healthedata1/Flask-PL | 88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1 | [
"MIT"
] | null | null | null | fhirclient/r4models/measure.py | Healthedata1/Flask-PL | 88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1 | [
"MIT"
] | null | null | null | fhirclient/r4models/measure.py | Healthedata1/Flask-PL | 88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Measure) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class Measure(domainresource.DomainResource):
    """ A quality measure definition.
    The Measure resource provides the definition of a quality measure.
    """
    # NOTE: generated code (see file header) -- change the generator
    # templates rather than editing this class by hand.
    resource_type = "Measure"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.approvalDate = None
        """ When the measure was approved by publisher.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.author = None
        """ Who authored the content.
        List of `ContactDetail` items (represented as `dict` in JSON). """
        self.clinicalRecommendationStatement = None
        """ Summary of clinical guidelines.
        Type `str`. """
        self.compositeScoring = None
        """ opportunity | all-or-nothing | linear | weighted.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.contact = None
        """ Contact details for the publisher.
        List of `ContactDetail` items (represented as `dict` in JSON). """
        self.copyright = None
        """ Use and/or publishing restrictions.
        Type `str`. """
        self.date = None
        """ Date last changed.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.definition = None
        """ Defined terms used in the measure documentation.
        List of `str` items. """
        self.description = None
        """ Natural language description of the measure.
        Type `str`. """
        self.disclaimer = None
        """ Disclaimer for use of the measure or its referenced content.
        Type `str`. """
        self.editor = None
        """ Who edited the content.
        List of `ContactDetail` items (represented as `dict` in JSON). """
        self.effectivePeriod = None
        """ When the measure is expected to be used.
        Type `Period` (represented as `dict` in JSON). """
        self.endorser = None
        """ Who endorsed the content.
        List of `ContactDetail` items (represented as `dict` in JSON). """
        self.experimental = None
        """ For testing purposes, not real usage.
        Type `bool`. """
        self.group = None
        """ Population criteria group.
        List of `MeasureGroup` items (represented as `dict` in JSON). """
        self.guidance = None
        """ Additional guidance for implementers.
        Type `str`. """
        self.identifier = None
        """ Additional identifier for the measure.
        List of `Identifier` items (represented as `dict` in JSON). """
        self.improvementNotation = None
        """ increase | decrease.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.jurisdiction = None
        """ Intended jurisdiction for measure (if applicable).
        List of `CodeableConcept` items (represented as `dict` in JSON). """
        self.lastReviewDate = None
        """ When the measure was last reviewed.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.library = None
        """ Logic used by the measure.
        List of `str` items. """
        self.name = None
        """ Name for this measure (computer friendly).
        Type `str`. """
        self.publisher = None
        """ Name of the publisher (organization or individual).
        Type `str`. """
        self.purpose = None
        """ Why this measure is defined.
        Type `str`. """
        self.rateAggregation = None
        """ How is rate aggregation performed for this measure.
        Type `str`. """
        self.rationale = None
        """ Detailed description of why the measure exists.
        Type `str`. """
        self.relatedArtifact = None
        """ Additional documentation, citations, etc..
        List of `RelatedArtifact` items (represented as `dict` in JSON). """
        self.reviewer = None
        """ Who reviewed the content.
        List of `ContactDetail` items (represented as `dict` in JSON). """
        self.riskAdjustment = None
        """ How risk adjustment is applied for this measure.
        Type `str`. """
        self.scoring = None
        """ proportion | ratio | continuous-variable | cohort.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.status = None
        """ draft | active | retired | unknown.
        Type `str`. """
        self.subjectCodeableConcept = None
        """ E.g. Patient, Practitioner, RelatedPerson, Organization, Location,
        Device.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.subjectReference = None
        """ E.g. Patient, Practitioner, RelatedPerson, Organization, Location,
        Device.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.subtitle = None
        """ Subordinate title of the measure.
        Type `str`. """
        self.supplementalData = None
        """ What other data should be reported with the measure.
        List of `MeasureSupplementalData` items (represented as `dict` in JSON). """
        self.title = None
        """ Name for this measure (human friendly).
        Type `str`. """
        self.topic = None
        """ The category of the measure, such as Education, Treatment,
        Assessment, etc..
        List of `CodeableConcept` items (represented as `dict` in JSON). """
        self.type = None
        """ process | outcome | structure | patient-reported-outcome |
        composite.
        List of `CodeableConcept` items (represented as `dict` in JSON). """
        self.url = None
        """ Canonical identifier for this measure, represented as a URI
        (globally unique).
        Type `str`. """
        self.usage = None
        """ Describes the clinical usage of the measure.
        Type `str`. """
        self.useContext = None
        """ The context that the content is intended to support.
        List of `UsageContext` items (represented as `dict` in JSON). """
        self.version = None
        """ Business version of the measure.
        Type `str`. """
        super(Measure, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(Measure, self).elementProperties()
        # Each tuple is: (attribute name, JSON name, type, is_list,
        # "one of many" group name or None, required).
        js.extend([
            ("approvalDate", "approvalDate", fhirdate.FHIRDate, False, None, False),
            ("author", "author", contactdetail.ContactDetail, True, None, False),
            ("clinicalRecommendationStatement", "clinicalRecommendationStatement", str, False, None, False),
            ("compositeScoring", "compositeScoring", codeableconcept.CodeableConcept, False, None, False),
            ("contact", "contact", contactdetail.ContactDetail, True, None, False),
            ("copyright", "copyright", str, False, None, False),
            ("date", "date", fhirdate.FHIRDate, False, None, False),
            ("definition", "definition", str, True, None, False),
            ("description", "description", str, False, None, False),
            ("disclaimer", "disclaimer", str, False, None, False),
            ("editor", "editor", contactdetail.ContactDetail, True, None, False),
            ("effectivePeriod", "effectivePeriod", period.Period, False, None, False),
            ("endorser", "endorser", contactdetail.ContactDetail, True, None, False),
            ("experimental", "experimental", bool, False, None, False),
            ("group", "group", MeasureGroup, True, None, False),
            ("guidance", "guidance", str, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("improvementNotation", "improvementNotation", codeableconcept.CodeableConcept, False, None, False),
            ("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
            ("lastReviewDate", "lastReviewDate", fhirdate.FHIRDate, False, None, False),
            ("library", "library", str, True, None, False),
            ("name", "name", str, False, None, False),
            ("publisher", "publisher", str, False, None, False),
            ("purpose", "purpose", str, False, None, False),
            ("rateAggregation", "rateAggregation", str, False, None, False),
            ("rationale", "rationale", str, False, None, False),
            ("relatedArtifact", "relatedArtifact", relatedartifact.RelatedArtifact, True, None, False),
            ("reviewer", "reviewer", contactdetail.ContactDetail, True, None, False),
            ("riskAdjustment", "riskAdjustment", str, False, None, False),
            ("scoring", "scoring", codeableconcept.CodeableConcept, False, None, False),
            ("status", "status", str, False, None, True),
            ("subjectCodeableConcept", "subjectCodeableConcept", codeableconcept.CodeableConcept, False, "subject", False),
            ("subjectReference", "subjectReference", fhirreference.FHIRReference, False, "subject", False),
            ("subtitle", "subtitle", str, False, None, False),
            ("supplementalData", "supplementalData", MeasureSupplementalData, True, None, False),
            ("title", "title", str, False, None, False),
            ("topic", "topic", codeableconcept.CodeableConcept, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, True, None, False),
            ("url", "url", str, False, None, False),
            ("usage", "usage", str, False, None, False),
            ("useContext", "useContext", usagecontext.UsageContext, True, None, False),
            ("version", "version", str, False, None, False),
        ])
        return js
from . import backboneelement
class MeasureGroup(backboneelement.BackboneElement):
    """ Population criteria group.
    A group of population criteria for the measure.
    """
    # NOTE: generated code -- edit the generator templates, not this class.
    resource_type = "MeasureGroup"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.code = None
        """ Meaning of the group.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.description = None
        """ Summary description.
        Type `str`. """
        self.population = None
        """ Population criteria.
        List of `MeasureGroupPopulation` items (represented as `dict` in JSON). """
        self.stratifier = None
        """ Stratifier criteria for the measure.
        List of `MeasureGroupStratifier` items (represented as `dict` in JSON). """
        super(MeasureGroup, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(MeasureGroup, self).elementProperties()
        # Tuple fields: (attr name, JSON name, type, is_list, of_many, required).
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("description", "description", str, False, None, False),
            ("population", "population", MeasureGroupPopulation, True, None, False),
            ("stratifier", "stratifier", MeasureGroupStratifier, True, None, False),
        ])
        return js
class MeasureGroupPopulation(backboneelement.BackboneElement):
    """ Population criteria.
    A population criteria for the measure.
    """
    # NOTE: generated code -- edit the generator templates, not this class.
    resource_type = "MeasureGroupPopulation"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.code = None
        """ initial-population | numerator | numerator-exclusion | denominator
        | denominator-exclusion | denominator-exception | measure-
        population | measure-population-exclusion | measure-observation.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.criteria = None
        """ The criteria that defines this population.
        Type `Expression` (represented as `dict` in JSON). """
        self.description = None
        """ The human readable description of this population criteria.
        Type `str`. """
        super(MeasureGroupPopulation, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(MeasureGroupPopulation, self).elementProperties()
        # Tuple fields: (attr name, JSON name, type, is_list, of_many, required).
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("criteria", "criteria", expression.Expression, False, None, True),
            ("description", "description", str, False, None, False),
        ])
        return js
class MeasureGroupStratifier(backboneelement.BackboneElement):
    """ Stratifier criteria for the measure.
    The stratifier criteria for the measure report, specified as either the
    name of a valid CQL expression defined within a referenced library or a
    valid FHIR Resource Path.
    """
    # NOTE: generated code -- edit the generator templates, not this class.
    resource_type = "MeasureGroupStratifier"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.code = None
        """ Meaning of the stratifier.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.component = None
        """ Stratifier criteria component for the measure.
        List of `MeasureGroupStratifierComponent` items (represented as `dict` in JSON). """
        self.criteria = None
        """ How the measure should be stratified.
        Type `Expression` (represented as `dict` in JSON). """
        self.description = None
        """ The human readable description of this stratifier.
        Type `str`. """
        super(MeasureGroupStratifier, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(MeasureGroupStratifier, self).elementProperties()
        # Tuple fields: (attr name, JSON name, type, is_list, of_many, required).
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("component", "component", MeasureGroupStratifierComponent, True, None, False),
            ("criteria", "criteria", expression.Expression, False, None, False),
            ("description", "description", str, False, None, False),
        ])
        return js
class MeasureGroupStratifierComponent(backboneelement.BackboneElement):
    """ Stratifier criteria component for the measure.
    A component of the stratifier criteria for the measure report, specified as
    either the name of a valid CQL expression defined within a referenced
    library or a valid FHIR Resource Path.
    """
    # NOTE: generated code -- edit the generator templates, not this class.
    resource_type = "MeasureGroupStratifierComponent"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.code = None
        """ Meaning of the stratifier component.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.criteria = None
        """ Component of how the measure should be stratified.
        Type `Expression` (represented as `dict` in JSON). """
        self.description = None
        """ The human readable description of this stratifier component.
        Type `str`. """
        super(MeasureGroupStratifierComponent, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(MeasureGroupStratifierComponent, self).elementProperties()
        # Tuple fields: (attr name, JSON name, type, is_list, of_many, required).
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("criteria", "criteria", expression.Expression, False, None, True),
            ("description", "description", str, False, None, False),
        ])
        return js
class MeasureSupplementalData(backboneelement.BackboneElement):
    """ What other data should be reported with the measure.

    The supplemental data criteria for the measure report, specified as either
    the name of a valid CQL expression within a referenced library, or a valid
    FHIR Resource Path.
    """

    resource_type = "MeasureSupplementalData"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Meaning of the supplemental data (`CodeableConcept`, as `dict` in JSON).
        self.code = None
        # Expression describing additional data to be reported (`Expression`, as `dict` in JSON).
        self.criteria = None
        # The human readable description of this supplemental data (`str`).
        self.description = None
        # supplemental-data | risk-adjustment-factor (list of `CodeableConcept`, as `dict` in JSON).
        self.usage = None

        super(MeasureSupplementalData, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuple layout: (name, json_name, type, is_list, of_many, not_optional).
        properties = super(MeasureSupplementalData, self).elementProperties()
        properties.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("criteria", "criteria", expression.Expression, False, None, True),
            ("description", "description", str, False, None, False),
            ("usage", "usage", codeableconcept.CodeableConcept, True, None, False),
        ])
        return properties
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import expression
except ImportError:
expression = sys.modules[__package__ + '.expression']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import relatedartifact
except ImportError:
relatedartifact = sys.modules[__package__ + '.relatedartifact']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| 40.945313 | 124 | 0.596737 |
acf9ba73fafd5dbadafebee175c89c0f367a6706 | 15,853 | py | Python | lte/gateway/python/magma/pipelined/service_manager.py | hotlib/magma | 393013d947e0e6e6e1c8ae3893eeac26095beca5 | [
"BSD-3-Clause"
] | null | null | null | lte/gateway/python/magma/pipelined/service_manager.py | hotlib/magma | 393013d947e0e6e6e1c8ae3893eeac26095beca5 | [
"BSD-3-Clause"
] | null | null | null | lte/gateway/python/magma/pipelined/service_manager.py | hotlib/magma | 393013d947e0e6e6e1c8ae3893eeac26095beca5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""
Copyright (c) 2019-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
ServiceManager manages the lifecycle and chaining of network services,
which are cloud managed and provide discrete network functions.
These network services consist of Ryu apps, which operate on tables managed by
the ServiceManager. OVS provides a set number of tables that can be
programmed to match and modify traffic. We split these tables two categories,
main tables and scratch tables.
All apps from the same service are associated with a main table, which is
visible to other services and they are used to forward traffic between
different services.
Apps can also optionally claim additional scratch tables, which may be
required for complex flow matching and aggregation use cases. Scratch tables
should not be accessible to apps from other services.
"""
# pylint: skip-file
# pylint does not play well with aioeventlet, as it uses asyncio.async which
# produces a parse error
import asyncio
from concurrent.futures import Future
from collections import namedtuple, OrderedDict
from typing import List
import aioeventlet
from lte.protos.mconfig.mconfigs_pb2 import PipelineD
from lte.protos.meteringd_pb2_grpc import MeteringdRecordsControllerStub
from lte.protos.mobilityd_pb2_grpc import MobilityServiceStub
from lte.protos.session_manager_pb2_grpc import LocalSessionManagerStub
from magma.pipelined.app import of_rest_server
from magma.pipelined.app.access_control import AccessControlController
from magma.pipelined.app.arp import ArpController
from magma.pipelined.app.dpi import DPIController
from magma.pipelined.app.enforcement import EnforcementController
from magma.pipelined.app.enforcement_stats import EnforcementStatsController
from magma.pipelined.app.inout import EGRESS, INGRESS, InOutController
from magma.pipelined.app.meter import MeterController
from magma.pipelined.app.meter_stats import MeterStatsController
from magma.pipelined.app.subscriber import SubscriberController
from magma.pipelined.app.ue_mac import UEMacAddressController
from magma.pipelined.rule_mappers import RuleIDToNumMapper, \
SessionRuleToVersionMapper
from ryu.base.app_manager import AppManager
from magma.common.service import MagmaService
from magma.common.service_registry import ServiceRegistry
from magma.configuration import environment
class Tables:
    """Table assignment for one app: one main table plus any scratch tables."""

    __slots__ = ['main_table', 'scratch_tables']

    def __init__(self, main_table, scratch_tables=None):
        self.main_table = main_table
        # Default handled via None so every instance owns a fresh list
        # (avoids the shared-mutable-default pitfall).
        self.scratch_tables = [] if scratch_tables is None else scratch_tables
class TableNumException(Exception):
    """Raised when no more OVS table numbers can be allocated."""
class _TableManager:
    """
    Maintains the internal mapping from apps to their main and scratch
    OVS tables.

    Main tables (2..19) chain services between INGRESS (1) and EGRESS (20);
    scratch tables (21..254) are private to the app that claimed them.
    """

    INGRESS_TABLE_NUM = 1
    EGRESS_TABLE_NUM = 20
    MAIN_TABLE_START_NUM = 2
    MAIN_TABLE_LIMIT_NUM = EGRESS_TABLE_NUM  # exclusive
    SCRATCH_TABLE_START_NUM = EGRESS_TABLE_NUM + 1  # 21
    SCRATCH_TABLE_LIMIT_NUM = 255  # exclusive

    def __init__(self):
        # Ingress/egress are always present; other apps register later.
        self._tables_by_app = {
            INGRESS: Tables(main_table=self.INGRESS_TABLE_NUM),
            EGRESS: Tables(main_table=self.EGRESS_TABLE_NUM),
        }
        self._next_main_table = self.MAIN_TABLE_START_NUM
        self._next_scratch_table = self.SCRATCH_TABLE_START_NUM

    def _allocate_main_table(self) -> int:
        """Claim the next free main table number, or raise TableNumException."""
        if self._next_main_table == self.MAIN_TABLE_LIMIT_NUM:
            raise TableNumException(
                'Cannot generate more tables. Table limit of %s '
                'reached!' % self.MAIN_TABLE_LIMIT_NUM)
        allocated = self._next_main_table
        self._next_main_table += 1
        return allocated

    def register_apps_for_service(self, app_names: List[str]):
        """
        Register the apps for a service with a main table.
        """
        # All apps of one service share a single main table.
        shared_table = self._allocate_main_table()
        for name in app_names:
            self._tables_by_app[name] = Tables(main_table=shared_table)

    def register_apps_for_table0_service(self, app_names: List[str]):
        """
        Register the apps for a service with main table 0
        """
        for name in app_names:
            self._tables_by_app[name] = Tables(main_table=0)

    def get_table_num(self, app_name: str) -> int:
        """Return the main table assigned to `app_name`."""
        if app_name not in self._tables_by_app:
            raise Exception('App is not registered: %s' % app_name)
        return self._tables_by_app[app_name].main_table

    def get_next_table_num(self, app_name: str) -> int:
        """
        Returns the main table number of the next service.
        If there are no more services after the current table, return the
        EGRESS table
        """
        if app_name not in self._tables_by_app:
            raise Exception('App is not registered: %s' % app_name)
        candidate = self._tables_by_app[app_name].main_table + 1
        # Any table >= _next_main_table was never allocated, so chain to egress.
        if candidate < self._next_main_table:
            return candidate
        return self.EGRESS_TABLE_NUM

    def is_app_enabled(self, app_name: str) -> bool:
        """inout is implicitly always enabled; others must be registered."""
        return (app_name == InOutController.APP_NAME
                or app_name in self._tables_by_app)

    def allocate_scratch_tables(self, app_name: str, count: int) -> \
            List[int]:
        """Claim `count` consecutive scratch tables for `app_name`."""
        if self._next_scratch_table + count > self.SCRATCH_TABLE_LIMIT_NUM:
            raise TableNumException(
                'Cannot generate more tables. Table limit of %s '
                'reached!' % self.SCRATCH_TABLE_LIMIT_NUM)
        start = self._next_scratch_table
        self._next_scratch_table += count
        tbl_nums = list(range(start, start + count))
        self._tables_by_app[app_name].scratch_tables.extend(tbl_nums)
        return tbl_nums

    def get_scratch_table_nums(self, app_name: str) -> List[int]:
        """Return the scratch tables previously claimed by `app_name`."""
        if app_name not in self._tables_by_app:
            raise Exception('App is not registered: %s' % app_name)
        return self._tables_by_app[app_name].scratch_tables

    def get_all_table_assignments(self) -> 'OrderedDict[str, Tables]':
        """All assignments, ordered by (main table, app name)."""
        ordered = OrderedDict(sorted(
            self._tables_by_app.items(),
            key=lambda item: (item[1].main_table, item[0])))
        # Include table 0 when it is managed by the EPC, for completeness.
        if 'ue_mac' not in self._tables_by_app:
            ordered['mme'] = Tables(main_table=0)
            ordered.move_to_end('mme', last=False)
        return ordered
class ServiceManager:
    """
    ServiceManager manages the service lifecycle and chaining of services for
    the Ryu apps. Ryu apps are loaded based on the services specified in the
    YAML config for static apps and mconfig for dynamic apps.
    ServiceManager also maintains a mapping between apps to the flow
    tables they use.
    Currently, its use cases include:
    - Starting all Ryu apps
    - Flow table number lookup for Ryu apps
    - Main & scratch tables management
    """

    # (name, module) pair; `module` is the import path handed to Ryu's
    # AppManager when instantiating the app.
    App = namedtuple('App', ['name', 'module'])

    UE_MAC_ADDRESS_SERVICE_NAME = 'ue_mac'
    ARP_SERVICE_NAME = 'arpd'
    ACCESS_CONTROL_SERVICE_NAME = 'access_control'
    RYU_REST_SERVICE_NAME = 'ryu_rest_service'

    # Mapping between services defined in mconfig and the names and modules of
    # the corresponding Ryu apps in PipelineD. The module is used for the Ryu
    # app manager to instantiate the app.
    # Note that a service may require multiple apps.
    DYNAMIC_SERVICE_TO_APPS = {
        PipelineD.METERING: [
            App(name=MeterController.APP_NAME,
                module=MeterController.__module__),
            App(name=MeterStatsController.APP_NAME,
                module=MeterStatsController.__module__),
            App(name=SubscriberController.APP_NAME,
                module=SubscriberController.__module__),
        ],
        PipelineD.DPI: [
            App(name=DPIController.APP_NAME, module=DPIController.__module__),
        ],
        PipelineD.ENFORCEMENT: [
            App(name=EnforcementController.APP_NAME,
                module=EnforcementController.__module__),
            App(name=EnforcementStatsController.APP_NAME,
                module=EnforcementStatsController.__module__),
        ],
    }

    # Mapping between the app names defined in pipelined.yml and the names and
    # modules of their corresponding Ryu apps in PipelineD.
    STATIC_SERVICE_TO_APPS = {
        UE_MAC_ADDRESS_SERVICE_NAME: [
            App(name=UEMacAddressController.APP_NAME,
                module=UEMacAddressController.__module__),
        ],
        ARP_SERVICE_NAME: [
            App(name=ArpController.APP_NAME, module=ArpController.__module__),
        ],
        ACCESS_CONTROL_SERVICE_NAME: [
            App(name=AccessControlController.APP_NAME,
                module=AccessControlController.__module__),
        ],
        RYU_REST_SERVICE_NAME: [
            App(name='ryu_rest_app', module='ryu.app.ofctl_rest'),
        ],
    }

    # Some apps do not use a table, so they need to be excluded from table
    # allocation.
    STATIC_SERVICE_WITH_NO_TABLE = [
        RYU_REST_SERVICE_NAME,
    ]

    def __init__(self, magma_service: MagmaService):
        """Build the app list and table assignments from config + mconfig."""
        self._magma_service = magma_service
        # inout is a mandatory app and it occupies both table 1(for ingress)
        # and table 20(for egress).
        self._apps = [self.App(name=InOutController.APP_NAME,
                               module=InOutController.__module__)]
        self._table_manager = _TableManager()
        self.session_rule_version_mapper = SessionRuleToVersionMapper()
        # Static (YAML) services first, then dynamic (mconfig) services, so
        # static services get the lower main-table numbers.
        self._init_static_services()
        self._init_dynamic_services()

    def _init_static_services(self):
        """
        _init_static_services populates app modules and allocates a main table
        for each static service.
        """
        static_services = self._magma_service.config['static_services']
        static_apps = \
            [app for service in static_services for app in
             self.STATIC_SERVICE_TO_APPS[service]]
        self._apps.extend(static_apps)
        # Register static apps for each service to a main table. Filter out any
        # apps that do not need a table.
        services_with_tables = \
            [service for service in static_services if
             service not in self.STATIC_SERVICE_WITH_NO_TABLE]
        for service in services_with_tables:
            app_names = [app.name for app in
                         self.STATIC_SERVICE_TO_APPS[service]]
            # UE MAC service must be registered with Table 0
            if service == self.UE_MAC_ADDRESS_SERVICE_NAME:
                self._table_manager.register_apps_for_table0_service(app_names)
                continue
            self._table_manager.register_apps_for_service(app_names)

    def _init_dynamic_services(self):
        """
        _init_dynamic_services populates app modules and allocates a main table
        for each dynamic service.
        """
        dynamic_services = self._magma_service.mconfig.services
        dynamic_apps = [app for service in dynamic_services for
                        app in self.DYNAMIC_SERVICE_TO_APPS[service]]
        self._apps.extend(dynamic_apps)
        # Register dynamic apps for each service to a main table. Filter out
        # any apps that do not need a table.
        for service in dynamic_services:
            app_names = [app.name for app in
                         self.DYNAMIC_SERVICE_TO_APPS[service]]
            self._table_manager.register_apps_for_service(app_names)

    def load(self):
        """
        Instantiates and schedules the Ryu app eventlets in the service
        eventloop.
        """
        manager = AppManager.get_instance()
        manager.load_apps([app.module for app in self._apps])
        # `contexts` is the shared kwargs dict passed to every Ryu app
        # constructor via instantiate_apps below.
        contexts = manager.create_contexts()
        contexts['rule_id_mapper'] = RuleIDToNumMapper()
        contexts[
            'session_rule_version_mapper'] = self.session_rule_version_mapper
        contexts['app_futures'] = {app.name: Future() for app in self._apps}
        contexts['config'] = self._magma_service.config
        contexts['mconfig'] = self._magma_service.mconfig
        contexts['loop'] = self._magma_service.loop
        contexts['service_manager'] = self
        records_chan = ServiceRegistry.get_rpc_channel(
            'meteringd_records', ServiceRegistry.CLOUD)
        sessiond_chan = ServiceRegistry.get_rpc_channel(
            'sessiond', ServiceRegistry.LOCAL)
        mobilityd_chan = ServiceRegistry.get_rpc_channel(
            'mobilityd', ServiceRegistry.LOCAL)
        contexts['rpc_stubs'] = {
            'metering_cloud': MeteringdRecordsControllerStub(records_chan),
            'mobilityd': MobilityServiceStub(mobilityd_chan),
            'sessiond': LocalSessionManagerStub(sessiond_chan),
        }
        # Instantiate and schedule apps
        for app in manager.instantiate_apps(**contexts):
            # Wrap the eventlet in asyncio so it will stop when the loop is
            # stopped
            future = aioeventlet.wrap_greenthread(app,
                                                  self._magma_service.loop)
            # Schedule the eventlet for evaluation in service loop
            asyncio.ensure_future(future)
        # In development mode, run server so that
        # (presumably: the OpenFlow REST server is reachable for debugging —
        # original comment is truncated; TODO confirm intent)
        if environment.is_dev_mode():
            server_thread = of_rest_server.start(manager)
            future = aioeventlet.wrap_greenthread(server_thread,
                                                  self._magma_service.loop)
            asyncio.ensure_future(future)

    def get_table_num(self, app_name: str) -> int:
        """
        Args:
            app_name: Name of the app
        Returns:
            The app's main table number
        """
        return self._table_manager.get_table_num(app_name)

    def get_next_table_num(self, app_name: str) -> int:
        """
        Args:
            app_name: Name of the app
        Returns:
            The main table number of the next service.
            If there are no more services after the current table,
            return the EGRESS table
        """
        return self._table_manager.get_next_table_num(app_name)

    def is_app_enabled(self, app_name: str) -> bool:
        """
        Args:
            app_name: Name of the app
        Returns:
            Whether or not the app is enabled
        """
        return self._table_manager.is_app_enabled(app_name)

    def allocate_scratch_tables(self, app_name: str, count: int) -> List[int]:
        """
        Args:
            app_name:
                Each scratch table is associated with an app. This is used to
                help enforce scratch table isolation between apps.
            count: Number of scratch tables to be claimed
        Returns:
            List of scratch table numbers
        Raises:
            TableNumException if there are no more available tables
        """
        return self._table_manager.allocate_scratch_tables(app_name, count)

    def get_scratch_table_nums(self, app_name: str) -> List[int]:
        """
        Returns the scratch tables claimed by the given app.
        """
        return self._table_manager.get_scratch_table_nums(app_name)

    def get_all_table_assignments(self):
        """
        Returns: OrderedDict of app name to tables mapping, ordered by main
            table number, and app name.
        """
        return self._table_manager.get_all_table_assignments()
| 39.337469 | 79 | 0.673122 |
acf9baa6f619a56aba81f8bbe16bac03c73188cf | 6,213 | py | Python | tests_functional/tests_reactions/test_reactions_channel.py | brailovskiy/grpc-test | 70eeb7e6fb68a6257bf549a7927c270a89cbe6c2 | [
"MIT"
] | null | null | null | tests_functional/tests_reactions/test_reactions_channel.py | brailovskiy/grpc-test | 70eeb7e6fb68a6257bf549a7927c270a89cbe6c2 | [
"MIT"
] | null | null | null | tests_functional/tests_reactions/test_reactions_channel.py | brailovskiy/grpc-test | 70eeb7e6fb68a6257bf549a7927c270a89cbe6c2 | [
"MIT"
] | null | null | null | import allure
from hamcrest import *
import pytest
from dialog_api.peers_pb2 import OutPeer
@allure.issue("SAN-13", "Reaction in channels")
@pytest.mark.incremental
@pytest.mark.usefixtures("d_user", "channel_reactons", "update1", "update2", "update3")
@pytest.mark.parametrize('d_user', ["3 users"], indirect=True)
class TestReactionChannel:
    """ Tests for setting reaction on message in channel """

    @allure.title("Test set reaction in channel")
    @allure.testcase("XTE-429", "Test set reaction in channel")
    def test_set_reaction_channel(self, d_user, channel_reactons, update1, update2, update3):
        """ Test same reaction in channel
        """
        group = channel_reactons[0]
        msg = channel_reactons[1]
        outpeer = OutPeer(id=group.group.id, access_hash=group.group.access_hash, type=2)
        code1 = ':thumbs_up:'
        with allure.step('User 2 set reaction to second message'):
            mid = msg[1].message_id
            reaction = d_user.set_reaction(d_user.u2, outpeer=outpeer, mid=mid, code=code1)
            # Round-trip through utf-8 normalizes the emoji code for comparison.
            set_emoji = reaction.reactions[0].code.encode('utf-8').decode('utf-8')
            print(set_emoji)
        with allure.step('All users load their history'):
            hist3 = d_user.load_history(d_user.u3, outpeer=outpeer)
            hist2 = d_user.load_history(d_user.u2, outpeer=outpeer)
            hist1 = d_user.load_history(d_user.u1, outpeer=outpeer)
            # NOTE(review): hist1 indexes history[0] while hist2/hist3 index
            # history[1] — presumably histories are ordered differently per
            # user; confirm this is intentional.
            emoji1 = hist1.history[0].reactions[0].code.encode('utf-8').decode('utf-8')
            emoji2 = hist2.history[1].reactions[0].code.encode('utf-8').decode('utf-8')
            emoji3 = hist3.history[1].reactions[0].code.encode('utf-8').decode('utf-8')
        with allure.step('Reaction of all users shown in their message history are equal to sent'):
            # NOTE(review): `and`-ing hamcrest matchers evaluates to the last
            # matcher only, so this effectively asserts against emoji3 alone;
            # `all_of(...)` would check all three — confirm and fix upstream.
            assert_that(set_emoji, equal_to(emoji1) and equal_to(emoji2) and equal_to(emoji3))

    @allure.title("User1 get update for reaction in channel")
    def test_user1_get_update(self, update1, channel_reactons, d_user):
        """User 1 receives an updateReactionsUpdate matching the set emoji."""
        updates1 = update1
        group = channel_reactons[0]
        outpeer = OutPeer(id=group.group.id, access_hash=group.group.access_hash, type=2)
        hist1 = d_user.load_history(d_user.u1, outpeer=outpeer)
        emoji1 = hist1.history[0].reactions[0].code.encode('utf-8').decode('utf-8')
        with allure.step('User 1 update for reaction'):
            # Only the first reactions update in the stream is checked.
            for update in updates1:
                if update.unboxed_update.HasField('updateReactionsUpdate'):
                    reaction = update.unboxed_update.updateReactionsUpdate
                    assert_that(reaction.peer.id, equal_to(group.group.id))
                    assert_that(reaction.reactions[-1].code.encode('utf-8').decode('utf-8'), equal_to(emoji1))
                    break

    @allure.title("User2 get update for reaction in channel")
    def test_user2_get_update(self, update2, channel_reactons, d_user):
        """User 2 receives an updateReactionsUpdate matching the set emoji."""
        updates2 = update2
        group = channel_reactons[0]
        outpeer = OutPeer(id=group.group.id, access_hash=group.group.access_hash, type=2)
        hist2 = d_user.load_history(d_user.u2, outpeer=outpeer)
        print(hist2)
        emoji2 = hist2.history[1].reactions[0].code.encode('utf-8').decode('utf-8')
        with allure.step('User 2 update for reaction'):
            for update in updates2:
                print(update)
                if update.unboxed_update.HasField('updateReactionsUpdate'):
                    reaction = update.unboxed_update.updateReactionsUpdate
                    assert_that(reaction.peer.id, equal_to(group.group.id))
                    assert_that(reaction.reactions[-1].code.encode('utf-8').decode('utf-8'), equal_to(emoji2))
                    break

    @allure.title("User3 get update for reaction in channel")
    def test_user3_get_update(self, update3, channel_reactons, d_user):
        """User 3 receives an updateReactionsUpdate matching the set emoji."""
        updates3 = update3
        group = channel_reactons[0]
        outpeer = OutPeer(id=group.group.id, access_hash=group.group.access_hash, type=2)
        hist3 = d_user.load_history(d_user.u3, outpeer=outpeer)
        print(hist3)
        emoji3 = hist3.history[1].reactions[0].code.encode('utf-8').decode('utf-8')
        with allure.step('User 3 update for reaction'):
            for update in updates3:
                print(update)
                if update.unboxed_update.HasField('updateReactionsUpdate'):
                    reaction = update.unboxed_update.updateReactionsUpdate
                    assert_that(reaction.peer.id, equal_to(group.group.id))
                    assert_that(reaction.reactions[-1].code.encode('utf-8').decode('utf-8'), equal_to(emoji3))
                    break

    @allure.title("Test count different reactions in channel")
    @allure.testcase("XTE-431", "Test count different reactions in channel")
    def test_counter_reactions_channel(self, d_user, group_reactons, update1):
        """ Test count different reactions in channel"""
        # NOTE(review): this test uses the `group_reactons` fixture although
        # the class-level usefixtures declares `channel_reactons` — verify the
        # channel fixture was intended here.
        updates1 = update1
        group = group_reactons[0]
        msg = group_reactons[1]
        outpeer = OutPeer(id=group.group.id, access_hash=group.group.access_hash, type=2)
        with allure.step('User 2 set reaction to second message'):
            mid = msg[1].message_id
            # NOTE(review): code=None — presumably the server picks/echoes a
            # default reaction code; confirm against the API contract.
            reaction = d_user.set_reaction(d_user.u2, outpeer=outpeer, mid=mid, code=None)
            set_emoji = reaction.reactions[0].code.encode('utf-8').decode('utf-8')
            print(set_emoji)
        with allure.step('User 1 and User 3 set reactions'):
            first_r = reaction.reactions[0]
            reaction2 = d_user.set_reaction(d_user.u3, outpeer=outpeer, mid=mid, code=None)
            second_r = reaction2.reactions[0]
            reaction3 = d_user.set_reaction(d_user.u1, outpeer=outpeer, mid=mid, code=None)
            third_r = reaction3.reactions[0]
        with allure.step('All reactions shows in update'):
            # Find the first update carrying all three reactions and check
            # every set code is present.
            for update in updates1:
                reaction = update.unboxed_update.updateReactionsUpdate.reactions
                if len(reaction) >= 3:
                    reaction = [item.code for item in reaction]
                    assert_that(reaction, has_items(first_r.code, second_r.code, third_r.code))
                    break
| 54.5 | 110 | 0.646387 |
acf9bbc09819e8abef95aae4f7996cb589b8b64c | 2,072 | py | Python | pygithub.py | ncos/gitparser | 27f814f8bb8ddfdde174ff1277c38f1cad56b12f | [
"MIT"
] | null | null | null | pygithub.py | ncos/gitparser | 27f814f8bb8ddfdde174ff1277c38f1cad56b12f | [
"MIT"
] | null | null | null | pygithub.py | ncos/gitparser | 27f814f8bb8ddfdde174ff1277c38f1cad56b12f | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys
def execute(command, critical=True):
    """Run a shell command and report the outcome.

    :param command: shell command line passed to os.system
    :param critical: when True (the default) a failing command aborts the
        whole script with a non-zero exit status
    """
    status = "Executing '" + command + "'... "
    if os.system(command) == 0:
        # print(...) with a single argument works on both Python 2 and 3.
        print(status + "Success.")
        return
    print(status + "FAILED!")
    if critical:
        # Exit with a failure status so callers/shells can detect the abort.
        # (Previously this exited with 0, which masked the error.)
        sys.exit(1)
# Sanity-check the working tree and make sure the script starts from master.
execute("git status")
execute("git checkout master")
#execute("git branch -v")

# PATTERN is the marker searched for in the .tex sources, REPLACEMENT is what
# each occurrence is rewritten to, and HASH is the prefix for the generated
# per-change branch names.
PATTERN = " --"
REPLACEMENT = "~---"
HASH = "a_t_"
def commit(fname, text, branch, message):
    """Write `text` to `fname` on a new branch, commit and push it.

    Leaves the repository checked out on master afterwards with local
    changes discarded.

    :param fname: file to overwrite
    :param text: new file contents
    :param branch: name of the branch to create and push
    :param message: commit message
    """
    execute("git checkout -b " + branch)
    # Context manager guarantees the file is flushed and closed before the
    # commit runs (the original left the handle open on write errors).
    with open(fname, 'w') as f:
        f.write(text)
    execute("git commit -a -m \"" + message + "\"")
    execute("git push origin " + branch)
    execute("git checkout master")
    # Non-critical: nothing to reset is not a fatal condition.
    execute("git reset --hard HEAD", False)
def replace_entry(text, number):
return text.replace(PATTERN, "FOOBAR", number).replace("FOOBAR", PATTERN, number - 1).replace("FOOBAR", REPLACEMENT)
def apply_pattern(file_list):
    """For every file, create one branch per occurrence of PATTERN.

    Each generated branch (named HASH + id) holds the file with exactly one
    occurrence of PATTERN rewritten to REPLACEMENT, so every change can be
    reviewed in isolation. The id counter advances per committed change so
    branch names never collide.

    :param file_list: iterable of file paths to process
    """
    current_id = 0
    for name in file_list:
        # Read-and-close immediately; the original left the handle open
        # on read errors.
        with open(name, 'r') as f:
            base_text = f.read()
        number = 1
        while True:
            text = replace_entry(base_text, number)
            if base_text == text:
                break  # no occurrence `number` exists -> done with this file
            commit(name, text, HASH + str(current_id), "Update " + name)
            number = number + 1
            current_id = current_id + 1
def reset_branches(id_min, id_max):
    """Hard-reset every generated branch in [id_min, id_max] onto origin/master."""
    for branch_id in range(id_min, id_max + 1):
        branch = HASH + str(branch_id)
        execute("git checkout " + branch)
        execute("git reset --hard origin/master")
        execute("git pull origin master")
    # Leave the working tree back on master when done.
    execute("git checkout master")
def delete_branches(id_min, id_max):
    """Delete generated branches [id_min, id_max] on origin and locally."""
    execute("git checkout master")
    for branch_id in range(id_min, id_max + 1):
        branch = HASH + str(branch_id)
        # Best effort: a branch that is already gone must not abort cleanup.
        execute("git push origin --delete " + branch, False)
        execute("git branch -D " + branch, False)
# Collect every LaTeX source in the working directory and generate one review
# branch per PATTERN occurrence.
filenames = os.listdir("./")
texfiles = [name for name in filenames if ".tex" in name]
apply_pattern(texfiles)
#reset_branches(0, 34)
#delete_branches(7, 42)
| 23.816092 | 120 | 0.580598 |
acf9bcbfab1a3f145456e5062a30491199eaa7d5 | 3,319 | py | Python | tests/utils/test_exports.py | amanbansal2709/ctfd | 941335a5e205ca818ce1758076858b628e4fa05b | [
"Apache-2.0"
] | null | null | null | tests/utils/test_exports.py | amanbansal2709/ctfd | 941335a5e205ca818ce1758076858b628e4fa05b | [
"Apache-2.0"
] | null | null | null | tests/utils/test_exports.py | amanbansal2709/ctfd | 941335a5e205ca818ce1758076858b628e4fa05b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from tests.helpers import (
create_ctfd,
destroy_ctfd,
register_user,
login_as_user,
gen_challenge,
gen_flag,
gen_user,
gen_hint
)
from CTFd.models import Challenges, Flags, Users
from CTFd.utils import text_type
from CTFd.utils.exports import import_ctf, export_ctf
import json
import os
import zipfile
def test_export_ctf():
    """Test that CTFd can export the database"""
    app = create_ctfd()
    # The export path is only exercised on non-sqlite backends; on sqlite the
    # body is skipped and only teardown runs.
    if not app.config.get('SQLALCHEMY_DATABASE_URI').startswith('sqlite'):
        with app.app_context():
            register_user(app)
            # Non-ASCII name checks that export preserves unicode.
            chal1 = gen_challenge(app.db, name=text_type('🐺'))
            # Second challenge requires the first (id 1) as a prerequisite.
            gen_challenge(app.db, name=text_type('🐺'), requirements={
                "prerequisites": [1]
            })
            chal_id = chal1.id
            gen_hint(app.db, chal_id)

            client = login_as_user(app)
            with client.session_transaction():
                data = {
                    "target": 1,
                    "type": "hints"
                }
            # Unlock the hint so the export contains unlock records.
            r = client.post('/api/v1/unlocks', json=data)
            output = r.get_data(as_text=True)
            # Parsed only to assert the response body is valid JSON.
            json.loads(output)
            app.db.session.commit()

            backup = export_ctf()
            with open('export.test_export_ctf.zip', 'wb') as f:
                f.write(backup.read())
            export = zipfile.ZipFile('export.test_export_ctf.zip', 'r')
            data = json.loads(export.read('db/challenges.json'))
            # The prerequisite relationship must survive the export round-trip.
            assert data['results'][1]['requirements'] == {"prerequisites": [1]}
            os.remove('export.test_export_ctf.zip')
    destroy_ctfd(app)
def test_import_ctf():
    """Test that CTFd can import a CTF"""
    app = create_ctfd()
    # Export path only exercised on non-sqlite backends.
    if not app.config.get('SQLALCHEMY_DATABASE_URI').startswith('sqlite'):
        with app.app_context():
            # Seed 10 users, 10 challenges (one with a prerequisite) and a
            # flag per challenge, then export everything to a zip.
            base_user = 'user'
            for x in range(10):
                user = base_user + str(x)
                user_email = user + "@ctfd.io"
                gen_user(app.db, name=user, email=user_email)
            for x in range(9):
                chal = gen_challenge(app.db, name='chal_name{}'.format(x))
                gen_flag(app.db, challenge_id=chal.id, content='flag')
            chal = gen_challenge(app.db, name='chal_name10', requirements={"prerequisites": [1]})
            gen_flag(app.db, challenge_id=chal.id, content='flag')
            app.db.session.commit()
            backup = export_ctf()
            with open('export.test_import_ctf.zip', 'wb') as f:
                f.write(backup.read())
    destroy_ctfd(app)

    # Re-import into a fresh instance and verify row counts and the
    # prerequisite relationship survived the round-trip.
    app = create_ctfd()
    # TODO: These databases should work but they don't...
    if not app.config.get('SQLALCHEMY_DATABASE_URI').startswith('sqlite'):
        with app.app_context():
            import_ctf('export.test_import_ctf.zip')
            if not app.config.get('SQLALCHEMY_DATABASE_URI').startswith('postgres'):
                # TODO: Dig deeper into why Postgres fails here
                # 10 generated users + the admin created by create_ctfd.
                assert Users.query.count() == 11
                assert Challenges.query.count() == 10
                assert Flags.query.count() == 10
                chal = Challenges.query.filter_by(name='chal_name10').first()
                assert chal.requirements == {"prerequisites": [1]}
    destroy_ctfd(app)
| 34.216495 | 97 | 0.575776 |
acf9bdcc4cdd08edf41dd3ba2a196c3a77c3f8f1 | 5,936 | py | Python | train_sdf_space.py | microsoft/SplinePosEnc | c2a28b76c6cbdac40cef3ee23b5ae936cfcd19b2 | [
"MIT"
] | 14 | 2021-09-17T13:04:33.000Z | 2022-03-30T11:42:27.000Z | train_sdf_space.py | microsoft/SplinePosEnc | c2a28b76c6cbdac40cef3ee23b5ae936cfcd19b2 | [
"MIT"
] | 2 | 2021-09-09T08:31:06.000Z | 2022-03-28T02:23:57.000Z | train_sdf_space.py | microsoft/SplinePosEnc | c2a28b76c6cbdac40cef3ee23b5ae936cfcd19b2 | [
"MIT"
] | 2 | 2021-09-13T12:08:54.000Z | 2022-03-22T10:17:24.000Z | import os
import torch
import numpy as np
from tqdm import tqdm
from config import parse_args
from models import MLPSpace
from losses import sdf_loss
from utils import write_sdf_summary, create_mesh
from datasets import DFaustDataset
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from functools import partial
# configs
FLAGS = parse_args()

# dataset: DFaust SDF samples, batched for training.
flags_data = FLAGS.DATA.train
dfaust_dataset = DFaustDataset(**flags_data)
dataloader = DataLoader(dfaust_dataset, batch_size=flags_data.batch_size,
                        num_workers=24, shuffle=True, pin_memory=True,
                        drop_last=True)

# model
model = MLPSpace(**FLAGS.MODEL)
print(model)
model.cuda()

# load checkpoints (optional warm start)
flags_solver = FLAGS.SOLVER
if flags_solver.ckpt:
    print('loading checkpoint %s' % flags_solver.ckpt)
    model.load_state_dict(torch.load(flags_solver.ckpt))

# init from sphere: for the 'optpos' model, load a pretrained single-shape
# code and tile it across all shape slots.
if FLAGS.MODEL.name == 'optpos' and flags_solver.sphere_init:
    print('Init from sphere, load: %s' % flags_solver.sphere_init)
    trained_dict = torch.load(flags_solver.sphere_init)
    shape_num = FLAGS.MODEL.shape_num
    shape_code = trained_dict.pop('pos_enc.shape_code')
    # Repeat the single trained code for every shape in the space.
    trained_dict['pos_enc.shape_code'] = shape_code.repeat(1, shape_num)
    model_dict = model.state_dict()
    model_dict.update(trained_dict)
    model.load_state_dict(model_dict)

# For the plain 'mlp' model, initialize weights analytically so the network
# starts out approximating a sphere SDF (geometric initialization —
# presumably following SAL-style init; TODO confirm reference).
if FLAGS.MODEL.name == 'mlp' and flags_solver.sphere_init:
    net = model.net.net
    for i in range(len(net)-1):
        weight, bias = net[i].linear.weight, net[i].linear.bias
        torch.nn.init.normal_(weight, 0.0, np.sqrt(2 / weight.shape[0]))
        torch.nn.init.constant_(bias, 0.0)
    # Last layer: positive mean weights and negative bias produce a
    # sphere-like signed distance at initialization.
    weight, bias = net[-1].linear.weight, net[-1].linear.bias
    torch.nn.init.constant_(bias, -0.6)
    torch.nn.init.normal_(weight, mean=np.sqrt(np.pi / weight.shape[1]), std=1e-5)

# optimizer
lr = flags_solver.learning_rate
optim = torch.optim.Adam(lr=lr, params=model.parameters())
if flags_solver.optim_ckpt:
    print('loading checkpoint %s' % flags_solver.optim_ckpt)
    optim.load_state_dict(torch.load(flags_solver.optim_ckpt))

# summaries: tensorboard writer plus a checkpoint directory under logdir.
logdir = flags_solver.logdir
ckpt_dir = os.path.join(logdir, 'checkpoints')
writer = SummaryWriter(logdir)
if not os.path.exists(ckpt_dir):
    os.makedirs(ckpt_dir)
# latent code regularization
def shape_code_reg(idx):
    """L2 penalty on the latent shape codes selected by `idx`."""
    # Mean (rather than sum) keeps the penalty scale independent of code size.
    return model.pos_enc.get_shape_code(idx).square().mean()
# train
def train_step(model_train, global_step):
    """One epoch over the dataloader; returns the mean total loss."""
    model_train.train()
    avg_loss = []
    for i, data in enumerate(dataloader):
        # Batch layout: (coords, sdf_gt, normal_gt, shape index).
        coords = data[0].cuda().requires_grad_()
        sdf_gt, normal_gt, idx = data[1].cuda(), data[2].cuda(), data[3].cuda()
        sdf = model_train(coords, idx)
        # coords requires grad so sdf_loss can penalize the SDF gradient.
        losses = sdf_loss(sdf, coords, sdf_gt, normal_gt,
                          normal_weight=FLAGS.LOSS.normal_weight,
                          grad_weight=FLAGS.LOSS.grad_weight)
        total_train_loss = losses['total_train_loss']
        # latent code regularization
        code_loss = shape_code_reg(idx)
        total_loss = total_train_loss + code_loss * 1e-4
        optim.zero_grad()
        total_loss.backward()
        optim.step()
        # tqdm.write("step %d" % (global_step + i))
        for k, v in losses.items():
            writer.add_scalar(k, v.detach().cpu().item(), global_step + i)
        # NOTE(review): 'latent' is logged at global_step+1 while the other
        # scalars use global_step+i — looks like it should be global_step + i;
        # confirm before changing.
        writer.add_scalar('latent', code_loss.detach().cpu().item(), global_step+1)
        avg_loss.append(total_loss.detach().cpu().item())
    return np.mean(avg_loss)
# test
def test_step(epoch=0, idx=None, save_sdf=True):
  """Extract a mesh (and optionally SDF samples) for one shape.

  When `idx` is None a random shape from the dataset is picked.
  """
  model.eval()
  if idx is None:
    idx = np.random.randint(len(dfaust_dataset))
  mesh_dir = os.path.join(logdir, 'mesh')
  if not os.path.exists(mesh_dir):
    os.makedirs(mesh_dir)
  ply_name = '%s_%04d_%04d.ply' % (flags_solver.alias, epoch, idx)
  ply_path = os.path.join(mesh_dir, ply_name)
  # Bind the shape index so create_mesh can call the model with coords only.
  create_mesh(partial(model, idx=idx), ply_path,
              N=flags_solver.resolution, save_sdf=save_sdf,
              level=flags_solver.level_set)
# run train
def train():
  """Main optimization loop with periodic evaluation and checkpointing."""
  net = model
  if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    net = torch.nn.DataParallel(model)  # use multiple gpus
  steps_per_epoch = len(dataloader)
  epochs = range(flags_solver.start_epoch, flags_solver.num_epochs)
  for epoch in tqdm(epochs, ncols=80):
    global_step = epoch * steps_per_epoch
    # Periodically: write SDF summaries, checkpoint, and extract a mesh.
    if epoch % flags_solver.test_every_epoch == 0:
      write_sdf_summary(model, writer, global_step)
      save_state(filename='model_%05d' % epoch)
      test_step(epoch, save_sdf=False)
    train_loss = train_step(net, global_step)
    tqdm.write("Epoch %d, Total loss %0.6f" % (epoch, train_loss))
  save_state(filename='model_final')
  upsample_code()
upsample_code()
# run test
def test():
  """Extract a mesh (with SDF samples) for every shape in the model."""
  for shape_idx in tqdm(range(FLAGS.MODEL.shape_num), ncols=80):
    test_step(idx=shape_idx, save_sdf=True)
# upsample the hidden code
def upsample_code():
  """Upsample the latent shape-code grid and save the resulting checkpoint.

  Does nothing when `flags_solver.upsample_size` is negative.
  """
  target_size = flags_solver.upsample_size
  if target_size < 0:
    return  # upsampling disabled
  state = model.state_dict()
  with torch.no_grad():
    state['pos_enc.shape_code'] = model.pos_enc.upsample(target_size)
  out_name = 'model_final_upsample_%03d.pth' % target_size
  torch.save(state, os.path.join(ckpt_dir, out_name))
# save model and solver state
def save_state(filename):
  """Checkpoint the model (twice: raw and mean-code variant) and the optimizer."""
  def _ckpt(suffix):
    return os.path.join(ckpt_dir, filename + suffix)
  state = model.state_dict()
  torch.save(state, _ckpt('.pth'))
  # Second copy with the per-shape codes replaced by their mean.
  state['pos_enc.shape_code'] = model.pos_enc.get_mean_code()
  torch.save(state, _ckpt('.mean.pth'))
  torch.save(optim.state_dict(), _ckpt('.solver.pth'))
if __name__ == '__main__':
  # Dispatch to the module-level function named by the config (e.g. 'train'
  # or 'test').  A globals() lookup replaces the original
  # eval('<run>()'), which would execute arbitrary Python taken from config.
  _entry = globals().get(flags_solver.run)
  if not callable(_entry):
    raise ValueError('unknown run target: %r' % (flags_solver.run,))
  _entry()
acf9bdf1858c01df9b124df4b256dae69a48e200 | 856 | py | Python | day-08/part-1/sfluor.py | TPXP/adventofcode-2019 | ee653d6bfb510d14f2c2b3efc730d328c16b3f71 | [
"MIT"
] | 8 | 2019-12-01T08:56:46.000Z | 2019-12-05T21:21:12.000Z | day-08/part-1/sfluor.py | TPXP/adventofcode-2019 | ee653d6bfb510d14f2c2b3efc730d328c16b3f71 | [
"MIT"
] | 10 | 2019-11-25T09:56:20.000Z | 2021-05-10T19:57:48.000Z | day-08/part-1/sfluor.py | TPXP/adventofcode-2019 | ee653d6bfb510d14f2c2b3efc730d328c16b3f71 | [
"MIT"
] | 5 | 2019-12-01T08:19:57.000Z | 2020-11-23T09:50:19.000Z | from collections import defaultdict
from tool.runners.python import SubmissionPy
def checksum(inp, width, height):
    """Advent of Code 2019 day 8 part 1.

    Split `inp` into layers of width*height digit characters, find the layer
    with the fewest '0' digits (first one wins on ties), and return the
    product of its '1' and '2' counts.
    """
    layer_size = width * height
    n_layers = len(inp) // layer_size
    tallies = []
    for k in range(n_layers):
        tally = defaultdict(int)
        for ch in inp[k * layer_size:(k + 1) * layer_size]:
            if ch in "012":
                tally[ch] += 1
        tallies.append(tally)
    best_idx, fewest_zeros = 0, 1e10
    for k, tally in enumerate(tallies):
        if tally["0"] < fewest_zeros:
            best_idx, fewest_zeros = k, tally["0"]
    chosen = tallies[best_idx]
    return chosen["1"] * chosen["2"]
class SfluorSubmission(SubmissionPy):
    """Day 8 part 1 submission: the puzzle image is 25 pixels wide, 6 tall."""

    def run(self, s):
        """Return the solution flag for puzzle input `s` (string)."""
        return checksum(s, 25, 6)
| 21.948718 | 56 | 0.577103 |
acf9bea4c39b19d046e312a46d01f0b56a2e0e0c | 2,809 | py | Python | train/update_label_map.py | abfleishman/active-learning-detect | 2241cb5895ebf057161e2a305c49fd6848512151 | [
"MIT"
] | null | null | null | train/update_label_map.py | abfleishman/active-learning-detect | 2241cb5895ebf057161e2a305c49fd6848512151 | [
"MIT"
] | null | null | null | train/update_label_map.py | abfleishman/active-learning-detect | 2241cb5895ebf057161e2a305c49fd6848512151 | [
"MIT"
] | null | null | null | import csv
import cv2
from pathlib import Path
import time
# def extract_data(filename):
# height, width, _ = cv2.imread(str(filename),1).shape
# return filename.name, height, width
def update_label_map(map_filename, classes):
    """Write a protobuf-text label map mapping each class name to a 1-based id.

    :param map_filename: path of the label-map file to (over)write.
    :param classes: iterable of class-name strings; ids are assigned in order.
    """
    entries = [
        "item {{\n  id: {}\n  name: '{}'\n}}".format(class_id, class_name)
        for class_id, class_name in enumerate(classes, start=1)
    ]
    with open(map_filename, "w") as handle:
        handle.write("".join(entries))
# def select_jsons(image_directory, user_folders, classes, csv_filename, map_filename):
# with open(map_filename, "w") as map_file:
# for index, name in enumerate(classes, 1):
# map_file.write("item {{\n id: {}\n name: '{}'\n}}".format(index, name))
# image_directory = Path(image_directory)
# if user_folders:
# all_images = [(extract_data(filename),filename.parent) for filename in image_directory.glob('**/*') if filename.is_file()]
# else:
# all_images = [extract_data(filename) for filename in image_directory.iterdir()]
# with open(csv_filename, 'w', newline='') as csv_file:
# csv_writer = csv.writer(csv_file)
# if user_folders:
# csv_writer.writerow(["filename","class","xmin","xmax","ymin","ymax","height","width","folder","box_confidence", "image_confidence"])
# for (filename,true_height,true_width),folder in all_images:
# csv_writer.writerow([filename,"NULL",0,0,0,0,true_height,true_width,str(folder).replace(str(image_directory)+"/","",1),0,0])
# else:
# csv_writer.writerow(["filename","class","xmin","xmax","ymin","ymax","height","width","box_confidence", "image_confidence"])
# for filename,true_height,true_width in all_images:
# csv_writer.writerow([filename,"NULL",0,0,0,0,true_height,true_width,0,0])
if __name__ == "__main__":
    from azure.storage.blob import BlockBlobService
    import sys
    import os
    # Allow us to import utils
    config_dir = str(Path.cwd().parent / "utils")
    if config_dir not in sys.path:
        sys.path.append(config_dir)
    from config import Config
    # Usage: update_label_map.py <config-file>
    if len(sys.argv)<2:
        raise ValueError("Need to specify config file")
    config_file = Config.parse_file(sys.argv[1])
    # NOTE(review): block_blob_service is currently unused; it is only needed
    # by the commented-out blob upload below.
    block_blob_service = BlockBlobService(account_name=config_file["AZURE_STORAGE_ACCOUNT"], account_key=config_file["AZURE_STORAGE_KEY"])
    # Regenerate the label map from the comma-separated class list in config.
    update_label_map(config_file["label_map_path"], config_file["classes"].split(","))
    # container_name = config_file["label_container_name"]
    # select_jsons(config_file["image_dir"],config_file["user_folders"]=="True", config_file["classes"].split(","), "totag.csv", config_file["label_map_path"])
    # block_blob_service.create_blob_from_path(container_name, "{}_{}.{}".format("totag",int(time.time() * 1000),"csv"), "totag.csv")
| 52.018519 | 159 | 0.672481 |
acf9bfdfcfbaa469a8aaa326683e6803d083af42 | 3,803 | py | Python | sa/profiles/SKS/SKS/get_version.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/profiles/SKS/SKS/get_version.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/profiles/SKS/SKS/get_version.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# SKS.SKS.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
from noc.core.text import parse_table
class Script(BaseScript):
    """SKS.SKS.get_version: fill the IGetVersion interface for SKS switches.

    "show version" output comes in three known formats; each branch of
    execute() handles one of them.
    """
    name = "SKS.SKS.get_version"
    interface = IGetVersion
    cache = True
    # Format 1: separate "SW version" / "Boot version" / "HW version" lines.
    rx_ver = re.compile(
        r"^\s*SW version\s+(?P<version>\S+).*\n"
        r"^\s*Boot version\s+(?P<bootprom>\S+).*\n"
        r"^\s*HW version\s+(?P<hardware>\S+).*\n",
        re.MULTILINE,
    )
    rx_platform = re.compile(r"^\s*System Description:\s+(?P<platform>.+)\n", re.MULTILINE)
    rx_serial = re.compile(r"^\s*Serial number : (?P<serial>\S+)")
    # Format 2: IOS-like "<platform> Series Software, Version <version>".
    rx_ver2 = re.compile(
        r"^(?P<platform>S(?:KS|WA)\-\S+) Series Software, Version (?P<version>\S+)", re.MULTILINE
    )
    # Bootstrap/hardware/serial lines that accompany format 2.
    rx_rs = re.compile(
        r"^ROM: System Bootstrap, Version (?P<bootprom>\S+),\s*"
        r"hardware version:\s*(?P<hardware>\S+)\s*\n"
        r"^Serial num:\s*(?P<serial>\w+),?",
        re.MULTILINE,
    )

    def execute(self):
        """Return a dict with vendor, platform, version and attributes."""
        v = self.cli("show version", cached=True)
        match = self.rx_ver.search(v)
        if match:
            # Format 1: version/bootprom/hardware on dedicated lines.
            r = {
                "vendor": "SKS",
                "version": match.group("version"),
                "attributes": {
                    "Boot PROM": match.group("bootprom"),
                    "HW version": match.group("hardware"),
                },
            }
            v = self.cli("show system", cached=True)
            match = self.rx_platform.search(v)
            platform = match.group("platform")
            # Normalize marketing names to canonical platform identifiers.
            if platform == "SKS 10G":
                platform = "SKS-16E1-IP-1U"
            elif platform.startswith("SKS"):
                platform = "SW-24"
            r["platform"] = platform
            v = self.cli("show system id", cached=True)
            match = self.rx_serial.search(v)
            if match:
                r["attributes"]["Serial Number"] = match.group("serial")
        else:
            match = self.rx_ver2.search(v)
            if match:
                # Format 2: single "Series Software" banner line.
                r = {
                    "vendor": "SKS",
                    "platform": match.group("platform"),
                    "version": match.group("version"),
                }
                # NOTE(review): rx_rs is assumed to always match when rx_ver2
                # does; a miss here would raise AttributeError -- confirm.
                match = self.rx_rs.search(v)
                r["attributes"] = {
                    "Boot PROM": match.group("bootprom"),
                    "HW version": match.group("hardware"),
                    "Serial Number": match.group("serial"),
                }
            else:
                # Format 3: tabular output; take the first data row only.
                t = parse_table(v)
                for i in t:
                    r = {
                        "vendor": "SKS",
                        "version": i[1],
                        "attributes": {"Boot PROM": i[2], "HW version": i[3]},
                    }
                    break
                v = self.cli("show system", cached=True)
                t = parse_table(v)
                for i in t:
                    platform = i[1]
                    break
                if platform == "SKS 10G":
                    platform = "SKS-16E1-IP-1U"
                elif platform.startswith("SKS"):
                    platform = "SW-24"
                r["platform"] = platform
                v = self.cli("show system id", cached=True)
                t = parse_table(v)
                for i in t:
                    serial = i[1]
                    break
                # NOTE(review): if the tables above are empty, r/platform/serial
                # are unbound and this raises NameError -- presumably the device
                # always returns at least one row.
                r["attributes"]["Serial Number"] = serial
        return r
| 35.542056 | 97 | 0.434657 |
acf9c0f31c8b7434bed95193edc27b1af7465438 | 1,958 | py | Python | scripts/python/boot2.py | brakmic/cm3 | b99e280eca00c322e04e0586951de50108e51343 | [
"BSD-4-Clause-UC",
"BSD-4-Clause",
"BSD-3-Clause"
] | null | null | null | scripts/python/boot2.py | brakmic/cm3 | b99e280eca00c322e04e0586951de50108e51343 | [
"BSD-4-Clause-UC",
"BSD-4-Clause",
"BSD-3-Clause"
] | null | null | null | scripts/python/boot2.py | brakmic/cm3 | b99e280eca00c322e04e0586951de50108e51343 | [
"BSD-4-Clause-UC",
"BSD-4-Clause",
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
# Roughly:
#!/bin/sh
#
#set -e
#set -x
#
#./make-dist-cfg.py $*
#./do-pkg.py m3cc buildship $*
#./do-cm3-all.py realclean skipgcc $*
#./do-pkg.py m3cc m3core libm3 buildship $*
#./upgrade.py skipgcc $*
#./do-cm3-all.py realclean skipgcc $*
#./do-cm3-all.py buildship $*
import os, sys, pylib
from os import getenv
argv = sys.argv
env_OS = getenv("OS")  # "Windows_NT" on Windows; typically unset on POSIX
def Posix():
    """Return True when running on a POSIX host."""
    return os.name == "posix"
if Posix():
    from os import uname
elif env_OS == "Windows_NT":
    DevNull = "nul:"
    # Minimal uname() substitute on Windows (os.uname is POSIX-only); mirrors
    # the (sysname, nodename, release, version, machine) tuple shape.
    def uname():
        PROCESSOR_ARCHITECTURE = getenv("PROCESSOR_ARCHITECTURE")
        return (env_OS, "", PROCESSOR_ARCHITECTURE, "", PROCESSOR_ARCHITECTURE)
else:
    print("fatal error: unknown host")
    sys.exit(1)
def RemoveTrailingSpaces(a):
    """Return `a` with all trailing space characters (only ' ', not tabs)
    removed.

    Replaces the original one-character-at-a-time loop (quadratic, since each
    `a[:-1]` copies the string) with the equivalent single str.rstrip call.
    """
    return a.rstrip(" ")
# True when the user requested the C backend ("c" or "C") on the command line.
_CBackend = "c" in argv or "C" in argv
def Run(command):
    """Echo `command` (with the script's own arguments appended) and run it
    via the shell; exit with an error message on failure."""
    full = RemoveTrailingSpaces(command + " " + " ".join(argv[1:]))
    print("'" + full + "'")
    # os.system returns a non-zero (truthy) status on failure.
    if os.system(full):
        sys.exit("ERROR: " + full)
# ./do-pkg.py doesn't like skipgcc plus just m3cc -- no packages to build
# Which is why this was rewritten in Python from Bourne shell.
# "c" is appended to child-script arguments when the C backend was requested.
c = ""
if _CBackend:
    c = "c"
pyexe = ""
# NOTE(review): this redefines Posix() from earlier in the file (same body).
def Posix():
    return os.name == "posix"
if Posix():
    pass
elif env_OS == "Windows_NT":
    # On Windows, .py scripts are not directly executable: locate a Python
    # interpreter to prefix each child command with.
    pyexe = (pylib.SearchPath("python.exe") or pylib.SearchPath("python3.exe") or pylib.SearchPath("py.exe") or pylib.SearchPath("python2.exe")) + " "
# Bootstrap sequence: configure, build the backend and core libraries,
# upgrade the compiler, then clean and rebuild everything.
Run(pyexe + "./make-dist-cfg.py")
if not _CBackend and env_OS != "Windows_NT":
    Run(pyexe + "./do-pkg.py m3cc buildship " + c)
defines = pylib.PassThroughDefines()
Run(pyexe + "./do-cm3-all.py realclean skipgcc " + c + defines)
Run(pyexe + "./do-pkg.py m3cc m3core libm3 buildship " + c + defines)
Run(pyexe + "./upgrade.py skipgcc " + c + defines)
Run(pyexe + "./do-cm3-all.py realclean skipgcc " + c + defines)
Run(pyexe + "./do-cm3-all.py buildship " + c + defines)
| 25.428571 | 150 | 0.637385 |
acf9c14a53e470a56e0433270590421d96237f39 | 3,994 | py | Python | textattack/attack_recipes/textbugger_li_2018.py | k-ivey/TextAttack | 47d15acea90bf92e6a7f19200a59da29e74731e6 | [
"MIT"
] | 1 | 2020-12-04T18:05:44.000Z | 2020-12-04T18:05:44.000Z | textattack/attack_recipes/textbugger_li_2018.py | k-ivey/TextAttack | 47d15acea90bf92e6a7f19200a59da29e74731e6 | [
"MIT"
] | null | null | null | textattack/attack_recipes/textbugger_li_2018.py | k-ivey/TextAttack | 47d15acea90bf92e6a7f19200a59da29e74731e6 | [
"MIT"
] | null | null | null | # from textattack.constraints.grammaticality import PartOfSpeech
from textattack.constraints.pre_transformation import (
RepeatModification,
StopwordModification,
)
from textattack.constraints.semantics.sentence_encoders import UniversalSentenceEncoder
from textattack.goal_functions import UntargetedClassification
from textattack.search_methods import GreedyWordSwapWIR
from textattack.shared.attack import Attack
from textattack.transformations import (
CompositeTransformation,
WordSwapEmbedding,
WordSwapHomoglyphSwap,
WordSwapNeighboringCharacterSwap,
WordSwapRandomCharacterDeletion,
WordSwapRandomCharacterInsertion,
)
from .attack_recipe import AttackRecipe
class TextBuggerLi2018(AttackRecipe):
    """TextBugger attack recipe.

    Li, J., Ji, S., Du, T., Li, B., and Wang, T. (2018).
    TextBugger: Generating Adversarial Text Against Real-world Applications.
    ArXiv, abs/1812.05271.
    """

    @staticmethod
    def build(model):
        """Assemble the TextBugger attack for `model`."""
        # The paper proposes five bug-generation operators; a candidate is
        # produced by applying any one of them to a word.
        bug_generators = CompositeTransformation(
            [
                # (1) Insert: break the word apart by inserting a space
                # (English tokenizers segment on whitespace).
                WordSwapRandomCharacterInsertion(
                    random_one=True,
                    letters_to_insert=" ",
                    skip_first_char=True,
                    skip_last_char=True,
                ),
                # (2) Delete: drop one inner character (never first/last).
                WordSwapRandomCharacterDeletion(
                    random_one=True, skip_first_char=True, skip_last_char=True
                ),
                # (3) Swap: transpose two adjacent inner characters, a common
                # fast-typing mistake.
                WordSwapNeighboringCharacterSwap(
                    random_one=True, skip_first_char=True, skip_last_char=True
                ),
                # (4) Sub-C: substitute visually similar characters, e.g.
                # "o"->"0", "l"->"1", "a"->"@".
                WordSwapHomoglyphSwap(),
                # (5) Sub-W: substitute with the top-5 nearest neighbors in a
                # context-aware (GloVe) word-embedding space.
                WordSwapEmbedding(max_candidates=5),
            ]
        )
        # Do not transform stopwords or re-transform already-modified words.
        constraints = [RepeatModification(), StopwordModification()]
        # Semantic similarity: Universal Sentence Encoder cosine similarity
        # with threshold 0.8, the paper's quality/strength trade-off.
        constraints.append(UniversalSentenceEncoder(threshold=0.8))
        # Goal: untargeted misclassification.
        goal_function = UntargetedClassification(model)
        # Search: greedy word swap ordered by Word Importance Ranking.
        search_method = GreedyWordSwapWIR()
        return Attack(goal_function, constraints, bug_generators, search_method)
| 43.89011 | 88 | 0.640461 |
acf9c39ff3e300062ce7e08af5b8b31e8755a1e5 | 1,648 | py | Python | src/pybel/struct/__init__.py | djinnome/pybel | 6ffc1df662fef51f4d740daf6d7643010a9d5be8 | [
"MIT"
] | 103 | 2016-10-25T05:51:26.000Z | 2022-03-23T02:21:12.000Z | src/pybel/struct/__init__.py | djinnome/pybel | 6ffc1df662fef51f4d740daf6d7643010a9d5be8 | [
"MIT"
] | 444 | 2016-10-22T13:09:10.000Z | 2022-03-21T12:01:39.000Z | src/pybel/struct/__init__.py | cthoyt/pybel | ed66f013a77f9cbc513892b0dad1025b8f68bb46 | [
"Apache-2.0"
] | 38 | 2017-01-06T03:32:38.000Z | 2022-03-19T11:27:30.000Z | # -*- coding: utf-8 -*-
"""The :mod:`pybel.struct` module houses functions for handling the main data structure in PyBEL.
Because BEL expresses how biological entities interact within many
different contexts, with descriptive annotations, PyBEL represents data as a directed multi-graph by sub-classing the
:class:`networkx.MultiDiGraph`. Each node is an instance of a subclass of the :class:`pybel.dsl.BaseEntity` and each
edge has a stable key and associated data dictionary for storing relevant contextual information.
The graph contains metadata for the PyBEL version, the BEL script metadata, the namespace definitions, the
annotation definitions, and the warnings produced in analysis. Like any :mod:`networkx` graph, all attributes of
a given object can be accessed through the :code:`graph` property, like in: :code:`my_graph.graph['my key']`.
Convenient property definitions are given for these attributes that are outlined in the documentation for
:class:`pybel.BELGraph`.
This allows for much easier programmatic access to answer more complicated questions, which can be written with python
code. Because the data structure is the same in Neo4J, the data can be directly exported with :func:`pybel.to_neo4j`.
Neo4J supports the Cypher querying language so that the same queries can be written in an elegant and simple way.
"""
from . import filters, graph, grouping, mutation, node_utils, operations, summary
from .filters import *
from .graph import *
from .grouping import *
from .mutation import *
from .node_utils import *
from .operations import *
from .pipeline import Pipeline
from .query import Query
from .summary import *
| 53.16129 | 118 | 0.792476 |
acf9c46e1b5328de13af7c92142025fe9526afc3 | 1,206 | py | Python | tests/commands/ddtrace_run_integration.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 308 | 2016-12-07T16:49:27.000Z | 2022-03-15T10:06:45.000Z | tests/commands/ddtrace_run_integration.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1,928 | 2016-11-28T17:13:18.000Z | 2022-03-31T21:43:19.000Z | tests/commands/ddtrace_run_integration.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 311 | 2016-11-27T03:01:49.000Z | 2022-03-18T21:34:03.000Z | """
An integration test that uses a real Redis client
that we expect to be implicitly traced via `ddtrace-run`
"""
import redis
from ddtrace import Pin
from tests.contrib.config import REDIS_CONFIG
from tests.utils import DummyWriter
if __name__ == "__main__":
    # ddtrace-run is expected to have patched redis on import, attaching a Pin.
    r = redis.Redis(port=REDIS_CONFIG["port"])
    pin = Pin.get_from(r)
    assert pin
    # Swap the real span writer for an in-memory one so spans can be inspected.
    writer = DummyWriter()
    pin.tracer.configure(writer=writer)
    # A simple command should produce exactly one span with its resource set.
    r.flushall()
    spans = writer.pop()
    assert len(spans) == 1
    assert spans[0].service == "redis"
    assert spans[0].resource == "FLUSHALL"
    # A very long command: the reported raw_command tag should be truncated
    # (prefix preserved, "..." suffix appended).
    long_cmd = "mget %s" % " ".join(map(str, range(1000)))
    us = r.execute_command(long_cmd)
    spans = writer.pop()
    assert len(spans) == 1
    span = spans[0]
    assert span.service == "redis"
    assert span.name == "redis.command"
    assert span.span_type == "redis"
    assert span.error == 0
    assert span.get_metric("out.port") == REDIS_CONFIG["port"]
    assert span.get_metric("out.redis_db") == 0
    assert span.get_tag("out.host") == "localhost"
    assert span.get_tag("redis.raw_command").startswith(u"mget 0 1 2 3")
    assert span.get_tag("redis.raw_command").endswith(u"...")
    print("Test success")
| 27.409091 | 72 | 0.664179 |
acf9c5cd781b414605dba2436fa6a36d764fbb92 | 2,135 | py | Python | vspk/v6/fetchers/nuvsdcomponents_fetcher.py | axxyhtrx/vspk-python | 4495882c6bcbb1ef51b14b9f4dc7efe46476ff50 | [
"BSD-3-Clause"
] | 19 | 2016-03-07T12:34:22.000Z | 2020-06-11T11:09:02.000Z | vspk/v6/fetchers/nuvsdcomponents_fetcher.py | axxyhtrx/vspk-python | 4495882c6bcbb1ef51b14b9f4dc7efe46476ff50 | [
"BSD-3-Clause"
] | 40 | 2016-06-13T15:36:54.000Z | 2020-11-10T18:14:43.000Z | vspk/v6/fetchers/nuvsdcomponents_fetcher.py | axxyhtrx/vspk-python | 4495882c6bcbb1ef51b14b9f4dc7efe46476ff50 | [
"BSD-3-Clause"
] | 15 | 2016-06-10T22:06:01.000Z | 2020-12-15T18:37:42.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUVSDComponentsFetcher(NURESTFetcher):
    """ Represents a NUVSDComponents fetcher

        Notes:
            This fetcher retrieves NUVSDComponent objects for a parent entity.

        See:
            bambou.NURESTFetcher
    """
    @classmethod
    def managed_class(cls):
        """ Return the NUVSDComponent class managed by this fetcher.

            Returns:
                .NUVSDComponent: the managed class
        """
        # Imported lazily to avoid a circular import with the package root.
        from .. import NUVSDComponent
        return NUVSDComponent
| 40.283019 | 86 | 0.728806 |
acf9c668c90f652998e89ab8af9150f2ea42cabf | 1,312 | py | Python | models/CTC/hyperparams.py | poodarchu/gluon_step_by_step | 5c98a057f1ef0b30dfbe47fa7b6bc7e667e0bb3b | [
"MIT"
] | 1 | 2018-04-03T07:03:01.000Z | 2018-04-03T07:03:01.000Z | models/CTC/hyperparams.py | poodarchu/gluon_step_by_step | 5c98a057f1ef0b30dfbe47fa7b6bc7e667e0bb3b | [
"MIT"
] | null | null | null | models/CTC/hyperparams.py | poodarchu/gluon_step_by_step | 5c98a057f1ef0b30dfbe47fa7b6bc7e667e0bb3b | [
"MIT"
] | null | null | null | from __future__ import print_function
class Hyperparams(object):
    """Hyper-parameter container for the LSTM/CTC network.

    All values are fixed at construction time and exposed through
    read-only properties.
    """

    def __init__(self):
        # -- training schedule --
        self._train_epoch_size = 30000
        self._eval_epoch_size = 3000
        self._batch_size = 128
        self._num_epoch = 100
        # -- optimizer --
        self._learning_rate = 0.001
        self._momentum = 0.9
        # -- labels --
        self._num_label = 4
        # -- network topology --
        self._seq_length = 80
        self._num_hidden = 100
        self._num_lstm_layer = 2

    @property
    def train_epoch_size(self):
        """Number of samples per training epoch."""
        return self._train_epoch_size

    @property
    def eval_epoch_size(self):
        """Number of samples per evaluation epoch."""
        return self._eval_epoch_size

    @property
    def batch_size(self):
        """Mini-batch size."""
        return self._batch_size

    @property
    def num_epoch(self):
        """Total number of training epochs."""
        return self._num_epoch

    @property
    def learning_rate(self):
        """Optimizer learning rate."""
        return self._learning_rate

    @property
    def momentum(self):
        """Optimizer momentum."""
        return self._momentum

    @property
    def num_label(self):
        """Number of output labels per sample."""
        return self._num_label

    @property
    def seq_length(self):
        """Input sequence length."""
        return self._seq_length

    @property
    def num_hidden(self):
        """Hidden units per LSTM layer."""
        return self._num_hidden

    @property
    def num_lstm_layer(self):
        """Number of stacked LSTM layers."""
        return self._num_lstm_layer
acf9c7151f5ad48c7a11deeb818a0562c56b351a | 3,113 | py | Python | stacks/XIAOMATECH/1.0/services/HDFS/package/scripts/hdfs_nfsgateway.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | [
"Apache-2.0"
] | 3 | 2019-08-13T01:44:16.000Z | 2019-12-10T04:05:56.000Z | stacks/XIAOMATECH/1.0/services/HDFS/package/scripts/hdfs_nfsgateway.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | [
"Apache-2.0"
] | null | null | null | stacks/XIAOMATECH/1.0/services/HDFS/package/scripts/hdfs_nfsgateway.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | [
"Apache-2.0"
] | 7 | 2019-05-29T17:35:25.000Z | 2021-12-04T07:55:10.000Z | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources import Directory
from resource_management.core.source import Template
from resource_management.core import shell
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.generate_logfeeder_input_config import generate_logfeeder_input_config
from utils import service
import subprocess, os
# NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
# on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
def prepare_rpcbind():
    """Ensure the OS-native NFS server is stopped and that an RPC port mapper
    (rpcbind or portmap) is running, as required by the HDFS NFS gateway.

    Raises Fail when the native NFS server cannot be stopped, or when neither
    rpcbind nor portmap can be started.
    """
    Logger.info("check if native nfs server is running")
    nfsd_rc, _ = shell.call("pgrep nfsd")
    if nfsd_rc == 0:
        Logger.info("native nfs server is running. shutting it down...")
        # shutdown nfs
        shell.call("service nfs stop")
        shell.call("service nfs-kernel-server stop")
        Logger.info("check if the native nfs server is down...")
        nfsd_rc, _ = shell.call("pgrep nfsd")
        if nfsd_rc == 0:
            raise Fail("Failed to shutdown native nfs service")
    Logger.info("check if rpcbind or portmap is running")
    rpcbind_rc, _ = shell.call("pgrep rpcbind")
    portmap_rc, _ = shell.call("pgrep portmap")
    if rpcbind_rc != 0 and portmap_rc != 0:
        Logger.info("no portmap or rpcbind running. starting one...")
        rpcbind_rc, _ = shell.call(("service", "rpcbind", "start"), sudo=True)
        portmap_rc, _ = shell.call(("service", "portmap", "start"), sudo=True)
        if rpcbind_rc != 0 and portmap_rc != 0:
            raise Fail("Failed to start rpcbind or portmap")
    Logger.info("now we are ready to start nfs gateway")
def nfsgateway(action=None, format=False):
    """Configure or control the HDFS NFS gateway (nfs3 service).

    :param action: one of "configure", "start" or "stop".
    :param format: unused; kept for signature compatibility with callers.
    """
    import params
    # rpcbind/portmap must be up before the gateway can register itself.
    if action == "start":
        prepare_rpcbind()
    if action == "configure":
        Directory(params.nfs_file_dump_dir,
                  owner=params.hdfs_user,
                  group=params.user_group)
        generate_logfeeder_input_config(
            'hdfs',
            Template("input.config-hdfs.json.j2", extra_imports=[default]))
    elif action in ("start", "stop"):
        service(action=action, name="nfs3", user=params.root_user,
                create_pid_dir=True, create_log_dir=True)
| 37.059524 | 115 | 0.694507 |
acf9c7737e3fe6073040f55e6d0d2a1cd87112ab | 2,085 | py | Python | Examples/BasicTutorial2/Filters.py | SimpleITK/SimpleITK-MICCAI-2011-Tutorial | c8cffa8888fda71b9e4f2fdb3e10c2c66dba8371 | [
"CC-BY-3.0"
] | 25 | 2015-03-08T16:24:13.000Z | 2021-07-23T02:44:04.000Z | Examples/BasicTutorial2/Filters.py | SimpleITK/SimpleITK-MICCAI-2011-Tutorial | c8cffa8888fda71b9e4f2fdb3e10c2c66dba8371 | [
"CC-BY-3.0"
] | null | null | null | Examples/BasicTutorial2/Filters.py | SimpleITK/SimpleITK-MICCAI-2011-Tutorial | c8cffa8888fda71b9e4f2fdb3e10c2c66dba8371 | [
"CC-BY-3.0"
] | 4 | 2015-01-29T21:29:40.000Z | 2022-03-11T08:14:07.000Z | # Welcome to the Filters demo
# NOTE(review): Python 2 demo script (uses print statements).  The "# <demo>"
# comment lines are presumably section markers consumed by IPython's demo
# runner -- keep them intact when editing.
print 'SimpleITK Filters'
# <demo> auto
# Every demo starts by importing the SimpleITK module
import SimpleITK as sitk
# <demo> stop
# Find some data
import os
dataDir = os.environ["HOME"] + "/src/SimpleITK/Testing/Data/Input"
image = sitk.ReadImage ( dataDir + "/RA-Short.nrrd" )
sitk.Show ( image )
# <demo> --- stop ---
# Simple smoothing
smooth = sitk.SmoothingRecursiveGaussian ( image, 2.0 )
sitk.Show ( smooth )
# <demo> --- stop ---
# Tired of typing SmoothingRecursiveGaussian ?
Gaussian = sitk.SmoothingRecursiveGaussian
smooth = Gaussian ( image, 4. )
sitk.Show ( smooth )
# <demo> --- stop ---
# Show the difference between the original and smoothed
sitk.Show ( sitk.Subtract ( image, smooth ) )
# Boom!  Back to slides to explain!
# <demo> --- stop ---
# Much better: cast back to the input pixel type so subtraction is valid
print "Before: ", smooth.GetPixelIDTypeAsString()
smooth = sitk.Cast ( smooth, image.GetPixelIDValue() )
print "After: ", smooth.GetPixelIDTypeAsString()
sitk.Show ( sitk.Subtract ( image, smooth ), "DiffWithGaussian" )
# <demo> --- stop ---
# Some other example filters
# Flip
sitk.Show ( sitk.Flip ( image ), "Flipped" )
# <demo> stop
# Canny edges
sitk.Show ( sitk.CannyEdgeDetection ( sitk.Cast(image, sitk.sitkFloat32) ), "Canny" )
# <demo> stop
# Sharpen
sitk.Show ( sitk.LaplacianSharpening ( image ), "Sharp" )
# <demo> stop
# Shrink
sitk.Show ( sitk.Shrink ( image, [2,2,2] ), "Shrunk" )
# <demo> stop
# Distance map, 25 pixels to a feature between 700 and 750
distanceMap = sitk.SignedMaurerDistanceMap ( sitk.BinaryThreshold ( image, 700, 750 ) )
sitk.Show ( sitk.IntensityWindowing ( distanceMap, 0, 25, 0, 255 ), "DistanceMap" )
# (review: next marker was "# <Demo> stop"; lower-cased to match every other marker)
# <demo> stop
# 3D image
image = sitk.ReadImage ( dataDir + "/OAS1_0001_MR1_mpr-1_anon.nrrd" )
sitk.Show ( image )
# <demo> --- stop ---
# Flip
sitk.Show ( sitk.Flip ( image ), "Flipped" )
# <demo> stop
# Canny edges
sitk.Show ( sitk.CannyEdgeDetection ( sitk.Cast ( image, sitk.sitkFloat32 ) ), "Canny" )
# <demo> stop
# Sharpen
sitk.Show ( sitk.LaplacianSharpening ( image ), "Sharp" )
# <demo> stop
| 22.180851 | 88 | 0.682974 |
acf9c7a8ffb09f165aa63d7f86241aa04a4b64b5 | 493 | py | Python | cmsplugins/headers/migrations/0003_auto_20200508_2003.py | e621-Inc/django-cmsplugins | f9e81ac4f58d8e1b751a9f5209306f675185c112 | [
"MIT"
] | null | null | null | cmsplugins/headers/migrations/0003_auto_20200508_2003.py | e621-Inc/django-cmsplugins | f9e81ac4f58d8e1b751a9f5209306f675185c112 | [
"MIT"
] | 4 | 2020-01-16T08:17:16.000Z | 2020-05-13T10:59:01.000Z | cmsplugins/headers/migrations/0003_auto_20200508_2003.py | e621-Inc/django-cmsplugins | f9e81ac4f58d8e1b751a9f5209306f675185c112 | [
"MIT"
] | 2 | 2017-01-24T10:24:21.000Z | 2017-01-24T10:25:08.000Z | # Generated by Django 2.2.12 on 2020-05-08 20:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('headers', '0002_header_cms_page'),
]
operations = [
migrations.AlterField(
model_name='header',
name='cms_page',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms.Page'),
),
]
| 24.65 | 124 | 0.643002 |
acf9c89ad6fa1ab7c79c83865da98a6e172d9abc | 7,261 | py | Python | raw_packet/Senders/send_icmpv6_ra_packets.py | Vladimir-Ivanov-Git/raw_packet | 78d27b3dc9532d27faa6e5d853c62bc9c8b21e71 | [
"MIT"
] | 146 | 2018-09-28T13:34:01.000Z | 2022-03-21T21:35:12.000Z | raw_packet/Senders/send_icmpv6_ra_packets.py | Vladimir-Ivanov-Git/raw_packet | 78d27b3dc9532d27faa6e5d853c62bc9c8b21e71 | [
"MIT"
] | 18 | 2019-06-05T17:59:08.000Z | 2021-12-22T10:26:18.000Z | raw_packet/Senders/send_icmpv6_ra_packets.py | Vladimir-Ivanov-Git/raw_packet | 78d27b3dc9532d27faa6e5d853c62bc9c8b21e71 | [
"MIT"
] | 26 | 2018-11-09T07:47:42.000Z | 2022-03-12T22:40:33.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# region Description
"""
nmap_scanner.py: Scan local network
Author: Vladimir Ivanov
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
# region Add project root path
from sys import path
from os.path import dirname, abspath
path.append(dirname(dirname(dirname(abspath(__file__)))))
# endregion
# region Raw-packet modules
from raw_packet.Utils.base import Base
from raw_packet.Utils.network import ICMPv6_raw, Ethernet_raw
# endregion
# region Import libraries
from argparse import ArgumentParser
from socket import socket, AF_PACKET, SOCK_RAW
from sys import stdout
from time import sleep
from traceback import format_exc
from datetime import datetime
# endregion
# endregion
# region Authorship information
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = 'ivanov.vladimir.mail@gmail.com'
__status__ = 'Stable'
# endregion
# region Main function
# Script entry point: floods the network with ICMPv6 Router Advertisement
# packets via a raw AF_PACKET socket (requires root and a Linux host).
if __name__ == "__main__":
    # region Check user, platform and print banner
    Base = Base()
    Base.check_platform()
    Base.check_user()
    Base.print_banner()
    # endregion
    # region Parse script arguments
    parser = ArgumentParser(description='ICMPv6 router advertisement packets sender')
    parser.add_argument('-i', '--interface', type=str, help='Set interface name for send TCP packets')
    parser.add_argument('-m', '--src_mac', type=str, help='Set src mac address (not required)', default=None)
    parser.add_argument('-M', '--dst_mac', type=str, help='Set dst mac address (not required)', default=None)
    parser.add_argument('-a', '--src_ipv6', type=str, help='Set src ipv6 address (not required)', default=None)
    parser.add_argument('-A', '--dst_ipv6', type=str, help='Set dst ipv6 address (not required)', default=None)
    parser.add_argument('-d', '--dns', type=str, help='Set DNS IPv6 address (not required)', default=None)
    parser.add_argument('-D', '--domain', type=str, help='Set domain search (default: test.com)', default="test.com")
    parser.add_argument('-P', '--prefix', type=str, help='Set network prefix (default: fd00::/64)', default="fd00::/64")
    parser.add_argument('-p', '--number_of_packets', type=int, help='Set number of packets (default=100000)', default=10000)
    parser.add_argument('-t', '--number_of_iterations', type=int, help='Set number of iteration (default=100)', default=100)
    parser.add_argument('--delay', type=float, help='Set delay between packets (default=0.0)', default=0.0)
    args = parser.parse_args()
    # endregion
    # region Variables
    icmpv6 = ICMPv6_raw()
    eth = Ethernet_raw()
    SOCK = None
    iteration = 0
    index = 0
    # endregion
    # region Get network settings
    # region Set network interface
    if args.interface is None:
        current_network_interface = Base.netiface_selection()
    else:
        current_network_interface = args.interface
    # endregion
    # region Set source MAC address
    if args.src_mac is None:
        src_mac_address = Base.get_netiface_mac_address(current_network_interface)
    else:
        src_mac_address = args.src_mac
    # endregion
    # region Set destination MAC address
    if args.dst_mac is None:
        dst_mac_address = "33:33:00:00:00:01"  # IPv6mcast
    else:
        dst_mac_address = args.dst_mac
    # endregion
    # region Set source IPv6 address
    if args.src_ipv6 is None:
        src_ipv6_address = Base.get_netiface_ipv6_link_address(current_network_interface)
    else:
        src_ipv6_address = args.src_ipv6
    # endregion
    # region Set destination IPv6 address
    if args.dst_ipv6 is None:
        dst_ipv6_address = "ff02::1"
    else:
        dst_ipv6_address = args.dst_ipv6
    # endregion
    # region Set DNS server address
    if args.dns is None:
        dns_ipv6_address = Base.get_netiface_ipv6_link_address(current_network_interface)
    else:
        dns_ipv6_address = args.dns
    # endregion
    # endregion
    # region General output
    Base.print_info("Interface: ", current_network_interface)
    Base.print_info("Src IPv6 address: ", src_ipv6_address)
    Base.print_info("Dst IPv6 address: ", dst_ipv6_address)
    Base.print_info("Src MAC address: ", src_mac_address)
    Base.print_info("Dst MAC address: ", dst_mac_address)
    # Bug fix: the "Prefix:" label previously printed args.domain instead of
    # the actual network prefix (args.prefix).
    Base.print_info("Prefix: ", args.prefix)
    Base.print_info("DNS IPv6 address: ", dns_ipv6_address)
    Base.print_info("Domain search: ", args.domain)
    Base.print_info("Sending ICMPv6 router advertisement packets ...")
    start_time = datetime.now()
    Base.print_info("Start sending time: ", str(start_time))
    # endregion
    # region Send ICMPv6 RA packets
    try:
        # Create raw socket
        SOCK = socket(AF_PACKET, SOCK_RAW)
        SOCK.bind((current_network_interface, 0))
        # Make ICMPv6 RA packet (built once, re-sent verbatim in the loop)
        ra_packet = icmpv6.make_router_advertisement_packet(ethernet_src_mac=src_mac_address,
                                                            ethernet_dst_mac=dst_mac_address,
                                                            ipv6_src=src_ipv6_address,
                                                            ipv6_dst=dst_ipv6_address,
                                                            dns_address=dns_ipv6_address,
                                                            domain_search=args.domain,
                                                            prefix=args.prefix)
        # Send ICMPv6 RA packets in cycle
        for iteration in range(args.number_of_iterations):
            progress_percent = int((iteration / args.number_of_iterations) * 100) + 1
            stdout.write('\r')
            stdout.write(Base.c_info + 'Progress: ' + Base.cINFO + str(progress_percent) + '%' + Base.cEND)
            stdout.flush()
            index = 0
            while index < args.number_of_packets:
                SOCK.send(ra_packet)
                index += 1
                sleep(args.delay)
    # Keyboard interrupt
    except KeyboardInterrupt:
        pass
    # Any exceptions
    except:
        stdout.write('\n')
        Base.print_info("End sending time: ", str(datetime.now()))
        Base.print_error("Do not send ICMPv6 router advertisement packets!")
        Base.print_info(str(format_exc()))
        Base.print_info("Close socket and exit ...")
        if SOCK is not None:
            SOCK.close()
        exit(1)
    # endregion
    # region Calculate send speed
    end_time = datetime.now()
    # NOTE(review): after a full run `iteration` ends at N-1, so this total
    # undercounts by one iteration's worth of packets -- confirm intent before
    # changing, since the formula is correct for a mid-run KeyboardInterrupt.
    number_of_packets = (int(iteration)*int(args.number_of_packets)) + index
    speed = float('{:.3f}'.format(number_of_packets / (end_time - start_time).total_seconds()))
    # endregion
    # region Output script results
    stdout.write('\n')
    Base.print_info("End sending time: ", str(end_time))
    Base.print_info("Send: ", str(number_of_packets), " ICMPv6 router advertisement packets!")
    Base.print_info("Speed: ", str(speed), " pkt/s")
    Base.print_info("Close socket and exit ...")
    # endregion
    # region Close raw socket and exit
    SOCK.close()
    exit(0)
    # endregion
# endregion
| 33.307339 | 124 | 0.651425 |
acf9c8a328310064926cc74ac11e9a8128c1eef9 | 17,961 | py | Python | electrum/paymentrequest.py | MatthewWesley/electrum | f923d2ecdb145c5c84767a30f1aadf8a035a1dc6 | [
"MIT"
] | null | null | null | electrum/paymentrequest.py | MatthewWesley/electrum | f923d2ecdb145c5c84767a30f1aadf8a035a1dc6 | [
"MIT"
] | null | null | null | electrum/paymentrequest.py | MatthewWesley/electrum | f923d2ecdb145c5c84767a30f1aadf8a035a1dc6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import sys
import time
from typing import Optional, List, TYPE_CHECKING
import asyncio
import urllib.parse
import certifi
import aiohttp
try:
from . import paymentrequest_pb2 as pb2
except ImportError:
# sudo apt-get install protobuf-compiler
sys.exit("Error: could not find paymentrequest_pb2.py. Create it with 'protoc --proto_path=electrum/ --python_out=electrum/ electrum/paymentrequest.proto'")
from . import bitcoin, constants, ecc, util, transaction, x509, rsakey
from .util import bh2u, bfh, make_aiohttp_session
from .invoices import Invoice, get_id_from_onchain_outputs
from .crypto import sha256
from .bitcoin import address_to_script
from .transaction import PartialTxOutput
from .network import Network
from .logging import get_logger, Logger
if TYPE_CHECKING:
from .simple_config import SimpleConfig
_logger = get_logger(__name__)
REQUEST_HEADERS = {'Accept': 'application/bitcoin-paymentrequest', 'User-Agent': 'Electrum'}
ACK_HEADERS = {'Content-Type':'application/bitcoin-payment','Accept':'application/bitcoin-paymentack','User-Agent':'Electrum'}
ca_path = certifi.where()
ca_list = None
ca_keyID = None
def load_ca_list():
    """Lazily populate the module-level trusted-CA globals from the certifi bundle.

    Idempotent: the certificate file is parsed only on the first call.
    """
    global ca_list, ca_keyID
    if ca_list is not None:
        return
    ca_list, ca_keyID = x509.load_certificates(ca_path)
async def get_payment_request(url: str) -> 'PaymentRequest':
    """Fetch a BIP70 payment request from an http(s) URL.

    Never raises on network/HTTP failure: a PaymentRequest is always
    returned, with any failure recorded in its `error` attribute.
    """
    u = urllib.parse.urlparse(url)
    error = None
    if u.scheme in ('http', 'https'):
        resp_content = None
        try:
            proxy = Network.get_instance().proxy
            async with make_aiohttp_session(proxy, headers=REQUEST_HEADERS) as session:
                async with session.get(url) as response:
                    resp_content = await response.read()
                    response.raise_for_status()
                    # Guard against `bitcoin:`-URIs with invalid payment request URLs
                    if "Content-Type" not in response.headers \
                            or response.headers["Content-Type"] != "application/bitcoin-paymentrequest":
                        data = None
                        error = "payment URL not pointing to a payment request handling server"
                    else:
                        data = resp_content
                    data_len = len(data) if data is not None else None
                    _logger.info(f'fetched payment request {url} {data_len}')
        except (aiohttp.ClientError, asyncio.TimeoutError) as e:
            error = f"Error while contacting payment URL: {url}.\nerror type: {type(e)}"
            if isinstance(e, aiohttp.ClientResponseError):
                error += f"\nGot HTTP status code {e.status}."
            if resp_content:
                # Log (but do not trust) whatever error body the server returned.
                try:
                    error_text_received = resp_content.decode("utf8")
                except UnicodeDecodeError:
                    error_text_received = "(failed to decode error)"
                else:
                    error_text_received = error_text_received[:400]
                error_oneline = ' -- '.join(error.split('\n'))
                _logger.info(f"{error_oneline} -- [DO NOT TRUST THIS MESSAGE] "
                             f"{repr(e)} text: {error_text_received}")
            data = None
    else:
        data = None
        error = f"Unknown scheme for payment request. URL: {url}"
    pr = PaymentRequest(data, error=error)
    return pr
class PaymentRequest:
    """In-memory representation of a BIP70 payment request.

    Wraps the raw protobuf-serialized request bytes. parse() runs at
    construction time; verify() must succeed before the requestor
    identity can be trusted.
    """
    def __init__(self, data: bytes, *, error=None):
        # data may be None when fetching the request already failed upstream.
        self.raw = data
        self.error = error # FIXME overloaded and also used when 'verify' succeeds
        self.parse(data)
        self.requestor = None # known after verify
        self.tx = None
    def __str__(self):
        return str(self.raw)
    def parse(self, r: bytes):
        """Deserialize the protobuf bytes into outputs/memo/payment_url.

        On any failure sets self.error and leaves the request unusable.
        """
        self.outputs = [] # type: List[PartialTxOutput]
        if self.error:
            return
        try:
            self.data = pb2.PaymentRequest()
            self.data.ParseFromString(r)
        except:
            self.error = "cannot parse payment request"
            return
        self.details = pb2.PaymentDetails()
        self.details.ParseFromString(self.data.serialized_payment_details)
        # Refuse requests made for a different chain (mainnet vs testnet).
        pr_network = self.details.network
        client_network = 'test' if constants.net.TESTNET else 'main'
        if pr_network != client_network:
            self.error = (f'Payment request network "{pr_network}" does not'
                          f' match client network "{client_network}".')
            return
        for o in self.details.outputs:
            addr = transaction.get_address_from_output_script(o.script)
            if not addr:
                # TODO maybe rm restriction but then get_requestor and get_id need changes
                self.error = "only addresses are allowed as outputs"
                return
            self.outputs.append(PartialTxOutput.from_address_and_value(addr, o.amount))
        self.memo = self.details.memo
        self.payment_url = self.details.payment_url
    def verify(self, contacts):
        """Verify the request signature, dispatching on the declared PKI type.

        Returns True on success. On failure sets self.error and returns False.
        An unsigned request is accepted (the address is shown as requestor).
        """
        if self.error:
            return False
        if not self.raw:
            self.error = "Empty request"
            return False
        pr = pb2.PaymentRequest()
        try:
            pr.ParseFromString(self.raw)
        except:
            self.error = "Error: Cannot parse payment request"
            return False
        if not pr.signature:
            # the address will be displayed as requestor
            self.requestor = None
            return True
        if pr.pki_type in ["x509+sha256", "x509+sha1"]:
            return self.verify_x509(pr)
        elif pr.pki_type in ["dnssec+btc", "dnssec+ecdsa"]:
            return self.verify_dnssec(pr, contacts)
        else:
            self.error = "ERROR: Unsupported PKI Type for Message Signature"
            return False
    def verify_x509(self, paymntreq):
        """Verify an x509-signed request against the trusted CA store."""
        load_ca_list()
        if not ca_list:
            self.error = "Trusted certificate authorities list not found"
            return False
        cert = pb2.X509Certificates()
        cert.ParseFromString(paymntreq.pki_data)
        # verify the chain of certificates
        try:
            x, ca = verify_cert_chain(cert.certificate)
        except BaseException as e:
            _logger.exception('')
            self.error = str(e)
            return False
        # get requestor name (strip any wildcard prefix from the CN)
        self.requestor = x.get_common_name()
        if self.requestor.startswith('*.'):
            self.requestor = self.requestor[2:]
        # verify the BIP70 signature: the signature covers the serialized
        # request with its signature field blanked out.
        pubkey0 = rsakey.RSAKey(x.modulus, x.exponent)
        sig = paymntreq.signature
        paymntreq.signature = b''
        s = paymntreq.SerializeToString()
        sigBytes = bytearray(sig)
        msgBytes = bytearray(s)
        if paymntreq.pki_type == "x509+sha256":
            hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
            verify = pubkey0.verify(sigBytes, x509.PREFIX_RSA_SHA256 + hashBytes)
        elif paymntreq.pki_type == "x509+sha1":
            verify = pubkey0.hashAndVerify(sigBytes, msgBytes)
        else:
            self.error = f"ERROR: unknown pki_type {paymntreq.pki_type} in Payment Request"
            return False
        if not verify:
            self.error = "ERROR: Invalid Signature for Payment Request Data"
            return False
        ### SIG Verified
        self.error = 'Signed by Trusted CA: ' + ca.get_common_name()
        return True
    def verify_dnssec(self, pr, contacts):
        """Verify a DNSSEC/OpenAlias-signed request via the contacts resolver."""
        sig = pr.signature
        alias = pr.pki_data
        info = contacts.resolve(alias)
        if info.get('validated') is not True:
            self.error = "Alias verification failed (DNSSEC)"
            return False
        if pr.pki_type == "dnssec+btc":
            self.requestor = alias
            address = info.get('address')
            # The signature covers the request with the signature field blanked.
            pr.signature = b''
            message = pr.SerializeToString()
            if ecc.verify_message_with_address(address, sig, message):
                self.error = 'Verified with DNSSEC'
                return True
            else:
                self.error = "verify failed"
                return False
        else:
            self.error = "unknown algo"
            return False
    def has_expired(self) -> Optional[bool]:
        # None means "unknown": the request was never parsed successfully.
        if not hasattr(self, 'details'):
            return None
        return self.details.expires and self.details.expires < int(time.time())
    def get_time(self):
        # Creation timestamp (unix time) from the payment details.
        return self.details.time
    def get_expiration_date(self):
        # Expiry timestamp (unix time); 0 means "never expires".
        return self.details.expires
    def get_amount(self):
        # Total requested amount (satoshis) across all outputs.
        return sum(map(lambda x:x.value, self.outputs))
    def get_address(self):
        # Address of the first output; parse() guarantees outputs are address-based.
        o = self.outputs[0]
        addr = o.address
        assert addr
        return addr
    def get_requestor(self):
        # Verified requestor identity if available, else the payment address.
        return self.requestor if self.requestor else self.get_address()
    def get_verify_status(self):
        return self.error if self.requestor else "No Signature"
    def get_memo(self):
        return self.memo
    def get_name_for_export(self) -> Optional[str]:
        if not hasattr(self, 'details'):
            return None
        return get_id_from_onchain_outputs(self.outputs, timestamp=self.get_time())
    def get_outputs(self):
        # Shallow copy so callers cannot mutate our output list.
        return self.outputs[:]
    async def send_payment_and_receive_paymentack(self, raw_tx, refund_addr):
        """POST the signed tx to the merchant's payment_url (BIP70 Payment
        message) and return a (success, memo_or_error) tuple."""
        pay_det = self.details
        if not self.details.payment_url:
            return False, "no url"
        paymnt = pb2.Payment()
        paymnt.merchant_data = pay_det.merchant_data
        paymnt.transactions.append(bfh(raw_tx))
        ref_out = paymnt.refund_to.add()
        ref_out.script = util.bfh(address_to_script(refund_addr))
        paymnt.memo = "Paid using Electrum"
        pm = paymnt.SerializeToString()
        payurl = urllib.parse.urlparse(pay_det.payment_url)
        resp_content = None
        try:
            proxy = Network.get_instance().proxy
            async with make_aiohttp_session(proxy, headers=ACK_HEADERS) as session:
                async with session.post(payurl.geturl(), data=pm) as response:
                    resp_content = await response.read()
                    response.raise_for_status()
                    try:
                        paymntack = pb2.PaymentACK()
                        paymntack.ParseFromString(resp_content)
                    except Exception:
                        return False, "PaymentACK could not be processed. Payment was sent; please manually verify that payment was received."
                    print(f"PaymentACK message received: {paymntack.memo}")
                    return True, paymntack.memo
        except aiohttp.ClientError as e:
            error = f"Payment Message/PaymentACK Failed:\nerror type: {type(e)}"
            if isinstance(e, aiohttp.ClientResponseError):
                error += f"\nGot HTTP status code {e.status}."
            if resp_content:
                # Log (but do not trust) whatever error body the server returned.
                try:
                    error_text_received = resp_content.decode("utf8")
                except UnicodeDecodeError:
                    error_text_received = "(failed to decode error)"
                else:
                    error_text_received = error_text_received[:400]
                error_oneline = ' -- '.join(error.split('\n'))
                _logger.info(f"{error_oneline} -- [DO NOT TRUST THIS MESSAGE] "
                             f"{repr(e)} text: {error_text_received}")
            return False, error
def make_unsigned_request(req: 'Invoice'):
    """Build an unsigned BIP70 PaymentRequest protobuf from an on-chain invoice.

    The request contains a single output paying the invoice address, plus the
    invoice's timestamp, expiry and memo. The signature field is left empty.
    """
    addr = req.get_address()
    # Renamed local (was `time`) so the stdlib `time` module is not shadowed.
    # Non-int values are coerced to 0; the isinstance check replaces the old
    # `type(x) != int` anti-idiom.
    timestamp = req.time
    exp = req.exp
    if timestamp and not isinstance(timestamp, int):
        timestamp = 0
    if exp and not isinstance(exp, int):
        exp = 0
    amount = req.get_amount_sat()
    if amount is None:
        amount = 0
    memo = req.message
    script = bfh(address_to_script(addr))
    outputs = [(script, amount)]
    pd = pb2.PaymentDetails()
    if constants.net.TESTNET:
        pd.network = 'test'
    for script, amount in outputs:
        pd.outputs.add(amount=amount, script=script)
    pd.time = timestamp
    # expires == 0 means "never expires" per BIP70
    pd.expires = timestamp + exp if exp else 0
    pd.memo = memo
    pr = pb2.PaymentRequest()
    pr.serialized_payment_details = pd.SerializeToString()
    pr.signature = util.to_bytes('')
    return pr
def sign_request_with_alias(pr, alias, alias_privkey):
    """Sign a BIP70 payment request in place with an OpenAlias EC private key.

    Sets pki_type/pki_data on the request, then signs its serialization.
    """
    pr.pki_type = 'dnssec+btc'
    pr.pki_data = str(alias)
    serialized = pr.SerializeToString()
    is_compressed = bitcoin.is_compressed_privkey(alias_privkey)
    signing_key = ecc.ECPrivkey(alias_privkey)
    pr.signature = signing_key.sign_message(serialized, is_compressed)
def verify_cert_chain(chain):
    """ Verify a chain of certificates. The last certificate is the CA"""
    load_ca_list()
    # parse the chain: chain[0] is the leaf, subsequent entries are issuers
    cert_num = len(chain)
    x509_chain = []
    for i in range(cert_num):
        x = x509.X509(bytearray(chain[i]))
        x509_chain.append(x)
        if i == 0:
            # only the leaf certificate gets a validity-period check here
            x.check_date()
        else:
            if not x.check_ca():
                raise Exception("ERROR: Supplied CA Certificate Error")
    if not cert_num > 1:
        raise Exception("ERROR: CA Certificate Chain Not Provided by Payment Processor")
    # if the root CA is not supplied, add it to the chain
    ca = x509_chain[cert_num-1]
    if ca.getFingerprint() not in ca_list:
        keyID = ca.get_issuer_keyID()
        f = ca_keyID.get(keyID)
        if f:
            root = ca_list[f]
            x509_chain.append(root)
        else:
            raise Exception("Supplied CA Not Found in Trusted CA Store.")
    # verify the chain of signatures: each certificate must be signed by the
    # next one in the list (its issuer)
    cert_num = len(x509_chain)
    for i in range(1, cert_num):
        x = x509_chain[i]
        prev_x = x509_chain[i-1]
        algo, sig, data = prev_x.get_signature()
        sig = bytearray(sig)
        pubkey = rsakey.RSAKey(x.modulus, x.exponent)
        if algo == x509.ALGO_RSA_SHA1:
            verify = pubkey.hashAndVerify(sig, data)
        elif algo == x509.ALGO_RSA_SHA256:
            hashBytes = bytearray(hashlib.sha256(data).digest())
            verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA256 + hashBytes)
        elif algo == x509.ALGO_RSA_SHA384:
            hashBytes = bytearray(hashlib.sha384(data).digest())
            verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA384 + hashBytes)
        elif algo == x509.ALGO_RSA_SHA512:
            hashBytes = bytearray(hashlib.sha512(data).digest())
            verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA512 + hashBytes)
        else:
            raise Exception("Algorithm not supported: {}".format(algo))
        if not verify:
            raise Exception("Certificate not Signed by Provided CA Certificate Chain")
    # returns (leaf certificate, trusted CA certificate)
    return x509_chain[0], ca
def check_ssl_config(config):
    """Validate the configured SSL key/cert pair used for signing requests.

    Loads the private key and certificate chain from the paths in `config`,
    verifies the chain against the trusted CA store, and checks that the
    private key matches the leaf certificate's public key.

    Returns the requestor name (leaf CN, wildcard prefix stripped).
    Raises on an invalid chain; raises AssertionError on a key mismatch.
    """
    from . import pem
    key_path = config.get('ssl_keyfile')
    cert_path = config.get('ssl_certfile')
    with open(key_path, 'r', encoding='utf-8') as f:
        params = pem.parse_private_key(f.read())
    with open(cert_path, 'r', encoding='utf-8') as f:
        s = f.read()
    bList = pem.dePemList(s, "CERTIFICATE")
    # verify chain
    x, ca = verify_cert_chain(bList)
    # verify that privkey and pubkey match: compare RSA modulus/exponent
    # directly (the previously constructed-but-unused RSAKey objects were
    # removed). NOTE: these asserts are stripped under `python -O`.
    assert x.modulus == params[0]
    assert x.exponent == params[1]
    # return requestor
    requestor = x.get_common_name()
    if requestor.startswith('*.'):
        requestor = requestor[2:]
    return requestor
def sign_request_with_x509(pr, key_path, cert_path):
    """Sign a BIP70 payment request in place with an RSA key + x509 chain.

    Embeds the PEM certificate chain in the request's pki_data and stores an
    RSA-SHA256 signature over the request serialization.
    """
    from . import pem
    with open(key_path, 'r', encoding='utf-8') as key_file:
        key_params = pem.parse_private_key(key_file.read())
    rsa_key = rsakey.RSAKey(*key_params)
    with open(cert_path, 'r', encoding='utf-8') as cert_file:
        cert_chain = pem.dePemList(cert_file.read(), "CERTIFICATE")
    certificates = pb2.X509Certificates()
    certificates.certificate.extend(map(bytes, cert_chain))
    pr.pki_type = 'x509+sha256'
    pr.pki_data = certificates.SerializeToString()
    digest = bytearray(hashlib.sha256(bytearray(pr.SerializeToString())).digest())
    pr.signature = bytes(rsa_key.sign(x509.PREFIX_RSA_SHA256 + digest))
def serialize_request(req): # FIXME this is broken
    """Serialize a request to a (possibly signed) BIP70 PaymentRequest.

    NOTE(review): make_unsigned_request() expects an Invoice object, yet the
    dict-style req.get() calls below assume a mapping -- both cannot be
    right, which is presumably what the FIXME above refers to. Confirm the
    intended type of `req` before relying on this function.
    """
    pr = make_unsigned_request(req)
    signature = req.get('sig')
    requestor = req.get('name')
    if requestor and signature:
        pr.signature = bfh(signature)
        pr.pki_type = 'dnssec+btc'
        pr.pki_data = str(requestor)
    return pr
def make_request(config: 'SimpleConfig', req: 'Invoice'):
    """Build a BIP70 PaymentRequest for *req*; x509-sign it when both SSL
    key and certificate paths are present in the config."""
    payment_request = make_unsigned_request(req)
    ssl_paths = (config.get('ssl_keyfile'), config.get('ssl_certfile'))
    if all(ssl_paths):
        sign_request_with_x509(payment_request, *ssl_paths)
    return payment_request
| 38.296375 | 160 | 0.623184 |
acf9ca178edffecf09ef2ae02c0fd4f5d99cc5c5 | 5,839 | py | Python | stream_alert_cli/terraform/lambda_module.py | tuapuikia/streamalert | b1f733259aa051f8d533e7881018280fe77d7bda | [
"Apache-2.0"
] | null | null | null | stream_alert_cli/terraform/lambda_module.py | tuapuikia/streamalert | b1f733259aa051f8d533e7881018280fe77d7bda | [
"Apache-2.0"
] | null | null | null | stream_alert_cli/terraform/lambda_module.py | tuapuikia/streamalert | b1f733259aa051f8d533e7881018280fe77d7bda | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from stream_alert.shared import metrics
from stream_alert_cli.terraform.common import monitoring_topic_arn
def _tf_metric_alarms(lambda_config, sns_arn):
"""Compute metric alarm Terraform configuration from the Lambda config."""
result = {}
alarms_config = lambda_config.get('metric_alarms', {})
if not alarms_config:
return result
result['alarm_actions'] = [sns_arn]
for alarm_type in ['errors', 'throttles']:
settings = alarms_config.get(alarm_type)
if not settings:
continue
for key in ['enabled', 'evaluation_periods', 'period_secs', 'threshold']:
if key in settings:
result['{}_alarm_{}'.format(alarm_type, key)] = settings[key]
return result
def _tf_metric_filters(lambda_config, metrics_lookup):
"""Compute metric filter Terraform configuration from the Lambda config."""
if not lambda_config.get('enable_metrics') or not metrics_lookup:
return {}
# Create a metric filter for each custom metric associated with this function.
metric_filters = []
function_metrics = metrics.MetricLogger.get_available_metrics()[metrics_lookup]
for metric, settings in function_metrics.items():
metric_name = '{}-{}'.format(metrics.FUNC_PREFIXES[metrics_lookup], metric)
filter_pattern, filter_value = settings
metric_filters.append('{},{},{}'.format(metric_name, filter_pattern, filter_value))
return {'log_metric_filters': metric_filters}
def _tf_vpc_config(lambda_config):
"""Compute VPC configuration from the Lambda config."""
result = {}
vpc_config = lambda_config.get('vpc_config', {})
if not vpc_config:
return result
if 'security_group_ids' in vpc_config:
result['vpc_security_group_ids'] = vpc_config['security_group_ids']
if 'subnet_ids' in vpc_config:
result['vpc_subnet_ids'] = vpc_config['subnet_ids']
return result
def generate_lambda(function_name, lambda_config, config, environment=None, metrics_lookup=None):
    """Generate an instance of the tf_lambda Terraform module.

    Args:
        function_name (str): Name of the Lambda function (e.g. 'alert_processor')
        lambda_config (dict): Section of the config for this particular Lambda function
        config (dict): Parsed config from conf/
        environment (dict): Optional extra environment variables. ENABLE_METRICS
            and LOGGER_LEVEL are always included automatically.
        metrics_lookup (str): Canonical name of this function, used to look up
            its custom metrics.

    Returns:
        dict: Terraform config for an instance of the tf_lambda module.
    """
    # Required environment variables, merged with any caller-provided extras.
    env_vars = {
        # Convert True/False to "1" or "0", respectively
        'ENABLE_METRICS': str(int(lambda_config.get('enable_metrics', False))),
        'LOGGER_LEVEL': lambda_config.get('log_level', 'info'),
    }
    env_vars.update(environment or {})
    result = {
        'source': 'modules/tf_lambda',
        'function_name': function_name,
        'description': function_name.replace('_', ' ').title(),
        'handler': lambda_config['handler'],
        'memory_size_mb': lambda_config['memory'],
        'timeout_sec': lambda_config['timeout'],
        'source_bucket': lambda_config['source_bucket'],
        'source_object_key': lambda_config['source_object_key'],
        'environment_variables': env_vars,
        'aliased_version': lambda_config['current_version'],
    }
    # Optional keys fall back to the module defaults when absent from the config.
    optional_keys = ('concurrency_limit', 'log_retention_days', 'schedule_expression')
    result.update({key: lambda_config[key] for key in optional_keys if key in lambda_config})
    # Metric alarms, metric filters and VPC settings contribute extra arguments.
    result.update(_tf_metric_alarms(lambda_config, monitoring_topic_arn(config)))
    result.update(_tf_metric_filters(lambda_config, metrics_lookup))
    result.update(_tf_vpc_config(lambda_config))
    return result
| 37.191083 | 97 | 0.64994 |
acf9cc8346a5ba9def5f5bbcd657852ba2676224 | 5,774 | py | Python | python/app/thirdparty/oneforall/takeover.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | [
"MIT"
] | 351 | 2020-02-26T05:23:26.000Z | 2022-03-26T12:39:19.000Z | python/app/thirdparty/oneforall/takeover.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | [
"MIT"
] | 15 | 2020-03-26T07:31:49.000Z | 2022-03-09T02:12:17.000Z | python/app/thirdparty/oneforall/takeover.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | [
"MIT"
] | 99 | 2020-02-28T07:30:46.000Z | 2022-03-16T16:41:09.000Z | #!/usr/bin/python3
# coding=utf-8
"""
OneForAll subdomain takeover module
:copyright: Copyright (c) 2019, Jing Ling. All rights reserved.
:license: GNU General Public License v3.0, see LICENSE for more details.
"""
import time
import json
from threading import Thread
from queue import Queue
import fire
from app.thirdparty.oneforall.common.tablib.tablib import Dataset
from tqdm import tqdm
from app.thirdparty.oneforall.config.log import logger
from app.thirdparty.oneforall.config import settings
from app.thirdparty.oneforall.common import utils
from app.thirdparty.oneforall.common.module import Module
def get_fingerprint():
    """Load the subdomain-takeover fingerprint definitions bundled with OneForAll."""
    fingerprint_path = settings.data_storage_dir.joinpath('fingerprints.json')
    with open(fingerprint_path, encoding='utf-8', errors='ignore') as fingerprint_file:
        return json.load(fingerprint_file)
def get_cname(subdomain):
    """Resolve and return the CNAME target of *subdomain*, or None on failure."""
    try:
        answers = utils.dns_resolver().query(subdomain, 'CNAME')
    except Exception as e:
        logger.log('TRACE', e.args)
        return None
    # A subdomain has at most one CNAME record, so the first answer suffices.
    for answer in answers:
        return answer.to_text()
class Takeover(Module):
    """
    OneForAll subdomain takeover module
    Example:
        python3 takeover.py --target www.example.com --fmt csv run
        python3 takeover.py --targets ./subdomains.txt --thread 10 run
    Note:
        --fmt txt/csv/json (result format)
        --path Result directory (default directory is ./results)
    :param str target: One domain (target or targets must be provided)
    :param str targets: File path of one domain per line
    :param int thread: threads number (default 20)
    :param str fmt: Result format (default csv)
    :param str path: Result directory (default None)
    """
    def __init__(self, target=None, targets=None, thread=20, path=None, fmt='csv'):
        Module.__init__(self)
        self.subdomains = set()
        self.module = 'Check'
        self.source = 'Takeover'
        self.target = target
        self.targets = targets
        self.thread = thread
        self.path = path
        self.fmt = fmt
        self.fingerprints = None
        self.queue = Queue() # subdomain queue
        self.cnames = list()
        self.results = Dataset()
    def save(self):
        """Export the collected takeover results to self.path in self.fmt."""
        logger.log('DEBUG', 'Saving results')
        if self.fmt == 'txt':
            data = str(self.results)
        else:
            data = self.results.export(self.fmt)
        utils.save_to_file(self.path, data)
    def compare(self, subdomain, cname, responses):
        """Record a takeover threat if any fingerprint response string appears
        in both the subdomain's and the CNAME target's HTTP bodies."""
        domain_resp = self.get('http://' + subdomain, check=False, ignore=True)
        cname_resp = self.get('http://' + cname, check=False, ignore=True)
        if domain_resp is None or cname_resp is None:
            return
        for resp in responses:
            if resp in domain_resp.text and resp in cname_resp.text:
                logger.log('ALERT', f'{subdomain} takeover threat found')
                self.results.append([subdomain, cname])
                break
    def worker(self, subdomain):
        """Check one subdomain: resolve its CNAME and match it against the
        loaded service fingerprints."""
        cname = get_cname(subdomain)
        if cname is None:
            return
        main_domain = utils.get_main_domain(cname)
        for fingerprint in self.fingerprints:
            cnames = fingerprint.get('cname')
            if main_domain not in cnames:
                continue
            responses = fingerprint.get('response')
            self.compare(subdomain, cname, responses)
    def check(self):
        """Thread target: consume subdomains from the queue until it is empty."""
        # loop condition lets worker threads exit once the domain queue is drained
        while not self.queue.empty():
            subdomain = self.queue.get() # take one domain off the queue
            self.worker(subdomain)
            self.queue.task_done()
    def progress(self):
        """Thread target: render a tqdm progress bar from the queue size."""
        bar = tqdm()
        bar.total = len(self.subdomains)
        bar.desc = 'Check Progress'
        bar.ncols = 80
        while True:
            done = bar.total - self.queue.qsize()
            bar.n = done
            bar.update()
            if done == bar.total: # exit once every queued subdomain was checked
                break
    def run(self):
        """Entry point: gather targets, spawn checker threads, save results."""
        start = time.time()
        logger.log('INFOR', f'Start running {self.source} module')
        if isinstance(self.targets, set):
            self.subdomains = self.targets
        else:
            self.subdomains = utils.get_domains(self.target, self.targets)
        self.fmt = utils.check_format(self.fmt)
        timestamp = utils.get_timestamp()
        name = f'takeover_check_result_{timestamp}'
        self.path = utils.check_path(self.path, name, self.fmt)
        if self.subdomains:
            logger.log('INFOR', f'Checking subdomain takeover')
            self.fingerprints = get_fingerprint()
            self.results.headers = ['subdomain', 'cname']
            # enqueue the subdomains awaiting checks
            for domain in self.subdomains:
                self.queue.put(domain)
            # progress-bar thread
            progress_thread = Thread(target=self.progress, name='ProgressThread',
                                     daemon=True)
            progress_thread.start()
            # checker threads
            for i in range(self.thread):
                check_thread = Thread(target=self.check, name=f'CheckThread{i}',
                                      daemon=True)
                check_thread.start()
            self.queue.join()
            self.save()
        else:
            logger.log('FATAL', f'Failed to obtain domain')
        end = time.time()
        elapse = round(end - start, 1)
        logger.log('ALERT', f'{self.source} module takes {elapse} seconds, '
                            f'There are {len(self.results)} subdomains exists takeover')
        logger.log('INFOR', f'Subdomain takeover results: {self.path}')
        logger.log('INFOR', f'Finished {self.source} module')
fire.Fire(Takeover)
| 34.16568 | 88 | 0.6053 |
acf9ccbba3ddd526219c5663d7c81266270e777b | 9,140 | py | Python | posthog/models/user.py | iprithvitharun/posthog | 763e9f1c9430f0371a61711af871a78b8dc95928 | [
"MIT"
] | null | null | null | posthog/models/user.py | iprithvitharun/posthog | 763e9f1c9430f0371a61711af871a78b8dc95928 | [
"MIT"
] | null | null | null | posthog/models/user.py | iprithvitharun/posthog | 763e9f1c9430f0371a61711af871a78b8dc95928 | [
"MIT"
] | null | null | null | from typing import Any, Callable, Dict, List, Optional, Tuple
from django.conf import settings
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.db import models, transaction
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from rest_framework.exceptions import ValidationError
from posthog.utils import get_instance_realm
from .organization import Organization, OrganizationMembership
from .personal_api_key import PersonalAPIKey
from .team import Team
from .utils import UUIDClassicModel, generate_random_token, sane_repr
class UserManager(BaseUserManager):
    """Define a model manager for User model with no username field."""
    use_in_migrations = True
    def create_user(self, email: str, password: Optional[str], first_name: str, **extra_fields) -> "User":
        """Create and save a User with the given email and password."""
        if email is None:
            raise ValueError("Email must be provided!")
        email = self.normalize_email(email)
        # Every user gets a random distinct_id unless the caller supplied one.
        extra_fields.setdefault("distinct_id", generate_random_token())
        user = self.model(email=email, first_name=first_name, **extra_fields)
        # password=None leaves the user without a usable password
        # (e.g. social-auth accounts).
        if password is not None:
            user.set_password(password)
        user.save()
        return user
    def bootstrap(
        self,
        organization_name: str,
        email: str,
        password: Optional[str],
        first_name: str = "",
        organization_fields: Optional[Dict[str, Any]] = None,
        team_fields: Optional[Dict[str, Any]] = None,
        create_team: Optional[Callable[["Organization", "User"], "Team"]] = None,
        is_staff: bool = False,
        **user_fields,
    ) -> Tuple["Organization", "Team", "User"]:
        """Instead of doing the legwork of creating a user from scratch, delegate the details with bootstrap.

        Atomically creates an Organization, a Team and a User, making the
        user an OWNER of the new organization.  ``create_team`` lets callers
        override how the team is built; otherwise Team.objects.create_with_data
        is used.  Returns the (organization, team, user) triple.
        """
        with transaction.atomic():
            organization_fields = organization_fields or {}
            organization_fields.setdefault("name", organization_name)
            organization = Organization.objects.create(**organization_fields)
            user = self.create_user(
                email=email, password=password, first_name=first_name, is_staff=is_staff, **user_fields
            )
            if create_team:
                team = create_team(organization, user)
            else:
                team = Team.objects.create_with_data(user=user, organization=organization, **(team_fields or {}))
            user.join(
                organization=organization, level=OrganizationMembership.Level.OWNER,
            )
            return organization, team, user
    def create_and_join(
        self,
        organization: Organization,
        email: str,
        password: Optional[str],
        first_name: str = "",
        level: OrganizationMembership.Level = OrganizationMembership.Level.MEMBER,
        **extra_fields,
    ) -> "User":
        """Atomically create a user and add them to an existing organization."""
        with transaction.atomic():
            user = self.create_user(email=email, password=password, first_name=first_name, **extra_fields)
            user.join(organization=organization, level=level)
            return user
    def get_from_personal_api_key(self, key_value: str) -> Optional["User"]:
        """Resolve a personal API key value to its (active) owner.

        Returns None when no active user owns a key with this value.
        As a side effect, stamps the key's ``last_used_at`` on success.
        """
        try:
            personal_api_key: PersonalAPIKey = (
                PersonalAPIKey.objects.select_related("user").filter(user__is_active=True).get(value=key_value)
            )
        except PersonalAPIKey.DoesNotExist:
            return None
        else:
            personal_api_key.last_used_at = timezone.now()
            personal_api_key.save()
            return personal_api_key.user
def events_column_config_default() -> Dict[str, Any]:
    """Return a fresh default value for ``User.events_column_config``.

    A callable (rather than a literal) is used as the field default so that
    each row gets its own dict instance.
    """
    return dict(active="DEFAULT")
class User(AbstractUser, UUIDClassicModel):
    """PostHog user: logs in by email and tracks a "current" organization/team."""
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS: List[str] = []
    # Toolbar-mode choice constants.
    DISABLED = "disabled"
    TOOLBAR = "toolbar"
    TOOLBAR_CHOICES = [
        (DISABLED, DISABLED),
        (TOOLBAR, TOOLBAR),
    ]
    # The organization/team the user is currently operating in; SET_NULL so
    # deleting either does not delete the user.
    current_organization = models.ForeignKey(
        "posthog.Organization", models.SET_NULL, null=True, related_name="users_currently+",
    )
    current_team = models.ForeignKey("posthog.Team", models.SET_NULL, null=True, related_name="teams_currently+")
    email = models.EmailField(_("email address"), unique=True)
    temporary_token: models.CharField = models.CharField(max_length=200, null=True, blank=True, unique=True)
    distinct_id: models.CharField = models.CharField(max_length=200, null=True, blank=True, unique=True)
    # Preferences / configuration options
    email_opt_in: models.BooleanField = models.BooleanField(default=False, null=True, blank=True)
    anonymize_data: models.BooleanField = models.BooleanField(default=False, null=True, blank=True)
    toolbar_mode: models.CharField = models.CharField(
        max_length=200, null=True, blank=True, choices=TOOLBAR_CHOICES, default=TOOLBAR
    )
    # DEPRECATED
    events_column_config: models.JSONField = models.JSONField(default=events_column_config_default)
    # Remove unused attributes from `AbstractUser`
    username = None  # type: ignore
    objects: UserManager = UserManager()  # type: ignore
    @property
    def is_superuser(self) -> bool:  # type: ignore
        # Overrides the AbstractUser field: staff users are superusers here.
        return self.is_staff
    @property
    def teams(self):
        """All teams across every organization the user belongs to."""
        return Team.objects.filter(organization__in=self.organizations.all())
    @property
    def organization(self) -> Optional[Organization]:
        """Current organization, self-healing: if unset, pick one and persist it."""
        if self.current_organization is None:
            if self.current_team is not None:
                self.current_organization_id = self.current_team.organization_id
            self.current_organization = self.organizations.first()
            self.save()
        return self.current_organization
    @property
    def team(self) -> Optional[Team]:
        """Current team, self-healing: if unset, pick one from the current org and persist it."""
        if self.current_team is None and self.organization is not None:
            self.current_team = self.organization.teams.order_by("access_control", "id").first()  # Prefer open projects
            self.save()
        return self.current_team
    def join(
        self, *, organization: Organization, level: OrganizationMembership.Level = OrganizationMembership.Level.MEMBER,
    ) -> OrganizationMembership:
        """Add this user to *organization* and switch their current org/team to it."""
        with transaction.atomic():
            membership = OrganizationMembership.objects.create(user=self, organization=organization, level=level)
            self.current_organization = organization
            self.current_team = organization.teams.first()
            self.save()
            return membership
    def leave(self, *, organization: Organization) -> None:
        """Remove this user from *organization*.

        Raises ValidationError if they are the OWNER.  If the left org was the
        current one, falls back to any remaining org (and its first team).
        """
        membership: OrganizationMembership = OrganizationMembership.objects.get(user=self, organization=organization)
        if membership.level == OrganizationMembership.Level.OWNER:
            raise ValidationError("Cannot leave the organization as its owner!")
        with transaction.atomic():
            membership.delete()
            if self.current_organization == organization:
                self.current_organization = self.organizations.first()
                self.current_team = (
                    None if self.current_organization is None else self.current_organization.teams.first()
                )
                self.save()
    def get_analytics_metadata(self):
        """Build the analytics-properties dict reported for this user."""
        # Distinct users across every org this user belongs to.
        team_member_count_all: int = (
            OrganizationMembership.objects.filter(organization__in=self.organizations.all(),)
            .values("user_id")
            .distinct()
            .count()
        )
        project_setup_complete = False
        if self.team and self.team.completed_snippet_onboarding and self.team.ingested_event:
            project_setup_complete = True
        return {
            "realm": get_instance_realm(),
            "is_ee_available": settings.EE_AVAILABLE,
            "email_opt_in": self.email_opt_in,
            "anonymize_data": self.anonymize_data,
            "email": self.email if not self.anonymize_data else None,
            "is_signed_up": True,
            "organization_count": self.organization_memberships.count(),
            "project_count": self.teams.count(),
            "team_member_count_all": team_member_count_all,
            "completed_onboarding_once": self.teams.filter(
                completed_snippet_onboarding=True, ingested_event=True,
            ).exists(),  # has completed the onboarding at least for one project
            # properties dependent on current project / org below
            "billing_plan": self.organization.billing_plan if self.organization else None,
            "organization_id": str(self.organization.id) if self.organization else None,
            "project_id": str(self.team.uuid) if self.team else None,
            "project_setup_complete": project_setup_complete,
            "joined_at": self.date_joined,
            "has_password_set": self.has_usable_password(),
            "has_social_auth": self.social_auth.exists(),  # type: ignore
            "social_providers": list(self.social_auth.values_list("provider", flat=True)),  # type: ignore
        }
    __repr__ = sane_repr("email", "first_name", "distinct_id")
| 42.910798 | 120 | 0.666302 |
acf9ce78bf6d58069b2d18e0893d90e8eefbe2ac | 1,547 | py | Python | catutils/log/errors.py | lazybradol/py-catutils | 4a0073b3b2848343b690f4af2ccbd4b88363912c | [
"BSD-2-Clause"
] | null | null | null | catutils/log/errors.py | lazybradol/py-catutils | 4a0073b3b2848343b690f4af2ccbd4b88363912c | [
"BSD-2-Clause"
] | null | null | null | catutils/log/errors.py | lazybradol/py-catutils | 4a0073b3b2848343b690f4af2ccbd4b88363912c | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python3
"""
BSD 2-Clause License
Copyright (c) 2019, wenqian
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
class TemplateFormatError(Exception):
    """Raised when a log template string is malformed.

    The previous explicit ``__init__`` only forwarded its single ``msg``
    argument to ``Exception.__init__`` and was therefore redundant;
    construction with a message behaves exactly as before.
    """
class TemplateNotFoundError(Exception):
    """Raised when a requested log template cannot be found.

    The previous explicit ``__init__`` only forwarded its single ``msg``
    argument to ``Exception.__init__`` and was therefore redundant;
    construction with a message behaves exactly as before.
    """
| 36.833333 | 78 | 0.786037 |
acf9cea0bed9fe9714010303e179500bacaaed5d | 22,302 | py | Python | test/functional/importmulti.py | arthurcolle/bootstrapping-ellocash | 9495f1e3741c7f893457e4f6602d6ef0d84b7b3d | [
"MIT"
] | null | null | null | test/functional/importmulti.py | arthurcolle/bootstrapping-ellocash | 9495f1e3741c7f893457e4f6602d6ef0d84b7b3d | [
"MIT"
] | null | null | null | test/functional/importmulti.py | arthurcolle/bootstrapping-ellocash | 9495f1e3741c7f893457e4f6602d6ef0d84b7b3d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC."""
from test_framework.test_framework import EllocashTestFramework
from test_framework.util import *
class ImportMultiTest (EllocashTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.setup_nodes()
def run_test (self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
# keyword definition
PRIV_KEY = 'privkey'
PUB_KEY = 'pubkey'
ADDRESS_KEY = 'address'
SCRIPT_KEY = 'script'
node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
node0_address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
node0_address3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
#Check only one address
assert_equal(node0_address1['ismine'], True)
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),1)
#Address Test - before import
address_info = self.nodes[1].validateaddress(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Ellocash Address
self.log.info("Should import an address")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_address = address['address']
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": "not valid address",
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Invalid address')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + !internal
self.log.info("Should not import a scriptPubKey without internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Public key + !Internal
self.log.info("Should import an address with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + !internal
self.log.info("Should not import a scriptPubKey without internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import an address with private key if is already imported")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script')
# Address + Private key + watchonly
self.log.info("Should not import an address with private key and with watchonly")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Private key + !internal
self.log.info("Should not import a scriptPubKey without internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# P2SH address
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['isscript'], True)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript']
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
transaction = self.nodes[1].gettransaction(transactionid)
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should not import an address with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": watchonly_address,
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_message(JSONRPCException, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
}])
assert_raises_message(JSONRPCException, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "",
}])
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    ImportMultiTest ().main ()
| 48.064655 | 137 | 0.631692 |
acf9ceff836b592b711a3a53d65c349ddd3f4317 | 480 | py | Python | tests/system_tests/common.py | Curtis241/taskmgr | ac485395d189e0c150e87bab8807b42d341545ed | [
"MIT"
] | null | null | null | tests/system_tests/common.py | Curtis241/taskmgr | ac485395d189e0c150e87bab8807b42d341545ed | [
"MIT"
] | 4 | 2021-03-25T22:39:57.000Z | 2021-07-19T05:46:38.000Z | tests/system_tests/common.py | Curtis241/taskmgr | ac485395d189e0c150e87bab8807b42d341545ed | [
"MIT"
] | null | null | null | from dpath import util
class Common:
@staticmethod
def verify_structure(response: dict) -> bool:
assert type(response) is dict
return "tasks" in response
@staticmethod
def count_tasks(response: dict) -> int:
task_list = util.get(response, "tasks")
return len(task_list)
@staticmethod
def get_by_index(response: dict, index: int) -> dict:
task_list = util.get(response, "tasks")
return task_list[index]
| 22.857143 | 57 | 0.645833 |
acf9cfbc16b3ae3f724b728b28aae7af90929a91 | 119 | py | Python | Python OOP/Inheritance/Zoo/bear.py | bvoytash/Software-University | f2c6940cde093cea7b1c38bd88305206564c9947 | [
"MIT"
] | null | null | null | Python OOP/Inheritance/Zoo/bear.py | bvoytash/Software-University | f2c6940cde093cea7b1c38bd88305206564c9947 | [
"MIT"
] | null | null | null | Python OOP/Inheritance/Zoo/bear.py | bvoytash/Software-University | f2c6940cde093cea7b1c38bd88305206564c9947 | [
"MIT"
] | null | null | null | from project_1.mammal import Mammal
class Bear(Mammal):
    """A bear exhibit animal.

    Inherits construction and behaviour unchanged from Mammal; the previous
    explicit ``__init__`` that merely forwarded ``name`` to
    ``super().__init__`` was redundant and has been removed.
    """
| 17 | 35 | 0.689076 |
acf9d08b23a2742841f7a14bc1a892918a535967 | 58,154 | py | Python | python/pyspark/sql/dataframe.py | tilumi/spark | cc6778ee0bf4fa7a78abd30542c4a6f80ea371c5 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null | python/pyspark/sql/dataframe.py | tilumi/spark | cc6778ee0bf4fa7a78abd30542c4a6f80ea371c5 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null | python/pyspark/sql/dataframe.py | tilumi/spark | cc6778ee0bf4fa7a78abd30542c4a6f80ea371c5 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
else:
from itertools import imap as map
from pyspark import copy_func, since
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import BatchedSerializer, PickleSerializer, UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter, DataStreamWriter
from pyspark.sql.types import *
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(object):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SQLContext`::
people = sqlContext.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the data frame, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SQLContext
people = sqlContext.read.parquet("...")
department = sqlContext.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id)\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
    def __init__(self, jdf, sql_ctx):
        """Wrap a py4j handle to a JVM-side Dataset.

        :param jdf: py4j reference to the JVM DataFrame/Dataset.
        :param sql_ctx: the SQLContext that created this DataFrame (may be falsy).
        """
        self._jdf = jdf
        self.sql_ctx = sql_ctx
        # SparkContext; `and` short-circuits so a falsy sql_ctx passes through.
        self._sc = sql_ctx and sql_ctx._sc
        self.is_cached = False
        self._schema = None  # initialized lazily by the `schema` property
        self._lazy_rdd = None  # Python RDD view, built on first access to `.rdd`
    @property
    @since(1.3)
    def rdd(self):
        """Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
        """
        # Build the Python-side RDD only once and cache it; javaToPython()
        # produces pickled rows, hence the batched pickle deserializer.
        if self._lazy_rdd is None:
            jrdd = self._jdf.javaToPython()
            self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
        return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this RDD as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SQLContext`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Experimental.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> df.explain()
== Physical Plan ==
Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
if extended:
print(self._jdf.queryExecution().toString())
else:
print(self._jdf.queryExecution().simpleString())
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns true if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`startStream` method in
:class:`DataFrameWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Experimental
"""
return self._jdf.isStreaming()
@since(1.3)
def show(self, n=20, truncate=True):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: Whether truncate long strings and align cells right.
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
print(self._jdf.showString(n, truncate))
def __repr__(self):
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.collectToPython()
return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this DataFrame.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.toPythonIterator()
return _load_from_socket(port, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._sc._jvm.org.apache.spark.sql.execution.python.EvaluatePython.takeAndServe(
self._jdf, num)
return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
""" Persists with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""Sets the storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY}).
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to False to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions.
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
``numPartitions`` can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement, fraction, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
>>> df.sample(False, 0.5, 42).count()
2
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd = self._jdf.sample(withReplacement, fraction, long(seed))
return DataFrame(rdd, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new DataFrame that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 5|
| 1| 9|
+---+-----+
"""
if not isinstance(col, str):
raise ValueError("col must be a string, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the DataFrame. Weights will
be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
1
>>> splits[1].count()
3
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
    @ignore_unicode_prefix
    @since(1.3)
    def join(self, other, on=None, how=None):
        """Joins with another :class:`DataFrame`, using the given join expression.
        The following performs a full outer join between ``df1`` and ``df2``.
        :param other: Right side of the join
        :param on: a string for join column name, a list of column names,
            , a join expression (Column) or a list of Columns.
            If `on` is a string or a list of string indicating the name of the join column(s),
            the column(s) must exist on both sides, and this performs an equi-join.
        :param how: str, default 'inner'.
            One of `inner`, `outer`, `left_outer`, `right_outer`, `leftsemi`.
        >>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()
        [Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
        >>> df.join(df2, 'name', 'outer').select('name', 'height').collect()
        [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
        >>> cond = [df.name == df3.name, df.age == df3.age]
        >>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
        [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
        >>> df.join(df2, 'name').select(df.name, df2.height).collect()
        [Row(name=u'Bob', height=85)]
        >>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
        [Row(name=u'Bob', age=5)]
        """
        # Normalize `on` to a list so the dispatch below only has two shapes
        # to handle: a list of column-name strings or a list of Columns.
        if on is not None and not isinstance(on, list):
            on = [on]
        if on is None or len(on) == 0:
            # No join condition at all: plain (cross-style) join on the JVM side.
            jdf = self._jdf.join(other._jdf)
        elif isinstance(on[0], basestring):
            # Equi-join on named column(s); the names must exist on both sides.
            if how is None:
                jdf = self._jdf.join(other._jdf, self._jseq(on), "inner")
            else:
                assert isinstance(how, basestring), "how should be basestring"
                jdf = self._jdf.join(other._jdf, self._jseq(on), how)
        else:
            assert isinstance(on[0], Column), "on should be Column or list of Column"
            # Fold multiple Column conditions into a single conjunction (AND).
            if len(on) > 1:
                on = reduce(lambda x, y: x.__and__(y), on)
            else:
                on = on[0]
            if how is None:
                jdf = self._jdf.join(other._jdf, on._jc, "inner")
            else:
                assert isinstance(how, basestring), "how should be basestring"
                jdf = self._jdf.join(other._jdf, on._jc, how)
        return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
    def _jcols(self, *cols):
        """Return a JVM Seq of Columns from a list of Column or column names
        If `cols` has only one list in it, cols[0] will be used as the list.
        """
        # Accept both f(col1, col2) and f([col1, col2]) calling conventions.
        if len(cols) == 1 and isinstance(cols[0], list):
            cols = cols[0]
        return self._jseq(cols, _to_java_column)
    def _sort_cols(self, cols, kwargs):
        """ Return a JVM Seq of Columns that describes the sort order

        :param cols: tuple of Column objects or column-name strings; a single
            list inside the tuple is also accepted (see below).
        :param kwargs: may contain ``ascending`` — a bool applied to all
            columns, or a list of bools zipped positionally with ``cols``.
        :raises ValueError: if no sort column was given.
        :raises TypeError: if ``ascending`` is neither bool/int nor list.
        """
        if not cols:
            raise ValueError("should sort by at least one column")
        # Accept both sort(col1, col2) and sort([col1, col2]).
        if len(cols) == 1 and isinstance(cols[0], list):
            cols = cols[0]
        jcols = [_to_java_column(c) for c in cols]
        ascending = kwargs.get('ascending', True)
        if isinstance(ascending, (bool, int)):
            # One flag for every column; flip all to descending if falsy.
            if not ascending:
                jcols = [jc.desc() for jc in jcols]
        elif isinstance(ascending, list):
            # One flag per column, matched positionally.
            jcols = [jc if asc else jc.desc()
                     for asc, jc in zip(ascending, jcols)]
        else:
            raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
        return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes statistics for numeric columns.
This include count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical columns.
.. note:: This function is meant for exploratory data analysis, as we make no \
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe().show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe(['age', 'name']).show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
Note that this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
    @ignore_unicode_prefix
    @since(1.3)
    def __getitem__(self, item):
        """Returns the column as a :class:`Column`.
        >>> df.select(df['age']).collect()
        [Row(age=2), Row(age=5)]
        >>> df[ ["name", "age"]].collect()
        [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
        >>> df[ df.age > 3 ].collect()
        [Row(age=5, name=u'Bob')]
        >>> df[df[0] > 3].collect()
        [Row(age=5, name=u'Bob')]
        """
        # Dispatch on the subscript type: a string names a column, a Column
        # is treated as a boolean filter, a list/tuple is a projection, and
        # an int selects a column by position.
        if isinstance(item, basestring):
            jc = self._jdf.apply(item)
            return Column(jc)
        elif isinstance(item, Column):
            return self.filter(item)
        elif isinstance(item, (list, tuple)):
            return self.select(*item)
        elif isinstance(item, int):
            jc = self._jdf.apply(self.columns[item])
            return Column(jc)
        else:
            raise TypeError("unexpected item type: %s" % type(item))
@since(1.3)
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy.agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this
frame and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by a distinct.
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this
frame and another frame.
.. note:: Deprecated in 2.0, use union instead.
"""
return self.union(other)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this frame
but not in another frame.
This is equivalent to `EXCEPT` in SQL.
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \
Row(name='Alice', age=5, height=80), \
Row(name='Alice', age=5, height=80), \
Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
If specified, drop rows that have less than `thresh` non-null values.
This overwrites the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, dict)):
raise ValueError("value should be a float, int, long, string, or dict")
if isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
@since(1.4)
def replace(self, to_replace, value, subset=None):
    """Returns a new :class:`DataFrame` replacing a value with another value.

    :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
    aliases of each other.

    :param to_replace: int, long, float, string, or list.
        Value to be replaced.
        If the value is a dict, then `value` is ignored and `to_replace` must be a
        mapping from column name (string) to replacement value. The value to be
        replaced must be an int, long, float, or string.
    :param value: int, long, float, string, or list.
        Value to use to replace holes.
        The replacement value must be an int, long, float, or string. If `value` is a
        list or tuple, `value` should be of the same length with `to_replace`.
    :param subset: optional list of column names to consider.
        Columns specified in subset that do not have matching data type are ignored.
        For example, if `value` is a string, and subset contains a non-string column,
        then the non-string column is simply ignored.

    >>> df4.na.replace(10, 20).show()
    +----+------+-----+
    | age|height| name|
    +----+------+-----+
    |  20|    80|Alice|
    |   5|  null|  Bob|
    |null|  null|  Tom|
    |null|  null| null|
    +----+------+-----+

    >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
    +----+------+----+
    | age|height|name|
    +----+------+----+
    |  10|    80|   A|
    |   5|  null|   B|
    |null|  null| Tom|
    |null|  null|null|
    +----+------+----+
    """
    # Validate argument types up front so errors surface before any JVM call.
    if not isinstance(to_replace, (float, int, long, basestring, list, tuple, dict)):
        raise ValueError(
            "to_replace should be a float, int, long, string, list, tuple, or dict")

    # NOTE(review): `value` is validated (and required) even when `to_replace`
    # is a dict, in which case `value` is ultimately ignored -- confirm intended.
    if not isinstance(value, (float, int, long, basestring, list, tuple)):
        raise ValueError("value should be a float, int, long, string, list, or tuple")

    rep_dict = dict()

    # Normalise scalars and tuples to lists so a single zip-based path below
    # can build the old-value -> new-value mapping.
    if isinstance(to_replace, (float, int, long, basestring)):
        to_replace = [to_replace]
    if isinstance(to_replace, tuple):
        to_replace = list(to_replace)
    if isinstance(value, tuple):
        value = list(value)

    if isinstance(to_replace, list) and isinstance(value, list):
        if len(to_replace) != len(value):
            raise ValueError("to_replace and value lists should be of the same length")
        rep_dict = dict(zip(to_replace, value))
    elif isinstance(to_replace, list) and isinstance(value, (float, int, long, basestring)):
        # One replacement value applied to every entry of to_replace.
        rep_dict = dict([(tr, value) for tr in to_replace])
    elif isinstance(to_replace, dict):
        rep_dict = to_replace

    if subset is None:
        # '*' presumably tells the JVM side to search all columns -- see
        # DataFrameNaFunctions.replace in Spark's Scala API.
        return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
    elif isinstance(subset, basestring):
        subset = [subset]

    if not isinstance(subset, (list, tuple)):
        raise ValueError("subset should be a list or tuple of column names")

    return DataFrame(
        self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
    """
    Calculates the approximate quantiles of a numerical column of a
    DataFrame.

    The result of this algorithm has the following deterministic bound:
    If the DataFrame has N elements and if we request the quantile at
    probability `p` up to error `err`, then the algorithm will return
    a sample `x` from the DataFrame so that the *exact* rank of `x` is
    close to (p * N). More precisely,

      floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).

    This method implements a variation of the Greenwald-Khanna
    algorithm (with some speed optimizations). The algorithm was first
    present in [[http://dx.doi.org/10.1145/375663.375670
    Space-efficient Online Computation of Quantile Summaries]]
    by Greenwald and Khanna.

    :param col: the name of the numerical column
    :param probabilities: a list of quantile probabilities
      Each number must belong to [0, 1].
      For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
    :param relativeError: The relative target precision to achieve
      (>= 0). If set to zero, the exact quantiles are computed, which
      could be very expensive. Note that values greater than 1 are
      accepted but give the same result as 1.
    :return: the approximate quantiles at the given probabilities
    """
    # Accept both str and unicode column names on Python 2 by validating
    # against basestring, consistent with drop()/fillna() in this module.
    # (Plain `str` wrongly rejected u'colname' under Python 2.)
    if not isinstance(col, basestring):
        raise ValueError("col should be a string.")

    # Normalise probabilities to a list and check each is a number in [0, 1].
    if not isinstance(probabilities, (list, tuple)):
        raise ValueError("probabilities should be a list or tuple")
    if isinstance(probabilities, tuple):
        probabilities = list(probabilities)
    for p in probabilities:
        if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
            raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
    probabilities = _to_list(self._sc, probabilities)

    # relativeError must be a non-negative number; values > 1 behave like 1.
    if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
        raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
    relativeError = float(relativeError)

    jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
    return list(jaq)
@since(1.4)
def corr(self, col1, col2, method=None):
    """
    Calculates the correlation of two columns of a DataFrame as a double value.
    Currently only supports the Pearson Correlation Coefficient.
    :func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.

    :param col1: The name of the first column
    :param col2: The name of the second column
    :param method: The correlation method. Currently only supports "pearson"
    """
    # Both column names must be plain strings.
    for arg_name, arg_value in (("col1", col1), ("col2", col2)):
        if not isinstance(arg_value, str):
            raise ValueError("%s should be a string." % arg_name)
    method = method or "pearson"
    if method != "pearson":
        raise ValueError("Currently only the calculation of the Pearson Correlation " +
                         "coefficient is supported.")
    return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
    """
    Calculate the sample covariance for the given columns, specified by their names, as a
    double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.

    :param col1: The name of the first column
    :param col2: The name of the second column
    """
    # Both column names must be plain strings.
    for arg_name, arg_value in (("col1", col1), ("col2", col2)):
        if not isinstance(arg_value, str):
            raise ValueError("%s should be a string." % arg_name)
    return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
    """
    Computes a pair-wise frequency table of the given columns. Also known as a contingency
    table. The number of distinct values for each column should be less than 1e4. At most 1e6
    non-zero pair frequencies will be returned.
    The first column of each row will be the distinct values of `col1` and the column names
    will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
    Pairs that have no occurrences will have zero as their counts.
    :func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.

    :param col1: The name of the first column. Distinct items will make the first item of
        each row.
    :param col2: The name of the second column. Distinct items will make the column names
        of the DataFrame.
    """
    # Both column names must be plain strings.
    for arg_name, arg_value in (("col1", col1), ("col2", col2)):
        if not isinstance(arg_value, str):
            raise ValueError("%s should be a string." % arg_name)
    return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
    """
    Finding frequent items for columns, possibly with false positives. Using the
    frequent element count algorithm described in
    "http://dx.doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
    :func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.

    .. note:: This function is meant for exploratory data analysis, as we make no \
        guarantee about the backward compatibility of the schema of the resulting DataFrame.

    :param cols: Names of the columns to calculate frequent items for as a list or tuple of
        strings.
    :param support: The frequency with which to consider an item 'frequent'. Default is 1%.
        The support must be greater than 1e-4.
    """
    if not isinstance(cols, (list, tuple)):
        raise ValueError("cols must be a list or tuple of column names as strings.")
    # Normalise to a list for the Py4J sequence conversion below.
    cols = list(cols)
    # Default support is 1% of rows.
    support = support or 0.01
    return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
    """
    Returns a new :class:`DataFrame` by adding a column or replacing the
    existing column that has the same name.

    :param colName: string, name of the new column.
    :param col: a :class:`Column` expression for the new column.

    >>> df.withColumn('age2', df.age + 2).collect()
    [Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
    """
    assert isinstance(col, Column), "col should be Column"
    new_jdf = self._jdf.withColumn(colName, col._jc)
    return DataFrame(new_jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumnRenamed(self, existing, new):
    """Returns a new :class:`DataFrame` with column `existing` renamed to `new`.

    :param existing: string, name of the existing column to rename.
    :param new: string, new name of the column.

    >>> df.withColumnRenamed('age', 'age2').collect()
    [Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
    """
    renamed_jdf = self._jdf.withColumnRenamed(existing, new)
    return DataFrame(renamed_jdf, self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, col):
    """Returns a new :class:`DataFrame` that drops the specified column.

    :param col: a string name of the column to drop, or a
        :class:`Column` to drop.

    >>> df.drop('age').collect()
    [Row(name=u'Alice'), Row(name=u'Bob')]

    >>> df.drop(df.age).collect()
    [Row(name=u'Alice'), Row(name=u'Bob')]

    >>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
    [Row(age=5, height=85, name=u'Bob')]

    >>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
    [Row(age=5, name=u'Bob', height=85)]
    """
    # A Column expression is dropped via its JVM handle; a string drops by name.
    if isinstance(col, Column):
        jdf = self._jdf.drop(col._jc)
    elif isinstance(col, basestring):
        jdf = self._jdf.drop(col)
    else:
        raise TypeError("col should be a string or a Column")
    return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
def toDF(self, *cols):
    """Returns a new :class:`DataFrame` with the specified column names.

    :param cols: list of new column names (string)

    >>> df.toDF('f1', 'f2').collect()
    [Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
    """
    return DataFrame(self._jdf.toDF(self._jseq(cols)), self.sql_ctx)
@since(1.3)
def toPandas(self):
    """Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.

    Note that this method should only be used if the resulting Pandas's DataFrame is expected
    to be small, as all the data is loaded into the driver's memory.

    This is only available if Pandas is installed and available.

    >>> df.toPandas()  # doctest: +SKIP
       age   name
    0    2  Alice
    1    5    Bob
    """
    import pandas as pd
    # collect() pulls every row to the driver; column names come from the schema.
    rows = self.collect()
    return pd.DataFrame.from_records(rows, columns=self.columns)
##########################################################################################
# Pandas compatibility
##########################################################################################

# pandas-style snake_case aliases for the camelCase methods above. copy_func
# presumably clones the function object so each alias can carry its own
# ``sinceversion`` and docstring without mutating the original -- confirm
# against copy_func's definition.
groupby = copy_func(
    groupBy,
    sinceversion=1.4,
    doc=":func:`groupby` is an alias for :func:`groupBy`.")

drop_duplicates = copy_func(
    dropDuplicates,
    sinceversion=1.4,
    doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")

where = copy_func(
    filter,
    sinceversion=1.3,
    doc=":func:`where` is an alias for :func:`filter`.")
def _to_scala_map(sc, jm):
    """Convert a Python dict into a JVM (Scala) Map via the Py4J gateway."""
    scala_map = sc._jvm.PythonUtils.toScalaMap(jm)
    return scala_map
class DataFrameNaFunctions(object):
    """Functionality for working with missing data in :class:`DataFrame`.

    .. versionadded:: 1.4
    """

    def __init__(self, df):
        # The DataFrame whose na-handling methods this object proxies.
        self.df = df

    def drop(self, how='any', thresh=None, subset=None):
        # Thin delegate: the implementation lives in DataFrame.dropna().
        return self.df.dropna(how=how, thresh=thresh, subset=subset)

    # Share the docstring with the canonical implementation so help() and the
    # generated API docs stay in sync.
    drop.__doc__ = DataFrame.dropna.__doc__

    def fill(self, value, subset=None):
        # Thin delegate: the implementation lives in DataFrame.fillna().
        return self.df.fillna(value=value, subset=subset)

    fill.__doc__ = DataFrame.fillna.__doc__

    def replace(self, to_replace, value, subset=None):
        # Thin delegate: the implementation lives in DataFrame.replace().
        return self.df.replace(to_replace, value, subset)

    replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
    """Functionality for statistic functions with :class:`DataFrame`.

    .. versionadded:: 1.4
    """

    def __init__(self, df):
        # The DataFrame whose statistics methods this object proxies.
        self.df = df

    # Every method below is a thin delegate: the real implementation lives on
    # DataFrame, and the docstrings are shared so help() stays in sync.
    def approxQuantile(self, col, probabilities, relativeError):
        return self.df.approxQuantile(col, probabilities, relativeError)

    approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__

    def corr(self, col1, col2, method=None):
        return self.df.corr(col1, col2, method)

    corr.__doc__ = DataFrame.corr.__doc__

    def cov(self, col1, col2):
        return self.df.cov(col1, col2)

    cov.__doc__ = DataFrame.cov.__doc__

    def crosstab(self, col1, col2):
        return self.df.crosstab(col1, col2)

    crosstab.__doc__ = DataFrame.crosstab.__doc__

    def freqItems(self, cols, support=None):
        return self.df.freqItems(cols, support)

    freqItems.__doc__ = DataFrame.freqItems.__doc__

    def sampleBy(self, col, fractions, seed=None):
        return self.df.sampleBy(col, fractions, seed)

    sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
    """Run this module's doctests against a throwaway local SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext, SparkSession
    import pyspark.sql.dataframe

    # Doctests see this module's globals plus the fixtures installed below.
    globs = pyspark.sql.dataframe.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['spark'] = SparkSession(sc)
    # df gets an explicit schema; df2-df4 infer theirs from the Row objects.
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
                                   Row(name='Bob', age=5)]).toDF()
    globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
                                   Row(name='Bob', age=5, height=None),
                                   Row(name='Tom', age=None, height=None),
                                   Row(name=None, age=None, height=None)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.dataframe, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    # Always stop the context, then surface doctest failures as a non-zero exit.
    globs['sc'].stop()
    if failure_count:
        exit(-1)
if __name__ == "__main__":
    # Running this module directly executes its doctest suite.
    _test()
| 37.111678 | 100 | 0.577243 |
acf9d0a82e1b368af71cf696bcba0f678f27ef93 | 2,122 | py | Python | python/Prometheus_client_Histogram.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/Prometheus_client_Histogram.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/Prometheus_client_Histogram.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | 1 | 2020-08-29T17:12:52.000Z | 2020-08-29T17:12:52.000Z | from prometheus_client import Histogram
from prometheus_client import start_http_server, Summary
import random
import time
# A histogram metric with the client library's default latency buckets; it is
# exported as request_latency_seconds_{bucket,count,sum} over HTTP.
h = Histogram('request_latency_seconds', 'Description of histogram')
h.observe(4.7)    # Observe 4.7 (seconds in this case)


# Decorator form: every call to f() is timed and recorded in the histogram.
@h.time()
def f():
    pass


# Context-manager form: the body of the with-block is timed and recorded.
with h.time():
    pass


if __name__ == '__main__':
    # Start up the server to expose the metrics.
    start_http_server(8000)
    # Generate some requests.
    while True:
        h.observe(random.uniform(0,5))
'''
http://localhost:8000/
# HELP python_gc_collected_objects Objects collected during gc
# TYPE python_gc_collected_objects histogram
# HELP python_gc_uncollectable_objects Uncollectable object found during GC
# TYPE python_gc_uncollectable_objects histogram
# HELP python_gc_duration_seconds Time spent in garbage collection
# TYPE python_gc_duration_seconds histogram
# HELP python_info Python platform information
# TYPE python_info gauge
python_info{implementation="CPython",major="3",minor="7",patchlevel="0",version="3.7.0"} 1.0
# HELP request_latency_seconds Description of histogram
# TYPE request_latency_seconds histogram
request_latency_seconds_bucket{le="0.005"} 32001.0
request_latency_seconds_bucket{le="0.01"} 63749.0
request_latency_seconds_bucket{le="0.025"} 159652.0
request_latency_seconds_bucket{le="0.05"} 319993.0
request_latency_seconds_bucket{le="0.075"} 480149.0
request_latency_seconds_bucket{le="0.1"} 640626.0
request_latency_seconds_bucket{le="0.25"} 1.603277e+06
request_latency_seconds_bucket{le="0.5"} 3.208363e+06
request_latency_seconds_bucket{le="0.75"} 4.812899e+06
request_latency_seconds_bucket{le="1.0"} 6.417821e+06
request_latency_seconds_bucket{le="2.5"} 1.6053888e+07
request_latency_seconds_bucket{le="5.0"} 3.2115897e+07
request_latency_seconds_bucket{le="7.5"} 3.2115897e+07
request_latency_seconds_bucket{le="10.0"} 3.2115897e+07
request_latency_seconds_bucket{le="+Inf"} 3.2115897e+07
request_latency_seconds_count 3.2115897e+07
request_latency_seconds_sum 8.030116580928595e+07
# TYPE request_latency_seconds_created gauge
request_latency_seconds_created 1.549349734980678e+09
'''
| 36.586207 | 92 | 0.808671 |
acf9d0edd3619cffeaf4a1469fbd1e04e367c137 | 7,201 | py | Python | services/users/project/tests/test_users.py | internetmosquito/quiz_app | 88979aae4e199d0878e9703df3160646b270feba | [
"MIT"
] | null | null | null | services/users/project/tests/test_users.py | internetmosquito/quiz_app | 88979aae4e199d0878e9703df3160646b270feba | [
"MIT"
] | null | null | null | services/users/project/tests/test_users.py | internetmosquito/quiz_app | 88979aae4e199d0878e9703df3160646b270feba | [
"MIT"
] | null | null | null | # services/users/project/tests/test_users.py
import json
import unittest
from project.tests.base import BaseTestCase
from project import db
from project.api.models import User
def add_user(username, email):
    """Create and persist a user, returning the model instance."""
    new_user = User(username=username, email=email)
    db.session.add(new_user)
    db.session.commit()
    return new_user
class TestUserService(BaseTestCase):
    """Tests for the Users Service."""

    def test_users(self):
        """Ensure the /ping route behaves correctly."""
        response = self.client.get('/users/ping')
        data = json.loads(response.data.decode())
        self.assertEqual(response.status_code, 200)
        self.assertIn('pong!', data['message'])
        self.assertIn('success', data['status'])

    def test_add_user(self):
        """Ensure a new user can be added to the database."""
        with self.client:
            response = self.client.post(
                '/users',
                data=json.dumps({
                    'username': 'michael',
                    'email': 'michael@mherman.org'
                }),
                content_type='application/json',
            )
            data = json.loads(response.data.decode())
            # 201 Created, with the new user's email echoed in the message.
            self.assertEqual(response.status_code, 201)
            self.assertIn('michael@mherman.org was added!', data['message'])
            self.assertIn('success', data['status'])

    def test_add_user_invalid_json(self):
        """Ensure error is thrown if the JSON object is empty."""
        with self.client:
            response = self.client.post(
                '/users',
                data=json.dumps({}),
                content_type='application/json',
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 400)
            self.assertIn('Invalid payload.', data['message'])
            self.assertIn('fail', data['status'])

    def test_add_user_invalid_json_keys(self):
        """
        Ensure error is thrown if the JSON object does not have a username key.
        """
        with self.client:
            response = self.client.post(
                '/users',
                data=json.dumps({'email': 'michael@mherman.org'}),
                content_type='application/json',
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 400)
            self.assertIn('Invalid payload.', data['message'])
            self.assertIn('fail', data['status'])

    def test_add_user_duplicate_email(self):
        """Ensure error is thrown if the email already exists."""
        with self.client:
            # First POST succeeds; the identical second one must be rejected.
            self.client.post(
                '/users',
                data=json.dumps({
                    'username': 'michael',
                    'email': 'michael@mherman.org'
                }),
                content_type='application/json',
            )
            response = self.client.post(
                '/users',
                data=json.dumps({
                    'username': 'michael',
                    'email': 'michael@mherman.org'
                }),
                content_type='application/json',
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 400)
            self.assertIn(
                'Sorry. That email already exists.', data['message'])
            self.assertIn('fail', data['status'])

    def test_single_user(self):
        """Ensure get single user behaves correctly."""
        user = add_user('michael', 'michael@mherman.org')
        with self.client:
            response = self.client.get(f'/users/{user.id}')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 200)
            self.assertIn('michael', data['data']['username'])
            self.assertIn('michael@mherman.org', data['data']['email'])
            self.assertIn('success', data['status'])

    def test_single_user_no_id(self):
        """Ensure error is thrown if an id is not provided."""
        with self.client:
            # A non-numeric id segment should 404 rather than error out.
            response = self.client.get('/users/blah')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 404)
            self.assertIn('User does not exist', data['message'])
            self.assertIn('fail', data['status'])

    def test_single_user_incorrect_id(self):
        """Ensure error is thrown if the id does not exist."""
        with self.client:
            response = self.client.get('/users/999')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 404)
            self.assertIn('User does not exist', data['message'])
            self.assertIn('fail', data['status'])

    def test_all_users(self):
        """Ensure get all users behaves correctly."""
        add_user('michael', 'michael@mherman.org')
        add_user('fletcher', 'fletcher@notreal.com')
        with self.client:
            response = self.client.get('/users')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 200)
            # Both seeded users come back, in insertion order.
            self.assertEqual(len(data['data']['users']), 2)
            self.assertIn('michael', data['data']['users'][0]['username'])
            self.assertIn(
                'michael@mherman.org', data['data']['users'][0]['email'])
            self.assertIn('fletcher', data['data']['users'][1]['username'])
            self.assertIn(
                'fletcher@notreal.com', data['data']['users'][1]['email'])
            self.assertIn('success', data['status'])

    def test_main_no_users(self):
        """Ensure the main route behaves correctly when no users have been
        added to the database."""
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'All Users', response.data)
        self.assertIn(b'<p>No users!</p>', response.data)

    def test_main_with_users(self):
        """Ensure the main route behaves correctly when users have been
        added to the database."""
        add_user('michael', 'michael@mherman.org')
        add_user('fletcher', 'fletcher@notreal.com')
        with self.client:
            response = self.client.get('/')
            self.assertEqual(response.status_code, 200)
            self.assertIn(b'All Users', response.data)
            self.assertNotIn(b'<p>No users!</p>', response.data)
            self.assertIn(b'michael', response.data)
            self.assertIn(b'fletcher', response.data)

    def test_main_add_user(self):
        """
        Ensure a new user can be added to the database via a POST request.
        """
        with self.client:
            # Form-encoded POST to the index page, following the redirect
            # back to the user listing.
            response = self.client.post(
                '/',
                data=dict(username='michael', email='michael@sonotreal.com'),
                follow_redirects=True
            )
            self.assertEqual(response.status_code, 200)
            self.assertIn(b'All Users', response.data)
            self.assertNotIn(b'<p>No users!</p>', response.data)
            self.assertIn(b'michael', response.data)
if __name__ == '__main__':
    # Allow running this test module directly: python test_users.py
    unittest.main()
| 39.565934 | 79 | 0.568671 |
acf9d1b8e46f4d8c323215b09572e3a381fe13a3 | 608 | py | Python | web3/utils/request.py | voBits/web3 | 947e252124f04b33ac5f96179dccd1a3476b3936 | [
"MIT"
] | 326 | 2016-04-29T21:51:06.000Z | 2022-03-31T03:20:54.000Z | web3/utils/request.py | voBits/web3 | 947e252124f04b33ac5f96179dccd1a3476b3936 | [
"MIT"
] | 283 | 2016-04-15T16:41:31.000Z | 2017-11-28T16:41:36.000Z | web3/utils/request.py | voBits/web3 | 947e252124f04b33ac5f96179dccd1a3476b3936 | [
"MIT"
] | 146 | 2016-04-14T16:27:54.000Z | 2021-10-03T13:31:07.000Z | import pylru
import requests
from web3.utils.caching import generate_cache_key
# LRU cache of up to 8 requests.Session objects, keyed on the arguments they
# were requested with (see _get_session), so HTTP connection pools get reused.
_session_cache = pylru.lrucache(8)
def _get_session(*args, **kwargs):
    """Return a memoized ``requests.Session`` for the given arguments.

    Sessions are cached per argument tuple so connection pools are reused
    across calls with the same endpoint.
    """
    cache_key = generate_cache_key((args, kwargs))
    try:
        return _session_cache[cache_key]
    except KeyError:
        session = requests.Session()
        _session_cache[cache_key] = session
        return session
def make_post_request(endpoint_uri, data, *args, **kwargs):
    """POST ``data`` to ``endpoint_uri`` and return the raw response body.

    A 10-second timeout is applied unless the caller overrides it; non-2xx
    responses raise ``requests.HTTPError`` via ``raise_for_status``.
    """
    kwargs.setdefault('timeout', 10)
    session = _get_session(endpoint_uri)
    resp = session.post(endpoint_uri, data=data, *args, **kwargs)
    resp.raise_for_status()
    return resp.content
| 25.333333 | 69 | 0.743421 |
acf9d1e3abfcfc78767aec65eb3cb076ba37d0fb | 71,764 | py | Python | ckan/tests/logic/action/test_update.py | hackhit/ckan | 53b9442509b46525d653f2f705e98319752ceb2d | [
"BSD-3-Clause"
] | 6 | 2015-11-09T00:44:51.000Z | 2019-11-21T14:56:01.000Z | ckan/tests/logic/action/test_update.py | hackhit/ckan | 53b9442509b46525d653f2f705e98319752ceb2d | [
"BSD-3-Clause"
] | 39 | 2015-02-18T17:32:23.000Z | 2022-03-11T18:03:36.000Z | ckan/tests/logic/action/test_update.py | hackhit/ckan | 53b9442509b46525d653f2f705e98319752ceb2d | [
"BSD-3-Clause"
] | 17 | 2015-03-13T18:05:05.000Z | 2020-11-06T13:55:32.000Z | # encoding: utf-8
"""Unit tests for ckan/logic/action/update.py."""
import datetime
import unittest.mock as mock
import pytest
import ckan
import ckan.lib.app_globals as app_globals
import ckan.logic as logic
import ckan.plugins as p
import ckan.tests.factories as factories
import ckan.tests.helpers as helpers
from ckan import model
from freezegun import freeze_time
def datetime_from_string(s):
    """Parse a dictized-activity timestamp into a ``datetime.datetime``.

    The expected input format is the one produced by
    ``datetime.datetime.isoformat()``.
    """
    timestamp_format = "%Y-%m-%dT%H:%M:%S.%f"
    return datetime.datetime.strptime(s, timestamp_format)
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestUpdate(object):
def teardown(self):
    # Since some of the test methods below use the mock module to patch
    # things, we use this teardown() method to remove all patches.
    # (This makes sure the patches always get removed even if the test
    # method aborts with an exception or something.)
    mock.patch.stopall()
# START-AFTER
def test_user_update_name(self):
    """Test that attempting to change a user's name raises ValidationError.

    (user_update does not allow renaming an existing user.)
    """
    # The canonical form of a test has four steps:
    # 1. Setup any preconditions needed for the test.
    # 2. Call the function that's being tested, once only.
    # 3. Make assertions about the return value and/or side-effects of
    #    of the function that's being tested.
    # 4. Do nothing else!

    # 1. Setup.
    user = factories.User()
    user["name"] = "updated"

    # 2. Make assertions about the return value and/or side-effects.
    with pytest.raises(logic.ValidationError):
        helpers.call_action("user_update", **user)
# END-BEFORE
def test_user_generate_apikey(self):
    """A user can regenerate their own API key, and the new key persists."""
    user = factories.User()
    ctx = {"user": user["name"]}

    regenerated = helpers.call_action(
        "user_generate_apikey", context=ctx, id=user["id"]
    )
    refreshed = helpers.call_action(
        "user_show", context=ctx, id=user["id"]
    )

    # The stored key must actually change, and the action must return it.
    assert refreshed["apikey"] != user["apikey"]
    assert regenerated["apikey"] == refreshed["apikey"]
def test_user_generate_apikey_sysadmin_user(self):
    """A sysadmin can regenerate another user's API key."""
    user = factories.User()
    sysadmin = factories.Sysadmin()
    ctx = {"user": sysadmin["name"], "ignore_auth": False}

    regenerated = helpers.call_action(
        "user_generate_apikey", context=ctx, id=user["id"]
    )
    refreshed = helpers.call_action(
        "user_show", context=ctx, id=user["id"]
    )

    assert refreshed["apikey"] != user["apikey"]
    assert regenerated["apikey"] == refreshed["apikey"]
def test_user_generate_apikey_nonexistent_user(self):
    """Regenerating the API key of an unknown user id raises NotFound."""
    fake_user = {
        "id": "nonexistent",
        "name": "nonexistent",
        "email": "does@notexist.com",
    }
    ctx = {"user": fake_user["name"]}

    with pytest.raises(logic.NotFound):
        helpers.call_action(
            "user_generate_apikey", context=ctx, id=fake_user["id"]
        )
def test_user_update_with_id_that_does_not_exist(self):
    """Updating a user id that does not exist raises NotFound."""
    payload = factories.User.attributes()()
    payload["id"] = "there's no user with this id"
    with pytest.raises(logic.NotFound):
        helpers.call_action("user_update", **payload)
def test_user_update_with_no_id(self):
    """user_update without an id fails validation."""
    payload = factories.User.attributes()()
    assert "id" not in payload
    with pytest.raises(logic.ValidationError):
        helpers.call_action("user_update", **payload)
# Each value is an illegal user name: empty or too short, wrong type,
# presumably-reserved route words ('new', 'edit', 'search' -- confirm against
# the name validator), too long, or containing disallowed characters.
@pytest.mark.parametrize(
    "name",
    (
        "",
        "a",
        False,
        0,
        -1,
        23,
        "new",
        "edit",
        "search",
        "a" * 200,
        "Hi!",
        "i++%",
    ),
)
def test_user_update_with_invalid_name(self, name):
    """Renaming a user to an invalid name raises ValidationError."""
    user = factories.User()
    user["name"] = name
    with pytest.raises(logic.ValidationError):
        helpers.call_action("user_update", **user)
def test_user_update_to_name_that_already_exists(self):
    """Renaming a user to another user's existing name fails validation."""
    fred = factories.User(name="fred")
    bob = factories.User(name="bob")

    # Try to rename fred to "bob", a name bob already owns.
    fred["name"] = bob["name"]
    with pytest.raises(logic.ValidationError):
        helpers.call_action("user_update", **fred)
def test_user_update_password(self):
    """Test that updating a user's password works successfully."""
    user = factories.User()

    # FIXME we have to pass the email address to user_update even though
    # we're not updating it, otherwise validation fails.
    helpers.call_action(
        "user_update",
        id=user["id"],
        name=user["name"],
        email=user["email"],
        password="new password",
    )

    # user_show() never returns the user's password, so we have to access
    # the model directly to test it.
    import ckan.model as model

    updated_user = model.User.get(user["id"])
    assert updated_user.validate_password("new password")
def test_user_update_with_short_password(self):
    """Passwords below the minimum length are rejected."""
    user = factories.User()
    user["password"] = "xxx"  # This password is too short.
    with pytest.raises(logic.ValidationError):
        helpers.call_action("user_update", **user)
def test_user_update_with_empty_password(self):
    """If an empty password is passed to user_update, nothing should
    happen.

    No error (e.g. a validation error) is raised, but the password is not
    changed either.

    """
    user_dict = factories.User.attributes()()
    original_password = user_dict["password"]
    user_dict = factories.User(**user_dict)

    user_dict["password"] = ""
    helpers.call_action("user_update", **user_dict)

    # Check directly on the model: the stored password must be unchanged.
    import ckan.model as model

    updated_user = model.User.get(user_dict["id"])
    assert updated_user.validate_password(original_password)
def test_user_update_with_null_password(self):
    """A None password fails validation rather than being ignored."""
    user = factories.User()
    user["password"] = None
    with pytest.raises(logic.ValidationError):
        helpers.call_action("user_update", **user)
def test_user_update_with_invalid_password(self):
    """Non-string passwords (bool, int, float) all fail validation."""
    user = factories.User()
    for bad_password in (False, -1, 23, 30.7):
        user["password"] = bad_password
        with pytest.raises(logic.ValidationError):
            helpers.call_action("user_update", **user)
def test_user_update_without_email_address(self):
    """You have to pass an email address when you call user_update.

    Even if you don't want to change the user's email address, you still
    have to pass their current email address to user_update.

    FIXME: The point of this feature seems to be to prevent people from
    removing email addresses from user accounts, but making them post the
    current email address every time they post to user update is just
    annoying, they should be able to post a dict with no 'email' key to
    user_update and it should simply not change the current email.

    """
    user = factories.User()
    del user["email"]

    with pytest.raises(logic.ValidationError):
        helpers.call_action("user_update", **user)
# TODO: Valid and invalid values for the rest of the user model's fields.
def test_user_update_activity_stream(self):
    """Test that the right activity is emitted when updating a user."""
    user = factories.User()
    before = datetime.datetime.utcnow()

    # FIXME we have to pass the email address and password to user_update
    # even though we're not updating those fields, otherwise validation
    # fails.
    helpers.call_action(
        "user_update",
        id=user["id"],
        name=user["name"],
        email=user["email"],
        password=factories.User.password,
        fullname="updated full name",
    )

    activity_stream = helpers.call_action(
        "user_activity_list", id=user["id"]
    )
    latest_activity = activity_stream[0]
    assert latest_activity["activity_type"] == "changed user"
    assert latest_activity["object_id"] == user["id"]
    assert latest_activity["user_id"] == user["id"]

    # The activity must have been timestamped between `before` and `after`.
    after = datetime.datetime.utcnow()
    timestamp = datetime_from_string(latest_activity["timestamp"])
    assert timestamp >= before and timestamp <= after
def test_user_update_with_custom_schema(self):
    """Test that custom schemas passed to user_update do get used.

    user_update allows a custom validation schema to be passed to it in the
    context dict. This is just a simple test that if you pass a custom
    schema user_update does at least call a custom method that's given in
    the custom schema. We assume this means it did use the custom schema
    instead of the default one for validation, so user_update's custom
    schema feature does work.

    """
    import ckan.logic.schema

    user = factories.User()

    # A mock validator method, it doesn't do anything but it records what
    # params it gets called with and how many times. We are using function
    # instead of MagicMock, because validator must have __code__ attribute
    calls = []

    def mock_validator(v):
        calls.append(v)
        return v

    # Build a custom schema by taking the default schema and adding our
    # mock method to its 'id' field.
    schema = ckan.logic.schema.default_update_user_schema()
    schema["id"].append(mock_validator)

    # Call user_update and pass our custom schema in the context.
    # FIXME: We have to pass email and password even though we're not
    # trying to update them, or validation fails.
    helpers.call_action(
        "user_update",
        context={"schema": schema},
        id=user["id"],
        name=user["name"],
        email=user["email"],
        password=factories.User.password,
        fullname="updated full name",
    )
    # The mock validator must have been invoked exactly once, with the id.
    assert calls == [user['id']]
def test_user_update_multiple(self):
"""Test that updating multiple user attributes at once works."""
user = factories.User()
params = {
"id": user["id"],
"fullname": "updated full name",
"about": "updated about",
# FIXME: We shouldn't have to put email here since we're not
# updating it, but user_update sucks.
"email": user["email"],
# FIXME: We shouldn't have to put password here since we're not
# updating it, but user_update sucks.
"password": factories.User.password,
}
helpers.call_action("user_update", **params)
updated_user = helpers.call_action("user_show", id=user["id"])
assert updated_user["fullname"] == "updated full name"
assert updated_user["about"] == "updated about"
def test_user_update_does_not_return_password(self):
"""The user dict that user_update returns should not include the user's
password."""
user = factories.User()
params = {
"id": user["id"],
"fullname": "updated full name",
"about": "updated about",
"email": user["email"],
"password": factories.User.password,
}
updated_user = helpers.call_action("user_update", **params)
assert "password" not in updated_user
def test_user_update_does_not_return_apikey(self):
"""The user dict that user_update returns should not include the user's
API key."""
user = factories.User()
params = {
"id": user["id"],
"fullname": "updated full name",
"about": "updated about",
"email": user["email"],
"password": factories.User.password,
}
updated_user = helpers.call_action("user_update", **params)
assert "apikey" not in updated_user
def test_user_update_does_not_return_reset_key(self):
"""The user dict that user_update returns should not include the user's
reset key."""
import ckan.lib.mailer
import ckan.model
user = factories.User()
ckan.lib.mailer.create_reset_key(ckan.model.User.get(user["id"]))
params = {
"id": user["id"],
"fullname": "updated full name",
"about": "updated about",
"email": user["email"],
"password": factories.User.password,
}
updated_user = helpers.call_action("user_update", **params)
assert "reset_key" not in updated_user
def test_resource_reorder(self):
resource_urls = ["http://a.html", "http://b.html", "http://c.html"]
dataset = {
"name": "basic",
"resources": [{"url": url} for url in resource_urls],
}
dataset = helpers.call_action("package_create", **dataset)
created_resource_urls = [
resource["url"] for resource in dataset["resources"]
]
assert created_resource_urls == resource_urls
mapping = dict(
(resource["url"], resource["id"])
for resource in dataset["resources"]
)
# This should put c.html at the front
reorder = {"id": dataset["id"], "order": [mapping["http://c.html"]]}
helpers.call_action("package_resource_reorder", **reorder)
dataset = helpers.call_action("package_show", id=dataset["id"])
reordered_resource_urls = [
resource["url"] for resource in dataset["resources"]
]
assert reordered_resource_urls == [
"http://c.html",
"http://a.html",
"http://b.html",
]
reorder = {
"id": dataset["id"],
"order": [
mapping["http://b.html"],
mapping["http://c.html"],
mapping["http://a.html"],
],
}
helpers.call_action("package_resource_reorder", **reorder)
dataset = helpers.call_action("package_show", id=dataset["id"])
reordered_resource_urls = [
resource["url"] for resource in dataset["resources"]
]
assert reordered_resource_urls == [
"http://b.html",
"http://c.html",
"http://a.html",
]
def test_update_dataset_cant_change_type(self):
user = factories.User()
dataset = factories.Dataset(
type="dataset", name="unchanging", user=user
)
dataset = helpers.call_action(
"package_update",
id=dataset["id"],
name="unchanging",
type="cabinet",
)
assert dataset["type"] == "dataset"
assert (
helpers.call_action("package_show", id="unchanging")["type"]
== "dataset"
)
def test_update_organization_cant_change_type(self):
user = factories.User()
context = {"user": user["name"]}
org = factories.Organization(
type="organization", name="unchanging", user=user
)
org = helpers.call_action(
"organization_update",
context=context,
id=org["id"],
name="unchanging",
type="ragtagband",
)
assert org["type"] == "organization"
assert (
helpers.call_action("organization_show", id="unchanging")["type"]
== "organization"
)
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestDatasetUpdate(object):
    """Tests for updating individual dataset fields via ``package_update``.

    Each test updates one field (or one aspect) of a factory-created
    dataset and checks both the action's return value and the state
    persisted by ``package_show``.
    """

    def test_missing_id(self):
        """Calling package_update without an id raises ValidationError."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        with pytest.raises(logic.ValidationError):
            helpers.call_action("package_update")

    def test_name(self):
        """The dataset name can be updated and is persisted."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset_ = helpers.call_action(
            "package_update", id=dataset["id"], name="new-name"
        )
        assert dataset_["name"] == "new-name"
        assert (
            helpers.call_action("package_show", id=dataset["id"])["name"]
            == "new-name"
        )

    def test_title(self):
        """The dataset title can be updated and is persisted."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset_ = helpers.call_action(
            "package_update", id=dataset["id"], title="New Title"
        )
        assert dataset_["title"] == "New Title"
        assert (
            helpers.call_action("package_show", id=dataset["id"])["title"]
            == "New Title"
        )

    def test_extras(self):
        """Dataset extras can be set and are returned by package_show."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset_ = helpers.call_action(
            "package_update",
            id=dataset["id"],
            extras=[{"key": u"original media", "value": u'"book"'}],
        )
        assert dataset_["extras"][0]["key"] == "original media"
        assert dataset_["extras"][0]["value"] == '"book"'
        dataset_ = helpers.call_action("package_show", id=dataset["id"])
        assert dataset_["extras"][0]["key"] == "original media"
        assert dataset_["extras"][0]["value"] == '"book"'

    def test_extra_can_be_restored_after_deletion(self):
        """An extras key removed by one update can be re-added later."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset_ = helpers.call_action(
            "package_update",
            id=dataset["id"],
            extras=[
                {"key": u"old attribute", "value": u'value'},
                {"key": u"original media", "value": u'"book"'},
            ],
        )
        assert len(dataset_["extras"]) == 2
        # Wipe all extras ...
        dataset_ = helpers.call_action(
            "package_update",
            id=dataset["id"],
            extras=[],
        )
        assert dataset_["extras"] == []
        # ... then restore one of the old keys alongside a brand new one.
        dataset_ = helpers.call_action(
            "package_update",
            id=dataset["id"],
            extras=[
                {"key": u"original media", "value": u'"book"'},
                {"key": u"new attribute", "value": u'value'},
            ],
        )
        assert len(dataset_["extras"]) == 2

    def test_license(self):
        """The dataset license_id can be updated and is persisted."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset_ = helpers.call_action(
            "package_update", id=dataset["id"], license_id="other-open"
        )
        assert dataset_["license_id"] == "other-open"
        dataset_ = helpers.call_action("package_show", id=dataset["id"])
        assert dataset_["license_id"] == "other-open"

    def test_notes(self):
        """The dataset notes field can be updated and is persisted."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset_ = helpers.call_action(
            "package_update", id=dataset["id"], notes="some notes"
        )
        assert dataset_["notes"] == "some notes"
        dataset_ = helpers.call_action("package_show", id=dataset["id"])
        assert dataset_["notes"] == "some notes"

    def test_resources(self):
        """Resources, including free-form extra keys, survive a round-trip."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset_ = helpers.call_action(
            "package_update",
            id=dataset["id"],
            resources=[
                {
                    "alt_url": u"alt123",
                    "description": u"Full text.",
                    "somekey": "somevalue", # this is how to do resource extras
                    "extras": {u"someotherkey": u"alt234"}, # this isnt
                    "format": u"plain text",
                    "hash": u"abc123",
                    "position": 0,
                    "url": u"http://datahub.io/download/",
                },
                {
                    "description": u"Index of the novel",
                    "format": u"JSON",
                    "position": 1,
                    "url": u"http://datahub.io/index.json",
                },
            ],
        )
        # Check the action's return value first ...
        resources_ = dataset_["resources"]
        assert resources_[0]["alt_url"] == "alt123"
        assert resources_[0]["description"] == "Full text."
        assert resources_[0]["somekey"] == "somevalue"
        assert "extras" not in resources_[0]
        assert "someotherkey" not in resources_[0]
        assert resources_[0]["format"] == "plain text"
        assert resources_[0]["hash"] == "abc123"
        assert resources_[0]["position"] == 0
        assert resources_[0]["url"] == "http://datahub.io/download/"
        assert resources_[1]["description"] == "Index of the novel"
        assert resources_[1]["format"] == "JSON"
        assert resources_[1]["url"] == "http://datahub.io/index.json"
        assert resources_[1]["position"] == 1
        # ... then the persisted state reported by package_show.
        resources_ = helpers.call_action("package_show", id=dataset["id"])[
            "resources"
        ]
        assert resources_[0]["alt_url"] == "alt123"
        assert resources_[0]["description"] == "Full text."
        assert resources_[0]["somekey"] == "somevalue"
        assert "extras" not in resources_[0]
        assert "someotherkey" not in resources_[0]
        assert resources_[0]["format"] == "plain text"
        assert resources_[0]["hash"] == "abc123"
        assert resources_[0]["position"] == 0
        assert resources_[0]["url"] == "http://datahub.io/download/"
        assert resources_[1]["description"] == "Index of the novel"
        assert resources_[1]["format"] == "JSON"
        assert resources_[1]["url"] == "http://datahub.io/index.json"
        assert resources_[1]["position"] == 1

    def test_tags(self):
        """Tags can be set and are returned by package_show."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        dataset_ = helpers.call_action(
            "package_update",
            id=dataset["id"],
            tags=[{"name": u"russian"}, {"name": u"tolstoy"}],
        )
        tag_names = sorted([tag_dict["name"] for tag_dict in dataset_["tags"]])
        assert tag_names == ["russian", "tolstoy"]
        dataset_ = helpers.call_action("package_show", id=dataset["id"])
        tag_names = sorted([tag_dict["name"] for tag_dict in dataset_["tags"]])
        assert tag_names == ["russian", "tolstoy"]

    def test_return_id_only(self):
        """With return_id_only in the context, only the id is returned."""
        user = factories.User()
        dataset = factories.Dataset(user=user)
        updated_dataset = helpers.call_action(
            "package_update",
            id=dataset["id"],
            notes="Test",
            context={"return_id_only": True},
        )
        assert updated_dataset == dataset["id"]
@pytest.mark.usefixtures("with_request_context")
class TestUpdateSendEmailNotifications(object):
    """Auth behaviour of the ``send_email_notifications`` action."""

    @pytest.mark.ckan_config("ckan.activity_streams_email_notifications", True)
    @mock.patch("ckan.logic.action.update.request")
    def test_calling_through_paster_doesnt_validates_auth(self, mock_request):
        # When the request environ flags a CLI (paster) invocation, auth is
        # skipped and the action succeeds without raising.
        mock_request.environ.get.return_value = True
        helpers.call_action("send_email_notifications")

    @pytest.mark.ckan_config("ckan.activity_streams_email_notifications", True)
    @mock.patch("ckan.logic.action.update.request")
    def test_not_calling_through_paster_validates_auth(self, mock_request):
        # A normal web request with ignore_auth disabled must be rejected.
        mock_request.environ.get.return_value = False
        with pytest.raises(logic.NotAuthorized):
            helpers.call_action(
                "send_email_notifications", context={"ignore_auth": False}
            )
@pytest.mark.ckan_config("ckan.plugins", "image_view")
@pytest.mark.usefixtures("clean_db", "with_plugins", "with_request_context")
class TestResourceViewUpdate(object):
    """Tests for ``resource_view_update`` and ``resource_view_reorder``."""

    def test_resource_view_update(self):
        """Title and description of a resource view can be updated."""
        resource_view = factories.ResourceView()
        params = {
            "id": resource_view["id"],
            "title": "new title",
            "description": "new description",
        }
        result = helpers.call_action("resource_view_update", **params)
        assert result["title"] == params["title"]
        assert result["description"] == params["description"]

    @mock.patch("ckan.lib.datapreview")
    def test_filterable_views_converts_filter_fields_and_values_into_filters_dict(
        self, datapreview_mock
    ):
        """Parallel filter_fields/filter_values lists become a filters dict.

        Repeated field names accumulate their values in order.
        """
        filterable_view = mock.MagicMock()
        filterable_view.info.return_value = {"filterable": True}
        datapreview_mock.get_view_plugin.return_value = filterable_view
        resource_view = factories.ResourceView()
        context = {}
        params = {
            "id": resource_view["id"],
            "filter_fields": ["country", "weather", "country"],
            "filter_values": ["Brazil", "warm", "Argentina"],
        }
        result = helpers.call_action("resource_view_update", context, **params)
        expected_filters = {
            "country": ["Brazil", "Argentina"],
            "weather": ["warm"],
        }
        assert result["filters"] == expected_filters

    def test_resource_view_update_requires_id(self):
        """Omitting the id raises ValidationError."""
        params = {}
        with pytest.raises(logic.ValidationError):
            helpers.call_action("resource_view_update", **params)

    def test_resource_view_update_requires_existing_id(self):
        """An unknown id raises NotFound."""
        params = {"id": "inexistent_id"}
        with pytest.raises(logic.NotFound):
            helpers.call_action("resource_view_update", **params)

    def test_resource_view_list_reorder(self):
        """A full ordering passed to resource_view_reorder is applied."""
        resource_view_1 = factories.ResourceView(title="View 1")
        resource_id = resource_view_1["resource_id"]
        resource_view_2 = factories.ResourceView(
            resource_id=resource_id, title="View 2"
        )
        resource_view_list = helpers.call_action(
            "resource_view_list", id=resource_id
        )
        assert resource_view_list[0]["title"] == "View 1"
        assert resource_view_list[1]["title"] == "View 2"
        # Reorder views
        result = helpers.call_action(
            "resource_view_reorder",
            id=resource_id,
            order=[resource_view_2["id"], resource_view_1["id"]],
        )
        assert result["order"] == [
            resource_view_2["id"],
            resource_view_1["id"],
        ]
        resource_view_list = helpers.call_action(
            "resource_view_list", id=resource_id
        )
        assert resource_view_list[0]["title"] == "View 2"
        assert resource_view_list[1]["title"] == "View 1"

    def test_resource_view_list_reorder_just_one_id(self):
        """Naming a single view moves it to the front of the order."""
        resource_view_1 = factories.ResourceView(title="View 1")
        resource_id = resource_view_1["resource_id"]
        resource_view_2 = factories.ResourceView(
            resource_id=resource_id, title="View 2"
        )
        # Reorder views just by specifying a single view to go first
        result = helpers.call_action(
            "resource_view_reorder",
            id=resource_id,
            order=[resource_view_2["id"]],
        )
        assert result["order"] == [
            resource_view_2["id"],
            resource_view_1["id"],
        ]
        resource_view_list = helpers.call_action(
            "resource_view_list", id=resource_id
        )
        assert resource_view_list[0]["title"] == "View 2"
        assert resource_view_list[1]["title"] == "View 1"

    def test_calling_with_only_id_doesnt_update_anything(self):
        """An update supplying only the id leaves the view unchanged."""
        resource_view = factories.ResourceView()
        params = {"id": resource_view["id"]}
        result = helpers.call_action("resource_view_update", **params)
        assert result == resource_view
@pytest.mark.ckan_config("ckan.plugins", "image_view recline_view")
@pytest.mark.usefixtures("clean_db", "with_plugins", "with_request_context")
class TestResourceUpdate(object):
    """Tests for the ``resource_update`` action.

    Each test checks both the action's return value and (where relevant)
    the state persisted by ``resource_show``/``package_show``.
    """

    def test_url_only(self):
        """Updating only the url changes it and persists the change."""
        dataset = factories.Dataset()
        resource = factories.Resource(package=dataset, url="http://first")
        res_returned = helpers.call_action(
            "resource_update", id=resource["id"], url="http://second"
        )
        assert res_returned["url"] == "http://second"
        resource = helpers.call_action("resource_show", id=resource["id"])
        assert resource["url"] == "http://second"

    def test_extra_only(self):
        """A custom (extra) resource field can be updated on its own."""
        dataset = factories.Dataset()
        resource = factories.Resource(package=dataset, newfield="first")
        res_returned = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url=resource["url"],
            newfield="second",
        )
        assert res_returned["newfield"] == "second"
        resource = helpers.call_action("resource_show", id=resource["id"])
        assert resource["newfield"] == "second"

    def test_both_extra_and_url(self):
        """Core (url) and extra fields can be updated in the same call."""
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://first", newfield="first"
        )
        res_returned = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://second",
            newfield="second",
        )
        assert res_returned["url"] == "http://second"
        assert res_returned["newfield"] == "second"
        resource = helpers.call_action("resource_show", id=resource["id"])
        # Fixed: assert against the re-fetched resource (previously this
        # re-checked res_returned, so persistence was never verified).
        assert resource["url"] == "http://second"
        assert resource["newfield"] == "second"

    def test_extra_gets_deleted_on_both_core_and_extra_update(self):
        """An extra omitted from the update payload is removed."""
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://first", newfield="first"
        )
        res_returned = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://second",
            anotherfield="second",
        )
        assert res_returned["url"] == "http://second"
        assert res_returned["anotherfield"] == "second"
        assert "newfield" not in res_returned
        resource = helpers.call_action("resource_show", id=resource["id"])
        # Fixed: assert against the re-fetched resource (previously these
        # re-checked res_returned, so persistence was never verified).
        assert resource["url"] == "http://second"
        assert resource["anotherfield"] == "second"
        assert "newfield" not in resource

    def test_extra_gets_deleted_on_extra_only_update(self):
        """Replacing one extra with another removes the old one."""
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://first", newfield="first"
        )
        res_returned = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://first",
            anotherfield="second",
        )
        assert res_returned["url"] == "http://first"
        assert res_returned["anotherfield"] == "second"
        assert "newfield" not in res_returned
        resource = helpers.call_action("resource_show", id=resource["id"])
        # Fixed: assert against the re-fetched resource (previously these
        # re-checked res_returned, so persistence was never verified).
        assert resource["url"] == "http://first"
        assert resource["anotherfield"] == "second"
        assert "newfield" not in resource

    def test_datastore_active_is_persisted_if_true_and_not_provided(self):
        """datastore_active=True survives an update that omits it."""
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://example.com", datastore_active=True
        )
        res_returned = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://example.com",
            name="Test",
        )
        assert res_returned["datastore_active"]

    def test_datastore_active_is_persisted_if_false_and_not_provided(self):
        """datastore_active=False survives an update that omits it."""
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://example.com", datastore_active=False
        )
        res_returned = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://example.com",
            name="Test",
        )
        assert not res_returned["datastore_active"]

    def test_datastore_active_is_updated_if_false_and_provided(self):
        """datastore_active can be flipped from False to True."""
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://example.com", datastore_active=False
        )
        res_returned = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://example.com",
            name="Test",
            datastore_active=True,
        )
        assert res_returned["datastore_active"]

    def test_datastore_active_is_updated_if_true_and_provided(self):
        """datastore_active can be flipped from True to False."""
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://example.com", datastore_active=True
        )
        res_returned = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://example.com",
            name="Test",
            datastore_active=False,
        )
        assert not res_returned["datastore_active"]

    def test_datastore_active_not_present_if_not_provided_and_not_datastore_plugin_enabled(
        self,
    ):
        """Without the datastore plugin the key is absent entirely."""
        assert not p.plugin_loaded("datastore")
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://example.com"
        )
        res_returned = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://example.com",
            name="Test",
        )
        assert "datastore_active" not in res_returned

    def test_mimetype_by_url(self, monkeypatch, tmpdir):
        """The mimetype is guessed from the url

        Real world usage would be externally linking the resource and
        the mimetype would be guessed, based on the url
        """
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://localhost/data.csv", name="Test"
        )
        monkeypatch.setattr(ckan.lib.uploader, "_storage_path", str(tmpdir))
        res_update = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://localhost/data.json",
        )
        org_mimetype = resource.pop("mimetype")
        upd_mimetype = res_update.pop("mimetype")
        assert org_mimetype != upd_mimetype
        assert upd_mimetype == "application/json"

    def test_mimetype_by_user(self):
        """
        The mimetype is supplied by the user

        Real world usage would be using the FileStore API or web UI form to create a resource
        and the user wanted to specify the mimetype themselves
        """
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://localhost/data.csv", name="Test"
        )
        res_update = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://localhost/data.csv",
            mimetype="text/plain",
        )
        org_mimetype = resource.pop("mimetype")
        upd_mimetype = res_update.pop("mimetype")
        assert org_mimetype != upd_mimetype
        assert upd_mimetype == "text/plain"

    @pytest.mark.ckan_config("ckan.mimetype_guess", "file_contents")
    def test_mimetype_by_upload_by_file(self, create_with_upload):
        """The mimetype is guessed from an uploaded file by the contents inside

        Real world usage would be using the FileStore API or web UI
        form to upload a file, that has no extension If the mimetype
        can't be guessed by the url or filename, mimetype will be
        guessed by the contents inside the file
        """
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset, url="http://localhost/data.csv", name="Test"
        )
        content = """
        Snow Course Name, Number, Elev. metres, Date of Survey, Snow Depth cm, Water Equiv. mm, Survey Code, % of Normal, Density %, Survey Period, Normal mm
        SKINS LAKE,1B05,890,2015/12/30,34,53,,98,16,JAN-01,54
        MCGILLIVRAY PASS,1C05,1725,2015/12/31,88,239,,87,27,JAN-01,274
        NAZKO,1C08,1070,2016/01/05,20,31,,76,16,JAN-01,41
        """
        res_update = create_with_upload(
            content, "update_test", action="resource_update",
            id=resource["id"], url="http://localhost",
            package_id=dataset["id"])
        org_mimetype = resource.pop("mimetype")
        upd_mimetype = res_update.pop("mimetype")
        assert org_mimetype != upd_mimetype
        assert upd_mimetype == "text/plain"

    def test_mimetype_by_upload_by_filename(self, create_with_upload):
        """The mimetype is guessed from an uploaded file with a filename

        Real world usage would be using the FileStore API or web UI
        form to upload a file, with a filename plus extension If
        there's no url or the mimetype can't be guessed by the url,
        mimetype will be guessed by the extension in the filename
        """
        content = """
        "info": {
            "title": "BC Data Catalogue API",
            "description": "This API provides information about datasets in the BC Data Catalogue.",
            "termsOfService": "http://www.data.gov.bc.ca/local/dbc/docs/license/API_Terms_of_Use.pdf",
            "contact": {
                "name": "Data BC",
                "url": "http://data.gov.bc.ca/",
                "email": ""
            },
            "license": {
                "name": "Open Government License - British Columbia",
                "url": "http://www.data.gov.bc.ca/local/dbc/docs/license/OGL-vbc2.0.pdf"
            },
            "version": "3.0.0"
        }
        """
        dataset = factories.Dataset()
        resource = create_with_upload(
            content, 'test.json',
            package_id=dataset['id'], url="http://localhost")
        content = """
        Snow Course Name, Number, Elev. metres, Date of Survey, Snow Depth cm, Water Equiv. mm, Survey Code, % of Normal, Density %, Survey Period, Normal mm
        SKINS LAKE,1B05,890,2015/12/30,34,53,,98,16,JAN-01,54
        MCGILLIVRAY PASS,1C05,1725,2015/12/31,88,239,,87,27,JAN-01,274
        NAZKO,1C08,1070,2016/01/05,20,31,,76,16,JAN-01,41
        """
        res_update = create_with_upload(
            content, "update_test.csv", action="resource_update",
            id=resource["id"], url="http://localhost",
            package_id=dataset['id'])
        org_mimetype = resource.pop("mimetype")
        upd_mimetype = res_update.pop("mimetype")
        assert org_mimetype != upd_mimetype
        assert upd_mimetype == "text/csv"

    def test_size_of_resource_by_user(self):
        """
        The size of the resource is provided by the users

        Real world usage would be using the FileStore API and the user provides a size for the resource
        """
        dataset = factories.Dataset()
        resource = factories.Resource(
            package=dataset,
            url="http://localhost/data.csv",
            name="Test",
            size=500,
        )
        res_update = helpers.call_action(
            "resource_update",
            id=resource["id"],
            url="http://localhost/data.csv",
            size=600,
        )
        org_size = int(resource.pop("size"))
        upd_size = int(res_update.pop("size"))
        assert org_size < upd_size

    def test_size_of_resource_by_upload(self, create_with_upload):
        """The size of the resource determined by the uploaded file
        """
        content = """
        "info": {
            "title": "BC Data Catalogue API",
            "description": "This API provides information about datasets in the BC Data Catalogue.",
            "termsOfService": "http://www.data.gov.bc.ca/local/dbc/docs/license/API_Terms_of_Use.pdf",
            "contact": {
                "name": "Data BC",
                "url": "http://data.gov.bc.ca/",
                "email": ""
            },
            "license": {
                "name": "Open Government License - British Columbia",
                "url": "http://www.data.gov.bc.ca/local/dbc/docs/license/OGL-vbc2.0.pdf"
            },
            "version": "3.0.0"
        }
        """
        dataset = factories.Dataset()
        resource = create_with_upload(
            content, 'test.json',
            package_id=dataset['id'], url="http://localhost")
        content = """
        Snow Course Name, Number, Elev. metres, Date of Survey, Snow Depth cm, Water Equiv. mm, Survey Code, % of Normal, Density %, Survey Period, Normal mm
        SKINS LAKE,1B05,890,2015/12/30,34,53,,98,16,JAN-01,54
        MCGILLIVRAY PASS,1C05,1725,2015/12/31,88,239,,87,27,JAN-01,274
        NAZKO,1C08,1070,2016/01/05,20,31,,76,16,JAN-01,41
        """
        res_update = create_with_upload(
            content, "update_test.csv", action="resource_update",
            id=resource["id"], url="http://localhost",
            package_id=dataset["id"])
        org_size = int(resource.pop("size"))  # 669 bytes
        upd_size = int(res_update.pop("size"))  # 358 bytes
        assert org_size > upd_size

    def test_extras(self):
        """Free-form extra keys round-trip; an 'extras' dict is dropped."""
        user = factories.User()
        dataset = factories.Dataset(
            user=user,
            resources=[dict(format=u"json", url=u"http://datahub.io/")],
        )
        resource = helpers.call_action(
            "resource_update",
            id=dataset["resources"][0]["id"],
            somekey="somevalue",  # this is how to do resource extras
            extras={u"someotherkey": u"alt234"},  # this isnt
            format=u"plain text",
            url=u"http://datahub.io/download/",
        )
        assert resource["somekey"] == "somevalue"
        assert "extras" not in resource
        assert "someotherkey" not in resource
        resource = helpers.call_action("package_show", id=dataset["id"])[
            "resources"
        ][0]
        assert resource["somekey"] == "somevalue"
        assert "extras" not in resource
        assert "someotherkey" not in resource

    @pytest.mark.ckan_config(
        "ckan.views.default_views", "image_view recline_view"
    )
    def test_resource_format_update(self):
        """Changing a resource's format creates the matching default views."""
        dataset = factories.Dataset()
        # Create resource without format
        resource = factories.Resource(
            package=dataset, url="http://localhost", name="Test"
        )
        res_views = helpers.call_action(
            "resource_view_list", id=resource["id"]
        )
        assert len(res_views) == 0
        # Update resource with format
        resource = helpers.call_action(
            "resource_update", id=resource["id"], format="CSV"
        )
        # Format changed
        assert resource["format"] == "CSV"
        res_views = helpers.call_action(
            "resource_view_list", id=resource["id"]
        )
        # View for resource is created
        assert len(res_views) == 1
        second_resource = factories.Resource(
            package=dataset, url="http://localhost", name="Test2", format="CSV"
        )
        res_views = helpers.call_action(
            "resource_view_list", id=second_resource["id"]
        )
        assert len(res_views) == 1
        second_resource = helpers.call_action(
            "resource_update", id=second_resource["id"], format="PNG"
        )
        # Format changed
        assert second_resource["format"] == "PNG"
        res_views = helpers.call_action(
            "resource_view_list", id=second_resource["id"]
        )
        assert len(res_views) == 2
        third_resource = factories.Resource(
            package=dataset, url="http://localhost", name="Test2"
        )
        res_views = helpers.call_action(
            "resource_view_list", id=third_resource["id"]
        )
        assert len(res_views) == 0
        third_resource = helpers.call_action(
            "resource_update", id=third_resource["id"], format="Test format"
        )
        # Format added
        assert third_resource["format"] == "Test format"
        res_views = helpers.call_action(
            "resource_view_list", id=third_resource["id"]
        )
        # No view created, cause no such format
        assert len(res_views) == 0
        third_resource = helpers.call_action(
            "resource_update", id=third_resource["id"], format="CSV"
        )
        # Format changed
        assert third_resource["format"] == "CSV"
        res_views = helpers.call_action(
            "resource_view_list", id=third_resource["id"]
        )
        # View is created
        assert len(res_views) == 1

    def test_edit_metadata_updates_metadata_modified_field(self):
        """Changing a field bumps metadata_modified to the current time."""
        dataset = factories.Dataset()
        resource = factories.Resource(package_id=dataset['id'])
        with freeze_time('2020-02-25 12:00:00'):
            resource = helpers.call_action(
                "resource_update",
                id=resource["id"],
                description='New Description',
            )
            assert resource['metadata_modified'] == '2020-02-25T12:00:00'

    def test_same_values_dont_update_metadata_modified_field(self):
        """Re-posting identical values leaves metadata_modified untouched."""
        dataset = factories.Dataset()
        with freeze_time('1987-03-04 23:30:00'):
            resource = factories.Resource(
                package_id=dataset['id'],
                description='Test',
                some_custom_field='test',
            )
            assert (resource['metadata_modified'] ==
                    datetime.datetime.utcnow().isoformat())
        with freeze_time('2020-02-25 12:00:00'):
            resource = helpers.call_action(
                "resource_update",
                id=resource["id"],
                description='Test',
                some_custom_field='test',
                url='http://link.to.some.data'  # Default Value from Factory
            )
            assert (resource['metadata_modified'] !=
                    datetime.datetime.utcnow().isoformat())
            assert (resource['metadata_modified'] ==
                    '1987-03-04T23:30:00')

    def test_new_keys_update_metadata_modified_field(self):
        """Adding a new custom key bumps metadata_modified."""
        dataset = factories.Dataset()
        with freeze_time('1987-03-04 23:30:00'):
            resource = factories.Resource(package_id=dataset['id'], description='test')
            assert (resource['metadata_modified'] ==
                    datetime.datetime.utcnow().isoformat())
        with freeze_time('2020-02-25 12:00:00'):
            resource = helpers.call_action(
                "resource_update",
                id=resource["id"],
                description='test',
                some_custom_field='test',
                url='http://link.to.some.data'  # default value from factory
            )
            assert (resource['metadata_modified'] ==
                    datetime.datetime.utcnow().isoformat())
            assert (resource['metadata_modified'] ==
                    '2020-02-25T12:00:00')

    def test_remove_keys_update_metadata_modified_field(self):
        """Removing a custom key bumps metadata_modified."""
        dataset = factories.Dataset()
        with freeze_time('1987-03-04 23:30:00'):
            resource = factories.Resource(
                package_id=dataset['id'],
                description='test',
                some_custom_field='test',
            )
            assert (resource['metadata_modified'] ==
                    datetime.datetime.utcnow().isoformat())
        with freeze_time('2020-02-25 12:00:00'):
            resource = helpers.call_action(
                "resource_update",
                id=resource["id"],
                description='test',
                url='http://link.to.some.data'  # default value from factory
            )
            assert (resource['metadata_modified'] ==
                    datetime.datetime.utcnow().isoformat())
            assert (resource['metadata_modified'] ==
                    '2020-02-25T12:00:00')

    def test_update_keys_update_metadata_modified_field(self):
        """Changing a custom key's value bumps metadata_modified."""
        dataset = factories.Dataset()
        with freeze_time('1987-03-04 23:30:00'):
            resource = factories.Resource(
                package_id=dataset['id'],
                description='test',
                some_custom_field='test',
            )
            assert (resource['metadata_modified'] ==
                    datetime.datetime.utcnow().isoformat())
        with freeze_time('2020-02-25 12:00:00'):
            resource = helpers.call_action(
                "resource_update",
                id=resource["id"],
                description='test',
                some_custom_field='test2',
                url='http://link.to.some.data'  # default value from factory
            )
            assert (resource['metadata_modified'] ==
                    datetime.datetime.utcnow().isoformat())
            assert (resource['metadata_modified'] ==
                    '2020-02-25T12:00:00')
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestConfigOptionUpdate(object):
    """Tests for the ``config_option_update`` action.

    NOTE: the opposite case is covered in
    ckan/ckanext/example_iconfigurer/tests/test_iconfigurer_update_config.py,
    since enabling an external config option requires an extension.
    """

    def test_app_globals_set_if_defined(self):
        """Updating a known config option mirrors it onto app_globals."""
        option, new_value = "ckan.site_title", "Test site title"
        helpers.call_action("config_option_update", **{option: new_value})
        attr = app_globals.get_globals_key(option)
        assert hasattr(app_globals.app_globals, attr)
        assert getattr(app_globals.app_globals, attr) == new_value
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestUserUpdate(object):
    """Sysadmin-only and general behaviour of ``user_update``."""

    def test_user_update_with_password_hash(self):
        """A sysadmin may set the stored password hash directly."""
        admin = factories.Sysadmin()
        updated = helpers.call_action(
            "user_update",
            context={"user": admin["name"]},
            email="test@example.com",
            id=admin["name"],
            password_hash="pretend-this-is-a-valid-hash",
        )
        stored = model.User.get(updated["id"])
        assert stored.password == "pretend-this-is-a-valid-hash"

    def test_user_create_password_hash_not_for_normal_users(self):
        """Non-sysadmins cannot set the hash; the password is re-hashed."""
        regular = factories.User()
        updated = helpers.call_action(
            "user_update",
            context={"user": regular["name"], "ignore_auth": False},
            email="test@example.com",
            id=regular["name"],
            password="required",
            password_hash="pretend-this-is-a-valid-hash",
        )
        stored = model.User.get(updated["id"])
        assert stored.password != "pretend-this-is-a-valid-hash"

    def test_user_update_image_url(self):
        """The user's image_url field can be updated."""
        subject = factories.User(image_url='user_image.jpg')
        updated = helpers.call_action(
            "user_update",
            context={"user": subject["name"]},
            id=subject["name"],
            email="test@example.com",
            image_url="new_image_url.jpg",
        )
        assert updated["image_url"] == "new_image_url.jpg"
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestGroupUpdate(object):
    def test_group_update_image_url_field(self):
        """group_update can replace the group's image_url."""
        member = factories.User()
        grp = factories.Group(
            type="group",
            name="testing",
            user=member,
            image_url='group_image.jpg')

        grp = helpers.call_action(
            "group_update",
            context={"user": member["name"]},
            id=grp["id"],
            name=grp["name"],
            type=grp["type"],
            image_url="new_image_url.jpg"
        )

        assert grp["image_url"] == "new_image_url.jpg"

    def test_group_update_cant_change_type(self):
        """The ``type`` field is immutable: an attempted change is ignored."""
        member = factories.User()
        grp = factories.Group(type="group", name="unchanging", user=member)

        grp = helpers.call_action(
            "group_update",
            context={"user": member["name"]},
            id=grp["id"],
            name="unchanging",
            type="favouritecolour",
        )

        assert grp["type"] == "group"
        shown = helpers.call_action("group_show", id="unchanging")
        assert shown["type"] == "group"
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestPackageOwnerOrgUpdate(object):
    def _update_owner_org(self, username, dataset_id, organization_id):
        # Invoke the action under test on behalf of ``username``.
        helpers.call_action(
            "package_owner_org_update",
            context={"user": username},
            id=dataset_id,
            organization_id=organization_id,
        )

    def test_package_owner_org_added(self):
        """A package without an owner_org can have one added."""
        admin = factories.Sysadmin()
        org = factories.Organization()
        dataset = factories.Dataset()
        assert dataset["owner_org"] is None

        self._update_owner_org(admin["name"], dataset["id"], org["id"])

        assert model.Package.get(dataset["id"]).owner_org == org["id"]

    def test_package_owner_org_changed(self):
        """A package with an owner_org can have it changed."""
        admin = factories.Sysadmin()
        first_org = factories.Organization()
        second_org = factories.Organization()
        dataset = factories.Dataset(owner_org=first_org["id"])
        assert dataset["owner_org"] == first_org["id"]

        self._update_owner_org(admin["name"], dataset["id"], second_org["id"])

        assert model.Package.get(dataset["id"]).owner_org == second_org["id"]

    def test_package_owner_org_removed(self):
        """A package with an owner_org can have it removed."""
        admin = factories.Sysadmin()
        org = factories.Organization()
        dataset = factories.Dataset(owner_org=org["id"])
        assert dataset["owner_org"] == org["id"]

        self._update_owner_org(admin["name"], dataset["id"], None)

        assert model.Package.get(dataset["id"]).owner_org is None
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestBulkOperations(object):
    def _two_datasets(self, org, **kwargs):
        # Create two datasets belonging to ``org`` and return their ids.
        return [
            factories.Dataset(owner_org=org["id"], **kwargs)["id"]
            for _ in range(2)
        ]

    def _indexed(self, org_id):
        # The org's datasets as seen by the search index.
        return helpers.call_action(
            "package_search", {}, q="owner_org:{0}".format(org_id)
        )["results"]

    def _stored(self, org_id):
        # The org's datasets as stored in the database.
        return (
            model.Session.query(model.Package)
            .filter(model.Package.owner_org == org_id)
            .all()
        )

    def test_bulk_make_private(self):
        """bulk_update_private flips every dataset to private (index + DB)."""
        org = factories.Organization()
        dataset_ids = self._two_datasets(org)

        helpers.call_action(
            "bulk_update_private",
            {},
            datasets=dataset_ids,
            org_id=org["id"],
        )

        for indexed in self._indexed(org["id"]):
            assert indexed["private"]
        for stored in self._stored(org["id"]):
            assert stored.private

    def test_bulk_make_public(self):
        """bulk_update_public flips every dataset to public and logs activity."""
        org = factories.Organization()
        dataset_ids = self._two_datasets(org, private=True)

        helpers.call_action(
            "bulk_update_public",
            {},
            datasets=dataset_ids,
            org_id=org["id"],
        )

        for indexed in self._indexed(org["id"]):
            assert not indexed["private"]
        for stored in self._stored(org["id"]):
            assert not stored.private

        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert activities[0]['activity_type'] == 'changed package'

    def test_bulk_delete(self):
        """bulk_update_delete removes datasets from the index, soft-deletes
        them in the DB, and logs a deletion activity."""
        org = factories.Organization()
        dataset_ids = self._two_datasets(org)

        helpers.call_action(
            "bulk_update_delete",
            {},
            datasets=dataset_ids,
            org_id=org["id"],
        )

        assert self._indexed(org["id"]) == []
        for stored in self._stored(org["id"]):
            assert stored.state == "deleted"

        activities = helpers.call_action(
            "organization_activity_list", id=org["id"]
        )
        assert activities[0]['activity_type'] == 'deleted package'
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestDashboardMarkActivitiesOld(object):
    def _new_count(self, user_id):
        # Number of not-yet-seen activities on the user's dashboard.
        return helpers.call_action(
            "dashboard_new_activities_count", context={"user": user_id}
        )

    def _activity_flags(self, user_id):
        # Oldest-first (activity_type, is_new) pairs from the dashboard.
        stream = helpers.call_action(
            "dashboard_activity_list", context={"user": user_id}
        )
        return [
            (item["activity_type"], item["is_new"]) for item in stream[::-1]
        ]

    def test_mark_as_old_some_activities_by_a_followed_user(self):
        """dashboard_mark_activities_old clears every is_new flag and the
        new-activities count."""
        # Activity that will show up on the user's own dashboard.
        user = factories.User()

        # Activity that is "new" because it is produced by a followed user.
        followed_user = factories.User()
        helpers.call_action(
            "follow_user", context={"user": user["name"]}, **followed_user
        )
        dataset = factories.Dataset(user=followed_user)
        dataset["title"] = "Dataset with changed title"
        helpers.call_action(
            "package_update",
            context={"user": followed_user["name"]},
            **dataset
        )

        assert self._new_count(user["id"]) == 3
        assert self._activity_flags(user["id"]) == [
            ("new user", False),
            ("new user", True),
            ("new package", True),
            ("changed package", True),
        ]

        helpers.call_action(
            "dashboard_mark_activities_old", context={"user": user["name"]}
        )

        assert self._new_count(user["id"]) == 0
        assert self._activity_flags(user["id"]) == [
            ("new user", False),
            ("new user", False),
            ("new package", False),
            ("changed package", False),
        ]
@pytest.mark.usefixtures("clean_db", "with_request_context")
@pytest.mark.ckan_config('ckan.auth.allow_dataset_collaborators', True)
class TestCollaboratorsUpdate(object):
    def _add_collaborator(self, dataset_id, user_id, capacity):
        # Register ``user_id`` as a collaborator on the given dataset.
        helpers.call_action(
            'package_collaborator_create',
            id=dataset_id, user_id=user_id, capacity=capacity)

    @pytest.mark.ckan_config('ckan.auth.allow_admin_collaborators', True)
    @pytest.mark.parametrize('role', ['admin', 'editor'])
    def test_collaborators_can_update_resources(self, role):
        """Admin and editor collaborators may edit a dataset's resources."""
        owner_org = factories.Organization()
        dataset = factories.Dataset(owner_org=owner_org['id'])
        res = factories.Resource(package_id=dataset['id'])
        collaborator = factories.User()

        self._add_collaborator(dataset['id'], collaborator['id'], role)

        updated = helpers.call_action(
            'resource_update',
            context={'user': collaborator['name'], 'ignore_auth': False},
            id=res['id'],
            description='updated')

        assert updated['description'] == 'updated'

    def test_collaborators_can_not_change_owner_org_by_default(self):
        """By default a collaborator may not move a dataset to another org,
        even one they administer."""
        owner_org = factories.Organization()
        dataset = factories.Dataset(owner_org=owner_org['id'])
        collaborator = factories.User()
        other_org = factories.Organization(
            users=[{'name': collaborator['id'], 'capacity': 'admin'}])

        self._add_collaborator(dataset['id'], collaborator['id'], 'editor')

        dataset['owner_org'] = other_org['id']

        with pytest.raises(logic.ValidationError) as e:
            helpers.call_action(
                'package_update',
                context={'user': collaborator['name'], 'ignore_auth': False},
                **dataset)

        assert e.value.error_dict['owner_org'] == [
            'You cannot move this dataset to another organization']

    @pytest.mark.ckan_config('ckan.auth.allow_collaborators_to_change_owner_org', True)
    def test_collaborators_can_change_owner_org_if_config_true(self):
        """With the config flag on, a collaborator may change the owner org."""
        owner_org = factories.Organization()
        dataset = factories.Dataset(owner_org=owner_org['id'])
        collaborator = factories.User()
        other_org = factories.Organization(
            users=[{'name': collaborator['id'], 'capacity': 'admin'}])

        self._add_collaborator(dataset['id'], collaborator['id'], 'editor')

        dataset['owner_org'] = other_org['id']

        updated = helpers.call_action(
            'package_update',
            context={'user': collaborator['name'], 'ignore_auth': False},
            **dataset)

        assert updated['owner_org'] == other_org['id']

    @pytest.mark.ckan_config('ckan.auth.allow_collaborators_to_change_owner_org', True)
    def test_editors_can_change_owner_org_even_if_collaborators(self):
        """A user with membership in both orgs can move the dataset even
        while also being a collaborator on it."""
        member = factories.User()
        first_org = factories.Organization(
            users=[{'name': member['id'], 'capacity': 'admin'}])
        dataset = factories.Dataset(owner_org=first_org['id'])
        second_org = factories.Organization(
            users=[{'name': member['id'], 'capacity': 'admin'}])

        self._add_collaborator(dataset['id'], member['id'], 'editor')

        dataset['owner_org'] = second_org['id']

        updated = helpers.call_action(
            'package_update',
            context={'user': member['name'], 'ignore_auth': False},
            **dataset)

        assert updated['owner_org'] == second_org['id']
@pytest.mark.usefixtures("clean_db", "with_request_context")
class TestDatasetRevise(object):
    """Tests for the package_revise action (match / filter / update)."""

    def test_revise_description(self):
        """A dataset matching the ``match`` dict has its notes replaced."""
        factories.Dataset(name='xyz', notes='old notes')

        result = helpers.call_action(
            'package_revise',
            match={'name': 'xyz', 'notes': 'old notes'},
            update={'notes': 'new notes'},
        )

        assert result['package']['notes'] == 'new notes'

    def test_revise_failed_match(self):
        """A mismatched ``match`` dict raises ValidationError."""
        factories.Dataset(name='xyz', notes='old notes')

        revise_args = dict(
            match={'name': 'xyz', 'notes': 'wrong notes'},
            update={'notes': 'new notes'},
        )
        with pytest.raises(logic.ValidationError):
            helpers.call_action('package_revise', **revise_args)

    def test_revise_description_flattened(self):
        """``match__*`` / ``update__*`` flattened keys work like the dicts."""
        factories.Dataset(name='xyz', notes='old notes')

        result = helpers.call_action(
            'package_revise',
            match__notes='old notes',
            match__name='xyz',
            update__notes='new notes',
        )

        assert result['package']['notes'] == 'new notes'

    def test_revise_dataset_fields_only(self):
        """A filter can wipe every field except resources before updating."""
        dataset = factories.Dataset(
            name='xyz',
            notes='old notes',
            resources=[{'url': 'http://example.com'}])

        result = helpers.call_action(
            'package_revise',
            match={'id': dataset['id']},
            filter=[
                '+resources',  # keep everything under resources
                '-*',  # remove everything else
            ],
            update={'name': 'fresh-start', 'title': 'Fresh Start'},
        )

        revised = result['package']
        assert revised['notes'] is None
        assert revised['name'] == 'fresh-start'
        assert revised['resources'][0]['url'] == 'http://example.com'

    def test_revise_add_resource(self):
        """``update__resources__extend`` appends new resources."""
        dataset = factories.Dataset()

        result = helpers.call_action(
            'package_revise',
            match={'id': dataset['id']},
            update__resources__extend=[{'name': 'new resource', 'url': 'http://example.com'}],
        )

        assert result['package']['resources'][0]['name'] == 'new resource'

    def test_revise_resource_by_index(self):
        """A resource can be addressed by its numeric position."""
        dataset = factories.Dataset(resources=[{'url': 'http://example.com'}])

        result = helpers.call_action(
            'package_revise',
            match={'id': dataset['id']},
            update__resources__0={'name': 'new name'},
        )

        assert result['package']['resources'][0]['name'] == 'new name'

    def test_revise_resource_by_id(self):
        """A resource can be addressed by an id prefix (>4 characters)."""
        dataset = factories.Dataset(resources=[{
            'id': '34a12bc-1420-cbad-1922',
            'url': 'http://example.com',
            'name': 'old name',
        }])

        result = helpers.call_action(
            'package_revise',
            match={'id': dataset['id']},
            update__resources__34a12={'name': 'new name'},  # prefixes allowed >4 chars
        )

        assert result['package']['resources'][0]['name'] == 'new name'

    def test_revise_resource_replace_all(self):
        """Filtering away a resource's fields before updating clears them."""
        dataset = factories.Dataset(resources=[{
            'id': '34a12bc-1420-cbad-1922',
            'url': 'http://example.com',
            'name': 'old name',
        }])

        result = helpers.call_action(
            'package_revise',
            match={'id': dataset['id']},
            filter=['+resources__34a12__id', '-resources__34a12__*'],
            update__resources__34a12={'name': 'new name'},
        )

        revised = result['package']
        assert revised['resources'][0]['name'] == 'new name'
        assert revised['resources'][0]['url'] == ''

    def test_revise_normal_user(self):
        """package_revise is usable by non-sysadmin users with edit rights."""
        editor = factories.User()
        org = factories.Organization(
            users=[{'name': editor['id'], 'capacity': 'admin'}])
        dataset = factories.Dataset(owner_org=org['id'])

        result = helpers.call_action(
            'package_revise',
            match={'id': dataset['id']},
            update={'notes': 'new notes'},
            context={'user': editor['name'], 'ignore_auth': False},
        )

        assert result['package']['notes'] == 'new notes'
@pytest.mark.usefixtures("clean_db")
class TestUserPluginExtras(object):
    """``plugin_extras`` handling on user_update.

    The field is sysadmin-only: sysadmins may read and write it, while
    normal users must not be able to set it or see it echoed back.
    """

    def test_stored_on_update_if_sysadmin(self):
        """Sysadmins can update plugin_extras; the new value is returned by
        user_update and user_show, and is persisted in the database."""
        sysadmin = factories.Sysadmin()

        user = factories.User(
            plugin_extras={
                'plugin1': {
                    'key1': 'value1'
                }
            }
        )

        user['plugin_extras'] = {
            'plugin1': {
                'key1': 'value1.2',
                'key2': 'value2'
            }
        }

        # The value we expect to see everywhere after the update.
        expected_extras = {
            'plugin1': {
                'key1': 'value1.2',
                'key2': 'value2',
            }
        }

        # helpers.call_action sets 'ignore_auth' to True by default
        context = {'user': sysadmin['name'], 'ignore_auth': False}
        updated_user = helpers.call_action(
            'user_update', context=context, **user)

        assert updated_user['plugin_extras'] == expected_extras

        context = {'user': sysadmin['name'], 'ignore_auth': False}
        user = helpers.call_action(
            'user_show', context=context, id=user['id'], include_plugin_extras=True)

        # Fixed: this assertion previously re-checked ``updated_user``, so
        # the user_show response was never actually verified.
        assert user['plugin_extras'] == expected_extras

        plugin_extras_from_db = model.Session.execute(
            'SELECT plugin_extras FROM "user" WHERE id=:id',
            {'id': user['id']}
        ).first().values()[0]

        assert plugin_extras_from_db == expected_extras

    def test_ignored_on_update_if_non_sysadmin(self):
        """A normal user's attempt to change plugin_extras is ignored and
        the original value is preserved."""
        sysadmin = factories.Sysadmin()

        user = factories.User(
            plugin_extras={
                'plugin1': {
                    'key1': 'value1'
                }
            }
        )

        user['plugin_extras'] = {
            'plugin1': {
                'key1': 'value1.2',
                'key2': 'value2'
            }
        }

        # User edits themselves
        context = {'user': user['name'], 'ignore_auth': False}
        created_user = helpers.call_action(
            'user_update', context=context, **user)

        # Non-sysadmins never see plugin_extras in the response.
        assert 'plugin_extras' not in created_user

        context = {'user': sysadmin['name'], 'ignore_auth': False}
        user = helpers.call_action(
            'user_show', context=context, id=created_user['id'], include_plugin_extras=True)

        # The stored value is unchanged by the non-sysadmin update.
        assert user['plugin_extras'] == {
            'plugin1': {
                'key1': 'value1'
            }
        }

    def test_ignored_on_update_if_non_sysadmin_when_empty(self):
        """A normal user cannot create plugin_extras where none existed."""
        sysadmin = factories.Sysadmin()

        user = factories.User()

        user['plugin_extras'] = {
            'plugin1': {
                'key1': 'value1.2',
                'key2': 'value2'
            }
        }

        # User edits themselves
        context = {'user': user['name'], 'ignore_auth': False}
        created_user = helpers.call_action(
            'user_update', context=context, **user)

        assert 'plugin_extras' not in created_user

        context = {'user': sysadmin['name'], 'ignore_auth': False}
        user = helpers.call_action(
            'user_show', context=context, id=created_user['id'], include_plugin_extras=True)

        assert user['plugin_extras'] is None

    def test_nested_updates_are_reflected_in_db(self):
        """Mutating a nested key inside plugin_extras is persisted."""
        user = factories.User(
            plugin_extras={
                'plugin1': {
                    'key1': 'value1'
                }
            }
        )

        sysadmin = factories.Sysadmin()
        context = {'user': sysadmin['name']}

        user = helpers.call_action(
            'user_show', context=context, id=user['id'], include_plugin_extras=True)

        user['plugin_extras']['plugin1']['key1'] = 'value2'

        updated_user = helpers.call_action('user_update', context=context, **user)

        assert updated_user['plugin_extras']['plugin1']['key1'] == 'value2'

        # Hold on, partner: confirm the nested change reached the database.
        plugin_extras = model.Session.execute(
            'SELECT plugin_extras FROM "user" WHERE id=:id',
            {'id': user['id']}
        ).first().values()[0]

        assert plugin_extras['plugin1']['key1'] == 'value2'
| 33.930969 | 157 | 0.57912 |
acf9d1eec1eea549851b0a00e0ed82bb84dfa933 | 1,257 | py | Python | random-py-scripts/generate_file_in_location.py | carlosperate/microbit-programs | 6500b6a80a8ab04204d3447d9de0f1115a89b95e | [
"MIT"
] | null | null | null | random-py-scripts/generate_file_in_location.py | carlosperate/microbit-programs | 6500b6a80a8ab04204d3447d9de0f1115a89b95e | [
"MIT"
] | null | null | null | random-py-scripts/generate_file_in_location.py | carlosperate/microbit-programs | 6500b6a80a8ab04204d3447d9de0f1115a89b95e | [
"MIT"
] | null | null | null | """
This script is used to write a file in a specific flash location.
It overwrites the file continuously until it falls on the right place.
Assumes the given address to check for is the 1st byte of a file chunk.
"""
import machine
import os
from microbit import *

# Configure Me ---------------------------------------------------------
file_start_address = 0x38c00
file_name = 'two_chunks.py'
file_content = 'a = """abcdefghijklmnopqrstuvwxyz\n' + \
               'abcdefghijklmnopqrstuvwxyz\n' + \
               'abcdefghijklmnopqrstuvwxyz\n' + \
               'abcdefghijklmnopqrst"""\n'

# Code starts ----------------------------------------------------------
# Keep rewriting the file until the byte at the target flash address is
# 0xfe, i.e. the first byte of a filesystem chunk lands there.
marker = machine.mem8[file_start_address]
attempt = 1
while marker != 0xfe:
    # Rewrite the file; each pass may relocate it within the filesystem.
    with open(file_name, 'w') as f:
        f.write(file_content)

    # Optional: write-and-delete a tiny file to shift the next pass by one
    # chunk, so successive attempts don't cycle over the same spots:
    # with open('small_file_to_delete.py', 'w') as f:
    #     f.write('hello')
    # os.remove('small_file_to_delete.py')

    marker = machine.mem8[file_start_address]
    attempt += 1
    print('{}: {}'.format(attempt, marker))

print(marker)
display.show(Image.HAPPY) | 35.914286 | 77 | 0.6428 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.